diff --git "a/445.jsonl" "b/445.jsonl" new file mode 100644--- /dev/null +++ "b/445.jsonl" @@ -0,0 +1,732 @@ +{"seq_id":"501128730","text":"from urllib import request\nfrom bs4 import BeautifulSoup\nimport re\n#百度百科的\n# resUrl=\"https://baike.baidu.com/item/Python/407313?fr=aladdin\"\n#东方财富网站的\nresUrl=\"http://tianqi.2345.com/heyuan/59293.htm\"\n#response=request.urlopen(resUrl)\nreq=request.Request(resUrl)\n#模拟浏览器去请求\nreq.add_header(\"User-Agent\",\"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0\")\nresponse=request.urlopen(req)\nif response.getcode() == 200:\n html_doc=response.read()\n soup=BeautifulSoup(html_doc,\"html.parser\",from_encoding=\"gbk\")\n # links=soup.find_all('a',target=\"_blank\",text=re.compile(r\"人民日报\"))\n links = soup.find_all('a')\n # print(soup.find_all('a'))\n base_url=\"https://baike.baidu.com/item\"\n\n for link in links:\n # print(link.get_text()+\":\"+base_url+link[\"href\"])\n # if link.get_text()== \"华为\":\n # print(link.get_text() + \":\" + base_url + link[\"href\"])\n print(link.get_text() + \":\" + link[\"href\"])\n # print(response.read().decode(\"utf8\"))","sub_path":"webcrawler/du_python_crawdler.py","file_name":"du_python_crawdler.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"454192263","text":"# -*- coding: utf-8 -*-\nclass SimpleListModel(QtGui.QStandardItemModel):\n def __init__(self, mlist):\n QtGui.QStandardItemModel.__init__(self)\n self.setColumnCount(5)\n\n # Cache the passed data list as a class member.\n self._items = mlist\n\n # We need to tell the view how many rows we have present in our data.\n # For us, at least, it's fairly straightforward, as we have a python list of data,\n # so we can just return the length of that list.\n def rowCount(self, parent = QtCore.QModelIndex()):\n return len(self._items)\n\n # Here, it's a little more complex.\n # data() is where the view asks us for all sorts of information about our data:\n # this can be purely informational (the data itself), as well as all sorts of 'extras'\n # such as how the data should be presented.\n #\n # For the sake of keeping it simple, I'm only going to show you the data, and one presentational\n # aspect.\n #\n # For more information on what kind of data the views can ask for, take a look at:\n # http://doc.trolltech.com/4.6/qabstractitemmodel.html#data\n #\n # Oh, and just to clarify: when it says 'invalid QVariant', it means a null QVariant.\n # i.e. 
QVariant().\n #\n # 'index' is of type QModelIndex, which actually has a whole host of stuff, but we\n # only really care about the row number for the sake of this tutorial.\n # For more information, see:\n # http://doc.trolltech.com/4.6/qmodelindex.html\n def data(self, index, role = Qt.DisplayRole):\n if role == Qt.DisplayRole:\n # The view is asking for the actual data, so, just return the item it's asking for.\n tr = self._items[index.row()]\n data = (tr.title, tr.artist, tr.album)\n return data\n elif role == Qt.BackgroundRole:\n # Here, it's asking for some background decoration.\n # Let's mix it up a bit: mod the row number to get even or odd, and return different\n # colours depending.\n # (you can, and should, more easily do this using this:\n # http://doc.trolltech.com/4.6/qabstractitemview.html#alternatingRowColors-prop\n # but I deliberately chose to show that you can put your own logic/processing here.)\n #\n # Exercise for the reader: make it print different colours for each row.\n # Implementation is up to you.\n if index.row() % 2 == 0:\n return QtGui.QColor(Qt.gray)\n else:\n return QtGui.QColor(Qt.lightGray)\n else:\n # We don't care about anything else, so make sure to return an empty QVariant.\n return None\n \n \nclass TreeNode(object):\n\tdef __init__(self, parent, row):\n\t\tself.parent = parent\n\t\tself.row = row\n\t\tself.subnodes = self._getChildren()\n\n\tdef _getChildren(self):\n\t\traise NotImplementedError()\t\n\n\tdef append(self, elt):\n\t\tnode = TreeNode(self, elt)\n\t\tself.subnodes.append(node)\n\t\treturn node\n \nclass TreeModel(QAbstractItemModel):\n\tdef __init__(self):\n\t\tQAbstractItemModel.__init__(self)\n\t\tself.rootNodes = self._getRootNodes()\n\n\t# à implémenter par la future classe fille\n\tdef _getRootNodes(self):\n\t\traise NotImplementedError()\n\n\t# cette méthode héritée de QAbstractItemModel doit retourner\n\t# l'indice de l'enregistrement en entrée moyennant le parent (un QModelIndex)\n\t# c.f. paragraph suivant pour plus d'explications.\n\tdef index(self, row, column, parent):\n\t\t# si l'indice du parent est invalide\n\t\tif not parent.isValid():\n\t\t\treturn self.createIndex(row, column, self.rootNodes[row])\n\t\tparentNode = parent.internalPointer()\n\t\treturn self.createIndex(row, column, parentNode.subnodes[row])\n\n\t\t# cette méthode héritée de QAbstractItemModel doit retourner\n\t\t# l'indice du parent de l'indice donné en paramètre\n\t\t# ou un indice invalide (QModelIndex()) si le noeud n'a pas de parent\n\t\t# ou si la requête est incorrecte\n\t\t# c.f. 
paragraph suivant pour plus d'explications.\n\tdef parent(self, index):\n\t\tif not index.isValid():\n\t\t\treturn QModelIndex()\n\t\t# on récupère l'objet sous-jacent avec la méthode internalPointer de l'indice\n\t\tnode = index.internalPointer()\n\t\tif node.parent is None:\n\t\t\treturn QModelIndex()\n\t\telse:\n\t\t\t# si tout est valide alors on crée l'indice associé pointant vers le parent\n\t\t\treturn self.createIndex(node.parent.row, 0, node.parent)\n\n\tdef reset(self):\n\t\tself.rootNodes = self._getRootNodes()\n\t\tQAbstractItemModel.reset(self)\n\n\tdef rowCount(self, parent):\n\t\tif not parent.isValid():\n\t\t\treturn len(self.rootNodes)\n\t\tnode = parent.internalPointer()\n\t\treturn len(node.subnodes)\n\nclass LibraryItem:\n\tdef __init__(self, icon, ID, label, playcount, rating, burn, rounded_burn, is_separator):\n\t\tself.icon = icon\n\t\tself.ID = ID\n\t\tself.label = label\n\t\tself.playcount = playcount\n\t\tself.rating = rating\n\t\tself.burn = burn\n\t\tself.rounded_burn = rounded_burn\n\t\tself.is_separator = is_separator\n\t\t\n\t\tself.subelements = []\n\t\t\nclass NamedElement(object): # notre structure interne pour gérer les objets\n def __init__(self, name, subelements=[]):\n self.name = name\n\n# notre noeud concret implémentant getChildren\nclass NamedNode(TreeNode):\n def __init__(self, ref, parent, row):\n self.ref = ref\n TreeNode.__init__(self, parent, row)\n\n\t# renvoie la liste des noeuds fils en utilisant la liste subelements de \n\t# notre objet (interne) NamedElement\n def _getChildren(self):\n return [NamedNode(elem, self, index)\n for index, elem in enumerate(self.ref.subelements)]\n \n# et enfin notre modèle avec \nclass NamesModel(TreeModel):\n\tdef __init__(self, rootElements):\n\t\tself.rootElements = rootElements\n\t\tTreeModel.__init__(self)\n\n\tdef _getRootNodes(self):\n\t\treturn [NamedNode(elem, None, index)\n\t\t\tfor index, elem in enumerate(self.rootElements)]\n\n\tdef columnCount(self, parent):\n\t\treturn 2\n\n\t# permet de récupérer les données liées à un indice et un rôle.\n\t# ces données peuvent ainsi varier selon le rôle.\n\tdef data(self, index, role):\n\t\tif not index.isValid():\n\t\t\treturn None\n\t\tnode = index.internalPointer()\n\t\tif role == Qt.DisplayRole:\n\t\t\tif index.column() == 0:\n\t\t\t\treturn node.ref.name.title\n\t\t\telif index.column() == 1:\n\t\t\t\treturn node.ref.name.album\n\t\telif role == Qt.DecorationRole and index.column() == 0:\n\t\t\treturn QtGui.QPixmap('icons/genre.png')\n\t\treturn None\n\n\tdef headerData(self, section, orientation, role):\n\t\tif orientation == Qt.Horizontal and role == Qt.DisplayRole:\n\t\t\tif section == 0:\n\t\t\t\treturn 'Name'\n\t\t\telif section == 1:\n\t\t\t\treturn 'Album'\n\t\t\t\n\t\treturn None\n\t\t\nclass LibraryModel(TreeModel):\n\tdef __init__(self, rootElements=[]):\n\t\tself.rootElements = rootElements\n\t\tTreeModel.__init__(self)\n\n\t\t\n\tdef _getRootNodes(self):\n\t\treturn [NamedNode(elem, None, index)\n\t\t\tfor index, elem in enumerate(self.rootElements)]\n\n\tdef columnCount(self, parent):\n\t\treturn 2\n\n\t# permet de récupérer les données liées à un indice et un rôle.\n\t# ces données peuvent ainsi varier selon le rôle.\n\tdef data(self, index, role):\n\t\tif not index.isValid():\n\t\t\treturn None\n\t\tnode = index.internalPointer()\n\t\tif role == Qt.DisplayRole:\n\t\t\tif index.column() == 0:\n\t\t\t\treturn node.row.label\n\t\t\telif index.column() == 1:\n\t\t\t\treturn node.row.playcount\n\t\telif role == Qt.DecorationRole and 
index.column() == 0:\n\t\t\treturn node.row.icon\n\t\treturn None\n\n\tdef headerData(self, section, orientation, role):\n\t\tif orientation == Qt.Horizontal and role == Qt.DisplayRole:\n\t\t\tif section == 0:\n\t\t\t\treturn 'Name'\n\t\t\telif section == 1:\n\t\t\t\treturn 'Album'\n\t\t\t\n\t\treturn None\n\t\n\tdef append(self, elt):\n\t\tnode = NamedNode(self, self, elt)\n\t\tself.rootElements.append(node)\n\t\treturn node\n\n\t\t\n\t\t\n\t\t\nclass NodeContainer(object):\n def __init__(self):\n self._subnodes = None\n self._ref2node = {}\n \n #--- Protected\n def _createNode(self, ref, row):\n # This returns a TreeNode instance from ref\n raise NotImplementedError()\n \n def _getChildren(self):\n # This returns a list of ref instances, not TreeNode instances\n raise NotImplementedError()\n \n #--- Public\n def invalidate(self):\n # Invalidates cached data and list of subnodes without resetting ref2node.\n self._subnodes = None\n \n #--- Properties\n @property\n def subnodes(self):\n if self._subnodes is None:\n children = self._getChildren()\n self._subnodes = []\n for index, child in enumerate(children):\n if child in self._ref2node:\n node = self._ref2node[child]\n node.row = index\n else:\n node = self._createNode(child, index)\n self._ref2node[child] = node\n self._subnodes.append(node)\n return self._subnodes\n \n\nclass TreeNode(NodeContainer):\n def __init__(self, model, parent, row):\n NodeContainer.__init__(self)\n self.model = model\n self.parent = parent\n self.row = row\n \n @property\n def index(self):\n return self.model.createIndex(self.row, 0, self)\n \n def append(self, elt):\n\t self.subnodes.append(elt)\n \n\nclass RefNode(TreeNode):\n \"\"\"Node pointing to a reference node.\n \n Use this if your Qt model wraps around a tree model that has iterable nodes.\n \"\"\"\n def __init__(self, model, parent, ref, row):\n TreeNode.__init__(self, model, parent, row)\n self.ref = ref\n \n def _createNode(self, ref, row):\n return RefNode(self.model, self, ref, row)\n \n def _getChildren(self):\n return list(self.ref)\n \n\nclass TreeModel(QAbstractItemModel, NodeContainer):\n def __init__(self):\n QAbstractItemModel.__init__(self)\n NodeContainer.__init__(self)\n self._dummyNodes = set() # dummy nodes' reference have to be kept to avoid segfault\n \n #--- Private\n def _createDummyNode(self, parent, row):\n # In some cases (drag & drop row removal, to be precise), there's a temporary discrepancy\n # between a node's subnodes and what the model think it has. This leads to invalid indexes\n # being queried. Rather than going through complicated row removal crap, it's simpler to\n # just have rows with empty data replacing removed rows for the millisecond that the drag &\n # drop lasts. 
Override this to return a node of the correct type.\n return TreeNode(self, parent, row)\n \n def _lastIndex(self):\n \"\"\"Index of the very last item in the tree.\n \"\"\"\n currentIndex = QModelIndex()\n rowCount = self.rowCount(currentIndex)\n while rowCount > 0:\n currentIndex = self.index(rowCount-1, 0, currentIndex)\n rowCount = self.rowCount(currentIndex)\n return currentIndex\n \n #--- Overrides\n def index(self, row, column, parent):\n if not self.subnodes:\n return QModelIndex()\n node = parent.internalPointer() if parent.isValid() else self\n try:\n return self.createIndex(row, column, node.subnodes[row])\n except IndexError:\n parentNode = parent.internalPointer() if parent.isValid() else None\n dummy = self._createDummyNode(parentNode, row)\n self._dummyNodes.add(dummy)\n return self.createIndex(row, column, dummy)\n \n def parent(self, index):\n if not index.isValid():\n return QModelIndex()\n node = index.internalPointer()\n if node.parent is None:\n return QModelIndex()\n else:\n return self.createIndex(node.parent.row, 0, node.parent)\n \n def reset(self):\n self.invalidate()\n self._ref2node = {}\n self._dummyNodes = set()\n QAbstractItemModel.reset(self)\n \n def rowCount(self, parent=QModelIndex()):\n node = parent.internalPointer() if parent.isValid() else self\n return len(node.subnodes)\n \n #--- Public\n def findIndex(self, rowPath):\n \"\"\"Returns the QModelIndex at `rowPath`\n \n `rowPath` is a sequence of node rows. For example, [1, 2, 1] is the 2nd child of the\n 3rd child of the 2nd child of the root.\n \"\"\"\n result = QModelIndex()\n for row in rowPath:\n result = self.index(row, 0, result)\n return result\n \n @staticmethod\n def pathForIndex(index):\n reversedPath = []\n while index.isValid():\n reversedPath.append(index.row())\n index = index.parent()\n return list(reversed(reversedPath))\n \n def refreshData(self):\n \"\"\"Updates the data on all nodes, but without having to perform a full reset.\n \n A full reset on a tree makes us lose selection and expansion states. When all we ant to do\n is to refresh the data on the nodes without adding or removing a node, a call on\n dataChanged() is better. But of course, Qt makes our life complicated by asking us topLeft\n and bottomRight indexes. 
This is a convenience method refreshing the whole tree.\n \"\"\"\n columnCount = self.columnCount()\n topLeft = self.index(0, 0, QModelIndex())\n bottomLeft = self._lastIndex()\n bottomRight = self.sibling(bottomLeft.row(), columnCount-1, bottomLeft)\n self.dataChanged.emit(topLeft, bottomRight)","sub_path":"qt/treework.py","file_name":"treework.py","file_ext":"py","file_size_in_byte":13018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337235716","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_sig(A, f):\n ar = np.zeros(100)\n i = 0\n x = 0\n while i < 100:\n ar[i] = A * np.sin(2 * np.pi * f * x / 1000)\n ar[i] = ar[i] + np.sin(2 * np.pi * (f * 2) * x / 1000) + np.random.uniform(-0.2, 0.2)\n i = i + 1\n x = x + 0.1\n\n return ar\n\n\ndef filter(N, Fs, Fx, Fd, signal):\n h = np.zeros(N) # испульсная характеристика\n h_id = np.zeros(N) # идеальная импульсная характеристика\n w = np.zeros(N) # весовая функция Блекмена\n\n # импульсная характеристика фильтра\n Fc = (Fs + Fx) / (2 * Fd)\n\n for i in range(N):\n if i == 0:\n h_id[i] = 2 * np.pi * Fc\n else:\n h_id[i] = np.sin(2 * i * np.pi * Fc) / (np.pi * i)\n w[i] = 0.42 - 0.5 * np.cos((2 * np.pi * i) / (N - 1)) + 0.08*np.cos((4 * np.pi * i) / (N - 1))\n # w[i] = 1\n h[i] = h_id[i] * w[i]\n\n # нормирование\n s = np.sum(h)\n for i in range(N):\n h[i] = h[i] / s\n\n out_signal = np.zeros(len(signal))\n for i in range(len(signal)):\n out_signal[i] = 0\n for j in range(N): # N - 1\n if i - j >= 0:\n out_signal[i] = out_signal[i] + h[j] * signal[i - j]\n\n return out_signal, h, w, Fc, h_id\n\n\n# функция для преобразования Фурье\ndef fourier_transform(signal):\n frequencies = []\n frequencies = np.fft.rfft(signal, n=None, axis=-1)\n frequencies = np.abs(frequencies)\n\n return frequencies\n\n\ndef main_sin(N, Fs, Fx):\n A = 1\n f = 125\n signal = get_sig(A, f)\n frequencies_signal = fourier_transform(signal)\n\n plt.figure()\n axes = plt.subplot(1, 2, 1, facecolor='k')\n axes.plot(signal, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Исходный сигнал\", loc='center')\n plt.xlabel(\"Время\")\n plt.ylabel(\"Амплитуда\")\n # plt.show()\n\n # plt.figure()\n axes = plt.subplot(1, 2, 2, facecolor='k')\n axes.plot(frequencies_signal, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Спектр исходного сигнала\", loc='center')\n plt.xlabel(\"Частота\")\n # plt.ylabel(\"Амплитуда\")\n plt.show()\n\n Fd = 1000\n (out_signal, h, w, Fc, h_id) = filter(N, Fs, Fx, Fd, signal)\n frequencies_out_signal = fourier_transform(out_signal)\n\n plt.figure()\n axes = plt.subplot(1, 2, 1, facecolor='k')\n axes.plot(h_id, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Идеальная импульсная характеристика\", loc='center')\n plt.xlabel(\"Время\")\n plt.ylabel(\"Амплитуда\")\n # plt.show()\n\n # plt.figure()\n axes = plt.subplot(1, 2, 2, facecolor='k')\n axes.plot(h, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Импульсная характеристика (окно Блекмена)\", loc='center')\n plt.xlabel(\"Время\")\n plt.ylabel(\"Амплитуда\")\n plt.show()\n\n plt.figure()\n axes = plt.subplot(facecolor='k')\n axes.plot(w, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Функция Блекмена\", loc='center')\n # plt.xlabel(\"Время\")\n # plt.ylabel(\"Амплитуда\")\n plt.show()\n\n plt.figure()\n axes = plt.subplot(1, 2, 1, facecolor='k')\n axes.plot(out_signal, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Сигнал после применения фильтра\", loc='center')\n plt.xlabel(\"Время\")\n 
plt.ylabel(\"Амплитуда\")\n # plt.show()\n\n # plt.figure()\n axes = plt.subplot(1, 2, 2, facecolor='k')\n axes.plot(frequencies_out_signal, \"g\")\n plt.grid(True, color=\"w\")\n plt.title(\"Спектр отфильтрованного сигнала\", loc='center')\n plt.xlabel(\"Частота\")\n # plt.ylabel(\"Амплитуда\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sin.py","file_name":"sin.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"423691573","text":"###\n# Contains utils for model architecture creation, etc.\n###\n\n\n###\n# JQNet1 start\n###\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import lr_scheduler\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n#from https://github.com/pytorch/examples/blob/master/mnist/main.py\nclass JQNet1(nn.Module):\n def __init__(self, use_gpu):\n super(JQNet1, self).__init__()\n if use_gpu:\n self.conv1 = nn.Conv2d(1, 10, kernel_size=3, padding = 1).cuda()\n self.conv2 = nn.Conv2d(10, 20, kernel_size=3, padding = 1).cuda()\n self.conv3 = nn.Conv2d(20, 40, kernel_size=3, padding = 1).cuda()\n self.conv2_drop = nn.Dropout2d(p = 0.2)\n self.conv3_drop = nn.Dropout2d(p = 0.1)\n\n #assumes a batch size of 50\n self.fc1 = nn.Linear(31360,64).cuda()\n self.fc2 = nn.Linear(64, 25).cuda()\n self.fc3 = nn.Linear(25,2).cuda()\n else:\n self.conv1 = nn.Conv2d(1, 10, kernel_size=3, padding = 1).cpu()\n self.conv2 = nn.Conv2d(10, 20, kernel_size=3, padding = 1).cpu()\n self.conv3 = nn.Conv2d(20, 40, kernel_size=3, padding = 1).cpu()\n self.conv2_drop = nn.Dropout2d(p = 0.2)\n self.conv3_drop = nn.Dropout2d(p = 0.1)\n\n #assumes a batch size of 50\n self.fc1 = nn.Linear(31360,64).cpu()\n self.fc2 = nn.Linear(64, 25).cpu()\n self.fc3 = nn.Linear(25,2).cpu()\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = F.relu(F.max_pool2d(self.conv3_drop(self.conv3(x)), 2))\n \n x = x.view(x.size(0),-1) #clutch line\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n \n \n x = x.view(x.size(0),-1) #clutch line\n x = F.relu(self.fc2(x))\n x = F.dropout(x, training=self.training)\n \n x = self.fc3(x)\n return F.log_softmax(x, dim=1)\n \n###\n# JQNet1 End\n###\n \n \n \n###\n# Net1 Start\n###\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import lr_scheduler\nimport torch\n\nclass Net1(nn.Module):\n def __init__(self, use_gpu):\n super(Net1, self).__init__()\n device = torch.device('cuda' if use_gpu else 'cpu')\n \n# self.input_size = (32, 32)\n self.input_size = (64, 64)\n \n self.conv1 = nn.Conv2d(1, 10, kernel_size=4).to(device)\n self.maxpool1 = nn.MaxPool2d(2).to(device)\n \n self.conv2 = nn.Conv2d(10, 20, kernel_size=4).to(device)\n self.maxpool2 = nn.MaxPool2d(6).to(device)\n\n # Todo: check input size of this layer\n# self.fc1 = nn.Linear(8000, 100).to(device)\n self.fc1 = nn.Linear(320, 100).to(device)\n\n \n self.batchnorm1 = nn.BatchNorm1d(100).to(device)\n \n self.fc2 = nn.Linear(100, 2).to(device)\n \n # Only use this layer if self.training\n self.conv2_drop = nn.Dropout2d(p = 0.2).to(device)\n\n def forward(self, x):\n x = F.relu(self.maxpool1(self.conv1(x)))\n x = F.relu(self.maxpool2(self.conv2(x)))\n \n# print(x.size())\n # Expand x based on batch size (x.size(0))\n x = x.view(x.size(0), -1)\n# print(x.size())\n x = self.fc1(x)\n \n x = self.batchnorm1(x)\n \n x = 
self.fc2(x)\n \n if self.training:\n x = self.conv2_drop(x)\n \n return F.log_softmax(x, dim=1)\n\n###\n# Net1 End\n###\n \n \n# ========================================\n# define model structure\n# ========================================\nfrom lib.playground.utee import selector\nfrom lib.playground.mnist import model\nimport inspect\n\ndef create_model_architecture(model_type='mnist', use_gpu = False, **kwargs):\n \"\"\"\n params model_type: the type of model, for now, support mnist and resnet18 \n \"\"\"\n if model_type == 'mnist':\n print('using pretrained mnist model')\n \n # load the model from the playground library\n model_annotation, ds_fetcher, is_imagenet = selector.select('mnist')\n \n # remove last layer\n removed = list(model_annotation.model.children())[:-1]\n \n # add a front layer to account for new input\n # IMPORTANT, we need to update the self.input_dims of the MLP class\n removed = [nn.Linear(img_input_size*img_input_size,28*28), nn.ReLU()] + removed\n \n # formulate the layers\n model_annotation.model=torch.nn.Sequential(*removed)\n \n # add the new fc layer\n model_annotation.model.fc = torch.nn.Linear(256,2).cuda()\n \n # update the self.input_dims of the network\n model_annotation.input_dims = img_input_size * img_input_size \n\n elif model_type == 'resnet18': \n print(\"Transferring resnet18 and retraining with annotations dataset.\") \n model_annotation = models.resnet18(pretrained=True)\n num_params = sum(1 for i in model_annotation.parameters())\n\n # There are 10 layers (model_ft.children()) in resnet18\n # Freezing the first half of resnet18, freezing all params for layers 1-5\n max_layer = 5\n curr_layer = 1\n last_layer = None\n for child in model_annotation.children():\n if curr_layer <= max_layer:\n for param in child.parameters():\n param.requires_grad = False\n last_layer = child\n curr_layer = curr_layer + 1\n else:\n break\n\n # Replace the final fully connected layer to perform binary classification\n num_ftrs = model_annotation.fc.in_features\n model_annotation.fc = nn.Linear(num_ftrs, 2)\n \n elif model_type == 'jq_net1':\n print(\"Creating JQ's net1.\")\n argspec = inspect.getfullargspec(JQNet1).args\n args = {arg : kwargs[arg] for arg in argspec if arg in kwargs}\n model_annotation = JQNet1(use_gpu, **args)\n \n elif model_type == 'net1':\n print('Creating Net1.')\n argspec = inspect.getfullargspec(Net1).args\n args = {arg : kwargs[arg] for arg in argspec if arg in kwargs}\n model_annotation = Net1(use_gpu, **args)\n \n\n # return\n if use_gpu:\n return model_annotation.cuda()\n else:\n return model_annotation.cpu()\n","sub_path":"experiments/cnn/notebooks/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"518393888","text":"import abc\n\nfrom flask import current_app, jsonify\n\nfrom app import errors\nfrom app.admin.admin_config import ScoreConfig\nfrom app.auth.util import validate_email\nfrom app.exam.exam_config import ExamConfig\nfrom app.models.analysis import *\nfrom app.models.question import QuestionModel\nfrom app.models.user import UserModel\nfrom . 
import admin, util\nfrom .algorithm import OptimizeAlgorithm\nfrom app.utils.date_and_time import datetime_to_str\n\n\n@admin.route('/score/question', methods=['GET'])\ndef get_score_of_all_questions():\n \"\"\"获取各题目平均分的成绩情况\n\n :return: 所有题目,可直接展示\n \"\"\"\n current_app.logger.info('get_score_of_all_questions')\n\n all_answers = AnalysisModel.objects().fields(question_num=1, score_key=1, score_detail=1).order_by('question_num')\n if all_answers.count() == 0:\n return jsonify(errors.Score_no_data)\n\n mapper = QuestionNumMapper()\n result = __generate_result_from_dict(mapper.map_answers(all_answers), 'questionId')\n return jsonify(errors.success({'result': result}))\n\n\n@admin.route('/score/question/', methods=['GET'])\ndef get_score_of_specific_questions(index):\n \"\"\"获取某题目的成绩情况\n\n :param index: 题号ID\n :return: 该问题的成绩情况\n \"\"\"\n current_app.logger.info('get_score_of_specific_questions ' + index)\n\n cur_question = QuestionModel.objects(index=index).first()\n if not cur_question:\n return jsonify(errors.Score_criteria_not_exist)\n\n all_answers = AnalysisModel.objects(question_num=index).order_by('date')\n if len(all_answers) == 0:\n return jsonify(errors.Score_no_data)\n\n mapper = DateMapper()\n result_by_date = __generate_result_from_dict(mapper.map_answers(all_answers), 'date')\n\n result_all = []\n for answer in all_answers:\n result_all.append(_generate_total_score(answer['score_key'], answer['score_detail']))\n\n return jsonify(errors.success({\n 'resultByDate': result_by_date,\n 'allResult': result_all\n }))\n\n\n@admin.route('/score/user', methods=['GET'])\ndef get_score_of_all_users():\n \"\"\"获取各用户平均分的成绩情况\n\n :return: 所有题目,可直接展示\n \"\"\"\n current_app.logger.info('get_score_of_all_users')\n\n all_answers = AnalysisModel.objects().fields(user=1, score_key=1, score_detail=1)\n if all_answers.count() == 0:\n return jsonify(errors.Score_no_data)\n\n mapper = UserMapper()\n result = __generate_result_from_dict(mapper.map_answers(all_answers), 'username')\n\n # map_answers_by_user() 返回的结果中 key 为 ObjectId,需要改为 email / phone\n # 先把所有用户信息取回本地,相比每一次遍历都连接数据库查询用户信息更快\n all_users = UserModel.objects.fields(id=1, email=1, phone=1)\n user_dict = {}\n for user in all_users:\n user_dict[user['id']] = {'email': user['email'], 'phone': user['phone']}\n\n for item in result:\n cur_user_id = item['username']\n\n if cur_user_id in user_dict:\n cur_user = user_dict[cur_user_id]\n cur_username = cur_user['email'] if cur_user['email'] else cur_user['phone']\n else:\n cur_username = cur_user_id.__str__() + ScoreConfig.DEFAULT_USER_EMAIL\n\n item['username'] = cur_username\n return jsonify(errors.success({'result': result}))\n\n\n@admin.route('/score/user/', methods=['GET'])\ndef get_score_of_specific_users(username):\n \"\"\"获取某用户的成绩情况\n\n :param username: 用户邮箱\n :return: 该用户的成绩情况\n \"\"\"\n current_app.logger.info('get_score_of_specific_users ' + username)\n\n if '@' in username:\n # 邮箱登录\n email = username\n if not validate_email(email):\n return jsonify(errors.Params_error)\n if ScoreConfig.DEFAULT_USER_EMAIL in username:\n # 默认邮箱,通过主键查询\n user_object_id = username[:-16]\n all_answers = AnalysisModel.objects(user=user_object_id).order_by('date')\n else:\n # 真实邮箱\n cur_user = UserModel.objects(email=username).first()\n if not cur_user:\n return jsonify(errors.Score_criteria_not_exist)\n all_answers = AnalysisModel.objects(user=cur_user['id']).\\\n fields(voice_features=0, key_hits=0, detail_hits=0).order_by('date')\n else:\n # 手机号登录\n cur_user = UserModel.objects(phone=username).first()\n if not 
cur_user:\n return jsonify(errors.Score_criteria_not_exist)\n all_answers = AnalysisModel.objects(user=cur_user['id']). \\\n fields(voice_features=0, key_hits=0, detail_hits=0).order_by('date')\n\n if len(all_answers) == 0:\n return jsonify(errors.Score_no_data)\n\n date_mapper = DateMapper()\n result_by_date = __generate_result_from_dict(date_mapper.map_answers(all_answers), 'date')\n\n question_id_mapper = QuestionNumMapper()\n result_by_qid = __generate_result_from_dict(question_id_mapper.map_answers(all_answers), 'questionId')\n # 按照 q_id 排序\n result_by_qid.sort(key=__sort_by_question_id)\n\n result_all = []\n for answer in result_by_qid:\n result_all.append({'questionId': answer['questionId'], 'totalScore': answer['totalScore']})\n\n return jsonify(errors.success({\n 'resultByDate': result_by_date,\n 'allResult': result_all\n }))\n\n\nclass AbstractMapper(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def map_answers(self, answers):\n \"\"\"\n 将回答 list 按照一定规则对应为 dict\n \"\"\"\n return\n\n\nclass QuestionNumMapper(AbstractMapper):\n\n def map_answers(self, answers):\n \"\"\"将所有回答结果按照题号对应\n\n :param answers: 所有的回答\n :return: key 为题号,value 为相应的 {key, detail} 得分的字典\n \"\"\"\n res = {}\n for answer in answers:\n cur_question_id = answer['question_num']\n if cur_question_id in res:\n res[cur_question_id].append({'key': answer['score_key'], 'detail': answer['score_detail']})\n else:\n res[cur_question_id] = [{'key': answer['score_key'], 'detail': answer['score_detail']}]\n return res\n\n\nclass UserMapper(AbstractMapper):\n\n def map_answers(self, answers):\n \"\"\"将所有回答结果按照用户对应\n\n :param answers: 所有的回答\n :return: key 为用户 ObjectId,value 为相应的 {key, detail} 得分的字典\n \"\"\"\n res = {}\n for answer in answers:\n cur_user = answer['user']\n if cur_user in res:\n res[cur_user].append({'key': answer['score_key'], 'detail': answer['score_detail']})\n else:\n res[cur_user] = [{'key': answer['score_key'], 'detail': answer['score_detail']}]\n return res\n\n\nclass DateMapper(AbstractMapper):\n\n def map_answers(self, answers):\n \"\"\"将所有回答结果按照回答日期对应\n\n :param answers: 所有的回答\n :return: key 为日期,value 为相应的 {key, detail} 得分的字典\n \"\"\"\n res = {}\n for answer in answers:\n cur_date = datetime_to_str(answer['test_start_time'], only_date=True)\n if cur_date in res:\n res[cur_date].append({'key': answer['score_key'], 'detail': answer['score_detail']})\n else:\n res[cur_date] = [{'key': answer['score_key'], 'detail': answer['score_detail']}]\n return res\n\n\ndef __generate_result_from_dict(dict, result_key_name):\n \"\"\"\n\n :param dict: {result_key: {key, detail}]}\n :param result_key_name: 结果集中的 key 值\n :return:\n \"\"\"\n algorithm = OptimizeAlgorithm()\n\n result = []\n for key in dict:\n cur_answers = dict[key]\n detail_scores = [cur_answer['detail'] for cur_answer in cur_answers]\n key_scores = [cur_answer['key'] for cur_answer in cur_answers]\n\n key_score = algorithm.analysis_score(key_scores, ExamConfig.full_score)['mean']\n detail_score = algorithm.analysis_score(detail_scores, ExamConfig.full_score)['mean']\n result.append({\n result_key_name: key,\n 'times': len(cur_answers),\n 'mainScore': format(key_score, ScoreConfig.DEFAULT_NUM_FORMAT),\n 'detailScore': format(detail_score, ScoreConfig.DEFAULT_NUM_FORMAT),\n 'totalScore': _generate_total_score(key_score, detail_score)\n })\n return result\n\n\ndef __sort_by_question_id(array_item):\n return array_item['questionId']\n\n\ndef _generate_total_score(key_score, detail_score):\n return format(key_score * ExamConfig.key_percent + 
detail_score * ExamConfig.detail_percent,\n ScoreConfig.DEFAULT_NUM_FORMAT)\n","sub_path":"app/admin/views_score.py","file_name":"views_score.py","file_ext":"py","file_size_in_byte":8857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602628902","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 16 07:53:01 2021\r\n\r\n@author: kevin\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom scipy.optimize import minimize\r\nimport itertools\r\nimport copy\r\n\r\nimport seaborn as sns\r\ncolor_names = [\"windows blue\", \"red\", \"amber\", \"faded green\"]\r\ncolors = sns.xkcd_palette(color_names)\r\nsns.set_style(\"white\")\r\nsns.set_context(\"talk\")\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib \r\nmatplotlib.rc('xtick', labelsize=35) \r\nmatplotlib.rc('ytick', labelsize=35) \r\n\r\nimport matplotlib as mpl\r\nmpl.rcParams['text.usetex']=True\r\nmpl.rcParams['text.latex.unicode']=True\r\n\r\n# %% Neural fields\r\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n# %% parameters\r\nL = 20\r\nT = 10\r\ntau = 0.1\r\na = 3\r\nJ0 = .1\r\nnoise = 0.01\r\ndt = 0.1\r\nk = 1\r\nlt = int(T/dt)\r\n\r\n# %% settings\r\ndef RR(x,y,R):\r\n JJ = np.zeros((L,L))\r\n for xx in range(L):\r\n for yy in range(L):\r\n JJ[xx,yy] = J0/(2*np.pi*a)**0.5 * np.exp(-((x-xx)**2+(y-yy)**2)/2/a**2)\r\n return np.sum(JJ*R)\r\n\r\n#def NL(x):\r\n# up = x.copy()\r\n# up[np.where(up<0)[0]] = 0\r\n# up = up**2\r\n# return up / (1+k*np.sum(up))\r\ndef NL(x):\r\n nl = 1/(1+np.exp(x))\r\n return nl\r\n\r\n# %% dynamics\r\n \r\nU = np.zeros((L,L,lt))+0.1\r\nR = np.zeros((L,L,lt))+0.1\r\nIn = np.random.randn(L,L,lt)*1. #set better input later\r\n\r\nfor tt in range(0,lt-1):\r\n R[:,:,tt] = NL(U[:,:,tt]) #activation\r\n for xx in range(1,L-1):\r\n for yy in range(1,L-1):\r\n JJ = RR(xx,yy,R[:,:,tt]) #recurrent signal\r\n U[xx,yy,tt+1] = U[xx,yy,tt] + 1/tau*dt*(-U[xx,yy,tt] + JJ + In[xx,yy,tt]) \\\r\n + np.sqrt(dt*np.random.rand())*noise #potential\r\n\r\n\r\n# %% Dynamic mode decomposition\r\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n# %%\r\n\r\n\r\n\r\n","sub_path":"Neural_field.py","file_name":"Neural_field.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"37168945","text":"from SpiderKeeper.app import db, Base\r\n\r\n\r\nclass Project(Base):\r\n \"\"\"\r\n Project爬虫项目ORM类\r\n \"\"\"\r\n __tablename__ = 'sk_project'\r\n project_name = db.Column(db.String(50), unique=True)\r\n applicant = db.Column(db.String(50)) # 申请人\r\n developers = db.Column(db.String(50)) # 项目的开发者\r\n for_project = db.Column(db.String(50)) # 提出需求的项目\r\n project_cate = db.Column(db.String(100)) # 爬虫分类\r\n project_alias = db.Column(db.String(100)) # 项目的备注\r\n is_msd = db.Column(db.String(50)) # 是否是主从分布式爬虫 0 单机爬虫 1 分布式爬虫\r\n\r\n @classmethod\r\n def load_project(cls, project_list):\r\n \"\"\"\r\n 将爬虫项目列表里面的爬虫项目添加进入数据库\r\n :param project_list: 爬虫项目列表\r\n :return:\r\n \"\"\"\r\n for project in project_list:\r\n existed_project = cls.query.filter_by(project_name=project.project_name).first()\r\n if not existed_project:\r\n db.session.add(project)\r\n db.session.commit()\r\n\r\n @classmethod\r\n def find_project_by_id(cls, project_id):\r\n \"\"\"\r\n 根据爬虫项目id查找爬虫项目信息\r\n :param project_id: 爬虫项目id\r\n :return:\r\n \"\"\"\r\n return Project.query.filter_by(id=project_id).first()\r\n\r\n def 
to_dict(self):\r\n return dict(\r\n project_id=self.id,\r\n project_name=self.project_name,\r\n applicant=self.applicant,\r\n developers=self.developers,\r\n for_project=self.for_project,\r\n project_alias=self.project_alias,\r\n project_cate=self.project_cate,\r\n create_time=str(self.date_created),\r\n is_msd=self.is_msd\r\n )","sub_path":"SpiderKeeper/app/projects/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"427753297","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\n\n\nclass BasePipeline(object):\n table_name = 'kugou'\n\n def __init__(self, mysql_uri, mysql_db, mysql_user, mysql_password):\n self.mysql_uri = mysql_uri\n self.mysql_db = mysql_db\n self.mysql_user = mysql_user\n self.mysql_password = mysql_password\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mysql_uri=crawler.settings.get('MYSQL_URI'),\n mysql_db=crawler.settings.get('MONGO_DATABASE', 'myproject'),\n mysql_user=crawler.settings.get('MYSQL_USER'),\n mysql_password=crawler.settings.get('MYSQL_PASSWORD')\n )\n\n def open_spider(self, spider):\n self.connect = pymysql.connect(\n host=self.mysql_uri,\n db=self.mysql_db,\n port=3306,\n user=self.mysql_user,\n passwd=self.mysql_password,\n charset='utf8',\n )\n self.cursor = self.connect.cursor()\n\n def close_spider(self, spider):\n self.connect.close()\n\n\nclass KugouPipeline(BasePipeline):\n\n def process_item(self, item, spider):\n typeclass = item['type']\n if typeclass == 'kugou_list':\n self.cursor.execute(\"\"\"select * from music_list where id = %s\"\"\", item[\"sid\"])\n ret = self.cursor.fetchone()\n if ret:\n self.cursor.execute(\n \"\"\"update music_list set title = %s, image=%s, name=%s, bg_time=%s, type=1 where id=%s \"\"\",\n (item['title'], item['image'], item['name'], item['time'], item['sid']))\n else:\n self.cursor.execute(\n \"\"\"insert into music_list (title, image, name, id, bg_time, type) value (%s, %s, %s, %s, %s, 1)\"\"\",\n (item['title'], item['image'], item['name'], item['sid'], item['time'])) # item里面定义的字段和表字段对应\n # 提交sql语句\n self.connect.commit()\n return item\n elif typeclass == 'kugou_music':\n self.cursor.execute(\"\"\"select * from music_detail where id = %s\"\"\", item[\"audio_id\"])\n ret = self.cursor.fetchone()\n if ret:\n self.cursor.execute(\n \"\"\"update music_detail set name=%s, content=%s, bg_time=%s, sid=%s, type = 1 where id=%s \"\"\",\n # 纯属python操作mysql知识,不熟悉请恶补\n (item['music_name'],\n item['music_data'],\n item['time'],\n item['sid'],\n item['audio_id'])) # item里面定义的字段和表字段对应\n else:\n self.cursor.execute(\n \"\"\"insert into music_detail(name, content, bg_time, sid, id, type)\n value (%s, %s, %s, %s, %s, 1)\"\"\", # 纯属python操作mysql知识,不熟悉请恶补\n (item['music_name'],\n item['music_data'],\n item['time'],\n item['sid'],\n item['audio_id'])) # item里面定义的字段和表字段对应\n # 提交sql语句\n self.connect.commit()\n return item\n else:\n return item\n\n\nclass IndexPipeline(BasePipeline):\n def process_item(self, item, spider):\n typeclass = item['type']\n if typeclass == 'kugou_banner':\n self.cursor.execute(\n \"\"\"insert into music_banner (id, image, bg_time, url, type) value (%s, %s, %s, %s, 1)\"\"\",\n (item['banner_id'], item['image'], item['time'], item['url'])) # item里面定义的字段和表字段对应\n elif typeclass == 
'kugou_single':\n self.cursor.execute(\n \"\"\"insert into music_single \n (id, image, bg_time, name, introduction, type) value (%s, %s, %s, %s, %s, 1)\"\"\",\n (item['single_id'], item['image'], item['time'], item['name'], item['introduction'])) # item\n self.connect.commit()\n return item","sub_path":"kugou/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"246867316","text":"from pybamm import exp\n\n\ndef graphite_LGM50_electrolyte_reaction_rate_Chen2020(T, T_inf, E_r, R_g):\n \"\"\"\n Reaction rate for Butler-Volmer reactions between graphite and LiPF6 in EC:DMC.\n References\n ----------\n .. [1] Chang-Hui Chen, Ferran Brosa Planella, Kieran O’Regan, Dominika Gastol, W.\n Dhammika Widanage, and Emma Kendrick. \"Development of Experimental Techniques for\n Parameterization of Multi-scale Lithium-ion Battery Models.\" Submitted for\n publication (2020).\n Parameters\n ----------\n T: :class: `numpy.Array`\n Dimensional temperature\n T_inf: double\n Reference temperature\n E_r: double\n Reaction activation energy\n R_g: double\n The ideal gas constant\n Returns\n -------\n :`numpy.Array`\n Reaction rate\n \"\"\"\n\n m_ref = 6.48e-7\n arrhenius = exp(E_r / R_g * (1 / T_inf - 1 / T))\n\n return m_ref * arrhenius\n","sub_path":"pybamm/input/parameters/lithium-ion/anodes/graphite_Chen2020/graphite_LGM50_electrolyte_reaction_rate_Chen2020.py","file_name":"graphite_LGM50_electrolyte_reaction_rate_Chen2020.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"441375138","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\nimport d2lzh_pytorch as d2l\n\n\n# mnist_train=torchvision.datasets.FashionMNIST(\n# root='./Datasets/FashionMNIST0216',\n# train=True,\n# download=True,\n# transform=transforms.ToTensor()\n# )\n# mnist_test=torchvision.datasets.FashionMNIST(\n# root='./Datasets/FashionMNIST0216',\n# train=False,\n# download=True,\n# transform=transforms.ToTensor()\n# )\n\n\n\n\n#设置批量大小\nbatch_size=256\ntrain_iter,test_iter=d2l.load_data_fashion_mnist(batch_size)\n\n\n#初始化模型参数\nnum_inputs=784\nnum_outputs=10\n\nW=torch.tensor(np.random.normal(0,0.01,\n (num_inputs,num_outputs)),\n dtype=torch.float)\nb=torch.zeros(num_outputs,dtype=torch.float)\nW.requires_grad_(requires_grad=True)\nb.requires_grad_(requires_grad=True)\n\n\n#实现softmax运算\ndef softmax(X):\n X_exp=X.exp()#首先对每个元素做指数运算\n partition=X_exp.sum(dim=1,keepdim=True)#对同一行数据进行求和\n return X_exp/partition #这里使用了广播机制\n\n\n#定义模型\ndef net(X):\n return softmax(torch.mm(X.view((-1,num_inputs)),W)+b)\n\n\n#定义损失函数\ndef cross_entropy(y_hat,y):\n return -torch.log(y_hat.gather(1,y.view(-1,1)))\n\n\n#计算分类准确率\ndef accuracy(y_hat,y):\n return (y_hat.argmax(dim=1)==y).float().mean().item()\n\n\n#训练模型\nnum_epochs,lr=5,0.1\nd2l.train_ch3(net, train_iter, test_iter,\n cross_entropy, num_epochs,\n batch_size,\n [W, b], lr)\n\nif __name__=='__main__':\n\n # print(type(mnist_train))\n # print(len(mnist_train), len(mnist_test))\n # print(d2l.evaluate_accuracy(test_iter,net))\n\n X,y=iter(test_iter).next()\n\n true_labels=d2l.get_fashion_mnist_labels(y.numpy())\n pred_labels=d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())\n titles = [true + '\\n' + pred for true, pred in zip(true_labels, 
pred_labels)]\n\n d2l.show_fashion_mnist(X[0:9], titles[0:9])\n\n","sub_path":"softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537382371","text":"\"\"\"A video player class.\"\"\"\n\nfrom .video_library import VideoLibrary\nfrom random import choice\n\nplaying = False\nvideo_playing = None\npaused = False\n\nplaylists = {}\n\nclass VideoPlayer:\n \"\"\"A class used to represent a Video Player.\"\"\"\n\n def __init__(self):\n self._video_library = VideoLibrary()\n\n def number_of_videos(self):\n num_videos = len(self._video_library.get_all_videos())\n print(f\"{num_videos} videos in the library\")\n\n def show_all_videos(self):\n all_videos = self._video_library.get_all_videos()\n print(\"Here's a list of all available videos:\")\n all_videos.sort(key=lambda x: x.title)\n for video in all_videos:\n tag = str(video.tags).strip(\"()\")\n print(f\"{video.title} ({video.video_id}) [{tag}]\")\n\n def play_video(self, video_id):\n \"\"\"Plays the respective video.\"\"\"\n all_videos = self._video_library.get_all_videos()\n video_ids = []\n for video in all_videos:\n video_ids.append(video.video_id)\n\n if video_id in video_ids:\n\n global playing\n global video_playing\n global paused\n\n if playing:\n print(f\"Stopping video: {video_playing.title}\")\n\n playing = True\n video_playing = self._video_library.get_video(video_id)\n\n print(f\"Playing Video: {video_playing.title}\", end=\"\")\n\n paused = False\n\n else:\n print(\"Cannot play video: Video does not exist\")\n\n\n def stop_video(self):\n \"\"\"Stops the current video.\"\"\"\n\n global playing\n global video_playing\n global paused\n\n if playing:\n print(f\"Stopping video: {video_playing.title}\")\n playing = False\n video_playing = None\n else:\n print(\"Cannot stop video: No video is currently playing\")\n\n\n def play_random_video(self):\n \"\"\"Plays a random video from the video library.\"\"\"\n\n all_videos = self._video_library.get_all_videos()\n \n random_video = choice(all_videos)\n\n global playing\n global video_playing\n global paused\n\n if playing:\n print(f\"Stopping video: {video_playing.title}\")\n else:\n playing = True\n\n print(f\"Playing video: {random_video.title}\")\n video_playing = random_video\n\n paused = False\n\n def pause_video(self):\n \"\"\"Pauses the current video.\"\"\"\n\n global playing\n global video_playing\n global paused\n\n if playing and not paused:\n print(f\"Pausing video: {video_playing.title}\")\n paused = True\n elif playing and paused:\n print(f\"Video already paused: {video_playing.title}\")\n elif not playing:\n print(\"Cannot pause video: No video is currently playing\")\n\n def continue_video(self):\n \"\"\"Resumes playing the current video.\"\"\"\n\n global playing\n global video_playing\n global paused\n\n if paused:\n print(f\"Continuing video: {video_playing.title}\")\n paused = False\n elif playing and not paused:\n print(\"Cannot continue video: Video is not paused\")\n elif not playing:\n print(\"Cannot continue video: No video is currently playing\")\n\n def show_playing(self):\n \"\"\"Displays video currently playing.\"\"\"\n\n global playing\n global video_playing\n global paused\n\n if playing:\n tag = str(video_playing.tags).strip(\"()\")\n print(f\"Currently playing: {video_playing.title} ({video_playing.video_id}) [{tag}]\", end=\" \")\n \n if paused:\n print(\"- Paused\")\n else:\n print(\"No video is currently playing\")\n\n def 
create_playlist(self, playlist_name):\n \"\"\"Creates a playlist with a given name.\"\"\"\n\n global playlists\n\n already_exists = False\n\n for playlist in playlists:\n if playlist_name.lower() == playlist.lower():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n already_exists = True\n break\n\n if not already_exists:\n playlists[playlist_name] = []\n print(f\"Successfully created new playlist: {playlist_name}\")\n\n def add_to_playlist(self, playlist_name, video_id):\n \"\"\"Adds a video to a playlist with a given name.\"\"\"\n\n global playlists\n\n playlist_exists = False\n video_exists = False\n\n all_videos = self._video_library.get_all_videos()\n videos = []\n for video in all_videos:\n videos.append(video.video_id)\n\n for playlist in playlists:\n\n if playlist_name.lower() == playlist.lower():\n\n if video_id in videos:\n\n video = self._video_library.get_video(video_id)\n\n if video in playlists[playlist]:\n print(f\"Cannot add video to {playlist}: Video already added\")\n else: \n playlists[playlist].append(video)\n print(f\"Added video to {playlist}: {video.title}\")\n video_exists = True\n\n else:\n video_exists = False\n\n playlist_exists = True\n return\n\n if not playlist_exists:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return\n\n if not video_exits:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n\n\n def show_all_playlists(self):\n \"\"\"Display all playlists.\"\"\"\n\n global playlists\n\n if playlists:\n print(\"Showing all playlists:\")\n for playlist in playlists:\n print(playlist)\n else:\n print(\"No playlists exist yet\")\n\n def show_playlist(self, playlist_name):\n \"\"\"Display all videos in a playlist with a given name.\"\"\"\n\n playlist_exists = False\n\n for playlist in playlists:\n if playlist_name.lower() == playlist.lower():\n print(f\"Showing playlist: {playlist}\")\n \n if len(playlists[playlist]) == 0:\n print(\"No videos here yet\")\n else:\n for video in playlists[playlist]:\n tag = str(video.tags).strip(\"()\")\n print(f\"{video.title} ({video.video_id}) [{tag}]\")\n \n playlist_exists = True\n\n if not playlist_exists:\n print(f\"Cannot show {playlist_name}: Playlist does not exist\")\n\n\n def remove_from_playlist(self, playlist_name, video_id):\n \"\"\"Removes a video to a playlist with a given name.\"\"\"\n\n playlist_exists = False\n\n all_videos = self._video_library.get_all_videos()\n videos = []\n for video in all_videos:\n videos.append(video.video_id)\n \n if video_id in videos:\n\n video = self._video_library.get_video(video_id)\n\n for playlist in playlists:\n if playlist_name.lower() == playlist.lower():\n if video in playlists[playlist]:\n playlists[playlist].remove(video)\n print(f\"Removed video from {playlist}: {video.title}\")\n else:\n print(f\"Cannot remove video from {playlist}: Video does not exist\")\n\n playlist_exists = True\n\n if not playlist_exists:\n print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n return\n\n else:\n print(f\"Cannot remove video from {playlist_name}: Video does not exist\")\n\n def clear_playlist(self, playlist_name):\n \"\"\"Removes all videos from a playlist with a given name.\"\"\"\n\n playlist_exists = False\n\n for playlist in playlists:\n if playlist_name.lower() == playlist.lower():\n playlists[playlist] = []\n print(f\"Successfully removed all videos from {playlist}\")\n playlist_exists = True\n \n if not playlist_name:\n print(f\"Cannot clear playlist {playlist_name}: 
Playlist does not exist\")\n\n\n def delete_playlist(self, playlist_name):\n \"\"\"Deletes a playlist with a given name.\"\"\"\n\n playlist_exists = False\n\n for playlist in playlists:\n if playlist_name.lower() == playlist.lower():\n playlists.pop(playlist)\n print(f\"Deleted playlist: my_playlist: {playlist}\")\n playlist_exists = True\n \n if not playlist_name:\n print(f\"Cannot delete playlist {playlist_name}: Playlist does not exist\")\n \n\n def search_videos(self, search_term):\n \"\"\"Display all the videos whose titles contain the search_term.\n\n Args:\n search_term: The query to be used in search.\n \"\"\"\n print(\"search_videos needs implementation\")\n\n def search_videos_tag(self, video_tag):\n \"\"\"Display all videos whose tags contains the provided tag.\n\n Args:\n video_tag: The video tag to be used in search.\n \"\"\"\n print(\"search_videos_tag needs implementation\")\n\n def flag_video(self, video_id, flag_reason=\"\"):\n \"\"\"Mark a video as flagged.\n\n Args:\n video_id: The video_id to be flagged.\n flag_reason: Reason for flagging the video.\n \"\"\"\n print(\"flag_video needs implementation\")\n\n def allow_video(self, video_id):\n \"\"\"Removes a flag from a video.\n\n Args:\n video_id: The video_id to be allowed again.\n \"\"\"\n print(\"allow_video needs implementation\")\n","sub_path":"python/src/video_player.py","file_name":"video_player.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"569738072","text":"# coding: utf-8\n\nfrom PIL import Image\nimport pygame\n\n\nclass Tela:\n\n width = 800\n height = 600\n\n\nclass Cenario():\n\n def __init__(self, local, protagonista, posicao=(0, 0)):\n self.local = local\n self.pos = [posicao[0], posicao[1]]\n self.mov = [0, 0]\n\n img = Image.open(local)\n self.size = img.size\n self.limites = []\n\n self.protagonista = protagonista\n\n if self.pos[1] > 0:\n self.limites.append(self.pos[1])\n else:\n self.limites.append(0)\n if self.size[0] >= Tela.width:\n self.limites.append(Tela.width)\n else:\n self.limites.append(self.size[0] + self.pos[0])\n if self.size[1] >= Tela.height:\n self.limites.append(Tela.height)\n else:\n self.limites.append(self.size[1] + self.pos[1])\n if self.pos[0] > 0:\n self.limites.append(self.pos[0])\n else:\n self.limites.append(0)\n\n def setPosicao(self, local='', pos=(0, 0)):\n if local == 'up':\n self.pos[0] = Tela.width / 2 - self.size[0] / 2\n self.pos[1] = 0\n elif local == 'left':\n self.pos[0] = 0\n self.pos[1] = Tela.height / 2 - self.size[1] / 2\n elif local == 'center':\n self.pos[0] = Tela.width / 2 - self.size[0] / 2\n self.pos[1] = Tela.height / 2 - self.size[1] / 2\n elif local == 'right':\n self.pos[0] = Tela.width - self.size[0]\n self.pos[1] = Tela.height / 2 - self.size[1] / 2\n elif local == 'down':\n self.pos[0] = Tela.width / 2 - self.size[0] / 2\n self.pos[1] = Tela.height - self.size[1]\n else:\n self.pos[0] = pos[0]\n self.pos[1] = pos[1]\n\n def moveCenario(self):\n if self.size[0] > Tela.width:\n if Tela.width - self.size[0] < self.pos[0] + self.mov[0] < 0:\n self.pos[0] += self.mov[0]\n else:\n self.protagonista.mov[0] *= 2\n else:\n self.protagonista.mov[0] *= 2\n if self.size[1] > Tela.height:\n if Tela.height - self.size[1] < self.pos[1] + self.mov[1] < 0:\n self.pos[1] += self.mov[1]\n else:\n self.protagonista.mov[1] *= 2\n else:\n self.protagonista.mov[1] *= 2\n\n self.mov[0] = 0\n self.mov[1] = 0\n\n\nclass Protagonista(pygame.sprite.Sprite):\n\n def 
__init__(self, local, pos):\n pygame.sprite.Sprite.__init__(self)\n self.local = local\n self.velocidade = 10\n self.pos = [pos[0], pos[1]]\n self.mov = [0, 0]\n self.sprite = [0, 0]\n self.rect = pygame.Rect(self.pos[0], self.pos[1], 32, 32)\n\n def spriteSheet(self):\n sprite_sheet = pygame.image.load(self.local)\n image = pygame.Surface([32, 32])\n image.set_colorkey((0, 0, 0), pygame.RLEACCEL)\n image.blit(sprite_sheet, (self.sprite[0], self.sprite[1]))\n return image\n\n def moveProtagonista(self, limites):\n if self.pos[0] + self.mov[0] > limites[3] and self.pos[0] + self.mov[0] + 32 < limites[1]:\n self.pos[0] += self.mov[0]\n if self.pos[1] + self.mov[1] > limites[0] and self.pos[1] + self.mov[1] + 32 < limites[2]:\n self.pos[1] += self.mov[1]\n\n self.rect = pygame.Rect(self.pos[0], self.pos[1], 32, 32)\n self.mov[0] = 0\n self.mov[1] = 0\n\n\nclass Colide():\n def __init__(self):\n self.colisao = []\n self.npc = []\n self.porta = []\n\n arq = open('img/cenarios/fase1/mapa.txt', 'r')\n for i in arq:\n i = i.split()\n if i[2] == 'c':\n self.colisao.append(pygame.Rect(int(i[0]), int(i[1]), 16, 16))\n if i[2] == 'f':\n self.npc.append(pygame.Rect(int(i[0]), int(i[1]), 16, 16))\n if i[2] == 'e':\n self.porta.append(pygame.Rect(int(i[0]), int(i[1]), 16, 16))\n arq.close()\n","sub_path":"map_editor/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"521676058","text":"import sys\nimport os\nimport subprocess\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow\nfrom PyQt5 import uic\n\nfrom datetime import datetime, timedelta, date\n\nfrom timscript import Tim\n\ntitle = \"Timmy\"\nMainUI = os.path.dirname(os.path.realpath(__file__)) + \"/main.ui\" \n\nUi_MainWindow, QtBaseClass = uic.loadUiType(MainUI)\nclass MainForm(QMainWindow,Ui_MainWindow): \n def __init__(self): \n QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n\n self.setWindowTitle(title)\n\n self.tim = Tim()\n\n self.init()\n self.initSignals()\n\n if self.tim.get_config_autostart():\n self.startWork()\n\n def closeEvent(self, event):\n self.stopWork()\n event.accept()\n\n def init(self):\n self.chbAutoStart.setChecked(self.tim.get_config_autostart())\n\n self.cmbType.addItem('Commission', 'artwork:comission')\n self.cmbType.addItem('Request', 'artwork:request')\n self.cmbType.addItem('ArtTrade', 'artwork:arttrade')\n self.cmbType.addItem('Private', 'artwork:private')\n self.cmbType.addItem('Work', 'work')\n\n self.cmbEstimateUnit.addItem('Hours', 'hours')\n self.cmbEstimateUnit.addItem('Minute', 'minutes')\n\n self.currentTimName = self.tim.current_work()\n\n if self.currentTimName == \"\":\n self.currentTimName = self.tim.last_work()\n\n if self.currentTimName:\n names = self.currentTimName.split(':')\n if len(names) >= 2:\n if names[0] == 'artwork':\n self.currentType = names[1]\n index = self.cmbType.findData(self.currentType)\n if ( index != -1 ):\n self.cmbType.setCurrentIndex(index)\n\n if len(names) == 3:\n self.txtProject.setText(names[2])\n elif len(names) > 3:\n self.txtProject.setText( \":\".join(names[2:-1]) )\n\n self.currentProjectName = self.txtProject.text().lower()\n self.currentType = self.cmbType.currentData().lower()\n self.currentEstimateUnit = self.cmbEstimateUnit.itemData(0)\n self.currentEstimate = 0\n\n self.updateUI()\n\n\n def initSignals(self):\n 
self.chbAutoStart.stateChanged.connect(self.changeAutoStart)\n self.txtProject.textChanged.connect(self.changeProjectName)\n self.cmbType.activated.connect(self.changeType)\n self.btnStartStop.clicked.connect(self.clickedStartStop)\n self.btnBreak.clicked.connect(self.clickedBreak)\n self.cmbEstimateUnit.activated.connect(self.changeEstimateUnit)\n self.spbEstimateValue.valueChanged.connect(self.changeEstimate)\n \n def updateUIButtons(self):\n self.btnBreak.setEnabled(self.isWorking() and not self.isBreak() and self.valid_time())\n self.btnStartStop.setEnabled(self.isWorking() or self.isBreak() or ((self.isNotWorking() or self.isBreak()) and self.currentProjectName != \"\") and self.valid_time())\n\n if self.isWorking():\n self.btnStartStop.setText(\"Stop\")\n else:\n self.btnStartStop.setText(\"Start\")\n\n def updateUI(self):\n self.cmbType.setEnabled(self.isNotWorking() and self.currentProjectName != \"\")\n self.txtProject.setEnabled(self.isNotWorking())\n\n self.updateUIButtons()\n \n if self.currentTimName != self.timBreakName() and self.currentProjectName:\n diff = self.tim.diff(self.currentTimName)\n hours, remainder = divmod(diff.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n diff_str = '{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))\n total_time_str = self.tim.total_time_str(self.currentTimName)\n self.lblStatus.setText(\"{0} ({1})\".format(diff_str, total_time_str))\n\n def updateWork(self):\n self.currentProjectName = self.txtProject.text().lower()\n self.currentType = self.cmbType.currentData().lower()\n\n self.updateUI()\n\n\n def clickedStartStop(self):\n if self.valid_time():\n self.currentTimName = self.timName()\n self.startStopWorkToggle()\n \n\n def clickedBreak(self):\n if self.valid_time():\n self.currentTimName = self.timBreakName()\n self.startStopWorkToggle()\n \n\n def changeProjectName(self, str):\n self.currentProjectName = self.txtProject.text().lower()\n self.currentTimName = self.timName()\n\n if self.currentProjectName != \"\":\n self.cmbType.setEnabled(True)\n else:\n self.cmbType.setEnabled(False)\n\n self.updateUIButtons()\n\n def changeType(self,index):\n self.currentType = self.cmbType.itemData(index).lower()\n self.currentTimName = self.timName()\n self.updateUIButtons()\n\n def changeAutoStart(self, state):\n if state == QtCore.Qt.Checked:\n self.tim.set_config_autostart(True)\n else:\n self.tim.set_config_autostart(False)\n\n \n def changeEstimateUnit(self,index):\n self.currentEstimateUnit = self.cmbEstimateUnit.itemData(index)\n self.updateEstimate()\n \n\n def changeEstimate(self, value):\n self.currentEstimate = value\n self.updateEstimate()\n\n def updateEstimate(self):\n hours = 0\n minutes = 0\n seconds = 0\n\n if self.currentEstimateUnit == \"hours\":\n hours = self.currentEstimate\n elif self.currentEstimateUnit == \"minutes\":\n minutes = self.currentEstimate\n \n name = self.timName()\n if name:\n estimate = '{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))\n self.tim.set_estimate(name, estimate)\n\n dt_estimate = timedelta(hours=hours, minutes=minutes, seconds=seconds)\n dt_time = self.tim.total_time(name)\n\n print(dt_time.total_seconds())\n print(dt_estimate.total_seconds())\n\n self.pgbProgress.setMaximum(dt_estimate.total_seconds())\n self.pgbProgress.setValue(dt_time.total_seconds())\n\n def timName(self):\n if self.currentProjectName == \"\":\n return ''\n\n return self.currentType + \":\" + self.currentProjectName\n \n def timBreakName(self):\n return 'break'\n\n def 
isWorking(self):\n return self.tim.is_working() and self.currentTimName != self.timBreakName()\n\n def isBreak(self):\n return self.tim.is_working() and self.currentTimName == self.timBreakName()\n \n def isNotWorking(self):\n return not self.isWorking()\n\n def startStopWorkToggle(self):\n name = self.tim.current_work()\n time = self.tim.to_datetime(self.txtAddTime.text().lower())\n\n if self.currentTimName:\n if not name:\n self.tim.begin(self.currentTimName, time)\n else:\n if self.currentTimName != name:\n self.tim.switch(self.currentTimName, time)\n else:\n self.tim.end(time)\n self.currentTimName = \"\"\n\n self.txtAddTime.clear()\n\n self.updateWork()\n \n def valid_time(self):\n time = self.tim.parse_isotime(self.tim.to_datetime(self.txtAddTime.text().lower()))\n current_time = self.tim.current_work_start_time()\n return current_time is None or time >= current_time\n\n def startWork(self):\n time = self.tim.to_datetime(self.txtAddTime.text().lower())\n\n if self.currentTimName:\n if self.isNotWorking() and not self.isBreak():\n self.tim.begin(self.currentTimName, time)\n elif self.isBreak():\n time_datetime = self.tim.parse_isotime(time)\n current_time = self.tim.current_work_start_time()\n if time_datetime >= current_time:\n self.tim.switch(self.currentTimName, time)\n\n self.txtAddTime.clear()\n\n self.updateWork()\n\n def stopWork(self):\n time = self.tim.to_datetime(self.txtAddTime.text().lower())\n\n if self.currentTimName:\n if self.isWorking() or self.isBreak():\n time_datetime = self.tim.parse_isotime(time)\n current_time = self.tim.current_work_start_time()\n if time_datetime >= current_time:\n self.tim.end(time)\n self.currentTimName = \"\"\n\n self.txtAddTime.clear()\n\n self.updateWork()\n \ndef main():\n app = QApplication(sys.argv)\n w = MainForm()\n w.show()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()","sub_path":"tim/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":8678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"58093183","text":"# # python -m pip install h5py\nfrom __future__ import print_function\nimport numpy as np\n# 相同网络结果都会不同\nnp.random.seed(1337) # for reproducibility 用于指定随机数生成时所用算法开始的整数值,如果使用相同的seed()值,则每次生成的随即数都相同\n\nfrom PIL import Image\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom keras import backend as K\n\n'''\nOlivetti Faces是纽约大学的一个比较小的人脸库,由40个人的400张图片构成,即每个人的人脸图片为10张。每张图片的灰度级为8位,每个像素的灰度大小位于0-255之间。整张图片大小是1190 × 942,一共有20 × 20张照片。那么每张照片的大小就是(1190 / 20)× (942 / 20)= 57 × 47 。\n'''\n\n# There are 40 different classes\nnb_classes = 40 # 40个类别\n# TODO\nepochs = 40 # 进行40轮次训 迭代次数\n# 这个40个数据是怎么取的\n# 首要关注我跟我自己的像, 如果带来我跟别人也像,才去关注我跟别人的不像,一个是1/40 一个是39/40\n# 理想情况下,我就是跟别人不一样的\n# 如果数据足够充裕,可以去随机选择,这样既能把握自己的像,也跟关照到跟别人的不像\n\nbatch_size = 40 # 每次迭代训练使用40个样本 批量处理大小 每个网络里面用40个数据,相当于训练8个网络\n\n# input image dimensions\nimg_rows, img_cols = 57, 47\n# number of convolutional filters to use\nnb_filters1, nb_filters2 = 5, 10 # 卷积核的数目(即输出的维度)\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 3 # 单个整数或由两个整数构成的list/tuple,卷积核的宽度和长度。如为单个整数,则表示在各个空间维度的相同长度。\n\n\n# 加载数据\ndef load_data(dataset_path):\n img = Image.open(dataset_path)\n # 除以255 对原数据做归一化,使数据尽可能的准确\n img_ndarray = np.asarray(img, dtype='float64') / 255 # asarray,将数据转化为np.ndarray,但使用原内存\n # 400 
pictures, size: 57*47 = 2679\n faces = np.empty((400, 2679))\n # 二维数据展开为一维的\n # 顺序也是一种随机结果\n for row in range(20):\n for column in range(20):\n faces[row * 20 + column] = np.ndarray.flatten(\n img_ndarray[row * 57: (row + 1) * 57, column * 47: (column + 1) * 47])\n # flatten将多维数组降为一维\n\n label = np.empty(400)\n for i in range(40):\n label[i * 10: i * 10 + 10] = i\n label = label.astype(np.int)\n\n # train:320,valid:40,test:40\n train_data = np.empty((320, 2679))\n train_label = np.empty(320)\n valid_data = np.empty((40, 2679))\n valid_label = np.empty(40)\n test_data = np.empty((40, 2679))\n test_label = np.empty(40)\n # 打标签\n for i in range(40):\n train_data[i * 8: i * 8 + 8] = faces[i * 10: i * 10 + 8] # 训练集中的数据\n train_label[i * 8: i * 8 + 8] = label[i * 10: i * 10 + 8] # 训练集对应的标签\n valid_data[i] = faces[i * 10 + 8] # 验证集中的数据\n valid_label[i] = label[i * 10 + 8] # 验证集对应的标签\n test_data[i] = faces[i * 10 + 9]\n test_label[i] = label[i * 10 + 9]\n\n # 避免数据过大,从原来的64转到32位,考虑存储误差\n train_data = train_data.astype('float32')\n valid_data = valid_data.astype('float32')\n test_data = test_data.astype('float32')\n # 组合返回一个元组\n rval = [(train_data, train_label), (valid_data, valid_label), (test_data, test_label)]\n return rval\n\n\n# 设置网络层数\ndef set_model(lr=0.005, decay=1e-6, momentum=0.9):\n # 类似于平台,初始化\n model = Sequential()\n # 图片数据存储版本不一致,通道不同,区别对待\n # `'channels_first'` or `'channels_last'`\n # 用几个卷积核代表几个特征,特征多就用更多的卷积核\n # 局部的变化更明显,卷积核用更小的 ,1*1 就相当于关注图片的亮暗程度, 偏选奇数\n # 能够识别表情了,但是可能会跟其他图片的表情一样造成干扰\n if K.image_data_format() == 'channels_first':\n model.add(Conv2D(5, kernel_size=(3, 3), input_shape=(1, img_rows, img_cols)))\n else:\n model.add(Conv2D(5, kernel_size=(3, 3), input_shape=(img_rows, img_cols, 1)))\n\n # relu 不像就丢弃(0),tanh 不像就投反对票(负),sigmoid不像时还是正数,只是比较小\n # 但是tanh也会引入梯度消失问题\n model.add(Activation('tanh')) # relu sigmoid tanh\n # 卷积核越多越好,能提取更多的特征\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 卷积核数目通常越来越多\n model.add(Conv2D(10, kernel_size=(3, 3)))\n model.add(Activation('tanh'))\n # 池化层不宜多大,2*2 就丢掉3/4的数据了\n # 把最重要的东西的周边都扔掉了,眼睛只占用几个像素,做池化的话,能用来识别的像素就更少了\n # 如果现在拿到的人脸图片很大, 需要池化来集中特征\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 卷积层就做dropout\n # 一般在0.25以内\n # model.add(Dropout(0.25))\n model.add(Flatten())\n # todo\n model.add(Dense(1000)) # Full connection 128\n # 当前参数不多,所以dropout越小越好\n # 并且当前网络只有8个,dropout越小越好\n model.add(Activation('tanh'))\n # model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n # 梯度下降的更新方式\n sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)\n # 误差/损失函数的择取 ,一般是 1/2(o-t)^2\n model.compile(loss='categorical_crossentropy', optimizer=sgd)\n return model\n\n\n# 训练网络\ndef train_model(model, X_train, Y_train, X_val, Y_val):\n model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(X_val, Y_val))\n model.save_weights('model_weights.h5', overwrite=True)\n return model\n\n\n# 测试网络\ndef test_model(model, X, Y):\n model.load_weights('model_weights.h5')\n score = model.evaluate(X, Y, verbose=0)\n return score\n\n\nif __name__ == '__main__':\n # the data, shuffled and split between tran and test sets\n (X_train, y_train), (X_val, y_val), (X_test, y_test) = load_data('olivettifaces.gif')\n\n if K.image_data_format() == 'channels_first':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_val = X_val.reshape(X_val.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n 
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1) # 1 为图像像素深度\n\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_val.shape[0], 'validate samples')\n print(X_test.shape[0], 'test samples')\n\n # 标签转向量,第nb_classes为1,其他都是0\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_val = np_utils.to_categorical(y_val, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n\n model = set_model()\n train_model(model, X_train, Y_train, X_val, Y_val)\n score = test_model(model, X_test, Y_test)\n\n model.load_weights('model_weights.h5')\n classes = model.predict_classes(X_test, verbose=0)\n # 求平均值\n test_accuracy = np.mean(np.equal(y_test, classes))\n print(\"accuarcy:\", test_accuracy)\n for i in range(0, 40):\n if y_test[i] != classes[i]:\n print(y_test[i], '被错误分成', classes[i]);\n","sub_path":"demo/refer.py","file_name":"refer.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"517896829","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 7 11:32:22 2017\n@author: lbrein\n --- 与tick 配套对比计算的 kline\n > 1m的 K线 模拟tick预测\n > 定期执行,用于筛选交易对\n\"\"\"\n\nfrom com.base.public import public, logger\nfrom com.object.obj_entity import future_baseInfo\nimport talib as ta\nimport pandas as pd\nimport numpy as np\nimport time\nfrom multiprocessing import Pool, Manager\nfrom com.train.train_future_singleExpect import train_future_singleExpect\nfrom com.base.stat_fun import fisher\n\n\n# 回归方法\nclass train_future_macd(train_future_singleExpect):\n \"\"\"\n\n \"\"\"\n iniAmount = 250000 # 单边50万\n csvList = [\n \"MA_15_2.0_15_3_0.5_1.2\",\n \"JM_30_2.0_15_3_0.5_1.2\",\n \"JD_30_2.0_15_3_0.5_1.2\",\n \"RB_30_2.0_15_3_0.5_1.2\",\n \"MA_30_2.0_30_3_0.5_1.2\",\n \"JM_30_2.0_30_3_0.5_1.2\",\n \"JD_30_2.0_30_3_0.5_1.2\",\n \"RB_30_2.0_30_3_0.5_1.2\",\n ]\n\n def __init__(self):\n # 费率和滑点\n self.saveDetail = True # 是否保存明细\n self.isSimTickUse = False # 是否使用1分钟模拟tick测试,否则直接使用kline回测\n self.topUse = False\n self.isEmptyUse = False\n self.baseInfo = {}\n\n self.periodList = [15, 30, 45, 60] # 窗体参数\n self.scaleList = [2.0]\n\n self.shiftScale = 0.527 # 滑点模拟系数\n self.processCount = 6\n # k线时间\n # self.klineTypeList = ['5m']\n self.klineTypeList = ['15m', '30m', '60m']\n\n self.widthDeltaLineList = [0.25, 0.5, 0.75]\n\n self.pointLineList = [1.2, 1.5, 2.0]\n self.pointLine = 2\n self.pointStatusLine = 3\n\n self.scaleDiff = 0\n self.scaleDiff2 = 1\n self.stopTimeLine = 5\n\n self.widthTimesPeriodList = [3]\n\n # 起始时间\n self.startDate = public.getDate(diff=-60) # 60天数据回测\n self.endDate = public.getDate(diff=0)\n\n self.total_tablename = 'train_total_1'\n self.detail_tablename = 'train_future_1'\n self.totalMethod = 'macdmix'\n\n self.method = 'simTick' if self.isSimTickUse else 'quick'\n self.stage = 'dema4'\n self.uidKey = \"%s_%s_%s_%s_%s_\" + self.method + \"_\" + self.stage\n\n def iterCond(self):\n # 多重组合参数输出\n keys = ['widthDeltaLine', 'pointLine']\n for s0 in self.__getattribute__(keys[0] + 'List'):\n self.__setattr__(keys[0], s0)\n\n for s1 in self.__getattribute__(keys[1] + 'List'):\n self.__setattr__(keys[1], s1)\n\n yield '%s_%s' % (str(s0), str(s1))\n\n def Pool(self):\n time0 = time.time()\n\n pool = 
Pool(processes=self.processCount)\n shareDict = Manager().list([])\n\n Base = future_baseInfo()\n # 交易量大的,按价格排序, 类型:turple,第二位为夜盘收盘时间\n lists = Base.all(vol=100)\n tops = self.tops()\n # 清空数据库\n self.empty()\n\n for rs in lists:\n # 检查时间匹配\n codes = [rs[0]]\n if self.topUse and codes not in tops: continue\n print(rs)\n if codes[0] not in ['JM', 'RB', 'JD', 'MA']: continue\n\n for kt in self.klineTypeList:\n #self.start(codes, time0, kt, shareDict)\n try:\n pool.apply_async(self.start, (codes, time0, kt, shareDict))\n pass\n except Exception as e:\n print(e)\n continue\n pool.close()\n pool.join()\n\n def point(self, row):\n r= float(self.pointLine)\n\n width, wd1, wd2, macd, trend, volm, macd2d = (row[key] for key in\n \"width,widthDelta,widthDelta2,macd,trend,volm,macd2d\".split(\",\"))\n mm, isPoint = 0, 0\n if width!=0 and not np.isnan(macd2d) :\n mm = pow(trend * abs(wd1) * abs(wd2) * abs(macd2d) * volm / width, 1/6) / 1.5\n cond0 = (macd2d * macd) > 0 and wd1 > 0 and wd2 > 0\n cond = mm > r\n isPoint = 0 if not (cond0 and cond ) else np.sign(macd2d) * int(mm)\n\n columns = ['pow', 'isPoint']\n return pd.Series([mm, isPoint], index=columns)\n\n def turn(self, mm, md, mode):\n return 0 if mm > 0 else 1 if mode * md > 0 else -1\n\n def turn2(self, row):\n mm1 ,mm5, md, md5 = (row[key] for key in \"macd2dm1,macd2dm5,macd2d,macd2d5\".split(\",\"))\n\n if mm1 < 0 and abs(mm1) > 0.2 :\n return 1 if md > 0 else -1\n\n elif mm5 < 0:\n return 1 if md5 > 0 else -1\n\n return 0\n\n def trend(self, row):\n close, high, open, low = (row[key] for key in \"close,high,open,low\".split(\",\"))\n\n if open > close:\n r = 1 if (open -low) == 0 else ((open - close) / (open -low) - 0.1) * 2\n else:\n r = 1 if (high - open)== 0 else ((close - open) / (high - open) - 0.1) * 2\n\n return r if r > 0.2 else 0.2\n\n def total(self, dfs, dfs2=None, period=60):\n # 计算参数\n df0 = dfs[self.mCodes[0]]\n df0[\"rel_price\"] = close = df0[\"close\"]\n df0[\"datetime\"] = df0.index\n\n s0 = self.shift[0]\n p_l = df0[\"p_l\"] = (df0[\"close\"] + s0)\n p_h = df0[\"p_h\"] = (df0[\"close\"] - s0)\n\n if self.isSimTickUse:\n # 调用复合apply函数计算混合参数\n close2 = dfs2[self.mCodes[0]][\"close\"]\n df0_1 = df0.apply(lambda row: self.k_ma(row['datetime'], row['rel_price'], close2, period), axis=1)\n df0 = pd.concat([df0, df0_1], axis=1)\n\n else:\n df0[\"ma\"] = ma = ta.MA(close, timeperiod=period)\n df0[\"std\"] = std = ta.STDDEV(close, timeperiod=period, nbdev=1)\n df0['open'] = close.shift(1)\n\n # 计算影线\n df0['trend'] = trend = df0.apply(lambda x: self.trend(x), axis=1)\n # 成交量\n vol = df0['volume']\n df0['volm'] = volm = vol / vol.mean()\n\n # 上下柜\n # bullWidth\n df0[\"width\"] = width = (4 * std / ma * 100).fillna(0)\n df0[\"bullwidth\"] = width / width.abs().mean()\n\n # 近三分钟width变动\n df0[\"widthDelta\"] = wd1 = ta.DEMA((width - width.shift(1)) * trend * volm, timeperiod=2)\n df0[\"widthDelta\"] = wd1 /wd1.abs().mean()\n\n df0[\"widthDelta2\"] = wd2 = wd1 - wd1.shift(1)\n df0[\"widthDelta2\"] = wd2 / wd2.abs().mean()\n\n df0[\"wd2m\"] = wd1 * wd1.shift(1)\n df0[\"wd2m\"] = df0.apply(lambda row: self.turn(row['wd2m'], row['widthDelta'], 1), axis=1)\n\n # macd区间\n dif, dea, macd = ta.MACD(close, fastperiod=int(int(period) * 0.8), slowperiod=period,\n signalperiod=int(0.4 * period))\n\n df0[\"macd\"] = macd / macd.abs().mean() #归一化处理\n df0[\"mastd\"] = ta.STDDEV(macd * trend * volm, timeperiod=9, nbdev=1)\n df0[\"macdmax\"] = ta.MAX(df0[\"macd\"].abs(), timeperiod = int(0.4 * period)) #归一化处理\n\n df0[\"macdm\"] = macd * macd.shift(1)\n 
df0[\"macdm\"] = df0.apply(lambda row: self.turn(row['macdm'], row['macd'], 1), axis=1)\n # 计算顶点\n for wt in [5, 1]:\n ma2d = macd - macd.shift(1)\n df0[\"macd2d\" + str(wt)] = ma2d / ma2d.abs().mean() #归一化处理\n m2m = ma2d * ma2d.shift(1)\n df0[\"macd2dm\" + str(wt)] = m2m / m2m.abs().mean()\n\n df0[\"macd2d\"]= df0[\"macd2d5\"]\n df0[\"macd2dm\"] = df0.apply(lambda row: self.turn2(row), axis=1)\n\n # 相对波动\n # 相对波动\n df0['delta'] = (p_l - p_h) / df0['std']\n\n df1 = None\n # 循环 scale\n docs = []\n for scale in self.scaleList:\n for conds in self.iterCond():\n uid = self.uidKey % (\n '_'.join(self.codes), str(period), str(scale), self.klineType[:-1],\n str(self.widthTimesPeriod) + '_' + conds)\n\n df0[\"top\"], df0[\"lower\"] = df0['ma'] + (scale - self.scaleDiff) * df0['std'], df0['ma'] - (\n scale + self.scaleDiff) * df0['std']\n\n #print(df0.columns)\n df01 = df0.apply(lambda row: self.point(row), axis=1)\n df0['pow'] = df01['pow']\n df0['isPoint'] = df01['isPoint']\n\n df0.fillna(0, inplace=True)\n\n key = '_'.join(uid.split('_')[0:7])\n if key in self.csvList:\n cs = []\n bans = 'ma,open,close,high,low,p_l,p_h,top,lower,std,delta,volume,rel_price'.split(',')\n for c in df0.columns:\n if c not in bans:\n cs.append(c)\n\n file = self.Rice.basePath + '%s_pre.csv' % (uid)\n print(uid, '---------------------------- to_cvs', file)\n df0.to_csv(file, index=0, columns=cs)\n # self.share.append(self.codes)\n\n # df0.fillna(0, inplace=True)\n tot = None\n tot = self.detect(df0, df1, period=period, uid=uid)\n if tot is not None and tot['amount'] != 0:\n tot.update(\n {\n \"scale\": scale,\n \"method\": self.totalMethod,\n \"code\": self.codes[0],\n \"period\": period,\n \"uid\": uid,\n \"shift\": (p_l - p_h).mean(),\n \"std\": df0['width'].mean(),\n \"createdate\": public.getDatetime()\n }\n )\n docs.append(tot)\n return docs\n\n # 核心策略部分\n def stageApply(self, df0, df1, period=15, uid=''):\n\n doc, docs = {}, []\n\n \"\"\"\n 布林带策略:\n \n macd 组合策略 \n macd - 快慢线差值\n mastd - 差值标准差\n macdm - macd交叉点 \n macd2d - macd 变化率 \n macd2dm - macd 顶点 macd2d>0 谷点 macd2d<0 顶点\n \n # 开平仓状态 \n 开仓:\n 1 - 标准布林带开仓(> std)\n 2 : 扩展布林带策略开仓 (局部布林带顶点开仓) 5 局部macd谷点开仓\n 3: macd策略顶点开仓\n 4: macd拐点> 3类开仓\n 6: macd 交叉点开仓 \n 平仓:\n 0:标准布林带平仓\n 1、2: 扩展布林带平仓(布林带顶点/macd顶点) \n 3、macd 策略 谷底平仓\n 4、macd 突发点强制平仓(结束布林状态)\n 5、macd 交叉点平仓(结束macd状态)\n 6、macd 交叉点顶点平仓(macd顶点) \n \n \"\"\"\n\n status, isOpen = 0, 0\n\n for i in range(period, len(df0)):\n isRun, isstop = False, 0\n\n ma, p_l, p_h, top, lower, std, volm, delta, width, wd1, wd2, wd2m, macd, macdm, macd2d, macd2dm, isPoint, pow = (\n df0.ix[i, key] for key in\n \"ma,p_l,p_h,top,lower,std,volm,delta,bullwidth,widthDelta,widthDelta2,wd2m,macd,macdm,macd2d,macd2dm,isPoint,pow\".split(\n \",\"))\n\n if isPoint != 0 and isPoint * status <= 0:\n \"\"\"\n 突变点处理 \n 将布林带策略切换为macd策略 \n \"\"\"\n status = isPoint\n # 强制平仓 - 4类和5类\n if isOpen != 0 and isOpen * status < 0:\n doc = self.order(df0.iloc[i], None, 0, uid, df0, isstop= 4)\n if doc is not None:\n isOpen = 0\n docs.append(doc)\n\n # 反向持仓\n if isOpen == 0 and pow >= self.pointStatusLine:\n isOpen = 4 if status > 0 else - 4\n isRun = True\n\n elif macdm != 0 and status != 0:\n \"\"\"\n 交叉点: 快慢线穿越处理\n 1、结束突变状态,变为布林带处理\n 2、根据节点状况开平新仓,状态为6 \n \"\"\"\n # 结束macd状态\n status = 0\n\n elif status!=0:\n \"\"\"\n macd 策略处理 \n \n \"\"\"\n if isOpen == 0 and status * macd2dm > 0 :\n # 开仓\n isOpen, isRun = 3 if status > 0 else -3, True\n\n # 平仓\n elif isOpen != 0 and status * macd2dm < 0:\n isOpen, isRun, isstop = 0, True, 3\n\n else:\n \"\"\"\n 
布林带策略处理 \n \"\"\"\n wline = self.widthDeltaLine\n\n cond1, cond2 = False, False\n if wline > 0:\n # 布林宽带变化率\n cond1 = (wd1 < 0 and abs(macd2d) < 1) or ((volm + abs(wd1) + abs(macd2d))/3 < wline)\n\n if isOpen == 0:\n # 突变状态开始\n # 大于上线轨迹\n if p_h >= top and cond1:\n isOpen = -1\n isRun = True\n\n elif p_l <= lower and cond1:\n isOpen = 1\n isRun = True\n\n elif (p_h + self.scaleDiff2 * std / 2) >= top and not cond1 and (wd2m < 0 or macd2dm < 0):\n isOpen = -2 if wd2m < 0 else -5\n isRun = True\n\n elif (p_l - self.scaleDiff2 * std / 2) <= lower and not cond1 and (wd2m < 0 or macd2dm > 0) :\n isOpen = 2 if wd2m > 0 else 5\n isRun = True\n\n # 平仓\n else:\n sign, dline = isOpen / abs(isOpen), - self.scaleDiff2 * std / 2\n cond3 = (sign * ((p_h if isOpen > 0 else p_l) - ma))\n #\n if cond3 >= -dline and not cond1 and (wd2m < 0 or macd2dm * isOpen < 0) :\n isOpen, isstop = 0, 2 if wd2m < 0 else 5\n isRun = True\n\n elif cond3 >= 0 and cond1:\n isOpen, isstop = 0, 0\n isRun = True\n\n # print(i, isOpen, status, isstop, isRun )\n if isRun:\n doc = self.order(df0.iloc[i], None, isOpen, uid, df0, isstop=isstop)\n if doc is not None:\n docs.append(doc)\n return docs\n\n\ndef main():\n action = {\n \"kline\": 1,\n }\n\n if action[\"kline\"] == 1:\n obj = train_future_macd()\n obj.Pool()\n\nif __name__ == '__main__':\n main()\n","sub_path":"com/train/train_future_macd2.py","file_name":"train_future_macd2.py","file_ext":"py","file_size_in_byte":15079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"33246856","text":"arr = [1,1,2,2,4,4,5,5,5]\r\nres = []\r\n\r\nmaximum = 0\r\ndiff = 1\r\n\r\nfor k in arr:\r\n n1 = arr.count(k)\r\n n2 = arr.count(k-diff) #find number of respective values with given difference.\r\n maximum = max(maximum, n1+n2)\r\n\r\nprint(maximum) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"picking_num.py","file_name":"picking_num.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"284863708","text":"#\tquery.py\r\n\r\nimport socket\r\nimport os\r\nimport frozen_dir\r\nimport sys\r\n\r\ndef query(filename):\r\n\t#向局域网内广播query请求\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #创建UDP socket (IPv4)\r\n\ts.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n\tip = getIP()\r\n\tttl = 5\r\n\tmessage = 'query ' + filename + ' ' \\\r\n\t\t+ ip + ' ' + str(ttl) #发送信息格式为 (query 文件名 IP TTL)\r\n\tipaddress = '255.255.255.255'\r\n\tport = 16380\r\n\r\n\ts.sendto(message.encode('utf-8'), (ipaddress, port))\t#发送广播信息\r\n\ts.close()\r\n\r\ndef get(filename, filepath, ipaddress, filesize):\r\n\t#收到返回的ACK信息后,向第一个传回信息的peer发送get请求\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #创建TCP socket (IPv4)\r\n\ts.bind(('0.0.0.0', 16580))\r\n\tmessage = 'get ' + filepath\r\n\tport = 16381\r\n\t#print(ipaddress)\r\n\ts.connect((ipaddress[0], port))\t#建立TCP连接\r\n\ts.send(message.encode('utf-8'))\t#发送get请求\r\n\t#download_path = os.path.abspath(os.path.dirname(__file__)) + '/download/'\r\n\tdownload_path = os.path.abspath(frozen_dir.app_path()) + '/download/'\r\n\ttry:\r\n\t\t#print(download_path + filename)\r\n\t\tcurr_size = 0\r\n\t\tprint('文件大小共{}bytes'.format(filesize))\r\n\t\twith open(download_path + filename, 'wb') as file:\r\n\t\t\twhile True:\r\n\t\t\t\tdata = s.recv(65535)\r\n\t\t\t\t#print(\"recv\" + str(data))\r\n\t\t\t\tif data == b'end' or data == 
b'':\r\n\t\t\t\t\tprint('')\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(data)\r\n\t\t\t\t\tcurr_size += len(data)\r\n\t\t\t\t\tfile.write(data)\r\n\t\t\t\t\tdone = int(50*(curr_size/filesize))\r\n\t\t\t\t\tsys.stdout.write(\"\\r[%s%s]\" % ('█' * done, ' ' * (50 - done)))\r\n\t\t\t\t\tsys.stdout.flush()\r\n\t\t\t\t\tif curr_size == filesize:\r\n\t\t\t\t\t\tprint('')\r\n\t\t\t\t\t\tbreak\r\n\texcept Exception as e:\r\n\t\ts.close()\r\n\t\tprint(e)\r\n\t\treturn False\r\n\t#print('finish')\r\n\ts.close()\r\n\treturn True\r\n\r\ndef send_quit():\r\n\t# 向本机监听线程发送退出信息\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\ts.sendto('quit'.encode('utf-8'), ('127.0.0.1', 16380))\r\n\ts.close()\r\n\ts_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tTCP_PORT = 16381\r\n\ts_tcp.connect(('127.0.0.1', TCP_PORT))\r\n\ts_tcp.send('quit'.encode('utf-8'))\r\n\ts_tcp.close()\r\n\r\ndef getIP():\r\n\treturn socket.gethostbyname(socket.gethostname())\r\n\r\n\r\n\r\n# 大致实现思路:\r\n# 1. 用固定文件夹作为共享文件夹位置\r\n# 2. 请求方输入文件名\r\n# 3. 向局域网内广播query请求信息 (UDP)\r\n# 4. 收到信息的peer在本地的共享文件夹中搜索该文件,如果存在,向请求方发送一个ACK\r\n# 5. 若不存在,则向本地存储的IP地址转发请求(为了防止死循环 应设置TTL)\r\n# 6. 请求方向第一个传回ACK信号的peer发送get请求 (TCP)\r\n# 7. 建立连接,进行附件下载\r\n\r\n# 具体问题:\r\n# 1. 如何同时监听和发送信息 => 多线程\r\n# 2. 监听线程收到ACK之后如何通知发送线程发送get请求? => 可以将ACK监听放到客户端线程中去\r\n# 3. 若网络中无该文件,如何判断? => 超过一定时间未收到ACK,则说明无该文件\r\n# 4. 联机测试中,MSI无法接收到Surface发出的广播,反之则可以 => 发现是防火墙的问题,关掉就好了\r\n\r\n","sub_path":"2019141460505/陈蓝玉_2019141460505_计网课设/陈蓝玉_2019141460505_计网课设/source/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"352253751","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 10 11:21:53 2021\n\n@author: didie\n\"\"\"\n\nimport pickle, os\nimport numpy as np\n#%%\ndef create_distances():\n n_dipoles = 1000\n coordinates = pickle.load(open(os.environ['DATA'] + '\\\\MasterAIThesis\\\\llocation_neurons.pkl', 'rb'))\n coordinates = coordinates[range(0,coordinates.shape[0], int(coordinates.shape[0]/(n_dipoles - 1))),:][:n_dipoles,:]\n\n distances = np.zeros((n_dipoles,n_dipoles))\n def distance(coord1, coord2):\n return np.sqrt((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2 + (coord1[2] - coord2[2])**2) \n \n for i in range(n_dipoles):\n for j in range(n_dipoles):\n distances[i, j] = distance(coordinates[i,:], coordinates[j,:])\n \n pickle.dump(distances,open(os.environ['DATA'] + '\\\\MasterAIThesis\\\\dipole_distances.pkl', 'wb'))\n#%%\ncenters = [115, 231, 186, 310, 163, 463, 901, 588, 776, 696, 964, 843]\n#%%\ndef calculate_centers(centers = [], n_centers = 1, iterations = 20):\n distances = pickle.load(open(os.environ['DATA'] + '\\\\MasterAIThesis\\\\dipole_distances.pkl', 'rb'))\n n_dipoles = distances.shape[0]\n coordinates = pickle.load(open(os.environ['DATA'] + '\\\\MasterAIThesis\\\\llocation_neurons.pkl', 'rb'))\n coordinates = coordinates[range(0,coordinates.shape[0], int(coordinates.shape[0]/(n_dipoles - 1))),:][:n_dipoles,:]\n\n if len(centers) == 0: centers = list(range(n_centers))\n group = np.zeros((n_dipoles)).astype('int')\n for i in range(iterations):\n for i in range(n_dipoles):\n group[i] = np.argmin(distances[i, centers])\n for i in range(n_centers):\n members = np.where(group == i)[0]\n center_group = coordinates[members].mean(axis = 0)\n average_coord = coordinates[members] - center_group\n average_distance = np.apply_along_axis(lambda x: x[0]**2 + x[1]**2 + x[2]**2, 1, average_coord)\n 
centers[i] = members[np.argmin(average_distance)]\n \n return centers, group\n\n\n\n\n\n\n\n\n","sub_path":"Code/generate_centers.py","file_name":"generate_centers.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"287826542","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/python\n\n\"\"\"\nSSH Tool api\nSSH工具类\n\"\"\"\n\nimport paramiko\nimport threading\nimport datetime\nimport os\nimport time\n\n\nclass SshClient:\n # SSH主机地址\n ip = None\n # SSH主机端口\n port = None\n # 用户名\n username = None\n # 密码\n passwd = None\n # SSH连接对象\n ssh = None\n # SFTP连接对象\n sftp = None\n\n def __init__(self, _ip, _username='root', _passwd='lead1234#', port=21312):\n \"\"\"\n 构造方法\n @param _ip: SSH连接的目标IP地址\n @param _username: 用户名\n @param _passwd:密码\n @param port:端口\n \"\"\"\n # 服务器设置\n self.ip = _ip\n self.port = port\n self.username = _username\n self.passwd = _passwd\n self.connect()\n\n def __del__(self):\n # 关闭SFTP连接\n if self.sftp is not None:\n self.sftp.close()\n print('.....sftp is closed.....' + self.ip)\n # 关闭SSH连接\n if self.ssh is not None:\n self.ssh.close()\n print('.....ssh is closed.....' + self.ip)\n\n \"\"\"\n \" 连接到SSH服务器\n @return: 无\n \"\"\"\n\n def connect(self):\n print()\n print('connect to ip ' + self.ip)\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # 连接到SSH\n self.ssh.connect(self.ip, self.port, self.username, self.passwd, timeout=5)\n # 连接到SFTP\n self.sftp = self.ssh.open_sftp()\n print()\n\n def upload(self, local_dir, remote_dir):\n \"\"\"\n 通过SSH进行文件上传\n @param local_dir: 要上传的本地目录\n @param remote_dir: 要上传到的远程目录\n @return: 无\n \"\"\"\n\n try:\n print('upload file start %s ' % datetime.datetime.now())\n for root, dirs, files in os.walk(local_dir):\n print('[%s][%s][%s]' % (root, dirs, files))\n for file_path in files:\n local_file = os.path.join(root, file_path)\n print(11, '[%s][%s][%s][%s]' % (root, file_path, local_file, local_dir))\n a = local_file.replace(local_dir, '').replace('\\\\', '/').lstrip('/')\n print('01', a, '[%s]' % remote_dir)\n remoteFile = os.path.join(remote_dir, a)\n print(22, remoteFile)\n try:\n self.sftp.put(local_file, remoteFile)\n except Exception as e:\n self.sftp.mkdir(os.path.split(remoteFile)[0])\n self.sftp.put(local_file, remoteFile)\n print(\"66 upload %s to remote %s\" % (local_file, remoteFile))\n for name in dirs:\n local_path = os.path.join(root, name)\n print(0, local_path, local_dir)\n a = local_path.replace(local_dir, '').replace('\\\\', '')\n print(1, a)\n print(1, remote_dir)\n remote_path = os.path.join(remote_dir, a)\n print(33, remote_path)\n try:\n self.sftp.mkdir(remote_path)\n print(44, \"mkdir path %s\" % remote_path)\n except Exception as e:\n print(55, e)\n print('77,upload file success %s ' % datetime.datetime.now())\n except Exception as e:\n print(88, e)\n\n def exe_cmd_list(self, cmd_list):\n \"\"\"\n 执行SSH命令列表\n @param cmd_list: 要执行的指令列表\n @return: 无\n \"\"\"\n\n try:\n print('\\n[Execute Commonds]\\n')\n for cmd in cmd_list:\n print('Commond send >>>>> ' + cmd)\n print()\n stdin, stdout, stderr = self.ssh.exec_command(cmd)\n out = stdout.readlines()\n print('Console output >>>>>>>>>>>>>>>>>>>')\n # 屏幕输出\n for o in out:\n print(o)\n print('<<<<<<<<<<<<< output End')\n print()\n except Exception as e:\n print(88, e)\n\n def deploy_server(self, server_path, war_name, local_path):\n \"\"\"\n 通过SSH把本地war包发布到远程Tomcat服务器并重启。\n @param server_path: 远程Tomcat服务器目录\n @param war_name: 
WAR包名(不含后缀)\n @param local_path: 本地WAR包所在目录\n @return: 无\n \"\"\"\n\n print('--------------------------------------')\n print('\\nDeploy [' + war_name + '] Start >>>>>>>>>>>>>>>>>>>\\n')\n\n # 停止Tomcat,删除已发布War目录及包\n cmd_stop = []\n cmd_stop.append(server_path + '/bin/shutdown.sh')\n cmd_stop.append('rm -rf ' + server_path + '/webapps/' + war_name)\n cmd_stop.append('rm -rf ' + server_path + '/webapps/' + war_name + '.war')\n self.exe_cmd_list(cmd_stop)\n\n # 上传War包\n self.upload(local_path, server_path + '/webapps/')\n\n # 启动Tomcat\n cmd_start = []\n cmd_start.append(server_path + '/bin/startup.sh')\n self.exe_cmd_list(cmd_start)\n\n print('Deploy [' + war_name + '] End')\n print('--------------------------------------')\n print()\n\n def get_new_lines(self, remote_filename, remote_file_size):\n \"\"\"\n 通过SSH读取远程文件新增文本行\n @param remote_filename: 远程文件路径\n @param remote_file_size: 远程文件曾经大小\n @return: 新增行\n \"\"\"\n\n # Opens the file and reads any new data from it.\n remote_file = self.sftp.open(remote_filename, 'r')\n line_terminators_joined = '\\r\\n'\n # seek to the latest read point in the file\n remote_file.seek(remote_file_size, 0)\n # read any new lines from the file\n line = remote_file.readline()\n while line:\n yield line.strip(line_terminators_joined)\n line = remote_file.readline()\n\n remote_file.close()\n\n def tail_print(self, remote_filename):\n \"\"\"\n 通过SSH监控远程文件新增文本行并输出到控制台\n @param remote_filename: 远程文件路径\n @return: 无\n \"\"\"\n\n try:\n remote_file_size = -1\n while 1:\n # 文件统计\n stat_info = self.sftp.stat(remote_filename)\n # 上次文件大小非空时,输出新增行\n if remote_file_size > 0:\n # if the file's grown\n if remote_file_size < stat_info.st_size:\n for line in self.get_new_lines(remote_filename, remote_file_size):\n print(line)\n\n remote_file_size = stat_info.st_size\n # 休息1秒后再试\n time.sleep(1)\n except Exception as e:\n print(88, e)\n\n\nif __name__ == '__main__':\n # 启动SSH客户端 \n ip = '10.1.xx.xxx'\n sshClient = SshClient(ip)\n # 发包crmcore\n sshClient.deploy_server('/opt/tomcat_XXX', 'xxxx', r'd:\\xxxx\\output\\xxxx')\n # 查看日志\n sshClient.tail_print('/xxxx/logs/xxxxxx/xxxxxxx.log')\n","sub_path":"common/sshcm.py","file_name":"sshcm.py","file_ext":"py","file_size_in_byte":7345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"447526871","text":"import unittest\n\nfrom collections.abc import Iterable\nimport pickle\nimport shutil\nimport os\n\nimport chainerio\n\n\n@unittest.skipIf(shutil.which('hdfs') is None, \"HDFS client not installed\")\nclass TestHdfsHandler(unittest.TestCase):\n\n def setUp(self):\n self.test_string = \"this is a test string\\n\"\n self.test_string_b = self.test_string.encode(\"utf-8\")\n self.fs = \"hdfs\"\n self.tmpfile_name = \"tmpfile.txt\"\n\n def test_read_bytes(self):\n\n with chainerio.create_handler(self.fs) as handler:\n with handler.open(self.tmpfile_name, \"wb\") as tmpfile:\n tmpfile.write(self.test_string_b)\n with handler.open(self.tmpfile_name, \"rb\") as f:\n self.assertEqual(self.test_string_b, f.read())\n\n def test_read_string(self):\n\n with chainerio.create_handler(self.fs) as handler:\n with handler.open(self.tmpfile_name, \"w\") as tmpfile:\n tmpfile.write(self.test_string)\n with handler.open(self.tmpfile_name, \"r\") as f:\n self.assertEqual(self.test_string, f.read())\n with handler.open(self.tmpfile_name, \"r\") as f:\n self.assertEqual(self.test_string, f.readline())\n\n def test_read_non_exist(self):\n\n non_exist_file = \"non_exist_file.txt\"\n\n with 
chainerio.create_handler(self.fs) as handler:\n self.assertRaises(IOError, handler.open, non_exist_file)\n\n def test_list(self):\n with chainerio.create_handler(self.fs) as handler:\n file_generator = handler.list()\n self.assertIsInstance(file_generator, Iterable)\n file_list = list(file_generator)\n self.assertIn(self.tmpfile_name, file_list)\n\n def test_info(self):\n with chainerio.create_handler(self.fs) as handler:\n self.assertIsInstance(handler.info(), str)\n\n def test_isdir(self):\n with chainerio.create_handler(self.fs) as handler:\n self.assertTrue(handler.isdir(\"/\"))\n self.assertFalse(handler.isdir(self.tmpfile_name))\n\n def test_mkdir(self):\n test_dir_name = \"testmkdir\"\n with chainerio.create_handler(self.fs) as handler:\n handler.mkdir(test_dir_name)\n self.assertTrue(handler.isdir(test_dir_name))\n\n handler.remove(test_dir_name)\n\n def test_makedirs(self):\n test_dir_name = \"testmkdir/\"\n nested_dir_name = test_dir_name + \"nested_dir\"\n\n with chainerio.create_handler(self.fs) as handler:\n handler.makedirs(nested_dir_name)\n self.assertTrue(handler.isdir(nested_dir_name))\n\n handler.remove(test_dir_name, True)\n\n def test_picle(self):\n\n pickle_file_name = \"test_pickle.pickle\"\n test_data = {'test_elem1': b'balabala',\n 'test_elem2': 'balabala'}\n\n with chainerio.create_handler(self.fs) as handler:\n with handler.open(pickle_file_name, 'wb') as f:\n pickle.dump(test_data, f)\n with handler.open(pickle_file_name, 'rb') as f:\n loaded_obj = pickle.load(f)\n self.assertEqual(test_data, loaded_obj)\n\n handler.remove(pickle_file_name, True)\n\n def test_exists(self):\n non_exist_file = \"non_exist_file.txt\"\n\n with chainerio.create_handler(self.fs) as handler:\n self.assertTrue(handler.exists(self.tmpfile_name))\n self.assertTrue(handler.exists(\"/\"))\n self.assertFalse(handler.exists(non_exist_file))\n\n def test_rename(self):\n with chainerio.create_handler(self.fs) as handler:\n with handler.open('src', 'w') as fp:\n fp.write('foobar')\n\n self.assertTrue(handler.exists('src'))\n self.assertFalse(handler.exists('dst'))\n\n handler.rename('src', 'dst')\n self.assertFalse(handler.exists('src'))\n self.assertTrue(handler.exists('dst'))\n\n with handler.open('dst', 'r') as fp:\n data = fp.read()\n assert data == 'foobar'\n\n handler.remove('dst', True)\n\n def test_remove(self):\n test_file = \"test_remove.txt\"\n test_dir = \"test_dir/\"\n nested_dir = os.path.join(test_dir, \"nested_file/\")\n nested_file = os.path.join(nested_dir, test_file)\n\n with chainerio.create_handler(self.fs) as handler:\n with handler.open(test_file, 'w') as fp:\n fp.write('foobar')\n\n # test remove on one file\n self.assertTrue(handler.exists(test_file))\n handler.remove(test_file)\n self.assertFalse(handler.exists(test_file))\n\n # test remove on directory\n handler.makedirs(nested_dir)\n with handler.open(nested_file, 'w') as fp:\n fp.write('foobar')\n\n self.assertTrue(handler.exists(test_dir))\n self.assertTrue(handler.exists(nested_dir))\n self.assertTrue(handler.exists(nested_file))\n\n handler.remove(test_dir, True)\n\n self.assertFalse(handler.exists(test_dir))\n self.assertFalse(handler.exists(nested_dir))\n self.assertFalse(handler.exists(nested_file))\n\n def test_stat(self):\n # pass for now\n # TODO(tianqi) add test after we well defined the stat\n 
pass\n","sub_path":"tests/filesystem_tests/test_hdfs_handler.py","file_name":"test_hdfs_handler.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"395250252","text":"import colorama, time\r\ncolorama.init()\r\n\r\nloadingPhrases = ['Загружаем мир', 'Печатаем диалоги', 'Полируем пол', 'Убиваем монстров', 'Отдыхаем']\r\ninitialisationPhrase = list('Процесс инициализации игры')\r\nsuccessInitPhrase = list('Успешно!')\r\nrequireInf = list('Обязательно посетите пункт \"информация\" в главном меню!')\r\nreadyColor = list('Готово! ')\r\nchooseColorAlert = list('Внимание, перед запуском главного меню Вам требуется выбрать цвет интерфейса')\r\ncolorRed = list('Красный (1)')\r\ncolorBlue = list('Синий (2)')\r\ncolorWhite = list('Белый (3)')\r\ncolorGreen = list('Зеленый(4)')\r\nwelcomeMenu = list('Добро пожаловать в главное меню!')\r\nnewGame = list('Новая игра (1)')\r\ncountinueGame = list('Продолжить (2)')\r\ninformationAbout = list('[!] Информация (3)')\r\nsettingsGame = list('Настройки (4)')\r\nexitGame = list('Выход (5)')\r\nbye = list('До свидания!')\r\nchangeColor = list('Изменить цвет (1)')\r\nchangeSpeed = list('Изменить задержку появления символов (2)')\r\nchangeColorCycle = list('Введите число 1-2')\r\nmenuCycle = list('Введите число 1-5')\r\nchangeColorCycle2 = list('Введите число 1-4')\r\ninformFirstP1 = list('1. Все сохранения находятся в файле save.txt, а настройки в settings.txt. Во избижание ошибок программы рекомендуется ')\r\ninformFirstP2 = list('НЕ ')\r\ninformFirstP3 = list('изменять содержимое файлов')\r\ninformSecond = list('2. Игра сохранятеся посредством ввода save (обязательно в нижнем регистре) во время игры.')\r\nbackMenuWhileGame = list('3. Для выхода в меню во время игры введите mainMenu (обязательно в таком регистре) во время игры')\r\nlostSettingsAlert = list('4. 
В случае ошибки программы/необходимости сброса настроек просто удалите файл settings.txt')\r\nbackMenu = list('Выйти в главое меню (1)')\r\nchooseSpeedAlert = list('А теперь, пожалуйста, выберите задержку появления символов (0.1 - 2)')\r\nchooseSpeedAlert2 = list('Выберите задержку появления символов (0.1 - 2)')\r\n\r\ndef stylishPrint(letters, delay, transference):\r\n for let in letters:\r\n print(let, end='', flush=True)\r\n time.sleep(delay)\r\n if transference == True:\r\n print('', end='\\n')\r\n\r\ndef colorStylishPrint(letters, delay, transference, colorGlobalArg):\r\n for let in letters:\r\n print(colorGlobalArg + let, end='', flush=True)\r\n time.sleep(delay)\r\n if transference == True:\r\n print('', end='\\n')\r\n\r\ndef colorStylishPrint2(letters, delay, transference, colorGlobalArg, style):\r\n for let in letters:\r\n print(style + colorGlobalArg + let, end='', flush=True)\r\n time.sleep(delay)\r\n if transference == True:\r\n print('', end='\\n')\r\n","sub_path":"source RUS/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630019807","text":"def get_gcd(a, b):\n if a < 0:\n a = -a\n if b < 0:\n b = -b\n while a != 0 and b != 0:\n if a > b:\n a %= b\n else:\n b %= a\n res = a + b\n return res\n\ndef get_average(roul):\n prev_level = cur_level = [roul[0]]\n for i in range(1, len(roul)):\n cur_level = []\n for k in range(i + 1):\n sum_elem = roul[i][k]\n cell = []\n if k == 0:\n cell.append(sum_elem + prev_level[0][0])\n elif k == i:\n cell.append(sum_elem + prev_level[-1][0])\n else:\n for elem in prev_level[k - 1]:\n cell.append(sum_elem + elem)\n for elem in prev_level[k]:\n cell.append(sum_elem + elem)\n cur_level.append(cell)\n prev_level = cur_level\n ans = []\n for s in cur_level:\n for elem in s:\n ans.append(elem)\n num = sum(ans)\n den = len(ans)\n gcd = get_gcd(num, den)\n num //= gcd\n den //= gcd\n return (num, den)\n\n#### Ввод входных данных ####################\nn = int(input()) # Кол-во наборов входных данных\nresults = []\nfor _ in range(n):\n height = int(input()) # Высота рулетки\n roul = [] # Рулетка\n for i in range(height):\n roul.append(list(map(int, input().split()))) # Заполнение очередного уровня\n results.append(get_average(roul))\nfor i in range(n):\n print(results[i][0], results[i][1])\n\n\n \n","sub_path":"casino_halton/casino_halton.py","file_name":"casino_halton.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298216120","text":"#导入数值计算库\nimport numpy as np\n#导入科学计算库\nimport pandas as pd\n\n\ndef convertStrToNumber(name, data):\n mapping_keys = data[name].drop_duplicates().values\n mapping = {}\n for i in range(len(mapping_keys)):\n mapping[mapping_keys[i]] = i\n return data[name].map(mapping), mapping\n\n\ntrain_path = \"to_veirify_no_mapping.csv\"\ntrain = pd.read_csv(train_path,\n header=0)\n\ntrain.pop(\"iyear\")\n\n# ids = train.pop(\"eventid\") 不要丢弃id\nwhat = train.pop(\"Unnamed: 0\")\n\n# 处理省份信息provstate\ntrain[\"provstate\"], provstate_mapping = convertStrToNumber(\"provstate\", train)\n\n# 处理犯罪集团的名称gname\ntrain[\"gname\"], gname_mapping = convertStrToNumber(\"gname\", train)\n\n# 处理实体的名称corp1\ntrain[\"corp1\"], corp1_mapping = convertStrToNumber(\"corp1\", train)\n\n# 查看省份字符串到数字的映射\nprint(provstate_mapping)\n# 查看犯罪集团字符串到数字的映射\nprint(gname_mapping)\n# 
查看实体字符串到数字的映射\nprint(corp1_mapping)\n\n\n\ntrain.to_csv(\"to_verify.csv\")\n","sub_path":"jichao/to_verify_analyze.py","file_name":"to_verify_analyze.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"331871787","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'app'\nurlpatterns = [\n path('room', views.room, name='room'),\n path('assign_chore', views.assign_chore, name='assign_chore'),\n path('reset_common_fee', views.reset_common_fee, name='reset_common_fee'),\n path('finish_task', views.finish_task, name='finish_task'),\n path('request_house_owner', views.request_house_owner,\n name='request_house_owner'),\n path('set_username', views.set_username, name='set_username'),\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"577906475","text":"__author__ = 'Anand'\n\nimport turtle\n\ndef draw_square():\n brad = turtle.Turtle()\n brad.shape(\"turtle\")\n brad.color(\"black\")\n brad.speed(2)\n\n for i in range(5):\n brad.forward(100)\n brad.right(90)\n\n\ndef draw_circle():\n\n anand = turtle.Turtle()\n anand.shape(\"turtle\")\n anand.color(\"blue\")\n anand.circle(100)\n\ndef draw_triangle():\n\n damini = turtle.Turtle()\n damini.shape(\"turtle\")\n damini.color(\"blue\")\n damini.triangle(100)\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"red\")\n\nfor i in range(1,37):\n draw_square()\n brad.right(10)\n#draw_circle()\n#draw_triangle()\nwindow.exitonclick()","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"395296556","text":"'''\nSimple analysis of the file found here\nhttps://datamarket.com/data/set/22p5/number-of-deaths-and-serious-injuries-in-uk-road-accidents-each-month-jan-1969-dec-1984-seatbelt-law-introduced-in-feb-1983-indicator-in-second-column#!ds=22p5!2eks&display=line\n'''\nfrom __future__ import print_function\nimport pandas as pd\nimport numpy as np\nimport pymc as pm\nimport scipy.stats as stats\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\ncolors = ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00', \n '#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2']\n\n\ndf = pd.read_csv(\"/home/pawel-dell/learning/datasets/count_data/car_accidents/cars.csv\")\n\naccidents = df.ix[:,1]\n\n\n#at first we run (-) minimum likelihood optimisation\ndef poisson_logprob(mu, sign=-1):\n return np.sum(sign*stats.poisson.logpmf(accidents, mu=mu))\n\nfreq_results = opt.minimize_scalar(poisson_logprob)\n\n\nx_min = np.min(accidents)-10\nx_max = np.max(accidents)+10\nx_mu = np.int(freq_results['x'])\n'''\nfor i in range(x_min,x_max):\n plt.bar(i, stats.poisson.pmf(x_mu, i), color=colors[3])\n\nplt.show()\n'''\n\n# so the problem with this that min and max spawns\n# ('min:', 1047, 'x_max:', 2664)\n# and mle with mu equal to mu: 1670.3072916\n# does not even cover winter values of 2200+\n\nM=50000\n\n#mu = pm.Uniform('mu', lower=x_min, upper=x_max)\nmu = pm.Normal('mu', mu=1650, tau=0.00001)\ny_obs = pm.Poisson('observed', mu=mu,value=accidents, observed=True)\ny_pre = pm.Poisson('estimated',mu=mu, observed=False)\nmodel = pm.Model([ mu, y_obs, y_pre ])\nmcmc = pm.MCMC(model)\n# there are two ways at arriving at similar value of mu as above\n# 1) 
Maximum a posteriori estimation\n'''\n\n map = pm.MAP(model)\n map.fit()\n\n\n# 2) or traditional mcmc run\n'''\nmc_res = mcmc.sample(M,100)\n\nprint('MLE:',freq_results['x'], 'MCMC:', mcmc.trace('mu')[:].mean())\n\ny_pred = mcmc.trace('estimated')[:]\n\n\n# but it does not cover much\ndef plt_post_pois():\n fig = plt.figure(figsize=(10,6))\n fig.add_subplot(211)\n plt.hist(y_pred, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlim(x_min, x_max)\n plt.ylabel('Frequency')\n plt.title('Posterior predictive distribution')\n fig.add_subplot(212)\n plt.hist(accidents, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlabel('No of accidents')\n plt.ylabel('Frequency')\n plt.title('Distribution of observed data')\n plt.tight_layout()\n plt.show()\n\n\n\nn_mu = pm.Normal('n_mu', mu=1650, tau=0.00001)\nn_lam = pm.Uniform('n_uni_alpha',0,1)\nn_alpha = pm.Exponential('n_alpha', beta=n_lam)\nn_y_obs = pm.NegativeBinomial('n_observed', mu=mu, alpha = n_alpha, value=accidents, observed=True)\nn_y_pre = pm.NegativeBinomial('n_estimated',mu=mu, alpha = n_alpha, observed=False)\nn_model = pm.Model([ n_mu, n_lam, n_alpha, n_y_obs, n_y_pre ])\nn_mcmc = pm.MCMC(n_model)\n\n\nmc_res = n_mcmc.sample(M,100)\n\nprint('MLE:',freq_results['x'], 'MCMC(Poisson):', mcmc.trace('mu')[:].mean(), 'MCMC(Negative binominal)', n_mcmc.trace('mu')[:].mean())\n\nn_y_pred = n_mcmc.trace('n_estimated')[:]\n\n\ndef plt_post():\n fig = plt.figure(figsize=(10,6))\n fig.add_subplot(311)\n plt.hist(y_pred, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlim(x_min, x_max)\n plt.ylabel('Frequency')\n plt.title('Posterior predictive distribution ( Poisson )')\n fig.add_subplot(312)\n plt.hist(n_y_pred, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlim(x_min, x_max)\n plt.ylabel('Frequency')\n plt.title('Posterior predictive distribution ( negative binominal )')\n fig.add_subplot(313)\n plt.hist(accidents, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1])\n plt.xlabel('No of accidents')\n plt.ylabel('Frequency')\n plt.title('Distribution of observed data')\n plt.tight_layout()\n plt.show()\n\n\n\n\n\n\n\nd_model_alpha = 1.0 / accidents.mean()\na_n = len(accidents)\n\nd_lam = pm.Uniform('d_uni_alpha',0,1)\nd_alpha = pm.Exponential('d_alpha', beta=d_lam)\nlambda_1 = pm.Exponential(\"lambda_1\", d_lam)\nlambda_2 = pm.Exponential(\"lambda_2\", d_lam)\ntau = pm.DiscreteUniform(\"tau\", lower=x_min, upper=x_max)\n\n@pm.deterministic\ndef lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):\n out = np.zeros(a_n)\n out[:tau] = lambda_1 # lambda before tau is lambda1\n out[tau:] = lambda_2 # lambda after (and including) tau is lambda2\n return out\n\n#d_obs = pm.NegativeBinomial('d_observed', mu=lambda_, alpha = d_alpha, value=accidents, observed=True)\nd_obs = pm.Poisson('d_observed', mu=lambda_, value=accidents, observed=True)\n\nd_model = pm.Model([d_obs,d_lam, d_alpha, lambda_1, lambda_2, tau])\n\nd_mcmc = pm.MCMC(d_model)\nd_mcmc.sample(M, 10000, 1)\n\nlambda_1_samples = d_mcmc.trace('lambda_1')[:]\nlambda_2_samples = d_mcmc.trace('lambda_2')[:]\ntau_samples = d_mcmc.trace('tau')[:]\n\ndef plt_post_two_lambdas():\n fig = plt.figure(figsize=(10,6))\n fig.add_subplot(311)\n plt.hist(lambda_1_samples, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlim(x_min, x_max)\n plt.ylabel('Frequency')\n plt.title('First lambda group')\n fig.add_subplot(312)\n 
plt.hist(lambda_2_samples, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1]) \n plt.xlim(x_min, x_max)\n plt.ylabel('Frequency')\n plt.title('Second Lambda Group')\n fig.add_subplot(313)\n plt.hist(tau_samples, range=[x_min, x_max], bins=100, histtype='stepfilled', color=colors[1])\n plt.xlabel('No of accidents')\n plt.ylabel('Frequency')\n plt.title('Tau')\n plt.tight_layout()\n plt.show()\n\n\nplt_post_two_lambdas()\n","sub_path":"code/count_data/car_accidents/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537872217","text":"\nimport keras.backend as K\nimport numpy as np\nfrom keras.layers import Layer\nfrom keras.constraints import max_norm\n\n\ndata_format = \"channels_last\"\ninitializer='glorot_uniform'\nconstraint = max_norm(1.0)\nggl_scale_hw = 2\n\nclass SelfAttention(Layer):\n\t\n\t\n\tdef __init__(self, attn_filters, **kwargs):\n\t\t\n\t\tsuper(SelfAttention, self).__init__(**kwargs)\n\t\tself.filters = attn_filters\n\t\tself.Wf = None\n\t\tself.Wg = None\n\t\tself.Wh = None\n\t\tself.Wv = None\n\t\tself.n = 0\n\t\tself.channels = 0\n\t\tself.gamma = self.add_weight(name='gamma', shape=[1], initializer='zeros', constraint=constraint, dtype = 'float32')\n\t\t\n\t\t\n\tdef build(self, input_shape):\n\t\t\n\t\tshape1 = [1,1] + [input_shape[-1], self.filters]\n\t\tshape2 = [1,1] + [input_shape[-1]] * 2\n\t\tself.Wf = self.add_weight(name='Wf', shape=shape1,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wg = self.add_weight(name='Wg', shape = shape1,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wh = self.add_weight(name='Wh', shape = shape2,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wv = self.add_weight(name='Wh', shape = shape2, \n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.channels= input_shape[-1]\n\t\tself.n = input_shape[1]*input_shape[2]\n\t\tsuper(SelfAttention, self).build(input_shape)\n \n \n\tdef call(self, inputs):\n\t\t\n\t\tbs = K.shape(inputs)[0:1]\n\t\tshape1 = K.variable(np.array([self.n, self.filters]), dtype='int32')\n\t\tshape2 = K.variable(np.array([self.n, self.channels]), dtype='int32')\n\t\tshape1 = K.concatenate([bs, shape1])\n\t\tshape2 = K.concatenate([bs, shape2])\n\t\tf = K.conv2d(inputs, kernel = self.Wf, data_format = data_format)\n\t\tg = K.conv2d(inputs, kernel = self.Wg, data_format = data_format)\n\t\th = K.conv2d(inputs, kernel = self.Wh, data_format = data_format)\n\t\tff = K.reshape(f, shape1)\n\t\tgf = K.reshape(g, shape1)\n\t\thf = K.reshape(h,shape2)#bs × n x c\n\t\ts = K.batch_dot(ff, gf, axes=(2,2))#bs x n x n\n\t\tbeta = K.softmax(s)\n\t\to = K.batch_dot(beta, hf, axes=(2,1))#bs x n x c\n\t\to = K.reshape(o, K.shape(inputs))\n\t\to = K.conv2d(o, kernel = self.Wv, data_format = data_format)\n\t\ty = self.gamma * o + inputs\n\t\treturn y\n\t\t\n\t\t\n\tdef get_config(self):\n\t\t\n\t\tconfig = {'attn_filters' : self.filters}\n\t\tbase_config = super(SelfAttention, self).get_config()\n\t\treturn dict(list(config.items()) + list(base_config.items()))\n\n\n\n\n\nclass GoogleAttention(Layer):\n\n\n\tdef __init__(self, scale_channels = (8,2), **kwargs):\n\n\t\tself.scale_channels = scale_channels\n\t\tself.Wf = None\n\t\tself.Wg = None\n\t\tself.Wh = None\n\t\tself.Wv = None\n\t\tself.n1 = 0\n\t\tself.n2 = 0\n\t\tself.channels = 
0\n\t\tsuper(GoogleAttention, self).__init__(**kwargs)\n\t\t\n\t\tself.gamma = self.add_weight(name='google_gamma', shape=[1], initializer='zeros', constraint=constraint, dtype = 'float32')\n\n\n\tdef build(self, input_shape):\n\t\t\n\t\tself.channels= input_shape[-1]\n\t\tshape1 = [1,1] + [input_shape[-1], self.channels // self.scale_channels[0]]\n\t\tshape2 = [1,1] + [input_shape[-1], self.channels // self.scale_channels[0]]\n\t\tshape3 = [1,1] + [self.channels // self.scale_channels[1], self.channels]\n\t\tself.Wf = self.add_weight(name='Wf', shape=shape1,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wg = self.add_weight(name='Wg', shape = shape1,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wh = self.add_weight(name='Wh', shape = shape2,\n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.Wv = self.add_weight(name='Wh', shape = shape3, \n\t\tinitializer=initializer, constraint=constraint, dtype = 'float32')\n\t\tself.n1 = (input_shape[1] // ggl_scale_hw)*(input_shape[2] // ggl_scale_hw)\n\t\tself.n2 = input_shape[1] * input_shape[2]\n\t\tsuper(GoogleAttention, self).build(input_shape)\n\n\n\tdef call(self, inputs):\n\n\t\t#c2 : num of input channels // 2, c1 : num of input channels // 8\n\t\t#n1 : (h/2) * (w/2), n2 : h * w, C : num of input channels\n\n\t\tbs = K.shape(inputs)[0:1]\n\t\thw = K.shape(inputs)[1:3]\n\t\tc2 = K.shape(inputs)[-1:] // self.scale_channels[1]\n\t\tshape1 = K.variable(np.array([self.n1, self.channels // self.scale_channels[0]]), dtype='int32')\n\t\tshape2 = K.variable(np.array([self.n2, self.channels // self.scale_channels[0]]), dtype='int32')\n\t\tshape3 = K.variable(np.array([self.n1, self.channels // self.scale_channels[1]]), dtype='int32')\n\t\tshape1 = K.concatenate([bs, shape1])\n\t\tshape2 = K.concatenate([bs, shape2])\n\t\tshape2 = K.concatenate([bs, shape3])\n\t\tshape4 = K.concatenate([bs, hw, c2])\n\t\tf = K.conv2d(inputs, kernel = self.Wf, data_format = data_format)\n\t\tf = K.pool2d(f, pool_size = (ggl_scale_hw, ggl_scale_hw), \n\t\tstrides = (ggl_scale_hw, ggl_scale_hw), padding='same', pool_mode = 'max') # h/2 , w/2 , c1\n\n\t\tg = K.conv2d(inputs, kernel = self.Wg, data_format = data_format) # h, w, c1\n\n\t\th = K.conv2d(inputs, kernel = self.Wh, data_format = data_format)\n\t\th = K.pool2d(h, pool_size = (ggl_scale_hw, ggl_scale_hw), \n\t\tstrides = (ggl_scale_hw, ggl_scale_hw), padding='same', pool_mode = 'max') # h/2, w/2, c2\n\n\t\tff = K.reshape(f, shape1) # bs, n1, c1\n\t\tgf = K.reshape(g, shape2) # bs, n2, c1\n\t\thf = K.reshape(h, shape3) # bs, n1, c2\n\n\t\ts = K.batch_dot(ff, gf, axes=(2,2)) # bs, n1, n2\n\t\tbeta = K.softmax(s) #bs, n1, n2\n\t\to = K.batch_dot(beta, hf, axes=(1,1)) #bs, n2, c2\n\t\to = K.reshape(o, shape4) #bs, h, w, c2\n\t\to = K.conv2d(o, kernel = self.Wv, data_format = data_format) #bs, h, w, C\n\t\treturn self.gamma*o + inputs\n\n\n\tdef get_config(self):\n\t\t\n\t\tconfig = {'scale_channels' : self.scale_channels}\n\t\tbase_config = super(GoogleAttention, self).get_config()\n\t\treturn dict(list(config.items()) + list(base_config.items()))\n\n\t\t\n\n\n\t\t\t\t\t\t\nif __name__ == '__main__':\n\t\n\tfrom keras.layers import Input\n\tfrom keras.models import Model\n\tfrom keras.optimizers import Adam\n\t\n\t\n\tinp = Input([4,4,3])\n\timg = np.random.normal(0.0,1.0, (2,4,4,3))\n\tobj = np.random.normal(0.0, 1.0, (2,4,4,3))\n\tout = SelfAttention(2)(inp)\n\tm = Model(inp, out)\n\tm.compile(optimizer=Adam(0.01), 
loss = 'mse')\n\tp = m.predict(img)\n\tprint(img)\n\tprint('----')\n\tprint(obj)\n\tprint('----')\n\tprint(p)\n\tprint(p.shape)\n\tm.train_on_batch(img, obj)\n\tprint('----')\n\tprint(m.predict(img))\n\t\n\t'''\n\tinp = Input([4,4,3])\n\timg = np.random.normal(0.0,1.0, (2,4,4,3))\n\tobj = np.random.normal(0.0, 1.0, (2,4,4,3))\n\tout = GoogleAttention()(inp)\n\tm = Model(inp, out)\n\tm.compile(optimizer=Adam(0.01), loss = 'mse')\n\tp = m.predict(img)\n\tprint(img)\n\tprint('----')\n\tprint(obj)\n\tprint('----')\n\tprint(p)\n\tprint(p.shape)\n\tm.train_on_batch(img, obj)\n\tprint('----')\n\tprint(m.predict(img))\n\t'''\n","sub_path":".ipynb_checkpoints/self_attention-checkpoint.py","file_name":"self_attention-checkpoint.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"609237177","text":"def divide(a,b):\n\ttry:\n\t\tresult = a/b\n\texcept ZeroDivisionError:\n\t\tprint('Do not divide by zero')\n\texcept TypeError as err:\n\t\tprint('a and b must be int or float')\n\t\tprint(err) # print the description in TypeError\n\telse:\n\t\tprint(f'{a} devided by {b} is {result}')\n\n\n\ndivide(1,3)\ndivide(1,0)\ndivide(1,'5')","sub_path":"debugging/divide.py","file_name":"divide.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"580736369","text":"import xgboost as xgb\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB\nfrom sklearn.preprocessing import Imputer\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_validate\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\ntrain_data = pd.read_csv('train_upd.csv')\ny = pd.read_csv('train_upd.csv')\n\ny = y[\"Congestion_Type\"]\ny[y == \"4G_BACKHAUL_CONGESTION\"] = \"C\"\ny[y == \"4G_RAN_CONGESTION\"] = \"C\"\ny[y == \"3G_BACKHAUL_CONGESTION\"] = \"C\"\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(train_data['ran_vendor'].astype(str))\ntrain_data[\"ran_vendor\"] = mvar47\n\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(y.astype(str))\ny = mvar47\n# print(le.inverse_transform(y))\ntrain_data['total_bytes'] = 0\nbyte_cols = ['web_browsing_total_bytes',\n 'video_total_bytes',\n 'social_ntwrking_bytes',\n 'cloud_computing_total_bytes',\n 'web_security_total_bytes',\n 'gaming_total_bytes',\n 'health_total_bytes',\n 'communication_total_bytes',\n 'file_sharing_total_bytes',\n 'remote_access_total_bytes',\n 'photo_sharing_total_bytes',\n 'software_dwnld_total_bytes',\n 'marketplace_total_bytes',\n 'storage_services_total_bytes',\n 'audio_total_bytes',\n 'location_services_total_bytes',\n 'presence_total_bytes',\n 'advertisement_total_bytes',\n 'system_total_bytes',\n 'voip_total_bytes',\n 'speedtest_total_bytes',\n 'email_total_bytes',\n 'weather_total_bytes',\n 'media_total_bytes',\n 'mms_total_bytes',\n 'others_total_bytes']\ntrain_data['total_bytes_mod'] = 0\nfor i in byte_cols:\n train_data['total_bytes'] += train_data[i]\ntrain_data.total_bytes_mod = train_data.total_bytes/1024 #for GB/s\ntrain_data.total_bytes_mod = (train_data.total_bytes_mod/(train_data.par_min*60))\n\nimport datetime\ntemp =[]\nfor i in range(len(train_data)):\n 
temp.append(datetime.datetime(train_data['par_year'][i],train_data['par_month'][i],train_data['par_day'][i]).weekday())\ntrain_data[\"week_day\"] = temp\nx=train_data.drop(['Congestion_Type','cell_name','par_year','par_month','par_day'],axis=1)\n# In[39]:\n\n\nfrom sklearn.model_selection import train_test_split\nx_train ,x_test,y_train,y_test=train_test_split(x,y,test_size=0.20,random_state=20)\n\n\n# In[81]:\n\n\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import matthews_corrcoef\nfrom sklearn.model_selection import cross_val_score\nregressor=RandomForestClassifier(n_estimators=700,max_features=3,max_depth=26,random_state=0,n_jobs=-1)\nxgb1 = xgb.XGBClassifier(booster='gbtree', colsample_bylevel=1,\n colsample_bytree=1, gamma=0, learning_rate=0.15,\n max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,\n n_estimators=700, n_jobs=3, nthread=1, objective='binary:logistic',\n random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,\n seed=None, subsample=1)\n#my_scorer = make_scorer(matthews_corrcoef)\n#print(cross_val_score(xgb1, x, y,scoring= my_scorer, cv=4, verbose=10))\n\n\n# In[82]:\n\n\n#xgb1.fit(x,y)\nregressor.fit(x_train,y_train)\n\n# In[209]:\n\n\ny=le.inverse_transform(y)\n\n\n# In[210]:\n\n\ny\n\n\n# In[211]:\n\n\n#pred = xgb1.predict(x)\npred = regressor.predict(x_test)\nprint(matthews_corrcoef(pred,y_test))\n\n\n# In[223]:\n\n\ntrain_data.head()\nnew_data = train_data[train_data['Congestion_Type']!= \"NC\"]\n\n\n# In[224]:\n\n\ny2 = new_data['Congestion_Type']\n\n\n# In[225]:\n\n\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(y2.astype(str))\ny2 = mvar47\n# le.inverse_transform(y2)\n\n\n# In[226]:\n\n\n# y2[1:10]\n\n\n# In[227]:\n\n\nx2=new_data.drop(['Congestion_Type','cell_name','par_year','par_month','par_day'],axis=1)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[102]:\nx_train ,x_test,y_train,y_test=train_test_split(x2,y2,test_size=0.20,random_state=20)\nregressor2 = RandomForestClassifier(n_estimators=700,max_features=3,max_depth=26,random_state=0,n_jobs=-1)\nxgb2 = xgb.XGBClassifier(booster='gbtree', colsample_bylevel=1,\n colsample_bytree=1, gamma=0, learning_rate=0.15,\n max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,\n n_estimators=700, n_jobs=3, nthread=1, objective='multi:softmax',\n random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,\n seed=None, subsample=1)\n#xgb2.fit(x2,y2)\nregressor2.fit(x2,y2)\n\n# In[228]:\n\npred2 = regressor2.predict(x2)\n#pred2 = xgb2.predict(x2)\nlen(pred2[pred2 == 2])\n\n\n# In[229]:\n\n\ntest = pd.read_csv('train_upd.csv')\n\n\n# In[230]:\n\n\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(test['ran_vendor'].astype(str))\ntest[\"ran_vendor\"] = mvar47\n\n\n# In[231]:\n\n\ntest['total_bytes'] = 0\nbyte_cols = ['web_browsing_total_bytes',\n 'video_total_bytes',\n 'social_ntwrking_bytes',\n 'cloud_computing_total_bytes',\n 'web_security_total_bytes',\n 'gaming_total_bytes',\n 'health_total_bytes',\n 'communication_total_bytes',\n 'file_sharing_total_bytes',\n 'remote_access_total_bytes',\n 'photo_sharing_total_bytes',\n 'software_dwnld_total_bytes',\n 'marketplace_total_bytes',\n 'storage_services_total_bytes',\n 'audio_total_bytes',\n 'location_services_total_bytes',\n 'presence_total_bytes',\n 'advertisement_total_bytes',\n 'system_total_bytes',\n 'voip_total_bytes',\n 'speedtest_total_bytes',\n 'email_total_bytes',\n 'weather_total_bytes',\n 'media_total_bytes',\n 'mms_total_bytes',\n 
'others_total_bytes']\ntest['total_bytes_mod'] = 0\nfor i in byte_cols:\n test['total_bytes'] += test[i]\ntest.total_bytes_mod = test.total_bytes/1024 #for GB/s\ntest.total_bytes_mod = (test.total_bytes_mod/(test.par_min*60))\n\n\n# In[240]:\n\n\ntest['cell_name'][1:10]\n\n\n# In[241]:\n\n\n\n\n\n# In[232]:\n\n\nimport datetime\ntemp =[]\nfor i in range(len(test)):\n temp.append(datetime.datetime(test['par_year'][i],test['par_month'][i],test['par_day'][i]).weekday())\ntest[\"week_day\"] = temp\n\n\n# In[233]:\n\n\ntest_x=test.drop(['cell_name','par_year','par_month','par_day','Congestion_Type'],axis=1)\n\n\n# In[328]:\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(test_x['Congestion_Type'].astype(str))\ntest_x['Congestion_Type'] = mvar47\npred1_test = regressor.predict(test_x)\n#pred1_test = xgb1.predict(test_x)\n\n\n# In[329]:\n\n\nlen(pred1_test[pred1_test == 1])\n\n\n# In[330]:\n\n\nl = []\nfor i in range(len(test_x)):\n if pred1_test[i] == 1:\n l.append(test['cell_name'][i])\n\n\n# In[331]:\n\n\ndf = pd.DataFrame(l)\n\n\n# In[332]:\n\n\ndf = df.rename({0 : 'cell_name'},axis = 1)\n\n\n# In[333]:\n\n\ndf['Predictions'] = 1\n\n\n# In[334]:\n\n\ndf.head()\n\n\n# In[335]:\n\n\ndfnew = pd.merge(test,df)\n\n\n# In[336]:\n\n\ndfnew.shape\n\n\n# In[337]:\n\n\n\n\n# In[338]:\n\n\ntest.head()\n\n\n# In[339]:\n\n\nl2 = []\nfor i in range(len(test_x)):\n if pred1_test[i] == 0:\n l2.append(test['cell_name'][i])\n\n\n# In[ ]:\n\n\n\n\n\n# In[268]:\n\n\nlen(l2)\n\n\n# In[340]:\n\n\nnew_test = test[pred1_test == 0]\n\n\n# In[341]:\n\n\nnew_test.head()\n\n\n# In[342]:\n\n\nnewnew_test = new_test.drop(['cell_name','par_year','par_month','par_day'],axis=1)\n\n\n# In[343]:\n\npred2_test = regressor2.predict(newnew_test)\n#pred2_test = xgb2.predict(newnew_test)\n\n\n# In[344]:\n\n\npred2_test.shape\n\n\n# In[345]:\n\nnew_test['Predictions'] = 0\nnew_test['Predictions'][pred2_test == 0] = 0\nnew_test['Predictions'][pred2_test == 1] = 2\nnew_test['Predictions'][pred2_test == 2] = 3\n\n\n# In[346]:\n\n\nnew_test.head()\n\n\n# In[347]:\n\n\n\ndfnew['Predictions'] = 3\n\n\n# In[348]:\n\n\ndfnew.head()\n\n\n# In[349]:\n\n\nnew_test['Predictions'][new_test['Predictions'] == 2] = 1\nnew_test['Predictions'][new_test['Predictions'] == 3] = 2\n\n\n# In[350]:\n\n\nnew_test.head()\n\n\n# In[351]:\n\n\niopp = pd.concat([dfnew,new_test])\n\n\n# In[352]:\n\n\niopp.shape\n\n\n# In[353]:\n\n\ny_test = pd.read_csv('/home/kartik/Hall_data_2019/y_test.csv')\n\n\n# In[354]:\n\n\niopp = iopp.sort_values(by = 'cell_name')\n\n\n# In[355]:\n\n\ny_test = y_test.sort_values(by = 'cell_name')\n\n\n# In[358]:\n\n\niopp.head()\n\n\n# In[360]:\n\n\ny_test.head()\n\n\n# In[361]:\n\n\nzz = y_test['Congestion_Type']\n\n\n# In[ ]:\n\n\n\n\n\n# In[302]:\n\n\nzz\n\n\n# In[362]:\n\n\nle = preprocessing.LabelEncoder()\nmvar47 = le.fit_transform(zz.astype(str))\nzz = mvar47\n\n\n# In[363]:\n\n\nzz\n\n\n# In[364]:\n\n\nzz[0:20]\n\n\n# In[365]:\n\n\nmatthews_corrcoef(zz,iopp['Predictions'])\n\n\n# In[ ]:\n","sub_path":"svm_2step.py","file_name":"svm_2step.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"102345709","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 2021-02-2021/2/2\n@author: xdong\n@site:\n@email: 12919662@qq.com\n@file: 24.QDialog-打开文件对话框的应用\n@description: $描述$\n\"\"\"\nimport sys\n\nfrom PyQt5.Qt import *\n\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n 
self.setWindowTitle('QDialog-打开文件对话框')\n self.resize(500, 500)\n btn = QPushButton('Test', self)\n btn.clicked.connect(self.test)\n self.setup_ui()\n\n def setup_ui(self):\n # 用静态方法建立文件对话框\n # 打开一个文件\n # result = QFileDialog.getOpenFileName(\n # self,\n # \"选择一个文件\",\n # \"./\",\n # \"All(*.*);;Image(*.png);;Python文件(*.py)\",\n # \"Python文件(*.py)\")\n\n # 打开多个文件\n # result = QFileDialog.getOpenFileNames(\n # self,\n # \"选择一个py文件\",\n # \"./\",\n # \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\",\n # \"Python文件(*.py)\")\n\n # # 打开1个URL文件地址,需要提前构造QUrl对象\n # url = QUrl('./')\n # result = QFileDialog.getOpenFileUrl(\n # self,\n # \"选择一个py文件\",\n # url,\n # \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\",\n # \"Python文件(*.py)\")\n\n # 打开多个URL文件地址,需要提前构造QUrl对象\n # result = QFileDialog.getOpenFileUrls(\n # self,\n # \"选择一个py文件\",\n # QUrl('./'),\n # \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\",\n # \"Python文件(*.py)\")\n\n # 保存一个文件\n # result = QFileDialog.getSaveFileName(\n # self,\n # \"选择一个文件\",\n # \"./\",\n # \"All(*.*);;Image(*.png);;Python文件(*.py)\",\n # \"Python文件(*.py)\")\n\n # 获取文件夹, 返回结果不再是一个元祖,而是一个字符串\n # result = QFileDialog.getExistingDirectory(\n # self,\n # \"选择一个文件\",\n # \"./\"\n # )\n # 获取文件夹URL, 返回一个QUrl对象\n # result = QFileDialog.getExistingDirectoryUrl(\n # self,\n # \"选择一个文件\",\n # QUrl(\"./\")\n # )\n pass\n\n def test(self):\n fd = QFileDialog(\n self,\n '选择一个文件',\n './',\n \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\"\n )\n\n # 利用内建信号获得文件string\n fd.fileSelected.connect(lambda file:print(file))\n\n # 设置为保存文件\n # fd.setAcceptMode(QFileDialog.AcceptSave)\n # 设置默认后缀\n # fd.setDefaultSuffix(\"txt\")\n # 选择文件夹\n # fd.setFileMode(QFileDialog.Directory)\n # 自定义按钮Label\n fd.setLabelText(QFileDialog.FileName, \"顺哥的文件\") #MAC无效\n fd.setLabelText(QFileDialog.Accept, \"顺哥的接受\")\n fd.setLabelText(QFileDialog.Reject, \"顺哥的拒绝\")#MAC无效\n fd.open()\n print('xxx')\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"my_notes/24.QDialog-打开文件对话框的应用.py","file_name":"24.QDialog-打开文件对话框的应用.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"75862961","text":"\"\"\"byte - file executor win32 lock module.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom byte.executors.file.lock.base import BaseFileLock, FileLockError\n\nimport pywintypes\nimport win32con\nimport win32file\n\n\nclass BaseWin32FileLock(BaseFileLock):\n \"\"\"Base win32 lock class.\"\"\"\n\n def __init__(self, fp):\n \"\"\"Create win32 lock.\n\n :param fp: File\n :type fp: file or io.IOBase\n \"\"\"\n super(BaseWin32FileLock, self).__init__(fp)\n\n # Retrieve win32 file handle\n self.handle = win32file._get_osfhandle(self.fp.fileno())\n\n def release(self):\n \"\"\"Release lock.\"\"\"\n # Unlock file\n try:\n win32file.UnlockFileEx(self.handle, 0, 0x7fff0000, pywintypes.OVERLAPPED())\n except Exception as ex:\n raise FileLockError(ex)\n\n\nclass Win32ExclusiveFileLock(BaseWin32FileLock):\n \"\"\"Exclusive win32 lock class.\"\"\"\n\n def acquire(self, blocking=None):\n \"\"\"Acquire lock.\n\n :param blocking: Block until the lock has been acquired\n :type blocking: bool\n \"\"\"\n if blocking is None:\n blocking = self.blocking\n\n # Determine lock mode\n mode = win32con.LOCKFILE_EXCLUSIVE_LOCK\n\n if not blocking:\n mode += win32con.LOCKFILE_FAIL_IMMEDIATELY\n\n # Lock file\n try:\n 
win32file.LockFileEx(self.handle, mode, 0, 0x7fff0000, pywintypes.OVERLAPPED())\n except Exception as ex:\n raise FileLockError(ex)\n\n\nclass Win32SharedFileLock(BaseWin32FileLock):\n \"\"\"Shared win32 lock class.\"\"\"\n\n def acquire(self, blocking=None):\n \"\"\"Acquire lock.\n\n :param blocking: Block until the lock has been acquired\n :type blocking: bool\n \"\"\"\n if blocking is None:\n blocking = self.blocking\n\n # Determine lock mode\n mode = 0\n\n if not blocking:\n mode += win32con.LOCKFILE_FAIL_IMMEDIATELY\n\n # Lock file\n try:\n win32file.LockFileEx(self.handle, mode, 0, 0x7fff0000, pywintypes.OVERLAPPED())\n except Exception as ex:\n raise FileLockError(ex)\n","sub_path":"byte/executors/file/lock/win32.py","file_name":"win32.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168937424","text":"import configparser\nimport math\nimport os\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfile = '20170518_123358'\nos.chdir('data')\nos.chdir(file)\n\nconfig = configparser.ConfigParser()\nconfig.read(file + '.txt')\n\ntest_patter = int(config['simulation']['normal'])\ntest_event = int(config['simulation']['event'])\ntest_D_NG = int(config['simulation']['D_NG'])\ntest_nedic = int(config['simulation']['nedic'])\ntest = (test_patter, test_event, test_D_NG, test_nedic)\n\nf_value = pd.read_csv(file + 'f_value' + '.csv')\n\nth_pa = [0, 0, 0, 1, 1, 1, 2, 2, 2,3,3,3]\nstepsize = []\nfor i in range(3):\n stepsize.append(float(config['stepsize']['stepsize' + str(int(i + 1))]))\n\nave_f = [[[] for i in range(j)] for j in test]\n\ntmp2 = 0\nfor i2 in range(len(test)):\n for i1 in range(test[i2]):\n tmp2 += 1\n ave_f[i2][i1] = f_value.iloc[:, tmp2]\n\ncolor = ['b', 'r', 'y']\ncolor =['black','r','b','g','m','c','y']\nline = ['-', '--', '-.', ':']\nstep_index = ['$s(k) = 0.5/(k+1)$', '$s(k) = 1.0/(k+1)$', '$s(k) = 2.0/(k+1)$']\nstep_index2 = [r'$s_{1}(k)$', r'$s_{2}(k)$', r'$s_{3}(k)$']\n# trigger_index = ['$E_i(k) = 0, for all i', '$E_i(k) = 0, for all i', '$E_i(k) = 0, for all i',\n# '$E_i(k) = 10/(k+1), for all i', '$E_i(k) = 10/(k+1), for all i', '$E_i(k) = 10/(k+1), for all i',\n# '$E_i(k) = 10/(k+1)^2, for all i', '$E_i(k) = 10/(k+1)^2, for all i',\n# '$E_i(k) = 10/(k+1)^2, for all i',\n# '$E_i(k) = 10*0.99^k, for all i', '$E_i(k) = 10*0.99^k, for all i',\n# '$E_i(k) = 10*0.99^k, for all i', ]\ngraph_name_index = ['$E(k) = 0$', '$E(k)=1.0/(k+1)$', '$E(k)=10/(k+1)$', '$E(k)=40/(k+1)$',\n '$E(k)=10/(k+1)^2$', '$E(k)=10/(k+1)^{0.75}$', '$E(k)=0.2$']\ntrigger_index2 = [['1'], ['2', '3', '4']]\nfor i2 in range(len(test)):\n for i1 in range(test[i2]):\n if i2 == 0:\n trigger_name = 'time'\n tmp_line = line[i2]\n elif i2 == 1:\n trigger_name = 'event'\n tmp_line = line[int(th_pa[i1] + 1)]\n plt.plot(ave_f[i2][i1],color = color[i1],linewidth = 1,\n label=graph_name_index[i1])\n# plt.legend()\nplt.xlabel('iteration $k$', fontsize=14)\nplt.ylabel('$Σ_{i=1}^{50} (f(x_i(k))-f^*)/ Σ_{i=1}^{50}(f(x_i(0))-f^*)$', fontsize=14)\nplt.tick_params(labelsize=14)\nplt.yscale(\"log\")\nplt.ylim([0.9*10**(-4),1])\n# plt.ylim([10**(-4),10**(-3)])\nplt.legend(fontsize = 16)\nsns.set_style(\"dark\")\nplt.savefig(file + 'f_value' + \".png\")\nplt.show()\n","sub_path":"make_graph_from_csv.py","file_name":"make_graph_from_csv.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"437632691","text":"# Author: Omkar Dixit\n# Email: ond170030@utdallas.edu\n\n# Rod Cutting\n\nimport sys\n\ndef rodCutting(n, prices):\n R = [0] * (n+1)\n for k in range(1, n+1):\n q = 0\n for i in range(k):\n q = max(q, prices[i]+R[k-i-1])\n R[k] = q\n return R[-1]\n\n\nif __name__==\"__main__\":\n if len(sys.argv)==1:\n print(\"Input Not detected\")\n else:\n prices = []\n for price in sys.argv[1:]:\n prices.append(int(price))\n # rate = [1.0, 2.5, 2.6, 2.5, 2.6, 2.8, 2.4, 2.5, 2.6, 2.6]\n print(rodCutting(len(prices), prices))","sub_path":"DynamicProgramming/rodCutting.py","file_name":"rodCutting.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"71417022","text":"\"\"\"\nthis module contains one class: `DataCleaner`, whose sole job is to clean the data_handling source for training purposes.\n\"\"\"\nimport logging\n\nimport pandas as pd\n\nfrom config.constants import RUN_MODE\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass DataCleaner():\n def __init__(self, config):\n self.run_mode = config[\"run_mode\"]\n self.mandatory_cols = config[\"mandatory_cols\"]\n\n def do_clean(self, pdf: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n the data_handling cleaning routine, which is only valid for `train` pipeline.\n\n Because we are not responsible for the messiness of the data_handling provided to us,\n provided that it contains what we need -- which has been validated in the `DataReader` stage.\n\n Args:\n pdf: data_handling source as a pandas dataframe\n\n Returns: cleaned data_handling source as a pandas dataframe\n\n \"\"\"\n logger.info(\"** doing data cleaning\")\n if self.run_mode == RUN_MODE.TRAIN.value:\n pdf = self.keep_only_mandatory_col(pdf)\n pdf = DataCleaner.remove_duplicates(pdf)\n pdf = DataCleaner.deal_with_nan(pdf)\n logger.info(\"** data cleaning finished!\")\n return pdf\n\n def keep_only_mandatory_col(self, pdf: pd.DataFrame):\n \"\"\"\n This might not be necessary, since the same has been performed in the `DataReader`,\n but just for completeness sake in case the code might be used in an unexpected way.\n\n Args:\n pdf: data_handling source as a pandas dataframe\n\n Returns: data_handling source as a pandas dataframe or termination of program..\n \"\"\"\n try:\n return pdf[self.mandatory_cols]\n except KeyError:\n raise KeyError(\"Mandatory columns are not present! no prediction can be made at this point!!\")\n\n @staticmethod\n def remove_duplicates(pdf: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n drop duplicate columns. 
this function can be expanded into more complicated forms.\n\n Args:\n pdf: data_handling source as a pandas dataframe\n\n Returns: data_handling source as a pandas dataframe\n\n \"\"\"\n return pdf.drop_duplicates()\n\n @staticmethod\n def deal_with_nan(pdf: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n deal with nan in the dataframe -- an already skimmed down dataframe, so `nan` can be quite significant.\n the strategy used here is drop-all, but it can be expanded into sth more sophisticated later.\n\n Args:\n pdf: data_handling source as a pandas dataframe\n\n Returns: data_handling source as a pandas dataframe\n\n \"\"\"\n return pdf.dropna(how='any')\n\n @staticmethod\n def impute_nan(pdf: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n this is one of the strategies to deal with nan, which is yet implemented.\n\n Args:\n pdf: data_handling source as a pandas dataframe\n\n Returns: data_handling source as a pandas dataframe\n\n \"\"\"\n return pdf\n\n\n","sub_path":"text_classifier/data_handling/data_cleaner.py","file_name":"data_cleaner.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"222770392","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nfrom ipywidgets import HTML\r\n\r\n# Fixed styles to avoid leaflet maps overlap sepal widgets\r\nSTYLES = \"\"\"\r\n\r\n\"\"\"\r\n\r\n_ = display(HTML(STYLES))\r\n\r\nCOMPONENTS = {\r\n\r\n 'PROGRESS_BAR':{\r\n 'color':'indigo',\r\n }\r\n}\r\n\r\nICON_TYPES = {\r\n # Used for folders\r\n '':{ \r\n 'color':'amber',\r\n 'icon':'mdi-folder-outline'\r\n },\r\n '.csv':{\r\n 'color':'green accent-4',\r\n 'icon':'mdi-border-all'\r\n },\r\n '.txt':{\r\n 'color':'green accent-4',\r\n 'icon':'mdi-border-all'\r\n },\r\n '.tif':{\r\n 'color':'deep-purple',\r\n 'icon':'mdi-image-outline'\r\n },\r\n '.tiff':{\r\n 'color':'deep-purple',\r\n 'icon':'mdi-image-outline'\r\n },\r\n '.shp':{\r\n 'color':'deep-purple',\r\n 'icon':'mdi-vector-polyline'\r\n },\r\n 'DEFAULT':{\r\n 'color':'light-blue',\r\n 'icon':'mdi-file-outline'\r\n },\r\n # Icon for parent folder\r\n 'PARENT':{ \r\n 'color':'black',\r\n 'icon':'mdi-folder-upload-outline'\r\n },\r\n\r\n}\r\n","sub_path":"sepal_ui/styles/styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"131947912","text":"import nltk\nimport string\nimport os\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.metrics.pairwise import linear_kernel\n\nstemmer = PorterStemmer()\ntoken_dict = {}\n\n\ndef preprocess_input(input_sentence):\n processed_sentence = input_sentence.lower()\n processed_sentence = processed_sentence.translate(None, string.punctuation)\n processed_sentence = tokenize(processed_sentence)\n return processed_sentence\n\n\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n for item in tokens:\n stemmed.append(stemmer.stem(item))\n\n return stemmed\n\n\ndef tokenize(text):\n tokens = nltk.word_tokenize(text)\n stems = stem_tokens(tokens, stemmer)\n return stems\n\n\nclass ContextRecognition:\n tf_idf = None\n tf_idf_matrix = None\n model_loaded = False\n save_file = 'current_models/tfidf.pkl'\n load_file = 'current_models/tfidf.pkl'\n\n def __init__(self):\n self.tf_idf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')\n\n def load_model(self):\n self.tf_idf = pickle.load(open(self.load_file, 
'rb'))\n self.tf_idf_matrix = pickle.load(open('current_models/tfidf_matrix.pkl', 'rb'))\n self.model_loaded = True\n\n def save_model(self):\n with open(self.save_file, 'wb') as handle:\n pickle.dump(self.tf_idf, handle)\n with open('current_models/tfidf_matrix.pkl', 'wb') as handle:\n pickle.dump(self.tf_idf_matrix, handle)\n\n def load_corpus(self, path_to_corpus):\n path = path_to_corpus\n sentence_index = 0\n for subdir, dirs, files in os.walk(path):\n for file in files:\n file_path = subdir + os.path.sep + file\n #Check that the file is not empty\n if os.path.getsize(file_path) > 0:\n document = open(file_path, 'r')\n text = document.read()\n lowers = text.lower()\n no_punctuation = lowers.translate(None, string.punctuation)\n sentences = no_punctuation.splitlines()\n for sentence in sentences:\n token_dict[sentence_index] = sentence\n sentence_index += 1\n\n return\n\n def compute_document_similarity(self, input_sentence):\n best_match = 'No response is available to your statement.'\n correlation = 0\n\n if not self.model_loaded:\n self.tf_idf_matrix = self.tf_idf.fit_transform(token_dict.values())\n\n input_sentence_converted = self.tf_idf.transform([input_sentence])\n cosine_similarities = linear_kernel(input_sentence_converted, self.tf_idf_matrix).flatten()\n related_docs_indices = cosine_similarities.argsort()[:-5:-1]\n related_doc_scores = cosine_similarities[related_docs_indices]\n\n if related_doc_scores[0] > 0:\n best_match = token_dict[related_docs_indices[0]]\n correlation = related_doc_scores[0]\n\n return best_match, correlation","sub_path":"Chatbot-Emotion-Recognition/context_recognition.py","file_name":"context_recognition.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"192126440","text":"#karatsuba multiplication\nx = input(\"x: \")\ny = input(\"y: \")\n\ndef karatsuba(x, y):\n\tl1 = len(x)\n\tl2 = len(y)\n\tl = max(l1, l2)\n\n\t#padding 0s to make two numbers have same digits\n\tif l1 != l2:\n\t\tx = x.rjust(l, '0')\n\t\ty = y.rjust(l, '0')\n\n\tif (l1 == 1) or (l2 == 1) or int(x) == 0 or int(y) == 0:\n\t\treturn int(x) * int(y)\n\n\ta = x[: l // 2]\n\tb = x[l // 2 :]\n\tc = y[: l // 2]\n\td = y[l // 2 :]\n\n\t#count how many digits to shift\n\tn = l - l // 2\n\n\treturn (10 ** (2*n) - 10 ** n) * karatsuba(a, c) + (10 ** n) * karatsuba(str(int(a)+int(b)), str(int(c)+int(d))) + (1- 10 ** n) * karatsuba(b, d)\n\nprint(x, \"*\", y, \"=\")\nprint(karatsuba(x, y))","sub_path":"Karatsuba algorithm.py","file_name":"Karatsuba algorithm.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"417924723","text":"\"\"\"Template config validator.\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.config import async_log_exception, config_without_domain\nfrom homeassistant.const import CONF_SENSORS, CONF_UNIQUE_ID\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.trigger import async_validate_trigger_config\n\nfrom . 
import sensor as sensor_platform\nfrom .const import CONF_TRIGGER, DOMAIN\n\nCONFIG_SECTION_SCHEMA = vol.Schema(\n {\n vol.Optional(CONF_UNIQUE_ID): cv.string,\n vol.Optional(CONF_TRIGGER): cv.TRIGGER_SCHEMA,\n vol.Optional(SENSOR_DOMAIN): vol.All(\n cv.ensure_list, [sensor_platform.SENSOR_SCHEMA]\n ),\n vol.Optional(CONF_SENSORS): cv.schema_with_slug_keys(\n sensor_platform.LEGACY_SENSOR_SCHEMA\n ),\n }\n)\n\n\nasync def async_validate_config(hass, config):\n \"\"\"Validate config.\"\"\"\n if DOMAIN not in config:\n return config\n\n config_sections = []\n\n for cfg in cv.ensure_list(config[DOMAIN]):\n try:\n cfg = CONFIG_SECTION_SCHEMA(cfg)\n\n if CONF_TRIGGER in cfg:\n cfg[CONF_TRIGGER] = await async_validate_trigger_config(\n hass, cfg[CONF_TRIGGER]\n )\n except vol.Invalid as err:\n async_log_exception(err, DOMAIN, cfg, hass)\n continue\n\n if CONF_SENSORS in cfg:\n logging.getLogger(__name__).warning(\n \"The entity definition format under template: differs from the platform \"\n \"configuration format. See \"\n \"https://www.home-assistant.io/integrations/template#configuration-for-trigger-based-template-sensors\"\n )\n sensors = list(cfg[SENSOR_DOMAIN]) if SENSOR_DOMAIN in cfg else []\n sensors.extend(\n sensor_platform.rewrite_legacy_to_modern_conf(cfg[CONF_SENSORS])\n )\n cfg = {**cfg, \"sensor\": sensors}\n\n config_sections.append(cfg)\n\n # Create a copy of the configuration with all config for current\n # component removed and add validated config back in.\n config = config_without_domain(config, DOMAIN)\n config[DOMAIN] = config_sections\n\n return config\n","sub_path":"homeassistant/components/template/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"266629476","text":"# First in terminal: 'pip3 install Flask'\n\n\nimport os\nimport json # now we want Python to import the data. To do that, we first need to import the JSON library, because we're going to be passing the data that's coming in as JSON.\nfrom flask import Flask, render_template, request, flash\n # we're importing our Flask class.\n # we're importing the render_template() function from Flask\n # Request is going to handle things like finding out what method we used, and it will also contain our form object when we've posted it.\n # 'flashed messages' in Flask\nif os.path.exists(\"env.py\"):\n import env # Once we save that, a new directory called 'pycache' is created.\n\n\napp = Flask(__name__)\n\"\"\"\nWe're then creating an instance of this and storing it in a variable called 'app'.\nThe first argument of the Flask class, is the name of the application's module - our package.\nSince we're just using a single module, we can use __name__ which is a built-in Python variable.\nFlask needs this so that it knows where to look for templates and static files.\n\"\"\"\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\"\"\"\nWe're then using the app.route decorator.\nIn Python, a decorator starts with the @ symbol, which is also called pie-notation.\nEffectively, a decorator is a way of wrapping functions.\n\nWhen we try to browse to the root directory, as indicated by the \"/\", \nthen Flask triggers the index function underneath and returns the \"Hello, World\" text.\n\nInstead of returning text, we return render_template(\"index.html\"). 
\nFlask expects it to be a directory called templates with an 's',\nwhich should be at the same level as our run.py file.\n\nThe root decorator binds the index() function to itself, \nso that whenever that root is called, the function is called.\nThis function is also called a 'view'.\n\"\"\"\n\n\n@app.route(\"/about\")\ndef about():\n data = [] # We will initialize an empty array or list called 'data'.\n with open(\"data/company.json\", \"r\") as json_data:\n data = json.load(json_data)\n return render_template(\"about.html\", page_title=\"About\", company=data)\n\"\"\"\nWe added in an additional argument.\nI will just call that argument 'page_title'.\nYou can call this anything you want, it's not specific to the framework.\nIt's just a variable name that I've made up, but could've been called almost anything else,\nexcept for one of the pre-defined Python variables.\n\nTo use this new variable, let's go to the about.html file, and remove the text between\nthe
tags at the top.\nI will replace that text with: {{ page_title }}\nRemember, double curly brackets is an expression that's going to display something on the page.\n\nWe need to have Python open the JSON file in order to read it.\nThis is called a 'with' block.\nwith open(\"data/company.json\", \"r\") as json_data: Python is opening the JSON file as \"read-only\",\nand assigning the contents of the file to a new variable we've created called json_data.\nWe need to set our empty 'data' list to equal the parsed JSON data that we've sent through.\ndata = json.load(json_data)\n\nFinally, I will pass that list into my return statement, and call it 'company'.\ncompany=data This is assigning a new variable called 'company'\nthat will be sent through to the HTML template, which is equal to the list of data it's loading\nfrom the JSON file.\n\"\"\"\n\n\n@app.route(\"/about/\")\ndef about_member(member_name):\n member = {}\n with open(\"data/company.json\", \"r\") as json_data:\n data = json.load(json_data)\n for obj in data:\n if obj[\"url\"] == member_name:\n member = obj\n return render_template(\"member.html\", member=member)\n\"\"\"\nThe first argument is going to be our new \"member.html\" template that we just created.\nThe second argument will be \"member=member\".\nThis first 'member' is the variable name being passed through into our html file.\nThe second 'member' is the member object we created above on line 24.\n\"\"\"\n\n\n@app.route(\"/contact\", methods=[\"GET\", \"PORT\"])\ndef contact():\n if request.method == \"PORT\":\n flash(\"Thanks {}, we have received your message!\".format(\n request.form.get(\"name\")))\n return render_template(\"contact.html\", page_title=\"Contact\")\n\n\n@app.route(\"/careers\")\ndef careers():\n return render_template(\"careers.html\", page_title=\"Careers\")\n\n\nif __name__ == \"__main__\": # The word 'main' wrapped in double-underscores (__main__) is the name of the default module in Python.\n \"\"\"\n If name is equal to \"main\" (both wrapped in double underscores), then we're going to run our app with the following arguments.\n\n The 'host' will be set to os.environ.get(\"IP\"), and I will set a default of \"0.0.0.0\".\n\n We're using the os module from the standard library to get the 'IP' environment variable if it exists, \n but set a default value if it's not found.\n\n It will be the same with 'PORT', but this time, we're casting it as an integer, and I will set that default to \"5000\", \n which is a common port used by Flask.\n\n We also need to specify \"debug=True\", because that will allow us to debug our code much easier during the development stage.\n \"\"\"\n app.run(\n host=os.environ.get(\"IP\", \"0.0.0.0\"),\n port=int(os.environ.get(\"PORT\", \"5000\")),\n debug=True # You should only have debug=True while testing your application in development mode, but change it to debug=False before you submit your project.\n )\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"115046869","text":"import asyncio\nimport time\n\nimport httpretty\nimport json\nimport re\n\nfrom bs4 import BeautifulSoup, Tag\nimport os\nfrom pyppeteer import launch, browser\nfrom pyppeteer.page import Page\nfrom unittest import mock\n\nimport poster, extractor\nimport Credentials as c\n# from test import test_methods\n\n# loads mock post data from provided json file.\nwith open('mock_data2.json', 'r') as f:\n data = 
json.load(f)\nre_question = re.compile('https://api.stackexchange.com/2.2/questions/.*')\n\n\nasync def test_question_extractor():\n \"\"\"Mock data method for question_Extractor().\n\n :param m: Mock adapter for intercepting calls from requests module\n :return: a list of posts\n \"\"\"\n # intercepts all GET calls to addresses matching regex and sends a\n # response with our mock data\n # m.get(re_question, text=json.dumps({'hello' : 'world'}))\n httpretty.enable()\n httpretty.register_uri(httpretty.GET, re_question, body=json.dumps(data))\n result = await extractor.question_Extractor()\n httpretty.disable()\n return result\n\n\nasync def localize_page(page: Page, answered_link: str) -> str:\n \"\"\"Generates a mock local copy of a page for testing.\n\n answered_link gives the page to navigate to upon form submission. This is\n necessary to enumate the functionality of our bot without directly\n impacting the StackOverflow webpage itself.\n\n :param page: the page to localize\n :param answered_link: link to the answered version of the question\n :return: absolute path to localized webpage html file\n \"\"\"\n html = await page.content()\n soup = BeautifulSoup(html)\n answers = soup.find('div', {'id' : 'answers'})\n form = answers.find('form', {'id' : 'post-form'})\n form['action'] = \"file://{}\".format(answered_link)\n\n with open('test/mock_post_unanswered.html', 'w') as f:\n f.write(str(soup))\n\n return os.path.abspath('test/mock_post_unanswered.html')\n\n\nasync def _mock_post(page: Page, bot_post: str) -> str:\n \"\"\"Creates a mock SO page with a dummy answer.\n\n :param page: the pyppeteer.page.Page object to post an answer on\n :param content: dict containing question information for answer formatting\n :return:\n \"\"\"\n\n # Gets page's html in string format and parses it with BeautifulSoup\n html = await page.content()\n soup = BeautifulSoup(html)\n\n # Locates answer section on question page (div tag with id set to answers)\n answers = soup.find(\"div\", {\"id\": \"answers\"})\n # Since we only target unanswered posts, remove attr set to 'no-answers'\n del answers['class']\n # Answer posts begin after 'answer-header', so we insert nodes after it\n ans_head = answers.find('div', {'id': 'answers-header'})\n # New tag declaring answer id is created and inserted after answer-head\n id_tag = Tag(builder=soup.builder,\n name='a',\n attrs={'name': '000'})\n ans_head.insert_after(id_tag)\n\n # parse html template for a posted suggested answer to a question\n with open('test/mock_post_template.html', 'r') as fp:\n test_tree = BeautifulSoup(fp, 'html.parser')\n\n # modify template to match our mock answer post\n test_root = test_tree.find('div', {'class': 'answer'})\n # set answer ids and find post body\n test_root['id'] = 'answer-000'\n test_root['data-answerid'] = 000\n post = test_tree.find('div', {'class': \"post-text\", 'itemprop': 'text'})\n # Insert dummy_answer into post body, then put after matching answer id tag\n post.string = bot_post\n id_tag.insert_after(test_tree)\n\n # write mock post to file for examination\n with open('test/mock_post_answered.html', 'w') as f:\n f.write(str(soup))\n\n return os.path.abspath('test/mock_post_answered.html')\n\n\nasync def mock_post(page: Page, content: dict) -> Page:\n \"\"\"Creates a mock SO page with a dummy answer.\n\n :param page: the pyppeteer.page.Page object to post an answer on\n :param content: dict containing question information for answer formatting\n :return:\n \"\"\"\n posting_answer = ''\n example_flag = await 
poster.containsExample(content)\n if example_flag:\n posting_answer = await poster.getExample(content)\n else:\n posting_answer = await poster.getQuery(content)\n\n posting_answer = \"Hello \" + str(content['qposter']).split(' ')[\n 0] + ',\\n\\n' + posting_answer\n\n # TODO set form redirect to answered_page on submission\n answered_page = await _mock_post(page, posting_answer)\n local_mock = await localize_page(page, answered_page)\n await page.goto(\"file://{}\".format(local_mock))\n\n await page.type('textarea[id=wmd-input]', posting_answer, {'delay': 15})\n print(posting_answer)\n await page.click('button[id=submit-button]')\n await page.goto(\"file://{}\".format(answered_page))\n\n return page\n\n\n@mock.patch('poster.answer', new=lambda x,y: mock_post(x,y))\ndef test_run(q):\n \"\"\"Calls _run() with post_answer() method mocked with mock_post()\n\n @mock.patch() patches calls to post_answer() in test.test_methods with a\n call to mock_post() when run through the current mocking.py file.\n\n :param q: question steam from question_Extractor\n :return:\n \"\"\"\n asyncio.get_event_loop().run_until_complete(poster.run(q))\n\n\nasync def test_browser(url):\n \"\"\"Method to test mock_post() via a simulated browser.\n\n :param url: the question url to post an answer on\n :return: html string\n \"\"\"\n browser = await launch(\n {\"headless\": False, \"args\": [\"--no-sandbox\", \"--disable-web-security\"]})\n page = await browser.newPage()\n await page.goto(url)\n html = await mock_post(page, {'ques' : 'SilverStripe PHP'}).content()\n await browser.close()\n return html\n\n\n# def mock_data():\n# url = 'https://api.stackexchange.com/2.2/questions/no-answers?order=asc&sort=creation&tagged=git&filter=!9Z(-wwYGT&&site=stackoverflow&team=stackoverflow.com/c/ncsu&key=' + \\\n# c.secret['key']\n# headers = {\n# 'X-API-Access-Token': c.secret['AccessToken'],\n# 'Accept-Charset': 'UTF-8'\n# }\n#\n# res = requests.get(url, headers=headers)\n# data = res.json()\n# with open('mock_data2.json', 'w') as f:\n# f.write(json.dumps(data))\n\n\nif __name__ == '__main__':\n os.chdir('..')\n resp = asyncio.get_event_loop().run_until_complete(\n test_question_extractor())\n print(resp)\n for q in resp:\n test_run(q)\n time.sleep(10)\n # html = asyncio.get_event_loop().run_until_complete(test_browser(\n # 'https://stackoverflow.com/c/ncsu/questions/852'\n # ))\n","sub_path":"test/mocking.py","file_name":"mocking.py","file_ext":"py","file_size_in_byte":6567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"209232431","text":"import streamlit as st\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport selenium\nimport requests\nimport math\nimport time\nimport base64\nfrom pathlib import Path\n\n# Selenium Imports\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys\n\n\n# Selenium initialization\ndef initializeSelenium(path):\n chrome_options = Options()\n chrome_options.add_argument(\"window-size=1200x600\")\n chrome_options.add_argument('--no-sandbox')\n 
chrome_options.add_argument('--headless')\n driver = Chrome(executable_path=str(path) + \"/chromedriver\",\n options=chrome_options)\n return driver\n\n\n# Helper function to accessSite\ndef accessSite(driver, link):\n driver.get(link)\n\n\ndef getTable():\n # Obtain the number of rows in body\n rows = 1 + len(driver.find_elements_by_xpath(\n \"/html/body/section/div[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div[3]/table/tbody/tr\"))\n\n # Obtain the number of columns in table\n cols = len(driver.find_elements_by_xpath(\n \"/html/body/section/div[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div[3]/table/tbody/tr[1]/td\"))\n table = []\n for r in range(2, rows + 1):\n row = []\n for p in range(1, cols + 1):\n # obtaining the text from each column of the table\n try:\n value = driver.find_element_by_xpath(\n \"/html/body/section/div[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div[3]/table/tbody/tr[\" + str(\n r) + \"]/td[\" + str(p) + \"]\").text\n row.append(value)\n except NoSuchElementException:\n break\n table.append(row)\n table = [val[1:] for val in table]\n years = ['(2012)', '(2017)', '(2018)', '(2019)', '(2020)', '(2021)']\n table = [val for val in table if len(val) > 0]\n for val in table:\n number = val[-1]\n for year in years:\n if year in number:\n number = number.replace(' ' + year, '')\n if ' +' in number:\n number = number.replace(' +', '')\n if '+' in number:\n number = number.replace('+', '')\n if ',' in number:\n number = number.replace(',', '')\n if '-' in number:\n val[-1] = [int(i) for i in number.split(\"-\")][-1]\n else:\n val[-1] = int(number)\n return sorted(table, key=lambda x: x[2], reverse=True)\nst.write(\"\"\"\n# Web Scraper App\n\nWelcome to the web scraping app!\n\n\"\"\")\n\ndirectory_option = st.selectbox(\n 'Scrape the following points of data:',\n ('Top OPT Companies', 'Other')\n)\n\n# Upon \"GO\" button submission\ndirectory_button = st.button(\"GO\")\nif directory_button:\n # Checks if it says \"Other\"\n if directory_option == 'Other':\n st.error(\"Invalid directory, please re-enter a correct value\")\n else:\n with st.spinner(\"Scraping Top OPT Listings\"):\n path = Path(__file__).parent\n driver = initializeSelenium(path)\n driver.implicitly_wait(3)\n link = \"https://unitedopt.com/Home/blogdetail/top-companies-offering-opt-jobs-to-international-students-in-2021\"\n accessSite(driver, link)\n table = getTable()\n df_table = pd.DataFrame(table, columns=['Company Name', 'Sector', 'Employee Number'])\n st.table(df_table)\n\n st.success(\"Scraping complete!\")\n company_option = st.selectbox(\n 'Search relevant information on company:',\n set(df_table['Company Name'][:5])\n )\n company_button = st.button(\"GO\")\n if company_button:\n # TODO: MASSIVE ENDEAVOR OF ADDING IN ALL OF THESE COMPANIES\n if company_option == 'Amazon':\n st.text = \"This company fucks\"\n else:\n st.error(\"Information relevant to this company is currently unavailable. It will be in due time but in the meantime, please choose another!\")\n\n #TODO: Scrap company specific site for mission statement, perks, payments etc (keyvalue, lever, the company site etc)\n\n #\n # with st.spinner(\"Scraping directory\"):\n # scrap_prog = st.progress(0)\n #\n # scrap_prog.progress(100)\n #\n # st.success(\"Directory has been scraped!\")\n st.balloons()\n\n # df_text = pd.DataFrame(scrap_text)\n # #Workaround to Streamlit file exports\n # csv = df_text.to_csv(index=False)\n # b64 = base64.b64encode(csv.encode()).decode()\n # filename = f\"{directory_option}_Scraped_Text\"\n # href = f'
Click here to download the scraped directory: Here
'\n # st.markdown(href, unsafe_allow_html=True)\n #\n # st.write(\"\"\"\n # ## Here is a sample of the scraped text:\n # \"\"\")\n # text_slot = st.empty()\n # text_slot.write(f\"{scrap_text[0]}\")\n","sub_path":"my_app.py","file_name":"my_app.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"496146294","text":"from Common.CreateDistanceMatrix import load_data_from_file, create_distance_matrix\r\nfrom Common.Visualize import plot_results\r\nfrom Common.Helpers import get_total_distance, get_random_cycles\r\nfrom Common.TestBenchTools import get_alg_results_struct, Alg_results,print_best\r\nfrom .ILS1 import ils1_method\r\nfrom .ILS2 import ils2_method\r\nfrom .MSLS import ls_multi_start\r\n\r\nimport numpy as np\r\nimport time as t\r\n\r\nNO_ITERATIONS=2\r\nNO_STARTS_MSLS=2\r\ndef get_results(instance_filename):\r\n algs=[ls_multi_start,ils1_method,ils2_method]\r\n algs_structs=get_alg_results_struct(algs)\r\n dist_m = create_distance_matrix(instance_filename)\r\n \r\n vertecies_arr=list(range(len(dist_m[0])))\r\n\r\n for _ in range(NO_ITERATIONS):\r\n rand_cycs=get_random_cycles(vertecies_arr)\r\n for alg in algs_structs:\r\n if alg.fun is ls_multi_start:\r\n time=t.time()\r\n curr_res_cycs=alg.fun(dist_m,NO_STARTS_MSLS)\r\n time=t.time()-time\r\n else:\r\n curr_res_cycs=alg.fun(rand_cycs[0],rand_cycs[1],dist_m,time)\r\n curr_res_lenght=get_total_distance(curr_res_cycs,dist_m)\r\n alg.update_time(time)\r\n alg.update_res(curr_res_lenght,curr_res_cycs) \r\n return algs_structs\r\n \r\n\r\ndef run_test():\r\n algs_res_A=get_results(\"kroA200.tsp\")\r\n algs_res_B=get_results(\"kroB200.tsp\")\r\n print_best(algs_res_A,\"kroA200.tsp\")\r\n print_best(algs_res_B,\"kroB200.tsp\")","sub_path":"ExtendedLocalSearch/TestBench.py","file_name":"TestBench.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"23279056","text":"def palindrome_recursive(string, index):\n \"\"\"\n # Complete the palindrome algorithm --- with recursion\n # Think about how to break a large problem into smaller sub problems.\n What is our base case in this problem?\n\n # Another way to ask: what is our smallest problem?\n How to get to this smallest problem?\n\n :param string: String -- the string to check whether it is a palindrome\n :param index: Int -- additional parameter for recursion tracking\n\n :return: True if @string is palindrome, False otherwise\n \"\"\"\n if index >= len(string)//2 - 1 and string[index] == string[len(string)-1-index]:\n return True\n elif index nan_percent_thrsh].column_name)\n data_df = data_df.drop(many_nans_cols, axis = 1)\n return data_df\n\n\ndef clean_naming(x, lower_case=True):\n '''Change strings to only have lower case letters and underscores.\n\n Parameters\n ----------\n x : string or list of strings\n String(s) on which to clean the naming, standardizing it.\n lower_case : bool, default True\n If set to True, all strings will be converted to lower case.\n\n Returns\n -------\n x : string or list of strings\n Cleaned string(s).\n '''\n if 'pandas.core.indexes.base.Index' in str(type(x)):\n # If the user input is a dataframe index (e.g. 
df.columns), convert it to a list\n x = list(x)\n if isinstance(x, list):\n if lower_case is True:\n x = [string.lower().replace(' ', '')\n .replace(' ', '_')\n .replace(',', '_and') for string in x]\n else:\n x = [string.replace(' ', '')\n .replace(' ', '_')\n .replace(',', '_and') for string in x]\n elif (isinstance(x, pd.DataFrame)\n or isinstance(x, pd.Series)\n or isinstance(x, dd.DataFrame)\n or isinstance(x, dd.Series)):\n raise Exception('ERROR: Wrong method. When using dataframes or series, use clean_categories_naming() method instead.')\n else:\n if lower_case is True:\n x = (str(x).lower().replace(' ', '')\n .replace(' ', '_')\n .replace(',', '_and'))\n else:\n x = (str(x).replace(' ', '')\n .replace(' ', '_')\n .replace(',', '_and'))\n return x\n\n\ndef clean_categories_naming(df, column, clean_missing_values=True,\n specific_nan_strings=[], lower_case=False):\n '''Change categorical values to only have lower case letters and underscores.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe that contains the column to be cleaned.\n column : string\n Name of the dataframe's column which needs to have its string values\n standardized.\n clean_missing_values : bool, default True\n If set to True, the algorithm will search for missing value\n representations and replace them with the standard, NumPy NaN value.\n specific_nan_strings : list of strings, default []\n Parameter where the user can specify additional strings that\n should correspond to missing values.\n lower_case : bool, default False\n If set to True, all strings will be converted to lower case.\n\n Returns\n -------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe with its string column already cleaned.\n '''\n # Fix the seeting of all lower case characters according to the `lower_case` parameter\n clean_naming_prtl = partial(clean_naming, lower_case=lower_case)\n if isinstance(df, dd.DataFrame):\n df[column] = (df[column].map(clean_naming_prtl, meta=('x', str)))\n if clean_missing_values is True:\n df[column] = df[column].apply(lambda x: standardize_missing_values(x, specific_nan_strings),\n meta=df[column]._meta.dtypes)\n else:\n df[column] = (df[column].map(clean_naming_prtl))\n if clean_missing_values is True:\n df[column] = df[column].apply(lambda x: standardize_missing_values(x, specific_nan_strings))\n return df\n\n\ndef one_hot_encoding_dataframe(df, columns, clean_name=True, clean_missing_values=True,\n specific_nan_strings=[], lower_case=False,\n has_nan=False, join_rows=False,\n join_by=['patientunitstayid', 'ts'],\n get_new_column_names=False,\n search_by_dtypes=False, inplace=False):\n '''Transforms specified column(s) from a dataframe into a one hot encoding\n representation.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe that will be used, which contains the specified column.\n columns : list of strings\n Name of the column(s) that will be conveted to one hot encoding.\n clean_name : bool, default True\n If set to true, changes the name of the categorical values into lower\n case, with words separated by an underscore instead of space.\n clean_missing_values : bool, default True\n If set to True, the algorithm will search for missing value\n representations and replace them with the standard, NumPy NaN value.\n specific_nan_strings : list of strings, default []\n Parameter where the user can specify additional strings that\n should correspond to missing values.\n lower_case : bool, default False\n If set to True, all strings will be 
converted to lower case.\n has_nan : bool, default False\n If set to true, will first fill the missing values (NaN) with the string\n f'{column}_missing_value'.\n join_rows : bool, default False\n If set to true, will group the rows created by the one hot encoding by\n summing the boolean values in the rows that have the same identifiers.\n join_by : string or list, default ['subject_id', 'ts'])\n Name of the column (or columns) which serves as a unique identifier of\n the dataframe's rows, which will be used in the groupby operation if the\n parameter join_rows is set to true. Can be a string (single column) or a\n list of strings (multiple columns).\n get_new_column_names : bool, default False\n If set to True, the names of the new columns will also be outputed.\n search_by_dtypes : bool, default False\n If set to True, the method will only look for boolean columns based on\n their data type. This is only reliable if all the columns' data types\n have been properly set.\n inplace : bool, default False\n If set to True, the original dataframe will be used and modified\n directly. Otherwise, a copy will be created and returned, without\n changing the original dataframe.\n\n Raises\n ------\n ColumnNotFoundError\n Column name not found in the dataframe.\n\n Returns\n -------\n ohe_df : pandas.DataFrame or dask.DataFrame\n Returns a new dataframe with the specified column in a one hot encoding\n representation.\n new_column_names : list of strings\n List of the new, one hot encoded columns' names.\n '''\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data_df = df.copy()\n else:\n # Use the original dataframe\n data_df = df\n # Make sure that the columns is a list\n if isinstance(columns, str):\n columns = [columns]\n if not isinstance(columns, list):\n raise Exception(f'ERROR: The `columns` argument must be specified as either a single string or a list of strings. 
Received input with type {type(columns)}.')\n print('Cleaning the categorical columns...')\n for col in utils.iterations_loop(columns):\n # Check if the column exists\n if col not in data_df.columns:\n raise Exception('ERROR: Column name not found in the dataframe.')\n if clean_name is True:\n # Clean the column's string values to have the same, standard format\n data_df = clean_categories_naming(data_df, col, clean_missing_values,\n specific_nan_strings, lower_case)\n if has_nan is True:\n # Fill NaN with \"missing_value\" name\n data_df[col] = data_df[col].fillna(value='missing_value')\n # Cast the variable into the built in pandas Categorical data type\n if isinstance(data_df, pd.DataFrame):\n data_df[col] = pd.Categorical(data_df[col])\n if isinstance(data_df, dd.DataFrame):\n data_df = data_df.categorize(columns)\n if get_new_column_names is True:\n # Find the previously existing column names\n old_column_names = data_df.columns\n print('Getting dummies...')\n # Apply the one hot encoding to the specified columns\n if isinstance(data_df, dd.DataFrame):\n ohe_df = dd.get_dummies(data_df, columns=columns)\n else:\n ohe_df = pd.get_dummies(data_df, columns=columns)\n if join_rows is True:\n # Columns which are one hot encoded\n ohe_columns = search_explore.list_boolean_columns(ohe_df, search_by_dtypes=search_by_dtypes)\n # Group the rows that have the same identifiers\n ohe_df = ohe_df.groupby(join_by).sum(min_count=1).reset_index()\n # Clip the one hot encoded columns to a maximum value of 1\n # (there might be duplicates which cause values bigger than 1)\n ohe_df.loc[:, ohe_columns] = ohe_df[ohe_columns].clip(upper=1)\n print('Done!')\n if get_new_column_names is True:\n # Find the new column names and output them\n new_column_names = list(set(ohe_df.columns) - set(old_column_names))\n new_column_names.sort()\n return ohe_df, new_column_names\n else:\n return ohe_df\n\n\ndef category_to_feature(df, categories_feature, values_feature, min_len=None,\n see_progress=True, inplace=False):\n '''Convert a categorical column and its corresponding values column into\n new features, one for each category.\n WARNING: Currently not working properly on a Dask dataframe. Apply .compute()\n to the dataframe to convert it to Pandas, before passing it to this method.\n If the data is too big to run on Pandas, use the category_to_feature_big_data\n method.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe on which to add the new features.\n categories_feature : string\n Name of the feature that contains the categories that will be converted\n to individual features.\n values_feature : string\n Name of the feature that has each category's corresponding value, which\n may or may not be a category on its own (e.g. it could be numeric values).\n min_len : int, default None\n If defined, only the categories that appear on at least `min_len` rows\n are converted to features.\n see_progress : bool, default True\n If set to True, a progress bar will show up indicating the execution\n of the normalization calculations.\n inplace : bool, default False\n If set to True, the original dataframe will be used and modified\n directly. 
Otherwise, a copy will be created and returned, without\n changing the original dataframe.\n\n Returns\n -------\n data_df : pandas.DataFrame or dask.DataFrame\n Dataframe with the newly created features.\n '''\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data_df = df.copy()\n else:\n # Use the original dataframe\n data_df = df\n # Find the unique categories\n categories = data_df[categories_feature].unique()\n if isinstance(df, dd.DataFrame):\n categories = categories.compute()\n # Create a feature for each category\n for category in utils.iterations_loop(categories, see_progress=see_progress):\n if min_len is not None:\n # Check if the current category has enough data to be worth it to convert to a feature\n if len(data_df[data_df[categories_feature] == category]) < min_len:\n # Ignore the current category\n continue\n # Convert category to feature\n data_df[category] = data_df.apply(lambda x: x[values_feature] if x[categories_feature] == category\n else np.nan, axis=1)\n return data_df\n\n\ndef category_to_feature_big_data(df, categories_feature, values_feature,\n min_len=None, see_progress=True):\n '''Convert a categorical column and its corresponding values column into\n new features, one for each category. Optimized for very big Dask dataframes,\n which can't be processed as a whole Pandas dataframe.\n\n Parameters\n ----------\n df : dask.DataFrame\n Dataframe on which to add the new features.\n categories_feature : string\n Name of the feature that contains the categories that will be converted\n to individual features.\n values_feature : string\n Name of the feature that has each category's corresponding value, which\n may or may not be a category on its own (e.g. it could be numeric values).\n min_len : int, default None\n If defined, only the categories that appear on at least `min_len` rows\n are converted to features.\n see_progress : bool, default True\n If set to True, a progress bar will show up indicating the execution\n of the normalization calculations.\n\n Returns\n -------\n data_df : dask.DataFrame\n Dataframe with the newly created features.\n '''\n # Create a list with Pandas dataframe versions of each partition of the\n # original Dask dataframe\n df_list = []\n print('Converting categories to features in each partition...')\n for n in utils.iterations_loop(range(df.npartitions), see_progress=see_progress):\n # Process each partition separately in Pandas\n tmp_df = df.get_partition(n).compute()\n tmp_df = category_to_feature(tmp_df, categories_feature=categories_feature,\n values_feature=values_feature, min_len=min_len,\n see_progress=see_progress)\n df_list.append(tmp_df)\n # Rejoin all the partitions into a Dask dataframe with the same number of\n # partitions it originally had\n print('Rejoining partitions into a Dask dataframe...')\n data_df = dd.from_pandas(pd.concat(df_list, sort=False), npartitions=df.npartitions)\n print('Done!')\n return data_df\n\n\ndef remove_rows_unmatched_key(df, key, columns):\n '''Remove rows corresponding to the keys that weren't in the dataframe merged at the right.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe resulting from a asof merge which will be searched for missing values.\n key : string\n Name of the column which was used as the \"by\" key in the asof merge. 
Typically\n represents a temporal feature from a time series, such as days or timestamps.\n columns : list of strings\n Name of the column(s), originating from the dataframe which was merged at the\n right, which should not have any missing values. If it has, it means that\n the corresponding key wasn't present in the original dataframe. Even if there's\n just one column to analyze, it should be received in list format.\n\n Returns\n -------\n df : pandas.DataFrame or dask.DataFrame\n Returns the input dataframe but without the rows which didn't have any values\n in the right dataframe's features.\n '''\n for k in utils.iterations_loop(df[key].unique()):\n # Variable that counts the number of columns which don't have any value\n # (i.e. all rows are missing values) for a given identifier 'k'\n num_empty_columns = 0\n for col in columns:\n if df[df[key] == k][col].isnull().sum() == len(df[df[key] == k]):\n # Found one more column which is full of missing values for identifier 'k'\n num_empty_columns += 1\n if num_empty_columns == len(columns):\n # Eliminate all rows corresponding to the analysed key if all the columns\n # are empty for the identifier 'k'\n df = df[~(df[key] == k)]\n return df\n\n\ndef apply_zscore_norm(value, df=None, mean=None, std=None, categories_means=None,\n categories_stds=None, groupby_columns=None):\n '''Performs z-score normalization when used inside a Pandas or Dask\n apply function.\n\n Parameters\n ----------\n value : int or float\n Original, unnormalized value.\n df : pandas.DataFrame or dask.DataFrame, default None\n Original pandas dataframe which is used to retrieve the\n necessary statistical values used in group normalization, i.e. when\n values are normalized according to their corresponding categories.\n mean : int or float, default None\n Average (mean) value to be used in the z-score normalization.\n std : int or float, default None\n Standard deviation value to be used in the z-score normalization.\n categories_means : dict, default None\n Dictionary containing the average values for each set of categories.\n categories_stds : dict, default None\n Dictionary containing the standard deviation values for each set of\n categories.\n groupby_columns : string or list of strings, default None\n Name(s) of the column(s) that contains the categories from which\n statistical values (mean and standard deviation) are retrieved.\n\n Returns\n -------\n value_norm : int or float\n Z-score normalized value.\n '''\n if not isinstance(value, numbers.Number):\n raise Exception(f'ERROR: Input value should be a number, not an object of type {type(value)}.')\n if mean is not None and std is not None:\n return (value - mean) / std\n elif (df is not None and categories_means is not None\n and categories_stds is not None and groupby_columns is not None):\n try:\n if isinstance(groupby_columns, list):\n return ((value - categories_means[tuple(df[groupby_columns])])\n / categories_stds[tuple(df[groupby_columns])])\n else:\n return ((value - categories_means[df[groupby_columns]])\n / categories_stds[df[groupby_columns]])\n except Exception:\n warnings.warn(f'Couldn\\'t manage to find the mean and standard deviation values for the groupby columns {groupby_columns} with values {tuple(df[groupby_columns])}.')\n return np.nan\n else:\n raise Exception('ERROR: Invalid parameters. 
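# --- Illustrative sketch of the z-score formula applied by `apply_zscore_norm`
# above; all numbers and category names here are made up.
example_value, example_mean, example_std = 120.0, 100.0, 10.0
example_norm = (example_value - example_mean) / example_std            # -> 2.0
# With group normalization the same formula is used, but the stats are looked
# up per category (e.g. per `gender`) in the provided dictionaries.
example_cat_means = {'male': 80.0, 'female': 75.0}
example_cat_stds = {'male': 12.0, 'female': 10.0}
example_norm_by_cat = (example_value - example_cat_means['male']) / example_cat_stds['male']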
Either the `mean` and `std` or the `df`, `categories_means`, `categories_stds` and `groupby_columns` must be set.')\n\n\ndef apply_minmax_norm(value, df=None, min=None, max=None, categories_mins=None,\n categories_maxs=None, groupby_columns=None):\n '''Performs minmax normalization when used inside a Pandas or Dask\n apply function.\n\n Parameters\n ----------\n value : int or float\n Original, unnormalized value.\n df : pandas.DataFrame or dask.DataFrame, default None\n Original pandas dataframe which is used to retrieve the\n necessary statistical values used in group normalization, i.e. when\n values are normalized according to their corresponding categories.\n min : int or float, default None\n Minimum value to be used in the minmax normalization.\n max : int or float, default None\n Maximum value to be used in the minmax normalization.\n categories_mins : dict, default None\n Dictionary containing the minimum values for each set of categories.\n categories_maxs : dict, default None\n Dictionary containing the maximum values for each set of categories.\n groupby_columns : string or list of strings, default None\n Name(s) of the column(s) that contains the categories from which\n statistical values (minimum and maximum) are retrieved.\n\n Returns\n -------\n value_norm : int or float\n Minmax normalized value.\n '''\n if not isinstance(value, numbers.Number):\n raise Exception(f'ERROR: Input value should be a number, not an object of type {type(value)}.')\n if min and max:\n return (value - min) / (max - min)\n elif df and categories_mins and categories_maxs and groupby_columns:\n try:\n if isinstance(groupby_columns, list):\n return ((value - categories_mins[tuple(df[groupby_columns])])\n / (categories_maxs[tuple(df[groupby_columns])] - categories_mins[tuple(df[groupby_columns])]))\n else:\n return ((value - categories_mins[df[groupby_columns]])\n / (categories_maxs[df[groupby_columns]] - categories_mins[df[groupby_columns]]))\n except Exception:\n warnings.warn(f'Couldn\\'t manage to find the mean and standard deviation values for the groupby columns {groupby_columns} with values {tuple(df[groupby_columns])}.')\n return np.nan\n else:\n raise Exception('ERROR: Invalid parameters. Either the `min` and `max` or the `df`, `categories_mins`, `categories_maxs` and `groupby_columns` must be set.')\n\n\ndef apply_zscore_denorm(value, df=None, mean=None, std=None, categories_means=None,\n categories_stds=None, groupby_columns=None):\n '''Performs z-score denormalization when used inside a Pandas or Dask\n apply function.\n\n Parameters\n ----------\n value : int or float\n Input normalized value.\n df : pandas.DataFrame or dask.DataFrame, default None\n Original pandas dataframe which is used to retrieve the\n necessary statistical values used in group denormalization, i.e. 
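# --- Illustrative sketch of the min-max scaling performed by `apply_minmax_norm`
# above: the value is mapped into the feature's [0, 1] range. Numbers are made up.
example_value, example_min, example_max = 7.0, 2.0, 12.0
example_norm = (example_value - example_min) / (example_max - example_min)   # -> 0.5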
when\n values are denormalized according to their corresponding categories.\n mean : int or float, default None\n Average (mean) value to be used in the z-score denormalization.\n std : int or float, default None\n Standard deviation value to be used in the z-score denormalization.\n categories_means : dict, default None\n Dictionary containing the average values for each set of categories.\n categories_stds : dict, default None\n Dictionary containing the standard deviation values for each set of\n categories.\n groupby_columns : string or list of strings, default None\n Name(s) of the column(s) that contains the categories from which\n statistical values (mean and standard deviation) are retrieved.\n\n Returns\n -------\n value_denorm : int or float\n Z-score denormalized value.\n '''\n if not isinstance(value, numbers.Number):\n raise Exception(f'ERROR: Input value should be a number, not an object of type {type(value)}.')\n if mean is not None and std is not None:\n return value * std + mean\n elif (df is not None and categories_means is not None\n and categories_stds is not None and groupby_columns is not None):\n try:\n if isinstance(groupby_columns, list):\n return (value * categories_stds[tuple(df[groupby_columns])]\n + categories_means[tuple(df[groupby_columns])])\n else:\n return (value * categories_stds[df[groupby_columns]]\n + categories_means[df[groupby_columns]])\n except Exception:\n warnings.warn(f'Couldn\\'t manage to find the mean and standard deviation values for the groupby columns {groupby_columns} with values {tuple(df[groupby_columns])}.')\n return np.nan\n else:\n raise Exception('ERROR: Invalid parameters. Either the `mean` and `std` or the `df`, `categories_means`, `categories_stds` and `groupby_columns` must be set.')\n\n\ndef apply_minmax_denorm(value, df=None, min=None, max=None, categories_mins=None,\n categories_maxs=None, groupby_columns=None):\n '''Performs minmax denormalization when used inside a Pandas or Dask\n apply function.\n\n Parameters\n ----------\n value : int or float\n Input normalized value.\n df : pandas.DataFrame or dask.DataFrame, default None\n Original pandas dataframe which is used to retrieve the\n necessary statistical values used in group denormalization, i.e. 
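# --- Illustrative round trip for the z-score case: `apply_zscore_denorm` above
# inverts `apply_zscore_norm`, so the original value is recovered from the
# normalized one given the same mean and standard deviation. Numbers are made up.
example_value, example_mean, example_std = 37.5, 36.8, 0.4
example_norm = (example_value - example_mean) / example_std
example_recovered = example_norm * example_std + example_mean   # == 37.5 (up to float precision)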
when\n values are denormalized according to their corresponding categories.\n min : int or float, default None\n Minimum value to be used in the minmax denormalization.\n max : int or float, default None\n Maximum value to be used in the minmax denormalization.\n categories_mins : dict, default None\n Dictionary containing the minimum values for each set of categories.\n categories_maxs : dict, default None\n Dictionary containing the maximum values for each set of categories.\n groupby_columns : string or list of strings, default None\n Name(s) of the column(s) that contains the categories from which\n statistical values (minimum and maximum) are retrieved.\n\n Returns\n -------\n value_denorm : int or float\n Minmax denormalized value.\n '''\n if not isinstance(value, numbers.Number):\n raise Exception(f'ERROR: Input value should be a number, not an object of type {type(value)}.')\n if min is not None and max is not None:\n return value * (max - min) + min\n elif (df is not None and categories_mins is not None\n and categories_maxs is not None and groupby_columns is not None):\n try:\n if isinstance(groupby_columns, list):\n return (value * (categories_maxs[tuple(df[groupby_columns])]\n - categories_mins[tuple(df[groupby_columns])])\n + categories_mins[tuple(df[groupby_columns])])\n else:\n return (value * (categories_maxs[df[groupby_columns]]\n - categories_mins[df[groupby_columns]])\n + categories_mins[df[groupby_columns]])\n except Exception:\n warnings.warn(f'Couldn\\'t manage to find the mean and standard deviation values for the groupby columns {groupby_columns} with values {tuple(df[groupby_columns])}.')\n return np.nan\n else:\n raise Exception('ERROR: Invalid parameters. Either the `min` and `max` or the `df`, `categories_mins`, `categories_maxs` and `groupby_columns` must be set.')\n\n\ndef normalize_data(df, data=None, id_columns=['patientunitstayid', 'ts'],\n normalization_method='z-score', columns_to_normalize=None,\n columns_to_normalize_categ=None, categ_columns=None,\n see_progress=True, get_stats=False,\n search_by_dtypes=False, inplace=False):\n '''Performs data normalization to a continuous valued tensor or dataframe,\n changing the scale of the data.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Original Pandas or Dask dataframe which is used to correctly calculate the\n necessary statistical values used in the normalization. These values\n can't be calculated from the tensor as it might have been padded. If\n the data tensor isn't specified, the normalization is applied directly\n on the dataframe.\n data : torch.Tensor, default None\n PyTorch tensor corresponding to the data which will be normalized\n by the specified normalization method. If the data tensor isn't\n specified, the normalization is applied directly on the dataframe.\n id_columns : string or list of strings, default ['subject_id', 'ts']\n List of columns names which represent identifier columns. These are not\n supposed to be normalized.\n normalization_method : string, default 'z-score'\n Specifies the normalization method used. 
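# --- Illustrative round trip for the min-max case: `apply_minmax_denorm` above
# multiplies by the range and adds back the minimum, undoing the min-max scaling.
example_value, example_min, example_max = 7.0, 2.0, 12.0
example_norm = (example_value - example_min) / (example_max - example_min)
example_recovered = example_norm * (example_max - example_min) + example_min   # == 7.0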
It can be a z-score\n normalization, where the data is subtracted of its mean and divided\n by the standard deviation, which makes it have zero average and unit\n variance, much like a standard normal distribution; it can be a\n min-max normalization, where the data is subtracted by its minimum\n value and then divided by the difference between the minimum and the\n maximum value, getting to a fixed range from 0 to 1.\n columns_to_normalize : string or list of strings, default None\n If specified, the columns provided in the list are the only ones that\n will be normalized. If set to False, no column will be normalized directly,\n although columns can still be normalized in groups of categories, if\n specified in the `columns_to_normalize_categ` parameter. Otherwise, all\n continuous columns will be normalized.\n columns_to_normalize_categ : tuple or list of tuples of tuples, default None\n If specified, the columns provided in the list are going to be\n normalized on their categories. That is, the values (column 2 in the\n tuple) are normalized with stats of their respective categories (column\n 1 of the tuple). Otherwise, no column will be normalized on their\n categories.\n categ_columns : string or list of strings, default None\n If specified, the columns in the list, which represent categorical\n features, which either are a label or will be embedded, aren't\n going to be normalized.\n see_progress : bool, default True\n If set to True, a progress bar will show up indicating the execution\n of the normalization calculations.\n get_stats : bool, default False\n If set to True, the stats used to normalize the data (e.g. mean and\n standard deviation) are also outputed.\n search_by_dtypes : bool, default False\n If set to True, the method will only look for boolean columns based on\n their data type. This is only reliable if all the columns' data types\n have been properly set.\n inplace : bool, default False\n If set to True, the original dataframe will be used and modified\n directly. Otherwise, a copy will be created and returned, without\n changing the original dataframe.\n\n Returns\n -------\n data : pandas.DataFrame or dask.DataFrame or torch.Tensor\n Normalized Pandas or Dask dataframe or PyTorch tensor.\n\n If get_stats == True and normalization_method == 'z-score':\n\n mean : float or dict or list of floats or list of dicts\n Mean value(s) used in the data normalization.\n std : float or dict or list of floats or list of dicts\n Standard deviation value(s) used in the data normalization.\n\n If get_stats == True and normalization_method == 'min-max':\n\n min : dict\n Minimum value(s) used in the data normalization.\n max : dict\n Maximum value(s) used in the data normalization.\n '''\n # Check if specific columns have been specified for normalization\n if columns_to_normalize is None:\n # List of all columns in the dataframe\n feature_columns = list(df.columns)\n # Normalize all non identifier continuous columns, ignore one hot encoded ones\n columns_to_normalize = feature_columns\n if id_columns is not None:\n # Make sure that the id_columns is a list\n if isinstance(id_columns, str):\n id_columns = [id_columns]\n if not isinstance(id_columns, list):\n raise Exception(f'ERROR: The `id_columns` argument must be specified as either a single string or a list of strings. 
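# --- Illustrative usage sketch of this `normalize_data` function, with
# hypothetical column names and some dataframe `example_df`: `heart_rate` is
# normalized globally, while `lab_value` is normalized within the groups defined
# by `lab_name` (the tuple format described above); `get_stats=True` also
# returns the statistics used.
norm_df, example_means, example_stds, example_cat_means, example_cat_stds = normalize_data(
    example_df, id_columns=['patientunitstayid', 'ts'],
    normalization_method='z-score',
    columns_to_normalize=['heart_rate'],
    columns_to_normalize_categ=[('lab_name', 'lab_value')],
    get_stats=True)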
Received input with type {type(id_columns)}.')\n # List of all columns in the dataframe, except the ID columns\n [columns_to_normalize.remove(col) for col in id_columns]\n if categ_columns is not None:\n # Make sure that the categ_columns is a list\n if isinstance(categ_columns, str):\n categ_columns = [categ_columns]\n if not isinstance(categ_columns, list):\n raise Exception(f'ERROR: The `categ_columns` argument must be specified as either a single string or a list of strings. Received input with type {type(categ_columns)}.')\n # Prevent all features that will be embedded from being normalized\n [columns_to_normalize.remove(col) for col in categ_columns]\n # List of boolean or one hot encoded columns\n boolean_cols = search_explore.list_boolean_columns(df[columns_to_normalize], search_by_dtypes=search_by_dtypes)\n if boolean_cols is not None:\n # Prevent boolean features from being normalized\n [columns_to_normalize.remove(col) for col in boolean_cols]\n # Remove all non numeric columns that could be left\n columns_to_normalize = [col for col in columns_to_normalize\n if df[col].dtype == int or df[col].dtype == float]\n if columns_to_normalize is None:\n print('No columns to normalize, returning the original dataframe.')\n return df\n\n # Make sure that the columns_to_normalize is a list\n if isinstance(columns_to_normalize, str):\n columns_to_normalize = [columns_to_normalize]\n if not isinstance(columns_to_normalize, list) and not isinstance(columns_to_normalize, bool):\n raise Exception(f'ERROR: The `columns_to_normalize` argument must be specified as either a single string, a list of strings or a boolean. Received input with type {type(columns_to_normalize)}.')\n\n if type(normalization_method) is not str:\n raise ValueError('Argument normalization_method should be a string. Available options are \"z-score\" and \"min-max\".')\n\n if normalization_method.lower() == 'z-score':\n if columns_to_normalize is not False:\n # Calculate the means and standard deviations\n means = df[columns_to_normalize].mean()\n stds = df[columns_to_normalize].std()\n # Check if there are constant features\n const_feat = list(stds[stds == 0].index)\n if len(const_feat) > 0:\n # Prevent constant features from being normalized\n [columns_to_normalize.remove(col) for col in const_feat]\n means = means.drop(const_feat)\n stds = stds.drop(const_feat)\n warnings.warn(f'Found columns {const_feat} to be constant throughout all the data. 
They should be removed as no insight will be extracted from them.')\n\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n means = means.compute()\n stds = stds.compute()\n\n # Check if the data being normalized is directly the dataframe\n if data is None:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data = df.copy()\n else:\n # Use the original dataframe\n data = df\n\n # Normalize the right columns\n if columns_to_normalize is not False:\n print(f'z-score normalizing columns {columns_to_normalize}...')\n data[columns_to_normalize] = (data[columns_to_normalize] - means) / stds\n\n if columns_to_normalize_categ is not None:\n if get_stats is True:\n mean_list = []\n std_list = []\n # Make sure that the columns_to_normalize_categ is a list\n if isinstance(columns_to_normalize_categ, tuple):\n columns_to_normalize_categ = [columns_to_normalize_categ]\n if not isinstance(columns_to_normalize_categ, list):\n raise Exception(f'ERROR: The `columns_to_normalize_categ` argument must be specified as either a single tuple or a list of tuples. Received input with type {type(columns_to_normalize_categ)}.')\n print(f'z-score normalizing columns {columns_to_normalize_categ} by their associated categories...')\n for col_tuple in utils.iterations_loop(columns_to_normalize_categ, see_progress=see_progress):\n categ_columns = col_tuple[0]\n column_to_normalize = col_tuple[1]\n # Calculate the means and standard deviations\n means_grpb = df.groupby(categ_columns)[column_to_normalize].mean()\n stds_grpb = df.groupby(categ_columns)[column_to_normalize].std()\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n means_grpb = means.compute()\n stds_grpb = stds.compute()\n if get_stats is True:\n if isinstance(column_to_normalize, str):\n # Make sure that the feature being normalized has its name specified in the stats\n tmp_mean_grpb = dict()\n tmp_std_grpb = dict()\n tmp_mean_grpb[column_to_normalize] = means_grpb.to_dict()\n tmp_std_grpb[column_to_normalize] = stds_grpb.to_dict()\n # Add the current stats values to the output lists\n mean_list.append(tmp_mean_grpb)\n std_list.append(tmp_std_grpb)\n else:\n # Add the current stats values to the output lists\n mean_list.append(means_grpb.to_dict())\n std_list.append(stds_grpb.to_dict())\n # Get the categories columns as a numpy array, so as to\n # index the groupby-resulting dataframes of mean and standard\n # deviation values\n cat_arr = df[categ_columns].to_numpy()\n if isinstance(categ_columns, list) and len(categ_columns) > 1:\n # Convert the sets of values into tuples so as to be\n # properly readable as dataframe indices\n cat_arr = list(map(tuple, cat_arr))\n # Get the mean and standard deviation values in the same\n # order as the original dataframe's row order\n means_cat = means_grpb.loc[cat_arr].to_numpy()\n stds_cat = stds_grpb.loc[cat_arr].to_numpy()\n # Normalize the right categories\n data[column_to_normalize] = (data[column_to_normalize] - means_cat) / stds_cat\n if get_stats is True:\n # Merge all the stats dictionaries\n mean_categ_dict = utils.merge_dicts(mean_list)\n std_categ_dict = utils.merge_dicts(std_list)\n\n # Otherwise, the tensor is normalized\n else:\n if columns_to_normalize is not False:\n # Dictionaries to retrieve the mean and standard deviation values\n column_means = dict(means)\n column_stds = dict(stds)\n # Dictionary to convert the the tensor's column 
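# --- Standalone sketch of the groupby-stats broadcasting trick used above:
# per-category means and standard deviations are computed once, then indexed
# with each row's category so the normalization stays vectorized. Names are made up.
import pandas as pd

example_df = pd.DataFrame({'lab_name': ['glucose', 'sodium', 'glucose', 'sodium'],
                           'lab_value': [110.0, 140.0, 95.0, 138.0]})
example_means_grpb = example_df.groupby('lab_name')['lab_value'].mean()
example_stds_grpb = example_df.groupby('lab_name')['lab_value'].std()
example_cat_arr = example_df['lab_name'].to_numpy()
# Per-row statistics, in the same order as the original rows
example_means_cat = example_means_grpb.loc[example_cat_arr].to_numpy()
example_stds_cat = example_stds_grpb.loc[example_cat_arr].to_numpy()
example_df['lab_value_norm'] = (example_df['lab_value'] - example_means_cat) / example_stds_cat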
indices into the dataframe's column names\n idx_to_name = dict(enumerate(df.columns))\n # Dictionary to convert the dataframe's column names into the tensor's column indices\n name_to_idx = dict([(t[1], t[0]) for t in enumerate(df.columns)])\n # List of indices of the tensor's columns which are needing normalization\n tensor_columns_to_normalize = [name_to_idx[name] for name in columns_to_normalize]\n # Normalize the right columns\n print(f'z-score normalizing columns {columns_to_normalize}...')\n for col in utils.iterations_loop(tensor_columns_to_normalize, see_progress=see_progress):\n data[:, :, col] = ((data[:, :, col] - column_means[idx_to_name[col]])\n / column_stds[idx_to_name[col]])\n\n if get_stats is False:\n return data\n elif columns_to_normalize is not False and columns_to_normalize_categ is not None:\n return data, means.to_dict(), stds.to_dict(), mean_categ_dict, std_categ_dict\n elif columns_to_normalize is not False and columns_to_normalize_categ is None:\n return data, means.to_dict(), stds.to_dict()\n elif columns_to_normalize is False and columns_to_normalize_categ is not None:\n return data, mean_categ_dict, std_categ_dict\n\n elif normalization_method.lower() == 'min-max':\n if columns_to_normalize is not False:\n mins = df[columns_to_normalize].min()\n maxs = df[columns_to_normalize].max()\n # Check if there are constant features\n const_feat = list(mins[mins == maxs].index)\n if len(const_feat) > 0:\n # Prevent constant features from being normalized\n [columns_to_normalize.remove(col) for col in const_feat]\n mins = mins.drop(const_feat)\n maxs = maxs.drop(const_feat)\n warnings.warn(f'Found columns {const_feat} to be constant throughout all the data. They should be removed as no insight will be extracted from them.')\n\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n mins = means.compute()\n maxs = maxs.compute()\n\n # Check if the data being normalized is directly the dataframe\n if data is None:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data = df.copy()\n else:\n # Use the original dataframe\n data = df\n\n if columns_to_normalize is not False:\n # Normalize the right columns\n print(f'min-max normalizing columns {columns_to_normalize}...')\n data[columns_to_normalize] = (data[columns_to_normalize] - mins) / (maxs - mins)\n\n if columns_to_normalize_categ is not None:\n if get_stats is True:\n min_list = []\n max_list = []\n # Make sure that the columns_to_normalize_categ is a list\n if isinstance(columns_to_normalize_categ, tuple):\n columns_to_normalize_categ = [columns_to_normalize_categ]\n if not isinstance(columns_to_normalize_categ, list):\n raise Exception(f'ERROR: The `columns_to_normalize_categ` argument must be specified as either a single tuple or a list of tuples. 
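# --- Standalone sketch of the column-name-to-tensor-index mapping used above
# when a padded 3D tensor (samples x timestamps x features) is normalized with
# stats taken from the dataframe. Shapes, names and values are illustrative only.
import torch

example_features = ['patientunitstayid', 'ts', 'heart_rate']
example_name_to_idx = {name: idx for idx, name in enumerate(example_features)}
example_tensor = torch.rand(2, 5, len(example_features))
example_means = {'heart_rate': 80.0}
example_stds = {'heart_rate': 12.0}
for name in ['heart_rate']:
    col = example_name_to_idx[name]
    example_tensor[:, :, col] = (example_tensor[:, :, col] - example_means[name]) / example_stds[name]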
Received input with type {type(columns_to_normalize_categ)}.')\n print(f'min-max normalizing columns {columns_to_normalize_categ} by their associated categories...')\n for col_tuple in columns_to_normalize_categ:\n categ_columns = col_tuple[0]\n column_to_normalize = col_tuple[1]\n # Calculate the minimum and maximum values\n mins_grpb = df.groupby(col_tuple[0])[col_tuple[1]].min()\n maxs_grpb = df.groupby(col_tuple[0])[col_tuple[1]].max()\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n mins_grpb = mins_grpb.compute()\n maxs_grpb = maxs_grpb.compute()\n if get_stats is True:\n if isinstance(column_to_normalize, str):\n # Make sure that the feature being normalized has its name specified in the stats\n tmp_min_grpb = dict()\n tmp_max_grpb = dict()\n tmp_min_grpb[column_to_normalize] = mins_grpb.to_dict()\n tmp_max_grpb[column_to_normalize] = maxs_grpb.to_dict()\n # Add the current stats values to the output lists\n min_list.append(tmp_min_grpb)\n max_list.append(tmp_max_grpb)\n else:\n # Add the current stats values to the output lists\n min_list.append(mins_grpb.to_dict())\n max_list.append(maxs_grpb.to_dict())\n # Get the categories columns as a numpy array, so as to\n # index the groupby-resulting dataframes of minimum and\n # maximum values\n cat_arr = df[categ_columns].to_numpy()\n if isinstance(categ_columns, list) and len(categ_columns) > 1:\n # Convert the sets of values into tuples so as to be\n # properly readable as dataframe indices\n cat_arr = list(map(tuple, cat_arr))\n # Get the minimum and maximum values in the same\n # order as the original dataframe's row order\n mins_cat = mins_grpb.loc[cat_arr].to_numpy()\n maxs_cat = maxs_grpb.loc[cat_arr].to_numpy()\n # Normalize the right categories\n data[column_to_normalize] = (data[column_to_normalize] - mins_cat) / (maxs_cat - mins_cat)\n if get_stats is True:\n # Merge all the stats dictionaries\n min_categ_dict = utils.merge_dicts(min_list)\n max_categ_dict = utils.merge_dicts(max_list)\n # Otherwise, the tensor is normalized\n else:\n if columns_to_normalize is not False:\n # Dictionaries to retrieve the min and max values\n column_mins = dict(mins)\n column_maxs = dict(maxs)\n # Dictionary to convert the the tensor's column indices into the dataframe's column names\n idx_to_name = dict(enumerate(df.columns))\n # Dictionary to convert the dataframe's column names into the tensor's column indices\n name_to_idx = dict([(t[1], t[0]) for t in enumerate(df.columns)])\n # List of indices of the tensor's columns which are needing normalization\n tensor_columns_to_normalize = [name_to_idx[name] for name in columns_to_normalize]\n # Normalize the right columns\n print(f'min-max normalizing columns {columns_to_normalize}...')\n for col in utils.iterations_loop(tensor_columns_to_normalize, see_progress=see_progress):\n data[:, :, col] = ((data[:, :, col] - column_mins[idx_to_name[col]])\n / (column_maxs[idx_to_name[col]] - column_mins[idx_to_name[col]]))\n\n if get_stats is False:\n return data\n elif columns_to_normalize is not False and columns_to_normalize_categ is not None:\n return data, mins.to_dict(), maxs.to_dict(), min_categ_dict, max_categ_dict\n elif columns_to_normalize is not False and columns_to_normalize_categ is None:\n return data, mins.to_dict(), maxs.to_dict()\n elif columns_to_normalize is False and columns_to_normalize_categ is not None:\n return data, min_categ_dict, max_categ_dict\n else:\n raise ValueError(f'{normalization_method} isn\\'t a valid 
normalization method. Available options \\\n are \"z-score\" and \"min-max\".')\n\n\ndef denormalize_data(df=None, data=None, id_columns=['patientunitstayid', 'ts'],\n denormalization_method='z-score', columns_to_denormalize=None,\n columns_to_denormalize_categ=None, categ_columns=None,\n see_progress=True, search_by_dtypes=False, inplace=False,\n means=None, stds=None, mins=None, maxs=None,\n feature_columns=None):\n '''Performs data denormalization to a continuous valued tensor or dataframe,\n changing the scale of the data.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame, default None\n Original Pandas or Dask dataframe which is used to correctly calculate the\n necessary statistical values used in the denormalization. These values\n can't be calculated from the tensor as it might have been padded. If\n the data tensor isn't specified, the denormalization is applied directly\n on the dataframe.\n data : torch.Tensor or numpy.Array, default None\n PyTorch tensor or NumPy array corresponding to the data which will be\n denormalized by the specified denormalization method. If the data isn't\n specified, the denormalization is applied directly on the dataframe.\n id_columns : string or list of strings, default ['subject_id', 'ts']\n List of columns names which represent identifier columns. These are not\n supposed to be denormalized.\n denormalization_method : string, default 'z-score'\n Specifies the denormalization method used. It can be a z-score\n denormalization, where the data is subtracted of its mean and divided\n by the standard deviation, which makes it have zero average and unit\n variance, much like a standard normal distribution; it can be a\n min-max denormalization, where the data is subtracted by its minimum\n value and then divided by the difference between the minimum and the\n maximum value, getting to a fixed range from 0 to 1.\n columns_to_denormalize : string or list of strings, default None\n If specified, the columns provided in the list are the only ones that\n will be denormalized. If set to False, no column will be denormalized directly,\n although columns can still be denormalized in groups of categories, if\n specified in the `columns_to_denormalize_categ` parameter. Otherwise, all\n continuous columns will be denormalized.\n columns_to_denormalize_categ : tuple or list of tuples of tuples, default None\n If specified, the columns provided in the list are going to be\n denormalized on their categories. That is, the values (column 2 in the\n tuple) are denormalized with stats of their respective categories (column\n 1 of the tuple). Otherwise, no column will be denormalized on their\n categories.\n categ_columns : string or list of strings, default None\n If specified, the columns in the list, which represent categorical\n features, which either are a label or will be embedded, aren't\n going to be denormalized.\n see_progress : bool, default True\n If set to True, a progress bar will show up indicating the execution\n of the denormalization calculations.\n search_by_dtypes : bool, default False\n If set to True, the method will only look for boolean columns based on\n their data type. This is only reliable if all the columns' data types\n have been properly set.\n inplace : bool, default False\n If set to True, the original dataframe will be used and modified\n directly. 
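# --- Illustrative usage sketch of this `denormalize_data` function on model
# outputs: a normalized predictions tensor is mapped back to the original scale,
# reusing stats previously returned by `normalize_data(..., get_stats=True)`.
# The `predictions_tensor`, `example_means`, `example_stds` and `example_features`
# names are hypothetical.
denorm_predictions = denormalize_data(data=predictions_tensor,
                                      denormalization_method='z-score',
                                      columns_to_denormalize=['heart_rate'],
                                      means=example_means, stds=example_stds,
                                      feature_columns=example_features)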
Otherwise, a copy will be created and returned, without\n changing the original dataframe.\n\n Returns\n -------\n data : pandas.DataFrame or dask.DataFrame or torch.Tensor\n Denormalized Pandas or Dask dataframe or PyTorch tensor.\n '''\n # [TODO] Add the option in denormalize_data to denormalize a data tensor\n # using a norm_stats dictionary instead of fetching the denormalization\n # stats from the original dataframe\n if feature_columns is None and df is not None:\n # List of all columns in the dataframe\n feature_columns = list(df.columns)\n # Check if specific columns have been specified for denormalization\n if columns_to_denormalize is None:\n # Denormalize all non identifier continuous columns, ignore one hot encoded ones\n columns_to_denormalize = feature_columns.copy()\n if id_columns is not None:\n # Make sure that the id_columns is a list\n if isinstance(id_columns, str):\n id_columns = [id_columns]\n if not isinstance(id_columns, list):\n raise Exception(f'ERROR: The `id_columns` argument must be specified as either a single string or a list of strings. Received input with type {type(id_columns)}.')\n # List of all columns in the dataframe, except the ID columns\n [columns_to_denormalize.remove(col) for col in id_columns]\n if categ_columns is not None:\n # Make sure that the categ_columns is a list\n if isinstance(categ_columns, str):\n categ_columns = [categ_columns]\n if not isinstance(categ_columns, list):\n raise Exception(f'ERROR: The `categ_columns` argument must be specified as either a single string or a list of strings. Received input with type {type(categ_columns)}.')\n # Prevent all features that will be embedded from being denormalized\n [columns_to_denormalize.remove(col) for col in categ_columns]\n # List of boolean or one hot encoded columns\n boolean_cols = search_explore.list_boolean_columns(df[columns_to_denormalize], search_by_dtypes=search_by_dtypes)\n if boolean_cols is not None:\n # Prevent boolean features from being denormalized\n [columns_to_denormalize.remove(col) for col in boolean_cols]\n # Remove all non numeric columns that could be left\n columns_to_denormalize = [col for col in columns_to_denormalize\n if df[col].dtype == int or df[col].dtype == float]\n if columns_to_denormalize is None:\n print('No columns to denormalize, returning the original dataframe.')\n return df\n\n # Make sure that the columns_to_denormalize is a list\n if isinstance(columns_to_denormalize, str):\n columns_to_denormalize = [columns_to_denormalize]\n if not isinstance(columns_to_denormalize, list) and not isinstance(columns_to_denormalize, bool):\n raise Exception(f'ERROR: The `columns_to_denormalize` argument must be specified as either a single string, a list of strings or a boolean. Received input with type {type(columns_to_denormalize)}.')\n\n if type(denormalization_method) is not str:\n raise ValueError('Argument denormalization_method should be a string. 
Available options are \"z-score\" and \"min-max\".')\n\n if denormalization_method.lower() == 'z-score':\n if columns_to_denormalize is not False:\n # Calculate the means and standard deviations\n if means is None:\n means = df[columns_to_denormalize].mean()\n if stds is None:\n stds = df[columns_to_denormalize].std()\n # Check if there are constant features\n if isinstance(stds, pd.Series):\n const_feat = list(stds[stds == 0].index)\n elif isinstance(stds, dict):\n const_feat = [feat for feat in stds.keys() if stds[feat] == 0]\n if len(const_feat) > 0:\n # Prevent constant features from being denormalized\n [columns_to_denormalize.remove(col) for col in const_feat]\n means = means.drop(const_feat)\n stds = stds.drop(const_feat)\n warnings.warn(f'Found columns {const_feat} to be constant throughout all the data. They should be removed as no insight will be extracted from them.')\n\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n means = means.compute()\n stds = stds.compute()\n\n # Check if the data being denormalized is directly the dataframe\n if data is None:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data = df.copy()\n else:\n # Use the original dataframe\n data = df\n\n # Denormalize the right columns\n if columns_to_denormalize is not False:\n print(f'z-score denormalizing columns {columns_to_denormalize}...')\n data[columns_to_denormalize] = data[columns_to_denormalize] * stds + means\n\n if columns_to_denormalize_categ is not None:\n # Make sure that the columns_to_denormalize_categ is a list\n if isinstance(columns_to_denormalize_categ, tuple):\n columns_to_denormalize_categ = [columns_to_denormalize_categ]\n if not isinstance(columns_to_denormalize_categ, list):\n raise Exception(f'ERROR: The `columns_to_denormalize_categ` argument must be specified as either a single tuple or a list of tuples. 
Received input with type {type(columns_to_denormalize_categ)}.')\n print(f'z-score denormalizing columns {columns_to_denormalize_categ} by their associated categories...')\n for col_tuple in utils.iterations_loop(columns_to_denormalize_categ, see_progress=see_progress):\n categ_columns = col_tuple[0]\n column_to_denormalize = col_tuple[1]\n # Calculate the means and standard deviations\n means_grpb = df.groupby(categ_columns)[\n column_to_denormalize].mean()\n stds_grpb = df.groupby(categ_columns)[\n column_to_denormalize].std()\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n means_grpb = means.compute()\n stds_grpb = stds.compute()\n # Get the categories columns as a numpy array, so as to\n # index the groupby-resulting dataframes of mean and standard\n # deviation values\n cat_arr = df[categ_columns].to_numpy()\n if isinstance(categ_columns, list) and len(categ_columns) > 1:\n # Convert the sets of values into tuples so as to be\n # properly readable as dataframe indices\n cat_arr = list(map(tuple, cat_arr))\n # Get the mean and standard deviation values in the same\n # order as the original dataframe's row order\n means_cat = means_grpb.loc[cat_arr].to_numpy()\n stds_cat = stds_grpb.loc[cat_arr].to_numpy()\n # Denormalize the right categories\n data[column_to_denormalize] = data[column_to_denormalize] * stds_cat + means_cat\n # Otherwise, the array is denormalized\n else:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original array\n if isinstance(data, torch.Tensor):\n data = data.clone()\n else:\n data = data.copy()\n else:\n # Use the original array\n data = data\n if columns_to_denormalize is not False:\n # Dictionaries to retrieve the mean and standard deviation values\n if not isinstance(means, dict):\n means = dict(means)\n if not isinstance(stds, dict):\n stds = dict(stds)\n # Dictionary to convert the the array's column indices into the dataframe's column names\n idx_to_name = dict(enumerate(feature_columns))\n # Dictionary to convert the dataframe's column names into the array's column indices\n name_to_idx = dict([(t[1], t[0])\n for t in enumerate(feature_columns)])\n # List of indices of the array's columns which are needing denormalization\n array_columns_to_denormalize = [name_to_idx[name]\n for name in columns_to_denormalize]\n # Denormalize the right columns\n print(f'z-score denormalizing columns {columns_to_denormalize}...')\n for col in utils.iterations_loop(array_columns_to_denormalize, see_progress=see_progress):\n if len(data.shape) == 3:\n data[:, :, col] = data[:, :, col] * stds[idx_to_name[col]] + means[idx_to_name[col]]\n elif len(data.shape) == 2:\n data[:, col] = data[:, col] * stds[idx_to_name[col]] + means[idx_to_name[col]]\n else:\n raise Exception(f'ERROR: The data array or tensor must be either two or three-dimensional. The provided data has {len(data.shape)} dimensions.')\n\n return data\n\n elif denormalization_method.lower() == 'min-max':\n if columns_to_denormalize is not False:\n mins = df[columns_to_denormalize].min()\n maxs = df[columns_to_denormalize].max()\n # Check if there are constant features\n const_feat = list(mins[mins == maxs].index)\n if len(const_feat) > 0:\n # Prevent constant features from being denormalized\n [columns_to_denormalize.remove(col) for col in const_feat]\n mins = mins.drop(const_feat)\n maxs = maxs.drop(const_feat)\n warnings.warn(f'Found columns {const_feat} to be constant throughout all the data. 
They should be removed as no insight will be extracted from them.')\n\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n mins = means.compute()\n maxs = maxs.compute()\n\n # Check if the data being denormalized is directly the dataframe\n if data is None:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n if isinstance(data, torch.Tensor):\n data = data.clone()\n else:\n data = data.copy()\n else:\n # Use the original dataframe\n data = df\n\n if columns_to_denormalize is not False:\n # Denormalize the right columns\n print(f'min-max denormalizing columns {columns_to_denormalize}...')\n data[columns_to_denormalize] = data[columns_to_denormalize] * (maxs - mins) + mins\n\n if columns_to_denormalize_categ is not None:\n # Make sure that the columns_to_denormalize_categ is a list\n if isinstance(columns_to_denormalize_categ, tuple):\n columns_to_denormalize_categ = [columns_to_denormalize_categ]\n if not isinstance(columns_to_denormalize_categ, list):\n raise Exception(f'ERROR: The `columns_to_denormalize_categ` argument must be specified as either a single tuple or a list of tuples. Received input with type {type(columns_to_denormalize_categ)}.')\n print(f'min-max denormalizing columns {columns_to_denormalize_categ} by their associated categories...')\n for col_tuple in columns_to_denormalize_categ:\n categ_columns = col_tuple[0]\n column_to_denormalize = col_tuple[1]\n # Calculate the minimum and maximum values\n mins_grpb = df.groupby(col_tuple[0])[col_tuple[1]].min()\n maxs_grpb = df.groupby(col_tuple[0])[col_tuple[1]].max()\n if isinstance(df, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n mins_grpb = mins_grpb.compute()\n maxs_grpb = maxs_grpb.compute()\n # Get the categories columns as a numpy array, so as to\n # index the groupby-resulting dataframes of minimum and\n # maximum values\n cat_arr = df[categ_columns].to_numpy()\n if isinstance(categ_columns, list) and len(categ_columns) > 1:\n # Convert the sets of values into tuples so as to be\n # properly readable as dataframe indices\n cat_arr = list(map(tuple, cat_arr))\n # Get the minimum and maximum values in the same\n # order as the original dataframe's row order\n mins_cat = mins_grpb.loc[cat_arr].to_numpy()\n maxs_cat = maxs_grpb.loc[cat_arr].to_numpy()\n # Denormalize the right categories\n data[column_to_denormalize] = data[column_to_denormalize] * (maxs_cat - mins_cat) + mins_cat\n # Otherwise, the array is denormalized\n else:\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original array\n data = data.clone()\n else:\n # Use the original array\n data = data\n if columns_to_denormalize is not False:\n # Dictionaries to retrieve the min and max values\n column_mins = dict(mins)\n column_maxs = dict(maxs)\n # Dictionary to convert the the array's column indices into the dataframe's column names\n idx_to_name = dict(enumerate(feature_columns))\n # Dictionary to convert the dataframe's column names into the array's column indices\n name_to_idx = dict([(t[1], t[0])\n for t in enumerate(feature_columns)])\n # List of indices of the array's columns which are needing denormalization\n array_columns_to_denormalize = [name_to_idx[name] for name in columns_to_denormalize]\n # Denormalize the right columns\n print(f'min-max denormalizing columns {columns_to_denormalize}...')\n for col in utils.iterations_loop(array_columns_to_denormalize, 
see_progress=see_progress):\n if len(data.shape) == 3:\n data[:, :, col] = (data[:, :, col] * (column_maxs[idx_to_name[col]] - column_mins[idx_to_name[col]])\n + column_mins[idx_to_name[col]])\n elif len(data.shape) == 2:\n data[:, col] = (data[:, col] * (column_maxs[idx_to_name[col]] - column_mins[idx_to_name[col]])\n + column_mins[idx_to_name[col]])\n else:\n raise Exception(f'ERROR: The data array or tensor must be either two or three-dimensional. The provided data has {len(data.shape)} dimensions.')\n\n return data\n else:\n raise ValueError(f'{denormalization_method} isn\\'t a valid denormalization method. Available options \\\n are \"z-score\" and \"min-max\".')\n\n\ndef transpose_dataframe(df, column_to_transpose=None, inplace=False):\n '''Transpose a dataframe, either by its original index or through a specific\n column, which will be converted to the new column names (i.e. the header).\n\n Parameters\n ----------\n data : pandas.DataFrame or dask.DataFrame\n Dataframe that will be transposed.\n column_to_transpose : string, default None\n If specified, the given column will be used as the new column names, with\n its unique values forming the new dataframe's header. Otherwise, the\n dataframe will be transposed on its original index.\n inplace : bool, default False\n If set to True, the original tensor or dataframe will be used and modified\n directly. Otherwise, a copy will be created and returned, without\n changing the original tensor or dataframe.\n\n Returns\n -------\n data : pandas.DataFrame or dask.DataFrame\n Transposed dataframe.\n '''\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data_df = df.copy()\n else:\n # Use the original dataframe\n data_df = df\n if column_to_transpose is not None:\n # Set as index the column that has the desired column names as values\n data_df = data_df.set_index(column_to_transpose)\n if isinstance(data_df, pd.DataFrame):\n data_df = data_df.transpose()\n elif isinstance(data_df, dd.DataFrame):\n data_df = (dd.from_pandas(data_df.compute().transpose(),\n npartitions=data_df.npartitions))\n else:\n raise Exception(f'ERROR: The input data must either be a Pandas dataframe or a Dask dataframe, not {type(df)}.')\n return data_df\n\n\ndef merge_values(x1, x2, separator=';', str_over_num=True, join_strings=True,\n is_bool=False):\n '''Merge two values, by extracting the non-missing one, their average value\n or the non-numeric one.\n\n Parameters\n ----------\n x1\n Value 1 of the merge operation.\n x2\n Value 2 of the merge operation.\n separator : string, default ';'\n Symbol that concatenates each string's words, which will be used to join\n the inputs if they are both strings.\n str_over_num : bool, default True\n If set to True, preference will be given to string inputs. Otherwise,\n numeric inputs will be prioritized.\n join_strings : bool, default True\n If set to True, in case of receiving two string inputs, the algorithm\n will joined them using the defined separator. Otherwise, the shortest\n string will be returned.\n is_bool : bool, default False\n If set to True, the method will treat the values to merge as boolean\n (i.e. 
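# --- Illustrative usage sketch of `transpose_dataframe` above, with a
# hypothetical `exam_name` column whose unique values become the new header.
example_results = pd.DataFrame({'exam_name': ['hemoglobin', 'platelets'],
                                'result': [13.2, 250.0]})
example_transposed = transpose_dataframe(example_results, column_to_transpose='exam_name')
# `example_transposed` now has `hemoglobin` and `platelets` as columns, with the
# former `result` column as its single row.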
it will return either 1, if it's one of the values, or 0).\n\n Returns\n -------\n x\n Resulting merged value.\n '''\n if is_bool is True:\n if (x1 is None or utils.is_num_nan(x1)) and (x2 is None or utils.is_num_nan(x2)):\n return 0\n elif (x1 is None or utils.is_num_nan(x1)) and not (x2 is None or utils.is_num_nan(x2)):\n return x2\n elif not (x1 is None or utils.is_num_nan(x1)) and (x2 is None or utils.is_num_nan(x2)):\n return x1\n else:\n return max(x1, x2)\n if x1 is None and x2 is not None:\n return x2\n elif x1 is not None and x2 is None:\n return x1\n elif x1 == x2:\n return x1\n elif ((isinstance(x1, float) or isinstance(x1, int))\n and (isinstance(x2, float) or isinstance(x2, int))):\n # Get the average value between the columns, ignoring NaNs\n return np.nanmean([x1, x2])\n elif isinstance(x1, str) and isinstance(x2, str):\n if not isinstance(separator, str):\n raise Exception(f'ERROR: Separator symbol must be in string format, not {type(separator)}.')\n if join_strings is True:\n # Join strings through the defined separator\n return separator.join([x1, x2])\n else:\n # Return the shortest string\n if len(x1) <= len(x2):\n return x1\n else:\n return x2\n elif ((isinstance(x1, float) or isinstance(x1, int))\n and not (isinstance(x2, float) or isinstance(x2, int))):\n if utils.is_num_nan(x1) and not utils.is_num_nan(x2):\n # Return the not NaN value\n return x2\n if str_over_num is True:\n # Give preference to string values\n return x2\n else:\n # Give preference to numeric values\n return x1\n elif not ((isinstance(x1, float) or isinstance(x1, int))\n and (isinstance(x2, float) or isinstance(x2, int))):\n if utils.is_num_nan(x2) and not utils.is_num_nan(x1):\n # Return the not NaN value\n return x1\n if str_over_num is True:\n # Give preference to string values\n return x1\n else:\n # Give preference to numeric values\n return x2\n else:\n warnings.warn(f'Both values are different than NaN and are not numeric. Randomly returning the first value {x1}, instead of {x2}.')\n return x1\n\n\ndef merge_columns(df, cols_to_merge=None, drop_old_cols=True, separator=';',\n join_strings=False, see_progress=True, inplace=False):\n '''Merge columns that have been created, as a consequence of a dataframe\n merge operation, resulting in duplicate columns with suffixes.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe that will have its columns merged.\n cols_to_merge : string or list of strings, default None\n The columns which will be regenerated, by merging its duplicates.\n If not specified, the algorithm will search for columns with suffixes.\n drop_old_cols : bool, default True\n If set to True, the preexisting duplicate columns will be removed.\n separator : string, default ';'\n Symbol that concatenates each string's words, which will be used to join\n the inputs if they are both strings.\n join_strings : bool, default False\n If set to True, in case of receiving two string inputs, the algorithm\n will joined them using the defined separator. Otherwise, the shortest\n string will be returned.\n see_progress : bool, default True\n If set to True, a progress bar will show up indicating the execution\n of the normalization calculations.\n inplace : bool, default False\n If set to True, the original tensor or dataframe will be used and modified\n directly. 
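# --- Illustrative sketch of how `merge_values` above resolves a pair of values
# coming from duplicated columns (all inputs are made up):
merge_values(None, 3.0)                          # -> 3.0   (keep the non-missing value)
merge_values(2.0, 4.0)                           # -> 3.0   (average of two numbers)
merge_values('aspirin', 'ibuprofen')             # -> 'aspirin;ibuprofen' (joined strings)
merge_values(1, 'unknown', str_over_num=True)    # -> 'unknown' (strings take precedence)
merge_values(0, 1, is_bool=True)                 # -> 1     (boolean-style merge keeps the maximum)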
Otherwise, a copy will be created and returned, without\n changing the original tensor or dataframe.\n\n Returns\n -------\n data_df : pandas.DataFrame or dask.DataFrame\n Dataframe with the new merged columns.\n '''\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original dataframe\n data_df = df.copy()\n else:\n # Use the original dataframe\n data_df = df\n if cols_to_merge is None:\n print('Finding columns to merge...')\n # Find all columns that have typical merging suffixes\n cols_to_merge = set([col.split('_x')[0].split('_y')[0] for col in df.columns\n if col.endswith('_x') or col.endswith('_y')])\n # Make sure that the cols_to_merge is a list\n if isinstance(cols_to_merge, str):\n cols_to_merge = [cols_to_merge]\n print('Merging the duplicate columns...')\n for col in utils.iterations_loop(cols_to_merge, see_progress=see_progress):\n # Check if the columns being merged are boolean\n is_bool = all([search_explore.is_boolean_column(data_df, col, n_unique_values=None)]\n for col in [f'{col}_x', f'{col}_y'])\n # Create a column, with the original name, merging the associated columns' values\n data_df[col] = data_df.apply(lambda x: merge_values(x[f'{col}_x'], x[f'{col}_y'],\n separator=separator,\n join_strings=join_strings,\n is_bool=is_bool), axis=1)\n if drop_old_cols:\n print('Removing old columns...')\n # Remove the old columns, with suffixes `_x` and '_y', which resulted\n # from the merge of dataframes\n for col in utils.iterations_loop(cols_to_merge, see_progress=see_progress):\n data_df = data_df.drop(columns=[f'{col}_x', f'{col}_y'])\n print('Done!')\n return data_df\n\n\ndef missing_values_imputation(data, columns_to_imputate=None, method='zero',\n id_column=None, zero_bool=True, reset_index=True,\n search_by_dtypes=False, inplace=False):\n '''Performs missing values imputation to a tensor or dataframe corresponding to\n a single column.\n NOTE: Most imputation methods don't work with float16 data types and\n interpolation can't be applied to nullable integer types.\n\n Parameters\n ----------\n data : torch.Tensor or pandas.DataFrame or dask.DataFrame\n PyTorch tensor corresponding to a single column or a dataframe which will\n be imputed.\n columns_to_imputate : str or list of str, default None\n Specific column(s) to run missing values imputation on. Might be useful\n if some columns should be imputated in a specific method, different from\n the rest. If left unspecified, all columns will be imputated with the\n same method.\n method : string, default 'zero'\n Imputation method to be used. If user inputs 'zero', it will just fill all\n missing values with zero. If the user chooses 'zigzag', it will do a\n forward fill, a backward fill and then replace all remaining missing values\n with zero (this option is only available for dataframes, not tensors).\n If the user selects 'interpolation', missing data will be interpolated based\n on known neighboring values and then all possible remaining ones are\n replaced with zero (this option is only available for dataframes, not\n tensors).\n id_column : string, default None\n Name of the column which corresponds to the sequence or subject identifier\n in the dataframe. If not specified, the imputation will not differenciate\n different IDs nor sequences. 
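# --- Illustrative usage sketch of `merge_columns` above: after a dataframe merge
# leaves duplicated `weight_x` / `weight_y` columns, they are folded back into a
# single `weight` column. Column names and values are hypothetical.
example_joined = pd.DataFrame({'id': [1, 2],
                               'weight_x': [70.0, None],
                               'weight_y': [None, 82.0]})
example_merged = merge_columns(example_joined, cols_to_merge=['weight'])
# `example_merged` keeps one `weight` column with the non-missing value per row.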
Only used if the chosen imputation method is\n 'zigzag' or 'interpolation'.\n zero_bool : bool, default True\n If set to True, it will look for boolean features and replace their\n missing values with zero, regardless of the chosen imputation method.\n reset_index : bool, default True\n If set to True (recommended), the dataframe's index will be reset. This\n can prevent values from being assigned to the wrong rows.\n search_by_dtypes : bool, default False\n If set to True, the method will only look for boolean columns based on\n their data type. This is only reliable if all the columns' data types\n have been properly set.\n inplace : bool, default False\n If set to True, the original tensor or dataframe will be used and modified\n directly. Otherwise, a copy will be created and returned, without\n changing the original tensor or dataframe.\n\n Returns\n -------\n tensor : torch.Tensor\n Imputed PyTorch tensor.\n '''\n if ((not isinstance(data, pd.DataFrame))\n and (not isinstance(data, dd.DataFrame))\n and (not isinstance(data, torch.Tensor))):\n raise Exception(f'ERROR: The input data must either be a PyTorch tensor, a Pandas dataframe or a Dask dataframe, not {type(data)}.')\n if not inplace:\n # Make a copy of the data to avoid potentially unwanted changes to the original data\n if isinstance(data, torch.Tensor):\n data_copy = data.clone()\n else:\n data_copy = data.copy()\n else:\n # Use the original data object\n data_copy = data\n # [TODO] Implement an option to only imputate specified column(s)\n # if columns is None:\n # columns = list(data_copy.columns)\n if reset_index is True:\n # Reset index to avoid assigning values in the wrong rows\n print('Resetting the index...')\n data_copy.reset_index(drop=True, inplace=True)\n if columns_to_imputate is None:\n # Imputate all the columns\n columns_to_imputate = list(data_copy.columns)\n # Make sure that the columns_to_imputate is a list\n if isinstance(columns_to_imputate, str):\n columns_to_imputate = [columns_to_imputate]\n if id_column is not None:\n # Make sure that the ID column is in columns_to_imputate\n if id_column not in columns_to_imputate:\n columns_to_imputate = [id_column] + columns_to_imputate\n if zero_bool is True:\n # Check if there are boolean features\n print('Searching for boolean features...')\n bool_feat = search_explore.list_boolean_columns(data_copy, search_by_dtypes=search_by_dtypes)\n if len(bool_feat) > 0:\n # Fill all boolean features' missing values with zeros\n print('Replacing boolean features\\' missing values with zero...')\n data_copy.loc[:, bool_feat] = data_copy[bool_feat].fillna(value=0)\n # Remove the boolean columns from the list of columns to imputate\n columns_to_imputate = list(set(columns_to_imputate) - set(bool_feat))\n if method.lower() == 'zero':\n # Replace NaN's with zeros\n print('Replacing missing values with zero...')\n if isinstance(data, pd.DataFrame) or isinstance(data, dd.DataFrame):\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n elif isinstance(data, torch.Tensor):\n # [TODO] Add the ability to specify the tensor columns to imputate\n data_copy = torch.where(data_copy != data_copy, torch.zeros_like(data_copy), data_copy)\n elif method.lower() == 'zigzag':\n if isinstance(data, pd.DataFrame) or isinstance(data, dd.DataFrame):\n if id_column is not None:\n # Perform imputation on each ID separately\n # Forward fill and backward fill\n print('Forward filling and backward filling missing values...')\n data_copy.loc[:, columns_to_imputate] = 
data_copy[columns_to_imputate].groupby(id_column).apply(lambda group: group.ffill().bfill())\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n else:\n # Apply imputation on all the data as one single sequence\n # Forward fill\n print('Forward filling missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].ffill()\n # Backward fill\n print('Backward filling missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].bfill()\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n elif isinstance(data, torch.Tensor):\n raise Exception('ERROR: PyTorch tensors aren\\'t supported in the zigzag imputation method. Please use a dataframe instead.')\n elif method.lower() == 'interpolation':\n if isinstance(data, pd.DataFrame) or isinstance(data, dd.DataFrame):\n # Linear interpolation, placing a linear scale between known points and doing simple\n # backward and forward fill, when the missing value doesn't have known data points\n # before or after, respectively\n # NOTE: Since the interpolate method doesn't work on nullable integer data types,\n # we need to find and separate columns with that dtype and apply zigzag imputation on them\n columns_cant_interpolate = list()\n for col in columns_to_imputate:\n if (('Int' in str(data[col].dtype) or 'boolean' in str(data[col].dtype))\n and col != id_column):\n columns_cant_interpolate.append(col)\n columns_to_imputate.remove(col)\n if id_column is not None:\n try:\n if len(columns_cant_interpolate) > 0:\n # Perform zigzag imputation on columns that can't be interpolated\n print('Running zigzag imputation on columns that can\\'t be interpolated...')\n print(f'(These columns are {columns_cant_interpolate})')\n columns_cant_interpolate = [id_column] + columns_cant_interpolate\n # Forward fill and backward fill\n print('Forward filling and backward filling missing values...')\n data_copy.loc[:, columns_cant_interpolate] = data_copy[columns_cant_interpolate].groupby(id_column).apply(lambda group: group.ffill().bfill())\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_cant_interpolate] = data_copy[columns_cant_interpolate].fillna(value=0)\n # There's no need to interpolate if the only column in columns_to_imputate is the ID column\n if len(columns_to_imputate) > 1:\n # Perform imputation on each ID separately\n print('Interpolating missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].groupby(id_column)[columns_to_imputate].apply(lambda group: group.interpolate(limit_direction='both'))\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n except ValueError as e:\n warnings.warn(f'Initial attempt to interpolate failed. 
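# --- Standalone sketch of the 'zigzag' strategy described above, in plain pandas:
# forward fill, then backward fill within each ID, then fill anything still
# missing with zero. The `patient` / `heart_rate` columns are hypothetical.
import pandas as pd

example_ts = pd.DataFrame({'patient': [1, 1, 1, 2, 2],
                           'heart_rate': [None, 80.0, None, None, 95.0]})
example_ts['heart_rate'] = (example_ts.groupby('patient')['heart_rate']
                                      .transform(lambda s: s.ffill().bfill())
                                      .fillna(0))
# Roughly what `missing_values_imputation(example_ts, method='zigzag',
# id_column='patient')` is meant to produce for this column.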
Original exception message: \"{str(e)}\"\\nTrying again after replacing all possible occurences with a Numpy NaN.')\n # Save the current data types\n dtype_dict = dict(data_copy.dtypes)\n # Replace the '' objects with NumPy's NaN\n data_copy = data_copy.applymap(lambda x: x if not utils.is_num_nan(x) else np.nan)\n print('Finished replacing all possible values.')\n # Perform imputation on each ID separately\n print('Interpolating missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].groupby(id_column)[columns_to_imputate].apply(lambda group: group.interpolate(limit_direction='both'))\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n # Convert the data types back to the original ones\n print('Converting data types back to the original ones...')\n data_copy = utils.convert_dtypes(data_copy, dtypes=dtype_dict, inplace=True)\n else:\n try:\n if len(columns_cant_interpolate) > 0:\n # Perform zigzag imputation on columns that can't be interpolated\n print('Running zigzag imputation on columns that can\\'t be interpolated...')\n print(f'(These columns are {columns_cant_interpolate})')\n # Forward fill\n print('Forward filling missing values...')\n data_copy.loc[:, columns_cant_interpolate] = data_copy[columns_cant_interpolate].ffill()\n # Backward fill\n print('Backward filling missing values...')\n data_copy.loc[:, columns_cant_interpolate] = data_copy[columns_cant_interpolate].bfill()\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_cant_interpolate] = data_copy[columns_cant_interpolate].fillna(value=0)\n # There's no need to interpolate if columns_to_imputate is empty\n if len(columns_to_imputate) > 0:\n # Apply imputation on all the data as one single sequence\n print('Interpolating missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].interpolate(limit_direction='both')\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n except ValueError as e:\n warnings.warn(f'Initial attempt to interpolate failed. Original exception message: \"{str(e)}\"\\nTrying again after replacing all possible occurences with a Numpy NaN.')\n # Save the current data types\n dtype_dict = dict(data_copy.dtypes)\n data_copy = utils.convert_dtypes(data_copy, dtypes=dtype_dict, inplace=True)\n print('Finished replacing all possible values.')\n # Apply imputation on all the data as one single sequence\n print('Interpolating missing values...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].interpolate(limit_direction='both')\n # Replace remaining missing values with zero\n print('Replacing remaining missing values with zero...')\n data_copy.loc[:, columns_to_imputate] = data_copy[columns_to_imputate].fillna(value=0)\n # Convert the data types back to the original ones\n print('Converting data types back to the original ones...')\n data_copy = utils.convert_dtypes(data_copy, dtypes=dtype_dict, inplace=True)\n elif isinstance(data, torch.Tensor):\n raise Exception('ERROR: PyTorch tensors aren\\'t supported in the interpolation imputation method. Please use a dataframe instead.')\n else:\n raise Exception(f'ERROR: Unsupported {method} imputation method. 
Currently available options are `zero` and `zigzag`.')\n # [TODO] Add other, more complex imputation methods, like a denoising autoencoder\n print('Done!')\n return data_copy\n\n\ndef __sep_dosage_units(x):\n # Start by assuming that dosage and unit are unknown\n dosage = np.nan\n unit = np.nan\n try:\n x = x.split(' ')\n if len(x) == 2:\n try:\n # Add correctly formated dosage\n dosage = float(x[0])\n except Exception:\n pass\n try:\n if utils.is_definitely_string(x[1]):\n # Add correctly formated unit values\n unit = x[1]\n except Exception:\n pass\n elif len(x) == 1:\n try:\n # Try to add correctly formated dosage, even without units\n dosage = float(x[0])\n except Exception:\n pass\n except Exception:\n try:\n # Try to add correctly formated dosage, even without units\n dosage = float(x)\n except:\n pass\n return dosage, unit\n\n\ndef set_dosage_and_units(df, orig_column='dosage', new_column_names=['drug_dosage', 'drug_unit']):\n '''Separate medication dosage string column into numeric dosage and units\n features.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe containing the medication dosage information.\n orig_column : string, default 'dosage'\n Name of the original column, which will be split in two.\n\n Returns\n -------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe after adding the numeric dosage and units columns.\n '''\n # Separate the dosage and unit data\n dosage_unit_data = df[orig_column].apply(__sep_dosage_units)\n # Make sure that the new columns are created\n for col in new_column_names:\n df[col] = np.nan\n # Add the new dosage and units columns\n df[new_column_names] = pd.DataFrame(dosage_unit_data.to_numpy().tolist(),\n index=dosage_unit_data.index)\n return df\n\n\ndef signal_idx_derivative(s, time_scale='seconds', periods=1):\n '''Creates a series that contains the signal's index derivative, with the\n same divisions (if needed) as the original data and on the desired time\n scale.\n\n Parameters\n ----------\n s : pandas.Series or dask.Series\n Series which will be analyzed for outlier detection.\n time_scale : bool, default 'seconds'\n How to calculate derivatives, either with respect to the index values,\n on the time scale of 'seconds', 'minutes', 'hours', 'days', 'months' or\n 'years', or just sequentially, just getting the difference between\n consecutive values, 'False'. Only used if parameter 'signal' isn't set\n to 'value'.\n periods : int, default 1\n Defines the steps to take when calculating the derivative. When set to 1,\n it performs a normal backwards derivative. 
When set to 1, it performs a\n normal forwards derivative.\n\n Returns\n -------\n s_idx : pandas.Series or dask.Series\n Index derivative signal, on the desired time scale.\n '''\n # Calculate the signal index's derivative\n s_idx = s.index.to_series().diff()\n if isinstance(s_idx, dd.DataFrame):\n # Make the new derivative have the same divisions as the original signal\n s_idx = (s_idx.to_frame().rename(columns={s.index.name:'tmp_val'})\n .reset_index()\n .set_index(s.index.name, sorted=True, divisions=s.divisions)\n .tmp_val)\n # Convert derivative to the desired time scale\n if time_scale == 'seconds':\n s_idx = s_idx.dt.seconds\n elif time_scale == 'minutes':\n s_idx = s_idx.dt.seconds / 60\n elif time_scale == 'hours':\n s_idx = s_idx.dt.seconds / 3600\n elif time_scale == 'days':\n s_idx = s_idx.dt.seconds / 86400\n elif time_scale == 'months':\n s_idx = s_idx.dt.seconds / 2592000\n return s_idx\n\n\ndef threshold_outlier_detect(s, max_thrs=None, min_thrs=None, threshold_type='absolute',\n signal_type='value', time_scale='seconds',\n derivate_direction='backwards'):\n '''Detects outliers based on predetermined thresholds.\n\n Parameters\n ----------\n s : pandas.Series or dask.Series\n Series which will be analyzed for outlier detection.\n max_thrs : int or float, default None\n Maximum threshold, i.e. no normal value can be larger than this\n threshold, in the signal (or its n-order derivative) that we're\n analyzing.\n min_thrs : int or float, default None\n Minimum threshold, i.e. no normal value can be smaller than this\n threshold, in the signal (or its n-order derivative) that we're\n analyzing.\n threshold_type : string, default 'absolute'\n Determines if we're using threshold values with respect to the original\n scale of values, 'absolute', relative to the signal's mean, 'mean' or\n 'average', to the median, 'median' or to the standard deviation, 'std'.\n As such, the possible settings are ['absolute', 'mean', 'average',\n 'median', 'std'].\n signal_type : string, default 'value'\n Sets if we're analyzing the original signal value, 'value', its first\n derivative, 'derivative' or 'speed', or its second derivative, 'second\n derivative' or 'acceleration'. As such, the possible settings are\n ['value', 'derivative', 'speed', 'second derivative', 'acceleration'].\n time_scale : string or bool, default 'seconds'\n How to calculate derivatives, either with respect to the index values,\n on the time scale of 'seconds', 'minutes', 'hours', 'days', 'months' or\n 'years', or just sequentially, just getting the difference between\n consecutive values, 'False'. Only used if parameter 'signal' isn't set\n to 'value'.\n derivate_direction : string, default 'backwards'\n The direction in which we calculate the derivative, either comparing to\n previous values, 'backwards', or to the next values, 'forwards'. As such,\n the possible settings are ['backwards', 'forwards']. Only used if\n parameter 'signal' isn't set to 'value'.\n\n Returns\n -------\n outlier_s : pandas.Series or dask.Series\n Boolean series indicating where the detected outliers are.\n '''\n if signal_type.lower() == 'value':\n signal = s\n elif signal_type.lower() == 'derivative' or signal_type.lower() == 'speed':\n if derivate_direction.lower() == 'backwards':\n periods = 1\n elif derivate_direction.lower() == 'forwards':\n periods = -1\n else:\n raise Exception(f'ERROR: Invalid derivative direction. 
It must either be \"backwards\" or \"forwards\", not {derivate_direction}.')\n # Calculate the difference between consecutive values\n signal = s.diff(periods)\n if time_scale is not None:\n # Derivate by the index values\n signal = signal / signal_idx_derivative(signal, time_scale, periods)\n elif (signal_type.lower() == 'second derivative'\n or signal_type.lower() == 'acceleration'):\n if derivate_direction.lower() == 'backwards':\n periods = 1\n elif derivate_direction.lower() == 'forwards':\n periods = -1\n else:\n raise Exception(f'ERROR: Invalid derivative direction. It must either be \"backwards\" or \"forwards\", not {derivate_direction}.')\n # Calculate the difference between consecutive values\n signal = s.diff(periods).diff(periods)\n if time_scale is not None:\n # Derivate by the index values\n signal = signal / signal_idx_derivative(signal, time_scale, periods)\n else:\n raise Exception('ERROR: Invalid signal type. It must be \"value\", \"derivative\", \"speed\", \"second derivative\" or \"acceleration\", not {signal}.')\n if threshold_type.lower() == 'absolute':\n signal = signal\n elif threshold_type.lower() == 'mean' or threshold_type.lower() == 'average':\n signal_mean = signal.mean()\n if isinstance(signal, dd.DataFrame):\n # Make sure that the value is computed, in case we're using Dask\n signal_mean = signal_mean.compute()\n # Normalize by the average value\n signal = signal / signal_mean\n elif threshold_type.lower() == 'median':\n if isinstance(signal, dd.DataFrame):\n # Make sure that the value is computed, in case we're using Dask\n signal_median = signal.compute().median()\n else:\n signal_median = signal.median()\n # Normalize by the median value\n signal = signal / signal_median\n elif threshold_type.lower() == 'std':\n signal_mean = signal.mean()\n signal_std = signal.std()\n if isinstance(signal, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n signal_mean = signal_mean.compute()\n signal_std = signal_std.compute()\n # Normalize by the average and standard deviation values\n signal = (signal - signal_mean) / signal_std\n else:\n raise Exception(f'ERROR: Invalid value type. It must be \"absolute\", \"mean\", \"average\", \"median\" or \"std\", not {threshold_type}.')\n\n # Search for outliers based on the given thresholds\n if max_thrs is not None and min_thrs is not None:\n outlier_s = (signal > max_thrs) | (signal < min_thrs)\n elif max_thrs is not None:\n outlier_s = signal > max_thrs\n elif min_thrs is not None:\n outlier_s = signal < min_thrs\n else:\n raise Exception('ERROR: At least a maximum or a minimum threshold must be set. Otherwise, no outlier will ever be detected.')\n\n return outlier_s\n\n\ndef slopes_outlier_detect(s, max_thrs=4, bidir_sens=0.5, threshold_type='std',\n time_scale='seconds', only_bir=False):\n '''Detects outliers based on large variations on the signal's derivatives,\n either in one direction or on both at the same time.\n\n Parameters\n ----------\n s : pandas.Series or dask.Series\n Series which will be analyzed for outlier detection.\n max_thrs : int or float\n Maximum threshold, i.e. no point can have a magnitude derivative value\n deviate more than this threshold, in the signal that we're analyzing.\n bidir_sens : float, default 0.5\n Dictates how much more sensitive the algorithm is when a deviation (i.e.\n large variation) is found on both sides of the data point / both\n directions of the derivative. 
In other words, it's a factor that will be\n multiplied by the usual one-directional threshold (`max_thrs`), from which\n the resulting value will be used as the bidirectional threshold.\n threshold_type : string, default 'std'\n Determines if we're using threshold values with respect to the original\n scale of derivative values, 'absolute', relative to the derivative's\n mean, 'mean' or 'average', to the median, 'median' or to the standard\n deviation, 'std'. As such, the possible settings are ['absolute', 'mean',\n 'average', 'median', 'std'].\n time_scale : string or bool, default 'seconds'\n How to calculate derivatives, either with respect to the index values,\n on the time scale of 'seconds', 'minutes', 'hours', 'days', 'months' or\n 'years', or just sequentially, just getting the difference between\n consecutive values, 'False'. Only used if parameter 'signal' isn't set\n to 'value'.\n only_bir : bool, default False\n If set to True, the algorithm will only check for data points that have\n large derivatives on both directions.\n\n Returns\n -------\n outlier_s : pandas.Series or dask.Series\n Boolean series indicating where the detected outliers are.\n '''\n # Calculate the difference between consecutive values\n bckwrds_deriv = s.diff()\n frwrds_deriv = s.diff(-1)\n if time_scale is not None:\n # Derivate by the index values\n bckwrds_deriv = bckwrds_deriv / signal_idx_derivative(bckwrds_deriv, time_scale, periods=1)\n frwrds_deriv = frwrds_deriv / signal_idx_derivative(frwrds_deriv, time_scale, periods=-1)\n if threshold_type.lower() == 'absolute':\n bckwrds_deriv = bckwrds_deriv\n frwrds_deriv = frwrds_deriv\n elif threshold_type.lower() == 'mean' or threshold_type.lower() == 'average':\n bckwrds_deriv_mean = bckwrds_deriv.mean()\n frwrds_deriv_mean = frwrds_deriv.mean()\n if isinstance(bckwrds_deriv, dd.DataFrame):\n # Make sure that the value is computed, in case we're using Dask\n bckwrds_deriv_mean = bckwrds_deriv_mean.compute()\n frwrds_deriv_mean = frwrds_deriv_mean.compute()\n # Normalize by the average value\n bckwrds_deriv = bckwrds_deriv / bckwrds_deriv_mean\n frwrds_deriv = frwrds_deriv / frwrds_deriv_mean\n elif threshold_type.lower() == 'median':\n bckwrds_deriv_median = bckwrds_deriv.median()\n frwrds_deriv_median = frwrds_deriv.median()\n if isinstance(bckwrds_deriv, dd.DataFrame):\n # Make sure that the value is computed, in case we're using Dask\n bckwrds_deriv_median = bckwrds_deriv_median.compute()\n frwrds_deriv_median = frwrds_deriv_median.compute()\n # Normalize by the median value\n bckwrds_deriv = bckwrds_deriv / bckwrds_deriv_median\n frwrds_deriv = frwrds_deriv / frwrds_deriv_median\n elif threshold_type.lower() == 'std':\n bckwrds_deriv_mean = bckwrds_deriv.mean()\n frwrds_deriv_mean = frwrds_deriv.mean()\n bckwrds_deriv_std = bckwrds_deriv.std()\n frwrds_deriv_std = frwrds_deriv.std()\n if isinstance(bckwrds_deriv, dd.DataFrame):\n # Make sure that the values are computed, in case we're using Dask\n bckwrds_deriv_mean = bckwrds_deriv_mean.compute()\n frwrds_deriv_mean = frwrds_deriv_mean.compute()\n bckwrds_deriv_std = bckwrds_deriv_std.compute()\n frwrds_deriv_std = frwrds_deriv_std.compute()\n # Normalize by the average and standard deviation values\n bckwrds_deriv = (bckwrds_deriv - bckwrds_deriv_mean) / bckwrds_deriv_std\n frwrds_deriv = (frwrds_deriv - frwrds_deriv_mean) / frwrds_deriv_std\n else:\n raise Exception('ERROR: Invalid value type. 
It must be \"absolute\", \"mean\", \"average\", \"median\" or \"std\", not {threshold_type}.')\n\n # Bidirectional threshold, to be used when observing both directions of the derivative\n bidir_max = bidir_sens * max_thrs\n if only_bir is True:\n # Search for outliers on both derivatives at the same time, always on their respective magnitudes\n outlier_s = (bckwrds_deriv.abs() > bidir_max) & (frwrds_deriv.abs() > bidir_max)\n else:\n # Search for outliers on each individual derivative, followed by both at the same time with a lower threshold, always on their respective magnitudes\n outlier_s = ((bckwrds_deriv.abs() > max_thrs) | (frwrds_deriv.abs() > max_thrs)\n | ((bckwrds_deriv.abs() > bidir_max) & (frwrds_deriv.abs() > bidir_max)))\n return outlier_s\n\n\ndef save_chunked_data(df, file_name, n_chunks=None, batch_size=1,\n id_column=None, data_path='', format='feather'):\n '''Save a dataframe in chunks, i.e. in separate files, so as to prevent\n memory issues and other problems when loading it back again.\n\n Parameters\n ----------\n df : pandas.DataFrame or dask.DataFrame\n Dataframe which will be saved in chunks.\n file_name : str\n Name to be given to the file.\n n_chunks : int, default None\n Number of chunks, i.e. number of files, on which to split and save the\n dataframe.\n batch_size : int, default 1\n Defines the batch size, i.e. the number of samples used in each\n training iteration to update the model's weights.\n id_column : string, default None\n Name of the column which corresponds to the sequence or subject identifier\n in the dataframe. If specified, the data will be saved in files\n containing a `batch_size` number of unique IDs. This is useful if we're\n working with large datasets, which therefore need to be loaded file by\n file, lazily, in each training or inference batch.\n data_path : str, default ''\n Directory path where the file will be stored.\n format : str, default 'feather'\n Data format used to saved the dataframe. Currently available options are\n 'feather'.\n '''\n n_rows = len(df)\n format = str(format).lower()\n if format == 'feather':\n file_ext = '.ftr'\n else:\n raise Exception(f'ERROR: Invalid data format \"{format}\". 
Please choose one of the currently supported formats \"feather\".')\n if n_chunks is not None:\n # Total number of rows per file\n chunk_size = int(n_rows / n_chunks)\n for i in du.utils.iterations_loop(range(n_chunks)):\n # Get a chunk of the dataframe\n if i < n_chunks-1:\n df_i = df.iloc[i*chunk_size:(i+1)*chunk_size]\n else:\n df_i = df.iloc[i*chunk_size:]\n # Reset the index, so as to make it feather compatible\n df_i.reset_index(drop=True, inplace=True)\n # Save the current dataframe\n df_i.to_feather(f'{data_path}{file_name}_{i}{file_ext}')\n # Remove the already saved dataframe from memory\n del df_i\n elif batch_size is not None and id_column is not None:\n # List of unique sequence identifiers\n ids = list(df[id_column].unique())\n # Number of unique IDs\n n_ids = len(ids)\n # Total number of files to be saved\n n_chunks = max(1, math.ceil(n_ids / batch_size))\n for i in du.utils.iterations_loop(range(n_chunks)):\n # Set the current batch's list of IDs\n if i < n_chunks-1:\n ids_i = ids[i*batch_size:(i+1)*batch_size]\n else:\n ids_i = ids[i*batch_size:]\n # Get a chunk of the dataframe\n df_i = df[df[id_column].isin(ids_i)]\n # Reset the index, so as to make it feather compatible\n df_i.reset_index(drop=True, inplace=True)\n # Save the current dataframe\n df_i.to_feather(f'{data_path}{file_name}_{i}{file_ext}')\n # Remove the already saved dataframe from memory\n del df_i\n else:\n raise Exception(f'ERROR: Invalid set of input parameters. The user must either specify a number of chunks (`n_chunks`) to save the data or a batch size (`batch_size`) and an ID column (`id_column`) on which to fetch sequences.')\n\n\ndef load_chunked_data(file_name, n_chunks=None, data_path='', format='feather',\n dtypes=None, ordered_naming=True):\n '''Load a dataframe in chunks, i.e. in separate files, so as to prevent\n memory issues and other problems when loading.\n\n Parameters\n ----------\n file_name : str\n Name of the file where the dataframe is saved.\n n_chunks : int, default None\n Number of chunks, i.e. number of files, needed to load the dataframe.\n If left unspecified, all the files that match the naming and format will\n be loaded.\n data_path : str, default ''\n Directory path where the file is stored.\n format : str, default 'feather'\n Data format used to saved the dataframe. Currently available options are\n 'feather'.\n dtypes : dict, default None\n Dictionary that indicates the desired dtype for each column.\n e.g. {'Var1': 'float64', 'Var2': 'UInt8', 'Var3': str}\n ordered_naming : bool, default True\n If set to True, the method will load data considering an ordered naming,\n staring in 0 until n_chunks. Otherwise, it will search for all files\n that have the specified naming and format, even if it uses a different\n or irregular numbering.\n\n Returns\n -------\n df : pandas.DataFrame or dask.DataFrame\n Loaded dataframe.\n '''\n # Validate the file format\n format = str(format).lower()\n if format == 'feather':\n file_ext = '.ftr'\n else:\n raise Exception(f'ERROR: Invalid data format \"{format}\". 
Please choose one of the currently supported formats \"feather\".')\n if n_chunks is None or ordered_naming is False:\n # Get a list with the names of the files that can be loaded\n data_files = glob(f'{data_path}{file_name}_*{file_ext}')\n if n_chunks is None:\n # Load all the files, if no limit is specified\n n_chunks = len(data_files)\n for i in du.utils.iterations_loop(range(n_chunks)):\n if i == 0:\n # Load the first file\n if ordered_naming is True:\n df = pd.read_feather(f'{data_path}{file_name}_{i}{file_ext}')\n else:\n df = pd.read_feather(data_files[i])\n if dtypes is not None:\n df = du.utils.convert_dtypes(df, dtypes=dtypes, inplace=True)\n else:\n # Load another file and join it with the already loaded ones\n if ordered_naming is True:\n tmp_df = pd.read_feather(f'{data_path}{file_name}_{i}{file_ext}')\n else:\n tmp_df = pd.read_feather(data_files[i])\n if dtypes is not None:\n tmp_df = du.utils.convert_dtypes(tmp_df, dtypes=dtypes, inplace=True)\n df = pd.concat((df, tmp_df))\n # Remove the already concatenated dataframe from memory\n del tmp_df\n return df\n","sub_path":"data_utils/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":118960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337429340","text":"\"\"\"version 1.3.0\n\nRevision ID: eb7141efd75a\nRevises: 430a70c8aa21\nCreate Date: 2016-01-06 13:38:46.918409\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'eb7141efd75a'\ndown_revision = '430a70c8aa21'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom frontend.models.sqlobjects import FileWeb\nfrom lib.common.utils import UUID\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\n\ndef upgrade():\n bind = op.get_bind()\n session = scoped_session(sessionmaker(autocommit=False, autoflush=False,\n bind=bind))\n op.add_column('irma_file', sa.Column('mimetype',\n sa.String(),\n nullable=True))\n op.add_column('irma_fileWeb', sa.Column('external_id',\n sa.String(length=36),\n nullable=True))\n op.add_column('irma_fileWeb', sa.Column('id_parent',\n sa.Integer(),\n nullable=True))\n op.add_column('irma_fileWeb', sa.Column('path',\n sa.String(length=255),\n nullable=True))\n\n # Create external_id as new uuid\n for fileweb in session.query(FileWeb).all():\n if fileweb.external_id is None:\n fileweb.external_id = UUID.generate()\n session.commit()\n # Now that all data are fixed set column to non nullable\n op.alter_column('irma_fileWeb', 'external_id', nullable=False)\n\n op.create_index(op.f('ix_irma_fileWeb_external_id'),\n 'irma_fileWeb',\n ['external_id'],\n unique=False)\n op.drop_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',\n 'irma_fileWeb',\n type_='unique')\n op.create_unique_constraint(None,\n 'irma_fileWeb',\n ['external_id'])\n op.create_foreign_key(None,\n 'irma_fileWeb',\n 'irma_file',\n ['id_parent'],\n ['id'])\n op.drop_column('irma_fileWeb', 'scan_file_idx')\n op.add_column('irma_scan', sa.Column('force',\n sa.Boolean(),\n nullable=True))\n op.add_column('irma_scan', sa.Column('mimetype_filtering',\n sa.Boolean(),\n nullable=True))\n op.add_column('irma_scan', sa.Column('probelist',\n sa.String(),\n nullable=True))\n op.add_column('irma_scan', sa.Column('resubmit_files',\n sa.Boolean(),\n nullable=True))\n op.add_column('irma_tag', sa.Column('text',\n sa.String(),\n nullable=False))\n op.drop_column('irma_tag', 'name')\n\n\ndef downgrade():\n bind = op.get_bind()\n session = 
scoped_session(sessionmaker(autocommit=False, autoflush=False,\n bind=bind))\n op.add_column('irma_tag', sa.Column('name',\n sa.VARCHAR(),\n autoincrement=False,\n nullable=False))\n op.drop_column('irma_tag', 'text')\n op.drop_column('irma_scan', 'resubmit_files')\n op.drop_column('irma_scan', 'probelist')\n op.drop_column('irma_scan', 'mimetype_filtering')\n op.drop_column('irma_scan', 'force')\n op.add_column('irma_fileWeb', sa.Column('scan_file_idx',\n sa.INTEGER(),\n autoincrement=False,\n nullable=True))\n\n # Create scan_file_idx autoincrement per scan\n last_id_scan = None\n scan_idx = 0\n for fileweb in session.query(FileWeb).all():\n if last_id_scan != fileweb.id_scan:\n last_id_scan = fileweb.id_scan\n scan_idx = 0\n if fileweb.scan_file_idx is None:\n fileweb.scan_file_idx = scan_idx\n scan_idx += 1\n # Now that all data are fixed set column to non nullable\n op.alter_column('irma_fileWeb', 'scan_file_idx', nullable=False)\n\n op.drop_constraint(None, 'irma_fileWeb', type_='foreignkey')\n op.drop_constraint(None, 'irma_fileWeb', type_='unique')\n op.create_unique_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',\n 'irma_fileWeb',\n ['id_scan', 'scan_file_idx'])\n op.drop_index(op.f('ix_irma_fileWeb_external_id'),\n table_name='irma_fileWeb')\n op.drop_column('irma_fileWeb', 'path')\n op.drop_column('irma_fileWeb', 'id_parent')\n op.drop_column('irma_fileWeb', 'external_id')\n op.drop_column('irma_file', 'mimetype')\n","sub_path":"frontend/extras/migration/versions/eb7141efd75a_version_1_3_0.py","file_name":"eb7141efd75a_version_1_3_0.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"368462726","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom CyberDance import functions\n\n\ndef home(request, alert=''):\n base = functions.which_base(request)\n not_logged = True\n if request.user.is_authenticated:\n not_logged = False\n context = {'base': base, 'alert': alert, 'not_logged': not_logged}\n return render(request, 'room/home.html', context)\n\n\ndef room(request):\n base = functions.which_base(request)\n context = {'base': base, 'rooms_active': 'active'}\n return render(request, 'room/room.html', context)\n","sub_path":"room/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"166613865","text":"import random\nfrom Cross.crossover import Crossover\nfrom items import Items\n\n\nclass OnePointCross(Crossover):\n\n def __init__(self, parent_size, genome_size, item_keys):\n super().__init__(parent_size, genome_size, item_keys)\n\n def crossover(self, parent1, parent2):\n p = random.randint(0, self.genome_size)\n parent_keys = list(parent1.items.equipment.keys())\n\n item_len = len(parent_keys)\n\n items1 = {}\n items2 = {}\n height1 = parent1.height\n height2 = parent2.height\n\n for i in range(self.genome_size):\n if i >= p:\n if i < item_len:\n items1[parent_keys[i]] = parent2.items.equipment[parent_keys[i]]\n items2[parent_keys[i]] = parent1.items.equipment[parent_keys[i]]\n else:\n height1 = parent2.height\n height2 = parent1.height\n else:\n if i < item_len:\n items1[parent_keys[i]] = parent1.items.equipment[parent_keys[i]]\n items2[parent_keys[i]] = parent2.items.equipment[parent_keys[i]]\n\n child1 = parent1.create_child(\n Items(items1[\"weapon\"], items1[\"boots\"], items1[\"helmet\"], items1[\"gloves\"], 
items1[\"chest\"]), height1)\n child2 = parent1.create_child(\n Items(items2[\"weapon\"], items2[\"boots\"], items2[\"helmet\"], items2[\"gloves\"], items2[\"chest\"]), height2)\n return child1, child2\n","sub_path":"TP2/Cross/one_point.py","file_name":"one_point.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"94417874","text":"# Copyright (C) 2014 Codethink Limited\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n\nimport logging\nimport time\n\nimport bottle\n\nimport lorrycontroller\n\n\nclass JobShower(object):\n\n def get_job_as_json(self, statedb, job_id):\n path = statedb.get_job_path(job_id)\n exit = statedb.get_job_exit(job_id)\n output = statedb.get_job_output(job_id)\n started, ended = statedb.get_job_started_and_ended(job_id)\n disk_usage = statedb.get_job_disk_usage(job_id)\n now = statedb.get_current_time()\n\n return {\n 'job_id': job_id,\n 'host': statedb.get_job_minion_host(job_id),\n 'pid': statedb.get_job_minion_pid(job_id),\n 'path': statedb.get_job_path(job_id),\n 'exit': 'no' if exit is None else exit,\n 'disk_usage': disk_usage,\n 'disk_usage_nice': self.format_bytesize(disk_usage or 0),\n 'output': output,\n 'job_started': self.format_time(started),\n 'job_ended': '' if ended is None else self.format_time(ended),\n 'timestamp': self.format_time(now),\n }\n\n def format_time(self, timestamp):\n return time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(timestamp))\n\n def format_bytesize(self, num_bytes):\n if num_bytes is None:\n return 'unknown'\n mebibyte = 2**20\n return '%.1f MiB' % (float(num_bytes) / float(mebibyte))\n\n\nclass ShowJob(lorrycontroller.LorryControllerRoute):\n\n http_method = 'GET'\n path = '/1.0/job/'\n\n def run(self, **kwargs):\n logging.info('%s %s called', self.http_method, self.path)\n job_id = int(kwargs['job_id'])\n\n statedb = self.open_statedb()\n return JobShower().get_job_as_json(statedb, job_id)\n\n\nclass ShowJobHTML(lorrycontroller.LorryControllerRoute):\n\n http_method = 'GET'\n path = '/1.0/job-html/'\n\n def run(self, **kwargs):\n logging.info('%s %s called', self.http_method, self.path)\n job_id = int(kwargs['job_id'])\n\n statedb = self.open_statedb()\n variables = JobShower().get_job_as_json(statedb, job_id)\n return bottle.template(self._templates['job'], **variables)\n","sub_path":"lorrycontroller/showjob.py","file_name":"showjob.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"559876011","text":"#!/data/home/henry/anaconda3/bin/python3.6\n# -*- coding: utf-8 -*-\n# @File : Case_storm_area.py\n# @Author: Hanzhaohui\n# @Date : 2018/8/31\n# @Desc : 对给定区域进行平均,分析指定时间段内的时间序列\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nfrom Tools.Class_Read import 
Get_time, Getfile\nfrom Tools.Area_read import Read_area, area_mean\nfrom Tools.Class_Figure import Plotyy_time\n\n#*****************************************************************\n# 用户自��义\nYmin = 2005\nMmin = 11\nDmin = 10\nYmax = 2005\nMmax = 11\nDmax = 14\n\n# lat: 45~65 lon: 310~350\nlon1 = 330\nlon2 = 345\nlat1 = 50\nlat2 = 60\n# ******************************************************\n# 待读取文件列表\nList_tar, List_toc = Getfile.his_list(Ymin, Mmin, Ymax, Mmax)\ntime1 = str(Ymin) + str(Mmin).zfill(2) + str(Dmin).zfill(2)\ntime2 = str(Ymax) + str(Mmax).zfill(2) + str(Dmax).zfill(2)\n\n# 待读取文件名\nnc_tar = List_tar[0]\nnc_toc = List_toc[0]\n#*****************************************************************\n# 读取指定时间、经纬度的变量\n# Read_area类:获取index\nxtar = Read_area(time1, time2, lon1, lon2, lat1, lat2, nc_tar)\nu_tar = xtar.read_var('u')\nv_tar = xtar.read_var('v')\nsustr_tar = xtar.read_var('sustr')\nsvstr_tar = xtar.read_var('svstr')\n\nxtoc = Read_area(time1, time2, lon1, lon2, lat1, lat2, nc_toc)\nu_toc = xtoc.read_var('u')\nv_toc = xtoc.read_var('v')\nsustr_toc = xtoc.read_var('sustr')\nsvstr_toc = xtoc.read_var('svstr')\n#*****************************************************************\n# 读取时间\n# 查找最近点\nIndex1 = xtar.Itime1\nIndex2 = xtar.Itime2\ntimex = Get_time.nc_time(List_tar[0], Ntime1=Index1, Ntime2=Index2)\nYoing = '2000-01-01'\n# 转换成时间数组\nYtime0 = time.strptime(Yoing, '%Y-%m-%d')\n# 转换成时间戳\nYtime = time.mktime(Ytime0)\n# print(Ytime)\ntimex = timex + Ytime\n#*****************************************************************\n# 计算标量场\nwpi_tar = u_tar * sustr_tar + v_tar * svstr_tar\nuv_tar = np.sqrt(u_tar**2 + v_tar**2)\nsuvstr_tar = np.sqrt(sustr_tar**2 + svstr_tar**2)\n\nwpi_toc = u_toc * sustr_toc + v_toc * svstr_toc\nuv_toc = np.sqrt(u_toc**2 + v_toc**2)\nsuvstr_toc = np.sqrt(sustr_toc**2 + svstr_toc**2)\n\n# 平均\nMuv_tar = area_mean(uv_tar)\nMsuvstr_tar = area_mean(suvstr_tar)\nMwpi_tar = area_mean(wpi_tar)\n\nMuv_toc = area_mean(uv_toc)\nMsuvstr_toc = area_mean(suvstr_toc)\nMwpi_toc = area_mean(wpi_toc)\n# *********************************************************\n# 画图\nfig1 = plt.figure(figsize=(9, 6))\n\nax1 = fig1.add_subplot(3, 1, 1)\nPlotyy_time().fig(ax1, Msuvstr_tar, Msuvstr_toc, timex, 'suvstr')\ntitle=f'Area average [lon:{lon1}~{lon2}, lat:{lat1}~{lat2}]'\nax1.set_title(title)\n\nax2 = fig1.add_subplot(3, 1, 2)\nPlotyy_time().fig(ax2, Muv_tar, Muv_toc, timex, 'uv')\n\nax3 = fig1.add_subplot(3, 1, 3)\nPlotyy_time().fig(ax3, Mwpi_tar, Mwpi_toc, timex, 'wpi')\nax3.set_xlabel('Time (month/day)')\n\n# 保存图像\nfig_name=f'figure/Storm_area_{time1}-{time2}_' \\\n f'lon_{lon1}-{lon2}_lat_{lat1}-{lat2}.png'\nfig1.savefig(fig_name,dpi=200)\nplt.show()\n","sub_path":"case/Case_storm_area.py","file_name":"Case_storm_area.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"116443892","text":"\r\nimport os\r\n\r\nfrom assembler import asm\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n asm_file = '../../doublegap/DoubleGap.asm'\r\n\r\n abpath = os.path.abspath(asm_file)\r\n i = abpath.rindex('.')\r\n basepath = abpath[0:i]\r\n\r\n list_file = basepath + '.lst'\r\n bin_file = basepath + '.bin'\r\n\r\n asa = asm.Assembler(asm_file)\r\n\r\n try:\r\n asa.assemble()\r\n asa.write_listing(list_file)\r\n asa.write_binary(bin_file)\r\n except asm.ASMException as ex:\r\n print(ex, 
ex.line)\r\n","sub_path":"src/app_dis.py","file_name":"app_dis.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"462188426","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/ntlib/NTLArchive/NTLPolynomialEuclideanDivision.py\n# Compiled at: 2018-04-23 08:51:10\nimport copy\nfrom .NTLUtilities import jsrange\nfrom .NTLValidations import int_check, list_check\n__all__ = [\n 'polyED', 'polyEDLoop']\nnickname = 'polydiv'\n\ndef polyED(dvdExp, dvdCoe, dvsExp, dvsCoe):\n list_check(dvdExp, dvdCoe, dvsExp, dvsCoe)\n ecDictDividend = {}\n for ptr in jsrange(len(dvdExp)):\n exp_ = dvdExp[ptr]\n coe_ = dvdCoe[ptr]\n int_check(exp_, coe_)\n ecDictDividend[exp_] = coe_\n\n ecDictDivisor = {}\n for ptr in jsrange(len(dvsExp)):\n exp_ = dvsExp[ptr]\n coe_ = dvsCoe[ptr]\n int_check(exp_, coe_)\n ecDictDivisor[exp_] = coe_\n\n ecDictQuotient = {}\n ecDictRemainder = {}\n ecDictQuotient, ecDictRemainder = polyEDLoop(ecDictDividend, ecDictDivisor, ecDictQuotient, ecDictRemainder)\n qttCoe = []\n qttExp = sorted(ecDictQuotient.keys(), reverse=True)\n for exp in qttExp:\n qttCoe.append(ecDictQuotient[exp])\n\n rmdCoe = []\n rmdExp = sorted(ecDictRemainder.keys(), reverse=True)\n for rmd in rmdExp:\n rmdCoe.append(ecDictRemainder[rmd])\n\n return (qttExp, qttCoe, rmdExp, rmdCoe)\n\n\ndef polyEDLoop(ecDictDividend, ecDictDivisor, ecDictQuotient, ecDictRemainder):\n ecDDvdCopy = copy.deepcopy(ecDictDividend)\n ecDDvdExpMax = max(ecDictDividend.keys())\n ecDDvsExp = sorted(ecDictDivisor.keys(), reverse=True)\n if ecDictDivisor[ecDDvsExp[0]] != 1:\n flag = True\n ecDDvsCoeMax = ecDictDivisor[ecDDvsExp[0]]\n if ecDictDividend[ecDDvdExpMax] % ecDDvsCoeMax == 0:\n mul_ = ecDictDividend[ecDDvdExpMax] // ecDDvsCoeMax\n for key in ecDictDivisor.keys():\n if ecDictDivisor[key] * mul_ != ecDictDividend[key]:\n break\n else:\n ecDictQuotient = copy.deepcopy(ecDictDivisor)\n return (ecDictQuotient, ecDictRemainder)\n\n for exp in ecDDvsExp:\n if ecDictDivisor[exp] % ecDDvsCoeMax != 0:\n ecDictRemainder = copy.deepcopy(ecDictDividend)\n return (\n ecDictQuotient, ecDictRemainder)\n ecDictDivisor[exp] //= ecDDvsCoeMax\n\n for key in ecDictDividend.keys():\n if ecDictDividend[key] % ecDDvsCoeMax != 0:\n ecDictRemainder = copy.deepcopy(ecDictDividend)\n return (\n ecDictQuotient, ecDictRemainder)\n ecDDvdCopy[key] //= ecDDvsCoeMax\n\n ecDictDividend = copy.deepcopy(ecDDvdCopy)\n while ecDDvdExpMax >= ecDDvsExp[0]:\n ecDQttCoe = ecDictDividend[ecDDvdExpMax]\n ecDQttExp = ecDDvdExpMax - ecDDvsExp[0]\n ecDictQuotient[ecDQttExp] = ecDQttCoe\n for exp in ecDDvsExp:\n tmpexp = exp + ecDQttExp\n if tmpexp in ecDictDividend:\n ecDictDividend[tmpexp] -= ecDictDivisor[exp] * ecDQttCoe\n if ecDictDividend[tmpexp] == 0:\n ecDictDividend.pop(tmpexp)\n else:\n ecDictDividend[tmpexp] = -1 * ecDictDivisor[exp] * ecDQttCoe\n\n try:\n ecDDvdExpMax = max(ecDictDividend.keys())\n except ValueError:\n ecDictRemainder = {}\n return (\n ecDictQuotient, ecDictRemainder)\n\n ecDictRemainder = ecDictDividend.copy()\n return (ecDictQuotient, 
ecDictRemainder)","sub_path":"pycfiles/pyntlib-1.5.0.post4-py2.7/NTLPolynomialEuclideanDivision.py","file_name":"NTLPolynomialEuclideanDivision.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"7136350","text":"#-*- coding: utf-8 -*-\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd\n \n# 输出中文\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\nwb = xlrd.open_workbook(\"C:/Users/fan shiyu/Desktop/项目数据/程序/2.4/aaa.xlsx\")\nsh = wb.sheet_by_index(0) # 第一个表\nx = sh.col_values(0)#读取一列的数据\ny = sh.col_values(1)#读取二列的数据\nz = sh.col_values(2)\n\n\n# Plot the points\nplt.scatter(x, y, s=z*3, c=z, alpha=1.0)\nplt.xlabel('售货机编号')\nplt.ylabel('月订单量')\nplt.title('4月每台售货机交易额与订单量气泡图')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/task2/2.4/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"299243152","text":"# -*- coding: utf-8 -*-\n\n# sys\nimport json\nimport math\n# thirdpart\nimport pandas as pd\nimport numpy as np\n\n# this project\nif __name__ == '__main__':\n import sys\n\n sys.path.append('/home/ken/workspace/code/self/github/py-code/new_stock')\n##########################\nimport sys\nprint(sys.path)\nimport util\nimport util.utils\nimport const\nimport adjust.loop as loop\n\n\n\npriorXQ = util.priorXQuarter\npriorQ = util.priorQuarter\nnextXQ = util.nextXQuarter\n\nKN = const.CWSJ_KEYWORD.ADJUST_NAME\nID_NAME = const.CWSJ_KEYWORD.ID_NAME\nKEY_NAME = const.CWSJ_KEYWORD.KEY_NAME\nADJUST_NAME = const.CWSJ_KEYWORD.ADJUST_NAME\nMONGODB_ID = const.MONGODB_ID\n\n\n\nclass GenLastYearProfit(loop.AdjustOPSimpleColumnCheck):\n @property\n def keyP(self):\n return ADJUST_NAME['LastYearProfit']\n\n @property\n def keyR(self):\n return ADJUST_NAME['LastYearROE']\n\n def columns(self):\n return [self.keyP, self.keyR]\n\n def baseColumns(self):\n return [self.keyP, ]#self.keyR]\n\n def op(self, data):\n for date, row in data.iterrows():\n try:\n d = util.priorYear(date)\n d = util.getFourthQuarter(d)\n data.loc[date, self.keyP] = data.loc[d, KEY_NAME['jbmgsy']]\n data.loc[date, self.keyR] = data.loc[d, KEY_NAME['jqjzcsyl']]\n except TypeError as e:\n print(e)\n except KeyError as e:\n print(e)\n","sub_path":"new_stock/adjust/cwsj/lastYearProfit.py","file_name":"lastYearProfit.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363128268","text":"# Opus/UrbanSim urban simulation software.\r\n# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington\r\n# See opus_core/LICENSE\r\n\r\nfrom opus_core.variables.variable import Variable\r\nfrom variable_functions import my_attribute_label\r\nfrom opus_core.simulation_state import SimulationState\r\nfrom numpy import maximum, ma\r\n\r\nclass has_valid_year_built(Variable):\r\n \"\"\"If buildings have valid year_built or not.\"\"\"\r\n\r\n year_built = \"year_built\"\r\n\r\n def dependencies(self):\r\n return [my_attribute_label(self.year_built)]\r\n\r\n def compute(self, dataset_pool):\r\n urbansim_constant = dataset_pool.get_dataset('urbansim_constant')\r\n return self.get_dataset().get_attribute(self.year_built) >= urbansim_constant[\"absolute_min_year\"]\r\n\r\n def post_check(self, values, dataset_pool):\r\n 
self.do_check(\"x == False or x == True\", values)\r\n\r\n\r\nfrom opus_core.tests import opus_unittest\r\nfrom opus_core.tests.utils.variable_tester import VariableTester\r\nfrom numpy import array\r\nclass Tests(opus_unittest.OpusTestCase):\r\n def test_my_inputs(self):\r\n tester = VariableTester(\r\n __file__,\r\n package_order=['urbansim_parcel','urbansim'],\r\n test_data={\r\n 'building':{\r\n 'building_id': array([1,2,3,4]),\r\n 'year_built': array([1995, 1800, 2006, 200])\r\n },\r\n 'urbansim_constant':{\r\n 'absolute_min_year': array([1800])\r\n }\r\n }\r\n )\r\n\r\n SimulationState().set_current_time(2005)\r\n should_be = array([True, True, True, False])\r\n tester.test_is_equal_for_variable_defined_by_this_module(self, should_be)\r\n\r\nif __name__=='__main__':\r\n opus_unittest.main()","sub_path":"urbansim_parcel/building/has_valid_year_built.py","file_name":"has_valid_year_built.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"46136554","text":"import requests\nimport json\nimport random, string\nfrom datetime import datetime\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\n\n\ndef send(videofile, id_stream, object_name):\n current_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S')\n print(current_date)\n id_new = ''.join(random.choices(string.ascii_letters + string.digits, k=16)) # test\n\n current_enterprise = '0KVmDvc9'\n # crée racine\n table_name = 'IA_RESULT'\n url_sync_put = 'http://api5.securemotion.fr/modules/gabarit/api/securemotion/sync_put.php?current_enterprise=0KVmDvc9&tbl_name=' + table_name\n print(url_sync_put)\n\n # créer data à envoyer\n send_data = [{\"id\": id_new, \"created_on\": current_date, \"deleted_on\": \"\", \"updated_on\": current_date,\n \"_id_stream\": id_stream, \"_id_object\": object_name}]\n json_dump = json.dumps(send_data)\n\n send_me = {\"tbl_name\": table_name, \"_id_login\": \"server\", \"db_data\": json_dump}\n print(send_me)\n\n r = requests.post(url_sync_put, json=send_me)\n r.status_code\n\n print('envoi ok')\n filename = videofile\n id_file = ''.join(random.choices(string.ascii_letters + string.digits, k=16)) # test\n\n # créer data à envoyer\n json_file = [\n {\"id\": id_file, \"created_on\": current_date, \"deleted_on\": \"\", \"article_id\": id_new, \"updated_on\": current_date}]\n json_dump = json.dumps(json_file)\n\n print('id_file => ' + id_file)\n\n ext_file = \"avi\"\n url_upload = 'http://api5.securemotion.fr/acces/api/sql/file/upload.php?current_enterprise=0KVmDvc9&db=' + table_name + \"&table=\" + table_name + \"&id_file=\" + id_file\n\n multipart_data = MultipartEncoder(\n fields={\n # a file upload field\n 'file': (filename, open(filename, 'rb'), 'text/plain'),\n 'id_file': id_file,\n 'ext_file': ext_file,\n 'cat': '0',\n 'article_id': id_new,\n 'json_file': json_dump,\n 'db_name': table_name,\n 'db_mode': 'mysql',\n 'current_enterprise': current_enterprise,\n 'url_client': '',\n 'source_type': 'android'\n }\n )\n\n response = requests.post(url_upload, data=multipart_data, headers={'Content-Type': multipart_data.content_type})\n","sub_path":"test_upload.py","file_name":"test_upload.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"309040653","text":"# -*- coding: utf-8 -*-\n# pylint: disable=C0413\n\n'''\nThis module is to serve as the line-oriented command interpreter for data\nanalysis modules 
defined in the data_analysis package.\nIt is to be run with frameworkpython2/3 defined in for_matplotlib_on_osx\nin the root of the project.\n'''\n\nimport argparse\nimport cmd\nimport datetime\nimport os\nimport re\nimport sys\nfrom pathlib import Path\nimport numpy as np\nfrom exploratory_data_analysis import ExploratoryDataAnalyzer\n\n_P = Path(os.path.abspath(__file__)).parent.parent\nsys.path.insert(0, str(_P))\n\nimport book_parsing.new_money_manager as m # noqa: E402\nimport household_account_data as rd # noqa: E402\nfrom household_account_data import NmmDataProvider # noqa: E402\n\n\nclass AnalysisInterpreter(cmd.Cmd):\n '''\n a command interpreter module as the interface for data analysis modules\n '''\n intro = 'Welcome to the account book data analysis shell. ' +\\\n 'Type help or ? to list commands.\\n'\n prompt = '(AI) '\n\n def __init__(self, eda, dp):\n ''' init method '''\n super().__init__()\n self._eda = eda\n self._dp = dp\n self._file = None\n\n def do_6numbersummary(self, arg):\n '''\n Get the six number summary for a numeric column of the account book\n data.\n '''\n if not arg:\n print(\"[ERROR] invalid input argument passed.\")\n return\n summary = self._eda.get_6_number_summary(str(arg))\n print(\"6 number summary for the column '\" + str(arg) + \"':\\n\" +\n str(summary))\n\n def do_frequencytable(self, arg):\n ''' Get the frequency table details. '''\n if not arg:\n print(\"[ERROR] invalid input argument passed.\")\n return\n freq_table = self._eda.get_frequency_table(str(arg))\n if freq_table is None:\n print(\"[ERROR] frequency table for \" + str(arg) +\n \" is not available\")\n return\n print(\"frequency table for the column '\" + str(arg) + \"':\")\n for key in freq_table:\n print(\"\\t%-40s %d\" % (key, freq_table[key]))\n\n def do_surplus_income_by_year_end(self, _):\n ''' Estimate the surplus income by the year end. '''\n # this method doesn't require any argument\n monthly_exp = self._dp.get_monthly_expenditures()\n avg_exp = np.mean(\n [monthly_exp[k] for k in sorted(monthly_exp.keys())[-3:]])\n monthly_inc = self._dp.get_monthly_incomes()\n avg_inc = np.mean(\n [monthly_inc[k] for k in sorted(monthly_inc.keys())[-3:]])\n avg_monthly_surplus = avg_inc - avg_exp\n now = datetime.datetime.now()\n remaining_months = 13 - now.month\n print(\"Estimated surplus income, using the last 3 months' records, \" +\n \"from \" + now.strftime(\"%Y.%m\") + \" to the year end: \" +\n str(int(avg_monthly_surplus * remaining_months)))\n\n def do_salary_monthly_income_ratio(self, _):\n ''' Calculate the ratio of salary to the last month's total income '''\n # this method doesn't require any argument\n monthly_income = self._dp.get_monthly_incomes()\n last_month = \"{}. {:02}\".format(datetime.datetime.now().year,\n datetime.datetime.now().month - 1)\n if last_month not in monthly_income:\n print(\"[ERROR] book data has no income records for \" + last_month)\n return\n print(monthly_income[last_month])\n salary_df = self._dp.get_query_result(\n m.COLUMN_NAMES[m.SUBJECT] + ' == \"' +\n m.INCOME_SUBCATEGORIES[\"SALARY\"] + '\"')\n salary = salary_df.iloc[0][m.COLUMN_NAMES[m.KRW]]\n print(\"ratio of salary to the month's income: \" +\n str(salary/monthly_income[last_month]))\n\n def do_exit(self, arg):\n ''' Terminate the interpreter. 
'''\n _ = arg # simply suppress the stupid complaint by pylint\n print('Terminate the interpreter.')\n self.close()\n return True\n\n # ----- record and playback -----\n def do_record(self, arg):\n ''' save future commands to filename: record script.txt '''\n self._file = open(arg, 'w')\n\n def do_playback(self, arg):\n ''' playback commands from a file: playback script.txt '''\n self.close()\n with open(arg) as inf:\n self.cmdqueue.extend(inf.read().splitlines())\n\n def precmd(self, line):\n '''\n refer to https://docs.python.org/3/library/cmd.html#cmd.Cmd for\n details of this method\n '''\n # line = line.lower() # DO NOT LOWERCASE COMMAND NAME & ARGUMENTS\n if self._file and 'playback' not in line:\n print(line, file=self._file)\n return line\n\n def close(self):\n ''' to close an open commands record file, if any '''\n if self._file:\n self._file.close()\n self._file = None\n\n\ndef main(log_path, last_month):\n ''' main function '''\n if log_path is None or last_month is None:\n print(\"[ERROR] invalid input arguments passed.\")\n sys.exit(1)\n\n data_provider = NmmDataProvider(log_path, last_month)\n raw_data = data_provider.get_raw_household_data(rd.RAW_DATA_FORMAT[0])\n eda = ExploratoryDataAnalyzer(raw_data)\n _ai = AnalysisInterpreter(eda, data_provider)\n _ai.cmdloop()\n\n\nif __name__ == '__main__':\n print(\"[\" + __file__ + \"] main invoked\")\n\n AP = argparse.ArgumentParser(description=\"args parser\")\n AP.add_argument(\"-book_path\", action=\"store\", required=True,\n help=\"household account book file path\")\n AP.add_argument(\"-sentinel_month\",\n default=datetime.datetime.now().strftime(\"%Y. %m\"),\n help=\"the month next to the last of book-keeping period\")\n ARGS = AP.parse_args()\n\n if re.search(r'(\\d{4}. \\d{2})', ARGS.sentinel_month) is None:\n print(\"[ERROR] invalid sentinel month passed. \"\n \"(must be in 'YYYY. 
MM' format)\")\n sys.exit(2)\n\n main(ARGS.book_path, ARGS.sentinel_month)\n","sub_path":"data_analysis/analysis_cmd_interpret.py","file_name":"analysis_cmd_interpret.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"534781886","text":"width=input(\"Enter the width of your tree (Odd numbers only!)\")\nlength=input(\"Enter the length of the tree trunk\")\n\nw=int(width)\nl=int(length)\n\nt=(\"*\") #The character used to display the tree\nr=(1) #The start of the tree (top)\nn=(1) #The number of rows of the tree trunk\ne=(3) #The number of \"*\" character columns in the tree trunk\n\n\nwhile (r*t)<(w*t):\n print (r*t)\n r = r + 2\n\n\nwhile n 0, sample.pre_historic > 0)\n chi2, p, dof, ex = chi2_contingency(ctab, correction=False)\n\n t1 = time.time()\n results.append({'chi2': chi2, 'p': p, 'dof': dof, 'ex': ex, 'sample_size': sample_size,\n 'iteration': i, 'buffer_size': buffer_size, 'time': t1 - t0})\n\n return pd.DataFrame(results)\n\n# sensitivity analysis\nsample_sizes = [100, 200] # np.arange(500, 12500, 250)\nbuffer_sizes = [1600] # np.arange(800, 1600, 100)\n\nparams = list(itertools.product(sample_sizes, buffer_sizes))\n\nresults = list()\nfor parm in params:\n results.append(chi2_near_test(\n gdf, sample_size=parm[0], buffer_size=parm[1]))\n\ndf = pd.concat(results) # .to_csv('chi2_sensitivity_analysis.csv')\n","sub_path":"revised_chi_tests.py","file_name":"revised_chi_tests.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"320353783","text":"import logging\nimport re\n\nPROPERTY_KEY = re.compile(r'[-_.a-zA-Z0-9]+')\nPROPERTY_VALUE = re.compile(r'[^#]*')\nPROPERTY_KV = re.compile(r'^\\s*('+PROPERTY_KEY.pattern+r')\\s*=\\s*('+\n PROPERTY_VALUE.pattern+')')\n\nlog = logging.getLogger(__name__)\n\nclass AndroidPropertyList:\n def __init__(self):\n self.prop = {}\n\n def set(self, key, value):\n if not PROPERTY_KEY.match(key):\n raise ValueError(\"Invalid key format\")\n if not PROPERTY_VALUE.match(value):\n raise ValueError(\"Invalid value format\")\n\n self.prop[key] = value\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value):\n self.set(key, value)\n\n def __contains__(self, key):\n return key in self.prop\n\n def get(self, key):\n return self.prop[key]\n\n def get_default(self, key, default=\"\"):\n if key not in self.prop:\n return default\n else:\n return self.prop[key]\n\n def get_multi_default(self, keys, default=\"\"):\n \"\"\" Try multiple keys returning the first found or the default \"\"\"\n for key in keys:\n if key in self.prop:\n return self.prop[key]\n\n return default\n\n def merge(self, rhs):\n \"\"\" Merge another property list into this one \"\"\"\n assert isinstance(rhs, AndroidPropertyList)\n self._merge(rhs.prop)\n\n def _merge(self, other):\n for k, v in other.items():\n self.prop[k] = v\n\n def from_file(self, filename):\n prop_raw_data = open(filename, 'r').read()\n\n properties = {}\n\n for line_no, line in enumerate(prop_raw_data.split(\"\\n\")):\n # Ignore comments and blank lines\n if re.match(r'^(\\s*#)|(\\s*$)', line):\n continue\n\n # Ignore import statements\n if re.match('^import', line):\n log.warning(\"property_from_file: unhandled import statement at line '%d' in '%s'\",\n line_no+1, filename)\n continue\n\n # Match property assignments (right side can be blank)\n result = PROPERTY_KV.match(line)\n\n if not 
result:\n log.warn(\"property_from_file: failed to match line %d in %s\", line_no+1, filename)\n continue\n\n prop, value = result.groups()\n properties[prop] = value\n\n # Merge in the final found properties\n self._merge(properties)\n\n def to_file(self, filename):\n fp = open(filename, 'w')\n\n for k, v in self.prop.items():\n fp.write(\"%s=%s\\n\" % (k, v))\n\n fp.close()\n","sub_path":"eval/tools/android/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"512511125","text":"import warnings\nimport numpy as np\nfrom scipy.ndimage import map_coordinates\nfrom dipy.reconst.recspeed import peak_finding, le_to_odf, sum_on_blocks_1d\nfrom dipy.utils.spheremakers import sphere_vf_from\nfrom scipy.fftpack import fftn, fftshift, ifftn,ifftshift\nfrom dipy.reconst.dsi import project_hemisph_bvecs\nfrom scipy.ndimage.filters import laplace,gaussian_laplace\nfrom scipy.ndimage import zoom,generic_laplace,correlate1d\nfrom dipy.core.geometry import sphere2cart,cart2sphere,vec2vec_rotmat\n\n\n\nimport warnings\nwarnings.warn(\"This module is most likely to change both as a name and in structure in the future\",FutureWarning)\n\n\n\n\nclass DiffusionNabla(object):\n ''' Reconstruct the signal using Diffusion Nabla Imaging \n \n As described in E.Garyfallidis PhD thesis, 2011. \n '''\n def __init__(self, data, bvals, gradients,odf_sphere='symmetric362', \n mask=None,\n half_sphere_grads=False,\n auto=True,\n save_odfs=False,\n fast=True):\n '''\n Parameters\n -----------\n data : array, shape(X,Y,Z,D), or (X,D)\n bvals : array, shape (N,)\n gradients : array, shape (N,3) also known as bvecs \n odf_sphere : str or tuple, optional\n If str, then load sphere of given name using ``get_sphere``.\n If tuple, gives (vertices, faces) for sphere.\n filter : array, shape(len(vertices),) \n default is None (using standard hanning filter for DSI)\n half_sphere_grads : boolean Default(False) \n in order to create the q-space we use the bvals and gradients. 
\n If the gradients are only one hemisphere then \n auto : boolean, default True \n if True then the processing of all voxels will start automatically \n with the class constructor,if False then you will have to call .fit()\n in order to do the heavy duty processing for every voxel\n save_odfs : boolean, default False\n save odfs, which is memory expensive\n\n See also\n ----------\n dipy.reconst.dti.Tensor, dipy.reconst.dsi.DiffusionSpectrum\n '''\n \n #read the vertices and faces for the odf sphere\n odf_vertices, odf_faces = sphere_vf_from(odf_sphere)\n self.odf_vertices=odf_vertices\n self.odf_faces=odf_faces\n self.odfn=len(self.odf_vertices)\n self.save_odfs=save_odfs\n \n #check if bvectors are provided only on a hemisphere\n if half_sphere_grads==True:\n bvals=np.append(bvals.copy(),bvals[1:].copy())\n gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)\n data=np.append(data.copy(),data[...,1:].copy(),axis=-1)\n \n #load bvals and bvecs\n self.bvals=bvals\n gradients[np.isnan(gradients)] = 0.\n self.gradients=gradients\n #save number of total diffusion volumes\n self.dn=data.shape[-1] \n self.data=data\n self.datashape=data.shape #initial shape \n self.mask=mask\n #odf sampling radius \n self.radius=np.arange(0,6,.2)\n #self.radiusn=len(self.radius)\n #self.create_qspace(bvals,gradients,16,8)\n #peak threshold\n self.peak_thr=.4\n self.iso_thr=.7\n #calculate coordinates of equators\n #self.radon_params()\n #precompute coordinates for pdf interpolation\n #self.precompute_interp_coords() \n #self.precompute_fast_coords()\n self.zone=5.\n #self.precompute_equator_indices(self.zone)\n #precompute botox weighting\n #self.precompute_botox(0.05,.3)\n self.gaussian_weight=0.1\n #self.precompute_angular(self.gaussian_weight)\n \n self.fast=fast \n if fast==True: \n self.odf=self.fast_odf\n else:\n self.odf=self.slow_odf\n \n self.update()\n \n if auto:\n self.fit()\n \n def update(self): \n self.radiusn=len(self.radius)\n self.create_qspace(self.bvals,self.gradients,17,8)\n if self.fast==False: \n self.radon_params() \n self.precompute_interp_coords()\n if self.fast==True: \n self.precompute_fast_coords() \n self.precompute_equator_indices(self.zone) \n self.precompute_angular(self.gaussian_weight)\n \n \n def precompute_botox(self,smooth,level):\n self.botox_smooth=.05\n self.botox_level=.3\n \n def precompute_angular(self,smooth):\n if smooth==None: \n self.E=None\n return \n self.W=np.dot(self.odf_vertices,self.odf_vertices.T)\n self.W=self.W.astype('f8')\n E=np.exp(self.W/smooth)\n self.E=E/np.sum(E,axis=1)[:,None]\n \n \n def create_qspace(self,bvals,gradients,size,origin):\n bv=bvals\n bmin=np.sort(bv)[1]\n bv=np.sqrt(bv/bmin) \n qtable=np.vstack((bv,bv,bv)).T*gradients\n qtable=np.floor(qtable+.5)\n self.qtable=qtable\n self.q=qtable+origin\n self.q=self.q.astype('i8')\n self.origin=origin\n self.sz=size\n \n def radon_params(self,ang_res=64):\n #calculate radon integration parameters\n phis=np.linspace(0,2*np.pi,ang_res)[:-1]\n planars=[]\n for phi in phis:\n planars.append(sphere2cart(1,np.pi/2,phi))\n planars=np.array(planars)\n planarsR=[]\n for v in self.odf_vertices:\n R=vec2vec_rotmat(np.array([0,0,1]),v) \n planarsR.append(np.dot(R,planars.T).T) \n self.equators=planarsR\n self.equatorn=len(phis) \n \n def fit(self):\n #memory allocations for 4D volumes \n if len(self.datashape)==4:\n x,y,z,g=self.datashape \n S=self.data.reshape(x*y*z,g)\n GFA=np.zeros((x*y*z))\n IN=np.zeros((x*y*z,5))\n NFA=np.zeros((x*y*z,5))\n QA=np.zeros((x*y*z,5))\n 
PK=np.zeros((x*y*z,5))\n if self.save_odfs:\n ODF=np.zeros((x*y*z,self.odfn))\n #BODF=np.zeros((x*y*z,self.odfn)) \n if self.mask != None:\n if self.mask.shape[:3]==self.datashape[:3]:\n msk=self.mask.ravel().copy()\n if self.mask == None:\n self.mask=np.ones(self.datashape[:3])\n msk=self.mask.ravel().copy()\n #memory allocations for a series of voxels \n if len(self.datashape)==2:\n x,g= self.datashape\n S=self.data\n GFA=np.zeros(x)\n IN=np.zeros((x,5))\n NFA=np.zeros((x,5))\n QA=np.zeros((x,5))\n PK=np.zeros((x,5))\n if self.save_odfs:\n ODF=np.zeros((x,self.odfn))\n #BODF=np.zeros((x,self.odfn)) \n if self.mask != None:\n if self.mask.shape[0]==self.datashape[0]:\n msk=self.mask.ravel().copy()\n if self.mask == None:\n self.mask=np.ones(self.datashape[:1])\n msk=self.mask.ravel().copy()\n #find the global normalization parameter \n #useful for quantitative anisotropy\n glob_norm_param = 0.\n #loop over all voxels\n for (i,s) in enumerate(S):\n if msk[i]>0:\n #calculate the orientation distribution function \n #odf=self.odf(s)\n odf=self.odf(s) \n odf=self.angular_weighting(odf) \n if self.save_odfs:\n ODF[i]=odf \n #normalization for QA\n glob_norm_param=max(np.max(odf),glob_norm_param)\n #calculate the generalized fractional anisotropy\n GFA[i]=self.std_over_rms(odf) \n odf_max=odf.max()\n #if not in isotropic case\n #if odf.min() self.iso_thr: \n #find peaks\n peaks,inds=peak_finding(odf,self.odf_faces) \n ismallp=np.where(peaks/peaks[0]0:\n l=ismallp[0][0]\n #do not allow more that three peaks\n if l>3:\n l=3\n else:\n l=len(peaks)\n if l==0:\n IN[i][l] = inds[l]\n NFA[i][l] = GFA[i]\n QA[i][l] = peaks[l]-np.min(odf)\n PK[i][l] = peaks[l] \n if l>0 and l<=3: \n IN[i][:l] = inds[:l]\n NFA[i][:l] = GFA[i]\n QA[i][:l] = peaks[:l]-np.min(odf)\n PK[i][:l] = peaks[:l] \n\n if len(self.datashape) == 4:\n self.GFA=GFA.reshape(x,y,z)\n self.NFA=NFA.reshape(x,y,z,5)\n self.QA=QA.reshape(x,y,z,5)/glob_norm_param\n self.PK=PK.reshape(x,y,z,5)\n self.IN=IN.reshape(x,y,z,5)\n if self.save_odfs:\n self.ODF=ODF.reshape(x,y,z,ODF.shape[-1]) \n self.QA_norm=glob_norm_param \n if len(self.datashape) == 2:\n self.GFA=GFA\n self.NFA=NFA\n self.QA=QA\n self.PK=PK\n self.IN=IN\n if self.save_odfs:\n self.ODF=ODF\n #self.BODF=BODF\n self.QA_norm=None\n \n def reduce_peaks(self,peaks,odf_min):\n \"\"\" helping peak_finding when too many peaks are available \n \"\"\"\n if len(peaks)==0:\n return -1 \n if odf_min0:\n l=ismallp[0][0]\n else:\n l=len(peaks)\n else:\n return -1\n return l\n \n \n def slow_odf(self,s):\n \"\"\" Calculate the orientation distribution function \n \"\"\" \n odf = np.zeros(self.odfn)\n Eq=np.zeros((self.sz,self.sz,self.sz))\n for i in range(self.dn):\n Eq[self.q[i][0],self.q[i][1],self.q[i][2]]=s[i]/s[0]\n LEq=laplace(Eq)\n self.Eq=Eq\n self.LEq=LEq\n LEs=map_coordinates(LEq,self.Xs,order=1) \n le_to_odf(odf,LEs,self.radius,self.odfn,self.radiusn,self.equatorn)\n return odf\n \n def odfs(self):\n return self.ODF\n \n def fast_odf(self,s):\n odf = np.zeros(self.odfn) \n Eq=np.zeros((self.sz,self.sz,self.sz))\n for i in range(self.dn): \n Eq[self.q[i][0],self.q[i][1],self.q[i][2]]+=s[i]/s[0] \n LEq=laplace(Eq)\n self.Eq=Eq\n self.LEq=LEq \n LEs=map_coordinates(LEq,self.Ys,order=1) \n LEs=LEs.reshape(self.odfn,self.radiusn)\n LEs=LEs*self.radius\n LEsum=np.sum(LEs,axis=1) \n for i in xrange(self.odfn):\n odf[i]=np.sum(LEsum[self.eqinds[i]])/self.eqinds_len[i]\n #print np.sum(np.isnan(odf))\n return -odf\n \n def angular_weighting(self,odf):\n if self.E==None:\n return odf\n else:\n 
return np.dot(odf[None,:],self.E).ravel()\n \n def precompute_equator_indices(self,thr=5): \n eq_inds=[]\n eq_inds_complete=[] \n eq_inds_len=np.zeros(self.odfn) \n for (i,v) in enumerate(self.odf_vertices):\n eq_inds.append([]) \n for (j,k) in enumerate(self.odf_vertices):\n angle=np.rad2deg(np.arccos(np.dot(v,k)))\n if angle < 90 + thr and angle > 90 - thr:\n eq_inds[i].append(j)\n eq_inds_complete.append(j) \n eq_inds_len[i]=len(eq_inds[i]) \n self.eqinds=eq_inds\n self.eqinds_com=np.array(eq_inds_complete)\n self.eqinds_len=np.array(eq_inds_len,dtype='i8')\n \n \n def precompute_fast_coords(self):\n Ys=[]\n for m in range(self.odfn):\n for q in self.radius: \n #print disk.shape\n xi=self.origin + q*self.odf_vertices[m,0]\n yi=self.origin + q*self.odf_vertices[m,1]\n zi=self.origin + q*self.odf_vertices[m,2] \n Ys.append(np.vstack((xi,yi,zi)).T)\n self.Ys=np.concatenate(Ys).T\n \n \n def precompute_interp_coords(self): \n Xs=[] \n for m in range(self.odfn):\n for q in self.radius: \n #print disk.shape\n xi=self.origin + q*self.equators[m][:,0]\n yi=self.origin + q*self.equators[m][:,1]\n zi=self.origin + q*self.equators[m][:,2] \n Xs.append(np.vstack((xi,yi,zi)).T)\n self.Xs=np.concatenate(Xs).T \n \n def std_over_rms(self,odf):\n numer=len(odf)*np.sum((odf-np.mean(odf))**2)\n denom=(len(odf)-1)*np.sum(odf**2)\n return np.sqrt(numer/denom)\n \n def gfa(self):\n \"\"\" Generalized Fractional Anisotropy\n Defined as the std/rms of the odf values.\n \"\"\"\n return self.GFA\n def nfa(self):\n return self.NFA\n def qa(self):\n return self.QA\n def pk(self):\n return self.PK\n def ind(self):\n \"\"\" peak indices\n \"\"\"\n return self.IN\n\n\nclass EquatorialInversion(DiffusionNabla): \n def eit_operator(self,input, scale, output = None, mode = \"reflect\", cval = 0.0):\n \"\"\"Calculate a multidimensional laplace filter using an estimation\n for the second derivative based on differences.\n \"\"\"\n def derivative2(input, axis, output, mode, cval):\n return correlate1d(input, scale*np.array([1, -2, 1]), axis, output, mode, cval, 0)\n return generic_laplace(input, derivative2, output, mode, cval)\n \n def set_operator(self,name):\n self.operator=name\n\n def set_mode(self,order=1,zoom=1,mode='constant'):\n self.order=order\n self.mode=mode\n self.zoom=zoom\n #self.Eqs=[] \n \n def fast_odf(self,s):\n odf = np.zeros(self.odfn) \n Eq=np.zeros((self.sz,self.sz,self.sz))\n #for i in range(self.dn):\n # Eq[self.q[i][0],self.q[i][1],self.q[i][2]]+=s[i]/s[0]\n Eq[self.q[:,0],self.q[:,1],self.q[:,2]]=s[:]/s[0]\n #self.Eqs.append(Eq)\n \n if self.operator=='2laplacian': \n LEq=self.eit_operator(Eq,2) \n sign=-1\n if self.operator=='laplacian':\n #ZEq=zoom(Eq,zoom=5,order=self.order,mode=self.mode,cval=0.0,prefilter=True)\n #self.ZEqs.append(ZEq)\n #ZLEq=laplace(ZEq)\n #self.ZLEqs.append(ZLEq)\n #LEq=zoom(ZLEq,zoom=.2,order=self.order,mode=self.mode,cval=0.0,prefilter=True) \n #LEq=laplace(Eq)\n #if self.zoom>1:\n # ZEq=zoom(Eq,zoom=self.zoom,order=self.order,mode=self.mode)\n # LEq=laplace(ZEq)\n #else:\n LEq=laplace(Eq)\n sign=-1\n if self.operator=='laplap':\n LEq=laplace(laplace(Eq))\n sign=1\n if self.operator=='signal':\n LEq=Eq\n sign=1\n \n LEs=map_coordinates(LEq,self.Ys,order=1)\n #LEs=map_coordinates(LEq,self.zoom*self.Ys,order=1) \n LEs=LEs.reshape(self.odfn,self.radiusn)\n LEs=LEs*self.radius\n #LEs=LEs*self.radius*self.zoom\n LEsum=np.sum(LEs,axis=1)\n #This is what the following code is doing \n #for i in xrange(self.odfn):\n # odf[i]=np.sum(LEsum[self.eqinds[i]])/self.eqinds_len[i] \n 
#odf2=odf.copy()\n LES=LEsum[self.eqinds_com] \n sum_on_blocks_1d(LES,self.eqinds_len,odf,self.odfn)\n odf=odf/self.eqinds_len \n \n return sign*odf\n\n","sub_path":"dipy/reconst/dni.py","file_name":"dni.py","file_ext":"py","file_size_in_byte":16703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630857543","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 10:07:56 2019\n\n@author: ayebaze\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style('dark')\n#matplotlib inline\n\n#this is for ploting of the graph \n\n##from IPython import get_ipython\n##get_ipython().run_line_magic('matplotlib', 'inline')\n\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n#pd.set_option('display.max_columns', True)\n\nratings_data = pd.read_csv(\"/home/ayebaze/MovieLens/ratings.csv\")\nratings_data.head()\n\nmovie_names = pd.read_csv(\"/home/ayebaze/MovieLens/movies.csv\")\nmovie_names.head()\n\nmovie_data = pd.merge(ratings_data, movie_names, on='movieId')\n#movie_data.head()\n\n#movie_data.groupby('title')['rating'].mean().head()\n\nmovie_data.groupby('title')['rating'].mean().sort_values(ascending=False).head()\n#movie_data.head()\n\nmovie_data.groupby('title')['rating'].count().sort_values(ascending=False).head()\n#print(movie_data)\n\nratings_mean_count = pd.DataFrame(movie_data.groupby('title')['rating'].mean())\nratings_mean_count['rating_counts'] = pd.DataFrame(movie_data.groupby('title')['rating'].count()) \nratings_mean_count.head()\n#print(ratings_mean_count)\n\n#plt.figure(figsize=(8,6)) \n#plt.rcParams['patch.force_edgecolor'] = True \n#ratings_mean_count['rating_counts'].hist(bins=50) \n\n#The Output shows that most movies recieved less than 50 ratings while those with more than 100 is very low\n#--------------------------------------------------------------------------------------\n\n##plt.figure(figsize=(8,6)) \n##plt.rcParams['patch.force_edgecolor'] = True \n##ratings_mean_count['rating'].hist(bins=50) \n\n#integer values have taller bars than floating values people assign ratings as 1,2,3,4,5 and few autliers\n#Data has weak mean normal distribution with a mean of 3.5\n#----------------------------------------------------------------------------------------\n\n##plt.figure(figsize=(8,6)) \n##plt.rcParams['patch.force_edgecolor'] = True \n##sns.jointplot(x='rating', y='rating_counts', data=ratings_mean_count, alpha=0.4) \n\n#The graph shows that movies with higher average ratings actually have more no of ratings compared with movies with lower average ratings\n\nuser_movie_rating = movie_data.pivot_table(index='userId', columns='title', values='rating') \nuser_movie_rating.head()\n#print(user_movie_rating.head())\n\n#The matrix of the Movie titles and the corresponding user ratings.\n#-----------------------------------------------------------------------------------------\n\nforrest_gump_ratings = user_movie_rating['Forrest Gump (1994)']\nforrest_gump_ratings.head()\n#print(forrest_gump_ratings)\n\n#The ratings for the pandas series.\n\nmovies_like_forest_gump = user_movie_rating.corrwith(forrest_gump_ratings)\n\ncorr_forrest_gump = pd.DataFrame(movies_like_forest_gump, columns=['Correlation']) \ncorr_forrest_gump.dropna(inplace=True) \ncorr_forrest_gump.head() \n\ncorr_forrest_gump.sort_values('Correlation', ascending=False).head(10) 
\n#print(corr_forrest_gump)\n\n#The movies that correlate with Forest Gump are very few\n#There is a challenge of sorting the values.\n\ncorr_forrest_gump = corr_forrest_gump.join(ratings_mean_count['rating_counts'])\n#print(corr_forrest_gump.head()) \ncorr_forrest_gump[corr_forrest_gump ['rating_counts']>50].sort_values('Correlation', ascending=False).head()\n#The line above is not working to find the ratings whic are >50\n\nprint(corr_forrest_gump.head())\n\n#To solve the problem above we retrieve only those correlated movies that have at least more than 50 ratings\n\n#https://stackabuse.com/creating-a-simple-recommender-system-in-python-using-pandas/\n","sub_path":"projects/recommender/Scripts/recommendersystem.py","file_name":"recommendersystem.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"483441286","text":"import pygame as pg\nfrom settings import *\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILE_SIZE, TILE_SIZE))\n self.image.fill((0, 255, 255))\n self.rect = self.image.get_rect()\n\n # initially grid coordinates,\n # now these are actual player coordinates (because rect coordinates must be integers)\n self.x = x * TILE_SIZE\n self.y = y * TILE_SIZE\n\n # moving\n self.speed = 150\n self.vel_x, self.vel_y = 0, 0\n\n def get_keys(self):\n self.vel_x, self.vel_y = 0, 0\n keys = pg.key.get_pressed() # object denoting which keys are currently held down\n\n if keys[pg.K_LEFT] or keys[pg.K_a]:\n self.vel_x = -self.speed\n elif keys[pg.K_RIGHT] or keys[pg.K_d]:\n self.vel_x = self.speed\n elif keys[pg.K_UP] or keys[pg.K_w]:\n self.vel_y = -self.speed\n elif keys[pg.K_DOWN] or keys[pg.K_s]:\n self.vel_y = self.speed\n\n def collide_walls(self, dir):\n hitted = pg.sprite.spritecollide(self, self.game.walls, False) # False, so they don't get deleted\n\n\n # not sticking to the wall bug fix\n if dir == 'x':\n \"\"\"\n if hitted:\n if self.vel_x > 0:\n self.x = hitted[0].rect.left - self.rect.width\n if self.vel_x < 0:\n self.x = hitted[0].rect.right\n self.vel_x = 0\n self.rect.x = self.x\n \"\"\"\n if hitted:\n print(self.vel_x)\n # smooth corners\n if self.vel_x > 0 and (hitted[0].rect.y - self.rect.y) > (self.rect.height * (2 / 3)):\n print(f\"You hit a wall ({hitted[0].rect.x / TILE_SIZE}, {hitted[0].rect.y / TILE_SIZE})\")\n self.y = hitted[0].y - self.rect.height\n self.rect.y = self.y\n #elif self.vel_x > 0:\n # self.x = hitted[0].rect.left - self.rect.width\n #elif self.vel_x < 0:\n # self.x = hitted[0].rect.right\n\n self.vel_x = 0\n #self.rect.x = self.x\n\n elif dir == 'y':\n if hitted:\n if self.vel_y > 0:\n self.y = hitted[0].rect.top - self.rect.height\n if self.vel_y < 0:\n self.y = hitted[0].rect.bottom\n self.vel_y = 0\n self.rect.y = self.y\n\n def update(self):\n self.get_keys()\n\n # velocities are pixels per second, not per frame!\n self.x += self.vel_x * self.game.dt\n self.y += self.vel_y * self.game.dt\n\n # to avoid not sticking to the wall bug\n self.rect.x = self.x\n self.collide_walls('x')\n\n self.rect.y = self.y\n self.collide_walls('y')\n\nclass Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILE_SIZE, TILE_SIZE))\n self.image.fill((255, 0, 0))\n self.rect = 
self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILE_SIZE\n self.rect.y = y * TILE_SIZE\n","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615227312","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Process pavement widths\n#\n# Based on methodology outlined in https://github.com/meliharvey/sidewalkwidths-nyc\n#\n# - read roadside (pavement) features\n# - find centreline\n# - calculate width along centreline\n# - calculate some summary statistics\n# - output cleaned\n\nimport glob\nimport json\nimport os\nimport sys\n\nimport centerline\nimport centerline.exceptions\nimport pandas\nimport geopandas\nimport requests\nimport shapely.wkt\n\nfrom centerline.geometry import Centerline\nfrom shapely.geometry import LineString\nfrom shapely.geometry import Point, MultiPoint, MultiLineString\nfrom shapely.ops import linemerge, nearest_points\nfrom tqdm import tqdm\n\n# set up progress_apply\ntqdm.pandas()\nCACHE_PATH = os.path.join(os.path.dirname(__file__), 'db-data')\n\n\ndef remove_short_lines(line):\n if line.type == 'MultiLineString':\n passing_lines = []\n\n for i, linestring in enumerate(line):\n other_lines = MultiLineString([x for j, x in enumerate(line) if j != i])\n\n p0 = Point(linestring.coords[0])\n p1 = Point(linestring.coords[-1])\n\n is_deadend = False\n\n if p0.disjoint(other_lines): is_deadend = True\n if p1.disjoint(other_lines): is_deadend = True\n\n if not is_deadend or linestring.length > 5:\n passing_lines.append(linestring)\n\n return MultiLineString(passing_lines)\n\n if line.type == 'LineString':\n return line\n\n\ndef linestring_to_segments(linestring):\n return [\n LineString([linestring.coords[i], linestring.coords[i+1]])\n for i in range(len(linestring.coords) - 1)\n ]\n\n\ndef get_segments(line):\n if line.type == 'MultiLineString':\n line_segments = []\n for linestring in line.geoms:\n line_segments.extend(linestring_to_segments(linestring))\n return line_segments\n\n elif line.type == 'LineString':\n return linestring_to_segments(line)\n else:\n return []\n\n\ndef interpolate_by_distance(linestring, distance=1):\n count = round(linestring.length / distance) + 1\n\n if count == 1:\n # grab mid-point if it's a short line\n return [linestring.interpolate(linestring.length / 2)]\n else:\n # interpolate along the line\n return [linestring.interpolate(distance * i) for i in range(count)]\n\n\ndef interpolate(line):\n if line.type == 'MultiLineString':\n all_points = []\n\n for linestring in line:\n all_points.extend(interpolate_by_distance(linestring))\n\n return all_points\n\n if line.type == 'LineString':\n return interpolate_by_distance(line)\n\n\ndef polygon_to_multilinestring(polygon):\n return MultiLineString([polygon.exterior] + [line for line in polygon.interiors])\n\n\ndef get_avg_distances(row):\n avg_distances = []\n\n boundary = polygon_to_multilinestring(row.geometry)\n\n for segment in row.segments:\n points = interpolate(segment)\n\n distances = []\n\n for point in points:\n p1, p2 = nearest_points(boundary, point)\n distances.append(p1.distance(p2))\n\n avg_distances.append(sum(distances) / len(distances))\n\n return avg_distances\n\n\ndef explode_to_segments(df):\n data = {'geometry': [], 'width': []}\n\n for i, row in df.iterrows():\n\n for segment, distance in zip(row.segments, row.avg_distances):\n data['geometry'].append(segment.buffer(distance))\n data['width'].append(distance * 
2)\n\n df_segments = pandas.DataFrame(data)\n df_segments = geopandas.GeoDataFrame(df_segments, crs=df.crs, geometry='geometry')\n return df_segments\n\n\ndef process_centreline(geom):\n try:\n line = Centerline(geom)\n except centerline.exceptions.TooFewRidgesError:\n line = Centerline(geom, interpolation_distance=0.1)\n\n line = remove_short_lines(linemerge(line))\n return line.simplify(1, preserve_topology=True)\n\n\ndef process_lad(lad_code):\n try:\n df = geopandas.read_file(os.path.join(CACHE_PATH, f'{lad_code}.gpkg'))\n except Exception as err:\n print(lad_code, err)\n return\n\n df['centerlines'] = df.geometry.progress_apply(process_centreline)\n df['segments'] = df.centerlines.progress_apply(get_segments)\n df['avg_distances'] = df.progress_apply(get_avg_distances, axis=1)\n df_segments = explode_to_segments(df)\n df_segments.to_file(os.path.join(CACHE_PATH, f'{lad_code}_segments.gpkg'), driver='GPKG')\n\n\nif __name__ == '__main__':\n try:\n lad_code = sys.argv[1]\n except IndexError:\n print(f\"Usage: python {__file__} \")\n sys.exit()\n\n process_lad(lad_code)\n","sub_path":"process-widths.py","file_name":"process-widths.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"355786576","text":"# 1. 模拟斗地主发牌,牌共54张\n# 黑桃('\\u2660'), 梅花('\\u2663'), 红桃('\\u2666')\n# 方块('\\u2665')\n# A2-10JQK\n# 大王,小王\n# 三个个,每人发17张牌,底牌留三张\n# 输入回车,打印第1个人的17张牌\n# 输入回车,打印第2个人的17张牌\n# 输入回车,打印第3个人的17张牌\n# 输入回车,显示三张底牌\n\n\nkinds = ['\\u2660', '\\u2663', '\\u2666', '\\u2665']\nnumbers = ['A']\nnumbers += [str(x) for x in range(2, 11)]\nnumbers += list(\"JQK\")\n# print(numbers)\npoke = ['大王', '小王']\nfor x in kinds:\n for y in numbers:\n poke.append(x + y)\n\n# print(poke)\nassert len(poke) == 54, '牌数不够'\n\nimport random\npoke2 = poke.copy() # 复制另一幅牌\nrandom.shuffle(poke2)\n# print(poke2)\n\nplayer1 = poke2[:17]\nplayer2 = poke2[17:34]\nplayer3 = poke2[34:51]\nbase = poke2[51:]\n\ninput()\nprint(\"第1个人的牌是:\", player1)\ninput()\nprint(\"第2个人的牌是:\", player2)\ninput()\nprint(\"第3个人的牌是:\", player3)\ninput()\nprint(\"底牌是:\", base)\n\n","sub_path":"02-PythonBase/day14/day13_exercise/poke.py","file_name":"poke.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"545120074","text":"\"\"\"\nA trie implementation that powerfully supports prefix searching.\n\nTrie or R-trie is sweet for small strings and small alphabet.\nThe performance of trie is hard to beat if you can afford the space :)\n\"\"\"\n\nclass Trie:\n class Node:\n def __init__(self, R):\n self.val = None\n self.next = [None] * R\n\n def __init__(self, R):\n self.root = self.Node(R)\n self.R = R\n\n def get(self, key):\n node = self.root\n for c in key:\n node = node.next[ord(c)-ord('a')]\n if not node:\n return\n return node.val\n\n def insert(self, key, val):\n node = self.root\n for c in key:\n if not node.next[ord(c)-ord('a')]:\n node.next[ord(c)-ord('a')] = self.Node(self.R)\n node = node.next[ord(c)-ord('a')]\n node.val = val\n\n def keys_with_prefix(self, prefix):\n node = self.root\n for c in prefix:\n node = node.next[ord(c)-ord('a')]\n if not node:\n return\n self.keys = []\n self.dfs(node, prefix)\n return self.keys\n\n def dfs(self, node, key):\n if not node:\n return\n if node.val is not None: #\n self.keys.append(key)\n for i in range(self.R):\n self.dfs(node.next[i], 
key+chr(i+ord('a')))\n","sub_path":"data_structures/tree/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"462558866","text":"import pandas\nimport math\nimport random\nfilename = 'mission_exocat.csv'\nnames = ['Ids', 'Parallax (mas)', 'Evolutionary class', 'Observational type', 'Parallax (mas)', 'Orbital period (days)', 'Semi-major axis (arcsec)', 'Eccentricity', 'Inclination (deg)', 'Source']\ndata = pandas.read_csv(filename, names=names)\nprint(data.shape)\n\nbinarydistnan = list(data['Orbital period (days)'])\n\n\nbinarydist = []\ndel binarydistnan[0:13]\n\nfor q in (binarydistnan):\n if q != 'nan':\n binarydist.append(float(q))\n\n\n\nrandom.shuffle(binarydist)\ndel binarydist[0:4000]\nprint (len(binarydist))\n\nbinarydistance = []\n\n\ndef stardistance1 (P):\n return P ** (2./3)\n #math.sqrt(((4*(math.pi**2))/G*m1)*(math.fabs(d2 + d1)**3))\n\n\n\nfor i in range(len(binarydist)-1):\n binarydistance.append(stardistance1(binarydist[i]))\n\nprint(binarydist[0:5])\nprint(binarydistance[0:5])\nprint(len(binarydistance))\n\n\nimport pickle, os\n\npickle.dump(binarydistance, open(os.getcwd()+'/binarydistance', 'wb'))","sub_path":"DistanceFeatureBinary.py","file_name":"DistanceFeatureBinary.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"374306567","text":"def groupItemsBySingleKeyIndex(listItems, keyIndex):\n dic_itemGroups={}\n for item in listItems:\n key=str(item[keyIndex]) \n dic_itemGroups.setdefault(key, []).append(item)\n \n return dic_itemGroups\n\ndef groupTxtByClass_Txtindex(listtuple_pred_true_text, isByTrueLabel):\n dic_tupple_class_Txtindex={}\n if isByTrueLabel == False:\n #print(\"isByPredLabel\")\n i=-1\t\n for tuple_pred_true_text in listtuple_pred_true_text:\n i=i+1\n predLabel = str(tuple_pred_true_text[0])\n trueLabel = tuple_pred_true_text[1]\n txt = tuple_pred_true_text[2] \n dic_tupple_class_Txtindex.setdefault(predLabel, []).append([predLabel, trueLabel, txt, i])\n else:\n #print(\"isByTrueLabel\")\n i=-1\n for tuple_pred_true_text in listtuple_pred_true_text:\n i=i+1\n predLabel = tuple_pred_true_text[0]\n trueLabel = str(tuple_pred_true_text[1])\n txt = tuple_pred_true_text[2] \n dic_tupple_class_Txtindex.setdefault(trueLabel, []).append([predLabel, trueLabel, txt, i])\n\t \n return dic_tupple_class_Txtindex \n\n##group txt by class\ndef groupTxtByClass(listtuple_pred_true, isByTrueLabel):\n dic_tupple_class = {}\n if isByTrueLabel == False:\n for tuple_pred_true in listtuple_pred_true:\n predLabel = str(tuple_pred_true[0])\n dic_tupple_class.setdefault(predLabel, []).append(tuple_pred_true)\n else:\n for tuple_pred_true in listtuple_pred_true:\n trueLabel = str(tuple_pred_true[1]) \n dic_tupple_class.setdefault(trueLabel, []).append(tuple_pred_true) \n\n #for key, value in dic_tupple_class.items():\n # print(key, len(value))\n\n return dic_tupple_class\n","sub_path":"BatchClustering/groupTxt_ByClass.py","file_name":"groupTxt_ByClass.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"532866455","text":"from .exceptions import *\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = []\n\n\ndef _get_random_word(list_of_words):\n try:\n return list_of_words[random.randint(0,len(list_of_words) - 1)]\n except:\n raise 
InvalidListOfWordsException\n\ndef _mask_word(word):\n if word:\n return len(word) * '*'\n else:\n raise InvalidWordException\n\n\ndef _uncover_word(answer_word, masked_word, character):\n lower_char = character.lower()\n temp_uncovered_word = masked_word\n if not (len(answer_word) == len(masked_word) and answer_word and masked_word):\n raise InvalidWordException\n if len(lower_char) != 1:\n raise InvalidGuessedLetterException\n for i, letter_in_word in enumerate(answer_word):\n if letter_in_word.lower() == lower_char:\n temp_uncovered_word = temp_uncovered_word[:i] + lower_char + temp_uncovered_word[i + 1:]\n return temp_uncovered_word\n \n \ndef guess_letter(game, letter):\n if not '*' in game['masked_word'] or game['remaining_misses'] == 0:\n raise GameFinishedException\n lower_letter = letter.lower()\n temp_answer_word = game['answer_word']\n temp_masked_word = game['masked_word']\n uncovered_word = _uncover_word(temp_answer_word, temp_masked_word, lower_letter)\n if len(letter) != 1:\n raise InvalidGuessedLetterException\n letter_found = not(uncovered_word == game['masked_word'])\n game['masked_word'] = uncovered_word\n game['previous_guesses'].append(lower_letter)\n if not letter_found:\n game['remaining_misses'] -= 1\n if not '*' in game['masked_word']:\n raise GameWonException\n if game['remaining_misses'] == 0:\n raise GameLostException\n \n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None: \n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"433102596","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"coda\", # Replace with your own username\n version=\"0.0.1\",\n author=\"Aaron Lim\",\n author_email=\"aaronzlim@gmail.com\",\n description=\"CoxOrb Data Analyzer\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/aaronzlim/coda\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/aaronzlim/coda/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n packages=setuptools.find_packages(where=\"src\"),\n install_requires=[\n \"matplotlib==3.4.2\",\n \"pysimplegui==4.40.0\",\n \"pytz==2021.1\",\n ],\n entry_points={\"console_scripts\": [\"coda=src.__main__:main\"]},\n python_requires=\">=3.8\",\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"225984922","text":"import csv, codecs, cStringIO\n\ndef __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n \ndef write_row(self, row):\n self.writer.writerow([s.encode(\"utf-8\") for s in row])\n # Fetch UTF-8 output from the queue ...\n data 
= self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n\ndef write_rows(csvfile, order_id , ext_id, amount, payment_type):\n\n f = open(csvfile, 'wt')\n try:\n writer = csv.writer(f)\n writer.writerow(('Equator Order ID', 'External Transaction ID','Amount','Payment Type'))\n writer.writerows([[order_id, ext_id, amount, payment_type]])\n finally:\n f.close()\n\ndef write_row_double(csvfile, order_id , ext_id, amount, payment_type):\n\n f = open(csvfile, 'at')\n try:\n writer = csv.writer(f)\n writer.writerows([[order_id, ext_id, amount, payment_type]])\n finally:\n f.close()\n\n","sub_path":"pythonlibs/createcsvfile.py","file_name":"createcsvfile.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"211822133","text":"from utils import filepath_can_be_reached\nfrom error_utils import *\n\nDEFAULT_EPOCHS = 50\nDEFAULT_BATCH_SIZE = 32\n\nDEFAULT_OPTION = 0\nDEFAULT_GRAPH_OPTION = 1\n\ndef get_epochs():\n \"\"\" function used to read the number of epochs perfomed in the training of the autoencoder \"\"\"\n\n # get the number of epochs\n prompt = \"\\nGive the number of epochs (default = {}): \"\n prompt = prompt.format(DEFAULT_EPOCHS)\n epochs = input(prompt)\n\n # make sure the user gives a legit input\n while epochs != \"\":\n\n # try to convert the input to an int\n try:\n epochs = int(epochs)\n # it must be a positive integer\n if epochs <= 0:\n raise ValueError\n # if we get here then the input is fine, so break\n break\n\n # catch error and try again\n except ValueError:\n print(\"The number of epochs must a positive integer. Please try again.\")\n epochs = input(prompt)\n\n # check if the user wants to use the deault value\n if epochs == \"\":\n epochs = DEFAULT_EPOCHS\n\n # return the final value\n return epochs\n\n\ndef get_batch_size():\n \"\"\" function used to read the batch size used in the training of the autoencoder \"\"\"\n\n # get the number of batch_size\n prompt = \"\\nGive the batch size (default = {}): \"\n prompt = prompt.format(DEFAULT_BATCH_SIZE)\n batch_size = input(prompt)\n\n # make sure the user gives a legit input\n while batch_size != \"\":\n\n # try to convert the input to an int\n try:\n batch_size = int(batch_size)\n # it must be a positive integer\n if batch_size <= 0:\n raise ValueError\n # if we get here then the input is fine, so break\n break\n\n # catch error and try again\n except ValueError:\n print(\"The batch size must a positive integer. 
Please try again.\")\n batch_size = input(prompt)\n\n # check if the user wants to use the deault value\n if batch_size == \"\":\n batch_size = DEFAULT_BATCH_SIZE\n\n # return the final value\n return batch_size\n\n\ndef get_option():\n \"\"\" Function used to extract an option (0, 1, 2, 3) from the user after an experiment has\n been completed \"\"\"\n\n # ask the user what he would like to do now\n prompt = \"\\nThe following options are now available:\\n\\t0) Exit the program\\n\\t1) Repeat \" \\\n \"experiment with different values for the hyperprameters\\n\\t2) Show graphs with the \" \\\n \"results of all the experiments run so far\\n\\t3) Print the results\\n\\n\" \\\n \"Enter your action (default = 0): \"\n option = input(prompt)\n\n # make sure that the option given is legit\n while option != \"\" and option != \"0\" and option != \"1\" and option != \"2\" and option != \"3\":\n\n # ask again as the input was wrong\n print(\"Wrong input, the option selected should be one of the following: 0, 1, 2 or 3. \" \\\n \"Please try again.\")\n option = input(prompt)\n\n # check for the default value\n if option == \"\":\n option = DEFAULT_OPTION\n # else, convert it to an int\n else:\n option = int(option)\n\n # return it\n return option\n\n\ndef get_graph_option():\n \"\"\" Function used to extract from the user whether he wants to see a loss vs epochs graph of the\n current experiment, or a loss vs hyperprameters graph over all the experiments \"\"\"\n\n # ask the user\n prompt = \"\\nThere are 2 options available regarding the graphs that can be shown:\\n\\t1) Show \" \\\n \"the Loss vs Epochs graph of the current experiment\\n\\t2) Show the Loss vs \" \\\n \"hyperprameters graph over all the experiments run so far\\n\\nEnter your option \" \\\n \"(default = 1): \"\n answer = input(prompt)\n\n # while the answer is invalid\n while answer != \"\" and answer != \"1\" and answer != \"2\":\n # ask again as the input was wrong\n print(\"Wrong input, the option selected should be one of the following: 1 or 2. 
\" \\\n \"Please try again.\")\n option = input(prompt)\n\n # check whether the user wants the default option\n if answer == \"\":\n answer = DEFAULT_GRAPH_OPTION\n # else convert the answer to an integer\n else:\n answer = int(answer)\n\n # return the answer as an integer\n return answer\n","sub_path":"Autoencoder/src/utils/interface_utils.py","file_name":"interface_utils.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"136710859","text":"import setup\n\nimport os\nimport re\nfrom os.path import isfile, join\nfrom os import listdir\n\nDATADIR = os.path.join(os.getcwd(), 'data\\\\')\nLOOKUPDIR = os.path.join(os.getcwd(), 'lookup\\\\')\n\nclass Setup(object):\n\n def __init__(self):\n self.inp = setup.Input()\n self.learnerdict = {}\n\n def main(self):\n self.greeting()\n chosen_dataset = self.getdatasets_input()\n chosen_learners = self.getlearner_input()\n dataset, learners = self.parse_options(chosen_dataset, chosen_learners)\n self.inp.print_out(\"\\nChosen data set: \")\n self.inp.print_out(dataset, extraspace=' ')\n self.inp.print_out(\"\\nChosen learners: \")\n self.inp.print_out(str(learners), extraspace=' ')\n self.inp.print_out('\\nAt any time, type \\'help\\' to get help\\n.')\n ra = setup.RunAuger(DATADIR, dataset, learners)\n ra.stage()\n\n\n def greeting(self):\n self.inp.print_out(\"\"\"\\n\n Welcome to Project Auger!\\n\n Project Auguer is ML learning tool\n that allows you to select a dataset and ML model(s),\n mash them together, and spit out awesome results.\\n\n It's also intended as a learning tool.\n At any time, type 'help' for assistance.\\n\n Note that any dataset you wish to use:\n a. must exist in the project_auger /data directory (no subdirs)\n b. must be a .csv file\\n\\n\"\"\")\n\n\n def getdatasets_input(self):\n self.inp.print_out(\"Choose a dataset\")\n total_datasets = self.showdatasets()\n while True:\n choice = self.inp.get_input(\"Enter a number: \")\n try:\n choice = int(choice)\n if choice > total_datasets or choice < 1:\n self.inp.print_out(\"Number not in range, try again.\")\n else:\n break\n except ValueError:\n self.inp.print_out(\"Not a number, try again.\")\n datasetname = self.get_dataset_name(choice)\n self.inp.print_out(\"\\n You chose: '\" + datasetname + \"'. Awesome choice!\\n\")\n return choice\n\n\n def showdatasets(self):\n index = 1\n for csv in listdir(DATADIR):\n file, ext = os.path.splitext(os.getcwd() + csv)\n if ext == '.csv':\n self.inp.print_out(str(index) + '. ' +csv, extraspace=' ')\n index = index + 1\n self.inp.print_out('')\n return index-1\n\n\n def getlearner_input(self):\n while True:\n self.inp.print_out(\"Choose your learner(s)\")\n total_learners = self.showlearners()\n choice = self.inp.get_input(\"Use standard slicing notation, i.e. 
[:3], or comma-separated by number: \")\n choice = choice.split(',')\n run_learners = []\n correct_nums = 1\n for num in choice:\n if re.search(\"\\A\\[\",num) and re.search(\"\\]\\Z\",num):\n num = num.split(':')\n start = num[0].replace('[', '')\n end = num[1].replace(']','')\n if start:\n try:\n start = int(start)\n except ValueError:\n correct_nums = 0\n break\n if start < 1:\n start = 1\n else:\n start = 1\n if end:\n try:\n end = int(end)\n except ValueError:\n correct_nums = 0\n break\n if end > total_learners:\n end = total_learners + 1\n else:\n end = total_learners + 1\n for eachnum in range(start, end):\n run_learners.append(eachnum)\n else:\n try:\n toenter = int(num)\n except ValueError:\n correct_nums = 0\n break\n if not toenter > total_learners: \n run_learners.append(toenter)\n\n if(correct_nums):\n if run_learners:\n run_learners = list(dict.fromkeys(run_learners))\n return sorted(run_learners)\n else:\n self.inp.print_out(\"No learners chosen\")\n else:\n self.inp.print_out('Non-integer value entered, try again.')\n\n\n def showlearners(self):\n f = open(os.path.join(LOOKUPDIR, 'model_options.txt'), 'r')\n for index, model in enumerate(f):\n self.inp.print_out(str(index+1)+'. '+model, end='')\n self.learnerdict[index+1] = model\n self.inp.print_out('\\n')\n return (index+1)\n\n\n def get_dataset_name(self, numchosen):\n index = 1\n for csv in listdir(DATADIR):\n file, ext = os.path.splitext(os.getcwd() + csv)\n if ext == '.csv':\n if index == numchosen:\n return csv\n index = index + 1\n\n\n def parse_options(self, dataset, run_learners):\n dataset = self.get_dataset_name(dataset)\n learners = [self.learnerdict[l].replace('\\n', '') for l in run_learners]\n return dataset, learners\n","sub_path":"setup/Setup.py","file_name":"Setup.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"225787643","text":"class Node():\n def __init__(self,vertex):\n self.vertex = vertex\n self.connections = []\n\n def __str__(self):\n text = \"Node-%s. 
Connections: [\" % self.vertex\n\n for i,connection in enumerate(self.connections):\n text += \"Node-%s\" % connection.vertex\n if i is not (len(self.connections) - 1):\n text += \", \"\n\n text += \"]\\n\"\n return text","sub_path":"app_graph/model/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"458484407","text":"# Copyright 2021 VMware, Inc.\n# SPDX-License-Identifier: Apache-2.0\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom vdk.api.plugin.plugin_input import IPropertiesServiceClient\nfrom vdk.internal.builtin_plugins.config.job_config import JobConfigKeys\nfrom vdk.internal.builtin_plugins.job_properties.properties_router import (\n PropertiesRouter,\n)\nfrom vdk.internal.core.config import Configuration\nfrom vdk.internal.core.errors import VdkConfigurationError\n\n\ndef test_routing():\n router = PropertiesRouter(\n \"foo\", Configuration({}, {JobConfigKeys.TEAM: \"test-team\"})\n )\n mock_client = MagicMock(spec=IPropertiesServiceClient)\n router.set_properties_factory_method(\"default\", lambda: mock_client)\n\n router.get_properties_impl().set_all_properties({\"a\": \"b\"})\n mock_client.write_properties.assert_called_with(\"foo\", \"test-team\", {\"a\": \"b\"})\n\n router.get_properties_impl().get_all_properties()\n mock_client.read_properties.assert_called_with(\"foo\", \"test-team\")\n\n\ndef test_routing_error():\n router = PropertiesRouter(\"foo\", Configuration({}, {}))\n\n def raise_error():\n raise AttributeError(\"dummy exception\")\n\n router.set_properties_factory_method(\"default\", raise_error)\n\n with pytest.raises(VdkConfigurationError):\n router.get_properties_impl().set_all_properties({\"a\": \"b\"})\n\n\ndef test_routing_empty_error():\n router = PropertiesRouter(\"foo\", Configuration({}, {}))\n\n with pytest.raises(VdkConfigurationError):\n router.get_properties_impl().set_all_properties({\"a\": \"b\"})\n\n\ndef test_routing_choose_single_registered():\n router = PropertiesRouter(\"foo\", Configuration({}, {\"team\": \"test-team\"}))\n mock_client = MagicMock(spec=IPropertiesServiceClient)\n router.set_properties_factory_method(\"foo\", lambda: mock_client)\n\n router.get_properties_impl().set_all_properties({\"a\": \"b\"})\n mock_client.write_properties.assert_called_with(\"foo\", \"test-team\", {\"a\": \"b\"})\n\n\ndef test_routing_choose_default_type_chosen():\n router = PropertiesRouter(\n \"foo\", Configuration({}, {\"properties_default_type\": \"foo\"})\n )\n foo_mock_client = MagicMock(spec=IPropertiesServiceClient)\n bar_mock_client = MagicMock(spec=IPropertiesServiceClient)\n router.set_properties_factory_method(\"foo\", lambda: foo_mock_client)\n router.set_properties_factory_method(\"bar\", lambda: bar_mock_client)\n\n router.get_properties_impl().set_all_properties({\"a\": \"b\"})\n foo_mock_client.write_properties.assert_called_with(\"foo\", None, {\"a\": \"b\"})\n bar_mock_client.assert_not_called()\n\n\ndef test_routing_choose_too_many_choices():\n router = PropertiesRouter(\"foo\", Configuration({}, {}))\n foo_mock_client = MagicMock(spec=IPropertiesServiceClient)\n bar_mock_client = MagicMock(spec=IPropertiesServiceClient)\n router.set_properties_factory_method(\"foo\", lambda: foo_mock_client)\n router.set_properties_factory_method(\"bar\", lambda: bar_mock_client)\n\n with pytest.raises(VdkConfigurationError):\n router.get_properties_impl().set_all_properties({\"a\": 
\"b\"})\n","sub_path":"projects/vdk-core/tests/vdk/internal/builtin_plugins/job_properties/test_properties_router.py","file_name":"test_properties_router.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"98433000","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import with_statement\n\nfrom datetime import datetime\nimport os\n\nfrom revolver import contextmanager as ctx\nfrom revolver import directory as dir\nfrom revolver import file\nfrom revolver import git\nfrom revolver import log\nfrom revolver import user\nfrom revolver.core import put, run, sudo, local\n\ndef deploy(owner, upload_hook=None, revision='HEAD', keep_versions=10):\n if not user.exists(owner):\n log.abort('Specified owner does not exists! Deploy aborted')\n\n # Ensure some directories\n paths = _ensure_layout(owner)\n new_release_dir = _create_new_release_dir(owner, paths['releases'])\n paths['new_release'] = new_release_dir\n\n # Upload the new version and call the after upload hook\n _upload(owner, new_release_dir, revision)\n if upload_hook:\n with ctx.sudo(owner), ctx.cd(new_release_dir):\n upload_hook(owner, paths)\n\n # Activate the new release and \n _symlink_release(owner, paths['current'], new_release_dir)\n _clear_old_releases(paths['releases'], keep_versions)\n\n return paths\n\ndef _ensure_layout(owner):\n home_dir = user.home_directory(owner)\n repo_name = git.repository_name()\n\n join = os.path.join\n project_dir = join(home_dir, repo_name)\n\n paths = {\n 'project': join(project_dir),\n 'current': join(project_dir, 'current'),\n 'releases': join(project_dir, 'releases'), \n 'shared': join(project_dir, 'shared'),\n 'logs': join(project_dir, 'shared', 'logs'),\n 'temp': join(project_dir, 'shared', 'temp')\n }\n\n with ctx.sudo(owner):\n for path in paths.itervalues():\n if dir.exists(path): \n continue\n dir.create(path, recursive=True)\n\n return paths\n\ndef _create_new_release_dir(owner, base_dir):\n date_dir = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n release_dir = os.path.join(base_dir, date_dir)\n\n with ctx.sudo(owner):\n dir.create(release_dir)\n\n return release_dir\n\ndef _upload(owner, upload_dir, revision):\n tmp_tar = git.create_archive(revision)\n\n try:\n with ctx.cd(upload_dir):\n with ctx.sudo():\n put(tmp_tar, 'deploy.tar.gz')\n file.attributes('deploy.tar.gz', owner=owner)\n\n with ctx.sudo(owner):\n run('tar -xzf deploy.tar.gz')\n file.remove('deploy.tar.gz')\n file.write('VERSION', git.revparse(revision))\n finally:\n local('rm -rf %s' % tmp_tar)\n\ndef _symlink_release(owner, current_dir, release_dir):\n with ctx.sudo(owner):\n if dir.exists(current_dir):\n dir.remove(current_dir, recursive=True)\n file.link(release_dir, current_dir)\n\ndef _clear_old_releases(directory, keep):\n with ctx.cd(directory):\n sudo(\n 'ls -1 | sort -V | head -n-%s | xargs -l1 rm -rf'\n % keep\n )\n","sub_path":"revolver/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"452388428","text":"import re\n\nfrom .string import String\n\n# TOSCA definition: .[.[.[-[0-9]|([1-9][0-9]+))\n \\.(?P[0-9]|([1-9][0-9]+))\n (\n \\.(?P[0-9]|([1-9][0-9]+))\n (\n \\.(?P\\w+)\n (\n -(?P[0-9]|([1-9][0-9]+))\n )?\n )?\n )?\n $\n \"\"\",\n re.ASCII | re.VERBOSE,\n)\n\n\nclass Version(String):\n @classmethod\n def validate(cls, yaml_node):\n super().validate(yaml_node)\n if not 
VERSION_RE.match(yaml_node.value):\n cls.abort(\"Invalid version format.\", yaml_node.loc)\n","sub_path":"src/opera/parser/tosca/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"635651673","text":"'''\nCreated on 15-May-2020\n\n@author: raghuveer\n'''\n\ndef func1():\n try:\n dict1 = {\"IN\" : \"India\", \"US\" : \"United States\"}\n del dict1[\"IN\"]\n value = 100//(len(dict1) - 1)\n print(value)\n except ZeroDivisionError:\n print(\"ZD\", end=\" \")\n value = int(dict1[0])\n except KeyError:\n print(\"KE\", end=\" \")\n finally:\n print(\"FI\", end=\" \")\n\ntry:\n func1()\n print(\"TR\")\nexcept:\n print(\"CA\")\n ","sub_path":"Python_Learning/AssessmentSet4/Q9.py","file_name":"Q9.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"28520289","text":"class Transporte():\n def __init__(self):\n self.__nome = None\n self.__altura = None\n self.__comprimento = None\n self.__carga = None\n self.__velocidade = None\n\n @property\n def nome(self):\n return self.__nome\n\n @nome.setter\n def nome(self, nome: str):\n self.__nome = nome\n\n @property\n def altura(self):\n return self.__altura\n\n @altura.setter\n def altura(self, altura: float):\n self.__altura = altura\n\n @property\n def comprimento(self):\n return self.__comprimento\n\n @comprimento.setter\n def comprimento(self, comprimento: float):\n self.__comprimento = comprimento\n\n @property\n def carga(self):\n return self.__carga\n\n @carga.setter\n def carga(self, carga: float):\n self.__carga = carga\n\n @property\n def velocidade(self):\n return self.__velocidade\n\n @velocidade.setter\n def velocidade(self, velocidade: float):\n self.__velocidade = velocidade\n\n def add(self, nome: str, altura: float, comprimento: float,\n carga: float, velocidade: float):\n self.nome = nome\n self.altura = altura\n self.comprimento = comprimento\n self.carga = carga\n self.velocidade = velocidade\n\n def mostra(self):\n print(f\"\"\"\n nome: {self.nome}\n altura: {self.altura}\n comprimento: {self.comprimento}\n carga: {self.carga}\n velocidade: {self.velocidade}\n \"\"\")\n\n\nclass TransporteAereo(Transporte):\n def __init__(self):\n self.envergadura = None\n self.autonomia = None\n\n def add(self, nome: str, altura: float, comprimento: float,\n carga: float, velocidade: float, autonomia: float, envergadura: float):\n self.nome = nome\n self.altura = altura\n self.comprimento = comprimento\n self.carga = carga\n self.velocidade = velocidade\n self.autonomia = autonomia\n self.envergadura = envergadura\n\n def mostra(self):\n print(f\"\"\"\n nome: {self.nome}\n altura: {self.altura}\n comprimento: {self.comprimento}\n carga: {self.carga}\n velocidade: {self.velocidade}\n autonomia: {self.autonomia}\n envergadura: {self.envergadura}\n \"\"\")\n\n\nclass TransporteTerrestre (Transporte):\n def __init__(self):\n self.motor = None\n self.rodas = None\n\n def add(self, nome: str, altura: float, comprimento: float,\n carga: float, velocidade: float, motor, rodas):\n self.nome = nome\n self.altura = altura\n self.comprimento = comprimento\n self.carga = carga\n self.velocidade = velocidade\n self.motor = motor\n self.rodas = rodas\n\n def mostra(self):\n print(f\"\"\"\n nome: {self.nome}\n altura: {self.altura}\n comprimento: {self.comprimento}\n carga: {self.carga}\n velocidade: {self.velocidade}\n motor: {self.motor}\n 
rodas: {self.rodas}\n \"\"\")\n\n\nclass TransporteAquatico(Transporte):\n def __init__(self):\n self.boca = None\n self.calado = None\n\n def add(self, nome: str, altura: float, comprimento: float,\n carga: float, velocidade: float, boca, calado):\n self.nome = nome\n self.altura = altura\n self.comprimento = comprimento\n self.carga = carga\n self.velocidade = velocidade\n self.boca = boca\n self.calado = calado\n\n def mostra(self):\n print(f\"\"\"\n nome: {self.nome}\n altura: {self.altura}\n comprimento: {self.comprimento}\n carga: {self.carga}\n velocidade: {self.velocidade}\n boca: {self.boca}\n calado: {self.calado}\n \"\"\")\n\n\ndef entrada():\n a = input(\"Nome: \")\n b = float(input(\"Altura: \"))\n c = float(input(\"Comprimento: \"))\n d = float(input(\"Carga: \"))\n e = float(input(\"Velocidade: \"))\n return a, b, c, d, e\n\n\nclass Catalogo:\n def __init__(self):\n self.veiculos = []\n\n def addVeiculo(self):\n while True:\n cmd = input(\"\"\"\n O veiculo é:\n 1- Aereo\n 2- Aquatico\n 3- Terrestre\n\n 0- exit\n \"\"\")\n if cmd == '1':\n q, w, e, r, t = entrada()\n autonomia = float(input(\"Autonomia: \"))\n envergadura = float(input(\"Envergadura: \"))\n a = TransporteAereo()\n a.add(q, w, e, r, t, autonomia, envergadura)\n self.veiculos.append(a)\n elif cmd == '2':\n q, w, e, r, t = entrada()\n boca = input(\"Boca: \")\n calado = float(input(\"Calado: \"))\n b = TransporteAquatico()\n b.add(q, w, e, r, t, boca, calado)\n self.veiculos.append(b)\n elif cmd == '3':\n q, w, e, r, t = entrada()\n motor = input(\"Motor: \")\n rodas = input(\"Rodas: \")\n c = TransporteTerrestre()\n c.add(q, w, e, r, t, motor, rodas)\n self.veiculos.append(c)\n elif cmd == '0':\n break\n else:\n pass\n\n def getVeiculo(self):\n for i in self.veiculos:\n i.mostra()\n\n\nc = Catalogo()\nwhile True:\n cmd = input(\"\"\"\n 1- add veiculo\n 2- ver catalogo\n\n 0- exit: \"\"\")\n if cmd == '1':\n c.addVeiculo()\n elif cmd == '2':\n c.getVeiculo()\n elif cmd == '0':\n print(\"VLW PELO APOIO.\")\n break\n else:\n print(\"Comando invalido.\")\n#\n#a = TransporteTerrestre()\n#q, w, e, r, t = entrada()\n#a.add(q, w, e, r, t, 1, 2)\n# a.mostra()\n","sub_path":"exPolimorf19/exPoli19-10.py","file_name":"exPoli19-10.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"593074219","text":"from netCDF4 import Dataset\nimport glob\nfrom os.path import basename\nfrom pyhdf.SD import *\nimport numpy as np\n\n\n# ACCESS PREDICTION DATA\nprediction = \"F:/dataset/rain_data/prediction/2d.20160419-acc/caps_nmmb_rad/ar2016041900.netacc03_010800\"\nnetcdfFile = Dataset(prediction)\nrain = netcdfFile.variables['acc03_'][:]\nrainData = rain[0, :, :] # this is the two dimensional grid, 1155x1683\nprint(rainData)\n\n\n# ACCESS VERIFICATION (REAL) DATA\nfileName = 'F:/dataset/rain_data/verification/20160419/ar2016041900.hdfacc03_010800'\n# Open file in read-only mode (default)\nhdfFile = SD(fileName, SDC.READ)\ndatasets_dic = hdfFile.datasets()\nfor idx,sds in enumerate(datasets_dic.keys()):\n print (idx,sds)\nsds_obj = hdfFile.select('acc03_') # select sds\ndata = np.array(sds_obj[0, :, :]) # get sds data\nprint(data)\nprint(data.min(), data.max(), data.mean(), data.std()) # finding min, max, mean and std\n\n# rescaling the data\nfor key, value in sds_obj.attributes().items():\n if key == 'max':\n maxValue = value\n if key == 'min':\n minValue = value\nscalef = (maxValue - minValue) / 65534.0\noriginal_data = scalef * (data + 
32767) + minValue\nprint(original_data)","sub_path":"ReadDataset.py","file_name":"ReadDataset.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"269080317","text":"import socket\nimport asyncio\nimport pickle\n\nfrom utils import get_config\nfrom cache import Cache\nfrom loguru import logger\n\nlogger.add(str, level='DEBUG', format='{time} {level} {message}')\n\n\nclass Server:\n\n def __init__(self) -> None:\n self.socket = socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM\n )\n server_address = get_config()['tcp_server']['host'], get_config()[\n 'tcp_server']['port']\n self.socket.bind(server_address)\n self.socket.listen(5)\n self.socket.setblocking(False)\n self.event_loop = asyncio.new_event_loop()\n self.cache = Cache()\n \n async def get(self, user, key) -> None:\n result = self.cache.get(key)\n await self.event_loop.sock_sendall(user, pickle.dumps(result))\n\n async def set(self, user, key, value, ttl) -> None:\n await self.event_loop.sock_sendall(user, pickle.dumps(self.cache.set(key, value, ttl)))\n\n async def hset(self, user, key, fields, ttl) -> None:\n await self.event_loop.sock_sendall(user, pickle.dumps(self.cache.hset(key, fields, ttl)))\n\n async def hget(self, user, key, field) -> None:\n result = self.cache.hget(key, field)\n await self.event_loop.sock_sendall(user, pickle.dumps(result))\n\n async def lset(self, user, key, _list, ttl) -> None:\n await self.event_loop.sock_sendall(user, pickle.dumps(self.cache.lset(key, _list, ttl)))\n\n async def lget(self, user, key, index) -> None:\n result = self.cache.lget(key, index)\n await self.event_loop.sock_sendall(user, pickle.dumps(result))\n\n async def delete(self, user, keys) -> None:\n await self.event_loop.sock_sendall(user, pickle.dumps(self.cache.delete(keys)))\n\n async def save(self, user) -> None:\n await self.event_loop.sock_sendall(user, pickle.dumps(self.cache.save()))\n\n async def listen_socket(self, user_socket, address) -> None:\n while True:\n data = await self.event_loop.sock_recv(user_socket, 2048)\n\n if not data:\n logger.debug(f'User {address} diconnected!')\n break\n\n command, *data = pickle.loads(data)\n\n if command == 'get':\n logger.debug(\n f'User {address} used the get(key={data[0]}) method')\n await self.get(user_socket, *data)\n\n if command == 'set':\n logger.debug(\n f'User {address} used the set(key={data[0]}, value={data[1]}, ttl={data[2]}) method')\n await self.set(user_socket, *data)\n\n if command == 'hget':\n logger.debug(\n f'User {address} used the hget(key={data[0]}, field={data[1]}) method')\n await self.hget(user_socket, *data)\n\n if command == 'hset':\n logger.debug(\n f'User {address} used the hset(key={data[0]}, fields={data[1]}, ttl={data[2]}) method')\n await self.hset(user_socket, *data)\n\n if command == 'lget':\n logger.debug(\n f'User {address} used the lget(key={data[0]}, index={data[1]}) method')\n await self.lget(user_socket, *data)\n\n if command == 'lset':\n logger.debug(\n f'User {address} used the lset(key={data[0]}, _list={data[1]}, ttl={data[2]}) method')\n await self.lset(user_socket, *data)\n\n if command == 'del':\n logger.debug(f'User {address} used the del({data[0]}) method')\n await self.delete(user_socket, *data)\n\n if command == 'save':\n logger.debug(f'User {address} used the save() method')\n await self.save(user_socket)\n\n async def accept_sockets(self) -> None:\n while True:\n user_socket, address = await 
self.event_loop.sock_accept(self.socket)\n logger.debug(f'User {address} connected!')\n\n self.event_loop.create_task(self.listen_socket(user_socket, address))\n\n async def main(self) -> None:\n await self.event_loop.create_task(self.accept_sockets())\n\n def run(self) -> None:\n logger.debug(f'Run server!!')\n self.event_loop.run_until_complete(self.main())\n\n def stop(self) -> None:\n self.socket.close()\n self.event_loop.close()\n logger.debug(f'Stop server!')\n\n\nif __name__ == '__main__':\n try:\n server = Server()\n server.run()\n except KeyboardInterrupt:\n server.stop()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36377544","text":"import control\nimport numpy as np\nimport scipy as sp\n\n# conda install -c conda-forge slycot\n\n\ndef __first_dim__(X):\n if sp.size(X) == 1:\n m = 1\n else:\n m = sp.size(X,0)\n return m\n\n\ndef __second_dim__(X):\n if sp.size(X) == 1:\n m = 1\n else:\n m = sp.size(X,1)\n return m\n\ndef kalman_filter(A, B, C, D, Qn, Rn, Nn=None):\n \"\"\" Design a Kalman filter for the discrete-time system\n x_{k+1} = Ax_{k} + Bu_{k} + Gw_{k}\n y_{k} = Cx_{k} + Du_{k} + Hw_{k} + v_{k}\n with known inputs u and sctochastic disturbances v, w.\n In particular, v and w are zero mean, white Gaussian noise sources with\n E[vv'] = Qn, E[ww'] = Rn, E['wv'] = Nn\n\n The Kalman filter has structure\n \\hat x_{k+1} = Ax_{k} + Bu_{k} + L(y_{k} - C\\hat x{k} - Du_{k})\n \\hat y_{k} = Cx_k + Du_k\n \"\"\"\n nx = np.shape(A)[0]\n nw = np.shape(Qn)[0] # number of uncontrolled inputs\n nu = np.shape(B)[1] - nw # number of controlled inputs\n ny = np.shape(C)[0]\n\n if Nn is None:\n Nn = np.zeros((nw, ny))\n\n E = np.eye(nx)\n Bu = B[:, 0:nu]\n Du = D[:, 0:nu]\n Bw = B[:, nu:]\n Dw = D[:, nu:]\n\n Hn = Dw @ Nn\n Rb = Rn + Hn + np.transpose(Hn) + Dw @ Qn @ np.transpose(Dw)\n Qb = Bw @ Qn @ np.transpose(Bw)\n Nb = Bw @ (Qn @ np.transpose(Dw) + Nn)\n\n # Enforce symmetry\n Qb = (Qb + np.transpose(Qb))/2\n Rb = (Rb+np.transpose(Rb))/2\n\n P,W,K, = control.dare(np.transpose(A), np.transpose(C), Qb, Rb, Nb, np.transpose(E))\n\n L = np.transpose(K) # Kalman gain\n return L,P,W\n\ndef kalman_filter_simple(A, B, C, D, Qn, Rn):\n r\"\"\"Design a Kalman filter for the discrete-time system\n\n .. math::\n \\begin{split}\n x_{k+1} &= Ax_{k} + Bu_{k} + Iw_{k}\\\\\n y_{k} &= Cx_{k} + Du_{k} + I v_{k}\n \\end{split}\n\n with known inputs u and stochastic disturbances w and v.\n In particular, w and v are zero mean, white Gaussian noise sources with\n E[vv'] = Qn, E[ww'] = Rn, E['wv'] = 0\n\n The Kalman filter has structure\n\n .. 
math::\n \\begin{split}\n \\hat x_{k+1} &= Ax_{k} + Bu_{k} + L(y_{k} - C\\hat x{k} - Du_{k})\\\\\n \\hat y_{k} &= Cx_k + Du_k\n \\end{split}\n \"\"\"\n\n nx = __first_dim__(A)\n nw = nx # number of uncontrolled inputs\n nu = __second_dim__(B) # number of controlled inputs\n ny = __first_dim__(C)\n\n P,W,K, = control.dare(np.transpose(A), np.transpose(C), Qn, Rn)\n L = np.transpose(K) # Kalman gain\n return L,P,W\n\n\nclass LinearStateEstimator:\n def __init__(self, x0, A, B, C, D, L):\n\n self.x = x0\n self.y = C @ x0\n self.A = A\n self.B = B\n self.C = C\n self.D = D\n self.L = L\n\n self.nx = __first_dim__(A)\n self.nu = __second_dim__(B) # number of controlled inputs\n self.ny = __first_dim__(C)\n\n def out_y(self,u):\n return self.y\n\n def predict(self, u):\n self.x = self.A @ self.x + self.B @u # x[k+1|k]\n self.y = self.C @ self.x + self.D @u\n return self.x\n\n def update(self, y_meas):\n self.x = self.x + self.L @ (y_meas - self.y) # x[k+1|k+1]\n return self.x\n\n def sim(self, u_seq, x=None):\n\n if x is None:\n x = self.x\n Np = __first_dim__(u_seq)\n nu = __second_dim__(u_seq)\n assert(nu == self.nu)\n\n y = np.zeros((Np,self.ny))\n x_tmp = x\n for i in range(Np):\n u_tmp = u_seq[i]\n y[i,:] = self.C @ x_tmp + self.D @ u_tmp\n x_tmp = self.A @x_tmp + self.B @ u_tmp\n\n #y[Np] = self.C @ x_tmp + self.D @ u_tmp # not really true for D. Here it is 0 anyways\n return y\n\nif __name__ == '__main__':\n\n # Constants #\n Ts = 0.2 # sampling time (s)\n M = 2 # mass (Kg)\n b = 0.3 # friction coefficient (N*s/m)\n\n Ad = np.array([\n [1.0, Ts],\n [0, 1.0 -b/M*Ts]\n ])\n\n Bd = np.array([\n [0.0],\n [Ts/M]])\n\n Cd = np.array([[1, 0]])\n Dd = np.array([[0]])\n\n [nx, nu] = Bd.shape # number of states and number or inputs\n ny = np.shape(Cd)[0]\n\n ## General design ##\n Bd_kal = np.hstack([Bd, Bd])\n Dd_kal = np.array([[0, 0]])\n Q_kal = np.array([[100]]) # nw x nw matrix, w general (here, nw = nu)\n R_kal = np.eye(ny) # ny x ny)\n L_general,P_general,W_general = kalman_filter(Ad, Bd_kal, Cd, Dd_kal, Q_kal, R_kal)\n\n # Simple design\n Q_kal = 10 * np.eye(nx)\n R_kal = np.eye(ny)\n L_simple,P_simple,W_simple = kalman_filter_simple(Ad, Bd, Cd, Dd, Q_kal, R_kal)\n\n # Simple design written in general form\n Bd_kal = np.hstack([Bd, np.eye(nx)])\n Dd_kal = np.hstack([Dd, np.zeros((ny, nx))])\n Q_kal = 10 * np.eye(nx)#np.eye(nx) * 100\n R_kal = np.eye(ny) * 1\n L_gensim,P_gensim,W_gensim = kalman_filter_simple(Ad, Bd, Cd, Dd, Q_kal, R_kal)\n\n assert(np.isclose(L_gensim[0], L_simple[0]))\n","sub_path":"test_scripts/kalman/kalman.py","file_name":"kalman.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"279391476","text":"import math\nimport os\nimport shutil\nfrom datetime import datetime\nfrom time import time\n\ndef archive_mgt(app_info, fn_log= None, fn_log_reset= None):\n result_folder_paths = app_info.RESULTS_PATH_\n archive_folder_path = app_info.FULL_ARCHIVE_PATH_\n _before_instance = True\n _start_time = time()\n\n def fn_get_elapsed_time(start_time):\n end_time = time()\n time_diff = int(end_time - start_time)\n mins = math.floor(time_diff / 60)\n secs = time_diff % 60\n start_time_human = datetime.fromtimestamp(start_time).strftime(\"%H:%M:%S\")\n end_time_human = datetime.fromtimestamp(end_time).strftime(\"%H:%M:%S\")\n time_stats = f'Time elapsed: minutes: {mins} seconds: {secs} ----- (start_time:{start_time_human}, end_time:{end_time_human})'\n return time_stats\n\n def 
fn_archive():\n nonlocal _before_instance\n try:\n\n if _before_instance:\n real_archive_path = os.path.join( archive_folder_path , 'BEFORE')\n else:\n real_archive_path = os.path.join( archive_folder_path , 'AFTER')\n if 'fn_save_model' in app_info:\n if app_info.fn_save_model is not None:\n app_info.fn_save_model()\n\n if os.path.exists(real_archive_path):\n shutil.rmtree(real_archive_path)\n\n if not _before_instance:\n time_stats = fn_get_elapsed_time(start_time= app_info.START_TIME_)\n app_info.fn_log(time_stats)\n\n\n shutil.copytree(result_folder_paths, real_archive_path, symlinks=False, ignore=None)\n\n if _before_instance:\n _before_instance = False\n if fn_log_reset is not None:\n fn_log_reset()\n\n return \"INFO:: Sucessfully Archived at {}\".format(archive_folder_path)\n\n except Exception as x:\n print(x)\n raise Exception('Exception: fn_archive')\n\n fn_archive()\n\n return fn_archive\n","sub_path":"ws/RLUtils/setup/archive_mgt.py","file_name":"archive_mgt.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"261625316","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport rhea\n\nfrom hestia.auth import AuthenticationTypes\nfrom hestia.user_path import polyaxon_user_path\n\nTMP_AUTH_TOKEN_PATH = '/tmp/.polyaxon/.authtoken'\nCLIENT_CONFIG_PATH = os.path.join(polyaxon_user_path(), '.polyaxonclient')\n\nconfig = rhea.Rhea.read_configs([\n rhea.ConfigSpec(CLIENT_CONFIG_PATH, config_type='.json', check_if_exists=False),\n os.environ,\n rhea.ConfigSpec(TMP_AUTH_TOKEN_PATH, config_type='.json', check_if_exists=False)\n])\n\nIN_CLUSTER = config.get_boolean('POLYAXON_IN_CLUSTER',\n is_optional=True,\n default=False)\nNO_OP = config.get_boolean('POLYAXON_NO_OP',\n is_optional=True,\n default=False)\nAPI_HOST = config.get_string('POLYAXON_API_HOST',\n is_optional=True)\nHTTP_PORT = config.get_int('POLYAXON_HTTP_PORT',\n is_optional=True)\nWS_PORT = config.get_int('POLYAXON_WS_PORT',\n is_optional=True)\nUSE_HTTPS = config.get_boolean('POLYAXON_USE_HTTPS',\n is_optional=True,\n default=False)\nVERIFY_SSL = config.get_boolean('POLYAXON_VERIFY_SSL',\n is_optional=True)\nAPI_HTTP_HOST = config.get_string('POLYAXON_API_HTTP_HOST',\n is_optional=True)\nAPI_WS_HOST = config.get_string('POLYAXON_API_WS_HOST',\n is_optional=True)\nSECRET_USER_TOKEN_KEY = 'POLYAXON_SECRET_USER_TOKEN' # noqa\nSECRET_USER_TOKEN = config.get_string(SECRET_USER_TOKEN_KEY,\n is_optional=True)\nSECRET_EPHEMERAL_TOKEN_KEY = 'POLYAXON_SECRET_EPHEMERAL_TOKEN' # noqa\nSECRET_EPHEMERAL_TOKEN = config.get_string(SECRET_EPHEMERAL_TOKEN_KEY,\n is_optional=True)\nINTERNAL_TOKEN_KEY = 'POLYAXON_SECRET_INTERNAL_TOKEN' # noqa\nSECRET_INTERNAL_TOKEN = config.get_string(INTERNAL_TOKEN_KEY,\n is_optional=True)\nAUTHENTICATION_TYPE = config.get_string('POLYAXON_AUTHENTICATION_TYPE',\n is_optional=True,\n default=AuthenticationTypes.TOKEN)\nAPI_VERSION = config.get_string('POLYAXON_API_VERSION',\n is_optional=True,\n default='v1')\nHASH_LENGTH = config.get_int('POLYAXON_HASH_LENGTH',\n is_optional=True,\n default=12)\nINTERNAL_HEADER = config.get_string('POLYAXON_INTERNAL_HEADER',\n is_optional=True)\nINTERNAL_HEADER_SERVICE = config.get_string('POLYAXON_INTERNAL_HEADER_SERVICE',\n is_optional=True)\nSCHEMA_RESPONSE = config.get_boolean('POLYAXON_SCHEMA_RESPONSE',\n is_optional=True,\n default=False)\nINTERNAL_HEALTH_CHECK_URL = 
config.get_string('POLYAXON_INTERNAL_HEALTH_CHECK_URL',\n is_optional=True)\n\nDEFAULT_HTTP_PORT = 80\nDEFAULT_HTTPS_PORT = 443\nMIN_TIMEOUT = config.get_int('POLYAXON_MIN_TIMEOUT',\n is_optional=True,\n default=1)\nTIMEOUT = config.get_int('POLYAXON_TIMEOUT',\n is_optional=True,\n default=20)\nREQUEST_TIMEOUT = config.get_int('POLYAXON_REQUEST_TIMEOUT',\n is_optional=True,\n default=25)\nLONG_REQUEST_TIMEOUT = config.get_int('POLYAXON_LONG_REQUEST_TIMEOUT',\n is_optional=True,\n default=3600)\nINTERVAL = config.get_int('POLYAXON_INTERVAL',\n is_optional=True,\n default=1)\nHEALTH_CHECK_INTERVAL = config.get_int('HEALTH_CHECK_INTERVAL',\n is_optional=True,\n default=60)\nQUEUE_CALL = config.get_int('POLYAXON_INTERVAL',\n is_optional=True,\n default=200)\nLOGS_LEVEL = config.get_int('POLYAXON_LOGS_LEVEL',\n is_optional=True)\n","sub_path":"polyaxon_client/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"627860417","text":"from typing import Dict\nfrom urllib.parse import urlparse\n\nfrom flask import request, render_template, redirect, flash\nfrom flask_login import current_user\n\nfrom app.config import EMAIL_DOMAIN\nfrom app.extensions import db\nfrom app.jose_utils import make_id_token\nfrom app.log import LOG\nfrom app.models import (\n Client,\n AuthorizationCode,\n ClientUser,\n GenEmail,\n RedirectUri,\n OauthToken,\n DeletedAlias,\n)\nfrom app.oauth.base import oauth_bp\nfrom app.oauth_models import (\n get_response_types,\n ResponseType,\n Scope,\n SUPPORTED_OPENID_FLOWS,\n SUPPORTED_OPENID_FLOWS_STR,\n response_types_to_str,\n)\nfrom app.utils import random_string, encode_url, convert_to_id, random_word\n\n\n@oauth_bp.route(\"/authorize\", methods=[\"GET\", \"POST\"])\ndef authorize():\n \"\"\"\n Redirected from client when user clicks on \"Login with Server\".\n This is a GET request with the following field in url\n - client_id\n - (optional) state\n - response_type: must be code\n \"\"\"\n oauth_client_id = request.args.get(\"client_id\")\n state = request.args.get(\"state\")\n scope = request.args.get(\"scope\")\n redirect_uri = request.args.get(\"redirect_uri\")\n response_mode = request.args.get(\"response_mode\")\n nonce = request.args.get(\"nonce\")\n\n try:\n response_types: [ResponseType] = get_response_types(request)\n except ValueError:\n return (\n \"response_type must be code, token, id_token or certain combination of these.\"\n \" Please see /.well-known/openid-configuration to see what response_type are supported \",\n 400,\n )\n\n if set(response_types) not in SUPPORTED_OPENID_FLOWS:\n return (\n f\"SimpleLogin only support the following OIDC flows: {SUPPORTED_OPENID_FLOWS_STR}\",\n 400,\n )\n\n if not redirect_uri:\n LOG.d(\"no redirect uri\")\n return \"redirect_uri must be set\", 400\n\n client = Client.get_by(oauth_client_id=oauth_client_id)\n if not client:\n final_redirect_uri = (\n f\"{redirect_uri}?error=invalid_client_id&client_id={oauth_client_id}\"\n )\n return redirect(final_redirect_uri)\n\n # check if redirect_uri is valid\n # allow localhost by default\n hostname, scheme = get_host_name_and_scheme(redirect_uri)\n if hostname != \"localhost\" and hostname != \"127.0.0.1\":\n # support custom scheme for mobile app\n if scheme == \"http\":\n final_redirect_uri = f\"{redirect_uri}?error=http_not_allowed\"\n return redirect(final_redirect_uri)\n\n if not RedirectUri.get_by(client_id=client.id, uri=redirect_uri):\n 
final_redirect_uri = f\"{redirect_uri}?error=unknown_redirect_uri\"\n return redirect(final_redirect_uri)\n\n # redirect from client website\n if request.method == \"GET\":\n if current_user.is_authenticated:\n suggested_email, other_emails, email_suffix = None, [], None\n suggested_name, other_names = None, []\n\n # user has already allowed this client\n client_user: ClientUser = ClientUser.get_by(\n client_id=client.id, user_id=current_user.id\n )\n user_info = {}\n if client_user:\n LOG.debug(\"user %s has already allowed client %s\", current_user, client)\n user_info = client_user.get_user_info()\n else:\n suggested_email, other_emails = current_user.suggested_emails(\n client.name\n )\n suggested_name, other_names = current_user.suggested_names()\n email_suffix = random_word()\n\n return render_template(\n \"oauth/authorize.html\",\n client=client,\n user_info=user_info,\n client_user=client_user,\n Scope=Scope,\n suggested_email=suggested_email,\n personal_email=current_user.email,\n suggested_name=suggested_name,\n other_names=other_names,\n other_emails=other_emails,\n email_suffix=email_suffix,\n EMAIL_DOMAIN=EMAIL_DOMAIN,\n )\n else:\n # after user logs in, redirect user back to this page\n return render_template(\n \"oauth/authorize_nonlogin_user.html\",\n client=client,\n next=request.url,\n Scope=Scope,\n )\n else: # user allows or denies\n if request.form.get(\"button\") == \"deny\":\n LOG.debug(\"User %s denies Client %s\", current_user, client)\n final_redirect_uri = f\"{redirect_uri}?error=deny&state={state}\"\n return redirect(final_redirect_uri)\n\n LOG.debug(\"User %s allows Client %s\", current_user, client)\n client_user = ClientUser.get_by(client_id=client.id, user_id=current_user.id)\n\n # user has already allowed this client, user cannot change information\n if client_user:\n LOG.d(\"user %s has already allowed client %s\", current_user, client)\n else:\n email_suffix = request.form.get(\"email-suffix\")\n custom_email_prefix = request.form.get(\"custom-email-prefix\")\n chosen_email = request.form.get(\"suggested-email\")\n\n suggested_name = request.form.get(\"suggested-name\")\n custom_name = request.form.get(\"custom-name\")\n\n use_default_avatar = request.form.get(\"avatar-choice\") == \"default\"\n\n gen_email = None\n if custom_email_prefix:\n # check if user can generate custom email\n if not current_user.can_create_new_alias():\n raise Exception(f\"User {current_user} cannot create custom email\")\n\n email = f\"{convert_to_id(custom_email_prefix)}.{email_suffix}@{EMAIL_DOMAIN}\"\n LOG.d(\"create custom email alias %s for user %s\", email, current_user)\n\n if GenEmail.get_by(email=email) or DeletedAlias.get_by(email=email):\n LOG.error(\"email %s already used, very rare!\", email)\n flash(f\"alias {email} already used\", \"error\")\n return redirect(request.url)\n\n gen_email = GenEmail.create(email=email, user_id=current_user.id)\n db.session.flush()\n else: # user picks an email from suggestion\n if chosen_email != current_user.email:\n gen_email = GenEmail.get_by(email=chosen_email)\n if not gen_email:\n gen_email = GenEmail.create(\n email=chosen_email, user_id=current_user.id\n )\n db.session.flush()\n\n client_user = ClientUser.create(\n client_id=client.id, user_id=current_user.id\n )\n if gen_email:\n client_user.gen_email_id = gen_email.id\n\n if custom_name:\n LOG.d(\n \"use custom name %s for user %s client %s\",\n custom_name,\n current_user,\n client,\n )\n client_user.name = custom_name\n elif suggested_name != current_user.name:\n LOG.d(\n 
\"use another name %s for user %s client %s\",\n custom_name,\n current_user,\n client,\n )\n client_user.name = suggested_name\n\n if use_default_avatar:\n # use default avatar\n LOG.d(\"use default avatar for user %s client %s\", current_user, client)\n client_user.default_avatar = True\n\n db.session.flush()\n LOG.d(\"create client-user for client %s, user %s\", client, current_user)\n\n redirect_args = {}\n\n if state:\n redirect_args[\"state\"] = state\n else:\n LOG.warning(\n \"more security reason, state should be added. client %s\", client\n )\n\n if scope:\n redirect_args[\"scope\"] = scope\n\n auth_code = None\n if ResponseType.CODE in response_types:\n # Create authorization code\n auth_code = AuthorizationCode.create(\n client_id=client.id,\n user_id=current_user.id,\n code=random_string(),\n scope=scope,\n redirect_uri=redirect_uri,\n response_type=response_types_to_str(response_types),\n )\n db.session.add(auth_code)\n redirect_args[\"code\"] = auth_code.code\n\n oauth_token = None\n if ResponseType.TOKEN in response_types:\n # create access-token\n oauth_token = OauthToken.create(\n client_id=client.id,\n user_id=current_user.id,\n scope=scope,\n redirect_uri=redirect_uri,\n access_token=generate_access_token(),\n response_type=response_types_to_str(response_types),\n )\n db.session.add(oauth_token)\n redirect_args[\"access_token\"] = oauth_token.access_token\n\n if ResponseType.ID_TOKEN in response_types:\n redirect_args[\"id_token\"] = make_id_token(\n client_user,\n nonce,\n oauth_token.access_token if oauth_token else None,\n auth_code.code if auth_code else None,\n )\n\n db.session.commit()\n\n # should all params appended the url using fragment (#) or query\n fragment = False\n\n if response_mode and response_mode == \"fragment\":\n fragment = True\n\n # if response_types contain \"token\" => implicit flow => should use fragment\n # except if client sets explicitly response_mode\n if not response_mode:\n if ResponseType.TOKEN in response_types:\n fragment = True\n\n # construct redirect_uri with redirect_args\n return redirect(construct_url(redirect_uri, redirect_args, fragment))\n\n\ndef construct_url(url, args: Dict[str, str], fragment: bool = False):\n for i, (k, v) in enumerate(args.items()):\n # make sure to escape v\n v = encode_url(v)\n\n if i == 0:\n if fragment:\n url += f\"#{k}={v}\"\n else:\n url += f\"?{k}={v}\"\n else:\n url += f\"&{k}={v}\"\n\n return url\n\n\ndef generate_access_token() -> str:\n \"\"\"generate an access-token that does not exist before\"\"\"\n access_token = random_string(40)\n\n if not OauthToken.get_by(access_token=access_token):\n return access_token\n\n # Rerun the function\n LOG.warning(\"access token already exists, generate a new one\")\n return generate_access_token()\n\n\ndef get_host_name_and_scheme(url: str) -> (str, str):\n \"\"\"http://localhost:7777?a=b -> (localhost, http) \"\"\"\n url_comp = urlparse(url)\n\n return url_comp.hostname, url_comp.scheme\n","sub_path":"app/oauth/views/authorize.py","file_name":"authorize.py","file_ext":"py","file_size_in_byte":11048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615612896","text":"import math\nimport os\n\ndef r0(n_cells, half_cell_length):\n phi = 2.*math.pi/n_cells/4.\n radius = half_cell_length/2./math.sin(phi)\n print(\"R0\", radius)\n return radius\n\n# Machida TUNES 0.232180291582827 -0.183879862734979\n# Rogers TUNES 0.2389296203830 0.17677569135392243\n\ndef get_baseline_substitution():\n R0 = r0(15, 3)\n 
baseline = {\n # ring\n \"__n_cells__\":15,\n \"__radius__\":R0, #m\n \"__drift_1__\":1.8,\n \"__drift_2__\":1.8,\n # beam\n \"__energy__\":400., # MeV\n # tracking\n \"__step_size__\":0.001, # m\n \"__n_turns__\":0.2,\n # main magnets\n \"__bf__\":-3.0*1.64, # T\n \"__bd__\":+1.0*1.64, # T\n \"__d_length__\":1.2, # m\n \"__f_length__\":1.2, # m\n \"__d_offset__\":-0.347745, # m\n \"__f_offset__\":0.347745, # m\n \"__d_end_length__\":0.3, # m\n \"__f_end_length__\":0.3, # m\n \"__m_index__\":0.87721, # m^-1\n \"__max_x_power__\":10,\n \"__neg_extent__\":1.0, # m\n \"__pos_extent__\":3.0, # m\n # NOTE BB length 0.125 m -> width 1.0 m; length 2.0 m; double for safety\n \"__magnet_width__\":2.0, # m\n \"__bb_length__\":8.0, # m\n # field maps\n \"__do_magnet_field_maps__\":True,\n \"__cartesian_x_min__\":-20.0, # m\n \"__cartesian_dx__\":40/1000., # m (1001 steps)\n \"__cartesian_y_min__\":-20.0, # m\n \"__cartesian_dy__\":40/1000., # m (1001 steps)\n #\"__cartesian_x_min__\":-6.0, # m\n #\"__cartesian_dx__\":6/1000., # m (1001 steps)\n #\"__cartesian_y_min__\":R0-3.0, # m\n #\"__cartesian_dy__\":6/1000., # m (1001 steps)\n }\n return baseline\n\n\nseeds = []\nfor ri in range(7):\n for rpi in range(3):\n for zi in range(3):\n r = 14000.+50.*ri\n rp = -0.1-0.1*rpi\n z = 1150.+50.*zi\n seeds.append([r, rp, z, 0.0])\n\nclass Config(object):\n def __init__(self):\n self.find_closed_orbits = {\n \"seed\":[[14222.486846033247, 235.56577518528238, 67.33679096440049, 6.319732909645213]],\n #[[14219.523614290156, -0.2573059678153996*1000, 72.17318674617552, -0.0074218697584373106*1000]], # 631.531\n \"deltas\":[0.01, 0.01, 0.01, 0.01],\n \"adapt_deltas\":False,\n \"output_file\":\"closed_orbits_cache\",\n \"subs_overrides\":{\"__n_turns__\":0.11, \"__do_magnet_field_maps__\":\"False\"},\n \"final_subs_overrides\":{\"__n_turns__\":0.21, \"__do_magnet_field_maps__\":\"True\"},\n \"root_batch\":0,\n \"max_iterations\":5,\n \"tolerance\":0.01,\n \"ds_cell\":1,\n \"do_plot_orbit\":False,\n \"run_dir\":\"tmp/find_closed_orbits\",\n \"probe_files\":\"TESTPROBE*.loss\",\n \"overwrite\":True,\n \"orbit_file\":\"TestRing-trackOrbit.dat\",\n }\n self.find_tune = {\n \"run_dir\":\"tmp/find_tune/\",\n \"probe_files\":\"RINGPROBE*.loss\",\n \"subs_overrides\":{\"__n_turns__\":10.1, \"__do_magnet_field_maps__\":\"True\"},\n \"root_batch\":0,\n \"delta_1\":5,\n \"delta_2\":5,\n \"max_n_cells\":0.1,\n \"output_file\":\"find_tune\",\n \"row_list\":None,\n \"axis\":None,\n }\n self.find_da = {\n \"run_dir\":\"tmp/find_da/\",\n \"probe_files\":\"RINGPROBE01.loss\",\n \"subs_overrides\":{\"__n_turns__\":50.1, \"__no_field_maps__\":\"// \"},\n \"get_output_file\":\"get_da\",\n \"scan_output_file\":\"scan_da\",\n \"row_list\":None,\n \"scan_x_list\":[],\n \"scan_y_list\":[],\n \"x_seed\":1.,\n \"y_seed\":1.,\n \"min_delta\":1.0,\n \"max_delta\":500.,\n \"required_n_hits\":50,\n \"max_iterations\":10,\n }\n \n self.substitution_list = [get_baseline_substitution()]\n \n self.run_control = {\n \"find_closed_orbits_4d\":True,\n \"find_tune\":False,\n \"find_da\":False,\n \"find_bump_parameters\":False,\n \"track_bump\":False,\n \"clean_output_dir\":False,\n \"output_dir\":os.path.join(os.getcwd(), \"output/test_baseline\"),\n \"root_verbose\":6000,\n }\n\n self.tracking = {\n \"mpi_exe\":None, #os.path.expandvars(\"${OPAL_ROOT_DIR}/external/install/bin/mpirun\"),\n \"beam_file_out\":\"disttest.dat\",\n \"n_cores\":4,\n \"links_folder\":[\"VerticalFFA\",], # link relative to lattice/VerticalFFA.in\n 
\"lattice_file\":os.path.join(os.getcwd(), \"lattice/JanFFA.in\"),\n \"lattice_file_out\":\"VerticalFFAMagnet.tmp\",\n \"opal_path\":os.path.expandvars(\"${OPAL_BUILD_PATH}/tests/opal_unit_tests\"),\n \"flags\":[\"--gtest_filter=VerticalFFAMagnetTrackingTest.*\"],\n \"tracking_log\":\"log\",\n \"step_size\":1.,\n \"pdg_pid\":2212,\n \"clear_files\":None,\n \"verbose\":False,\n \"file_format\":\"ascii\",\n }\n\n\n","sub_path":"scripts/config/old_config/config_test_baseline.py","file_name":"config_test_baseline.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"281551708","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport glob\nfrom math import ceil\nfrom multiprocessing import Process\nimport os\nimport re\nimport requests\nimport sys\nimport time\nfrom urllib.parse import urlparse\n\n\nDEFAULT_FILENAME = 'file'\nDEFAULT_PROCESS_NUM = 10\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('URI', help='URI want to download resource.')\n parser.add_argument('-n', type=int, help='Number of process will be forked.')\n parser.add_argument('-o', help='Output file name.')\n\n if len(sys.argv) < 2:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n uri = args.URI\n if args.n:\n process_num = args.n\n else:\n process_num = DEFAULT_PROCESS_NUM\n print('Number of process: {}'.format(process_num))\n\n res = requests.head(uri)\n\n # get information from header\n content_length = int(res.headers.get('Content-Length'))\n content_disposition = res.headers.get('Content-Disposition')\n accept_ranges = res.headers.get('Accept-Ranges')\n if process_num > content_length:\n chunk_size = 1\n else:\n chunk_size = ceil(content_length / process_num)\n\n if args.o:\n filename = args.o\n # get filename from Content-Disposition header\n elif content_disposition:\n regex = re.compile(r'filename=\"(.+)\"')\n match_obj = regex.search(content_disposition)\n if match_obj:\n filename = match_obj.group(1)\n else:\n filename = DEFAULT_FILENAME + str(time.time()).replace('.', '')\n else:\n component = urlparse(uri)\n if component.path:\n filename = component.path.split('/')[-1]\n else:\n filename = DEFAULT_FILENAME + str(time.time()).replace('.', '')\n\n start = 0\n end = start + chunk_size\n if accept_ranges and accept_ranges != 'none':\n processes = []\n for i in range(process_num):\n p = Process(target=download, args=(uri, i, start, end))\n processes.append(p)\n p.start()\n\n start = end + 1\n end = start + chunk_size\n if end >= content_length:\n end = content_length\n # wait all process finish\n while True:\n if not any([p.is_alive() for p in processes]):\n break\n time.sleep(0.1)\n else:\n print('{} cannot range request.'.format(uri), file=sys.stderr)\n sys.exit(0)\n\n combine(filename)\n\n\ndef download(uri, index, start, end):\n headers = {'Range': 'bytes={}-{}'.format(start, end)}\n res = requests.get(uri, headers=headers)\n\n if res.status_code == 206:\n # Success\n with open('data.{}'.format(index), 'wb') as f:\n f.write(res.content)\n else:\n # Failure\n # TODO: resend request\n print('failure', file=sys.stderr)\n\n\ndef combine(filename):\n files = glob.glob('data.*')\n\n with open(filename, 'wb') as wf:\n for file in files:\n with open(file, 'rb') as rf:\n wf.write(rf.read())\n os.remove(file)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"417221328","text":"from tv_show_folder_rename_sonarr.sonarr_api import SonarrApi\nfrom pathlib import Path\nimport queue\n\n\ndef stop_rename(config):\n \"\"\"\n Checks runner_queue if the user asked to interrupt the loop\n :param config: GuiConfig\n :return: bool True if message to stop was received\n \"\"\"\n if config.runner_queue is not None:\n try:\n message = config.runner_queue.get_nowait()\n except queue.Empty: # get_nowait() will get exception when Queue is empty\n message = None # break from the loop if no more messages are queued up\n else:\n message = None\n\n if message == 'stop':\n print('received stop')\n config.logger.log('Rename of Series folders has been aborted by user', 7)\n config.gui_queue.put(('Stop', 101))\n config.gui_queue.put(('Run', 100))\n return True\n else:\n return False\n\n\ndef run_rename(config):\n connection = SonarrApi(config)\n language_profiles_json = connection.get_all_language_profiles()\n language_profiles = {}\n for element in language_profiles_json:\n language_profiles[element['id']] = str(element['name']).lower()\n config.logger.log('Getting list of all shows from Sonarr')\n shows = connection.get_all_shows()\n config.logger.log('Starting to loop through shows')\n if stop_rename(config) is True:\n return\n for show in shows:\n if stop_rename(config) is True:\n return\n show_id = show['id']\n config.logger.log('Working on show_id ' + str(show_id))\n show_details = connection.get_show(show_id)\n show_path = Path(show_details['path'])\n new_path = get_new_path(config, show_path, show_details, language_profiles)\n if config.get('preview') is False:\n if show_path.exists():\n if not new_path.exists():\n show_path.rename(new_path)\n show_details['path'] = str(new_path)\n result = connection.update_show(show_details)\n config.logger.log('Moved show to \"' + str(new_path) + '\". Sonarr update response was: ' + str(result))\n else:\n config.logger.log('Path \"' + str(new_path) + '\" already exists. Skipping moving and updating the show',\n 8)\n else:\n show_details['path'] = str(new_path)\n result = connection.update_show(show_details)\n config.logger.log('Path \"' + str(show_path) + '\" does not exist. Skipped folder move. Sonarr update response was: ' + str(result), 6)\n\n else:\n config.logger.log('Preview run. 
New path for show would be: ' + str(new_path))\n config.logger.log('Finished renaming the tv shows known to sonarr')\n if config.gui_queue is not None:\n config.gui_queue.put(('Stop', 101))\n config.gui_queue.put(('Run', 100))\n\n\ndef get_new_path(config, show_path, show_details, language_profiles):\n new_name = ''\n for element in config.get('new_name_components'):\n if element[1] is True:\n new_name += str(show_details[element[0]])\n else:\n new_name += element[0]\n\n # Make the names windows friendly, in case i check via smb share\n illegal_chars = ['/', '\\\\', ':', '?', '*']\n for illegal_char in illegal_chars:\n new_name = new_name.replace(illegal_char, ' ')\n new_name = \" \".join(new_name.split())\n\n # some shows already have the year in the title\n # check if year is duplicated -- will only work if the new name is supposed to end with ' (year)'\n show_name_no_year = new_name[:-7]\n year = new_name[-7:]\n if show_name_no_year.endswith(year):\n new_name = show_name_no_year\n\n # perform space replacement if required\n if config.get('replace_space') is True:\n new_name = new_name.replace(' ', config.get('replacement_char'))\n\n if config.get('replace_root') is True:\n new_path = Path(config.get('new_root'))\n else:\n new_path = show_path.parent\n\n if config.get('use_language_in_path') is True:\n lang_id = show_details['languageProfileId']\n new_path = new_path / language_profiles[lang_id]\n\n new_path = new_path / new_name\n return new_path\n","sub_path":"tv_show_folder_rename_sonarr/rename_shows.py","file_name":"rename_shows.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"129221608","text":"import pandas as pd\nfrom bokeh.models import ColumnDataSource\n\n\ndef invoices_customer(customer_id, selected_year):\n '''\n Analyzes yearly invoice amount for a given customer\n '''\n\n # Importing non-man-hours invoice values and saving the values as a dataframe\n dfInvoices = pd.read_csv('static/munck_textfiles/df/df_invoices.txt', sep=',',\n header=0, low_memory=False, encoding=\"ISO-8859-1\",\n parse_dates=['date'], index_col=[0])\n\n # creating a new df based on G-projects (repairs, maintenance)\n dfInvoicesG = dfInvoices[dfInvoices.product_group == 1110]\n # creating a new df based on B-projects (annual & semi-annual inspections)\n dfInvoicesB = dfInvoices[dfInvoices.product_group == 1120]\n\n # variables\n selected_customer = customer_id\n selected_year = selected_year\n\n # Metric values for each product group:\n # total invoices to the selected customer in the selected year\n invoices_total = dfInvoices.loc[(dfInvoices.index.year == selected_year) & (\n dfInvoices['customer_id'] == selected_customer)].value.sum()\n # total G-projects invoices to the selected customer in the selected year\n invoices_g = dfInvoicesG.loc[(dfInvoicesG.index.year == selected_year) & (\n dfInvoicesG['customer_id'] == selected_customer)].value.sum()\n # total B-projects invoices to the selected customer in the selected year\n invoices_b = dfInvoicesB.loc[(dfInvoicesB.index.year == selected_year) & (\n dfInvoicesB['customer_id'] == selected_customer)].value.sum()\n\n dfInvoices = dfInvoices.loc[(\n dfInvoices['customer_id'] == selected_customer)]\n dfInvoices = dfInvoices.groupby(dfInvoices.index.year).sum()\n dict_invoices = {'years': dfInvoices.index.tolist(),\n 'values': dfInvoices.value.tolist()}\n\n # data for the chart\n source = ColumnDataSource(dict_invoices)\n\n invoices = {'source': source,\n 
'invoices_total': invoices_total,\n 'invoices_g': invoices_g,\n 'invoices_b': invoices_b\n }\n\n return(invoices)\n\n\ndef hours_customer(customer_id, selected_year):\n '''\n Analyzes yearly man-hours spent at a given customer\n '''\n\n dfHours = pd.read_csv('static/munck_textfiles/df/df_hours.txt', sep=',',\n header=0, low_memory=False, encoding=\"ISO-8859-1\",\n parse_dates=['date'], index_col=[0])\n\n # Creating two new dataframes based for project G & B.\n dfHoursG = dfHours[dfHours.project_id.str.startswith('G')].copy()\n dfHoursB = dfHours[dfHours.project_id.str.startswith('B')].copy()\n\n # variables\n selected_customer = customer_id\n selected_year = selected_year\n\n # Assign variables for man hours\n hours_total = dfHours.loc[(dfHours.index.year == selected_year) & (\n dfHours['customer_id'] == selected_customer)].hours.sum()\n hours_g = dfHoursG.loc[(dfHoursG.index.year == selected_year) & (\n dfHoursG['customer_id'] == selected_customer)].hours.sum()\n hours_b = dfHoursB.loc[(dfHoursB.index.year == selected_year) & (\n dfHoursB['customer_id'] == selected_customer)].hours.sum()\n\n # Assign variables for total man hours costs\n hours_cost_total = dfHours.loc[(dfHours.index.year == selected_year) & (\n dfHours['customer_id'] == selected_customer)].cost.sum()\n hours_cost_g = dfHoursG.loc[(dfHoursG.index.year == selected_year) & (\n dfHoursG['customer_id'] == selected_customer)].cost.sum()\n hours_cost_b = dfHoursB.loc[(dfHoursB.index.year == selected_year) & (\n dfHoursB['customer_id'] == selected_customer)].cost.sum()\n\n # df for customer = customer_id\n dfHours = dfHours.loc[(dfHours['customer_id'] == selected_customer)]\n dfHours = dfHours.groupby(dfHours.index.year).sum()\n dict_hours = {'years': dfHours.index.tolist(),\n 'hours': dfHours.hours.tolist()}\n\n # data for chart\n source = ColumnDataSource(dict_hours)\n\n man_hours_dict = {'source': source,\n 'hours_total': hours_total,\n 'hours_g': hours_g,\n 'hours_b': hours_b,\n 'hours_cost_total': hours_cost_total,\n 'hours_cost_g': hours_cost_g,\n 'hours_cost_b': hours_cost_b}\n\n return(man_hours_dict)\n\n\ndef costs_customer(customer_id, selected_year):\n '''\n Analyzes yearly costs (except man-hours) on projects for a given customer\n '''\n\n dfCosts = pd.read_csv('static/munck_textfiles/df/df_costs.txt', sep=',',\n header=0, low_memory=False, encoding=\"ISO-8859-1\",\n parse_dates=['date'], index_col=[0])\n\n # Creating two new dataframes based for project G & B.\n dfCostsG = dfCosts[dfCosts.project_id.str.startswith('G')].copy()\n dfCostsB = dfCosts[dfCosts.project_id.str.startswith('B')].copy()\n\n # passsed through variables\n selected_customer = customer_id\n selected_year = selected_year\n\n # cost variables\n costs_total = dfCosts.loc[(dfCosts.index.year == selected_year) & (\n dfCosts['customer_id'] == selected_customer)].cost.sum()\n costs_g = dfCostsG.loc[(dfCostsG.index.year == selected_year) & (\n dfCostsG['customer_id'] == selected_customer)].cost.sum()\n costs_b = dfCostsB.loc[(dfCostsB.index.year == selected_year) & (\n dfCostsB['customer_id'] == selected_customer)].cost.sum()\n\n costs_dict = {'costs_total': costs_total,\n 'costs_g': costs_g,\n 'costs_b': costs_b}\n\n return costs_dict\n\n\ndef invoices_customer_dept(selected_department, selected_year, product_groups):\n '''\n - Analyzes the total invoiced sums for one or more product groups in\n in a selected department for this year.\n\n - Analyzes the top ten customers within one or more product groups in a\n selected department in a chosen year.\n 
'''\n\n # importing file\n dfInvoices = pd.read_csv('static/munck_textfiles/df/df_invoices.txt', sep=',',\n header=0, low_memory=False, encoding=\"ISO-8859-1\",\n parse_dates=['date'], index_col=[0])\n\n # importing customer list\n dfCustomers = pd.read_csv('static/munck_textfiles/Kunde.txt',\n encoding=\"ISO-8859-1\", sep=\";\", header=None, low_memory=False)\n dfCustomers = dfCustomers[[1, 3]]\n dfCustomers.rename(\n columns={1: 'customer_id', 3: 'customer_name'}, inplace=True)\n dfCustomers = dfCustomers[dfCustomers.customer_id != 'Kunder Nor']\n dfCustomers['customer_id'] = dfCustomers['customer_id'].astype('float')\n\n # merging dfInvoices with dfCustomers\n dfInvoices = dfInvoices.reset_index().merge(\n dfCustomers, how='left', on=['customer_id']).set_index('date')\n\n # variables\n selected_dept = selected_department\n selected_year = selected_year\n service_product_groups = product_groups\n\n # ceating a new def with the specified product groups\n dfInvoices = dfInvoices[dfInvoices['product_group'].isin(\n service_product_groups)]\n\n # creating new df and filter by department\n dfInvoices = dfInvoices.loc[(dfInvoices['dept'] == selected_dept)]\n # grouping the df by year\n dfInvoices_grouped = dfInvoices.groupby(dfInvoices.index.year).sum()\n\n # creating new df and filter by year\n dfInvoices_month = dfInvoices.loc[dfInvoices.index.year == selected_year]\n # grouping the df by month\n dfInvoices_month_grouped = dfInvoices_month.groupby(\n dfInvoices_month.index.month).sum()\n\n # creating a new df sorted by year\n dfInvoices_sorted = dfInvoices.loc[dfInvoices.index.year == selected_year]\n\n # GROUPING BY INVOICE VALUES\n # grouping the df by customer_name and showing the sum of invoice values\n dfInvoices_sorted_values = dfInvoices_sorted.groupby(\n dfInvoices_sorted.customer_name).value.sum()\n # ========================= TOP TEN CUSTOMERS =========================\n top_ten = dfInvoices_sorted_values.sort_values(\n ascending=False).head(n=10)\n # Creating a list out of the company names\n company_name_list = top_ten.index.tolist()\n # capitalizing first letters of each word\n company_name_list = [item.title() for item in company_name_list]\n # creating a list of the invoice sums\n value_list = top_ten.tolist()\n # changing the number format of the values\n value_list = [\"{:,}\".format(item) for item in value_list]\n\n # creating the dictionary for top ten customers\n top_ten_dict = {'names': company_name_list,\n 'values': value_list}\n\n # ========================= DATA FOR GRAPHING DEPARTMENT INVOICES =========================\n invoices_year_dict = {'years': dfInvoices_grouped.index.tolist(),\n 'values': dfInvoices_grouped.value.tolist()}\n\n # creating dictionary for storing montly invoicing values\n invoices_month_dict = {'months': dfInvoices_month_grouped.index.tolist(),\n 'values': dfInvoices_month_grouped.value.tolist()}\n\n department_dict = {'top_ten_dict': top_ten_dict,\n 'invoices_year_dict': invoices_year_dict,\n 'invoices_month_dict': invoices_month_dict,\n 'selected_dept': selected_dept,\n 'selected_year': selected_year}\n\n return department_dict\n","sub_path":"src/dashboard/pandas/customer_analysis.py","file_name":"customer_analysis.py","file_ext":"py","file_size_in_byte":9434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"379435916","text":"\"\"\"\nThis part of code is the DQN brain, which is a brain of the agent.\nAll decisions are made in here.\nUsing Tensorflow to build the neural network.\n\nView 
more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\nUsing:\nTensorflow: 1.0\ngym: 0.7.3\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport pickle\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n\n# Deep Q Network off-policy\nclass DeepQNetwork:\n def __init__(\n self,\n n_actions,\n n_features,\n sess,\n agent_id,\n num_training,\n learning_rate=0.01,\n reward_decay=0.9,\n replace_target_iter=300,\n memory_size=500,\n batch_size=32,\n save_model_freq=100,\n max_epsilon=1,\n min_epsilon=0,\n load_model=False,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.sess = sess\n self.agent_id = agent_id\n self.num_training = num_training\n self.lr = learning_rate\n self.gamma = reward_decay\n self.replace_target_iter = replace_target_iter\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.save_model_freq = save_model_freq\n self.max_epsilon = max_epsilon\n self.min_epsilon = min_epsilon\n self.epsilon = self.max_epsilon\n self.load_model = load_model\n\n # total learning step\n self.learn_step_counter = 0\n self.episode_rew_agent = 0\n self.episode_rew_all = 0\n self.episode = 0\n\n # initialize zero memory [s, a, r, s_]\n self.memory = np.zeros((self.memory_size, n_features * 2 + 2))\n # consist of [target_net, evaluate_net]\n self._build_net()\n t_params = tf.get_collection('target_net_params')\n e_params = tf.get_collection('eval_net_params')\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n self.cost_his = []\n if(self.load_model):\n saver = tf.train.Saver(max_to_keep=100000000)\n model_load_steps = 420000\n model_file_load = os.path.join(\"models/\", \"agent_No_\" + str(self.agent_id) + \"/\",\n str(model_load_steps) + \"_\" + \"model_segment_training/\", \"8m\")\n saver.restore(self.sess, model_file_load)\n print(\"model trained for %s steps of agent %s have been loaded\"%(model_load_steps, self.agent_id))\n else:\n self.sess, self.saver, self.summary_placeholders, self.update_ops, self.summary_op, self.summary_writer, self.summary_vars = self.init_sess()\n\n # 将网络计算的初始化工作完成\n def init_sess(self):\n # Summary for tensorboard\n summary_placeholders, update_ops, summary_op, summary_vars = self.setup_summary()\n fileWritePath = os.path.join(\"logs/\", \"agent_No_\" + str(self.agent_id) + \"/\")\n summary_writer = tf.summary.FileWriter(fileWritePath, self.sess.graph)\n self.sess.run(tf.global_variables_initializer())\n\n # Load the file if the saved file exists\n\n saver = tf.train.Saver(max_to_keep=100000000)\n\n return self.sess, saver, summary_placeholders, update_ops, summary_op, summary_writer, summary_vars\n\n def _build_net(self):\n # ------------------ build evaluate_net ------------------\n self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input\n self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss\n with tf.variable_scope('eval_net'):\n # c_names(collections_names) are the collections to store variables 512*512的网络结构\n c_names, n_l1, n_l2, w_initializer, b_initializer = \\\n ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 256, 256, \\\n tf.contrib.layers.xavier_initializer(), tf.contrib.layers.xavier_initializer()\n # tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers\n\n # first layer. 
collections is used later when assign to target net\n with tf.variable_scope('l1'):\n w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)\n b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)\n\n # second layer. collections is used later when assign to target net\n with tf.variable_scope('l2'):\n w2 = tf.get_variable('w2', [n_l1, n_l2], initializer=w_initializer, collections=c_names)\n b2 = tf.get_variable('b2', [1, n_l2], initializer=b_initializer, collections=c_names)\n l2 = tf.nn.relu(tf.matmul(l1, w2) + b2)\n\n # third layer. collections is used later when assign to target net\n with tf.variable_scope('l3'):\n w3 = tf.get_variable('w3', [n_l2, self.n_actions], initializer=w_initializer, collections=c_names)\n b3 = tf.get_variable('b3', [1, self.n_actions], initializer=b_initializer, collections=c_names)\n self.q_eval = tf.matmul(l2, w3) + b3\n\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))\n\n with tf.variable_scope('train'):\n # self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n self._train_op = tf.train.AdamOptimizer(self.lr, epsilon=1e-02).minimize(self.loss)\n # ------------------ build target_net ------------------\n self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input\n with tf.variable_scope('target_net'):\n # c_names(collections_names) are the collections to store variables\n c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n\n # first layer. collections is used later when assign to target net\n with tf.variable_scope('l1'):\n w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)\n b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)\n\n # second layer. collections is used later when assign to target net\n with tf.variable_scope('l2'):\n w2 = tf.get_variable('w2', [n_l1, n_l2], initializer=w_initializer, collections=c_names)\n b2 = tf.get_variable('b2', [1, n_l2], initializer=b_initializer, collections=c_names)\n l2 = tf.nn.relu(tf.matmul(l1, w2)) + b2\n\n # third layer. 
collections is used later when assign to target net\n with tf.variable_scope('l3'):\n w3 = tf.get_variable('w3', [n_l2, self.n_actions], initializer=w_initializer, collections=c_names)\n b3 = tf.get_variable('b3', [1, self.n_actions], initializer=b_initializer, collections=c_names)\n self.q_next = tf.matmul(l2, w3) + b3\n\n def store_transition(self, s, a, r, s_):\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n\n transition = np.hstack((s, [a, r], s_)) #往水平方向平铺,所以是一行数\n\n # replace the old memory with new memory\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n\n self.memory_counter += 1\n\n def choose_action(self, observation):\n # to have batch dimension when feed into tf placeholder\n observation = observation[np.newaxis, :]\n if(self.load_model == False):\n if np.random.uniform() < self.epsilon:\n # forward feed the observation and get q value for every actions\n action = np.random.randint(0, self.n_actions)\n else:\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})\n action = np.argmax(actions_value)\n else:\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})\n action = np.argmax(actions_value)\n return action\n\n def learn(self):\n # check to replace target parameters\n if(self.memory_counter < self.batch_size):\n return\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.sess.run(self.replace_target_op)\n # print('\\ntarget_params_replaced\\n')\n\n # sample batch memory from all memory\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n batch_memory = self.memory[sample_index, :]\n\n q_next, q_eval = self.sess.run(\n [self.q_next, self.q_eval],\n feed_dict={\n self.s_: batch_memory[:, -self.n_features:], # fixed params\n self.s: batch_memory[:, :self.n_features], # newest params\n })\n\n # change q_target w.r.t q_eval's action\n q_target = q_eval.copy()\n\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n eval_act_index = batch_memory[:, self.n_features].astype(int)\n reward = batch_memory[:, self.n_features + 1] #batch个行,第n_features + 1列的数,那正好是reward\n\n q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)\n\n \"\"\"\n For example in this batch I have 2 samples and 3 actions:\n q_eval =\n [[1, 2, 3],\n [4, 5, 6]]\n\n q_target = q_eval =\n [[1, 2, 3],\n [4, 5, 6]]\n\n Then change q_target with the real q_target value w.r.t the q_eval's action.\n For example in:\n sample 0, I took action 0, and the max q_target value is -1;\n sample 1, I took action 2, and the max q_target value is -2:\n q_target =\n [[-1, 2, 3],\n [4, 5, -2]]\n\n So the (q_target - q_eval) becomes:\n [[(-1)-(1), 0, 0],\n [0, 0, (-2)-(6)]]\n\n We then backpropagate this error w.r.t the corresponding action to network,\n leave other action as error=0 cause we didn't choose it.\n \"\"\"\n\n # train eval network\n _, self.cost = self.sess.run([self._train_op, self.loss],\n feed_dict={self.s: batch_memory[:, :self.n_features],\n self.q_target: q_target})\n\n self.cost_his.append(self.cost)\n\n self.learn_step_counter += 1\n\n self.plotting()\n\n # Decreasing epsilon\n if self.epsilon > self.min_epsilon:\n self.epsilon -= self.max_epsilon/self.num_training\n else:\n self.epsilon = self.min_epsilon\n\n\n if (self.learn_step_counter % self.save_model_freq == 0):\n model_file_save = 
os.path.join(\"models/\", \"agent_No_\"+str(self.agent_id)+\"/\", str(self.learn_step_counter) + \"_\" + \"model_segment_training/\", \"8m\")\n dirname = os.path.dirname(model_file_save)\n if any(dirname):\n os.makedirs(dirname, exist_ok=True)\n self.saver.save(self.sess, model_file_save)\n print(\"Model trained for %s times is saved\"%self.learn_step_counter)\n\n # save data of replay buffer\n obj = self.memory\n filename = 'buffer_agent'+str(self.agent_id)+'.txt'\n file = open(filename, 'wb')\n pickle.dump(obj, file)\n file.close()\n\n def setup_summary(self):\n cost = tf.Variable(0.)\n eps_rew_agent = tf.Variable(0.)\n eps_rew_all = tf.Variable(0.)\n\n tf.summary.scalar(\"cost\", cost)\n tf.summary.scalar(\"eps_rew_agent\", eps_rew_agent)\n tf.summary.scalar(\"eps_rew_all\", eps_rew_all)\n summary_vars = [cost, eps_rew_agent, eps_rew_all]\n\n summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]\n\n update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]\n summary_op = tf.summary.merge_all()\n\n return summary_placeholders, update_ops, summary_op, summary_vars\n\n def plotting(self):\n tensorboard_info = [self.cost, self.episode_rew_agent, self.episode_rew_all]\n vars_plot = []\n for i in range(len(tensorboard_info)):\n vars_plot.append(self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(tensorboard_info[i])}))\n\n summary_1 = tf.Summary(value=[tf.Summary.Value(tag=\"cost\", simple_value=vars_plot[0])])\n summary_2 = tf.Summary(value=[tf.Summary.Value(tag=\"eps_rew_agent\", simple_value=vars_plot[1])])\n summary_3 = tf.Summary(value=[tf.Summary.Value(tag=\"eps_rew_all\", simple_value=vars_plot[2])])\n\n self.summary_writer.add_summary(summary_1, self.learn_step_counter)\n self.summary_writer.add_summary(summary_2, self.episode)\n self.summary_writer.add_summary(summary_3, self.episode)\n\n def plot_cost(self):\n import matplotlib.pyplot as plt\n plt.plot(np.arange(len(self.cost_his)), self.cost_his)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n plt.show()\n\n def get_episode_reward(self, eps_r_agent, eps_r_all, episode):\n self.episode_rew_agent = eps_r_agent\n self.episode_rew_all = eps_r_all\n self.episode = episode\n","sub_path":"source/DQN_smac_tf/RL_brain.py","file_name":"RL_brain.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"331200940","text":"# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport base64\nimport logging\nimport os\nimport re\nimport tempfile\n\nimport click\nimport yaml\n\nfrom c7n.resources import load_resources\nfrom c7n.utils import local_session\nfrom c7n_azure.constants import ENV_CONTAINER_EVENT_QUEUE_NAME, ENV_SUB_ID\nfrom c7n_azure.session import Session\n\nlogger = logging.getLogger(\"c7n_azure.container-host.deploy\")\n\nMANAGEMENT_GROUP_TYPE = 
'/providers/Microsoft.Management/managementGroups'\nSUBSCRIPTION_TYPE = '/subscriptions'\n\n\nclass Deployment(object):\n\n def __init__(self, ctx, default_environment=None, default_secret_environment=None):\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n\n self.dry_run = ctx.parent.params.get('dry_run')\n\n self.deployment_name = ctx.parent.params.get('deployment_name')\n self.deployment_namespace = ctx.parent.params.get('deployment_namespace')\n\n self.image_repository = ctx.parent.params.get('image_repository')\n self.image_tag = ctx.parent.params.get('image_tag')\n self.image_pull_policy = ctx.parent.params.get('image_pull_policy')\n\n self.default_environment = default_environment\n self.default_secret_environment = default_secret_environment\n self.subscription_hosts = []\n\n def run(self):\n values = self.build_values_dict()\n values_file_path = Deployment.write_values_to_file(values)\n\n logger.info(\"Created values file at {}\\n\".format(values_file_path))\n values_yaml = yaml.dump(values)\n logger.info(values_yaml)\n\n # Currently deploy the helm chart through a system command, this assumes helm is installed\n # and configured with the target cluster.\n logger.info(\"Deploying with helm\")\n helm_command = Deployment.build_helm_command(\n self.deployment_name, values_file_path, namespace=self.deployment_namespace,\n dry_run=self.dry_run)\n logger.info(helm_command)\n exit_status = os.system(helm_command)\n\n os.remove(values_file_path)\n if exit_status:\n exit(exit_status)\n\n def build_values_dict(self):\n values = {}\n\n # custom image fields\n self._set_image_field(values, 'repository', self.image_repository)\n self._set_image_field(values, 'tag', self.image_tag)\n self._set_image_field(values, 'pullPolicy', self.image_pull_policy)\n\n # default environment variables for each host\n if self.default_environment:\n values['defaultEnvironment'] = self.default_environment\n\n # A list of configurations for individual hosts\n values['subscriptionHosts'] = self.subscription_hosts\n return values\n\n def _set_image_field(self, values, key, value):\n if value:\n values.setdefault('image', {})[key] = value\n\n def add_subscription_host(self, name='', environment={}, secret_environment={}):\n self.subscription_hosts.append({\n 'name': name,\n 'environment': environment,\n 'secretEnvironment': secret_environment,\n })\n\n @staticmethod\n def write_values_to_file(values):\n values_file_path = tempfile.mktemp(suffix='.yaml')\n with open(values_file_path, 'w') as values_file:\n yaml.dump(values, stream=values_file)\n return values_file_path\n\n @staticmethod\n def build_helm_command(deployment_name, values_file_path, namespace=None, dry_run=False):\n command = 'helm upgrade --install --debug'\n if dry_run:\n command += ' --dry-run'\n if namespace:\n command += ' --namespace {}'.format(namespace)\n command += ' --values {}'.format(values_file_path)\n chart_path = os.path.dirname(__file__) or os.getcwd()\n command += ' {} {}'.format(deployment_name, chart_path)\n return command\n\n\nclass SubscriptionDeployment(Deployment):\n\n def __init__(self, ctx, name='', env=[], secret_env=[]):\n super(SubscriptionDeployment, self).__init__(ctx)\n self.name = name\n self.environment = {e[0]: e[1] for e in env}\n self.secret_environment = {e[0]: base64.b64encode(e[1].encode()) for e in secret_env}\n\n self.run()\n\n def build_values_dict(self):\n self.add_subscription_host(self.name, self.environment, self.secret_environment)\n return super(SubscriptionDeployment, 
self).build_values_dict()\n\n\nclass ManagementGroupDeployment(Deployment):\n\n def __init__(self, ctx, management_group_id, env=[], secret_env=[]):\n super(ManagementGroupDeployment, self).__init__(ctx,\n default_environment={e[0]: e[1] for e in env},\n default_secret_environment={e[0]: base64.b64encode(e[1].encode()) for e in secret_env})\n self.management_group_id = management_group_id\n load_resources()\n self.session = local_session(Session)\n\n self.run()\n\n def build_values_dict(self):\n self._add_subscription_hosts()\n return super(ManagementGroupDeployment, self).build_values_dict()\n\n def _add_subscription_hosts(self):\n client = self.session.client('azure.mgmt.managementgroups.ManagementGroupsAPI')\n info = client.management_groups.get(\n self.management_group_id, expand='children', recurse=True)\n self._add_subscription_hosts_from_info(info)\n\n def _add_subscription_hosts_from_info(self, info):\n if info.type == SUBSCRIPTION_TYPE:\n sub_id = info.name # The 'name' field of child info is the subscription id\n self.add_subscription_host(\n ManagementGroupDeployment.sub_name_to_deployment_name(info.display_name),\n {\n ENV_SUB_ID: sub_id,\n ENV_CONTAINER_EVENT_QUEUE_NAME: 'c7n-{}'.format(info.name[-4:])\n },\n )\n elif info.type == MANAGEMENT_GROUP_TYPE and info.children:\n for child in info.children:\n self._add_subscription_hosts_from_info(child)\n\n @staticmethod\n def sub_name_to_deployment_name(sub_name):\n # Deployment names must use only lower case alpha numeric characters, -, _, and .\n # They must also start/end with an alpha numeric character\n return re.sub(r'[^A-Za-z0-9-\\._]+', '-', sub_name).strip('-_.').lower()\n\n\n@click.group()\n@click.option('--deployment-name', '-d', default='cloud-custodian')\n@click.option('--deployment-namespace', '-s', default='cloud-custodian')\n@click.option('--image-repository')\n@click.option('--image-tag')\n@click.option('--image-pull-policy')\n@click.option('--dry-run/--no-dry-run', default=False)\ndef cli(deployment_name, deployment_namespace, image_repository='', image_tag='',\n image_pull_policy='', dry_run=False):\n pass\n\n\n@cli.command('subscription')\n@click.option('--name', '-n', required=True)\n@click.option('--env', '-e', type=click.Tuple([str, str]), multiple=True)\n@click.option('--secret-env', type=click.Tuple([str, str]), multiple=True)\n@click.pass_context\nclass SubscriptionDeploymentCommand(SubscriptionDeployment):\n pass\n\n\n@cli.command('management_group')\n@click.pass_context\n@click.option('--management-group-id', '-m', required=True)\n@click.option('--env', '-e', type=click.Tuple([str, str]), multiple=True)\n@click.option('--secret-env', type=click.Tuple([str, str]), multiple=True)\nclass ManagementGroupDeploymentCommand(ManagementGroupDeployment):\n pass\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"tools/ops/azure/container-host/chart/deploy_chart.py","file_name":"deploy_chart.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"217192089","text":"#!/usr/bin/python\n\nimport unittest\nimport sys\nimport ConfigParser\nimport os\nimport re\nimport time\nimport pysphere\nfrom utils import Utils, Tools, Multi\nfrom ha import Ha\n\n\nclass delete(unittest.TestCase):\n\n def setUp(self):\n self.configfile = sys.argv[1]\n self.utils = Utils()\n self.all_config = self.utils.init_allconfig(self.configfile)\n\n self.utils.deploy_usx(self.all_config)\n\n self.amc_ip = self.all_config['amc_ip']\n self.tests = 
Ha(self.configfile)\n self.tools = Tools(self.amc_ip)\n\n def tearDown(self):\n # clean_testbed_op = [\"clean_testbed:amc_ip\"]\n # self.tests._exec(clean_testbed_op)\n print(\"done!!!!!!!!!!!!\")\n\n def delete(self):\n self.assertEqual(self.stretch('hybrid'), True)\n # self.assertEqual(self.stretch('flash'), True)\n #self.assertEqual(self.stretch('memory'), True)\n # self.assertEqual(self.stretch('hyperconverge'), True)\n\n def stretch(self, volume):\n return True\n enable_ha_op = [\"enable_ha:'vols'[\" + volume + \"]:\"]\n # self.tests._exec(enable_ha_op)\n\n # disable_ha_op = [\"disable_ha:'vols'[\" + volume + \"]:\"]\n # self.tests._exec(disable_ha_op)\n\n mount_op = [\"mount_vol:'vols'[\" + volume + \"]:\"]\n # self.tests._exec(mount_op)\n\n clone_op = [\"clone_vm:'vols'[\" + volume + \"][0]:\"]\n # self.tests._exec(clone_op)\n\n # sync_op = [\"sync_vm:\"]\n # self.tests._exec(sync_op)\n\n # for x in range(2):\n failover_op = [\"failover_case:'vols'[\" + volume + \"]:\"]\n # self.tests._exec(failover_op)\n\n # check_crm_op = [\"verifyCrm:'vols'[\" + volume + \"]:\"]\n # return self.tests._exec(check_crm_op)\n return True\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(delete(\"delete\"))\n\n return suite\n\nif __name__ == '__main__':\n unitrunner = unittest.TextTestRunner()\n test_suite = suite()\n unitrunner.run(test_suite)\n","sub_path":"scripts/deployed.py","file_name":"deployed.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"19530398","text":"import bs4\r\nimport requests\r\nimport pandas as pd\r\nimport openpyxl\r\n\r\npages = []\r\nprices = []\r\nstars = []\r\ntitles = []\r\nurls = []\r\npagesToScrape = int(input('Ile stron?'))\r\n\r\npageUrl = 'http://books.toscrape.com/'\r\n\r\nfor i in range(1, pagesToScrape + 1):\r\n url = ('http://books.toscrape.com/catalogue/page-{}.html').format(i)\r\n pages.append(url)\r\nfor item in pages:\r\n page = requests.get(item)\r\n soup = bs4.BeautifulSoup(page.text, 'html.parser')\r\n for i in soup.find_all('h3'):\r\n titles.append(i.getText())\r\n for i in soup.find_all('p', class_= 'price_color'):\r\n prices.append(i.getText().replace('£', '$'))\r\n for i in soup.find_all('p', class_= 'star-rating'):\r\n for k, v in i.attrs.items():\r\n star = v[1]\r\n stars.append(star)\r\n for i in soup.find_all('img', class_= 'thumbnail'):\r\n for k,v in i.attrs.items():\r\n\r\n toappend = ('http://books.toscrape.com/' + i['src']).replace('../', '')\r\n if toappend not in urls:\r\n urls.append(toappend)\r\n\r\n\r\ndata = {'Title': titles, 'Prices': prices, 'Stars': stars, 'Url': urls}\r\nprint(len(titles), len(prices), len(stars), len(urls))\r\ndf = pd.DataFrame(data=data)\r\ndf.index += 1\r\ndf.to_excel('test.xlsx')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"170104358","text":"\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions 
and\n# limitations under the License.\n\nfrom WFSLayer import WFSLayer\nfrom RequestsLibrary import RequestsLibrary\n\n\nclass OGCServiceLibrary(RequestsLibrary, WFSLayer):\n\n def __init__(self):\n super(OGCServiceLibrary,self).__init__()\n self._result = 0\n self._url = ''\n self._ogc_version = '1.1.0'\n __version__ = '1.0'\n\n def connect_to_url(self,url):\n \"\"\"\n Check that we can connect to a given url\n Test framework errors if failure (i.e. NOT response 200), example:\n | Connect to url | my_test_url |\n \"\"\"\n RequestsLibrary.create_session(self,\"URL\",url)\n resp = RequestsLibrary.get(self,\"URL\",\"/\")\n if str(resp.status_code) != \"200\":\n raise AssertionError(\"url: %s Status %s Can't connect\" % (url,resp.status_code))\n\n def result_should_be(self,expected=0):\n \"\"\"\n Compares two values as strings, fail if not equal, example:\n | Result should be | my_expected_result |\n\n \"\"\"\n if str(self._result) != str(expected):\n raise AssertionError(\"%s == %s\" % (self._result, expected))\n\n ROBOT_LIBRARY_SCOPE = 'GLOBAL'","sub_path":"src/OGCServiceLibrary/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"64489260","text":"import sys\nfrom games.game_environment_general import GameEnvironmentGeneral\nfrom games.generalized_prisoners_dilemma.contestant_template import Action\nfrom games.generalized_prisoners_dilemma.config import ITERATIONS, NUM_PLAYERS, TURN_BASED, WIN_CRITERIA, PAYOFF_MATRICES\n\nclass GameEnvironment(GameEnvironmentGeneral):\n def __init__(self, suspense):\n super().__init__(ITERATIONS, NUM_PLAYERS, TURN_BASED, WIN_CRITERIA, suspense)\n\n def get_reward(self, moves:dict, game_board=None):\n \n ''' Calculate the score of each player\n :param moves: A dictionary with keys that equal the player name and a value\n that is one of the possible Actions.\n E.g: {'player1_name': Action1, 'player2_name': Action2}\n :param game_board: A dictionary with the payoff matrix, subjective for each player\n '''\n players = list(moves.keys())\n actions = [moves[player] for player in players]\n\n # Sanity-check on player actions\n assert actions[0] in Action\n assert actions[1] in Action\n \n # GET SCORES\n if actions[0] == Action.COOPERATE and actions[1] == Action.COOPERATE:\n print(\"O\", end='')\n winner = None\n elif actions[0] == Action.COOPERATE and actions[1] == Action.BETRAY:\n print(\"<\", end='')\n winner = players[1]\n elif actions[0] == Action.BETRAY and actions[1] == Action.COOPERATE:\n print(\">\", end='')\n winner = players[0]\n elif actions[0] == Action.BETRAY and actions[1] == Action.BETRAY:\n print(\"X\", end='')\n winner = None\n else:\n raise ValueError(f\"Unknown player action. 
{players[0]} played {actions[0]} and {players[1]} played {actions[1]}\")\n\n rewards = game_board[players[0]][actions[0], actions[1]]\n\n\n self.game_over = True\n sys.stdout.flush() # Forces the buffer to be written to stdout\n\n return {\n 'winner': winner,\n 'players': {\n players[0]: {\n 'action': actions[0],\n 'reward': rewards[0]\n },\n players[1]: {\n 'action': actions[1],\n 'reward': rewards[1]\n }\n }\n }\n\n def get_game_board(self, players, iteration:int):\n i = iteration\n return {\n players[0]: {\n (Action.COOPERATE, Action.COOPERATE): PAYOFF_MATRICES[i][0][0],\n (Action.COOPERATE, Action.BETRAY): PAYOFF_MATRICES[i][0][1],\n (Action.BETRAY, Action.COOPERATE): PAYOFF_MATRICES[i][1][0],\n (Action.BETRAY, Action.BETRAY): PAYOFF_MATRICES[i][1][1]\n },\n players[1]: {\n (Action.COOPERATE, Action.COOPERATE): PAYOFF_MATRICES[i][0][0],\n (Action.COOPERATE, Action.BETRAY): PAYOFF_MATRICES[i][1][0],\n (Action.BETRAY, Action.COOPERATE): PAYOFF_MATRICES[i][0][1],\n (Action.BETRAY, Action.BETRAY): PAYOFF_MATRICES[i][1][1]\n },\n }","sub_path":"games/generalized_prisoners_dilemma/game_environment.py","file_name":"game_environment.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61609343","text":"import requests\n\n\ndef telegram_bot_sendtext(bot_message):\n bot_token = '928618400:AAGjlDTP4aJ9dX2-zh2Fx2Nfege2UHg-SPE'\n bot_chatID = '-1001454981688'\n send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + bot_message\n\n response = requests.get(send_text)\n\n return response.json()\n\n\ntest = telegram_bot_sendtext(\"Match: {}, Winner:{}, Quantity:{}\".format('Nadal vs Djokovic', 'Nadal', '5%'))\nprint(test)","sub_path":"telegram_client.py","file_name":"telegram_client.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"191456668","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 24 09:37:08 2019\n\n@author: wangpeiyu\n\"\"\"\n\nimport csv\nimport re\nimport time\nimport json\nimport pickle\nimport warnings\nimport random\nimport numpy as np\nnp.set_printoptions(threshold=1)\n\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem.porter import PorterStemmer\np_stemmer = PorterStemmer()\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import Normalizer\n\nimport pandas as pd\n#vocab_nips=pd.read_csv(\"vocab.nips.txt\",header=None)\n#vocab_enron=pd.read_csv(\"vocab.enron.txt\",header=None)\nvocab_kos=pd.read_csv(\"vocab.kos.txt\",header=None)\n\n#vocab_nips.columns=['word']\n#vocab_enron.columns=['word']\nvocab_kos.columns=['word']\n#print(vocab_nips)\n#docword_nips=pd.read_csv(\"docword.nips.txt\",delim_whitespace=True, header=None)\n#docword_nips.columns=['doc','word_index','word_frequency']\n#docword_enron=pd.read_csv(\"docword.enron.txt\",delim_whitespace=True, header=None)\n#docword_enron.columns=['doc','word_index','word_frequency']\ndocword_kos=pd.read_csv(\"docword.kos.txt\",delim_whitespace=True, 
header=None)\ndocword_kos.columns=['doc','word_index','word_frequency']\n#print(docword_nips)\n\nd = {'doc': 'first', 'word_frequency': 'sum'}\ndf_new = docword_kos.groupby('word_index', as_index=False).aggregate(d).reindex(columns=docword_kos.columns)\n\ndf=pd.merge(vocab_kos, df_new, left_index=True, right_index=True)\n\n\n\ndoc=[]\ndf['word'] = df['word'].astype(str)\nfor i in range(0,df.shape[0]):\n a=df.at[i,'word_frequency']\n for j in range(0,a):\n bodytext = df.at[i,'word']\n doc.append(bodytext)\ndoc1=[]\nfor i in range(0,df.shape[0]):\n doc1.append(df.at[i,'word'])\n\n \n\"\"\"word_doc_nips=docword_nips.iloc[:,0]\nprint(word_doc_nips)\nword_index=docword_nips.iloc[:,1]-1\"\"\"\n\n\"\"\"kos_word_list=[]\n#print(word_index)\nfor j in range(3431):\n word_list=[]\n for i in range(len(docword_kos)):\n if docword_kos.iloc[i,0]==j:\n word_index=docword_kos.iloc[i,1]-1\n for num in range(docword_kos.iloc[i,2]):\n word_list.extend(vocab_kos.iloc[word_index,0])\n print(j)\n kos_word_list.append(word_list)\nprint(kos_word_list)\"\"\"\n\n#clean_train_reviews=filter(None, clean_train_reviews)\nmaxfeature_num=300\ncomponents_number=101\ncomponent_list = []\nfor i in range(1,components_number+1):\n component_name = \"component\" + str(i) \n component_list.append(component_name)\n\nvectorizer = CountVectorizer(max_df=0.99,min_df=3,ngram_range=(1,1),max_features=maxfeature_num)\n#vectorizer = TfidfVectorizer(max_df=0.99,min_df=3,ngram_range=(1,1),max_features=maxfeature_num)\ntrain_data_features = vectorizer.fit_transform(doc)\npd_head10 = pd.DataFrame(train_data_features.toarray(),index=doc,columns=vectorizer.get_feature_names())\nlsa = TruncatedSVD(n_components=components_number, n_iter=1)\ntrainset_X_LSA = lsa.fit_transform(train_data_features)\nnormalizer = Normalizer(copy=False)\ntrainset_X_LSA = normalizer.fit_transform(trainset_X_LSA)\n\n# attch the LSA result with each documents. 
\npd_component_and_word = pd.DataFrame(lsa.components_,index = component_list,columns = vectorizer.get_feature_names())\npd_component_and_document = pd.DataFrame(trainset_X_LSA, index = doc, columns = component_list)\npd_component_and_word_T = pd.DataFrame(lsa.components_.T,index = vectorizer.get_feature_names(),columns = component_list)\npd_component_and_document_T = pd.DataFrame(trainset_X_LSA.T, index = component_list, columns = doc)\n# Save each pandas dataframe into pickle format\npickleoutput_1 = open('component_and_word.pkl', 'wb')\npickleoutput_2 = open('component_and_document.pkl', 'wb')\npickleoutput_3 = open('component_and_word_T.pkl', 'wb')\npickleoutput_4 = open('component_and_document_T.pkl', 'wb')\npickle.dump(pd_component_and_word, pickleoutput_1)\npickle.dump(pd_component_and_document, pickleoutput_2)\npickle.dump(pd_component_and_word_T, pickleoutput_3)\npickle.dump(pd_component_and_document_T, pickleoutput_4)\n\nprint (\"LSA space is created with \"+ str(components_number)+\" components, \" + \"each component contains \" +str(maxfeature_num)+ \" features(token).\")\n\npickleoutput_1.close()\npickleoutput_2.close()\npickleoutput_3.close()\npickleoutput_4.close()\ncentroid=np.mean(pd_component_and_document)\nprint(centroid)","sub_path":"Assignments/Applied Machine Learning for Analytics/HW5/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"614681318","text":"# -*- coding: utf-8 -*-\n\nimport os, operator\nimport re\n\ntrainDir = \"./train/\"\ntestDir = \"./test/\"\n\nclass Counter(dict):\n\tdef __missing__(self, key):\n\t\treturn 0\n\nmData = {}\nfData = {}\nmTokens = Counter()\nfTokens = Counter()\np = re.compile(\"[~.,'\\\":;!@#$%^&*()_\\-+=?/|\\u201C\\u201D\\u2018\\u2019]\")\n\ndef openData():\n\tallData = os.listdir(trainDir)\n\tallData.sort()\n\tfor file in allData:\n\t\tif file.startswith(\"M\"):\n\t\t\tmData[file] = fileToEntry(trainDir + file)\n\t\telif file.startswith(\"F\"):\n\t\t\tfData[file] = fileToEntry(trainDir + file)\n\t\telse:\n\t\t\tprint(\"Loading of data failed\")\n\ndef fileToEntry(fileName):\n\ttweetFile = open(fileName, errors='replace')\n\treturn tweetFile.read().encode('ascii','ignore').decode('ascii')\n\n# def dictToLines(dic):\n\t# lines = []\n\t# for key in dic:\n\t\t# lines.append(dic[key].splitlines())\n\t# return lines\n\t\ndef lineToTokens(line):\n\ttokenArray = []\n\ttokens = line.split()\n\tfor token in tokens:\n\t\ttoken = normalize(token)\n\t\tif token != '':\n\t\t\ttokenArray.append(token)\n\treturn tokenArray\n\t\ndef normalize(str):\n\tnormWord = str.lower().replace(\"usermention\", \"\").replace(\"rt\", \"\").replace(\"RT\",\"\")\n\tnormWord = p.sub(\"\", normWord)\n\treturn normWord\n\ndef tokensToNgrams(tokens, n):\n\tngrams = []\n\tfor i in range(n-1, len(tokens)):\n\t\tngram = \"\"\n\t\tfor j in range(0, n):\n\t\t\tif j == 0:\n\t\t\t\tngram = tokens[i-j] + ngram\n\t\t\telse:\n\t\t\t\tngram = tokens[i-j] + \" \" + ngram\n\t\tngrams.append(ngram)\n\treturn ngrams\n\ndef tally(ngrams):\n\tc = Counter()\n\tfor ngram in ngrams:\n\t\tc[ngram] += 1\n\treturn c\n\ndef mergeCounters(c1, c2):\n for key in c2.keys():\n c1[key] += c2[key]\n\ndef sortCount(dict):\n\tdict_sorted = sorted(dict.items(), key=operator.itemgetter(1), reverse=True)\n\treturn dict_sorted\n\t\nopenData()\n\nmaleNgrams = []\n\nfor key in mData.keys():\n tokens = lineToTokens(mData[key])\n ngrams = tokensToNgrams(tokens,4)\n for ngram in ngrams:\n 
maleNgrams.append(ngram)\n\nNgramsTally = tally(maleNgrams)\n\nNgramsTally = sortCount(NgramsTally)\n\nprint(len(NgramsTally))\n\n#~ def dictionary(lines):\n\t#~ word = lineToTokens(lines)\n\t#~ for i in range(1,4):\n\t\t#~ ngrams = tokenToNgram(word, i)\n\t\t#~ sorted_ngrams = sortCount(tally(ngrams))\n\t\t#~ for j in range(0,11):\n\t\t\t#~ print(sorted_ngrams[j])\n\t\t\n\t\t#~ for j in range(1,5):\n\t\t\t#~ count = 0;\n\t\t\t#~ for word in sorted_ngrams:\n\t\t\t\t#~ if word[1] == j:\n\t\t\t\t\t#~ count += 1\n\t\t\t#~ print(str(count) + \" \" + str(i) + \"-grams occur \" + str(j) + \" time(s)\")\n\t\t#~ print( \"Unique n-grams with n=\" + str(i) + \": \" + str(len(sorted_ngrams)))\n\t\t#~ print (\"\")\n\n\n#files = fData.values()\n#print(files)\n\n#print(dest)\n\n#~ dest = dict(list(fData.items()) + list(mData.items()))\n\n#~ for key in dest.items():\n\t#~ dictionary(dest[key].values())\n\n\n\n\n\n\n\n\n\n\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"210043692","text":"import pickle\nimport db\ndbFile = 'db.pickle'\n\ndef writeDB():\n f = open(dbFile, \"wb\")\n dba = db.getDb()\n print(dba)\n pickle.dump(dba, f)\n f.close()\n\n\ndef readDB():\n try:\n with open(dbFile, 'rb') as f:\n data = pickle.load(f)\n db.setDb(data)\n except EOFError:\n writeDB()\n readDB()\n f.close()","sub_path":"dz-3/readWrite.py","file_name":"readWrite.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"203096840","text":"import time\nimport os\nfrom os.path import abspath\nfrom lru_cache import *\n\n# Original running complexity was n**2\n\nstart_time = time.time()\nfile_directory = os.path.dirname(abspath(__file__))\nf = open(os.path.join(file_directory, 'names_1.txt'), 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open(os.path.join(file_directory, 'names_2.txt'), 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = []\ncache_1 = lru_cache.LRUCache(len(names_1))\ncache_2 = lru_cache.LRUCache(len(names_2))\n\nfor index in range(len(names_1)):\n cache_1.set(names_1[index], True)\n cache_2.set(names_2[index], True)\n\n if cache_1.get(names_2[index]):\n duplicates.append(names_2[index])\n if cache_2.get(names_1[index]):\n duplicates.append(names_1[index])\n\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates:\\n\\n{', '.join(sorted(duplicates))}\\n\\n\")\nprint(f\"runtime: {end_time - start_time} seconds\")\n\n\n\"\"\"\n## Stretch optimization for low RAM device\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = []\nnames = []\nfor name_1 in names_1:\n names.append(name_1)\n\nfor name_2 in names_2:\n if name_2 in names:\n duplicates.append(name_2)\n\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint(f\"runtime: {end_time - start_time} seconds\")\n \"\"\"\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"80364241","text":"import matplotlib.pyplot as plt\nimport 
numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn import svm\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\n\nclass Combination:\n def __init__(self, kernel, measure, degree, coef, gamma, c):\n self.kernel = kernel\n self.measure = measure\n self.degree = degree\n self.coef = coef\n self.gamma = gamma\n self.c = c\n\n def __str__(self):\n return self.kernel + ' ' + str(self.measure) + \" degree \" + str(self.degree) + \\\n \" coef \" + str(self.coef) + \" gamma \" + str(self.gamma) + \" c \" + str(self.c)\n\n\ndef read_data(dataset, norm):\n X = dataset.drop('class', axis=1).to_numpy()\n y = dataset['class'].to_numpy()\n if norm:\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(X)\n return X, y\n\n\ndef startified_folds():\n rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=36851234)\n global c, clf, y_pred\n C_2d_range = [1e-1, 1, 1e1]\n for c in C_2d_range:\n clf = svm.SVC(C=c, kernel='linear')\n for train, test in rskf.split(X, y):\n clf.fit(X[train], y[train])\n y_pred = clf.predict(X_test)\n combinations.append(Combination('linear', f1_score(y_test, y_pred, average='weighted'), 0, 0, 0, c))\n for c in C_2d_range:\n for degree in [1, 2, 3, 4, 5, 6]:\n for coef in [0.0, 0.5, 1.5, 2.0, 2.5]:\n clf = svm.SVC(C=c, kernel='poly', degree=degree, coef0=coef)\n for train, test in rskf.split(X, y):\n clf.fit(X[train], y[train])\n y_pred = clf.predict(X[test])\n combinations.append(\n Combination('polynomial', f1_score(y[test], y_pred, average='weighted'), degree, coef, 0, c))\n for c in C_2d_range:\n for gamma in [1e-1, 1, 1e1]:\n clf = svm.SVC(C=c, kernel='rbf', gamma=gamma)\n for train, test in rskf.split(X, y):\n clf.fit(X[train], y[train])\n y_pred = clf.predict(X[test])\n combinations.append(Combination('rbf', f1_score(y[test], y_pred, average='weighted'), 0, 0, gamma, c))\n for c in C_2d_range:\n for coef in [0.0, 0.5, 1.5, 2.0, 2.5]:\n clf = svm.SVC(C=c, kernel='sigmoid', coef0=coef)\n for train, test in rskf.split(X, y):\n clf.fit(X[train], y[train])\n y_pred = clf.predict(X[test])\n combinations.append(\n Combination('sigmoid', f1_score(y[test], y_pred, average='weighted'), 0, coef, 0, c))\n combinations.sort(key=lambda x: x.measure, reverse=True)\n for combination in combinations:\n print(combination)\n\n\ndef compute_params():\n global c, clf, y_pred\n C_2d_range = [1e-1, 1, 1e1]\n for c in C_2d_range:\n clf = svm.SVC(C=c, kernel='linear')\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n combinations.append(Combination('linear', f1_score(y_test, y_pred, average='weighted'), 0, 0, 0, c))\n for c in C_2d_range:\n for degree in [1, 2, 3, 4, 5, 6]:\n for coef in [0.0, 0.5, 1.5, 2.0, 2.5]:\n clf = svm.SVC(C=c, kernel='poly', degree=degree, coef0=coef)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n combinations.append(\n Combination('polynomial', f1_score(y_test, y_pred, average='weighted'), degree, coef, 0, c))\n for c in C_2d_range:\n for gamma in [1e-1, 1, 1e1]:\n clf = svm.SVC(C=c, kernel='rbf', gamma=gamma)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n combinations.append(Combination('rbf', f1_score(y_test, y_pred, average='weighted'), 0, 0, gamma, c))\n for c in C_2d_range:\n for coef in [0.0, 0.5, 1.5, 2.0, 2.5]:\n clf = svm.SVC(C=c, kernel='sigmoid', coef0=coef)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n 
combinations.append(Combination('sigmoid', f1_score(y_test, y_pred, average='weighted'), 0, coef, 0, c))\n combinations.sort(key=lambda x: x.measure, reverse=True)\n for combination in combinations:\n print(combination)\n\n\ndef geyser_clf():\n global clf\n clf = svm.SVC(kernel='linear', C=10)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='poly', degree=2, coef0=0.5, C=0.1)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='rbf', gamma=10, C=0.1)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='sigmoid', coef0=0.0, C=0.1)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n\n\ndef chips_clf():\n global clf\n clf = svm.SVC(kernel='linear', C=10.0)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='poly', degree=2, coef0=0.5, C=1.0)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='rbf', gamma=1, C=10.0)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n clf = svm.SVC(kernel='sigmoid', coef0=2.5, C=10.0)\n clf.fit(X, y)\n classifiers.append((clf, clf.kernel))\n\n\ndef rbf_plot(dataset):\n global clf\n plt.figure(figsize=(8, 6))\n xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))\n X_2d = X[:, :2]\n X_2d = X_2d[y > 0]\n y_2d = y[y > 0]\n y_2d -= 1\n for (k, (clf, kernel)) in enumerate(classifiers):\n # evaluate decision function in a grid\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n # visualize decision function for these parameters\n plt.title(\"dataset %s kernel=%s\" % (dataset, kernel),\n size='medium')\n\n # visualize parameter's effect on decision function\n plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)\n plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,\n edgecolors='k')\n plt.xticks(())\n plt.yticks(())\n plt.axis('tight')\n plt.show()\n\n\nif __name__ == '__main__':\n combinations = []\n classifiers = []\n chips = pd.read_csv(\"chips.csv\")\n X, y = read_data(chips, False)\n # geyser = pd.read_csv(\"geyser.csv\")\n # X, y = read_data(geyser, True)\n le = LabelEncoder()\n y = le.fit_transform(y)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, stratify=y)\n compute_params()\n # startified_folds()\n # geyser_clf()\n chips_clf()\n # rbf_plot('geyser')\n for (k, (clf, kernel)) in enumerate(classifiers):\n plt.figure()\n plt.clf()\n plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,\n edgecolor='k', s=20)\n\n plt.axis('tight')\n x_min = X[:, 0].min()\n x_max = X[:, 0].max()\n y_min = X[:, 1].min()\n y_max = X[:, 1].max()\n\n XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(XX.shape)\n plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],\n linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\n\n plt.title(kernel + ' chips')\n plt.show()\n","sub_path":"lab03/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"214722360","text":"from PIL import Image\nimport torch.utils.data as data\nfrom os import listdir\nfrom os.path import join\nimport random\nimport numpy as np\nimport torch\n\n\ndef add_noise(x, noise='.'):\n if noise is not '.':\n noise_type = noise[0]\n noise_value = int(noise[1:])\n if noise_type == 'G':\n noises = 
np.random.normal(scale=noise_value, size=x.shape)\n noises = noises.round()\n elif noise_type == 'S':\n noises = np.random.poisson(x * noise_value) / noise_value\n noises = noises - noises.mean(axis=0).mean(axis=0)\n\n x_noise = x.astype(np.int16) + noises.astype(np.int16)\n x_noise = x_noise.clip(0, 255).astype(np.uint8)\n return x_noise\n else:\n return x\n\n\ndef is_image_file(filename):\n filename_lower = filename.lower()\n return any(filename_lower.endswith(extension) for extension in ['.png', '.jpg', '.jpeg', '.tif'])\n\n\ndef get_patch(*args, patch_size):\n if patch_size == 0:\n return args\n ih, iw = args[0].shape[:2]\n ix = random.randrange(0, iw - patch_size + 1)\n iy = random.randrange(0, ih - patch_size + 1)\n\n ret = [*[a[iy:iy + patch_size, ix:ix + patch_size, :] for a in args]]\n\n return ret\n\n\ndef augment(*args, hflip=True, rot=False):\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 0.5\n\n def _augment(img):\n if hflip: img = img[:, ::-1, :]\n if vflip: img = img[::-1, :, :]\n if rot90: img = img.transpose(1, 0, 2)\n\n return img\n\n return [_augment(a) for a in args]\n\n\ndef np2Tensor(*args, rgb_range=1.):\n def _np2Tensor(img):\n np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))\n tensor = torch.from_numpy(np_transpose).float()\n tensor.mul_(rgb_range / 255)\n\n return tensor\n\n return [_np2Tensor(a) for a in args]\n\n\nclass RAW2RGBData(data.Dataset):\n def __init__(self, dataset_dir, patch_size=0, test=False):\n super(RAW2RGBData, self).__init__()\n self.patch_size = patch_size\n self.test = test\n data_dir = join(dataset_dir, \"RAW\")\n label_dir = join(dataset_dir, \"RGB\")\n\n data_filenames = [join(data_dir, x) for x in listdir(data_dir) if is_image_file(x)]\n label_filenames = [join(label_dir, x) for x in listdir(label_dir) if is_image_file(x)]\n\n label_filenames.sort()\n data_filenames.sort()\n\n # data_filenames = data_filenames[:1200]\n # label_filenames = label_filenames[:1200]\n\n data_filenames = data_filenames[::200] if test else list(set(data_filenames) - set(data_filenames[::200]))\n label_filenames = label_filenames[::200] if test else list(set(label_filenames) - set(label_filenames[::200]))\n label_filenames.sort()\n data_filenames.sort()\n\n self.data_filenames = data_filenames\n self.label_filenames = label_filenames\n\n def __getitem__(self, index):\n data = np.asarray(Image.open(self.data_filenames[index]))\n add_noise(data, 'G1')\n label = np.asarray(Image.open(self.label_filenames[index]))\n\n data, label = get_patch(data, label, patch_size=self.patch_size)\n if not self.test:\n data, label = augment(data, label)\n data, label = np2Tensor(data, label)\n\n return data, label\n\n def __len__(self):\n return len(self.data_filenames)\n","sub_path":"data_noise.py","file_name":"data_noise.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"8505349","text":"import pygame\r\nimport random\r\nimport math\r\nfrom pygame import mixer\r\n\r\npygame.init()\r\n\r\n# Window Display\r\nwindow = pygame.display.set_mode((800, 600))\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon = pygame.image.load(\"icon.png\")\r\npygame.display.set_icon(icon)\r\n\r\nbackground = pygame.image.load(\"Background.jpg\")\r\n\r\nmixer.music.load(\"background.wav\")\r\nmixer.music.play(-1)\r\n# Player\r\n\r\nplayership = pygame.image.load(\"space ship.png\")\r\nplayerX = 370\r\nplayerY = 
480\r\nplayerX_change = 0\r\nplayerY_change = 0\r\n\r\n# Aliens\r\nalien_enemy = []\r\nalienX = []\r\nalienY = []\r\nalienX_change = []\r\nalienY_change = []\r\nnum_of_alien = 6\r\n\r\nfor i in range(num_of_alien):\r\n alien_enemy.append(pygame.image.load(\"alien.png\"))\r\n alienX.append(random.randint(1, 735))\r\n alienY.append(random.randint(20, 50))\r\n alienX_change.append(0.3)\r\n alienY_change.append(20)\r\n\r\n# Bullet\r\nbullet_move = pygame.image.load(\"bullet.png\")\r\nbulletX = 0\r\nbulletY = 445\r\nbulletY_change = 2\r\nbullet_state = \"ready\"\r\n\r\n# Score\r\nscore_value = 0\r\nfont = pygame.font.Font(\"freesansbold.ttf\", 32)\r\nfontX = 10\r\nfontY = 10\r\n\r\nfailed_font = pygame.font.Font(\"freesansbold.ttf\", 64)\r\n\r\n\r\n# Function\r\n\r\ndef show_score(x, y):\r\n score = font.render(\"Score : \" + str(score_value), True, (255, 255, 255))\r\n window.blit(score, (x, y))\r\n\r\n\r\ndef show_failed():\r\n failed = failed_font.render(\"GAME OVER\", True, (255, 255, 255))\r\n window.blit(failed, (200, 250))\r\n\r\n\r\ndef player(x, y):\r\n window.blit(playership, (x, y))\r\n\r\n\r\ndef alien(x, y, i):\r\n window.blit(alien_enemy[i], (x, y))\r\n\r\n\r\ndef bullet(x, y):\r\n global bullet_state\r\n bullet_state = \"fire\"\r\n window.blit(bullet_move, (x + 16, y))\r\n\r\n\r\ndef iscollide(alienX, alienY, bulletX, bulletY):\r\n distance = math.sqrt((math.pow(alienX - bulletX, 2)) + (math.pow(alienY - bulletY, 2)))\r\n if distance < 27:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# Loop\r\nrunning = True\r\nwhile running:\r\n window.fill((0, 0, 0))\r\n window.blit(background, (0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_a:\r\n playerX_change = -0.8\r\n if event.key == pygame.K_d:\r\n playerX_change = +0.8\r\n if event.key == pygame.K_w:\r\n playerY_change = -0\r\n if event.key == pygame.K_s:\r\n playerY_change = +0\r\n if event.key == pygame.K_SPACE:\r\n if bullet_state is \"ready\":\r\n bullet_sound = mixer.Sound(\"laser.wav\")\r\n bullet_sound.play()\r\n bulletX = playerX\r\n bullet(playerX, bulletY)\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_a or event.key == pygame.K_d:\r\n playerX_change = 0\r\n if event.key == pygame.K_w or event.key == pygame.K_s:\r\n playerY_change = 0\r\n if playerX >= 736:\r\n playerX = 736\r\n elif playerX <= 0:\r\n playerX = 0\r\n\r\n if bulletY <= 0:\r\n bulletY = 445\r\n bullet_state = \"ready\"\r\n\r\n for i in range(num_of_alien):\r\n if alienY[i] > 200:\r\n for j in range(num_of_alien):\r\n alienY[j] = 2000\r\n show_failed()\r\n break\r\n alienX[i] += alienX_change[i]\r\n\r\n if alienX[i] >= 736:\r\n alienX[i] = 736\r\n alienY[i] += alienY_change[i]\r\n alienX_change[i] = -0.3\r\n elif alienX[i] <= 0:\r\n alienX[i] = 0\r\n alienY[i] += alienY_change[i]\r\n alienX_change[i] = 0.3\r\n\r\n collide = iscollide(alienX[i], alienY[i], bulletX, bulletY)\r\n if collide:\r\n death_sound = mixer.Sound(\"explosion.wav\")\r\n death_sound.play()\r\n bulletY = 445\r\n bullet_state = \"ready\"\r\n score_value += 1\r\n alienX[i] = random.randint(1, 735)\r\n alienY[i] = random.randint(20, 50)\r\n alien(alienX[i], alienY[i], i)\r\n\r\n if bullet_state is \"fire\":\r\n bullet(bulletX, bulletY)\r\n bulletY -= bulletY_change\r\n\r\n playerY += playerY_change\r\n playerX += playerX_change\r\n show_score(fontX, fontY)\r\n player(playerX, playerY)\r\n 
pygame.display.update()\r\n","sub_path":"Second.py","file_name":"Second.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"585799329","text":"#!/usr/bin/env python3\n\n# author: Matteo Caliandro\n# e-mail: mcaliandro92@gmail.com\n# github: mcaliandro\n# \n# project: qemustarter\n# version: 0.2.1\n# date: 18-03-2020\n\nfrom argparse import ArgumentParser\nfrom jsonschema import validate, ValidationError\nfrom pathlib import Path\nfrom subprocess import Popen, DEVNULL\nfrom sys import exit\nfrom yaml import load, Loader, YAMLError\n\n\n# Thanks to Nadia Alramli for the hint on converting a Python dictionary to an object\n# source: https://stackoverflow.com/questions/1305532/convert-nested-python-dict-to-object/1305682#1305682\nclass Object():\n def __init__(self, source):\n for key, value in source.items():\n if isinstance(value, (list, tuple)):\n setattr(self, key, [Object(item) if isinstance(item, dict) else item for item in value])\n else:\n setattr(self, key, Object(value) if isinstance(value, dict) else value)\n\n\nclass Error():\n @staticmethod\n def print_error(msg):\n print(msg)\n exit()\n @staticmethod\n def config_failed():\n Error.print_error(\"Error: virtual machine cannot be configured.\")\n @staticmethod\n def invalid_action(value):\n Error.print_error(\"Error: invalid action {}\".format(value))\n @staticmethod\n def no_disk(value):\n Error.print_error(\"Error: Disk image not found {}\".format(value))\n @staticmethod\n def no_iso(value):\n Error.print_error(\"Error: ISO file not found {}\".format(value))\n\n\nclass QemuBase():\n def __init__(self, base):\n self.__command = base\n def __call__(self):\n proc = Popen(self.__command, stdout=DEVNULL)\n proc.wait()\n def add_option(self, opt, value=None):\n if not value:\n self.__command.append(opt)\n else:\n self.__command.extend([opt, value])\n def props():\n pass\n\n\nclass QemuImage(QemuBase):\n def __init__(self):\n super().__init__([\"qemu-img\", \"create\"])\n def props(self, disk):\n self.add_option(\"-f\", disk.type)\n self.add_option(opt=disk.image) \n self.add_option(opt=\"{}m\".format(disk.size))\n\n\nclass QemuMachine(QemuBase):\n def __init__(self):\n super().__init__([\"qemu-system-x86_64\", \"-enable-kvm\"])\n def props(self, name=None, cores=None, ram=None, cdrom=None, disk=None, network=False, noreboot=False):\n if name:\n self.add_option(\"-name\", name)\n if cores:\n self.add_option(\"-smp\", str(cores))\n if ram:\n self.add_option(\"-m\", \"{}m\".format(ram))\n if cdrom:\n self.add_option(\"-cdrom\", cdrom)\n if disk:\n self.add_option(\"-{}\".format(disk.device), disk.image)\n if network:\n self.add_option(\"-net\", \"nic\")\n self.add_option(\"-net\", \"user\")\n if noreboot:\n self.add_option(opt=\"-no-reboot\")\n\n\ndef create_vm(disk):\n create_img = QemuImage()\n create_img.props(disk=disk)\n create_img()\n\n\ndef main(vmconfig):\n if vmconfig.action not in [\"boot\", \"install\", \"live\"]:\n Error.invalid_action(vmconfig.action)\n launch_vm = QemuMachine()\n launch_vm.props(name=vmconfig.name, cores=vmconfig.cores, ram=vmconfig.ram, network=True)\n if vmconfig.action == \"boot\":\n if not Path(vmconfig.disk.image).exists():\n Error.no_disk(vmconfig.disk.image)\n launch_vm.props(disk=vmconfig.disk)\n launch_vm()\n else:\n if not Path(vmconfig.iso).exists():\n Error.no_iso(vmconfig.iso)\n if vmconfig.action == \"install\":\n if not Path(vmconfig.disk.image).exists():\n create_vm(vmconfig.disk)\n 
launch_vm.props(cdrom=vmconfig.iso, disk=vmconfig.disk, noreboot=True)\n launch_vm()\n if vmconfig.action == \"live\":\n launch_vm.props(cdrom=vmconfig.iso, noreboot=True)\n launch_vm()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--config\", \"-c\", default=\"config.yml\", type=str)\n parser.add_argument(\"--schema\", \"-s\", default=\"schema.yml\", type=str)\n args = parser.parse_args()\n config = None\n try:\n with open(args.config, \"r\") as config_yaml:\n config = load(config_yaml, Loader=Loader)\n config_yaml.close()\n with open(args.schema, \"r\") as schema_yaml:\n schema = load(schema_yaml, Loader=Loader)\n schema_yaml.close()\n validate(instance=config, schema=schema)\n except IOError as io_err:\n raise io_err\n except YAMLError as yaml_err: \n raise yaml_err\n except ValidationError as val_err: \n raise val_err\n if not config:\n Error.config_failed()\n main(Object(config))\n","sub_path":"qemustarter.py","file_name":"qemustarter.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"67100415","text":"\nimport cleanup\nfrom topo5 import topo5\nfrom mininet.net import Mininet\nfrom mininet.node import Controller\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nfrom packet import *\n\nfrom printer import *\n\n\ndef dijkstra(graph, src, dest, visited=[], distances={}, predecessors={}):\n \"\"\" calculates a shortest path tree routed in src\n \"\"\"\n # a few sanity checks\n if src not in graph:\n raise TypeError('The root of the shortest path tree cannot be found')\n if dest not in graph:\n raise TypeError('The target of the shortest path cannot be found')\n # ending condition\n if src == dest:\n # We build the shortest path and display it\n path = []\n pred = dest\n while pred != None:\n path.append(pred)\n pred = predecessors.get(pred, None)\n print('shortest path: ' + str(path) + \" cost= \" + str(distances[dest]))\n global path2\n path2 = path\n\n else:\n # if it is the initial run, initializes the cost\n if not visited:\n distances[src] = 0\n # visit the neighbors\n for neighbor in graph[src]:\n if neighbor not in visited:\n new_distance = distances[src] + graph[src][neighbor]\n print(new_distance)\n if new_distance <= distances.get(neighbor, float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n # mark as visited\n visited.append(src)\n # now that all neighbors have been visited: recurse\n # select the non visited node with lowest distance 'x'\n # run Dijskstra with src='x'\n unvisited = {}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k, float('inf'))\n x = min(unvisited, key=unvisited.get)\n dijkstra(graph, x, dest, visited, distances, predecessors)\n\n\n\n\n# def dijkstraHelperFunction(topo, src, dst):\n# ''' dijkstra's helper function:\n# makes link dictionary\n# calls dijkstras on it\n# '''\n#\n# topoG = topo.g\n#\n# graphDic = {} # empty dictionary\n# for node in topoG.nodes(): # make switch dictionary without links\n# graphDic[node] = {}\n# for edge in topoG.edges(): # adds each link to each switch\n# graphDic[edge[0]][edge[1]] = 1\n# graphDic[edge[1]][edge[0]] = 1\n#\n# path = dijkstra(graphDic, src, dst, visited=[], distances={}, predecessors={})\n#\n# dpidPath = []\n# for switch in path:\n# dpidPath.append(topo.id_gen(name=switch).dpid)\n#\n# return path\n\n\n\n\n#Edited:\n\n\ndef dijkstraTable(topo, src):\n # Routing table creater for each node in 
network\n\n # open file for saving paths\n\n filename = \"src.txt\"\n f = open(filename, 'w+')\n\n topoG = topo.g\n\n graphDic = {} # empty dictionary\n for node in topoG.nodes(): # make switch dictionary without links\n graphDic[node] = {}\n for edge in topoG.edges(): # adds each link to each switch\n graphDic[edge[0]][edge[1]] = 1\n graphDic[edge[1]][edge[0]] = 1\n\n # get paths to all other node from src\n\n for node in graphDic:\n\n path = dijkstra(graphDic, src, node, visited=[], distances={}, predecessors={})\n\n dpidPath = []\n\n # add switch names\n\n for switch in path:\n dpidPath.append(topo.id_gen(name=switch).dpid)\n\n # write paths to file for the src\n\n route = \"%s %s \\n\" % node % path\n f.write(route)\n\n","sub_path":"HW_4_6/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"552985392","text":"import urllib.request, urllib.parse, json\n\n# Just playing with openstack api\n# This is a simple script for creating 10 VMs\n# You need to edit the dictionaries token_data and data.\n\n\ndef api(url, headers, data, token=True):\n\n data = json.dumps(data).encode(\"utf-8\")\n req = urllib.request.Request(url, data, headers)\n\n res = urllib.request.urlopen(req)\n\n json_res = json.loads(res.read().decode(\"utf-8\"))\n\n if token:\n return (json_res['access']['token']['id'], \n json_res['access']['token']['tenant']['id'])\n\n return json_res\n\n\n\ndef main():\n url = \"http://10.0.2.61:5000/v2.0/tokens\"\n\n headers = {'Content-Type': \"application/json\", \n 'Accept': \"application/json\"}\n\n token_data = {\"auth\": {\"passwordCredentials\": \n {\"username\": \"admin\", \"password\": \"mypass\"}, \"tenantName\": \"admin\"} \n }\n\n token_tenant = api(url, headers, token_data)\n\n url = \"http://10.0.2.61:8774/v2/%s/servers\" % token_tenant[1]\n\n headers[\"X-Auth-Token\"] = token_tenant[0]\n\n data = {\n \"server\": {\n \"name\": \"server-test\",\n \"imageRef\": \"63f277fb-22c8-41b9-96f0-41c0a065fdb4\",\n \"flavorRef\": \"1\",\n \"max_count\": 10,\n \"min_count\": 1,\n \"networks\": [\n {\n \"uuid\": \"d7fb9de1-8139-4f60-a5c0-f4486fc93899\"\n }\n ],\n \"security_groups\": [\n {\n \"name\": \"default\"\n },\n ]\n }\n }\n\n print(api(url, headers, data, token=False))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"create_10_vms.py","file_name":"create_10_vms.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"481179699","text":"import MySQLdb as mysqldb\n\n\nclass DBManager(object):\n def __init__(self):\n self.db = mysqldb.connect(\"localhost\", \"root\", \"123456\", \"test\", charset='utf8')\n self.cursor = self.db.cursor()\n\n def get_version(self):\n\n # 使用execute方法执行SQL语句\n self.cursor.execute(\"SELECT VERSION()\")\n\n # 使用 fetchone() 方法获取一条数据\n data = self.cursor.fetchone()\n\n print(\"Database version : %s \" % data)\n\n def insert_data(self):\n insert_sql = \"INSERT INTO douban_commits (id, commits_text, commits_score, commits_use_count,\" \\\n \" commits_user_name, commits_date) VALUES (%s,%s,%s,%s,%s, %s)\" % \\\n (\"'sfs'\", \"'sdfs'\", \"'sdsdf'\", \"'sdsdf'\", \"'fsfdf'\", \"'7987'\")\n try:\n self.cursor.execute(insert_sql)\n self.db.commit()\n except:\n print(\"insert error!\")\n\n def insert_data1(self, param1, param2, param3, param4, param5):\n insert_sql = \"INSERT INTO douban_commits (id, commits_text, \" \\\n \"commits_score, \" 
\\\n \"commits_use_count,\" \\\n \"commits_user_name,\" \\\n \"commits_date) VALUES \" \\\n \"('%s','%s','%s','%s','%s', '%s')\" % (str(param1),\n str(param1),\n str(param2),\n str(param3),\n str(param4),\n str(param5))\n print(insert_sql)\n try:\n self.cursor.execute(insert_sql)\n self.db.commit()\n except:\n print(\"insert error!\")\n\n def select_data(self):\n select_sql = \"select * from douban_commits order by commits_date\"\n try:\n self.cursor.execute(select_sql)\n results = self.cursor.fetchall()\n return results\n except Exception:\n print(\"select error!\")\n\n def select_data_by_date(self):\n select_sql = \"select commits_date from douban_commits order by commits_date\"\n try:\n self.cursor.execute(select_sql)\n results = self.cursor.fetchall()\n for row in results:\n print(row)\n except Exception:\n print(\"select error!\")\n\n def delete_data(self):\n delete_sql = \"delete from douban_commits where id like 'sfs' \"\n try:\n self.cursor.execute(delete_sql)\n self.db.commit()\n except Exception:\n print(\"delete error\")\n\n def update_data(self):\n update_sql = \"update douban_commits set commits_text = 'test' where id like '111s'\"\n try:\n self.cursor.execute(update_sql)\n self.db.commit()\n except:\n print(\"update error\")\n\n def __del__(self):\n # 关闭数据库连接\n self.db.close()\n\n\nif __name__ == \"__main__\":\n db_test = DBManager()\n db_test.select_data_by_date()\n","sub_path":"Crawler/DBManager.py","file_name":"DBManager.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"487507816","text":"from AccessControl import ClassSecurityInfo, ClassSecurityInfo\nfrom Acquisition import aq_base, aq_inner\nfrom Products.ATExtensions.widget.records import RecordsWidget\nfrom Products.Archetypes.Registry import registerWidget, registerPropertyType\nfrom Products.Archetypes.Widget import TypesWidget\nfrom Products.Archetypes.utils import shasattr, DisplayList\nfrom Products.CMFCore.utils import getToolByName\nfrom archetypes.referencebrowserwidget import utils\n\nclass ReferenceResultsWidget(RecordsWidget):\n _properties = TypesWidget._properties.copy()\n _properties.update({\n 'macro': \"bika_widgets/referenceresultswidget\",\n 'helper_js': (\"bika_widgets/referenceresultswidget.js\",),\n 'helper_css': (\"bika_widgets/referenceresultswidget.css\",),\n })\n\n security = ClassSecurityInfo()\n\n security.declarePublic('process_form')\n def process_form(self, instance, field, form, empty_marker = None,\n emptyReturnsMarker = False):\n \"\"\" All records have have UID specified in the TAL, so they will show up as valid\n RecordsWidget entries. 
We remove rows with no other values entered, here.\n \"\"\"\n value = form.get(field.getName(), empty_marker)\n if value is empty_marker:\n return empty_marker\n if emptyReturnsMarker and value == '':\n return empty_marker\n\n for idx in range(len(value) - 1, -1, -1):\n if len(value[idx].keys()) == 1: del value[idx]\n return value, {}\n\n security.declarePublic('getServicesByCategory')\n def getServicesByCategory(self):\n \"\"\"Returns a dictionary of all services that do not have dependents.\n If a service has dependents, the reference samples should cater for\n those instead.\n AnalysisCategory[service,service,...]\n \"\"\"\n categories = {}\n pc = getToolByName(self, 'portal_catalog')\n services = pc(portal_type = 'AnalysisService',\n sort_on='sortable_title')\n for service in services:\n service = service.getObject()\n calc = service.getCalculation()\n if calc and calc.getDependentServices():\n continue\n CategoryUID = service.getCategory().UID()\n CategoryTitle = service.getCategory().Title()\n key = \"%s_%s\"%(CategoryUID, CategoryTitle)\n if categories.has_key(key):\n categories[key].append({'title': service.Title(),\n 'uid' : service.UID()})\n else:\n categories[key] = [{'title': service.Title(),\n 'uid': service.UID()},]\n return categories\n\n security.declarePublic('getServicesWithResultsByCategory')\n def getServicesWithResultsByCategory(self, field):\n \"\"\" list of services which have results specified in field\n \"\"\"\n categories = {}\n pc = getToolByName(self, 'portal_catalog')\n for ref in getattr(field, field.accessor)():\n service = pc(portal_type='AnalysisService',\n sort_on='sortable_title',\n UID=ref['uid'])[0]\n service = service.getObject()\n calc = service.getCalculation()\n if calc and calc.getDependentServices():\n continue\n CategoryUID = service.getCategory().UID()\n CategoryTitle = service.getCategory().Title()\n key = \"%s_%s\"%(CategoryUID, CategoryTitle)\n ref['title'] = service.Title()\n ref['service'] = ref['service'].UID()\n if not categories.has_key(key):\n categories[key] = {}\n categories[key][service.UID()] = ref\n return categories\n\n security.declarePublic('getCategoryUID')\n def getCategoryUID(self, category_title):\n pc = getToolByName(self, 'portal_catalog')\n cats = pc(portal_type = \"AnalysisCategory\",\n sort_on='sortable_title')\n cats = [cat.UID for cat in cats if cat.Title == category_title]\n if cats:\n return cats[0]\n else:\n return \"\"\n\n security.declarePublic('dumps')\n def dumps(self, data):\n import json\n return json.dumps(data)\n\nregisterWidget(ReferenceResultsWidget,\n title = 'Reference definition results',\n description = ('Reference definition results.'),\n )\n","sub_path":"bika/lims/browser/widgets/referenceresultswidget.py","file_name":"referenceresultswidget.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"607235922","text":"\n# Copyright (c) 2016, The Bifrost Authors. All rights reserved.\n# Copyright (c) 2016, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of The Bifrost Authors nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport ctypes\nimport unittest\nimport numpy as np\nfrom numpy import matmul as gold_matmul\nfrom bifrost.linalg import LinAlg\nimport bifrost as bf\n\nRTOL = 1e-4\nATOL = 1e-5\n\nclass TestLinAlg(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.linalg = LinAlg()\n\t\tnp.random.seed(1234)\n\tdef run_test_matmul_aa_ci8_shape(self, shape):\n\t\tshape_complex = shape[:-1] + (shape[-1]*2,)\n\t\ta8 = (np.random.random(size=shape_complex)*255).astype(np.int8)\n\t\ta_gold = a8.astype(np.float32).view(np.complex64)\n\t\ta = a8.view(bf.DataType.ci8)\n\t\t# Note: np.matmul seems to be slow and inaccurate when there are batch dims\n\t\tc_gold = np.matmul(a_gold, np.swapaxes(a_gold, -1, -2).conj())\n\t\ttriu = np.triu_indices(shape[-2], 1)\n\t\tc_gold[...,triu[0],triu[1]] = 0\n\t\ta = bf.asarray(a, space='cuda')\n\t\tc = bf.zeros_like(c_gold, space='cuda')\n\t\tself.linalg.matmul(1, a, None, 0, c)\n\t\tc = c.copy('system')\n\t\tnp.testing.assert_allclose(c, c_gold, RTOL, ATOL)\n\tdef run_test_matmul_aa_dtype_shape(self, shape, dtype, axes=None):\n\t\ta = ((np.random.random(size=shape))*127).astype(dtype)\n\t\tif axes is None:\n\t\t\taxes = range(len(shape))\n\t\taa = a.transpose(axes)\n\t\tc_gold = np.matmul(aa, np.swapaxes(aa, -1, -2).conj())\n\t\ttriu = np.triu_indices(shape[axes[-2]], 1)\n\t\tc_gold[...,triu[0],triu[1]] = 0\n\t\ta = bf.asarray(a, space='cuda')\n\t\taa = a.transpose(axes)\n\t\tc = bf.zeros_like(c_gold, space='cuda')\n\t\tself.linalg.matmul(1, aa, None, 0, c)\n\t\tc = c.copy('system')\n\t\tnp.testing.assert_allclose(c, c_gold, RTOL, ATOL)\n\tdef run_test_matmul_aa_dtype(self, dtype):\n\t\tself.run_test_matmul_aa_dtype_shape((11,23), dtype)\n\t\tself.run_test_matmul_aa_dtype_shape((11,23), dtype, [1,0])\n\t\tself.run_test_matmul_aa_dtype_shape((111,223), dtype)\n\t\tself.run_test_matmul_aa_dtype_shape((111,223), dtype, [1,0])\n\t\tself.run_test_matmul_aa_dtype_shape((1111,2223), dtype)\n\t\tself.run_test_matmul_aa_dtype_shape((3,111,223), dtype)\n\t\tself.run_test_matmul_aa_dtype_shape((3,111,223), dtype, 
[0,2,1])\n\t\tself.run_test_matmul_aa_dtype_shape((3,111,223), dtype, [1,2,0])\n\t\tself.run_test_matmul_aa_dtype_shape((3,111,223), dtype, [1,0,2])\n\t\t# Note: The fastest dim can't be a batch dim, so these aren't supported\n\t\t#self.run_test_matmul_aa_dtype_shape((3,111,223), dtype, [2,0,1])\n\t\t#self.run_test_matmul_aa_dtype_shape((3,111,223), dtype, [2,1,0])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype)\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [0,1,3,2])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [1,0,2,3])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [1,0,3,2])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [1,2,3,0])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [1,2,0,3])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [2,1,0,3])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [2,1,3,0])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [2,0,3,1])\n\t\tself.run_test_matmul_aa_dtype_shape((5,3,111,57), dtype, [2,0,1,3])\n\t\tself.run_test_matmul_aa_dtype_shape((5,7,3,111,223), dtype)\n\tdef test_matmul_aa_ci8(self):\n\t\tself.run_test_matmul_aa_ci8_shape((11,23))\n\t\tself.run_test_matmul_aa_ci8_shape((111,223))\n\t\tself.run_test_matmul_aa_ci8_shape((1111,2223))\n\t\tself.run_test_matmul_aa_ci8_shape((3,111,223))\n\t\tself.run_test_matmul_aa_ci8_shape((5,3,111,223))\n\t\tself.run_test_matmul_aa_ci8_shape((5,7,3,111,223))\n\tdef test_matmul_aa_f32(self):\n\t\tself.run_test_matmul_aa_dtype(np.float32)\n\tdef test_matmul_aa_f64(self):\n\t\tself.run_test_matmul_aa_dtype(np.float64)\n\tdef test_matmul_aa_c32(self):\n\t\tself.run_test_matmul_aa_dtype(np.complex64)\n\tdef test_matmul_aa_c64(self):\n\t\tself.run_test_matmul_aa_dtype(np.complex128)\n","sub_path":"test/test_linalg.py","file_name":"test_linalg.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"599436693","text":"import maya.cmds as cmds\nimport pymel.core as pm\n\n# Module imports\nimport jhRigBuilder.controls as controls\n\nfrom tinytest.test import *\nfrom common import TestTransform\n\n\n######################################################################\n#\n# Control Object Tests\n#\n######################################################################\n\n@describe('Control')\nclass TestControl(TestTransform):\n\n testingClass = controls.Control\n\n @setup\n def setup(self):\n\n cmds.file(newFile = True)\n\n @cleanup\n def cleanup(self):\n\n cmds.file(newFile = True, force = True)\n\n @should('apply correct name when creating new maya node')\n def createNodeWithName(self):\n\n name = 'NewTestName'\n newObject = self.testingClass(name = name)\n\n actual = find('Incorrect node name').using(newObject._node.nodeName())\n verify(actual).isEqualTo(name)\n\n @should('apply correct name when wrapping existing maya node')\n def wrapExistingNodeWithName(self):\n\n name = 'NewTestName'\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode, name = name)\n\n actual = find('Incorrect node name').using(mayaNode.nodeName())\n verify(actual).isEqualTo(name)\n\n @should('not change name when none provided')\n def wrapExistingNodeWithoutName(self):\n\n mayaNode = self.createNode()\n name = mayaNode.nodeName()\n\n newObject = self.testingClass(mayaNode)\n\n actual = find('Incorrect node name').using(mayaNode.nodeName())\n verify(actual).isEqualTo(name)\n\n @should('retrieve node 
name')\n def getNodeName(self):\n\n mayaNode = self.createNode()\n name = mayaNode.nodeName()\n\n newObject = self.testingClass(mayaNode)\n\n getName = find('Incorrect name retrieved').using(newObject.name)\n verify(getName).isEqualTo(name)\n\n @should('set node name')\n def setNodeName(self):\n\n mayaNode = self.createNode()\n newName = 'NewTestName'\n\n newObject = self.testingClass(mayaNode)\n\n newObject.name = newName\n\n getName = find('Name was not updated').using(newObject.name)\n verify(getName).isEqualTo(newName)\n\n @should('retrieve full path name')\n def getNodeFullPathName(self):\n\n mayaNode = self.createNode()\n path = mayaNode.name()\n\n newObject = self.testingClass(mayaNode)\n\n getPath = find('Incorrect name retrieved').using(newObject.path)\n verify(getPath).isEqualTo(path)\n\n @should('apply correct color when creating new maya node')\n def createNodeWithColor(self):\n\n color = controls.Control.Colors.BLUE\n newObject = self.testingClass(color = color)\n mayaNode = newObject.node\n\n override = find('Override not enabled').using(mayaNode.overrideEnabled.get())\n verify(override).isTrue()\n\n actual = find('Incorrect color').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(color)\n\n @should('apply correct color when wrapping existing maya node')\n def wrapExistingNodeWithColor(self):\n\n color = controls.Control.Colors.BLUE\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode, color = color)\n\n override = find('Override not enabled').using(mayaNode.overrideEnabled.get())\n verify(override).isTrue()\n\n actual = find('Incorrect color').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(color)\n\n @should('clear color when intialized if specified')\n def wrapExistingNodeWithColorRemove(self):\n\n mayaNode = self.createNode()\n color = controls.Control.Colors.GREEN\n\n mayaNode.overrideEnabled.set(True)\n mayaNode.overrideColor.set(color)\n\n newObject = self.testingClass(mayaNode, color = None)\n\n override = find('Override was enabled').using(mayaNode.overrideEnabled.get())\n verify(override).isFalse()\n\n actual = find('Color was not cleared').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(controls.Control.Colors.DEFAULT)\n\n @should('not apply color when none provided')\n def createNodeWithoutColor(self):\n\n newObject = self.testingClass()\n mayaNode = newObject.node\n\n override = find('Override was enabled').using(mayaNode.overrideEnabled.get())\n verify(override).isFalse()\n\n actual = find('Incorrect color').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(controls.Control.Colors.DEFAULT)\n\n @should('not change color when none provided')\n def wrapExistingNodeWithoutColor(self):\n\n mayaNode = self.createNode()\n color = controls.Control.Colors.GREEN\n\n mayaNode.overrideEnabled.set(True)\n mayaNode.overrideColor.set(color)\n\n newObject = self.testingClass(mayaNode)\n\n override = find('Override state changed').using(mayaNode.overrideEnabled.get())\n verify(override).isTrue()\n\n actual = find('Color was changed').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(controls.Control.Colors.GREEN)\n\n @should('retrieve color information when overrides are off')\n def getColorWhenNoOverride(self):\n\n mayaNode = self.createNode()\n mayaNode.overrideColor.set(controls.Control.Colors.BLUE)\n\n newObject = self.testingClass(mayaNode)\n\n actual = find('Unexpected color information').using(newObject.color)\n verify(actual).isNone()\n\n @should('retrieve color information when 
overrides are on')\n def getColorWhenOverride(self):\n\n color = controls.Control.Colors.BLUE\n mayaNode = self.createNode()\n\n mayaNode.overrideEnabled.set(True)\n mayaNode.overrideColor.set(color)\n\n newObject = self.testingClass(mayaNode)\n\n actual = find('Unexpected color').using(newObject.color)\n verify(actual).isEqualTo(color)\n\n @should('set color information')\n def setColor(self):\n\n color = controls.Control.Colors.BLUE\n mayaNode = self.createNode()\n\n newObject = self.testingClass(mayaNode)\n\n newObject.color = color\n\n override = find('Override not enabled').using(mayaNode.overrideEnabled.get())\n verify(override).isTrue()\n\n actual = find('Incorrect color').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(color)\n\n @should('remove color information')\n def clearColor(self):\n\n color = controls.Control.Colors.BLUE\n mayaNode = self.createNode()\n\n newObject = self.testingClass(mayaNode, color = color)\n\n newObject.color = None\n\n override = find('Override not disabled').using(mayaNode.overrideEnabled.get())\n verify(override).isFalse()\n\n actual = find('Incorrect color').using(mayaNode.overrideColor.get())\n verify(actual).isEqualTo(controls.Control.Colors.DEFAULT)\n\n @should('retrieve correct renderable state when not renderable')\n def getRenderableNone(self):\n\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode)\n\n for attribute in self.testingClass.Attributes.Render.ALL:\n mayaNode.attr(attribute).set(False)\n\n renderable = find('Incorrect renderable state').using(newObject.renderable)\n verify(renderable).isFalse()\n\n @should('retrieve correct renderable state when partially renderable')\n def getRenderableSome(self):\n\n for attribute in self.testingClass.Attributes.Render.ALL:\n\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode)\n\n for renderAttr in self.testingClass.Attributes.Render.ALL:\n mayaNode.attr(renderAttr).set(False)\n\n mayaNode.attr(attribute).set(True)\n\n renderable = find('Incorrect renderable state when only {0} enabled'.format(attribute)).using(newObject.renderable)\n verify(renderable).isTrue()\n\n @should('retrieve correct renderable state when fully renderable')\n def getRenderableAll(self):\n\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode)\n\n for attribute in self.testingClass.Attributes.Render.ALL:\n mayaNode.attr(attribute).set(True)\n\n renderable = find('Incorrect renderable state').using(newObject.renderable)\n verify(renderable).isTrue()\n\n @should('set all renderable attributes')\n def setRenderable(self):\n\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode)\n\n newObject.renderable = False\n\n for attribute in self.testingClass.Attributes.Render.ALL:\n\n actual = find('Incorrect attribute state for {0}'.format(attribute)).using(mayaNode.attr(attribute).get())\n verify(actual).isFalse()\n\n newObject.renderable = True\n\n for attribute in self.testingClass.Attributes.Render.ALL:\n\n actual = find('Incorrect attribute state for {0}'.format(attribute)).using(mayaNode.attr(attribute).get())\n verify(actual).isTrue()\n\n @should('add a new shape node without affecting the original')\n def addSkinnedShape(self):\n\n testNode = self.createNode()\n testShapes = testNode.getShapes()\n\n otherNode = pm.polySphere()[0]\n otherShapes = otherNode.getShapes()\n oldShape = otherNode.getShape()\n\n oldShape.addAttr('SomeSpecialTestMarkerAttr', attributeType = controls.Attribute.Type.BOOL)\n\n newObject = 
self.testingClass(testNode)\n\n newObject.addSkinnedShape([oldShape.faces])\n\n other = find('Other node\\'s shapes changed').using(otherNode.getShapes())\n verify(other).collection.isEqualTo(otherShapes)\n\n difference = list(set(testShapes) ^ set(testNode.getShapes()))\n\n actual = find('Incorrect shape count after addition').using(difference)\n verify(actual).collection.size(1)\n\n newShape = difference[0]\n\n actual = find('Old shape was not duplicated').using(newShape)\n verify(actual).isNotEqualTo(oldShape)\n\n actual = find('New shape was not a duplicate').using(newShape.hasAttr('SomeSpecialTestMarkerAttr'))\n verify(actual).isTrue()\n\n\n######################################################################\n#\n# Shared Control Tests\n#\n######################################################################\n\n@describe('Shared Group Control')\nclass TestSharedControl(TestClass):\n\n testingClass = controls.SharedControl\n createNode = lambda x: pm.circle()[0]\n\n @setup\n def testsetup(self):\n\n cmds.file(newFile = True)\n\n @cleanup\n def testcleanup(self):\n\n cmds.file(newFile = True, force = True)\n\n def verifyCorrectControl(self, testControl, group):\n\n hasGroup = find('Missing group attribute').using(testControl.hasAttr(controls.SharedControl.Attributes.GROUP))\n verify(hasGroup).isTrue()\n\n correctGroup = find('Incorrect group set').using(testControl.attr(controls.SharedControl.Attributes.GROUP).get())\n verify(correctGroup).isEqualTo(group)\n\n exists = find('Shared node does not exist').using(pm.objExists(testControl.shared))\n verify(exists).isTrue()\n\n node = testControl.node\n\n sharedShapes = [shape for shape in node.getShapes() if shape.hasAttr(controls.SharedControl.Attributes.GROUP)]\n\n actual = find('Unexpected number of shared shapes').using(sharedShapes)\n verify(actual).collection.size(1)\n\n @should('error when no group is provided')\n def createWithoutGroup(self):\n\n actual = find('Create with no params').using(self.testingClass)\n verify(actual).callWith().shouldThrow(KeyError)\n\n @should('create node when no shared node is provided or already exists')\n def createWithoutParamWithoutExists(self):\n\n newObject = self.testingClass(group = 'TestGroup')\n self.verifyCorrectControl(newObject, 'TestGroup')\n\n @should('wrap node when no shared node is provided or already exists')\n def wrapWithoutParamWithoutExists(self):\n\n mayaNode = self.createNode()\n newObject = self.testingClass(mayaNode, group = 'TestGroup')\n\n self.verifyCorrectControl(newObject, 'TestGroup')\n\n @should('error when bad shared node type is provided and none already exists')\n def createWithBadParamWithoutExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n\n actual = find('Invalid shared parameter did not error').using(self.testingClass)\n verify(actual).callWith(mayaNode1, shared = mayaNode2, group = 'TestGroup').shouldThrow(ValueError)\n\n @should('error when bad shared node attribute is provided and none already exists')\n def createWithBadParamWithoutExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n\n actual = find('Invalid shared parameter did not error').using(self.testingClass)\n verify(actual).callWith(mayaNode1, shared = mayaNode2.getShape(), group = 'TestGroup').shouldThrow(ValueError)\n\n @should('wrap node when good shared node parameter is provided and none already exists')\n def createWithGoodParamWithoutExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n shape = 
mayaNode2.getShape()\n\n shape.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, shared = shape, group = 'TestGroup')\n\n self.verifyCorrectControl(newObject, 'TestGroup')\n\n parent = find('Shared shape was not parented to control').using(shape.hasParent(mayaNode1))\n verify(parent).isTrue()\n\n shared = find('Shape was not cached as shared').using(newObject.shared)\n verify(shared).isEqualTo(shape)\n\n @should('error if already part of a different group and no shared parameter is provided')\n def createWithoutParamWithOtherExists(self):\n\n mayaNode1 = self.createNode()\n shape = mayaNode1.getShape()\n\n shape.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape.attr(controls.SharedControl.Attributes.GROUP).set('OtherGroup')\n\n actual = find('Overlapping groups did not throw an error').using(self.testingClass)\n verify(actual).callWith(mayaNode1, group = 'TestGroup').shouldThrow(ValueError)\n\n @should('error if already part of a different group and bad shared parameter is provided')\n def createWithBadParamWithOtherExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n shape = mayaNode1.getShape()\n\n shape.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape.attr(controls.SharedControl.Attributes.GROUP).set('OtherGroup')\n\n actual = find('Overlapping groups did not throw an error').using(self.testingClass)\n verify(actual).callWith(mayaNode1, shared = mayaNode2, group = 'TestGroup').shouldThrow(ValueError)\n\n @should('error if already part of a different group and good shared parameter is provided')\n def createWithGoodParamWithOtherExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n shape1 = mayaNode1.getShape()\n shape2 = mayaNode2.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('OtherGroup')\n\n shape2.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape2.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n actual = find('Overlapping groups did not throw an error').using(self.testingClass)\n verify(actual).callWith(mayaNode1, shared = shape2, group = 'TestGroup').shouldThrow(ValueError)\n\n @should('wrap node when shared node exists and no shared parameter is provided')\n def wrapWithoutParamWithGoodExists(self):\n\n mayaNode1 = self.createNode()\n shape1 = mayaNode1.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, group = 'TestGroup')\n\n self.verifyCorrectControl(newObject, group = 'TestGroup')\n\n @should('error when bad shared parameter is provided and a good shared node exists')\n def wrapWithBadParamWithGoodExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n shape1 = mayaNode1.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n actual = find('Error not thrown with bad parameter type').using(self.testingClass)\n verify(actual).callWith(mayaNode1, shared = mayaNode2, group = 
'TestGroup').shouldThrow(ValueError)\n\n @should('wrap node when good shared parameter points to an existing shared node')\n def wrapWithGoodParamInstanceOfGoodExists(self):\n\n mayaNode1 = self.createNode()\n shape1 = mayaNode1.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, shared = shape1, group = 'TestGroup')\n\n self.verifyCorrectControl(newObject, 'TestGroup')\n\n actual = find('Unexpected shared node').using(newObject.shared)\n verify(actual).isEqualTo(shape1)\n\n @should('wrap node when good shared parameter is provided and there is an existing shared node of the same group')\n def wrapWithGoodParamWithGoodExists(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n\n shape1 = mayaNode1.getShape()\n shape2 = mayaNode2.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n shape2.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape2.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, shared = shape2, group = 'TestGroup')\n\n self.verifyCorrectControl(newObject, 'TestGroup')\n\n actual = find('Shared parameter did not replace existing shared node').using(newObject.shared)\n verify(actual).isEqualTo(shape2)\n\n @should('exclude shared node from getShape when no other shapes exist')\n def getShapeExcludesSharedWhenNoShapes(self):\n\n mayaNode1 = self.createNode()\n shape1 = mayaNode1.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, shared = shape1, group = 'TestGroup')\n\n actual = find('Incorrect shape returned').using(newObject.getShape())\n verify(actual).isNone()\n\n @should('exclude shared node from getShape when other shapes exist')\n def getShapeExcludesSharedWhenNoShapes(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n shape1 = mayaNode1.getShape()\n shape2 = mayaNode2.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n pm.parent(shape2, mayaNode1, shape = True, relative = True)\n pm.delete(mayaNode2)\n\n newObject = self.testingClass(mayaNode1, shared = shape1, group = 'TestGroup')\n\n actual = find('Incorrect shape returned').using(newObject.getShape())\n verify(actual).isEqualTo(shape2)\n\n @should('exclude shared node from getShapes when no other shapes exist')\n def getShapesExcludesSharedWhenNoShapes(self):\n\n mayaNode1 = self.createNode()\n shape1 = mayaNode1.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n newObject = self.testingClass(mayaNode1, shared = shape1, group = 'TestGroup')\n\n actual = find('Incorrect shapes returned').using(newObject.getShapes())\n verify(actual).collection.isEmpty()\n\n @should('exclude shared node from getShapes when other shapes exist')\n def getShapesExcludesSharedWhenNoShapes(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n 
shape1 = mayaNode1.getShape()\n shape2 = mayaNode2.getShape()\n\n shape1.addAttr(controls.SharedControl.Attributes.GROUP, dataType = controls.Data.Type.STRING)\n shape1.attr(controls.SharedControl.Attributes.GROUP).set('TestGroup')\n\n pm.parent(shape2, mayaNode1, shape = True, relative = True)\n pm.delete(mayaNode2)\n\n newObject = self.testingClass(mayaNode1, shared = shape1, group = 'TestGroup')\n\n actual = find('Incorrect shape returned').using(newObject.getShapes())\n verify(actual).collection.isEqualTo([shape2])\n\n\n######################################################################\n#\n# Fk Ik Control Tests\n#\n######################################################################\n\n@describe('Fk Ik Control')\nclass TestFkIkControl(TestClass):\n\n testingClass = controls.FkIkControl\n createNode = lambda x: pm.circle()[0]\n\n @setup\n def testsetup(self):\n\n cmds.file(newFile = True)\n\n @cleanup\n def testcleanup(self):\n\n cmds.file(newFile = True, force = True)\n\n @should('share node between multiple controls')\n def nodeIsShared(self):\n\n newObject1 = self.testingClass(group = 'TestGroup')\n newObject2 = self.testingClass(shared = newObject1.shared, group = 'TestGroup')\n\n shared = find('Shared node was not shared').using(newObject2.shared)\n verify(shared).isEqualTo(newObject1.shared)\n\n @should('share state between multiple controls')\n def stateIsShared(self):\n\n newObject1 = self.testingClass(group = 'TestGroup')\n newObject2 = self.testingClass(shared = newObject1.shared, group = 'TestGroup')\n\n newObject1.state = 0\n newObject2.state = 0\n\n newObject1.state = 1\n\n actual = find('State attribute did not update on all controls').using(newObject2.state)\n verify(actual).isEqualTo(1)\n\n @should('error when state is set outside the valid range')\n def setStateBad(self):\n\n mayaNode1 = self.createNode()\n newObject1 = self.testingClass(group = 'TestGroup')\n\n outOfRangeLow = controls.FkIkControl.States.ALL[0] - 1\n outOfRangeHigh = controls.FkIkControl.States.ALL[-1] + 1\n\n actual = find('Out-of-range state did not throw error').using(lambda x: setattr(newObject1, 'state', x))\n verify(actual).callWith(outOfRangeLow).shouldThrow(ValueError)\n verify(actual).callWith(outOfRangeHigh).shouldThrow(ValueError)\n\n @should('retrieve target object')\n def getTarget(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n newObject1 = self.testingClass(mayaNode1, group = 'TestGroup')\n\n mayaNode2.message.connect(mayaNode1.attr(controls.FkIkControl.Messages.TARGET))\n\n actual = find('Incorrect target retrieved').using(newObject1.target)\n verify(actual).isEqualTo(mayaNode2)\n\n @should('set target to valid transform')\n def setTargetGood(self):\n\n mayaNode1 = self.createNode()\n mayaNode2 = self.createNode()\n newObject1 = self.testingClass(mayaNode1, group = 'TestGroup')\n\n newObject1.target = mayaNode2\n\n target = mayaNode1.attr(controls.FkIkControl.Messages.TARGET)\n\n actual = find('Target was not set').using(target.get())\n verify(actual).isEqualTo(mayaNode2)\n\n @should('error when target is set as non-transform')\n def setTargetBad(self):\n\n mayaNode1 = self.createNode()\n shape1 = mayaNode1.getShape()\n newObject1 = self.testingClass(group = 'TestGroup')\n\n actual = find('Non-transform target did not throw error').using(lambda x: setattr(newObject1, 'target', x))\n 
verify(actual).callWith(shape1).shouldThrow(TypeError)","sub_path":"tests/controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":24738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"406126262","text":"\"\"\"\r\nsetIpAddress.py \r\n\r\nThis demonstration application assumes that you have one GigE camera visible to the host, and that\r\nthat GigE camera is connected to a GigE card with a statically assigned IP address.\r\n\r\nThis demo app is incomplete in that we can't know a priori what IP address, subnet mask and \r\ngateway *YOU* need to set. If you're unsure what values these need to be, please consult \r\nyour local network administrator/administratrix.\r\n\"\"\"\r\n\r\nfrom pixelinkWrapper import*\r\n\r\nA_OK = 0 # non-zero error codes\r\nGENERAL_ERROR = 1\r\n\r\n\"\"\"\r\nCheck if an IP address matches the network interface card (NIC) subnet\r\n\"\"\"\r\ndef is_subnet_matches(cameraIpAddress, cameraSubnetMask, cameraIdInfo):\r\n \r\n cameraSubnetAddress = list()\r\n nicSubnetAddress = list()\r\n for i in range(len(cameraIpAddress)):\r\n cameraSubnetAddress.append(cameraIpAddress[i] & cameraSubnetMask[i])\r\n\r\n for i in range(len(cameraIdInfo.NicIpAddress.Address.u8Address)):\r\n nicSubnetAddress.append(cameraIdInfo.NicIpAddress.Address.u8Address[i] & cameraIdInfo.NicIpMask.Address.u8Address[i])\r\n\r\n for i in range(len(cameraIpAddress)):\r\n if cameraSubnetAddress[i] != nicSubnetAddress[i]:\r\n return False\r\n \r\n return True\r\n\r\ndef main():\r\n\r\n # ******************* NOTE: Assign your values here *******************\r\n cameraIpAddress = (192, 168, 1, 2)\r\n cameraSubnetMask =\t (255, 255, 255, 0)\r\n cameraDefaultGateway = (222, 1, 1, 1)\r\n addressIsPersistent = False\r\n\t# ******************* NOTE: Assign your values here *******************\r\n\r\n # Remove this after you've set up your own appropriate values above.\r\n print(\"This demonstration application has not been configured for your local environment\\nSee the notes in setIpAddress.py for more information.\")\r\n return GENERAL_ERROR\r\n\r\n # Check our assumption that there's only one camera\r\n ret = PxLApi.getNumberCameras()\r\n numberOfCameras = ret[1]\r\n \r\n assert PxLApi.apiSuccess(ret[0])\r\n assert 1 == len(numberOfCameras)\r\n \r\n # Get the information for that camera\r\n cameraIdInfo = numberOfCameras[0]\r\n\r\n # A bit of sanity checking\r\n assert cameraIdInfo.NicIpAddress.Address.u32Address != 0\r\n\r\n if not is_subnet_matches(cameraIpAddress, cameraSubnetMask, cameraIdInfo):\r\n print(\"WARNING: You are setting an IP address that doesn't match the network interface card (NIC) subnet\")\r\n\r\n # Copy MAC address found in the cameraIdInfo into a list\r\n cameraMacAddress = list()\r\n for i in range(len(cameraIdInfo.CameraMac.MacAddr)):\r\n cameraMacAddress.append(cameraIdInfo.CameraMac.MacAddr[i])\r\n\r\n ret = PxLApi.setCameraIpAddress(cameraMacAddress, cameraIpAddress, cameraSubnetMask, cameraDefaultGateway, addressIsPersistent)\r\n\r\n print(\"PxLApi.setCameraIpAddress returned %d\" % ret[0])\r\n if not PxLApi.apiSuccess(ret[0]):\r\n return GENERAL_ERROR\r\n\r\n return A_OK\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"samples/Windows/setIpAddress.py","file_name":"setIpAddress.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"138097073","text":"import cv2\r\nimport time\r\nfrom PIL import 
Image\r\nimport face_recognition\r\nimport os\r\nimport numpy as np\r\nimport sys\r\nfrom save import *\r\nfrom config import number_of_times_to_upsample\r\nfrom config import num_jitters\r\nfrom config import tolerance\r\n\r\n\r\nsubjects = []\r\nstatus = []\r\nface_encoding_list = []\r\n\r\ndef colour_f(status1):\r\n if status1==\"vip\":\r\n return (0,255,0)\r\n if status1==\"blacklisted\":\r\n return (0,0,255)\r\n else :\r\n return (255,255,255)\r\n\r\ndef draw_rectangle2(img, rect,colour):\r\n (x, y, w, h) = rect\r\n cv2.rectangle(img, (x, y), (w, h), colour, 2)\r\n\r\ndef draw_text(img, text, x, y):\r\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\r\n\r\ndef prepare_training_data(data_folder_path):\r\n global subjects\r\n global status\r\n\r\n dirs = os.listdir(data_folder_path)\r\n\r\n faces = []\r\n\r\n labels = []\r\n\r\n for dir_name in dirs:\r\n\r\n if not dir_name.startswith(\"s\"):\r\n continue;\r\n\r\n label = int(dir_name.replace(\"s\", \"\"))\r\n\r\n subject_dir_path = data_folder_path + \"/\" + dir_name\r\n\r\n subject_images_names = os.listdir(subject_dir_path)\r\n\r\n for image_name in subject_images_names:\r\n\r\n if image_name.startswith(\".\"):\r\n continue;\r\n\r\n if image_name == \"name.txt\":\r\n name_path = subject_dir_path + \"/\" + image_name\r\n with open(name_path,'r+') as name:\r\n content = name.read()\r\n content = content.lower()\r\n subjects.append(content)\r\n\r\n elif image_name == \"status.txt\":\r\n name_path = subject_dir_path + \"/\" + image_name\r\n with open(name_path,'r+') as name:\r\n content = name.read()\r\n content = content.lower()\r\n status.append(content)\r\n\r\n else :\r\n image_path = subject_dir_path + \"/\" + image_name\r\n image = face_recognition.load_image_file(image_path)\r\n\r\n # make sure to resize on fixed amount\r\n # cv2.imshow(\"Training on image...\", cv2.resize(image, (400, 500)))\r\n print(\"Faces Scanned: \", len(faces) + 1)\r\n cv2.waitKey(100)\r\n\r\n faces.append(image)\r\n labels.append(label)\r\n cv2.destroyAllWindows()\r\n cv2.waitKey(1)\r\n cv2.destroyAllWindows()\r\n\r\n return faces\r\n\r\nfaces = prepare_training_data(\"train-images\")\r\n\r\n\r\n\r\ndef show_result(frame):\r\n global faces\r\n global subjects\r\n global status\r\n global face_encoding_list\r\n\r\n face_locations = face_recognition.face_locations(frame,number_of_times_to_upsample=number_of_times_to_upsample)\r\n try:\r\n for face in faces:\r\n face_encoding = face_recognition.face_encodings(face,num_jitters=num_jitters)[0]\r\n face_encoding_list.append(face_encoding)\r\n except IndexError:\r\n print(\"I wasn't able to locate any faces in at least one of the images. Check the image files. 
Aborting...\")\r\n quit()\r\n print(\"I found {} face(s) in this photograph.\".format(len(face_locations)))\r\n img = frame.copy()\r\n for face_location in face_locations:\r\n\r\n # Print the location of each face in this image\r\n top, right, bottom, left = face_location\r\n print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\r\n\r\n # You can access the actual face itself like this:\r\n face_image = frame[top:bottom, left:right]\r\n face_encoding1 = face_recognition.face_encodings(face_image,num_jitters=num_jitters)\r\n if len(face_encoding1) > 0 :\r\n face_encoding = face_encoding1[0]\r\n results = face_recognition.compare_faces(face_encoding_list, face_encoding , tolerance = tolerance)\r\n if True in results:\r\n index1 = results.index(True)\r\n colour_2 = colour_f(status[index1])\r\n draw_rectangle2(img, (left,top,right,bottom),colour_2)\r\n draw_text(img,subjects[index1] , left, top-5)\r\n\r\n print(subjects[index1])\r\n print(status[index1])\r\n else :\r\n print(\"not found\")\r\n draw_rectangle2(img, (left,top,right,bottom),(255,255,255))\r\n draw_text(img,\"No Match\" ,left, top-5)\r\n else:\r\n draw_rectangle2(img, (left,top,right,bottom),(255,255,255))\r\n draw_text(img,\"Error 1\" ,left, top-5)\r\n\r\n\r\n save(frame,\"before\")\r\n save(img,\"after\")\r\n cv2.imshow(\"face_detected\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\nvid = cv2.VideoCapture(0)\r\n# vid.open('http://192.168.43.1:8080/video')\r\n\r\nwhile True:\r\n check , frame = vid.read()\r\n cv2.imshow(\"Press c to Pass frame , q to exit\",frame)\r\n\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n if key == ord('c'):\r\n show_result(frame)\r\n\r\n\r\nvid.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"533366280","text":"import sys\n\n\nclass Solution:\n def convert(self, string, num_of_rows):\n window = num_of_rows + (num_of_rows - 2)\n padded_string = string + \" \" * window\n str_len = len(padded_string)\n if(window == 0):\n return string\n\n final_str = list()\n for k in range(0, num_of_rows):\n i = k\n j = window - k\n while(i < str_len and j < str_len):\n current_char = padded_string[i]\n if(current_char != \" \"):\n final_str.append(current_char)\n i += window\n if(k != 0 and k != num_of_rows - 1):\n current_char = padded_string[j]\n if(current_char != \" \"):\n final_str.append(current_char)\n j += window\n\n return ''.join(final_str)\n\n\nif __name__ == \"__main__\":\n string = input().strip()\n num_of_rows = int(input())\n ret_value = Solution().convert(string, num_of_rows)\n print(ret_value)\n","sub_path":"LeetCode/zig_zag_conversion_2.py","file_name":"zig_zag_conversion_2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475529382","text":"#!/usr/bin/env python\n\n# script to take \"TABLE OF TREE SEGS\" from John White\n# and collate sequences from each seg into fasta files\n# 10.07.2019\n\n# sometimes the spreadsheet gives an NCBI# to download\n# other times it gives an isolate name which I'll get from my current files\n# will do one segment at a time\n\nimport sys, os\nimport argparse\nfrom Bio import Entrez\nfrom Bio import SeqIO\nimport csv\n\n# use argparse to grab command line arguments\n\nparser = 
argparse.ArgumentParser(\"take TABLE OF TREE SEGS from John in csv format and collate\"\n \" sequences into fasta files\")\n\nparser.add_argument('-t', '--table', type = str,\n help = \"TABLE OF TREE SEGS from John. Must be in csv format.\")\nparser.add_argument('-s', '--seq_file', type = str,\n help = \"Fasta file containing already obtained sequences\")\nparser.add_argument('-m', '--meta_file', type = str,\n help = \"Meta data file for the obtained sequences\")\nparser.add_argument('-g', '--seg', type = str,\n help = \"Segment to analyse. Should be in the format 'SEG-4'\")\n\n\nargs = parser.parse_args()\n\n# copy function to get NCBI records from 'retrieve_genbank_records.py'\n\nEntrez.email = \"matthewjneave1@gmail.com\"\n\ndef retrieve_ncbi_record(ncbi_id):\n print(\"retrieving {} from NCBI\".format(ncbi_id))\n new_handle = Entrez.efetch(db=\"nucleotide\", id=ncbi_id, rettype=\"fasta\")\n seq_record = SeqIO.read(new_handle, \"fasta\")\n return(seq_record)\n\n# call the sequence file into an index for easy access\n\navail_seqs = SeqIO.index(args.seq_file, \"fasta\")\n\n# make a meta data dictionary for the already obtained sequences\n\nmeta_dict = {}\n\nwith open(args.meta_file) as fl:\n header = next(fl).strip().split(\",\")\n for line in fl:\n line = line.strip()\n cols = line.split(\",\")\n strain = cols[0]\n meta_dict[strain] = cols[5:]\n\n# read through spreadsheet line by line\n# extracting appropriate sequence and metadata\n\nrecords_to_write = []\nmeta_to_write = [[\"ID\", \"strain\", \"type\", \"year\", \"country\", \"accession\"]]\n\nwith open(args.table) as t:\n reader = csv.reader(t)\n # only want header columns that refer to sequences\n # see next for loop\n header = next(reader)[4:]\n for line in reader:\n strain = line[0].strip()\n type = line[1].strip()\n year = line[2].strip()\n country = line[3].strip().replace(\"_\", \"/\")\n for index, seq in enumerate(line[4:]):\n seq = seq.strip()\n # check the right segment is selected for this run\n # and that John wants this particular segment in the tree\n if seq != \"\" and header[index] == args.seg:\n # this is a sequence that we need to include\n ## 16.07.19 John has sent me a new spreadsheet that contains\n ## a new isolate name (replacing an old one)\n ## plus it has genbank numbers but these are not on NCBI\n ## need to hack my script to get this to work\n if strain == \"DPP9244\":\n strain = \"V9244\"\n new_name = \"DPP9244\"\n\n # first check if I have it already\n if strain in avail_seqs:\n segment_number = int(args.seg.split(\"-\")[1])\n #print(\"{} detected in avail_seqs\".format(strain))\n records_to_write.append(avail_seqs[strain])\n # if the record is V9244, need to include accession numbers from the spreadsheet\n # these are not on NCBI yet\n if strain == \"V9244\":\n accession = seq\n elif strain != seq:\n accession = seq\n else:\n try:\n accession = meta_dict[strain][segment_number]\n except:\n print(\"couldn't get accession number for {}, setting to unassigned\".format(strain))\n accession = \"MZXXXXXX\"\n\n if strain == \"V9244\":\n meta_to_write.append([strain, new_name, type, year, country, accession])\n else:\n meta_to_write.append([strain, strain, type, year, country, accession])\n else:\n try:\n record = retrieve_ncbi_record(seq)\n records_to_write.append(record)\n # note: genbank records get downloaded as fasta with the version number\n # need to add this '.1' to the meta (e.g. 
KX578963.1)\n meta_to_write.append([record.id, strain, type, year, country, record.id])\n except Exception as e:\n print(e)\n print(\"{} unable to be collated\".format(seq))\n\nSeqIO.write(records_to_write, args.seg + \".fasta\", \"fasta\")\n\nwith open(args.seg + \".meta\", \"w\") as output:\n for meta in meta_to_write:\n output.write(\",\".join(meta) + \"\\n\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"BTV/scripts/table_to_fasta.py","file_name":"table_to_fasta.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"406418940","text":"\"\"\"PLEASE PROVIDE MODULE DOCSTRING.\"\"\"\n\n\nimport SimpleGUICS2Pygame.simpleguics2pygame as simplegui\n\n\nclass Text():\n\t\"\"\"Object holding text string, x-offsets, font object and scale for drawing the actual text\"\"\"\n\tdef __init__(self, chars: str, scale: tuple, font):\n\t\tself.chars = chars\n\t\tself.scale = scale\n\t\tself.font = font\n\t\tself.offsets = []\n\t\tfor char in chars:\n\t\t\tif char.upper() not in font.characters:\n\t\t\t\tindex = font.characters.index(\" \") # fallback to space\n\t\t\t\tprint(\"Character {0} not found in font {1}\".format(char, self.font))\n\t\t\telse:\n\t\t\t\tindex = font.characters.index(char.upper())\t# no upper/lowercase letters yet\n\t\t\toffset = ((font.offset_x + font.g_width)*index, font.offset_y)\n\t\t\tself.offsets.append(offset)\n\n\tdef GetTextWidth(self) -> float:\n\t\t\"\"\"Get total width for a set of glyphs and a scale factor\"\"\"\n\t\twidth = 0\n\t\tfor _ in self.chars:\t# monospace therefore same width for all chars\n\t\t\twidth += self.font.g_width * self.scale[0]\n\t\treturn width\n\n\tdef GetTextHeight(self) -> float:\n\t\t\"\"\"Single-line, therefore same as char height times scale\"\"\"\n\t\treturn self.font.g_height * self.scale[1]\n\n\tdef Draw(self, canvas: simplegui.Canvas, x: int, y: int) -> None:\n\t\t\"\"\"Draws the text object.\"\"\"\n\t\tself.font.Draw(canvas, self, x, y)\n\n\nclass Font():\n\t\"\"\"Generic class providing methods for any font\"\"\"\n\tdef __init__(self, font_path: str, characters: str, offset: tuple, glyphdims: tuple):\n\t\t\"\"\"ctor\"\"\"\n\t\tself.characters = characters\n\t\tself.image = simplegui._load_local_image(font_path)\n\t\tself.offset_x = offset[0]\n\t\tself.offset_y = offset[1]\n\t\tself.g_width = glyphdims[0]\n\t\tself.g_height = glyphdims[1]\n\n\t#pylint: disable=line-too-long\n\tdef Draw(self, canvas: simplegui.Canvas, text: Text, x: int, y: int) -> None:\n\t\t\"\"\"Draw method to be called by the specific font class\"\"\"\n\t\tfor i, _ in enumerate(text.chars):\n\t\t\tcanvas.draw_image(self.image,\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#image\n\t\t\t\t\t (text.offsets[i][0] + self.g_width // 2, text.offsets[i][1] + self.g_height // 2),\t\t#center_source\n\t\t\t\t\t (self.g_width, self.g_height),\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#width_height_source\n\t\t\t\t\t (x + (self.g_width * i + self.g_width // 2) * text.scale[0], y + (self.g_height // 2) * text.scale[1]),\t#center_dest\n\t\t\t\t\t (self.g_width * text.scale[0], self.g_height * text.scale[1])\t\t\t\t\t\t\t\t\t\t\t\t#width_height_dest\n\t\t\t\t\t )\n\n\nclass FontVisitor(Font):\n\t\"\"\"Subclass implementing the Visitor font\"\"\"\n\tdef __init__(self, color: str):\n\t\t\"\"\"ctor\"\"\"\n\t\tsuper().__init__(\n\t\t\t\t\t\t\"assets/visitor_{0}.png\".format(color),\n\t\t\t\t\t\tr\"#/$% 0123456789?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\\_\",\n\t\t\t\t\t\t(0, 20),\n\t\t\t\t\t\t(61, 61))\n\n\tdef Draw(self, canvas: 
simplegui.Canvas, text: str, x: float, y: float) -> None:\n\t\tsuper().Draw(canvas, text, x, y)","sub_path":"StreetFighter/Font.py","file_name":"Font.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"86449205","text":"# How can you make this more scalable and reusable later?\ndef find_armstrong_numbers(numbers):\n armstrong_array = []\n\n for x in range(len(numbers)):\n numbers_string = str(x)\n numbers_list = list(numbers_string)\n numbers_array = []\n\n for num in numbers_list:\n numbers_array.append(int(num))\n \n sum_of_digits = 0\n\n for digit in numbers_array:\n sum_of_digits += digit ** len(numbers_array)\n\n if x == sum_of_digits:\n armstrong_array.append(x)\n \n print(armstrong_array)\n\n return armstrong_array\n\n\n","sub_path":"python/armstrong_numbers.py","file_name":"armstrong_numbers.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"136465959","text":"import json\nimport pymysql\nfrom tqdm import trange\nimport traceback\nimport config\n\ndef write_into_db(words_path):\n with open(words_path) as f:\n words = f.read()\n words_list = json.loads(words)\n\n db = pymysql.connect(host=config.db_host, user=config.db_username, password=config.db_passwd, database=config.db_name, charset=config.db_charset)\n\n cursor = db.cursor()\n for i in trange(len(words_list)):\n try:\n word = words_list[i]\n cursor.execute('select id from ' + config.db_table + ' where words = %s limit 1', (word,))\n result = cursor.fetchone()\n try:\n if result is None:\n cursor.execute('insert into ' + config.db_table + '(words) values (%s)', (word,))\n db.commit()\n except Exception:\n print(\"写入失败\")\n traceback.print_exc()\n db.rollback()\n except Exception:\n print(\"查询失败\")\n traceback.print_exc()\n\n db.close()","sub_path":"write_sql.py","file_name":"write_sql.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"485284536","text":"from tqsdk import TqApi, TqAuth\n\n# 创建API实例\napi = TqApi(auth=TqAuth(\"czq1ac\",\"czq1ac\"))\n\nsymbol = \"CZCE.SA309\"\n# 获得上期所 cu2001 的行情引用,当行情有变化时 quote 中的字段会对应更新\n# get_quote() 函数提供实时行情和合约信息:\nquote = api.get_quote(symbol)\nprint(quote)\n# 获取信息\ninfo = api.query_symbol_info(symbol)\nprint(quote)\n\n# 获取日线\n\n# K线数据¶\n# get_kline_serial() 函数获取指定合约和周期的K线序列数据:\n\nklines = api.get_kline_serial(symbol, 10) # 获取SHFE.cu1812合约的10秒K线\nprint(klines)\n\n# 日��\ndata_klines = api.get_kline_serial(symbol, 86400) # 86400秒线, 即日线\nprint(data_klines)\n\n\n\n\napi.close()\n","sub_path":"LearnDay/tq/testApi.py","file_name":"testApi.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615016013","text":"#!/usr/bin/python3\n\nimport argparse\nimport os\nfrom os import access, listdir, path, rename, remove, removedirs, walk\n\n\ndef removeFiles(directory, argString, folder=False, verbose=False):\n \"\"\" Remove files from the directory that contain the string passed to\n the program. 
\"\"\"\n\n if verbose:\n print('Removing files from ', directory)\n\n filelist = listdir(directory)\n for file in filelist:\n if(argString in file):\n fullfilepath = path.join(directory, file)\n if(path.isdir(fullfilepath) & folder):\n if(verbose):\n print(\"Removing folder \", fullfilepath)\n try:\n removedirs(fullfilepath)\n except OSError as err:\n print(\"Error.\", err.strerror, fullfilepath)\n elif(path.isfile(fullfilepath)):\n if(verbose):\n print(\"Removing \", fullfilepath)\n\n remove(fullfilepath)\n\n\ndef removeFilesRecursive(directory, argString, folder=False, verbose=False):\n \"\"\" Remove files recursively from the directory that contain the string\n passed to the program recursively. \"\"\"\n\n if verbose:\n print('Removing files...')\n\n for (dirpath, dirname, filenames) in walk(directory, topdown=False):\n for file in filenames:\n if(argString in file):\n fullfilepath = path.join(dirpath, file)\n\n if verbose:\n print(fullfilepath)\n # finally, removes\n remove(fullfilepath)\n\n # Removes folder after removing all files\n if(folder):\n try:\n removedirs(dirpath)\n except OSError as err:\n print(\"Error.\", err.strerror, dirpath)\n\n\ndef replaceString(directory, argString, replaceString, folder=False,\n verbose=False):\n \"\"\" Replaces a string in the filenames passed as an argument\n to the program. \"\"\"\n if verbose:\n print('Removing string from files from ', directory)\n\n filelist = listdir(directory)\n for file in filelist:\n if(argString in file):\n fullfilepath = path.join(directory, file)\n if(verbose):\n print(\"Renaming \", fullfilepath)\n\n newfilename = file.replace(argString, replaceString)\n if newfilename == '':\n newfilename = 'untitled'\n\n newfullpath = path.join(directory, newfilename)\n rename(fullfilepath, newfullpath)\n\n\ndef replaceStringRecursive(directory, argString, replaceString, folder=False,\n verbose=False):\n \"\"\" Replaces a string in the filenames passed as an argument\n to the program recursively. 
\"\"\"\n if verbose:\n print('Replacing string in filenames...')\n\n for (dirpath, dirname, filenames) in walk(directory, topdown=False):\n for file in filenames:\n # finally, removes string\n if(argString in file):\n fullfilepath = path.join(dirpath, file)\n\n if(verbose):\n print(\"Renaming \", fullfilepath)\n\n # replaces string for an empty character\n newfilename = file.replace(argString, replaceString)\n\n # if filename is empty, add 'untitled' to it\n if newfilename == '':\n newfilename = 'untitled'\n\n # gets the new full path of the file, and then, renames it\n newfullpath = path.join(dirpath, newfilename)\n rename(fullfilepath, newfullpath)\n\n # removes string from folder if enabled\n if(folder):\n for folder in dirname:\n newdirname = folder.replace(argString, replaceString)\n rename(path.join(dirpath, folder), path.join(dirpath,\n newdirname))\n\n\n# Main function, responsible for parsing arguments and executing actions\ndef main():\n \"\"\" Main function, used to parse arguments and execute actions \"\"\"\n\n parser = argparse.ArgumentParser(description='Renames or removes files' +\n ' by searching for bits of strings.')\n\n # main arguments\n parser.add_argument('directory', metavar='dir',\n help='Directory to search for files.')\n\n # optional arguments\n parser.add_argument('-R', '--recursive', action='store_true',\n dest='recursive',\n help='Operates recursively into the directory tree.')\n parser.add_argument('-F', '--folder', action='store_true',\n dest='folder',\n help='Operates on folders as well.')\n parser.add_argument('-f', '--force', action='store_true',\n dest='force',\n help='Forces removal of non-empty folders.')\n parser.add_argument('-v', '--verbose', action='store_true',\n dest='verbose',\n help='Verbose mode.')\n\n # Add mutually exclusive arguments into a group\n argGroup = parser.add_mutually_exclusive_group(required=True)\n argGroup.add_argument('-rm', '--removefile', dest='rmfile', nargs=1,\n metavar='string',\n help='Removes a file based on a string search.',)\n argGroup.add_argument('-tr', '--trimstring', dest='trstr', nargs=1,\n metavar='string',\n help='Trims a string out from the filename based on ' +\n 'a string search')\n argGroup.add_argument('-rp', '--replacestring', dest='rpstr', nargs=2,\n metavar='string',\n help='Replaces a string in the filename based on ' +\n 'a string search')\n\n # parses arguments from cli\n args = parser.parse_args()\n\n # gets full path to the directory\n absdir = path.abspath(args.directory)\n if not access(absdir, os.W_OK):\n print('Error, unable to access directory \"{0}\" for writing.'\n .format(absdir), 'Try running as an administrator.')\n exit()\n\n # executes actions for each option\n if(args.rmfile is not None):\n if (args.recursive is True):\n removeFilesRecursive(absdir, args.rmfile[0], folder=args.folder,\n verbose=args.verbose)\n else:\n removeFiles(absdir, args.rmfile[0], folder=args.folder,\n verbose=args.verbose)\n elif(args.trstr is not None):\n if(args.recursive is True):\n replaceStringRecursive(absdir, args.trstr[0], '',\n folder=args.folder, verbose=args.verbose)\n else:\n replaceString(absdir, args.trstr[0], '', folder=args.folder,\n verbose=args.verbose)\n elif(args.rpstr is not None):\n if(args.recursive is True):\n replaceStringRecursive(absdir, args.rpstr[0], args.rpstr[1],\n folder=args.folder,\n verbose=args.verbose)\n else:\n replaceString(absdir, args.rpstr[0], args.rpstr[1],\n folder=args.folder, verbose=args.verbose)\n\n\n# runs main 
program\nmain()\n","sub_path":"qfr.py","file_name":"qfr.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"548970390","text":"import os\nos.environ['THEANO_FLAGS'] = \"device=gpu0\"\n\nfrom keras.layers import Activation,Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D\nfrom keras.models import Model, Sequential\nfrom keras.datasets import mnist\nimport numpy as np\nfrom keras.callbacks import TensorBoard\nfrom keras.preprocessing import image as image_utils\nimport numpy as np\nimport h5py as h5\n\nh5f = h5.File('data.h5','r')\nx_train = h5f['train'][:]\nx_val = h5f['val'][:]\nx_test = h5f['test'][:]\nh5f.close()\n\nprint(x_train.shape)\nprint(x_test.shape)\n\nnum_channels = 3\nimage_width_process = 96\nimage_height_process = image_width_process\n\ninput_img = Input(shape=(num_channels, image_width_process, image_height_process))\n\nx = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(input_img)\n#x = MaxPooling2D((2, 2), border_mode='same')(x)\n#x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)\n#x = MaxPooling2D((2, 2), border_mode='same')(x)\n#x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)\nencoded = MaxPooling2D((2, 2), border_mode='same')(x)\n\n# at this point the representation is (8, 4, 4) i.e. 128-dimensional\n\nx = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(encoded)\nx = UpSampling2D((2, 2))(x)\n#x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)\n#x = UpSampling2D((2, 2))(x)\n#x = Convolution2D(32, 3, 3, activation='relu')(x)\n#x = UpSampling2D((2, 2))(x)\ndecoded = Convolution2D(3, 3, 3, activation='sigmoid', border_mode='same')(x)\n\nautoencoder = Model(input_img, decoded)\nautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\nautoencoder.fit(x_train, x_train,\n nb_epoch=10,\n batch_size=100,\n shuffle=True,\n validation_data=(x_val, x_val))\n\nimport matplotlib.pyplot as plt\n\ndecoded_imgs = autoencoder.predict(x_test)\n\nn = 10\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i+1)\n plt.imshow(x_test[i].reshape(image_width_process, image_height_process,num_channels))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + n+1)\n plt.imshow(decoded_imgs[i].reshape(image_width_process, image_height_process,num_channels))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()","sub_path":"ae.py","file_name":"ae.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"442162826","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom accounts.forms import SubscriptionForm\nfrom accounts.models import Profile\nfrom django.db import IntegrityError\n\n\ndef account_subscribe(request):\n \"\"\"\n Subscription page\n \"\"\"\n form = SubscriptionForm()\n if request.method == 'POST':\n form = SubscriptionForm(request.POST)\n if form.is_valid():\n \"\"\"\n Form is valid, process saving\n \"\"\"\n try:\n Profile.objects.create_profile_by_email(\n form.cleaned_data['email']\n )\n except IntegrityError:\n return redirect('/thanks')\n \n return redirect('/thanks')\n else:\n return redirect('/subscribe')\n\n return render(request, \"landing/base.html\", {\n 'form': form,\n 
'action': '/subscribe/'\n })\n\n\ndef account_subscribe_after(request):\n \"\"\"\n After subscription page\n \"\"\"\n return render(request, \"landing/after.html\")\n","sub_path":"Django/landing/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"541290581","text":"import os\nfrom flask import Flask, render_template, request,redirect, url_for, flash\nfrom werkzeug.utils import secure_filename\nfrom app import app, db\nfrom app.upload import upload_blob\nfrom app.label import analyze_labels\nimport json \nfrom app.models import Detected\nfrom config import Config\nfrom app.forms import SearchForm\n\nUPLOAD_FOLDER = '/home/lewd/gdprojects/betadata/backend/app/static/uploads/'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','mp4', 'mkv'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n# basedir = os.path.abspath(os.path.dirname(__file__))\n# SECRET_KEY = os.environ.get('SECRET_KEY') or 'seeker'\n# GOOGLE_APPLICATION_CREDENTIALS=os.environ.get(\"SECRET_KEY\")\n# SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n# 'sqlite:///' + os.path.join(basedir, 'app.db')\n# SQLALCHEMY_TRACK_MODIFICATIONS = False\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n gcs_upload = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n to_gcs = upload_blob('ziscography_bucket', gcs_upload, filename)\n results = analyze_labels(to_gcs)\n for result in results:\n try: \n d = Detected(label_description=result['label_description'], label_category=result['label_category'], start_time=result['start_time'], end_time=result['stop_time'], confidence=result['confidence'])\n db.session.add(d)\n db.session.commit()\n except: \n pass\n return render_template('search_results.html', results=results)\n\n return '''\n \n Upload new File\n

Upload new File

\n
\n

\n \n

\n '''\n\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n if request.method == 'POST':\n if 'search_term' not in request.form:\n flash('No search term entered')\n return redirect(request.url)\n search_term = request.form['search_term']\n s = search_term.split()\n if ('play') in s:\n if ('from') in s:\n return render_template('player.html')\n if ('clip') in s:\n return 'playing clip'\n elif ('find') in s:\n if ('first') in s:\n return 'playing first'\n if ('last') in s:\n return 'playing last'\n else:\n return 'x clips found'\n else:\n return 'search term not understood'\n return render_template(\"search.html\")\n\n# @app.route('/search_results', methods=['GET', 'POST'])\n# def search_results():\n# return render_template(\"search_results.html\")\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n","sub_path":"backend/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"146604874","text":"\"\"\"\nThis module contains ADT -> Team.\n\"\"\"\n\nfrom event import Event\nfrom arrays import ArrayExpanded\nfrom API_requests import Requests\nimport matplotlib.pyplot as plt\nimport numpy\n\n\nclass Team:\n \"\"\"\n Team ADT\n The atribute of this class is team ID.\n \"\"\"\n\n def __init__(self, team_id: str, start_date: str, end_date: str):\n self._team_id = team_id\n self._num_events = 0\n self._events = self.get_last_events(start_date, end_date)\n\n def get_last_events(self, start_date: str, end_date: str):\n print('Getting last events about your team...')\n\n events_json = Requests.get_events_by_team_id(\n self._team_id, start_date, end_date)\n\n events_lst = ArrayExpanded(len(events_json))\n for event in events_json:\n events_lst.insert(self._num_events, Event(event))\n self._num_events += 1\n\n return events_lst\n\n def get_results_of_events(self):\n \"\"\"\n This method returns result of the chosen event.\n \"\"\"\n results = {\"win\": 0, \"lose\": 0, \"draw\": 0}\n\n for event in self._events:\n results[event.get_result(self._team_id)] += 1\n\n return results\n\n def get_schemas_info(self):\n \"\"\"\n This method returns information about schemas which team uses.\n \"\"\"\n schemes = {}\n for event in self._events:\n scheme = event.get_scheme(self._team_id)\n\n if len(scheme) > 0:\n if scheme not in schemes:\n schemes[scheme] = {\"win\": 0, \"lose\": 0, \"draw\": 0}\n\n schemes[scheme][event.get_result(self._team_id)] += 1\n\n return schemes\n\n def get_ball_possesion_info(self):\n \"\"\"\n This method returns teams ball possesion.\n \"\"\"\n ball_possesion_info = {'0-29': {\"win\": 0, \"lose\": 0, \"draw\": 0},\n '30-39': {\"win\": 0, \"lose\": 0, \"draw\": 0},\n '40-49': {\"win\": 0, \"lose\": 0, \"draw\": 0},\n '50-59': {\"win\": 0, \"lose\": 0, \"draw\": 0},\n '60-69': {\"win\": 0, \"lose\": 0, \"draw\": 0},\n '70-100': {\"win\": 0, \"lose\": 0, \"draw\": 0}}\n\n for event in self._events:\n ball_possesion = event.get_ball_possesion(self._team_id)\n\n if ball_possesion:\n if 0 <= ball_possesion < 30:\n range_possesion = '0-29'\n elif 30 <= ball_possesion < 40:\n range_possesion = '30-39'\n elif 40 <= ball_possesion < 49:\n range_possesion = '40-49'\n elif 50 <= ball_possesion < 59:\n range_possesion = '50-59'\n elif 60 <= ball_possesion < 69:\n range_possesion = '60-69'\n else:\n range_possesion = '70-100'\n\n ball_possesion_info[range_possesion][event.get_result(\n self._team_id)] += 1\n\n return ball_possesion_info\n\n def 
get_fouls_info(self):\n \"\"\"\n This method returns information about fouls in a game.\n \"\"\"\n fouls_info = {}\n\n for event in self._events:\n fouls = event.get_fouls(self._team_id)\n\n if fouls:\n if fouls not in fouls_info:\n fouls_info[fouls] = {\"win\": 0, \"lose\": 0, \"draw\": 0}\n\n fouls_info[fouls][event.get_result(\n self._team_id)] += 1\n\n return fouls_info\n\n def get_shots_info(self):\n \"\"\"\n This method retruns information about shots in the game.\n \"\"\"\n shots_info = {}\n\n for event in self._events:\n shots = event.get_shots(self._team_id)\n\n if shots:\n if shots not in shots_info:\n shots_info[shots] = {\"win\": 0, \"lose\": 0, \"draw\": 0}\n\n shots_info[shots][event.get_result(\n self._team_id)] += 1\n\n return shots_info\n\n def analyze_schemas(self):\n \"\"\"\n This method returns analysis of the schemas displayed in graphic.\n \"\"\"\n schemes = self.get_schemas_info()\n\n labels1, win_count, lose_count, draw_count = [], [], [], []\n for key, value in schemes.items():\n labels1.append(key)\n win_count.append(value[\"win\"])\n lose_count.append(value[\"lose\"])\n draw_count.append(value[\"draw\"])\n\n win_count = numpy.array(win_count)\n lose_count = numpy.array(lose_count)\n draw_count = numpy.array(draw_count)\n\n _, ax = plt.subplots()\n\n ax.bar(labels1, lose_count, 0.5, label='Count of lose shemes')\n ax.bar(labels1, draw_count, 0.5,\n label='Count of draw shemes', bottom=lose_count)\n ax.bar(labels1, win_count, 0.5, label='Count of win schemes',\n bottom=lose_count + draw_count)\n\n ax.set_ylabel('Number of games')\n ax.set_title(\"Analysis of shemes' efficiency\")\n ax.legend()\n\n plt.show()\n","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"612257852","text":"import pandas as pd\nimport numpy as np\nimport datetime\nimport pandas_datareader as pdr\nimport quandl\nimport csv\n# from .env import quandl_API\n\n\n# Pulling data from Quandl API \nquandl.ApiConfig.api_key='1mEBe1BeVaAExprr7akA'\ndf_ism = pd.read_csv('https://www.quandl.com/api/v3/datasets/ISM/MAN_PMI.csv?api_key=1mEBe1BeVaAExprr7akA', index_col=['Date'])\ndf_ism = df_ism.iloc[::-1]\n\n\n\n# Setting up the Start and End time \n# series_code = ['CURRCIR', 'DGS10']\ndata_source = 'fred'\n\nstart = datetime.datetime (1990, 1, 1) # (2005, 5, 1)\nend = datetime.datetime (2030, 12, 1)\n\n\n\"\"\"\nReading in the data from FRED\n\"\"\"\n# dfs = [copper, iron_steel, chemical_man, con_mach_eq]\nnan_value = 0\n# commodities = pd.concat(dfs, join='outer', axis=1).dropna()\n\ndf_m2 = pdr.DataReader('CURRCIR', data_source, start, end)\ndf_m2 = pd.DataFrame(df_m2).fillna(nan_value)\ndf_yr10 = pdr.DataReader('DGS10', data_source, start, end)\ndf_yr10 = pd.DataFrame(df_yr10).fillna(nan_value)\ndf_vix = pdr.DataReader('VIXCLS', data_source, start, end)\ndf_vix = pd.DataFrame(df_vix).fillna(nan_value)\ndf_headline_cpi = pdr.DataReader('CPIAUCSL', data_source, start, end)\ndf_headline_cpi = pd.DataFrame(df_headline_cpi).fillna(nan_value)\ndf_core_cpi = pdr.DataReader('CPILFESL', data_source, start, end)\ndf_core_cpi = pd.DataFrame(df_core_cpi).fillna(nan_value)\n\n\n\n# df_qoq = pd.merge(df_gdp, df_headline_cpi, left_index=True, right_index=True)\n# df_qoq = pd.merge(df_qoq, df_core_cpi, left_index=True, right_index=True)\n# df_qoq.columns = ['GDP', 'CPI', 'Core_CPI']\n# print(df_qoq.head())\n# print(df_qoq.tail(10))\n\n\n\"\"\"\nMerging all the dataframes and 
renaming \nthe columns MoM with some QoQ (for the most part)\n\"\"\"\ndf_mom = pd.merge(df_m2, df_yr10, on='DATE', how='inner') # using `merge` REMOVES all NaN values\ndf_mom = pd.merge(df_mom, df_ism, left_index=True, right_index=True)\ndf_mom= pd.merge(df_mom, df_vix, left_index=True, right_index=True)\ndf_mom = pd.merge(df_mom, df_headline_cpi, left_index=True, right_index=True)\ndf_mom = pd.merge(df_mom, df_core_cpi, left_index=True, right_index=True)\n\n# df_mom = df_mom.join(df_gdp) # using `join` will KEEP all NaN values\n# df_mom = df_mom.join(df_m2_vel)\n\ndf_mom.columns = ['M2_supply', 'US10yr_rate', 'ISM_PMI', 'VIX', 'Headline_CPI', 'Core_CPI'] # , 'Real_GDP', 'M2_velocity'\n\ndf_mom['ISM_lagged18'] = df_mom['ISM_PMI'].shift(18)\ndf_mom['US10_pct_yoy'] = df_mom['US10yr_rate'].pct_change(12)\ndf_mom['Core_pct_yoy_INV'] = df_mom['Core_CPI'].pct_change(12) \n\n# print(df_mom.shape)\n# print(df_mom.head())\n# print(df_mom.tail())\n# print(df_mom.corr())\n\nfedfunds = pdr.DataReader('FEDFUNDS', data_source, start, end)\nfedfunds = pd.DataFrame(fedfunds).fillna(nan_value)\n\npce = pdr.DataReader('PCE', data_source, start, end)\npce = pd.DataFrame(pce).fillna(nan_value)\n\n\n\n\nif __name__ == \"__main__\":\n print(df_mom.tail(10))\n # print(fedfunds.head(2))\n # print(fedfunds.tail(10))\n print(pce.head())\n print(pce.tail(6))\n # print(df_qoq.tail())\n # print(df_mom.shape)\n # print(df_mom.isnull().sum())\n # print(df_mom.corr())","sub_path":"fred/mom_data.py","file_name":"mom_data.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"11418961","text":"\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\nf = open(\"EQ_Volatility_2016-01-14_0300_LON.csv\",\"r\")\n\nmat, stk, vol = [], [], []\nfor x in f:\n data = x.split(\",\")\n data[-1] = data[-1].strip(\"\\n\")\n if data[1]==\"KOSPI 200\":\n today = datetime.datetime.strptime(data[0],\"%Y-%m-%d\")\n matDate = datetime.datetime.strptime(data[7],\"%Y-%m-%d\")\n stk.append(int(data[8]))\n mat.append((matDate-today).days/365.0)\n vol.append(float(data[-1]))\n\nmat = np.array(mat)\nstk = np.array(stk)\nvol = np.array(vol)\nmat.shape, stk.shape, vol.shape = (14, 11), (14, 11), (14, 11)\n\nfig = plt.figure(1)\nax = fig.add_subplot(111, projection='3d')\nax.plot_surface(stk, mat, vol, linewidth=1, rstride=1, cstride=1, cmap=cm.coolwarm)\n\nf.close()","sub_path":"volsurface_3dplot/volsurface.py","file_name":"volsurface.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"491677574","text":"###############################################################################\n#\n# Copyright 2011-2012 Pants Developers (see AUTHORS.txt)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n#\n###############################################################################\n\"\"\"\nStreaming server channel.\n\"\"\"\n\n###############################################################################\n# Imports\n###############################################################################\n\nimport socket\nimport ssl\nimport weakref\n\nfrom pants._channel import _Channel, HAS_IPV6\nfrom pants.stream import Stream\n\n\n###############################################################################\n# Logging\n###############################################################################\n\nimport logging\nlog = logging.getLogger(\"pants\")\n\n\n###############################################################################\n# Server Class\n###############################################################################\n\nclass Server(_Channel):\n \"\"\"\n A stream-oriented server channel.\n\n A :class:`~pants.stream.Server` instance represents a local server\n capable of listening for connections from remote hosts over a\n connection-oriented protocol such as TCP/IP.\n\n ================= ================================================\n Keyword Argument Description\n ================= ================================================\n engine *Optional.* The engine to which the channel\n should be added. Defaults to the global engine.\n socket *Optional.* A pre-existing socket to wrap. This\n can be a regular :obj:`~socket.socket` or an\n :obj:`~ssl.SSLSocket`. If a socket is not\n provided, a new socket will be created for the\n channel when required.\n ssl_options *Optional.* If provided,\n :meth:`~pants.stream.Server.startSSL` will be\n called with these options once the server is\n ready. By default, SSL will not be enabled.\n ================= ================================================\n \"\"\"\n ConnectionClass = Stream\n\n def __init__(self, ConnectionClass=None, **kwargs):\n sock = kwargs.get(\"socket\", None)\n if sock and sock.type != socket.SOCK_STREAM:\n raise TypeError(\"Cannot create a %s with a socket type other than SOCK_STREAM.\"\n % self.__class__.__name__)\n\n _Channel.__init__(self, **kwargs)\n\n # Socket\n self._remote_address = None\n self._local_address = None\n\n self._slave = None\n\n # Channel state\n self.listening = False\n\n # SSL state\n self.ssl_enabled = False\n self._ssl_options = None\n if kwargs.get(\"ssl_options\", None) is not None:\n self.startSSL(kwargs[\"ssl_options\"])\n\n # Connection class\n if ConnectionClass is not None:\n self.ConnectionClass = ConnectionClass\n self.channels = weakref.WeakValueDictionary()\n\n ##### Properties ##########################################################\n\n @property\n def remote_address(self):\n \"\"\"\n \"\"\"\n return self._remote_address or self._socket.getpeername()\n\n @remote_address.setter\n def remote_address(self, val):\n self._remote_address = val\n\n @property\n def local_address(self):\n \"\"\"\n \"\"\"\n return self._local_address or self._socket.getsockname()\n\n @local_address.setter\n def local_address(self, val):\n self._local_address = val\n\n ##### Control Methods #####################################################\n\n def startSSL(self, ssl_options={}):\n \"\"\"\n Enable SSL on the channel.\n\n Enabling SSL on a server channel will cause any new connections\n accepted by the server to be automatically wrapped in an SSL\n context before being passed to\n :meth:`~pants.stream.Server.on_accept`. 
If an error occurs while\n a new connection is being wrapped,\n :meth:`~pants.stream.Server.on_ssl_wrap_error` is called.\n\n SSL is enabled immediately. Typically, this method is called\n before :meth:`~pants.stream.Server.listen`. If it is called\n afterwards, any connections made in the meantime will not have\n been wrapped in SSL contexts.\n\n The SSL options argument will be passed through to each\n invocation of :func:`ssl.wrap_socket` as keyword arguments - see\n the :mod:`ssl` documentation for further information. You will\n typically want to provide the ``keyfile``, ``certfile`` and\n ``ca_certs`` options. The ``do_handshake_on_connect`` option\n **must** be ``False`` and the ``server_side`` option **must** be\n true, or a :exc:`ValueError` will be raised.\n\n Attempting to enable SSL on a closed channel or a channel that\n already has SSL enabled on it will raise a :exc:`RuntimeError`.\n\n Returns the channel.\n\n ============ ===================================================\n Arguments Description\n ============ ===================================================\n ssl_options *Optional.* Keyword arguments to pass to\n :func:`ssl.wrap_socket`.\n ============ ===================================================\n \"\"\"\n if self.ssl_enabled:\n raise RuntimeError(\"startSSL() called on SSL-enabled %r.\" % self)\n\n if self._closed:\n raise RuntimeError(\"startSSL() called on closed %r.\" % self)\n\n if ssl_options.setdefault(\"server_side\", True) is not True:\n raise ValueError(\"SSL option 'server_side' must be True.\")\n\n if ssl_options.setdefault(\"do_handshake_on_connect\", False) is not False:\n raise ValueError(\"SSL option 'do_handshake_on_connect' must be False.\")\n\n self.ssl_enabled = True\n self._ssl_options = ssl_options\n\n return self\n\n def listen(self, address, backlog=1024, slave=True):\n \"\"\"\n Begin listening for connections made to the channel.\n\n The given ``address`` is resolved, the channel is bound to the\n address and then begins listening for connections. Once the\n channel has begun listening,\n :meth:`~pants.stream.Server.on_listen` will be called.\n\n Addresses can be represented in a number of different ways. A\n single string is treated as a UNIX address. A single integer is\n treated as a port and converted to a 2-tuple of the form\n ``('', port)``. A 2-tuple is treated as an IPv4 address and a\n 4-tuple is treated as an IPv6 address. See the :mod:`socket`\n documentation for further information on socket addresses.\n\n If no socket exists on the channel, one will be created with a\n socket family appropriate for the given address.\n\n An error will occur if the given address is not of a valid\n format or of an inappropriate format for the socket (e.g. 
if an\n IP address is given to a UNIX socket).\n\n Calling :meth:`listen()` on a closed channel or a channel that\n is already listening will raise a :exc:`RuntimeError`.\n\n Returns the channel.\n\n =============== ================================================\n Arguments Description\n =============== ================================================\n address The local address to listen for connections on.\n backlog *Optional.* The maximum size of the\n connection queue.\n slave *Optional.* If True, this will cause a\n Server listening on IPv6 INADDR_ANY to\n create a slave Server that listens on the\n IPv4 INADDR_ANY.\n =============== ================================================\n \"\"\"\n if self.listening:\n raise RuntimeError(\"listen() called on active %r.\" % self)\n\n if self._closed:\n raise RuntimeError(\"listen() called on closed %r.\" % self)\n\n address, family = self._format_address(address)\n self._do_listen(address, family, backlog, slave)\n\n return self\n\n def close(self):\n \"\"\"\n Close the channel.\n\n The channel will be closed immediately and will cease to accept\n new connections. Any connections accepted by this channel will\n remain open and will need to be closed separately. If this\n channel has an IPv4 slave (see\n :meth:`~pants.stream.Server.listen`) it will be closed.\n\n Once closed, a channel cannot be re-opened.\n \"\"\"\n if self._closed:\n return\n\n self.listening = False\n\n self.ssl_enabled = False\n\n if self._slave:\n self._slave.close()\n\n _Channel.close(self)\n\n ##### Public Event Handlers ###############################################\n\n def on_accept(self, socket, addr):\n \"\"\"\n Called after the channel has accepted a new connection.\n\n Create a new instance of\n :attr:`~pants.basic.Server.ConnectonClass` to wrap the socket\n and add it to the server.\n\n ========= ============\n Argument Description\n ========= ============\n sock The newly connected socket object.\n addr The new socket's address.\n ========= ============\n \"\"\"\n connection = self.ConnectionClass(engine=self.engine, socket=socket)\n connection.server = self\n self.channels[connection.fileno] = connection\n connection._handle_connect_event()\n\n def on_close(self):\n \"\"\"\n Called after the channel has finished closing.\n\n Close all active connections to the server.\n \"\"\"\n for channel in self.channels.values():\n channel.close(flush=False)\n\n ##### Public Error Handlers ###############################################\n\n def on_ssl_wrap_error(self, sock, addr, exception):\n \"\"\"\n Placeholder. 
Called when an error occurs while wrapping a new\n connection with an SSL context.\n\n By default, logs the exception and closes the new connection.\n\n ========== ============\n Argument Description\n ========== ============\n sock The newly connected socket object.\n addr The new socket's address.\n exception The exception that was raised.\n ========== ============\n \"\"\"\n log.exception(exception)\n try:\n sock.close()\n except socket.error:\n pass\n\n ##### Internal Methods ####################################################\n\n def _do_listen(self, addr, family, backlog, slave):\n \"\"\"\n A callback method to be used with\n :meth:`~pants._channel._Channel._resolve_addr` - either listens\n immediately or notifies the user of an error.\n\n ========= =====================================================\n Argument Description\n ========= =====================================================\n backlog The maximum size of the connection queue.\n slave If True, this will cause a Server listening on\n IPv6 INADDR_ANY to create a slave Server that\n listens on the IPv4 INADDR_ANY.\n addr The address to listen on or None if address\n resolution failed.\n family The detected socket family or None if address\n resolution failed.\n error *Optional.* Error information or None if no error\n occured.\n ========= =====================================================\n \"\"\"\n if self._socket:\n if self._socket.family != family:\n self.engine.remove_channel(self)\n self._socket_close()\n self._closed = False\n\n sock = socket.socket(family, socket.SOCK_STREAM)\n self._socket_set(sock)\n self.engine.add_channel(self)\n\n try:\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n except AttributeError:\n pass\n\n if hasattr(socket, \"IPPROTO_IPV6\") and hasattr(socket, \"IPV6_V6ONLY\")\\\n and family == socket.AF_INET6:\n self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)\n slave = False\n\n try:\n self._socket_bind(addr)\n self._socket_listen(backlog)\n except socket.error as err:\n self.close()\n raise\n\n self.listening = True\n self._safely_call(self.on_listen)\n\n if slave and not isinstance(addr, str) and addr[0] == '' and HAS_IPV6:\n # Silently fail if we can't make a slave.\n try:\n self._slave = _SlaveServer(self.engine, self, addr, backlog)\n except Exception:\n self._slave = None\n\n ##### Internal Event Handler Methods ######################################\n\n def _handle_read_event(self):\n \"\"\"\n Handle a read event raised on the channel.\n \"\"\"\n while True:\n try:\n sock, addr = self._socket_accept()\n except socket.error:\n log.exception(\"Exception raised by accept() on %r.\" % self)\n try:\n sock.close()\n except socket.error:\n pass\n return\n\n if sock is None:\n return\n\n if self.ssl_enabled:\n try:\n sock.setblocking(False)\n sock = ssl.wrap_socket(sock, **self._ssl_options)\n except ssl.SSLError as e:\n self._safely_call(self.on_ssl_wrap_error, sock, addr, e)\n continue\n\n self._safely_call(self.on_accept, sock, addr)\n\n def _handle_write_event(self):\n \"\"\"\n Handle a write event raised on the channel.\n \"\"\"\n log.warning(\"Received write event for %r.\" % self)\n\n\n###############################################################################\n# _SlaveServer Class\n###############################################################################\n\nclass _SlaveServer(Server):\n \"\"\"\n A slave for a StreamServer to allow listening on multiple address\n 
familes.\n \"\"\"\n def __init__(self, engine, server, addr, backlog):\n Server.__init__(self, engine=engine)\n self.server = server\n\n # Now, listen our way.\n if server._socket.family == socket.AF_INET6:\n family = socket.AF_INET\n else:\n family = socket.AF_INET6\n\n sock = socket.socket(family, socket.SOCK_STREAM)\n self._socket_set(sock)\n self.engine.add_channel(self)\n\n try:\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n except AttributeError:\n pass\n\n try:\n self._socket_bind(addr)\n self._socket_listen(backlog)\n except socket.error as err:\n self.close()\n raise\n\n self._remote_address = None\n self._local_address = None\n\n self.listening = True\n\n self.on_accept = self.server.on_accept\n\n def on_close(self):\n if self.server._slave == self:\n self.server._slave = None\n","sub_path":"pants/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":16197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"11787584","text":"from django.urls import path, include\nfrom .import views\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register('employee', views.EmployeeView),\nrouter.register('department', views.DepartmentView),\nrouter.register('category', views.CategoryView)\nurlpatterns = [\n path('', include(router.urls))\n]\n","sub_path":"myrestproject/rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"78634790","text":"# encoding=utf-8\n\"\"\"\n@Time : 2019/10/18 16:35\n@Author : LiuYanZhe\n@File : BP_lyz.py\n@Software: PyCharm\n@Description: BP神经网络的Python代码实现\nsigmoid函数会将输出值固定在-1到1间,因此需要对原始数据归一化处理(将值固定在0-1间)\n一层隐含层的三层神经网络\n\"\"\"\nimport numpy\nimport random\nimport logging\nimport time\n\n'''日志设置'''\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n\n# 禁用日志\n# logging.disable()\n'''全局变量'''\n# 记录y的最大最小值,反归一化时用\nY_MAX = 0 # 一层时\nY_MIN = 0\nY_MAX_LIST = [] # 两层时\nY_MIN_LIST = []\n# 定义各层节点数\nINPUT_NUM = 18\nHIDDEN_NUM = 37\nOUTPUT_NUM = 3\n# INPUT_NUM = 2\n# HIDDEN_NUM = 10\n# OUTPUT_NUM = 1\nFLAG = 0\n# 存储全局误差(存储加权后的,为了和未改进的作比较)\nlist_E = []\n# 存储LearnRate\nlist_LearnRate = []\n# 各个朝代训练数量\nqing = 29\nming = 50\nyuan = 83\n# 各个朝代训练数量\nqing_test = 11\nming_test = 18\nyuan_test = 31\n# 正确输出项扩大倍数\nRIGHTRATE = 2.5\n\n\n# 生成a-b间的随机数方法\ndef randNum(a, b):\n return random.random() * (b - a) + a\n\n\n# 归一化X,两层,按列归一化\ndef normalX(x_list, a, b):\n x_arr = numpy.array(x_list) # 列表转化为矩阵方便找最小值\n max_list = x_arr.max(0)\n min_list = x_arr.min(0)\n # logging.debug('归一化X:'+str(max_list))\n for i in range(len(x_list)):\n for j in range(len(x_list[i])):\n x_list[i][j] = a * (x_list[i][j] - min_list[j]) / (max_list[j] - min_list[j]) + b\n return x_list\n\n\n# 归一化y,两层,按列归一化\ndef normalDY(y_list, a, b):\n y_arr = numpy.array(y_list) # 列表转矩阵\n max_list = y_arr.max(0)\n min_list = y_arr.min(0)\n global Y_MAX_LIST, Y_MIN_LIST\n Y_MAX_LIST = max_list # 存储最大最小值\n Y_MIN_LIST = min_list\n for i in range(len(y_list)):\n for j in range(len(y_list[i])):\n y_list[i][j] = a * (y_list[i][j] - min_list[j]) / (max_list[j] - min_list[j]) + b\n return y_list\n\n\n# 反归一化y,两层\ndef normalDY_F(y_out_list, a, b):\n for i in range(len(y_out_list)):\n for j in range(len(y_out_list[i])):\n y_out_list[i][j] = (y_out_list[i][j] - b) * (Y_MAX_LIST[j] - 
Y_MIN_LIST[j]) / a + Y_MIN_LIST[j]\n return y_out_list\n\n\n# 归一化Y,一层\ndef normalY(y_list, a, b):\n max_y = max(y_list)\n min_y = min(y_list)\n global Y_MIN, Y_MAX\n Y_MIN = min_y\n Y_MAX = max_y\n for i in range(len(y_list)):\n y_list[i] = a * (y_list[i] - min_y) / (max_y - min_y) + b\n return y_list\n\n\n# 反归一化Y\ndef normalY_F(y_out_list, a, b):\n y_true_list = []\n for i in range(len(y_out_list)):\n temp = (y_out_list[i] - b) * (Y_MAX - Y_MIN) / a + Y_MIN\n y_true_list.append(temp)\n return y_true_list\n\n\n# 加载数据方法\ndef loadData():\n # input_list = [[1, 2], [2, 3], [3, 6], [4, 1], [6, 2]]\n # output_list = [3, 5, 9, 5, 8]\n # input_list = [[0.1, 0.2], [0.2, 0.3], [0.3, 0.6], [0.4, 0.1], [0.6, 0.2]]\n # output_list = [0.3, 0.5, 0.9, 0.5, 0.8]\n input_list = numpy.loadtxt('data_input.txt').tolist()\n output_list = numpy.loadtxt('data_output.txt').tolist()\n # 归一化\n input_list = normalX(input_list, 1, 0)\n # output_list = normalY(output_list, 1, 0)\n return input_list, output_list\n\n\n# 激励函数\ndef incentiveFun(a, x):\n if a == 1:\n return 1 / (1 + numpy.exp(-x))\n\n\n# 激励函数的导数\ndef incentiveFunD(a, y):\n if a == 1:\n return y * (1 - y)\n\n\nclass BpNet:\n def __init__(self, iNum, hNum, oNum, flag=0): # flag为随机赋值或外部赋值\n # logging.debug('---init---')\n # 设定激励函数类型\n self.methodType = 1\n # 初始化,iNum,hNum,oNum分别为输入层、隐含层、输出层个数\n self.inputNum = iNum\n self.hiddenNum = hNum\n self.outputNum = oNum\n # 定义各层节点,列表存储节点值(激活)\n self.input_list = [0.0] * iNum\n self.hidden_list = [0.0] * hNum\n self.output_list = [0.0] * oNum\n # 定义阈值\n self.hidden_threshold_list = [0.0] * hNum\n self.output_threshold_list = [0.0] * oNum\n if flag == 0: # 若flag=0,随机赋值,否则单独调用set方法赋值\n # 为阈值随机赋值\n # print('阈值随机赋值')\n for i in range(self.hiddenNum):\n self.hidden_threshold_list[i] = randNum(-1.0, 1.0)\n for i in range(self.outputNum):\n self.output_threshold_list[i] = randNum(-1.0, 1.0)\n # 建立权值矩阵(使用NUmpy生成矩阵,转换为二维列表存储)\n # 从输入层到隐藏层的权值矩阵\n self.inHid_weight = numpy.ones((self.inputNum, self.hiddenNum)).tolist()\n # 从隐藏层到输出层的权值矩阵\n self.outhid_weight = numpy.ones((self.hiddenNum, self.outputNum)).tolist()\n if flag == 0:\n # print('权值随机赋值')\n # 为权值矩阵赋随机值\n for i in range(self.inputNum):\n for j in range(self.hiddenNum):\n self.inHid_weight[i][j] = randNum(-1.0, 1.0)\n for i in range(self.hiddenNum):\n for j in range(self.outputNum):\n self.outhid_weight[i][j] = randNum(-1.0, 1.0)\n # logging.debug('input_list:' + str(self.input_list))\n # logging.debug('hidden_list:' + str(self.hidden_list))\n # logging.debug('output_list:' + str(self.output_list))\n # logging.debug('inHid_weight:' + str(self.inHid_weight))\n # logging.debug('outhid_weight:' + str(self.outhid_weight))\n\n # 设置阈值与权值\n def setweightANDthreshold(self, output_threshold_list, hidden_threshold_list, outhid_weight, inHid_weight):\n # print('设定阈值与权值')\n self.hidden_threshold_list = hidden_threshold_list\n self.output_threshold_list = output_threshold_list\n self.inHid_weight = inHid_weight\n self.outhid_weight = outhid_weight\n\n '''正向传播'''\n\n # 通过公式计算神经网络的输出\n def update(self, data_list):\n # logging.debug('---update---')\n # 为输入层赋值(激活输入层)\n for i in range(self.inputNum):\n self.input_list[i] = data_list[i]\n # logging.debug('input_list:' + str(self.input_list))\n # 计算隐藏层的输出(激活隐藏层)\n for i in range(self.hiddenNum):\n hi = 0.0\n for j in range(self.inputNum):\n hi = hi + self.inHid_weight[j][i] * self.input_list[j]\n self.hidden_list[i] = incentiveFun(self.methodType,\n hi - self.hidden_threshold_list[i]) # 隐含层节点保存的是隐含层处理后的输出ho,计算输出层时要用\n # 
计算输出层的输出(即网络的输出)(激活输出层)\n for i in range(self.outputNum):\n yi = 0.0\n for j in range(self.hiddenNum):\n yi = yi + self.outhid_weight[j][i] * self.hidden_list[j]\n self.output_list[i] = incentiveFun(self.methodType, yi - self.output_threshold_list[i]) # 输出层节点保存的是输出层的输出yo\n # logging.debug('hidden_list:' + str(self.hidden_list))\n # logging.debug('output_list:' + str(self.output_list))\n return self.output_list\n\n '''反向传播(修正权值)'''\n\n def backPropagate(self, rightOut_list, learnRate, flag=-1): # 参数rightOut_list指 期望输出,learnRate指学习率\n # logging.debug('---backPropagate---')\n '''计算误差'''\n # 输出层误差\n outErr_list = [0.0] * self.outputNum\n for i in range(self.outputNum):\n # logging.debug('rightOut_list:'+str(rightOut_list))\n # logging.debug('self.output_list:'+str(self.output_list))\n temp = rightOut_list[i] - self.output_list[i]\n # print('第', i, '个:', rightOut_list[i])\n if i == flag:\n # print(temp)\n temp = temp * RIGHTRATE\n # print(temp)\n # print('加倍')\n outErr_list[i] = temp * incentiveFunD(self.methodType, self.output_list[i])\n\n # 隐藏层误差\n hidErr_list = [0.0] * self.hiddenNum\n for i in range(self.hiddenNum):\n hid_err = 0.0\n for j in range(self.outputNum):\n hid_err = hid_err + outErr_list[j] * self.outhid_weight[i][j]\n hidErr_list[i] = hid_err * incentiveFunD(self.methodType, self.hidden_list[i])\n '''更新权重'''\n # 更新隐含层到输出层权重\n for i in range(self.hiddenNum):\n for j in range(self.outputNum):\n self.outhid_weight[i][j] = self.outhid_weight[i][j] + learnRate * outErr_list[j] * self.hidden_list[i]\n # 更新输入层到隐含层权重\n for i in range(self.inputNum):\n for j in range(self.hiddenNum):\n self.inHid_weight[i][j] = self.inHid_weight[i][j] + learnRate * hidErr_list[j] * self.input_list[i]\n '''更新阈值'''\n # 更新隐藏层阈值\n for i in range(self.hiddenNum):\n self.hidden_threshold_list[i] = self.hidden_threshold_list[i] - learnRate * hidErr_list[i]\n # 更新输出层阈值\n for i in range(self.outputNum):\n self.output_threshold_list[i] = self.output_threshold_list[i] - learnRate * outErr_list[i]\n # 用误差函数计算误差e\n err = 0.0\n err_true = 0.0\n for i in range(self.outputNum):\n err_true = err_true + (rightOut_list[i] - self.output_list[i]) ** 2\n e = rightOut_list[i] - self.output_list[i]\n if i == flag:\n e = e * RIGHTRATE\n err = err + e ** 2\n e = err / 2\n e_true = err_true / 2\n # logging.debug('outhid_weight:' + str(self.outhid_weight))\n # logging.debug('inHid_weight:' + str(self.inHid_weight))\n # logging.debug('误差:' + str(self.inHid_weight))\n # print('输入', self.input_list, '期望输出:', str(rightOut_list), '实际输出:', self.output_list)\n # print('期望输出:', str(rightOut_list), '实际输出:', self.output_list)\n return e, e_true\n\n # 训练函数\n def train(self, x_list, y_list, max_iter=1000, min_E=0.00028, learnRate=0.1):\n # 存储原始学习率\n # learnRate_old=learnRate\n for i in range(max_iter):\n e = 0.0\n e_true = 0.0\n # 指数法动态更改学习率\n # learnRate=learnRate*numpy.exp(-0.01*i)\n # if i%10==0:\n # # 多项式法动态更改学习率\n # learnRate=learnRate*(1+0.01*i)**(-0.05)\n # list_LearnRate.append(learnRate)\n if i % 400 == 0:\n learnRate = learnRate * 0.5 # 训练次数每隔1000次,学习率减半(翻倍)\n for j in range(len(x_list)):\n # 正向传播\n self.update(x_list[j])\n if j < qing:\n flag = 0\n elif j >= qing and j < qing + ming:\n flag = 1\n elif j >= qing + ming and j < qing + ming + yuan:\n flag = 2\n else:\n flag = -1\n # 反向更新,统计误差\n if OUTPUT_NUM == 1: # 若输出节点为1,是一维列表,需变成2维\n y_temp = []\n y_temp.append(y_list[j])\n temp_e, temp_e_true = self.backPropagate(y_temp, learnRate, flag=flag)\n e = e + temp_e\n e_true = e_true + temp_e_true\n else:\n temp_e, temp_e_true = 
self.backPropagate(y_list[j], learnRate, flag=flag)\n e = e + temp_e\n e_true = e_true + temp_e_true\n # 计算全局误差\n E = e / len(y_list)\n E_true = e_true / len(y_list)\n # list_E.append(E_true)\n list_E.append(E)\n # logging.debug('全局误差:' + str(E))\n # print('隐藏层阈值:', self.hidden_threshold_list, '\\n输出层阈值:', self.output_threshold_list)\n if i % 100 == 0: # 每100次存储并输出一次E\n if FLAG == 0:\n # print('第 ', i, ' 次训练,全局误差:', str(E))\n logging.debug('第 ' + str(i) + ' 次训练,全局误差:', str(E))\n # print('全局误差:', str(E))\n # 全局误差达到预设精度,结束算法\n if E <= min_E:\n break\n return E\n\n # 预测函数\n def pre(self, x_list, y_list):\n list_pre = [] # 存储预测值\n list_judge = [] # 存储比对结果\n list_rate = [] # 存储比例,依次为[正确比例,清朝正确比例,明朝正确比例,元朝正确比例]\n right_count = 0 # 存储正确数量\n right_qing_count = 0 # 存储清朝正确的数量\n right_ming_count = 0 # 存储明朝正确的数量\n right_yuan_count = 0 # 存储元朝正确的数量\n for i in range(len(x_list)):\n y_net = self.update(x_list[i])\n # y_net = normalY_F(y_net, 1, 0) # 反归一化\n y_right = y_list[i]\n # print('预测值:', str(y_net), '真实值:', str(y_right))\n logging.debug('预测值:' + str(y_net) + '真实值:' + str(y_right))\n list_pre.append(y_net.copy())\n # 如果预测正确\n if y_net.index(max(y_net)) == y_right.index(max(y_right)):\n list_judge.append(1)\n right_count += 1\n if i < qing_test: # 清朝的数据\n right_qing_count += 1\n elif i >= qing_test and i < ming_test + qing_test:\n right_ming_count += 1\n elif i >= ming_test + qing_test and i < yuan_test + ming_test + qing_test:\n right_yuan_count += 1\n else:\n list_judge.append(0)\n # 计算比例\n allrate = right_count / (qing_test + ming_test + yuan_test)\n qingrate = right_qing_count / qing_test\n mingrate = right_ming_count / ming_test\n yuanrate = right_yuan_count / yuan_test\n list_rate.append(allrate)\n list_rate.append(qingrate)\n list_rate.append(mingrate)\n list_rate.append(yuanrate)\n list_count = [right_qing_count + right_ming_count + right_yuan_count, right_qing_count, right_ming_count,\n right_yuan_count]\n # print('right_qing_count',right_qing_count)\n # print('right_ming_count',right_ming_count)\n # print('right_yuan_count',right_yuan_count)\n # print('judge:',list_judge)\n return list_pre, y_list, list_judge, list_rate, list_count\n\n\ndef main(name='_main', max_iter=2000, learnRate=0.05):\n startTime = time.time()\n # 实例Bp对象\n bp = BpNet(INPUT_NUM, HIDDEN_NUM, OUTPUT_NUM)\n # 加载数据\n x_list, y_list = loadData()\n # logging.debug('y_list:'+str(y_list))\n # 训练网络\n bp.train(x_list, y_list, max_iter=max_iter, learnRate=learnRate)\n numpy.savetxt('神经网络每百次训练全局误差' + name, list_E)\n # 预测\n # a = [[0.2, 0.1], [0.5, 0.4], [0.7, 0.1], [0.1, 0.1]]\n # b = [0.3, 0.9, 0.8, 0.2]\n # a = [[2, 1], [5, 4], [7, 1]]\n # b = [3, 9, 8]\n a = numpy.loadtxt('test_input.txt').tolist()\n b = numpy.loadtxt('test_output.txt').tolist()\n a = normalX(a, 1, 0) # 归一化x\n list_pre, y_list, list_judge, list_rate, list_count = bp.pre(a, b)\n numpy.savetxt('BP_output_threshold_list.txt', bp.output_threshold_list)\n numpy.savetxt('BP_hidden_threshold_list.txt', bp.hidden_threshold_list)\n numpy.savetxt('BP_outhid_weight.txt', bp.outhid_weight)\n numpy.savetxt('BP_inHid_weight.txt', bp.inHid_weight)\n numpy.savetxt('预测结果' + name, list_pre)\n numpy.savetxt('正确结果' + name, y_list)\n numpy.savetxt('结果对比' + name, list_judge)\n numpy.savetxt('正确比例' + name, list_rate)\n print('预测正确数量(全部,清,明,元):', list_count)\n print('预测正确比例(全部,清,明,元):', list_rate)\n endTime = time.time()\n print('运行时间:', endTime - startTime)\n return list_rate[0]\n\n\ndef main_par(output_threshold_list, hidden_threshold_list, outhid_weight, inHid_weight, max_iter=1000, 
min_E=0.00028,\n learnRate=0.1, name='_蚁群'):\n # startTime = time.time()\n # 标志位,蚁群算法不要在控制框输出信息\n global FLAG\n FLAG = 0\n # 实例Bp对象\n bp = BpNet(INPUT_NUM, HIDDEN_NUM, OUTPUT_NUM, flag=1)\n bp.setweightANDthreshold(output_threshold_list, hidden_threshold_list, outhid_weight, inHid_weight)\n # 加载数据\n x_list, y_list = loadData()\n # logging.debug('y_list:'+str(y_list))\n # 训练网络\n # print('bp.output_threshold_list:', bp.output_threshold_list)\n bp.train(x_list, y_list, max_iter=max_iter, min_E=min_E, learnRate=learnRate)\n numpy.savetxt('神经网络训练每百次全局误差E' + name, list_E)\n # 预测\n a = numpy.loadtxt('test_input.txt').tolist()\n b = numpy.loadtxt('test_output.txt').tolist()\n a = normalX(a, 1, 0) # 归一化x\n list_pre, y_list, list_judge, list_rate, list_count = bp.pre(a, b)\n numpy.savetxt('BP_output_threshold_list.txt', bp.output_threshold_list)\n numpy.savetxt('BP_hidden_threshold_list.txt', bp.hidden_threshold_list)\n numpy.savetxt('BP_outhid_weight.txt', bp.outhid_weight)\n numpy.savetxt('BP_inHid_weight.txt', bp.inHid_weight)\n numpy.savetxt('预测结果' + name, list_pre)\n numpy.savetxt('正确结果' + name, y_list)\n numpy.savetxt('结果对比' + name, list_judge)\n numpy.savetxt('正确比例' + name, list_rate)\n print('预测正确数量(全部,清,明,元):', list_count)\n print('预测正确比例(全部,清,明,元):', list_rate)\n # endTime = time.time()\n # print('BP神经网络运行时间:', endTime - startTime)\n # 返回全部数量以计算画图\n list_all_count = [yuan + ming + qing, qing, ming, yuan]\n return list_rate, list_count, list_all_count, list_pre, y_list, list_E, list_LearnRate\n\n\n# 通过预设权值与阈值,获得BP神经网络的误差(通过预设权值和阈值改进BP神经网络时,用误差作为衡量标准)\n# 参数列表:(隐藏层阈值列表,输出层阈值列表,输入层到隐藏层权值列表,隐藏层到输出层权值列表)\ndef measure(hidden_threshold_list, output_threshold_list, inHid_weight, outhid_weight):\n # 标志位,蚁群算法不要在控制框输出信息\n global FLAG\n FLAG = 1\n # 实例Bp对象\n bp = BpNet(INPUT_NUM, HIDDEN_NUM, OUTPUT_NUM, flag=1)\n # 设置权值、阈值\n bp.setweightANDthreshold(output_threshold_list, hidden_threshold_list, outhid_weight, inHid_weight)\n # bp.hidden_threshold_list = hidden_threshold_list\n # bp.output_threshold_list = output_threshold_list\n # bp.inHid_weight = inHid_weight\n # bp.outhid_weight = outhid_weight\n # 加载数据\n x_list, y_list = loadData()\n # 获得全局误差\n e = 0.0\n for j in range(len(x_list)):\n # 正向传播,获得输出结果\n output_list = bp.update(x_list[j])\n # 统计误\n err = 0.0\n if j < qing:\n flag = 0\n elif j >= qing and j < qing + ming:\n flag = 1\n elif j >= qing + ming and j < qing + ming + yuan:\n flag = 2\n for k in range(bp.outputNum):\n temp = y_list[j][k] - output_list[k]\n if temp == flag:\n temp = temp * RIGHTRATE\n err = err + temp ** 2\n # print(y_list[j][k],':',output_list[k])\n e = e + err / 2\n # 计算全局误差\n E = e / len(y_list)\n\n return E\n\n# main()\n","sub_path":"BP_AS/BP_lyz.py","file_name":"BP_lyz.py","file_ext":"py","file_size_in_byte":20171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"404907020","text":"import json\nimport requests\nimport datetime\nfrom operator import itemgetter\n\n#Authenticate Function\ndef authenticate():\n data = {'username':'', 'password':''}\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n url = 'https://[REDACTED FOR CONFIDENTIALITY]/api/users/authenticate'\n token = requests.post(url, headers=headers, json=data, verify=False)\n token = token.json()['token']\n tokenqaJWT = 'JWT ' + token\n\n return [data, tokenqaJWT]\n\n#Server Function\ndef get_servers(servername):\n serverinfourl = \"\"\"http://[REDACTED FOR 
CONFIDENTIALITY]/api/Servers?filter={\"where\":{\"hostname\":\"%s\"}}\"\"\" % servername\n serverinfojson = requests.get(serverinfourl, headers=headerstest, json=data)\n serverinfo = serverinfojson.json()\n serverId = serverinfo[0]['id']\n\n return serverId\n\n#Server Association Function\ndef get_serverAssociations(serverId=None):\n if serverId == None:\n serverAssociationsUrl =\"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/ServerAssociations/\"\"\"\n serverAssociationJson = requests.get(serverAssociationsUrl, headers=headerstest, json=data)\n serverAssociation = serverAssociationJson.json()\n deploymentId = serverAssociation[0]['deploymentId']\n\n return deploymentId\n\n else:\n serverAssociationsUrl =\"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/ServerAssociations?filter={\"where\":{\"serverId\":\"%s\"}}\"\"\" % serverId\n serverAssociationJson = requests.get(serverAssociationsUrl, headers=headerstest, json=data)\n serverAssociation = serverAssociationJson.json()\n deploymentId = serverAssociation[0]['deploymentId']\n\n return deploymentId\n\n#Deployment Function\ndef get_deployments(deploymentId):\n dict = {}\n if deploymentId == None:\n deploymentUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Deployments\"\"\"\n deploymentJson = requests.get(deploymentUrl, headers=headerstest, json=data)\n deployment = deploymentJson.json()\n\n for item in deployment:\n deploymentname = item['name']\n applicationId = item['applicationId']\n \n dict.update({deploymentname : applicationId})\n\n return dict\n\n else:\n deploymentUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Deployments?filter={\"where\":{\"id\":\"%s\"}}\"\"\" % deploymentId\n deploymentJson = requests.get(deploymentUrl, headers=headerstest, json=data)\n deployment = deploymentJson.json()\n\n for item in deployment:\n deploymentname = item['name']\n deploymentId = item['id']\n applicationId = item['applicationId']\n\n dict.update({deploymentname : applicationId})\n\n return dict\n\n#Application Function\ndef get_applications(applicationId):\n dict = {}\n if applicationId == None:\n applicationUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Applications/\"\"\"\n applicationJson = requests.get(applicationUrl, headers=headerstest, json=data)\n application = applicationJson.json()\n\n for item in application:\n applicationId = item['id']\n applicationname = item['name']\n familyId = item['familyId']\n \n dict.update({applicationname : familyId})\n\n return list\n\n else:\n applicationUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Applications?filter={\"where\":{\"id\":\"%s\"}}\"\"\" % applicationId\n applicationJson = requests.get(applicationUrl, headers=headerstest, json=data)\n application = applicationJson.json()\n \n for item in application:\n applicationId = item['id']\n applicationname = item['name']\n familyId = item['familyId']\n \n dict.update({applicationname : familyId})\n\n return dict\n\n#Family Function\ndef get_family(familyId):\n dict = {}\n if familyId == None:\n familyUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Families/\"\"\"\n familyJson = requests.get(familyUrl, headers=headerstest, json=data)\n family = familyJson.json()\n\n for item in family:\n familyname = item['name']\n familyId = item['id']\n\n dict.update({familyname : familyId})\n\n return dict\n\n else:\n familyUrl = \"\"\"http://[REDACTED FOR CONFIDENTIALITY]/api/Families?filter={\"where\":{\"id\":\"%s\"}}\"\"\" % familyId\n familyJson = requests.get(familyUrl, headers=headerstest, json=data)\n family = 
familyJson.json()\n \n for item in family:\n familyname = item['name']\n familyId = item['id']\n\n dict.update({familyname : familyId})\n\n return dict\n\nvalues = authenticate()\ndata = values[0]\ntoken = values[1]\nheaderstest = {'Content-Type': 'application/json', 'Authorization': token }\n\nservername = input(\"Server Name: \")\n\nserverId = get_servers(servername)\n\ndeploymentId = get_serverAssociations(serverId)\n\ndeployments = get_deployments(deploymentId)\n#Loop is here to unpack the variables from the dictionary\nfor deploymentname, applicationId in deployments.items():\n\n applications = get_applications(applicationId)\n\n for applicationname, familyId in applications.items():\n\n family = get_family(familyId)\n\n for familyname, familyId in family.items():\n #print({familyname : familyId})\n\n #headers for the table\n serverHeader = \"Server Name\"\n serverHeader2 = \"Server ID\"\n deploymentHeader = \"Deployment Name\"\n deploymentHeader2 = \"Deployment ID\"\n applicationHeader = \"Application Name\"\n applicationHeader2 = \"Application ID\"\n familyHeader = \"Family Name\"\n familyHeader2 = \"Family ID\"\n\n #Print output neatly and legible. \n print('\\n')\n print('{:<35s}{:>35s}'.format(serverHeader, serverHeader2))\n print('{:<40s}{:>40s}'.format(servername, serverId) + '\\n')\n\n print('{:<35s}{:>35s}'.format(deploymentHeader, deploymentHeader2))\n print('{:<40s}{:>40s}'.format(deploymentname, deploymentId) + '\\n')\n\n print('{:<35s}{:>35s}'.format(applicationHeader, applicationHeader2))\n print('{:<40s}{:>40s}'.format(applicationname, applicationId) + '\\n')\n\n print('{:<35s}{:>35s}'.format(familyHeader, familyHeader2))\n print('{:<40s}{:>40s}'.format(familyname, familyId))\n print('\\n')\n","sub_path":"Server_Deployment_Application_Family_Script.py","file_name":"Server_Deployment_Application_Family_Script.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"155636779","text":"# -*- coding: utf-8 -*-\nfrom models import model\n\nclass Check_report():\n\t@staticmethod\n\tdef search_order(customer_id):\n\t\t\t\tcustomer_ref = customer_id\n\t\t\t\tque = model.Order_tbl.query.filter(model.Order_tbl.customer_id == customer_ref)\n\t\t\t\tsubquery = model.Order_tbl.query.filter(model.Order_tbl.customer_id == customer_ref)\t \n\t\t\t\tresult = [] \n\t\t\t\tresult=[dict(Order_id=a.order_id, Customer_id=a.customer_id, Customer_name=a.customer_name, customer_address=a.customer_address, customer_zipcode=a.customer_zipcode, customer_surname=a.customer_surname, products=a.products) for a in que]\n\t\t\t\t \n\t\t\t\treturn result\n\t\t\t\t\n\n","sub_path":"order/customer_order.py","file_name":"customer_order.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"70760053","text":"# Copyright (c) 2016 Cisco Systems Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport netaddr\n\nfrom aim.aim_lib import nat_strategy\nfrom aim import aim_manager\nfrom aim.api import resource as aim_resource\nfrom aim.api import status as aim_status\nfrom aim import config as aim_cfg\nfrom aim import context as aim_context\nfrom aim.db import model_base as aim_model_base\n\nfrom gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (\n extension_db as extn_db)\nfrom keystoneclient.v3 import client as ksc_client\nfrom neutron.api import extensions\nfrom neutron import context\nfrom neutron.db import api as db_api\nfrom neutron import manager\nfrom neutron.plugins.common import constants as service_constants\nfrom neutron.plugins.ml2 import config\nfrom neutron.tests.unit.api import test_extensions\nfrom neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin\nfrom neutron.tests.unit.extensions import test_address_scope\nfrom neutron.tests.unit.extensions import test_l3\nfrom opflexagent import constants as ofcst\n\nfrom gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (\n mechanism_driver as md)\n\nPLUGIN_NAME = 'gbpservice.neutron.plugins.ml2plus.plugin.Ml2PlusPlugin'\n\nAGENT_CONF_OPFLEX = {'alive': True, 'binary': 'somebinary',\n 'topic': 'sometopic',\n 'agent_type': ofcst.AGENT_TYPE_OPFLEX_OVS,\n 'configurations': {\n 'opflex_networks': None,\n 'bridge_mappings': {'physnet1': 'br-eth1'}}}\n\nDN = 'apic:distinguished_names'\nCIDR = 'apic:external_cidrs'\nPROV = 'apic:external_provided_contracts'\nCONS = 'apic:external_consumed_contracts'\nSNAT_POOL = 'apic:snat_host_pool'\n\naim_resource.ResourceBase.__repr__ = lambda x: x.__dict__.__repr__()\n\n\n# REVISIT(rkukura): Use mock for this instead?\nclass FakeTenant(object):\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n\nclass FakeProjectManager(object):\n def list(self):\n return [\n FakeTenant('another_tenant', 'AnotherTenantName'),\n FakeTenant('bad_tenant_id', 'BadTenantIdName'),\n FakeTenant('not_admin', 'NotAdminName'),\n FakeTenant('some_tenant', 'SomeTenantName'),\n FakeTenant('somebody_else', 'SomebodyElseName'),\n FakeTenant('t1', 'T1Name'),\n FakeTenant('tenant1', 'Tenant1Name'),\n FakeTenant('tenant_1', 'Tenant1Name'),\n FakeTenant('tenant_2', 'Tenant2Name'),\n FakeTenant('test-tenant', 'TestTenantName'),\n ]\n\n\nclass FakeKeystoneClient(object):\n def __init__(self, **kwargs):\n self.projects = FakeProjectManager()\n\n\n# TODO(rkukura): Also run Neutron L3 tests on apic_aim L3 plugin.\n\nclass ApicAimTestMixin(object):\n\n def initialize_db_config(self, session):\n aim_cfg.CONF.register_opts(aim_cfg.global_opts)\n aim_cfg._get_option_subscriber_manager = mock.Mock()\n self.aim_cfg_manager = aim_cfg.ConfigManager(\n aim_context.AimContext(db_session=session), '')\n self.aim_cfg_manager.replace_all(aim_cfg.CONF)\n\n def set_override(self, item, value, group=None, host=''):\n # Override DB config as well\n if group:\n aim_cfg.CONF.set_override(item, value, group)\n else:\n aim_cfg.CONF.set_override(item, value)\n self.aim_cfg_manager.to_db(aim_cfg.CONF, host=host)\n\n\nclass ApicAimTestCase(test_address_scope.AddressScopeTestCase,\n test_l3.L3NatTestCaseMixin, ApicAimTestMixin):\n\n def setUp(self):\n # Enable the test mechanism driver to ensure that\n # we can successfully call through to all mechanism\n # driver apis.\n config.cfg.CONF.set_override('mechanism_drivers',\n ['logger', 'apic_aim'],\n 'ml2')\n 
config.cfg.CONF.set_override('extension_drivers',\n ['apic_aim'],\n 'ml2')\n config.cfg.CONF.set_override('type_drivers',\n ['opflex', 'local', 'vlan'],\n 'ml2')\n config.cfg.CONF.set_override('tenant_network_types',\n ['opflex'],\n 'ml2')\n config.cfg.CONF.set_override('network_vlan_ranges',\n ['physnet1:1000:1099'],\n group='ml2_type_vlan')\n\n service_plugins = {\n 'L3_ROUTER_NAT':\n 'gbpservice.neutron.services.apic_aim.l3_plugin.ApicL3Plugin'}\n\n engine = db_api.get_engine()\n aim_model_base.Base.metadata.create_all(engine)\n self.db_session = db_api.get_session()\n\n self.initialize_db_config(self.db_session)\n\n super(ApicAimTestCase, self).setUp(PLUGIN_NAME,\n service_plugins=service_plugins)\n ext_mgr = extensions.PluginAwareExtensionManager.get_instance()\n self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)\n self.port_create_status = 'DOWN'\n\n self.saved_keystone_client = ksc_client.Client\n ksc_client.Client = FakeKeystoneClient\n self.plugin = manager.NeutronManager.get_plugin()\n self.plugin.start_rpc_listeners()\n self.driver = self.plugin.mechanism_manager.mech_drivers[\n 'apic_aim'].obj\n self.l3_plugin = manager.NeutronManager.get_service_plugins()[\n service_constants.L3_ROUTER_NAT]\n self.aim_mgr = aim_manager.AimManager()\n self._app_profile_name = self.driver.ap_name\n self.extension_attributes = ('router:external', DN,\n 'apic:nat_type', SNAT_POOL,\n CIDR, PROV, CONS)\n\n def tearDown(self):\n engine = db_api.get_engine()\n with engine.begin() as conn:\n for table in reversed(\n aim_model_base.Base.metadata.sorted_tables):\n conn.execute(table.delete())\n ksc_client.Client = self.saved_keystone_client\n super(ApicAimTestCase, self).tearDown()\n\n def _find_by_dn(self, dn, cls):\n aim_ctx = aim_context.AimContext(self.db_session)\n resource = cls.from_dn(dn)\n return self.aim_mgr.get(aim_ctx, resource)\n\n def _check_dn(self, resource, aim_resource, key):\n dist_names = resource.get('apic:distinguished_names')\n self.assertIsInstance(dist_names, dict)\n dn = dist_names.get(key)\n self.assertIsInstance(dn, basestring)\n self.assertEqual(aim_resource.dn, dn)\n\n def _check_no_dn(self, resource, key):\n dist_names = resource.get('apic:distinguished_names')\n if dist_names is not None:\n self.assertIsInstance(dist_names, dict)\n self.assertNotIn(key, dist_names)\n\n def _register_agent(self, host, agent_conf):\n agent = {'host': host}\n agent.update(agent_conf)\n self.plugin.create_or_update_agent(context.get_admin_context(), agent)\n\n def _bind_port_to_host(self, port_id, host):\n data = {'port': {'binding:host_id': host,\n 'device_owner': 'compute:',\n 'device_id': 'someid'}}\n req = self.new_update_request('ports', data, port_id,\n self.fmt)\n return self.deserialize(self.fmt, req.get_response(self.api))\n\n def _make_ext_network(self, name, dn=None, nat_type=None, cidrs=None):\n kwargs = {'router:external': True}\n if dn:\n kwargs[DN] = {'ExternalNetwork': dn}\n if nat_type is not None:\n kwargs['apic:nat_type'] = nat_type\n elif getattr(self, 'nat_type', None) is not None:\n kwargs['apic:nat_type'] = self.nat_type\n if cidrs:\n kwargs[CIDR] = cidrs\n\n return self._make_network(self.fmt, name, True,\n arg_list=self.extension_attributes,\n **kwargs)['network']\n\n\nclass TestAimMapping(ApicAimTestCase):\n def _get_tenant(self, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n tenant = aim_resource.Tenant(name=tenant_name)\n tenant = self.aim_mgr.get(aim_ctx, tenant)\n self.assertIsNotNone(tenant)\n return 
tenant\n\n def _get_vrf(self, vrf_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n vrf = aim_resource.VRF(tenant_name=tenant_name,\n name=vrf_name)\n vrf = self.aim_mgr.get(aim_ctx, vrf)\n self.assertIsNotNone(vrf)\n return vrf\n\n def _vrf_should_not_exist(self, vrf_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n vrfs = self.aim_mgr.find(aim_ctx, aim_resource.VRF, name=vrf_name)\n self.assertEqual([], vrfs)\n\n def _get_bd(self, bd_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n bd = aim_resource.BridgeDomain(tenant_name=tenant_name,\n name=bd_name)\n bd = self.aim_mgr.get(aim_ctx, bd)\n self.assertIsNotNone(bd)\n return bd\n\n def _bd_should_not_exist(self, bd_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n bds = self.aim_mgr.find(\n aim_ctx, aim_resource.BridgeDomain, name=bd_name)\n self.assertEqual([], bds)\n\n def _get_subnet(self, gw_ip_mask, bd_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n subnet = aim_resource.Subnet(tenant_name=tenant_name,\n bd_name=bd_name,\n gw_ip_mask=gw_ip_mask)\n subnet = self.aim_mgr.get(aim_ctx, subnet)\n self.assertIsNotNone(subnet)\n return subnet\n\n def _subnet_should_not_exist(self, gw_ip_mask, bd_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n subnets = self.aim_mgr.find(\n aim_ctx, aim_resource.Subnet, bd_name=bd_name,\n gw_ip_mask=gw_ip_mask)\n self.assertEqual([], subnets)\n\n def _get_epg(self, epg_name, tenant_name, app_profile_name):\n session = self.db_session\n aim_ctx = aim_context.AimContext(session)\n epg = aim_resource.EndpointGroup(tenant_name=tenant_name,\n app_profile_name=app_profile_name,\n name=epg_name)\n epg = self.aim_mgr.get(aim_ctx, epg)\n self.assertIsNotNone(epg)\n return epg\n\n def _epg_should_not_exist(self, epg_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n epgs = self.aim_mgr.find(aim_ctx, aim_resource.EndpointGroup,\n name=epg_name)\n self.assertEqual([], epgs)\n\n def _get_contract(self, contract_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n contract = aim_resource.Contract(tenant_name=tenant_name,\n name=contract_name)\n contract = self.aim_mgr.get(aim_ctx, contract)\n self.assertIsNotNone(contract)\n return contract\n\n def _contract_should_not_exist(self, contract_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n contracts = self.aim_mgr.find(aim_ctx, aim_resource.Contract,\n name=contract_name)\n self.assertEqual([], contracts)\n\n def _get_subject(self, subject_name, contract_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n subject = aim_resource.ContractSubject(tenant_name=tenant_name,\n contract_name=contract_name,\n name=subject_name)\n subject = self.aim_mgr.get(aim_ctx, subject)\n self.assertIsNotNone(subject)\n return subject\n\n def _subject_should_not_exist(self, subject_name, contract_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n subjects = self.aim_mgr.find(\n aim_ctx, aim_resource.ContractSubject,\n subject_name=subject_name, name=contract_name)\n self.assertEqual([], subjects)\n\n def _get_filter(self, filter_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n filter = 
aim_resource.Filter(tenant_name=tenant_name,\n name=filter_name)\n filter = self.aim_mgr.get(aim_ctx, filter)\n self.assertIsNotNone(filter)\n return filter\n\n def _get_filter_entry(self, entry_name, filter_name, tenant_name):\n session = db_api.get_session()\n aim_ctx = aim_context.AimContext(session)\n entry = aim_resource.FilterEntry(tenant_name=tenant_name,\n filter_name=filter_name,\n name=entry_name)\n entry = self.aim_mgr.get(aim_ctx, entry)\n self.assertIsNotNone(entry)\n return entry\n\n def _check_network(self, net, routers=None, scope=None):\n tenant_aname = net['tenant_id'] # TODO(rkukura): Sharing\n self._get_tenant(tenant_aname)\n\n aname = net['id']\n router_anames = [router['id'] for router in routers or []]\n\n if routers:\n if scope:\n vrf_aname = scope['id']\n vrf_dname = scope['name']\n vrf_tenant_aname = scope['tenant_id']\n vrf_tenant_dname = None\n else:\n vrf_aname = 'DefaultVRF'\n vrf_dname = 'Default Routed VRF'\n vrf_tenant_aname = tenant_aname\n vrf_tenant_dname = None\n else:\n vrf_aname = 'UnroutedVRF'\n vrf_dname = 'Common Unrouted VRF'\n vrf_tenant_aname = 'common'\n vrf_tenant_dname = 'Common Tenant'\n\n aim_bd = self._get_bd(aname, tenant_aname)\n self.assertEqual(tenant_aname, aim_bd.tenant_name)\n self.assertEqual(aname, aim_bd.name)\n self.assertEqual(net['name'], aim_bd.display_name)\n self.assertEqual(vrf_aname, aim_bd.vrf_name)\n self.assertTrue(aim_bd.enable_arp_flood)\n if routers:\n self.assertTrue(aim_bd.enable_routing)\n else:\n self.assertFalse(aim_bd.enable_routing)\n self.assertTrue(aim_bd.limit_ip_learn_to_subnets)\n self.assertEqual('proxy', aim_bd.l2_unknown_unicast_mode)\n self.assertEqual('garp', aim_bd.ep_move_detect_mode)\n self._check_dn(net, aim_bd, 'BridgeDomain')\n\n aim_epg = self._get_epg(aname, tenant_aname, self._app_profile_name)\n self.assertEqual(tenant_aname, aim_epg.tenant_name)\n self.assertEqual(self._app_profile_name, aim_epg.app_profile_name)\n self.assertEqual(aname, aim_epg.name)\n self.assertEqual(net['name'], aim_epg.display_name)\n self.assertEqual(aname, aim_epg.bd_name)\n self.assertItemsEqual(router_anames, aim_epg.provided_contract_names)\n self.assertItemsEqual(router_anames, aim_epg.consumed_contract_names)\n # REVISIT(rkukura): Check openstack_vmm_domain_names and\n # physical_domain_names?\n self._check_dn(net, aim_epg, 'EndpointGroup')\n\n aim_tenant = self._get_tenant(vrf_tenant_aname)\n self.assertEqual(vrf_tenant_aname, aim_tenant.name)\n self.assertEqual(vrf_tenant_dname, aim_tenant.display_name)\n\n aim_vrf = self._get_vrf(vrf_aname, vrf_tenant_aname)\n self.assertEqual(vrf_tenant_aname, aim_vrf.tenant_name)\n self.assertEqual(vrf_aname, aim_vrf.name)\n self.assertEqual(vrf_dname, aim_vrf.display_name)\n self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)\n self._check_dn(net, aim_vrf, 'VRF')\n\n def _check_network_deleted(self, net):\n aname = net['id']\n self._bd_should_not_exist(aname)\n self._epg_should_not_exist(aname)\n\n def _check_subnet(self, subnet, net, expected_gws, unexpected_gw_ips):\n prefix_len = subnet['cidr'].split('/')[1]\n\n tenant_aname = net['tenant_id'] # TODO(rkukura): Sharing\n self._get_tenant(tenant_aname)\n\n net_aname = net['id']\n\n for gw_ip, router in expected_gws:\n gw_ip_mask = gw_ip + '/' + prefix_len\n aim_subnet = self._get_subnet(gw_ip_mask, net_aname, tenant_aname)\n self.assertEqual(tenant_aname, aim_subnet.tenant_name)\n self.assertEqual(net_aname, aim_subnet.bd_name)\n self.assertEqual(gw_ip_mask, aim_subnet.gw_ip_mask)\n 
self.assertEqual('private', aim_subnet.scope)\n display_name = (\"%s - %s\" %\n (router['name'],\n (subnet['name'] or subnet['cidr'])))\n self.assertEqual(display_name, aim_subnet.display_name)\n self._check_dn(subnet, aim_subnet, gw_ip)\n\n for gw_ip in unexpected_gw_ips:\n gw_ip_mask = gw_ip + '/' + prefix_len\n self._subnet_should_not_exist(gw_ip_mask, net_aname)\n self._check_no_dn(subnet, gw_ip)\n\n def _check_subnet_deleted(self, subnet):\n # REVISIT(rkukura): Anything to check? We could find all the\n # AIM Subnets with the network's bd_name, and make sure none\n # are in this subnet.\n pass\n\n def _check_address_scope(self, scope):\n tenant_aname = scope['tenant_id']\n self._get_tenant(tenant_aname)\n\n aname = scope['id']\n\n aim_vrf = self._get_vrf(aname, tenant_aname)\n self.assertEqual(tenant_aname, aim_vrf.tenant_name)\n self.assertEqual(aname, aim_vrf.name)\n self.assertEqual(scope['name'], aim_vrf.display_name)\n self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)\n self._check_dn(scope, aim_vrf, 'VRF')\n\n def _check_address_scope_deleted(self, scope):\n aname = scope['id']\n self._vrf_should_not_exist(aname)\n\n def _check_router(self, router, expected_gw_ips, unexpected_gw_ips,\n scope=None):\n tenant_aname = router['tenant_id'] # TODO(rkukura): Sharing\n self._get_tenant(tenant_aname)\n\n aname = router['id']\n\n aim_contract = self._get_contract(aname, tenant_aname)\n self.assertEqual(tenant_aname, aim_contract.tenant_name)\n self.assertEqual(aname, aim_contract.name)\n self.assertEqual(router['name'], aim_contract.display_name)\n self.assertEqual('context', aim_contract.scope) # REVISIT(rkukura)\n self._check_dn(router, aim_contract, 'Contract')\n\n aim_subject = self._get_subject('route', aname, tenant_aname)\n self.assertEqual(tenant_aname, aim_subject.tenant_name)\n self.assertEqual(aname, aim_subject.contract_name)\n self.assertEqual('route', aim_subject.name)\n self.assertEqual(router['name'], aim_subject.display_name)\n self.assertEqual([], aim_subject.in_filters)\n self.assertEqual([], aim_subject.out_filters)\n self.assertEqual(['AnyFilter'], aim_subject.bi_filters)\n self._check_dn(router, aim_subject, 'ContractSubject')\n\n self._check_any_filter(tenant_aname) # REVISIT\n\n if expected_gw_ips:\n if scope:\n vrf_aname = scope['id']\n vrf_dname = scope['name']\n vrf_tenant_aname = scope['tenant_id']\n vrf_tenant_dname = None\n else:\n vrf_aname = 'DefaultVRF'\n vrf_dname = 'Default Routed VRF'\n vrf_tenant_aname = tenant_aname\n vrf_tenant_dname = None\n\n aim_tenant = self._get_tenant(vrf_tenant_aname)\n self.assertEqual(vrf_tenant_aname, aim_tenant.name)\n self.assertEqual(vrf_tenant_dname, aim_tenant.display_name)\n\n aim_vrf = self._get_vrf(vrf_aname,\n vrf_tenant_aname)\n self.assertEqual(vrf_tenant_aname, aim_vrf.tenant_name)\n self.assertEqual(vrf_aname, aim_vrf.name)\n self.assertEqual(vrf_dname, aim_vrf.display_name)\n self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)\n self._check_dn(router, aim_vrf, 'VRF')\n else:\n self._check_no_dn(router, 'VRF')\n\n # The AIM Subnets are validated in _check_subnet, so just\n # check that their DNs are present and valid.\n dist_names = router.get('apic:distinguished_names')\n for gw_ip in expected_gw_ips:\n self.assertIn(gw_ip, dist_names)\n aim_subnet = self._find_by_dn(dist_names[gw_ip],\n aim_resource.Subnet)\n self.assertIsNotNone(aim_subnet)\n for gw_ip in unexpected_gw_ips:\n self.assertNotIn(gw_ip, dist_names)\n\n def _check_router_deleted(self, router):\n aname = router['id']\n 
self._subject_should_not_exist('route', aname)\n self._contract_should_not_exist(aname)\n\n def _check_any_filter(self, tenant_aname):\n aim_filter = self._get_filter('AnyFilter', tenant_aname)\n self.assertEqual(tenant_aname, aim_filter.tenant_name)\n self.assertEqual('AnyFilter', aim_filter.name)\n self.assertEqual('Any Filter', aim_filter.display_name)\n\n aim_entry = self._get_filter_entry('AnyFilterEntry', 'AnyFilter',\n tenant_aname)\n self.assertEqual(tenant_aname, aim_entry.tenant_name)\n self.assertEqual('AnyFilter', aim_entry.filter_name)\n self.assertEqual('AnyFilterEntry', aim_entry.name)\n self.assertEqual('Any FilterEntry', aim_entry.display_name)\n self.assertEqual('unspecified', aim_entry.arp_opcode)\n self.assertEqual('unspecified', aim_entry.ether_type)\n self.assertEqual('unspecified', aim_entry.ip_protocol)\n self.assertEqual('unspecified', aim_entry.icmpv4_type)\n self.assertEqual('unspecified', aim_entry.icmpv6_type)\n self.assertEqual('unspecified', aim_entry.source_from_port)\n self.assertEqual('unspecified', aim_entry.source_to_port)\n self.assertEqual('unspecified', aim_entry.dest_from_port)\n self.assertEqual('unspecified', aim_entry.dest_to_port)\n self.assertEqual('unspecified', aim_entry.tcp_flags)\n self.assertFalse(aim_entry.stateful)\n self.assertFalse(aim_entry.fragment_only)\n\n def test_network_lifecycle(self):\n # Test create.\n net = self._make_network(self.fmt, 'net1', True)['network']\n net_id = net['id']\n self._check_network(net)\n\n # Test show.\n net = self._show('networks', net_id)['network']\n self._check_network(net)\n\n # Test update.\n data = {'network': {'name': 'newnamefornet'}}\n net = self._update('networks', net_id, data)['network']\n self._check_network(net)\n\n # Test delete.\n self._delete('networks', net_id)\n self._check_network_deleted(net)\n\n def test_subnet_lifecycle(self):\n # Create network.\n net_resp = self._make_network(self.fmt, 'net1', True)\n net = net_resp['network']\n\n # Test create.\n gw_ip = '10.0.0.1'\n subnet = self._make_subnet(\n self.fmt, net_resp, gw_ip, '10.0.0.0/24')['subnet']\n subnet_id = subnet['id']\n self._check_subnet(subnet, net, [], [gw_ip])\n\n # Test show.\n subnet = self._show('subnets', subnet_id)['subnet']\n self._check_subnet(subnet, net, [], [gw_ip])\n\n # Test update.\n data = {'subnet': {'name': 'newnamefornet'}}\n subnet = self._update('subnets', subnet_id, data)['subnet']\n self._check_subnet(subnet, net, [], [gw_ip])\n\n # Test delete.\n self._delete('subnets', subnet_id)\n self._check_subnet_deleted(subnet)\n\n def test_address_scope_lifecycle(self):\n # Test create.\n scope = self._make_address_scope(\n self.fmt, 4, name='as1')['address_scope']\n scope_id = scope['id']\n self._check_address_scope(scope)\n\n # Test show.\n scope = self._show('address-scopes', scope_id)['address_scope']\n self._check_address_scope(scope)\n\n # Test update.\n data = {'address_scope': {'name': 'newnameforaddressscope'}}\n scope = self._update('address-scopes', scope_id, data)['address_scope']\n self._check_address_scope(scope)\n\n # Test delete.\n self._delete('address-scopes', scope_id)\n self._check_address_scope_deleted(scope)\n\n def test_router_lifecycle(self):\n # Test create.\n router = self._make_router(\n self.fmt, 'test-tenant', 'router1')['router']\n router_id = router['id']\n self._check_router(router, [], [])\n\n # Test show.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [], [])\n\n # Test update.\n data = {'router': {'name': 'newnameforrouter'}}\n router = 
self._update('routers', router_id, data)['router']\n self._check_router(router, [], [])\n\n # Test delete.\n self._delete('routers', router_id)\n self._check_router_deleted(router)\n\n def test_router_interface(self):\n # Create router.\n router = self._make_router(\n self.fmt, 'test-tenant', 'router1')['router']\n router_id = router['id']\n self._check_router(router, [], [])\n\n # Create network.\n net_resp = self._make_network(self.fmt, 'net1', True)\n net = net_resp['network']\n net_id = net['id']\n self._check_network(net)\n\n # Create subnet1.\n gw1_ip = '10.0.1.1'\n subnet = self._make_subnet(self.fmt, net_resp, gw1_ip,\n '10.0.1.0/24')['subnet']\n subnet1_id = subnet['id']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Create subnet2.\n gw2_ip = '10.0.2.1'\n subnet = self._make_subnet(self.fmt, net_resp, gw2_ip,\n '10.0.2.0/24')['subnet']\n subnet2_id = subnet['id']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n # Add subnet1 to router by subnet.\n info = self.l3_plugin.add_router_interface(\n context.get_admin_context(), router_id, {'subnet_id': subnet1_id})\n self.assertIn(subnet1_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw1_ip], [])\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router])\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n # Test subnet update.\n data = {'subnet': {'name': 'newnameforsubnet'}}\n subnet = self._update('subnets', subnet1_id, data)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Test router update.\n data = {'router': {'name': 'newnameforrouter'}}\n router = self._update('routers', router_id, data)['router']\n self._check_router(router, [gw1_ip], [])\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Add subnet2 to router by port.\n fixed_ips = [{'subnet_id': subnet2_id, 'ip_address': gw2_ip}]\n port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']\n port2_id = port['id']\n info = self.l3_plugin.add_router_interface(\n context.get_admin_context(), router_id, {'port_id': port2_id})\n self.assertIn(subnet2_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw1_ip, gw2_ip], [])\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router])\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [(gw2_ip, router)], [])\n\n # Remove subnet1 from router by subnet.\n info = self.l3_plugin.remove_router_interface(\n context.get_admin_context(), router_id, {'subnet_id': subnet1_id})\n self.assertIn(subnet1_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw2_ip], [gw1_ip])\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router])\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Check subnet2.\n subnet = self._show('subnets', 
subnet2_id)['subnet']\n self._check_subnet(subnet, net, [(gw2_ip, router)], [])\n\n # Remove subnet2 from router by port.\n info = self.l3_plugin.remove_router_interface(\n context.get_admin_context(), router_id, {'port_id': port2_id})\n self.assertIn(subnet2_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [], [gw1_ip, gw2_ip])\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net)\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n def test_router_interface_with_address_scope(self):\n # REVISIT(rkukura): Currently follows same workflow as above,\n # but might be sufficient to test with a single subnet with\n # its CIDR allocated from the subnet pool.\n\n # Create address scope.\n scope = self._make_address_scope(\n self.fmt, 4, name='as1')['address_scope']\n scope_id = scope['id']\n self._check_address_scope(scope)\n\n # Create subnet pool.\n pool = self._make_subnetpool(self.fmt, ['10.0.0.0/8'], name='sp1',\n tenant_id='test-tenant', # REVISIT\n address_scope_id=scope_id,\n default_prefixlen=24)['subnetpool']\n pool_id = pool['id']\n\n # Create router.\n router = self._make_router(\n self.fmt, 'test-tenant', 'router1')['router']\n router_id = router['id']\n self._check_router(router, [], [], scope)\n\n # Create network.\n net_resp = self._make_network(self.fmt, 'net1', True)\n net = net_resp['network']\n net_id = net['id']\n self._check_network(net)\n\n # Create subnet1.\n gw1_ip = '10.0.1.1'\n subnet = self._make_subnet(\n self.fmt, net_resp, gw1_ip, '10.0.1.0/24',\n subnetpool_id=pool_id)['subnet']\n subnet1_id = subnet['id']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Create subnet2.\n gw2_ip = '10.0.2.1'\n subnet = self._make_subnet(\n self.fmt, net_resp, gw2_ip, '10.0.2.0/24',\n subnetpool_id=pool_id)['subnet']\n subnet2_id = subnet['id']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n # Add subnet1 to router by subnet.\n info = self.l3_plugin.add_router_interface(\n context.get_admin_context(), router_id, {'subnet_id': subnet1_id})\n self.assertIn(subnet1_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw1_ip], [], scope)\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router], scope)\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n # Test subnet update.\n data = {'subnet': {'name': 'newnameforsubnet'}}\n subnet = self._update('subnets', subnet1_id, data)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Test router update.\n data = {'router': {'name': 'newnameforrouter'}}\n router = self._update('routers', router_id, data)['router']\n self._check_router(router, [gw1_ip], [], scope)\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Add subnet2 to router by port.\n fixed_ips = [{'subnet_id': subnet2_id, 'ip_address': gw2_ip}]\n port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']\n port2_id = port['id']\n info = self.l3_plugin.add_router_interface(\n 
context.get_admin_context(), router_id, {'port_id': port2_id})\n self.assertIn(subnet2_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw1_ip, gw2_ip], [], scope)\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router], scope)\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [(gw1_ip, router)], [])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [(gw2_ip, router)], [])\n\n # Remove subnet1 from router by subnet.\n info = self.l3_plugin.remove_router_interface(\n context.get_admin_context(), router_id, {'subnet_id': subnet1_id})\n self.assertIn(subnet1_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [gw2_ip], [gw1_ip], scope)\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net, [router], scope)\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [(gw2_ip, router)], [])\n\n # Remove subnet2 from router by port.\n info = self.l3_plugin.remove_router_interface(\n context.get_admin_context(), router_id, {'port_id': port2_id})\n self.assertIn(subnet2_id, info['subnet_ids'])\n\n # Check router.\n router = self._show('routers', router_id)['router']\n self._check_router(router, [], [gw1_ip, gw2_ip], scope)\n\n # Check network.\n net = self._show('networks', net_id)['network']\n self._check_network(net)\n\n # Check subnet1.\n subnet = self._show('subnets', subnet1_id)['subnet']\n self._check_subnet(subnet, net, [], [gw1_ip])\n\n # Check subnet2.\n subnet = self._show('subnets', subnet2_id)['subnet']\n self._check_subnet(subnet, net, [], [gw2_ip])\n\n # TODO(rkukura): Test IPv6 and dual stack router interfaces.\n\n\nclass TestSyncState(ApicAimTestCase):\n @staticmethod\n def _get_synced_status(self, context, resource):\n status = aim_status.AciStatus.SYNCED\n return aim_status.AciStatus(sync_status=status)\n\n @staticmethod\n def _get_pending_status_for_type(resource, type):\n status = (isinstance(resource, type) and\n aim_status.AciStatus.SYNC_PENDING or\n aim_status.AciStatus.SYNCED)\n return aim_status.AciStatus(sync_status=status)\n\n @staticmethod\n def _get_failed_status_for_type(resource, type):\n status = (isinstance(resource, type) and\n aim_status.AciStatus.SYNC_FAILED or\n aim_status.AciStatus.SYNC_PENDING)\n return aim_status.AciStatus(sync_status=status)\n\n def _test_network(self, expected_state):\n net = self._make_network(self.fmt, 'net1', True)['network']\n self.assertEqual(expected_state, net['apic:synchronization_state'])\n\n net = self._show('networks', net['id'])['network']\n self.assertEqual(expected_state, net['apic:synchronization_state'])\n\n def test_network_synced(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_network('synced')\n\n def test_network_bd_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.BridgeDomain)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('build')\n\n def test_network_bd_error(self):\n def get_status(self, 
context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.BridgeDomain)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('error')\n\n def test_network_epg_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.EndpointGroup)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('build')\n\n def test_network_epg_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.EndpointGroup)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('error')\n\n def test_network_vrf_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('build')\n\n def test_network_vrf_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_network('error')\n\n def _test_address_scope(self, expected_state):\n scope = self._make_address_scope(self.fmt, 4, name='scope1')[\n 'address_scope']\n self.assertEqual(expected_state, scope['apic:synchronization_state'])\n\n scope = self._show('address-scopes', scope['id'])['address_scope']\n self.assertEqual(expected_state, scope['apic:synchronization_state'])\n\n def test_address_scope_synced(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_address_scope('synced')\n\n def test_address_scope_vrf_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_address_scope('build')\n\n def test_address_scope_vrf_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_address_scope('error')\n\n def _test_router(self, expected_state):\n router = self._make_router(self.fmt, 'test-tenant', 'router1')[\n 'router']\n self.assertEqual(expected_state, router['apic:synchronization_state'])\n\n router = self._show('routers', router['id'])['router']\n self.assertEqual(expected_state, router['apic:synchronization_state'])\n\n def test_router_synced(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_router('synced')\n\n def test_router_contract_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.Contract)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router('build')\n\n def test_router_contract_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.Contract)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router('error')\n\n def test_router_subject_build(self):\n def get_status(self, context, resource):\n return 
TestSyncState._get_pending_status_for_type(\n resource, aim_resource.ContractSubject)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router('build')\n\n def test_router_subject_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.ContractSubject)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router('error')\n\n def _test_router_interface_vrf(self, expected_state):\n net_resp = self._make_network(self.fmt, 'net1', True)\n subnet = self._make_subnet(\n self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']\n router = self._make_router(self.fmt, 'test-tenant', 'router1')[\n 'router']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router['id'],\n {'subnet_id': subnet['id']})\n\n router = self._show('routers', router['id'])['router']\n self.assertEqual(expected_state, router['apic:synchronization_state'])\n\n def test_router_interface_vrf_synced(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_router_interface_vrf('synced')\n\n def test_router_interface_vrf_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router_interface_vrf('build')\n\n def test_router_interface_vrf_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.VRF)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router_interface_vrf('error')\n\n def _test_router_interface_subnet(self, expected_state):\n net_resp = self._make_network(self.fmt, 'net1', True)\n subnet = self._make_subnet(\n self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']\n router = self._make_router(self.fmt, 'test-tenant', 'router1')[\n 'router']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router['id'],\n {'subnet_id': subnet['id']})\n\n router = self._show('routers', router['id'])['router']\n self.assertEqual(expected_state,\n router['apic:synchronization_state'])\n\n subnet = self._show('subnets', subnet['id'])['subnet']\n self.assertEqual(expected_state, subnet['apic:synchronization_state'])\n\n def test_router_interface_subnet_synced(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_router_interface_subnet('synced')\n\n def test_router_interface_subnet_build(self):\n def get_status(self, context, resource):\n return TestSyncState._get_pending_status_for_type(\n resource, aim_resource.Subnet)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router_interface_subnet('build')\n\n def test_router_interface_subnet_error(self):\n def get_status(self, context, resource):\n return TestSyncState._get_failed_status_for_type(\n resource, aim_resource.Subnet)\n\n with mock.patch('aim.aim_manager.AimManager.get_status', get_status):\n self._test_router_interface_subnet('error')\n\n def _test_external_network(self, expected_state, dn=None, msg=None):\n net = self._make_ext_network('net1', dn=dn)\n self.assertEqual(expected_state, net['apic:synchronization_state'],\n msg)\n net = self._show('networks', net['id'])['network']\n self.assertEqual(expected_state, net['apic:synchronization_state'],\n msg)\n\n def 
test_external_network(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_external_network('synced',\n dn='uni/tn-t1/out-l1/instP-n1')\n\n for expected_status, status_func in [\n ('build', TestSyncState._get_pending_status_for_type),\n ('error', TestSyncState._get_failed_status_for_type)]:\n for a_res in [aim_resource.ExternalNetwork,\n aim_resource.EndpointGroup,\n aim_resource.BridgeDomain,\n aim_resource.VRF]:\n def get_status(self, context, resource):\n return status_func(resource, a_res)\n with mock.patch('aim.aim_manager.AimManager.get_status',\n get_status):\n self._test_external_network(expected_status,\n dn='uni/tn-t1/out-l1/instP-n1',\n msg='%s' % a_res)\n\n def test_unmanaged_external_network(self):\n self._test_external_network('N/A')\n\n def _test_external_subnet(self, expected_state, dn=None):\n net = self._make_ext_network('net1', dn=dn)\n subnet = self._make_subnet(\n self.fmt, {'network': net}, '10.0.0.1', '10.0.0.0/24')['subnet']\n\n subnet = self._show('subnets', subnet['id'])['subnet']\n self.assertEqual(expected_state, subnet['apic:synchronization_state'])\n\n def test_external_subnet(self):\n with mock.patch('aim.aim_manager.AimManager.get_status',\n TestSyncState._get_synced_status):\n self._test_external_subnet('synced',\n dn='uni/tn-t1/out-l1/instP-n1')\n\n for expected_status, status_func in [\n ('build', TestSyncState._get_pending_status_for_type),\n ('error', TestSyncState._get_failed_status_for_type)]:\n def get_status(self, context, resource):\n return status_func(resource, aim_resource.Subnet)\n with mock.patch('aim.aim_manager.AimManager.get_status',\n get_status):\n self._test_external_subnet(expected_status,\n dn='uni/tn-t1/out-l1/instP-n1')\n\n def test_unmanaged_external_subnet(self):\n self._test_external_subnet('N/A')\n\n\nclass TestTopology(ApicAimTestCase):\n def test_network_subnets_on_same_router(self):\n # Create network.\n net_resp = self._make_network(self.fmt, 'net1', True)\n net_id = net_resp['network']['id']\n\n # Create router.\n router1_id = self._make_router(\n self.fmt, 'test-tenant', 'router1')['router']['id']\n\n # Create subnet and add to router.\n subnet1_id = self._make_subnet(\n self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24')['subnet']['id']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router1_id, {'subnet_id': subnet1_id})\n\n # Create 2nd subnet and add to router.\n subnet2_id = self._make_subnet(\n self.fmt, net_resp, '10.0.2.1', '10.0.2.0/24')['subnet']['id']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router1_id, {'subnet_id': subnet2_id})\n\n # Create another router.\n router2_id = self._make_router(\n self.fmt, 'test-tenant', 'router2')['router']['id']\n\n # Create 3rd subnet and verify adding to 2nd router fails.\n subnet3_id = self._make_subnet(\n self.fmt, net_resp, '10.0.3.1', '10.0.3.0/24')['subnet']['id']\n self.assertRaises(\n md.UnsupportedRoutingTopology,\n self.l3_plugin.add_router_interface,\n context.get_admin_context(), router2_id, {'subnet_id': subnet3_id})\n\n # Verify adding 1st subnet to 2nd router fails.\n fixed_ips = [{'subnet_id': subnet1_id, 'ip_address': '10.0.1.100'}]\n port_id = self._make_port(\n self.fmt, net_id, fixed_ips=fixed_ips)['port']['id']\n self.assertRaises(\n md.UnsupportedRoutingTopology,\n self.l3_plugin.add_router_interface,\n context.get_admin_context(), router2_id, {'port_id': port_id})\n\n # Verify adding 2nd subnet to 2nd router fails.\n fixed_ips = [{'subnet_id': 
subnet2_id, 'ip_address': '10.0.2.100'}]\n port_id = self._make_port(\n self.fmt, net_id, fixed_ips=fixed_ips)['port']['id']\n self.assertRaises(\n md.UnsupportedRoutingTopology,\n self.l3_plugin.add_router_interface,\n context.get_admin_context(), router2_id, {'port_id': port_id})\n\n def test_network_subnet_on_multple_routers(self):\n # Create network.\n net_resp = self._make_network(self.fmt, 'net1', True)\n net_id = net_resp['network']['id']\n\n # Create router.\n router1_id = self._make_router(\n self.fmt, 'test-tenant', 'router1')['router']['id']\n\n # Create subnet and add to router.\n subnet1_id = self._make_subnet(\n self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24')['subnet']['id']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router1_id, {'subnet_id': subnet1_id})\n\n # Create 2nd router.\n router2_id = self._make_router(\n self.fmt, 'test-tenant', 'router2')['router']['id']\n\n # Add same subnet to 2nd router.\n fixed_ips = [{'subnet_id': subnet1_id, 'ip_address': '10.0.1.100'}]\n port_id = self._make_port(\n self.fmt, net_id, fixed_ips=fixed_ips)['port']['id']\n self.l3_plugin.add_router_interface(\n context.get_admin_context(), router2_id, {'port_id': port_id})\n\n # Create 2nd subnet and verify adding to either router fails.\n subnet2_id = self._make_subnet(\n self.fmt, net_resp, '10.0.2.1', '10.0.2.0/24')['subnet']['id']\n self.assertRaises(\n md.UnsupportedRoutingTopology,\n self.l3_plugin.add_router_interface,\n context.get_admin_context(), router1_id, {'subnet_id': subnet2_id})\n self.assertRaises(\n md.UnsupportedRoutingTopology,\n self.l3_plugin.add_router_interface,\n context.get_admin_context(), router2_id, {'subnet_id': subnet2_id})\n\n\nclass TestPortBinding(ApicAimTestCase):\n def test_bind_opflex_agent(self):\n self._register_agent('host1', AGENT_CONF_OPFLEX)\n net = self._make_network(self.fmt, 'net1', True)\n self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')\n port = self._make_port(self.fmt, net['network']['id'])['port']\n port_id = port['id']\n port = self._bind_port_to_host(port_id, 'host1')['port']\n self.assertEqual('ovs', port['binding:vif_type'])\n self.assertEqual({'port_filter': False, 'ovs_hybrid_plug': False},\n port['binding:vif_details'])\n\n # TODO(rkukura): Add tests for promiscuous_mode cases.\n\n def test_bind_unsupported_vnic_type(self):\n net = self._make_network(self.fmt, 'net1', True)\n self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')\n vnic_arg = {'binding:vnic_type': 'macvtap'}\n port = self._make_port(self.fmt, net['network']['id'],\n arg_list=('binding:vnic_type',),\n **vnic_arg)['port']\n port = self._bind_port_to_host(port['id'], 'host1')['port']\n self.assertEqual('binding_failed', port['binding:vif_type'])\n\n # TODO(rkukura): Add tests for opflex, local and unsupported\n # network_type values.\n\n\nclass TestMl2BasicGet(test_plugin.TestBasicGet,\n ApicAimTestCase):\n pass\n\n\nclass TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,\n ApicAimTestCase):\n pass\n\n\nclass TestMl2PortsV2(test_plugin.TestPortsV2,\n ApicAimTestCase):\n pass\n\n\nclass TestMl2NetworksV2(test_plugin.TestNetworksV2,\n ApicAimTestCase):\n\n def test_aim_epg_domains(self):\n aim_ctx = aim_context.AimContext(self.db_session)\n self.aim_mgr.create(aim_ctx,\n aim_resource.VMMDomain(type='OpenStack',\n name='vm1'),\n overwrite=True)\n self.aim_mgr.create(aim_ctx,\n aim_resource.VMMDomain(type='OpenStack',\n name='vm2'),\n overwrite=True)\n self.aim_mgr.create(aim_ctx,\n aim_resource.PhysicalDomain(name='ph1'),\n 
overwrite=True)\n self.aim_mgr.create(aim_ctx,\n aim_resource.PhysicalDomain(name='ph2'),\n overwrite=True)\n with self.network(name='net'):\n epg = self.aim_mgr.find(aim_ctx, aim_resource.EndpointGroup)[0]\n self.assertEqual(set(['vm1', 'vm2']),\n set(epg.openstack_vmm_domain_names))\n self.assertEqual(set(['ph1', 'ph2']),\n set(epg.physical_domain_names))\n\n\nclass TestMl2SubnetsV2(test_plugin.TestSubnetsV2,\n ApicAimTestCase):\n pass\n\n\nclass TestMl2SubnetPoolsV2(test_plugin.TestSubnetPoolsV2,\n ApicAimTestCase):\n pass\n\n\nclass TestExtensionAttributes(ApicAimTestCase):\n\n def test_external_network_lifecycle(self):\n session = db_api.get_session()\n extn = extn_db.ExtensionDbMixin()\n\n # create with APIC DN, nat_typeand default CIDR\n net1 = self._make_ext_network('net1',\n dn='uni/tn-t1/out-l1/instP-n1',\n nat_type='')\n\n self.assertEqual('uni/tn-t1/out-l1/instP-n1',\n net1[DN]['ExternalNetwork'])\n self.assertEqual('', net1['apic:nat_type'])\n self.assertEqual(['0.0.0.0/0'], net1[CIDR])\n\n # create with nat_type set to default, and CIDR specified\n net2 = self._make_ext_network('net2',\n dn='uni/tn-t1/out-l2/instP-n2',\n cidrs=['5.5.5.0/24', '10.20.0.0/16'])\n self.assertEqual('distributed', net2['apic:nat_type'])\n self.assertEqual(['10.20.0.0/16', '5.5.5.0/24'],\n sorted(net2[CIDR]))\n\n # update CIDR\n net2 = self._update('networks', net2['id'],\n {'network': {CIDR: ['20.20.30.0/24']}})['network']\n self.assertEqual('distributed', net2['apic:nat_type'])\n self.assertEqual(['20.20.30.0/24'], net2[CIDR])\n\n net2 = self._update('networks', net2['id'],\n {'network': {CIDR: []}})['network']\n self.assertEqual([], net2[CIDR])\n\n # create without APIC DN -> this is an unmanaged network\n net3 = self._make_ext_network('net3')\n self.assertTrue(DN not in net3 or 'ExternalNetwork' not in net3[DN])\n self.assertFalse('apic:nat_type' in net3)\n self.assertFalse(CIDR in net3)\n\n # updating CIDR of unmanaged network is no-op\n net3 = self._update('networks', net3['id'],\n {'network': {CIDR: ['30.30.20.0/24']}})['network']\n self.assertTrue(DN not in net3 or 'ExternalNetwork' not in net3[DN])\n self.assertFalse('apic:nat_type' in net3)\n self.assertFalse(CIDR in net3)\n\n # delete the external networks\n self._delete('networks', net2['id'])\n self._delete('networks', net1['id'])\n\n self.assertFalse(extn.get_network_extn_db(session, net1['id']))\n self.assertFalse(extn.get_network_extn_db(session, net2['id']))\n\n def test_external_network_fail(self):\n # APIC DN not specified\n resp = self._create_network(self.fmt, 'net1', True,\n arg_list=self.extension_attributes,\n **{'router:external': True,\n DN: {'Foo': 'bar'}})\n self.assertEqual(400, resp.status_code)\n\n # APIC DN is wrong\n resp = self._create_network(self.fmt, 'net1', True,\n arg_list=self.extension_attributes,\n **{'router:external': True,\n DN: {'ExternalNetwork': 'uni/tenant-t1/ext-l1/instP-n2'}})\n self.assertEqual(400, resp.status_code)\n\n # Update APIC DN, nat-type\n net1 = self._make_ext_network('net1',\n dn='uni/tn-t1/out-l1/instP-n1',\n nat_type='edge')\n\n self._update('networks', net1['id'],\n {'network':\n {DN: {'ExternalNetwork': 'uni/tn-t1/out-l1/instP-n2'}}},\n 400)\n self._update('networks', net1['id'], {'apic:nat_type': ''}, 400)\n\n def test_external_subnet_lifecycle(self):\n session = db_api.get_session()\n extn = extn_db.ExtensionDbMixin()\n\n net1 = self._make_ext_network('net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n # create with default value for snat_host_pool\n subnet = self._make_subnet(\n 
self.fmt, {'network': net1}, '10.0.0.1', '10.0.0.0/24')['subnet']\n subnet = self._show('subnets', subnet['id'])['subnet']\n self.assertFalse(subnet[SNAT_POOL])\n\n # Update something other than snat_host_pool\n subnet = self._update('subnets', subnet['id'],\n {'subnet': {'name': 'foo'}})['subnet']\n self.assertFalse(subnet[SNAT_POOL])\n\n # Update snat_host_pool\n subnet = self._update('subnets', subnet['id'],\n {'subnet': {SNAT_POOL: True}})['subnet']\n self.assertTrue(subnet[SNAT_POOL])\n\n subnet = self._update('subnets', subnet['id'],\n {'subnet': {SNAT_POOL: False}})['subnet']\n self.assertFalse(subnet[SNAT_POOL])\n\n # delete subnet\n self._delete('subnets', subnet['id'])\n self.assertFalse(extn.get_subnet_extn_db(session, subnet['id']))\n\n # Simulate a prior existing subnet (i.e. no extension attrs exist)\n # Get should give default value, and updates should stick\n subnet2 = self._make_subnet(\n self.fmt, {'network': net1}, '20.0.0.1', '20.0.0.0/24')['subnet']\n self._update('subnets', subnet2['id'],\n {'subnet': {SNAT_POOL: True}})\n with session.begin(subtransactions=True):\n db_obj = session.query(extn_db.SubnetExtensionDb).filter(\n extn_db.SubnetExtensionDb.subnet_id ==\n subnet2['id']).one()\n session.delete(db_obj)\n subnet2 = self._show('subnets', subnet2['id'])['subnet']\n self.assertFalse(subnet2[SNAT_POOL])\n\n subnet2 = self._update('subnets', subnet2['id'],\n {'subnet': {SNAT_POOL: True}})['subnet']\n self.assertTrue(subnet2[SNAT_POOL])\n\n def test_router_lifecycle(self):\n session = db_api.get_session()\n extn = extn_db.ExtensionDbMixin()\n\n # create router with default values\n rtr0 = self._make_router(self.fmt, 'test-tenant',\n 'router0')['router']\n self.assertEqual([], rtr0[PROV])\n self.assertEqual([], rtr0[CONS])\n\n # create with specific values\n rtr1 = self._make_router(self.fmt, 'test-tenant', 'router1',\n arg_list=self.extension_attributes,\n **{PROV: ['p1', 'p2', 'k'],\n CONS: ['c1', 'c2', 'k']})['router']\n self.assertEqual(['k', 'p1', 'p2'], sorted(rtr1[PROV]))\n self.assertEqual(['c1', 'c2', 'k'], sorted(rtr1[CONS]))\n\n # update router\n self._update('routers', rtr1['id'],\n {'router': {PROV: [], CONS: ['k']}})\n rtr1 = self._show('routers', rtr1['id'])['router']\n self.assertEqual([], rtr1[PROV])\n self.assertEqual(['k'], rtr1[CONS])\n\n self._update('routers', rtr1['id'],\n {'router': {PROV: ['p1', 'p2']}})\n rtr1 = self._show('routers', rtr1['id'])['router']\n self.assertEqual(['p1', 'p2'], sorted(rtr1[PROV]))\n self.assertEqual(['k'], rtr1[CONS])\n\n # delete\n self._delete('routers', rtr1['id'])\n self.assertEqual({PROV: [], CONS: []},\n extn.get_router_extn_db(session, rtr1['id']))\n\n # Simulate a prior existing router (i.e. 
no extension attrs exist)\n rtr2 = self._make_router(self.fmt, 'test-tenant', 'router2',\n arg_list=self.extension_attributes,\n **{PROV: ['k'], CONS: ['k']})['router']\n extn.set_router_extn_db(session, rtr2['id'], {PROV: [], CONS: []})\n rtr2 = self._show('routers', rtr2['id'])['router']\n self.assertEqual([], rtr2[PROV])\n self.assertEqual([], rtr2[CONS])\n\n rtr2 = self._update('routers', rtr2['id'],\n {'router': {PROV: ['p1', 'p2']}})['router']\n self.assertEqual(['p1', 'p2'], sorted(rtr2[PROV]))\n self.assertEqual([], rtr2[CONS])\n\n\nclass CallRecordWrapper(object):\n # Instrument all method calls in a class to record the call in a mock\n\n def setUp(self, klass):\n \"\"\"Returns a mock that records all calls.\"\"\"\n def record_and_call(func, recorder_func):\n def wrapped(*args, **kwargs):\n ret = func(*args, **kwargs)\n a = args[1:] # exclude the 'self' argument\n recorder_func(*a, **kwargs)\n return ret\n return wrapped\n\n self.klass = klass\n recorder = mock.create_autospec(self.klass)\n self.klass.__overridden = {}\n for fn in dir(self.klass):\n val = getattr(self.klass, fn, None)\n if val and callable(val) and not fn.startswith('_'):\n setattr(self.klass, fn,\n record_and_call(val, getattr(recorder, fn)))\n self.klass.__overridden[fn] = val\n return recorder\n\n def tearDown(self):\n for k, v in self.klass.__overridden.iteritems():\n setattr(self.klass, k, v)\n del self.klass.__overridden\n\n\nclass TestExternalConnectivityBase(object):\n\n def setUp(self):\n self.call_wrapper = CallRecordWrapper()\n kls = {'distributed': nat_strategy.DistributedNatStrategy,\n 'edge': nat_strategy.EdgeNatStrategy,\n '': nat_strategy.NoNatStrategy}\n self.mock_ns = self.call_wrapper.setUp(kls[self.nat_type])\n super(TestExternalConnectivityBase, self).setUp()\n\n def tearDown(self):\n self.call_wrapper.tearDown()\n super(TestExternalConnectivityBase, self).tearDown()\n\n def test_external_network_lifecycle(self):\n net1 = self._make_ext_network('net1',\n dn='uni/tn-t1/out-l1/instP-n1',\n cidrs=['20.10.0.0/16', '4.4.4.0/24'])\n self.mock_ns.create_l3outside.assert_called_once_with(\n mock.ANY,\n aim_resource.L3Outside(tenant_name='t1', name='l1'))\n a_ext_net = aim_resource.ExternalNetwork(\n tenant_name='t1', l3out_name='l1', name='n1')\n self.mock_ns.create_external_network.assert_called_once_with(\n mock.ANY, a_ext_net)\n self.mock_ns.update_external_cidrs.assert_called_once_with(\n mock.ANY, a_ext_net, ['20.10.0.0/16', '4.4.4.0/24'])\n ext_epg = aim_resource.EndpointGroup(\n tenant_name='t1', app_profile_name=self._app_profile_name,\n name='EXT-l1')\n ext_bd = aim_resource.BridgeDomain(tenant_name='t1', name='EXT-l1')\n ext_vrf = aim_resource.VRF(tenant_name='t1', name='EXT-l1')\n self._check_dn(net1, ext_epg, 'EndpointGroup')\n self._check_dn(net1, ext_bd, 'BridgeDomain')\n self._check_dn(net1, ext_vrf, 'VRF')\n\n net1 = self._show('networks', net1['id'])['network']\n self._check_dn(net1, ext_epg, 'EndpointGroup')\n self._check_dn(net1, ext_bd, 'BridgeDomain')\n self._check_dn(net1, ext_vrf, 'VRF')\n\n # test no-op CIDR update\n self.mock_ns.reset_mock()\n net1 = self._update('networks', net1['id'],\n {'network': {CIDR: ['4.4.4.0/24', '20.10.0.0/16']}})['network']\n self.mock_ns.update_external_cidrs.assert_not_called()\n\n # test CIDR update\n self.mock_ns.reset_mock()\n net1 = self._update('networks', net1['id'],\n {'network': {CIDR: ['33.33.33.0/30']}})['network']\n self.mock_ns.update_external_cidrs.assert_called_once_with(\n mock.ANY, a_ext_net, ['33.33.33.0/30'])\n\n # delete\n 
self.mock_ns.reset_mock()\n self._delete('networks', net1['id'])\n self.mock_ns.delete_l3outside.assert_called_once_with(\n mock.ANY,\n aim_resource.L3Outside(tenant_name='t1', name='l1'))\n self.mock_ns.delete_external_network.assert_called_once_with(\n mock.ANY,\n aim_resource.ExternalNetwork(tenant_name='t1', l3out_name='l1',\n name='n1'))\n\n # create with default CIDR\n self.mock_ns.reset_mock()\n self._make_ext_network('net2',\n dn='uni/tn-t1/out-l1/instP-n1')\n self.mock_ns.create_external_network.assert_called_once_with(\n mock.ANY, a_ext_net)\n self.mock_ns.update_external_cidrs.assert_called_once_with(\n mock.ANY, a_ext_net, ['0.0.0.0/0'])\n\n def test_unmanaged_external_network_lifecycle(self):\n net1 = self._make_ext_network('net1')\n self.mock_ns.create_l3outside.assert_not_called()\n self.mock_ns.create_external_network.assert_not_called()\n self.mock_ns.update_external_cidrs.assert_not_called()\n self._check_no_dn(net1, 'EndpointGroup')\n self._check_no_dn(net1, 'BridgeDomain')\n self._check_no_dn(net1, 'VRF')\n\n self._delete('networks', net1['id'])\n self.mock_ns.delete_l3outside.assert_not_called()\n self.mock_ns.delete_external_network.assert_not_called()\n\n def test_external_subnet_lifecycle(self):\n net1 = self._make_ext_network('net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n subnet = self._make_subnet(\n self.fmt, {'network': net1}, '10.0.0.1', '10.0.0.0/24',\n allocation_pools=[{'start': '10.0.0.2',\n 'end': '10.0.0.250'}])['subnet']\n subnet = self._show('subnets', subnet['id'])['subnet']\n\n l3out = aim_resource.L3Outside(tenant_name='t1', name='l1')\n self.mock_ns.create_subnet.assert_called_once_with(\n mock.ANY, l3out, '10.0.0.1/24')\n ext_sub = aim_resource.Subnet(tenant_name='t1', bd_name='EXT-l1',\n gw_ip_mask='10.0.0.1/24')\n self._check_dn(subnet, ext_sub, 'Subnet')\n\n # Update gateway\n self.mock_ns.reset_mock()\n ext_sub.gw_ip_mask = '10.0.0.251/24'\n self._update('subnets', subnet['id'],\n {'subnet': {'gateway_ip': '10.0.0.251'}})\n subnet = self._show('subnets', subnet['id'])['subnet']\n self.mock_ns.delete_subnet.assert_called_once_with(\n mock.ANY, l3out, '10.0.0.1/24')\n self.mock_ns.create_subnet.assert_called_once_with(\n mock.ANY, l3out, '10.0.0.251/24')\n self._check_dn(subnet, ext_sub, 'Subnet')\n\n # delete subnet\n self.mock_ns.reset_mock()\n self._delete('subnets', subnet['id'])\n self.mock_ns.delete_subnet.assert_called_once_with(\n mock.ANY, l3out, '10.0.0.251/24')\n\n def test_unmanaged_external_subnet_lifecycle(self):\n net1 = self._make_ext_network('net1')\n subnet = self._make_subnet(\n self.fmt, {'network': net1}, '10.0.0.1', '10.0.0.0/24',\n allocation_pools=[{'start': '10.0.0.2',\n 'end': '10.0.0.250'}])['subnet']\n\n self.mock_ns.create_subnet.assert_not_called()\n self._check_no_dn(subnet, 'Subnet')\n self.assertEqual('N/A', subnet['apic:synchronization_state'])\n\n # Update gateway\n self._update('subnets', subnet['id'],\n {'subnet': {'gateway_ip': '10.0.0.251'}})\n subnet = self._show('subnets', subnet['id'])['subnet']\n self.mock_ns.delete_subnet.assert_not_called()\n self.mock_ns.create_subnet.assert_not_called()\n self._check_no_dn(subnet, 'Subnet')\n\n # delete subnet\n self._delete('subnets', subnet['id'])\n self.mock_ns.delete_subnet.assert_not_called()\n\n def _do_test_router_interface(self, use_addr_scope=False):\n cv = self.mock_ns.connect_vrf\n dv = self.mock_ns.disconnect_vrf\n\n ext_net1 = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n self._make_subnet(\n self.fmt, {'network': ext_net1}, 
'100.100.100.1',\n '100.100.100.0/24')\n\n # Each tenant has\n # 1. One subnetpool + address-scope (optional)\n # 2. Two networks with 2 subnets each; subnets come\n # from the subnetpool if present\n # 3. Two routers with external gateway.\n # Test connects the routers one-by-one to two subnets each,\n # and then removes the router interfaces one-by-one.\n\n objs = {}\n # Create the networks, subnets, routers etc\n for t in ['tenant_1', 'tenant_2']:\n subnetpool = None\n addr_scope = None\n if use_addr_scope:\n addr_scope = self._make_address_scope(\n self.fmt, 4, name='as1', tenant_id=t)['address_scope']\n subnetpool = self._make_subnetpool(\n self.fmt, ['10.0.0.0/8'], name='spool1', tenant_id=t,\n address_scope_id=addr_scope['id'])['subnetpool']\n for ni in range(0, 2):\n net = self._make_network(self.fmt, 'pvt-net%d' % ni, True,\n tenant_id=t)['network']\n sp_id = subnetpool['id'] if use_addr_scope else None\n sub1 = self._make_subnet(\n self.fmt, {'network': net}, '10.%d.1.1' % (10 + ni),\n '10.%d.1.0/24' % (10 + ni),\n subnetpool_id=sp_id)['subnet']\n sub2 = self._make_subnet(\n self.fmt, {'network': net}, '10.%d.2.1' % (10 + ni),\n '10.%d.2.0/24' % (10 + ni),\n subnetpool_id=sp_id)['subnet']\n\n router = self._make_router(\n self.fmt, t, 'router%d' % ni,\n arg_list=self.extension_attributes,\n external_gateway_info={'network_id':\n ext_net1['id']},\n **{PROV: ['pr-%s-%d' % (t, ni)],\n CONS: ['co-%s-%d' % (t, ni)]})['router']\n objs.setdefault(t, []).append(\n tuple([router, [sub1, sub2], addr_scope]))\n self.mock_ns.connect_vrf.assert_not_called()\n\n # Connect the router interfaces to the subnets\n vrf_objs = {}\n for tenant, router_list in objs.iteritems():\n a_vrf = aim_resource.VRF(tenant_name=tenant,\n name='DefaultVRF')\n a_ext_net = aim_resource.ExternalNetwork(\n tenant_name='t1', l3out_name='l1', name='n1')\n for router, subnets, addr_scope in router_list:\n if addr_scope:\n a_vrf.name = addr_scope['id']\n contract = router['id']\n a_ext_net.provided_contract_names.append(contract)\n a_ext_net.provided_contract_names.extend(\n router[PROV])\n a_ext_net.provided_contract_names.sort()\n a_ext_net.consumed_contract_names.append(contract)\n a_ext_net.consumed_contract_names.extend(\n router[CONS])\n a_ext_net.consumed_contract_names.sort()\n\n for idx in range(0, len(subnets)):\n self.mock_ns.reset_mock()\n self._router_interface_action('add', router['id'],\n subnets[idx]['id'], None)\n if idx == 0:\n cv.assert_called_once_with(mock.ANY, a_ext_net, a_vrf)\n else:\n cv.assert_not_called()\n vrf_objs[tenant] = a_ext_net\n\n # Remove the router interfaces\n for tenant, router_list in objs.iteritems():\n a_vrf = aim_resource.VRF(tenant_name=tenant,\n name='DefaultVRF')\n a_ext_net = vrf_objs.pop(tenant)\n num_router = len(router_list)\n for router, subnets, addr_scope in router_list:\n if addr_scope:\n a_vrf.name = addr_scope['id']\n contract = router['id']\n a_ext_net.provided_contract_names.remove(contract)\n a_ext_net.consumed_contract_names.remove(contract)\n for c in router[PROV]:\n a_ext_net.provided_contract_names.remove(c)\n for c in router[CONS]:\n a_ext_net.consumed_contract_names.remove(c)\n\n for idx in range(0, len(subnets)):\n self.mock_ns.reset_mock()\n self._router_interface_action('remove', router['id'],\n subnets[idx]['id'], None)\n if idx == len(subnets) - 1:\n num_router -= 1\n if num_router:\n cv.assert_called_once_with(mock.ANY, a_ext_net,\n a_vrf)\n else:\n dv.assert_called_once_with(mock.ANY, a_ext_net,\n a_vrf)\n else:\n cv.assert_not_called()\n 
dv.assert_not_called()\n\n self.mock_ns.reset_mock()\n self._delete('routers', router['id'])\n dv.assert_not_called()\n\n def test_router_interface(self):\n self._do_test_router_interface(use_addr_scope=False)\n\n def test_router_interface_addr_scope(self):\n self._do_test_router_interface(use_addr_scope=True)\n\n def _do_test_router_gateway(self, use_addr_scope=False):\n cv = self.mock_ns.connect_vrf\n dv = self.mock_ns.disconnect_vrf\n\n ext_net1 = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n self._make_subnet(\n self.fmt, {'network': ext_net1}, '100.100.100.1',\n '100.100.100.0/24')\n ext_net2 = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l2/instP-n2')\n self._make_subnet(\n self.fmt, {'network': ext_net2}, '200.200.200.1',\n '200.200.200.0/24')\n\n objs = []\n net = self._make_network(self.fmt, 'pvt-net1', True)['network']\n subnetpool = None\n addr_scope = None\n if use_addr_scope:\n addr_scope = self._make_address_scope(\n self.fmt, 4, name='as1',\n tenant_id=net['tenant_id'])['address_scope']\n subnetpool = self._make_subnetpool(\n self.fmt, ['10.10.0.0/16'],\n name='spool1', address_scope_id=addr_scope['id'],\n tenant_id=net['tenant_id'])['subnetpool']\n sub1 = self._make_subnet(\n self.fmt, {'network': net}, '10.10.1.1',\n '10.10.1.0/24',\n subnetpool_id=subnetpool['id'] if addr_scope else None)['subnet']\n\n router = self._make_router(\n self.fmt, net['tenant_id'], 'router1',\n arg_list=self.extension_attributes,\n **{PROV: ['pr-1'],\n CONS: ['co-1']})['router']\n objs.append(tuple([router, [sub1]]))\n\n self._router_interface_action('add', router['id'], sub1['id'], None)\n self.mock_ns.connect_vrf.assert_not_called()\n\n self.mock_ns.reset_mock()\n self._update('routers', router['id'],\n {'router':\n {'external_gateway_info': {'network_id':\n ext_net1['id']}}})\n contract = router['id']\n a_ext_net1 = aim_resource.ExternalNetwork(\n tenant_name='t1', l3out_name='l1', name='n1',\n provided_contract_names=sorted(['pr-1', contract]),\n consumed_contract_names=sorted(['co-1', contract]))\n tenant_aname = net['tenant_id'] # REVISIT\n a_vrf = aim_resource.VRF(tenant_name=tenant_aname,\n name='DefaultVRF')\n if use_addr_scope:\n a_vrf.name = addr_scope['id']\n cv.assert_called_once_with(mock.ANY, a_ext_net1, a_vrf)\n\n self.mock_ns.reset_mock()\n self._update('routers', router['id'],\n {'router':\n {'external_gateway_info': {'network_id':\n ext_net2['id']}}})\n a_ext_net2 = aim_resource.ExternalNetwork(\n tenant_name='t1', l3out_name='l2', name='n2',\n provided_contract_names=sorted(['pr-1', contract]),\n consumed_contract_names=sorted(['co-1', contract]))\n a_ext_net1.provided_contract_names = []\n a_ext_net1.consumed_contract_names = []\n dv.assert_called_once_with(mock.ANY, a_ext_net1, a_vrf)\n cv.assert_called_once_with(mock.ANY, a_ext_net2, a_vrf)\n\n self.mock_ns.reset_mock()\n self._update('routers', router['id'],\n {'router': {'external_gateway_info': {}}})\n a_ext_net2.provided_contract_names = []\n a_ext_net2.consumed_contract_names = []\n dv.assert_called_once_with(mock.ANY, a_ext_net2, a_vrf)\n\n def test_router_gateway(self):\n self._do_test_router_gateway(use_addr_scope=False)\n\n def test_router_gateway_addr_scope(self,):\n self._do_test_router_gateway(use_addr_scope=True)\n\n def test_router_with_unmanaged_external_network(self):\n ext_net1 = self._make_ext_network('ext-net1')\n self._make_subnet(\n self.fmt, {'network': ext_net1}, '100.100.100.1',\n '100.100.100.0/24')\n\n net = self._make_network(self.fmt, 'pvt-net1', 
True)['network']\n sub1 = self._make_subnet(\n self.fmt, {'network': net}, '10.10.1.1',\n '10.10.1.0/24')['subnet']\n\n router = self._make_router(\n self.fmt, net['tenant_id'], 'router1',\n arg_list=self.extension_attributes,\n external_gateway_info={'network_id': ext_net1['id']},\n **{PROV: ['pr-1'],\n CONS: ['co-1']})['router']\n\n self._router_interface_action('add', router['id'], sub1['id'], None)\n self.mock_ns.connect_vrf.assert_not_called()\n\n self._router_interface_action('remove', router['id'], sub1['id'], None)\n self.mock_ns.disconnect_vrf.assert_not_called()\n\n def _do_test_multiple_router(self, use_addr_scope=False):\n cv = self.mock_ns.connect_vrf\n dv = self.mock_ns.disconnect_vrf\n\n ext_nets = []\n a_ext_nets = []\n for x in range(0, 2):\n ext_net = self._make_ext_network('ext-net%d' % x,\n dn='uni/tn-t1/out-l%d/instP-n%d' % (x, x))\n self._make_subnet(\n self.fmt, {'network': ext_net}, '100.%d.100.1' % x,\n '100.%d.100.0/24' % x)\n ext_nets.append(ext_net['id'])\n a_ext_net = aim_resource.ExternalNetwork(\n tenant_name='t1', l3out_name='l%d' % x, name='n%d' % x)\n a_ext_nets.append(a_ext_net)\n\n net = self._make_network(self.fmt, 'pvt-net1', True)['network']\n subnetpool = None\n addr_scope = None\n if use_addr_scope:\n addr_scope = self._make_address_scope(\n self.fmt, 4, name='as1',\n tenant_id=net['tenant_id'])['address_scope']\n subnetpool = self._make_subnetpool(\n self.fmt, ['10.10.0.0/16'],\n name='spool1', address_scope_id=addr_scope['id'],\n tenant_id=net['tenant_id'])['subnetpool']\n sub1 = self._make_subnet(\n self.fmt, {'network': net}, '10.10.1.1',\n '10.10.1.0/24',\n subnetpool_id=subnetpool['id'] if addr_scope else None)['subnet']\n tenant_aname = net['tenant_id'] # REVISIT\n a_vrf = aim_resource.VRF(tenant_name=tenant_aname,\n name='DefaultVRF')\n if use_addr_scope:\n a_vrf.name = addr_scope['id']\n\n routers = []\n contracts = []\n for x in range(0, 2):\n r = self._make_router(\n self.fmt, net['tenant_id'], 'router1')['router']\n if x:\n sub_id = None\n intf_port = self._make_port(self.fmt, net['id'],\n fixed_ips=[{'subnet_id': sub1['id']}])['port']['id']\n else:\n sub_id = sub1['id']\n intf_port = None\n self._router_interface_action('add', r['id'], sub_id,\n intf_port)\n routers.append(r['id'])\n contracts.append(r['id'])\n cv.assert_not_called()\n\n self._add_external_gateway_to_router(routers[0], ext_nets[0])\n a_ext_nets[0].provided_contract_names = [contracts[0]]\n a_ext_nets[0].consumed_contract_names = [contracts[0]]\n cv.assert_called_once_with(mock.ANY, a_ext_nets[0], a_vrf)\n\n self.mock_ns.reset_mock()\n self._add_external_gateway_to_router(routers[1], ext_nets[1])\n a_ext_nets[1].provided_contract_names = [contracts[1]]\n a_ext_nets[1].consumed_contract_names = [contracts[1]]\n cv.assert_called_once_with(mock.ANY, a_ext_nets[1], a_vrf)\n\n self.mock_ns.reset_mock()\n self._router_interface_action('remove', routers[0], sub1['id'], None)\n a_ext_nets[0].provided_contract_names = []\n a_ext_nets[0].consumed_contract_names = []\n dv.assert_called_once_with(mock.ANY, a_ext_nets[0], a_vrf)\n cv.assert_not_called()\n\n self.mock_ns.reset_mock()\n self._router_interface_action('remove', routers[1], sub1['id'], None)\n a_ext_nets[1].provided_contract_names = []\n a_ext_nets[1].consumed_contract_names = []\n dv.assert_called_once_with(mock.ANY, a_ext_nets[1], a_vrf)\n\n def test_multiple_router(self):\n self._do_test_multiple_router(use_addr_scope=False)\n\n def test_multiple_router_addr_scope(self):\n 
self._do_test_multiple_router(use_addr_scope=True)\n\n def test_floatingip(self):\n net1 = self._make_network(self.fmt, 'pvt-net1', True)['network']\n sub1 = self._make_subnet(\n self.fmt, {'network': net1}, '10.10.1.1', '10.10.1.0/24')\n net2 = self._make_network(self.fmt, 'pvt-net1', True)['network']\n sub2 = self._make_subnet(\n self.fmt, {'network': net2}, '10.10.2.1', '10.10.2.0/24')\n\n self._register_agent('host1', AGENT_CONF_OPFLEX)\n p = []\n for sub in [sub1, sub2, sub2]:\n with self.port(subnet=sub) as port:\n port = self._bind_port_to_host(port['port']['id'], 'host1')\n port['port']['dns_name'] = None\n p.append(port['port'])\n\n mock_notif = mock.Mock()\n self.driver.notifier.port_update = mock_notif\n\n with self.floatingip_no_assoc(sub1) as fip1:\n fip1 = fip1['floatingip']\n self.assertEqual('DOWN', fip1['status'])\n mock_notif.assert_not_called()\n\n fip1 = self._update('floatingips', fip1['id'],\n {'floatingip': {'port_id': p[0]['id']}})\n fip1 = fip1['floatingip']\n self.assertEqual('ACTIVE', fip1['status'])\n mock_notif.assert_called_once_with(mock.ANY, p[0])\n\n mock_notif.reset_mock()\n fip1 = self._update('floatingips', fip1['id'],\n {'floatingip': {'port_id': None}})\n fip1 = fip1['floatingip']\n self.assertEqual('DOWN', fip1['status'])\n mock_notif.assert_called_once_with(mock.ANY, p[0])\n\n mock_notif.reset_mock()\n with self.floatingip_with_assoc(port_id=p[1]['id']) as fip2:\n fip2 = fip2['floatingip']\n self.assertEqual('ACTIVE', fip2['status'])\n mock_notif.assert_called_once_with(mock.ANY, p[1])\n\n mock_notif.reset_mock()\n fip2 = self._update('floatingips', fip2['id'],\n {'floatingip': {'port_id': p[2]['id']}})\n fip2 = fip2['floatingip']\n calls = [mock.call(mock.ANY, p[1]), mock.call(mock.ANY, p[2])]\n self.assertEqual(len(calls), mock_notif.call_count)\n mock_notif.has_calls(calls)\n self.assertEqual('ACTIVE', fip2['status'])\n\n mock_notif.reset_mock()\n # fip2 should be deleted at this point\n mock_notif.assert_called_once_with(mock.ANY, p[2])\n\n\nclass TestExternalDistributedNat(TestExternalConnectivityBase,\n ApicAimTestCase):\n nat_type = 'distributed'\n\n\nclass TestExternalEdgeNat(TestExternalConnectivityBase,\n ApicAimTestCase):\n nat_type = 'edge'\n\n\nclass TestExternalNoNat(TestExternalConnectivityBase,\n ApicAimTestCase):\n nat_type = ''\n\n\nclass TestSnatIpAllocation(ApicAimTestCase):\n\n def test_get_alloc_ip(self):\n admin_ctx = context.get_admin_context()\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n sub1 = self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/29')['subnet']\n sub2 = self._make_subnet(\n self.fmt, {'network': ext_net}, '200.100.100.1',\n '200.100.100.0/28')['subnet']\n\n # No SNAT pools -> no allocation possible\n alloc = self.driver.get_or_allocate_snat_ip(admin_ctx, 'h0', ext_net)\n self.assertIsNone(alloc)\n\n # Add one SNAT pool\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: True}})\n\n # Allocate IP and then verify that same IP is returned on get\n for x in range(0, 5):\n alloc = self.driver.get_or_allocate_snat_ip(admin_ctx,\n 'h%d' % x, ext_net)\n self.assertEqual({'host_snat_ip': '100.100.100.%d' % (x + 2),\n 'gateway_ip': '100.100.100.1',\n 'prefixlen': 29}, alloc)\n alloc = self.driver.get_or_allocate_snat_ip(admin_ctx,\n 'h%d' % x, ext_net)\n self.assertEqual({'host_snat_ip': '100.100.100.%d' % (x + 2),\n 'gateway_ip': '100.100.100.1',\n 'prefixlen': 29}, alloc)\n\n # First pool exhausted, no further allocations possible\n 
alloc = self.driver.get_or_allocate_snat_ip(admin_ctx, 'h5', ext_net)\n self.assertIsNone(alloc)\n\n # Add a second pool and try to re-allocate\n self._update('subnets', sub2['id'],\n {'subnet': {SNAT_POOL: True}})\n alloc = self.driver.get_or_allocate_snat_ip(admin_ctx, 'h5', ext_net)\n self.assertEqual({'host_snat_ip': '200.100.100.2',\n 'gateway_ip': '200.100.100.1',\n 'prefixlen': 28}, alloc)\n\n def test_snat_pool_flag_update_no_ip(self):\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n sub1 = self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/29')['subnet']\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: True}})\n\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: False}})\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: True}})\n\n self._delete('subnets', sub1['id'])\n\n def test_snat_pool_flag_update_with_ip(self):\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n sub1 = self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/29')['subnet']\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: True}})\n\n alloc = self.driver.get_or_allocate_snat_ip(\n context.get_admin_context(), 'h0', ext_net)\n self.assertIsNotNone(alloc)\n self._update('subnets', sub1['id'],\n {'subnet': {SNAT_POOL: False}}, expected_code=500)\n self._delete('subnets', sub1['id'], expected_code=409)\n\n def _setup_router_with_ext_net(self):\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/24')\n\n net = self._make_network(self.fmt, 'pvt-net1', True)['network']\n pvt_sub = self._make_subnet(\n self.fmt, {'network': net}, '10.10.1.1',\n '10.10.1.0/24')['subnet']\n\n rtr = self._make_router(\n self.fmt, net['tenant_id'], 'router1',\n external_gateway_info={'network_id': ext_net['id']})['router']\n self._router_interface_action('add', rtr['id'], pvt_sub['id'], None)\n\n sub2 = self._make_subnet(\n self.fmt, {'network': ext_net}, '200.100.100.1',\n '200.100.100.0/29')['subnet']\n self._update('subnets', sub2['id'],\n {'subnet': {SNAT_POOL: True}})\n alloc = self.driver.get_or_allocate_snat_ip(\n context.get_admin_context(), 'h0', ext_net)\n self.assertIsNotNone(alloc)\n\n return sub2, rtr, pvt_sub\n\n def _get_snat_ports(self, snat_subnet):\n snat_ports = self._list('ports',\n query_params=('network_id=%s' % snat_subnet['network_id'])\n )['ports']\n return [p for p in snat_ports\n if p['fixed_ips'][0]['subnet_id'] == snat_subnet['id']]\n\n def test_snat_port_delete_on_router_gw_clear(self):\n snat_sub, rtr, _ = self._setup_router_with_ext_net()\n self.assertTrue(self._get_snat_ports(snat_sub))\n\n self._update('routers', rtr['id'],\n {'router': {'external_gateway_info': None}})\n self.assertFalse(self._get_snat_ports(snat_sub))\n self._update('subnets', snat_sub['id'],\n {'subnet': {SNAT_POOL: False}})\n\n def test_snat_port_delete_on_router_intf_remove(self):\n snat_sub, rtr, pvt_sub = self._setup_router_with_ext_net()\n self.assertTrue(self._get_snat_ports(snat_sub))\n\n self._router_interface_action('remove', rtr['id'], pvt_sub['id'],\n None)\n self.assertFalse(self._get_snat_ports(snat_sub))\n self._update('subnets', snat_sub['id'],\n {'subnet': {SNAT_POOL: False}})\n\n def test_floatingip_alloc_in_snat_pool(self):\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n snat_sub = 
self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/24')['subnet']\n self._update('subnets', snat_sub['id'],\n {'subnet': {SNAT_POOL: True}})\n\n # allocate FIP by subnet\n res = self._create_floatingip(self.fmt, ext_net['id'],\n subnet_id=snat_sub['id'])\n self.assertEqual(400, res.status_int)\n res = self.deserialize(self.fmt, res)\n self.assertEqual('SnatPoolCannotBeUsedForFloatingIp',\n res['NeutronError']['type'])\n\n # allocate FIP by external address\n res = self._make_floatingip(self.fmt, ext_net['id'],\n floating_ip='100.100.100.10',\n http_status=400)\n self.assertEqual('SnatPoolCannotBeUsedForFloatingIp',\n res['NeutronError']['type'])\n\n def test_floatingip_alloc_in_non_snat_pool(self):\n ext_net = self._make_ext_network('ext-net1',\n dn='uni/tn-t1/out-l1/instP-n1')\n snat_sub = self._make_subnet(\n self.fmt, {'network': ext_net}, '100.100.100.1',\n '100.100.100.0/24')['subnet']\n self._update('subnets', snat_sub['id'],\n {'subnet': {SNAT_POOL: True}})\n\n fip_sub1 = self._make_subnet(\n self.fmt, {'network': ext_net}, '200.100.100.1',\n '200.100.100.0/29')['subnet']\n self._make_subnet(\n self.fmt, {'network': ext_net}, '250.100.100.1',\n '250.100.100.0/29')\n\n # FIP with subnet\n fip1 = self._create_floatingip(self.fmt, ext_net['id'],\n subnet_id=fip_sub1['id'])\n self.assertEqual(201, fip1.status_int)\n fip1 = self.deserialize(self.fmt, fip1)['floatingip']\n self.assertEqual('200.100.100.2', fip1['floating_ip_address'])\n\n # FIP with external-address\n fip2 = self._make_floatingip(self.fmt, ext_net['id'],\n floating_ip='250.100.100.3')['floatingip']\n self.assertEqual('250.100.100.3', fip2['floating_ip_address'])\n\n # FIP with no IP specifications - exhaust all available IPs\n ips = netaddr.IPSet(['200.100.100.0/29', '250.100.100.0/29'])\n for x in range(0, 8):\n fip = self._make_floatingip(self.fmt, ext_net['id'])['floatingip']\n self.assertTrue(fip['floating_ip_address'] in ips)\n","sub_path":"gbpservice/neutron/tests/unit/plugins/ml2plus/test_apic_aim.py","file_name":"test_apic_aim.py","file_ext":"py","file_size_in_byte":96984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"94120694","text":"import urllib.request\nimport json\nimport time\n\n\nclass VKApi:\n def __init__(self, token: str):\n self.token = token\n self.messages = Messages(self)\n self.users = Users(self, True, True)\n self.friends = Friends(self, False)\n\n def sendRequest(self, name: str, use_token: bool, args: list):\n req = 'https://api.vk.com/method/' + name + \"?v=5.60\"\n if use_token:\n req += '&' + self.token\n for arg in args:\n req += '&' + arg\n return self.sendRequestByURL(req)\n\n def sendRequestByURL(self, req):\n req = req.replace('\\n', '').replace(' ', '%20')\n res = urllib.request.urlopen(req).read()\n while res.startswith(b'{\"error\"'):\n errcode = json.loads(res.decode('utf8'))['error']['error_code']\n # check if this is \"too many requests per second\" error\n if errcode != 6:\n return 'err'\n time.sleep(0.4)\n res = urllib.request.urlopen(req).read()\n return res.decode('utf8')\n\n\nclass Messages:\n def __init__(self, vk_api):\n self.vk = vk_api\n\n def send(self, user_id, message):\n self.vk.sendRequest('messages.send', True, ['user_id=' + user_id, 'message=' + message])\n\n\nclass Users:\n def __init__(self, vk_api: VKApi, cache_users: bool, use_token: bool):\n self.vk = vk_api\n self.cache_users = cache_users\n self.use_token = use_token\n if cache_users:\n self.users = {}\n\n # def 
get(self, user_id: str):\n # return self.get(int(user_id))\n\n def get(self, user_id: int):\n if self.cache_users:\n if self.users.__contains__(user_id):\n user = self.users[user_id]\n else:\n user = json.loads(self.vk.sendRequest('users.get', self.use_token, ['user_id=' + str(user_id)]))['response'][0]\n self.users[user_id] = user\n else:\n user = json.loads(self.vk.sendRequest('users.get', self.use_token, ['user_id=' + str(user_id)]))['response'][0]\n return user\n\n def load_into_DB(self, user_ids: list):\n users = json.loads(self.vk.sendRequest('users.get', self.use_token, ['user_ids=' + ','.join(user_ids)]))['response']\n for u in users:\n self.users[u['id']] = u\n\n def loadDB(self):\n print('loading users DB from disk...')\n try:\n file = open('users.db', 'r+')\n while file:\n str = file.readline()\n if str:\n u = json.loads(str)\n self.users[u['id']] = u\n else:\n break\n file.close()\n except FileNotFoundError:\n print('DB file not found.')\n\n def saveDB(self):\n print('saving users DB to disk...')\n file = open('users.db', 'w+')\n for u in self.users.values():\n file.write((json.dumps(u) + '\\n'))\n file.close()\n\n\nclass Friends:\n def __init__(self, vk_api: VKApi, use_token: bool):\n self.vk = vk_api\n self.use_token = use_token\n\n def get(self, user_id: int):\n res = self.vk.sendRequest('friends.get', self.use_token, ['user_id=' + str(user_id)])\n # couldn't get friend list\n if res == 'err':\n return []\n return json.loads(res)['response']['items']\n","sub_path":"vkapi.py","file_name":"vkapi.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"372368450","text":"from bs4 import BeautifulSoup as soup\nimport csv\nimport requests\nimport re\n\nbase_url = 'https://locations.kfc.com/'\n\nreq_headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.8',\n 'x-requested-with': 'XMLHttpRequest',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'\n}\n\npage_html = requests.get(base_url, headers = req_headers).text\n\npage_soup = soup(page_html, 'html.parser')\n\nstates = page_soup.findAll('a', {'class': 'Directory-listLink'})\n\nwith open('data/kfc.csv', 'w', newline = '') as f:\n\n writer = csv.writer(f)\n writer.writerow(['street', 'city', 'state', 'zip_code'])\n\nfor state in states:\n \n state_url = base_url + state['href']\n\n page_html = requests.get(state_url, headers = req_headers).text\n\n page_soup = soup(page_html, 'html.parser')\n\n cities = page_soup.findAll('a', {'class': 'Directory-listLink'})\n\n for city in cities:\n \n city_url = base_url + city['href']\n\n page_html = requests.get(city_url, headers = req_headers).text\n\n page_soup = soup(page_html, 'html.parser')\n\n addresses = page_soup.findAll('address', {'class': 'c-address'})\n\n with open('data/kfc.csv', 'a', newline = '') as f:\n \n writer = csv.writer(f)\n\n for address in addresses:\n\n street = address.find('span', {'class':'c-address-street-1'}).text\n city = address.find('span', {'class':'c-address-city'}).text\n state = address.find('abbr', {'class':'c-address-state'}).text\n zip_code = address.find('span', {'class':'c-address-postal-code'}).text\n\n writer.writerow([street, city, state, zip_code])\n\n print(street+', '+city+', '+state+', 
'+zip_code)\n\n","sub_path":"scrape/kfc.py","file_name":"kfc.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"173753014","text":"import pandas as pd\nimport numpy as np\n\n\ndef get_iris_data():\n \"\"\"\n Function for fetching iris data from archive. The function obtains\n the data and puts it in the format needed for examples/perceptron\n\n Returns\n -------\n data: iterable\n Iris data in format for perceptron algorithm (iterable of pairs x,y)\n \"\"\"\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n\n df = pd.read_csv(url, header=None)\n\n x_vals = df.iloc[:, [0, 2]].values\n\n y_vals = df.iloc[:, 4].values\n y_vals = np.where(y_vals == 'Iris-setosa', -1, 1)\n\n size = len(x_vals)\n\n data = []\n\n # randomize ordering of data\n np.random.seed(1)\n random_indices = np.random.permutation(size)\n\n for i in range(size):\n cur_index = random_indices[i]\n data.append((x_vals[cur_index], y_vals[cur_index]))\n\n return data","sub_path":"data/iris_data.py","file_name":"iris_data.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"558032915","text":"from xlrd import *\nfrom xlwt import *\nfrom tempfile import TemporaryFile\nimport time, os.path\n\ndef extract_cobra_data(filename):\n '''\n Given a raw .csv file from the gamma counter (Cobra Quantum 5003; \n PerkinElmer Inc., Waltham, MA, USA), return a list where each entry is\n the split(',') contents of each line from the raw .csv file\n '''\n \n cpm_samples = [] # List to contain .csv contents\n # First entry in list is date raw .csv file created \n cpm_samples.append(time.ctime(os.path.getctime(filename)))\n # Reading the file and grabbing info\n data_file = open(filename, 'r')\n # Default values for list entries\n cpm = 'N/A'\n index = 7 \n line_raw = 'start'\n\n while line_raw != '': # End of raw .csv file \n line_raw = data_file.readline ().strip (\"\\n\")\n line = line_raw.split (\",\") \n cpm = line\n cpm_samples.append(cpm) \n \n return cpm_samples\n\ndef extract_wizard_data(filename):\n '''\n Similiar functionality to extractor() function above, but refactored for\n use with newer gamma counter (Wallac 1480 Wizard 30; PerkinElmer Inc.,\n Turku, Finland)\n '''\n \n cpm_samples = [] # List of W_sample objects\n \n cpm_samples.append (time.ctime(os.path.getctime(filename)))\n # Reading the file and grabbing info\n for line_raw in open (filename, 'r'):\n line = line_raw.split() \n cpm_samples.append (line)\n \n return cpm_samples\n\ndef add_sheet(output_file, samples_list, directory, filename):\n '''\n Given a list of samples (samples_list) and open excel file (output_file),\n add a new sheet to output_file. 
New sheet contains samples_list,\n with each list item written into its own row (in order).\n Sheet name is the filename\n ''' \n # Set the style for specific cells that will delineate data on the page\n borders = Borders()\n borders.bottom = Borders.THIN\n style_bot = XFStyle()\n style_bot.borders = borders \n \n sheet_name = filename.strip(\".txt\")\n output_sheet = output_file.add_sheet(sheet_name, cell_overwrite_ok=True)\n\n # Writing the headers in the sheet with the extracted data\n # Data is present in a 2D where each list entry should be its own cell\n for x in range (1, len(samples_list)):\n for y in range (0, len(samples_list[x])):\n try:\n output_sheet.write(x-1, y, float(samples_list[x][y]))\n except:\n output_sheet.write(x-1, y, samples_list[x][y])\n \n output_sheet.write (\n len (samples_list) - 1 , 0, \"Data collected on \" + samples_list [0]) \n output_sheet.write (\n len (samples_list), 0,\n \"Data extracted on \" + time.strftime (\"%a %b %d %H:%M:%S %Y\"))\n \n","sub_path":"GammaCounter.py","file_name":"GammaCounter.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"445562240","text":"from multiprocessing import Process, Queue, Manager\nfrom scipy.optimize import minimize\nimport pandas_datareader.data as web\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\nimport os\nimport itertools\nimport time\nfrom operator import itemgetter\nfrom dateutil.relativedelta import *\nimport math\n\ndef computeDataPoints(weights,returns_annual, cov_annual):\n returns = np.dot(weights, returns_annual)\n volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))\n sharpe = (returns -0.03)/ volatility\n return returns,volatility,sharpe\n\ndef sharpeRatio(weights, *args):\n returns_annual, cov_annual = args[0],args[1]\n returns = np.dot(weights, returns_annual)\n volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))\n sharpe = (returns -0.03)/ volatility\n return 1/sharpe\n\ndef constraint1(weights):\n return weights.sum() - 1\n\ndef minimizerThread(q, threadIndex, table, resultList):\n while q.empty():\n time.sleep(0.5)\n maxResult = (0,0,0,0,0)\n while not q.empty():\n tickers = q.get()\n\n cIndex = []\n for co in tickers:\n cIndex.append(table.columns.get_loc(co))\n newTable = table.iloc[:, cIndex]\n\n #if q.qsize() % 100 == 0 :\n # print(\"Thread{} - Processing #{} of Queue{} length: {}\".format(threadIndex,resIndex,threadIndex,q.qsize()))\n num_assets = len(tickers)\n returns_daily = newTable.pct_change()\n returns_annual = returns_daily.mean() * 250\n\n # get daily and covariance of returns of the stock\n cov_daily = returns_daily.cov()\n cov_annual = cov_daily * 250\n\n # initial guess\n # set random seed for reproduction's sake\n np.random.seed(101)\n weights_0 = np.random.random(num_assets)\n weights_0 /= np.sum(weights_0)\n\n #arguments for function to minimize\n additional = (returns_annual, cov_annual)\n #bounds\n b = (0.01, 1.0)\n bnds = np.full((num_assets, 2), b)\n #set constraint equality function (sum of all weights should be 1)\n con1 = {'type': 'eq', 'fun': constraint1}\n cons = ([con1])\n\n #start optimiser\n solution = minimize(sharpeRatio, weights_0, additional, method='SLSQP', bounds=bnds, constraints=cons,\n tol=0.0001)\n #get results\n final_weights = solution.x\n\n #compute datapoint from weights\n returns, volatility, sharpe = computeDataPoints(final_weights, returns_annual, 
cov_annual)\n\n #put data in resultlist\n if(sharpe > maxResult[4]):\n maxResult=(tickers,final_weights,returns,volatility,sharpe)\n\n #mark task as done\n #q.task_done()\n\n resultList[threadIndex] = maxResult\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n globalStopWatchStart = datetime.now()\n prevOptPortfolio = None\n SymbolsByMonth=[]\n WeightsByMonth=[]\n #Definitions\n #select the tickers of the equities you are interested in\n symbolsBE = [\"ABI.BR\", \"ABO.BR\", \"ACCB.BR\", \"ACKB.BR\", \"AED.BR\", \"AGS.BR\", \"AGFB.BR\", \"ANT.BR\", \"ARGX.BR\", \"ASC.BR\",\n \"ASIT.BR\", \"GEN.BR\", \"ATEB.BR\", \"BALTA.BR\", \"BBV.BR\", \"SANTA.BR\", \"BANI.BR\", \"BAR.BR\", \"BAS.BR\",\n \"BEAB.BR\", \"BEFB.BR\", \"BEKB.BR\", \"BELR.BR\", \"BELU.BR\", \"BCART.BR\", \"BOEI.BR\", \"BOTHE.BR\", \"BPOST.BR\",\n \"BNB.BR\", \"BREB.BR\", \"CAMB.BR\", \"CAND.BR\", \"CPINV.BR\", \"CYAD.BR\", \"CENER.BR\", \"CFEB.BR\", \"CHTEX.BR\",\n \"COMB.BR\", \"CIS.BR\", \"COBH.BR\", \"COFB.BR\", \"COFP2.BR\", \"COLR.BR\", \"CONN.BR\", \"OPTI.BR\", \"DIE.BR\",\n \"DECB.BR\", \"DTEL.BR\", \"DEXB.BR\", \"DIEG.BR\", \"DISL.BR\", \"EON.BR\", \"ECONB.BR\", \"ELI.BR\", \"ALEMK.BR\",\n \"ENI.BR\", \"EURN.BR\", \"ALPBS.BR\", \"ALEVA.BR\", \"EVS.BR\", \"EXM.BR\", \"FAGR.BR\", \"FLEX.BR\", \"FLOB.BR\",\n \"FLUX.BR\", \"FNG.BR\", \"FOU.BR\", \"GBLB.BR\", \"GENK.BR\", \"GIMB.BR\", \"GLOG.BR\", \"GREEN.BR\", \"GROWN.BR\",\n \"HAMO.BR\", \"HOMI.BR\", \"IBAB.BR\", \"IEP.BR\", \"MCC.BR\", \"IMMOU.BR\", \"IMMO.BR\", \"INCO.BR\", \"INTO.BR\",\n \"JEN.BR\", \"KBC.BR\", \"KBCA.BR\", \"KEYW.BR\", \"KIN.BR\", \"LEAS.BR\", \"LOTB.BR\", \"LUXA.BR\", \"MDXH.BR\",\n \"MELE.BR\", \"MSF.BR\", \"MIKO.BR\", \"MITRA.BR\", \"MONT.BR\", \"MOP.BR\", \"MOUR.BR\", \"MEURV.BR\", \"NEU.BR\",\n \"NEWT.BR\", \"NYR.BR\", \"ONTEX.BR\", \"OBEL.BR\", \"OXUR.BR\", \"PAY.BR\", \"PIC.BR\", \"PROX.BR\", \"QRF.BR\",\n \"QFG.BR\", \"REC.BR\", \"REI.BR\", \"RES.BR\", \"RET.BR\", \"ENGB.BR\", \"ROU.BR\", \"SAB.BR\", \"SCHD.BR\", \"SEQUA.BR\",\n \"SHUR.BR\", \"SIA.BR\", \"SIOE.BR\", \"SIP.BR\", \"SMAR.BR\", \"SOF.BR\", \"SOFT.BR\", \"SOLV.BR\", \"SOLB.BR\",\n \"SPA.BR\", \"SUCR.BR\", \"TIT.BR\", \"TFA.BR\", \"TNET.BR\", \"TERB.BR\", \"TESB.BR\", \"TEXF.BR\", \"TINC.BR\",\n \"TISN.BR\", \"TUB.BR\", \"UNI.BR\", \"PNSB.BR\", \"UCB.BR\", \"UMI.BR\", \"VAN.BR\", \"VASTB.BR\", \"VGP.BR\", \"VIO.BR\",\n \"VWA.BR\", \"VWAP.BR\", \"WEB.BR\", \"WDP.BR\", \"WEHB.BR\", \"WOLE.BR\", \"WOLS.BR\", \"XIOR.BR\", \"ZENT.BR\",\n \"ZEN.BR\"]\n\n symbolsBEL20 = [\"ABI.BR\", \"ACKB.BR\", \"APAM.AS\", \"ARGX.BR\", \"BAR.BR\", \"COFB.BR\", \"COLR.BR\", \"GLPG.AS\", \"GBLB.BR\",\n \"INGA.AS\", \"KBC.BR\", \"ONTEX.BR\", \"PROX.BR\", \"SOF.BR\", \"SOLB.BR\", \"TNET.BR\", \"UCB.BR\", \"UMI.BR\",\n \"WDP.BR\"]\n\n symbolsSET50 = ['SUPER.BK','TRITN.BK','TPIPL.BK','MAX.BK','NUSA.BK','TFG.BK','EVER.BK','AQUA.BK','PF.BK','BLAND.BK','EFORL.BK','SIRI.BK','JSP.BK','BEM.BK','UPA.BK','KTC.BK','JAS.BK','PSTC.BK','CGD.BK','ML.BK','GEL.BK','MACO.BK','WHA.BK','RML.BK','RWI.BK','NMG.BK','TMB.BK','ACC.BK','SGP.BK','TRUE.BK','IRPC.BK','QH.BK','IEC.BK','RS.BK','TWZ.BK','T.BK','GUNKUL.BK','ORI.BK','CHG.BK','ANAN.BK','BSM.BK','TRC.BK','CHO.BK','SPALI.BK','BWG.BK','ITD.BK','TPIPP.BK','NEWS.BK','STPI.BK','NWR.BK']\n\n symbolsEUROSTOXX30 = ['BMW.DE','DPW.DE','G.MI','INGA.AS','MC.PA','PHIA.AS','AIR.PA','ASML.AS','ENEL.MI','ABI.BR','ORA.PA','ENGI.PA','BAYN.DE','SAN.PA','BN.PA','ITX.MC','BBVA.MC','BNP.PA','ENI.MI','DBK.DE','ALV.DE','AI.PA','SAF.PA','IBE.MC','DTE.DE','CA.PA','SU.PA','OR.PA']\n\n\n symbolsAEX = 
['INGA.AS','ABN.AS','PHIA.AS','WKL.AS','RDSA.AS','ASML.AS','KPN.AS','MT.AS','VPK.AS','AGN.AS','RAND.AS','NN.AS','REN.AS','HEIA.AS','IMCD.AS','AALB.AS','DSM.AS','UNA.AS','ASRNL.AS','URW.AS','AD.AS','ADYEN.AS','AKZA.AS','GLPG.AS','TKWY.AS']\n\n symbolsCAC = ['SGO.PA','MC.PA','AIR.PA','CAP.PA','HO.PA','LR.PA','ORA.PA','ENGI.PA','KER.PA','SAN.PA','EN.PA','BN.PA','FP.PA','ATO.PA','BNP.PA','UG.PA','SU.PA','VIV.PA','OR.PA','ML.PA','AI.PA','FTI.PA','DG.PA','VIE.PA','RI.PA','ACA.PA','CA.PA','GLE.PA','SW.PA','AC.PA']\n\n symbolsDAX = ['BMW.DE','DAI.DE','MRK.DE','DPW.DE','FRE.DE','CON.DE','VOW3.DE','BAYN.DE','LIN.DE','SAP.DE','MUV2.DE','DB1.DE','EOAN.DE','HEN3.DE','DBK.DE','ALV.DE','VNA.DE','HEI.DE','ADS.DE','BAS.DE','RWE.DE','DTE.DE','SIE.DE','BEI.DE','1COV.DE','FME.DE','IFX.DE','TKA.DE','LHA.DE','WDI.DE']\n\n symbolsIBEX = ['MRL.MC','GRF.MC','ELE.MC','REE.MC','FER.MC','AMS.MC','ENC.MC','ITX.MC','BBVA.MC','ENG.MC','IAG.MC','ANA.MC','MAP.MC','ACS.MC','COL.MC','MTS.MC','ACX.MC','TL5.MC','SAN.MC','AENA.MC','SAB.MC','BKT.MC','NTGY.MC','TEF.MC','VIS.MC','IBE.MC','CLNX.MC','CABK.MC','SGRE.MC','MEL.MC']\n\n symbolsATX = ['SBO.VI','OMV.VI','FACC.VI','VER.VI','VOE.VI','TKA.VI','CAI.VI','VIG.VI','ATS.VI','BG.VI','SPI.VI','EBS.VI','RBI.VI','ANDR.VI','WIE.VI','POST.VI','IIA.VI','UQA.VI','DOC.VI','LNZ.VI']\n\n symbolsOMX = ['ABB.ST','TELIA.ST','NDA-SE.ST','SEB-A.ST','HEXA-B.ST','AZN.ST','SKF-B.ST','SSAB-A.ST','SHB-A.ST','TEL2-B.ST','SAND.ST','SWED-A.ST','ERIC-B.ST','ATCO-A.ST','INVE-B.ST','ATCO-B.ST','ASSA-B.ST','KINV-B.ST','VOLV-B.ST','ELUX-B.ST','SCA-B.ST','SWMA.ST','SKA-B.ST','ALIV-SDB.ST','ALFA.ST','GETI-B.ST','ESSITY-B.ST','BOL.ST-','SECU-B.ST','HM-B.ST']\n\n symbolsFTFE = ['SSE.L','RTO.L','VOD.L','MNG.L','RMV.L','BA.L','CCH.L','EXPN.L','RB.L','PSN.L','CNA.L','MGGT.L','SMT.L','RDSA.L','CPG.L','SMIN.L','RDSB.L','TUI.L','CCL.L','AUTO.L','SPX.L','SDR.L','BATS.L','ANTO.L','AHT.L','TSCO.L','PRU.L','NMC.L','RR.L','STJ.L']\n\n symbolsDOWJ = ['XOM','KO','DIS','TRV','CSCO','MSFT','GS','CVX','UNH','CAT','JPM','MRK','VZ','WMT','PG','JNJ','AXP','UTX','INTC','DOW','WBA','MMM','NKE','PFE','IBM','V','HD','MCD','AAPL','BA']\n\n symbolsNASDAQ = ['FNJN','FARM','FNKO','NVCR','NMRK','SLAB','AGLE','EXAS','FRTA','CVCO','SPHS','OPTT','BBQ','TSLA','KLXE','IBKC','FARO','TWST','SCOR','FNLC','FEYE','ATEC','BBI','POPE','NVCN','SDC','FRSX','SCON','NMRD','IFRX']\n\n symbolsList = symbolsFTFE + symbolsDOWJ + symbolsNASDAQ\n\n symbols = np.unique(symbolsList)\n\n #start and end date for the training data\n start = datetime(2014, 11, 28)\n end = datetime(2019, 11, 28)\n\n #number of threads to start\n num_threads = 16\n\n #Fetch data\n table = None\n\n numberOfMonthsPerExpands = 12\n numberOfExpandsPerYear = math.floor(12/numberOfMonthsPerExpands)\n for _ in range(0,numberOfExpandsPerYear):\n\n startTrainingData = start.strftime(\"%m/%d/%Y\")\n endTrainingData = end.strftime(\"%m/%d/%Y\")\n\n # start the clock for the processing time\n startStopWatch = time.time()\n\n\n #fetch data\n datasets = []\n print(\"Fetching Equities data for interval: {} - {}\".format(startTrainingData,endTrainingData))\n for symbol in symbols:\n try:\n f = web.DataReader(symbol, 'yahoo', start, end)\n f['ticker'] = np.full(f['Adj Close'].count(), symbol)\n f = f.drop([\"High\", \"Low\", \"Open\", \"Volume\", \"Close\"], axis=1)\n if f['Adj Close'].count() >= 800:\n datasets.append(f)\n else:\n print(\"{} Failed, not enough datapoints {}\".format(symbol,f['Adj Close'].count()))\n except:\n print(\"Something went wrong with {}\".format(symbol))\n pass\n data = 
pd.concat(datasets)\n table = data.pivot(columns='ticker')\n table.to_pickle(\"yahooDataSet.pkl\")\n table.columns = table.columns.droplevel()\n for col in table.columns:\n table.rename(columns={col: col.replace(\".\", \"_\")}, inplace=True)\n #dealing with NaN situation\n table.fillna(method='ffill')\n table.fillna(method='bfill')\n\n\n #build all combinations of 5 equities\n print(\"Building combinations of tickers\")\n symbolCombinations = itertools.combinations(table.columns, 5)\n\n\n #create new queue\n #initialisation of the result list\n manager = Manager()\n Queues = []\n resultList = manager.list()\n for i in range(num_threads):\n resultList.append(None)\n Queues.append(Queue(maxsize=0))\n\n\n\n\n\n\n\n queueIndex = 0\n #loop through all symbol combinations we generated\n for comb in symbolCombinations:\n #Create the queue\n Queues[queueIndex].put(comb)\n queueIndex =queueIndex+1\n if(queueIndex==num_threads):\n queueIndex = 0\n\n\n\n\n\n\n # Start the threads; they will wait until they get data via the queue\n threads = []\n for i in range(num_threads):\n worker = Process(target=minimizerThread, args=(Queues[i], i, table, resultList))\n worker.start()\n threads.append(worker)\n\n print(\"Waiting for data to be processed by our multiprocessing system...\")\n\n\n\n #wait untill all threads are done\n for worker in threads:\n worker.join()\n\n # take the best of all the threads\n resultListClean = [x if x != None else [0,0,0,0,0] for x in resultList]\n try:\n optimalPortfolio = max(resultListClean,key=itemgetter(4))\n optPortTickers, optPortFinalWeights, optPortReturns, optPortVolatility, optPortSharpe = optimalPortfolio\n # compare how previous optimal portfolio does with current dataset\n if prevOptPortfolio == None: # this is the first portfolio optimalisation\n prevOptPortfolio = (optPortTickers, optPortFinalWeights)\n prevOptPortTickers, prevOptPortFinalWeights = None, None\n else:\n # recalculate previous portfolio with current trainingset\n prevOptPortTickers, prevOptPortFinalWeights = prevOptPortfolio\n cIndex = []\n for co in prevOptPortTickers:\n cIndex.append(table.columns.get_loc(co))\n # Create a subset of the training data that just has the equities in the combination\n newTable = table.iloc[:, cIndex]\n num_assets = len(prevOptPortTickers)\n # calculate daily and annual returns of the stocks\n returns_daily = newTable.pct_change()\n returns_annual = returns_daily.mean() * 250\n\n # get daily and covariance of returns of the stock\n cov_daily = returns_daily.cov()\n cov_annual = cov_daily * 250\n\n # calculate returns volatility sharpe ratio\n prevReturns = np.dot(prevOptPortFinalWeights, returns_annual)\n prevVolatility = np.sqrt(np.dot(prevOptPortFinalWeights.T, np.dot(cov_annual, prevOptPortFinalWeights)))\n prevSharpe = (prevReturns - 0.03) / prevVolatility\n\n # store current portfolio for comparison next run\n prevOptPortfolio = (optPortTickers, optPortFinalWeights)\n except:\n print(resultList.index(None))\n endStopWatch = time.time()\n\n #report data\n print(\"----------------------------------------------------------------\")\n print(\"----------------------------------------------------------------\")\n print(\"Training data interval: {} - {}\".format(startTrainingData, endTrainingData))\n\n print(\"Time to completion:{}\".format(endStopWatch-startStopWatch))\n\n print(\"Max Sharpe Ratio: {} for Portofolio {} with respectively weights {}\".format(optPortSharpe,optPortTickers,optPortFinalWeights))\n print(\"Volatility: {} and return: 
{}\".format(optPortVolatility,optPortReturns))\n if prevOptPortTickers != None:\n print(\"Previous portfolio gave us:\")\n print(\n \"Max Sharpe Ratio: {} for Portofolio {} with respectively weights {}\".format(prevSharpe,\n prevOptPortTickers,\n prevOptPortFinalWeights))\n print(\"Volatility: {} and return: {}\".format(prevVolatility, prevReturns))\n print(\"----------------------------------------------------------------\")\n print(\"----------------------------------------------------------------\")\n SymbolsByMonth.append(optPortTickers)\n WeightsByMonth.append(optPortFinalWeights.tolist())\n # Expend dataframe with one\n end = end + relativedelta(months=numberOfMonthsPerExpands)\n\n globalStopWatchEnd = datetime.now()\n with open(\"MPT_results.txt\", 'a') as out:\n out.write(\"Tickers used :{}\\n\".format(symbols))\n out.write(\"Time to full completion:{}\\n\".format(globalStopWatchEnd-globalStopWatchStart))\n out.write(\"symbolsByMonth={}\\n\".format(SymbolsByMonth))\n out.write(\"weightsByMonth={}\\n\".format(WeightsByMonth))\n","sub_path":"MPT_optimizer_Multiprocess_worldwide.py","file_name":"MPT_optimizer_Multiprocess_worldwide.py","file_ext":"py","file_size_in_byte":15349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168891501","text":"# from IPython import get_ipython\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport math\nimport keras\n\nfrom keras.models import load_model\n\nfrom keras.datasets import cifar10\nimport numpy as np\n\ndef plot_digits(*args):\n args = [x.squeeze() for x in args]\n n = min([x.shape[0] for x in args])\n\n plt.figure(figsize=(2*n, 2*len(args)))\n for j in range(n):\n for i in range(len(args)):\n ax = plt.subplot(len(args), n, i*n + j + 1)\n plt.imshow(args[i][j])\n # plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n plt.show()\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test .astype('float32') / 255.\nx_train = np.reshape(x_train, (len(x_train), 32, 32, 3))\nx_test = np.reshape(x_test, (len(x_test), 32, 32, 3))\n\nautoencoder = load_model('neuro-example/autoencoder/cifar_autoencoder4.h5')\n\nkeras.utils.print_summary(autoencoder)\n\nprint(autoencoder.get_config())\n\nn = 10\n\nimgs = x_test[:n]\n\ndecoded_imgs = autoencoder.predict(imgs, batch_size=n)\n\nplot_digits(imgs, decoded_imgs)\n","sub_path":"neuro-example/autoencoder/conv-cifar-experiment.py","file_name":"conv-cifar-experiment.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"104839626","text":"import mock\nimport pytz\nfrom datetime import datetime\nfrom django.test import TestCase\n\nfrom django.contrib.auth.models import User\nfrom mysite.settings import (TIME_ZONE,\n )\nfrom mediaviewer.models.loginevent import LoginEvent\n\n@mock.patch('mediaviewer.models.loginevent.MAXIMUM_NUMBER_OF_STORED_LOGIN_EVENTS', 3)\nclass TestNew(TestCase):\n def setUp(self):\n self.ref_time = datetime.now(pytz.timezone(TIME_ZONE))\n self.objects_patcher = mock.patch('mediaviewer.models.loginevent.LoginEvent.objects')\n self.mock_objects = self.objects_patcher.start()\n self.mock_ordered_query = mock.MagicMock()\n self.mock_first = mock.MagicMock()\n self.mock_ordered_query.first.return_value = self.mock_first\n self.mock_objects.order_by.return_value = 
self.mock_ordered_query\n self.addCleanup(self.objects_patcher.stop)\n\n self.save_patcher = mock.patch('mediaviewer.models.loginevent.LoginEvent.save')\n self.mock_save = self.save_patcher.start()\n self.addCleanup(self.save_patcher.stop)\n\n self.datetime_patcher = mock.patch('mediaviewer.models.loginevent.datetime')\n self.mock_datetime = self.datetime_patcher.start()\n self.mock_datetime.now.return_value = self.ref_time\n self.addCleanup(self.datetime_patcher.stop)\n\n self.user = User()\n\n def test_less_stored_events(self):\n self.mock_objects.count.return_value = 1\n event = LoginEvent.new(self.user)\n\n self.assertEqual(event.user, self.user)\n self.assertEqual(event.datecreated, self.ref_time)\n self.mock_save.assert_called_once_with()\n self.assertFalse(self.mock_first.called)\n\n def test_more_stored_events(self):\n self.mock_objects.count.return_value = 4\n event = LoginEvent.new(self.user)\n\n self.assertEqual(event.user, self.user)\n self.assertEqual(event.datecreated, self.ref_time)\n self.mock_save.assert_called_once_with()\n self.mock_first.delete.assert_called_once_with()\n","sub_path":"mediaviewer/tests/models/test_loginevent.py","file_name":"test_loginevent.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168148369","text":"#!/usr/bin/env python\n\n# Copyright 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport subprocess\nimport re\n\n# this should only be run with python3\nimport sys\nif sys.version_info[0] < 3:\n print('ERROR: must run with python3')\n sys.exit(1)\n\nfrom setuptools import setup, find_packages, Extension\n\ntcf_root_dir = os.environ.get('TCF_HOME', '../../../../')\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nenclave_dir = os.path.realpath(os.path.join(tcf_root_dir, 'tcs/core/tcs_trusted_worker_manager/enclave'))\n\nlog_dir = os.path.join(tcf_root_dir, \"logs\")\n\nopenssl_cflags = subprocess.check_output(['pkg-config', 'openssl', '--cflags']).decode('ascii').strip().split()\nopenssl_include_dirs = list(\n filter(None, re.split('\\s*-I', subprocess.check_output(['pkg-config', 'openssl', '--cflags-only-I']).decode('ascii').strip())))\nopenssl_libs = list(\n filter(None, re.split('\\s*-l', subprocess.check_output(['pkg-config', 'openssl', '--libs-only-l']).decode('ascii').strip())))\nopenssl_lib_dirs = list(\n filter(None, re.split('\\s*-L', subprocess.check_output(['pkg-config', 'openssl', '--libs-only-L']).decode('ascii').strip())))\n\nmodule_path = 'tcs/core/tcs_trusted_worker_manager/enclave_wrapper'\nmodule_src_path = os.path.join(tcf_root_dir, module_path)\n\nsgx_mode_env = os.environ.get('SGX_MODE', None)\nif not sgx_mode_env or (sgx_mode_env != \"SIM\" and sgx_mode_env != \"HW\"):\n print(\"error: SGX_MODE value must be HW or SIM, current value is: \", sgx_mode_env)\n sys.exit(2)\n\ndata_files = [\n (log_dir, []),\n (\"tcs/core/tcs_trusted_worker_manager/enclave_wrapper\", 
[module_src_path + \"/tcf_enclave_internal.py\"]),\n ('lib', [ os.path.join(enclave_dir, 'deps/bin/libtcf-enclave.signed.so')]),\n]\n\next_deps = [\n ('lib', [ os.path.join(script_dir, '../../core/enclave/deps/bin/libtcf-enclave.signed.so')])\n]\n\n## -----------------------------------------------------------------\n## set up the enclave\n## -----------------------------------------------------------------\ndebug_flag = os.environ.get('TCF_DEBUG_BUILD',0)\n\ncompile_args = [\n '-std=c++11',\n '-Wno-switch',\n '-Wno-unused-function',\n '-Wno-unused-variable',\n]\n\n\n# by default the extension class adds '-O2' to the compile\n# flags, this lets us override since these are appended to\n# the compilation switches\nif debug_flag :\n compile_args += ['-g']\n\ninclude_dirs = [\n module_src_path,\n os.path.join(tcf_root_dir, 'tcs/core/common/crypto'),\n os.path.join(tcf_root_dir, 'tcs/core/common'),\n os.path.join(module_src_path, 'build'),\n os.path.join(os.environ['SGX_SDK'],\"include\"),\n os.path.join(tcf_root_dir, 'tcs/core/common/packages/db_store'),\n os.path.join(tcf_root_dir, 'tcs/core/common/packages/base64')\n] + openssl_include_dirs\n\nlibrary_dirs = [\n os.path.join(tcf_root_dir, \"tcs/core/tcs_trusted_worker_manager/enclave/build/lib\"),\n os.path.join(tcf_root_dir, \"tcs/core/common/build\"),\n os.path.join(os.environ['SGX_SDK'], 'lib64'),\n os.path.join(os.environ['SGX_SSL'], 'lib64'),\n os.path.join(os.environ['SGX_SSL'], 'lib64', 'release')\n] + openssl_lib_dirs\n\nlibraries = [\n 'utcf-common',\n 'tcf-enclave',\n 'utcf-lmdb-store',\n 'lmdb'\n] + openssl_libs\n\nif sgx_mode_env == \"HW\":\n libraries.append('sgx_urts')\n libraries.append('sgx_uae_service')\n SGX_SIMULATOR_value = '0'\nif sgx_mode_env == \"SIM\":\n libraries.append('sgx_urts_sim')\n libraries.append('sgx_uae_service_sim')\n SGX_SIMULATOR_value = '1'\n\nlibraries.append('sgx_usgxssl')\nlibraries = libraries + openssl_libs\n\nmodule_files = [\n os.path.join(module_src_path, 'tcf_enclave_internal.i'),\n os.path.join(module_src_path, 'swig_utils.cpp'),\n os.path.join(module_src_path, 'ocall.cpp'),\n os.path.join(module_src_path, 'base.cpp'),\n os.path.join(module_src_path, 'enclave_u.c'),\n os.path.join(module_src_path, 'log.cpp'),\n \n os.path.join(module_src_path, 'work_order.cpp'),\n os.path.join(module_src_path, 'work_order_wrap.cpp'),\n os.path.join(module_src_path, 'signup.cpp'),\n os.path.join(module_src_path, 'enclave_queue.cpp'),\n os.path.join(module_src_path, 'enclave.cpp'),\n os.path.join(module_src_path, 'enclave_info.cpp'),\n os.path.join(module_src_path, 'signup_info.cpp'),\n os.path.join(module_src_path, 'db_store.cpp'),\n os.path.join(tcf_root_dir, 'tcs/core/common/packages/db_store/lmdb_store.cpp')\n]\n\ncrypto_modulefiles = [\n \"crypto/crypto.i\"\n]\n\ncrypto_module = Extension(\n 'crypto._crypto',\n crypto_modulefiles,\n swig_opts=['-c++'] + openssl_cflags + ['-I%s' % i for i in include_dirs],\n extra_compile_args=compile_args,\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries)\n\n\nenclave_module = Extension(\n 'tcs.core.tcs_trusted_worker_manager.enclave_wrapper._tcf_enclave_internal',\n module_files,\n swig_opts = ['-c++', '-threads'],\n extra_compile_args = compile_args,\n libraries = libraries,\n include_dirs = include_dirs,\n library_dirs = library_dirs,\n define_macros = [\n ('_UNTRUSTED_', 1),\n ('TCF_DEBUG_BUILD', debug_flag),\n ('SGX_SIMULATOR', SGX_SIMULATOR_value)\n ],\n undef_macros = ['NDEBUG', 'EDEBUG']\n )\n\n## 
-----------------------------------------------------------------\n## -----------------------------------------------------------------\nversion = subprocess.check_output(\n os.path.join(tcf_root_dir, 'bin/get_version')).decode('ascii').strip()\n\nsetup(name='tcf_crypto_library',\n version=version,\n description='Common library for Trusted Compute Framework',\n author='Intel Labs',\n packages=find_packages(),\n install_requires=[],\n data_files=[],\n #namespace_packages=[''],\n ext_modules=[crypto_module],\n entry_points={}\n )\n\nsetup(name='tcf_eservice',\n version = version,\n description = 'Trusted Compute Framework SGX Worker',\n author = 'Hyperledger',\n url = 'http://www.intel.com',\n packages = find_packages(),\n #namespace_packages=[''],\n install_requires = [\n 'colorlog',\n 'requests',\n 'toml',\n 'twisted'\n ],\n ext_modules = [\n enclave_module\n ],\n data_files = data_files,\n entry_points = {}\n)\n","sub_path":"tcs/core/common/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"405689142","text":"import keras\nfrom keras.layers import Input, Dense\nfrom keras.models import Model,Sequential\nfrom keras.callbacks import TensorBoard\nimport numpy as np\nimport readers\nfrom tensorflow import set_random_seed\nimport os\n\n\ndef seedy(s):\n np.random.seed(s)\n set_random_seed(s)\n\n\nclass AutoEncoder:\n def __init__(self, data, encoding_dim):\n self.encoding_dim = encoding_dim\n # r = lambda: np.random.randint(1, 3)\n # self.x = np.array([[r(), r(), r()] for _ in range(1000)])\n self.x = data\n print(self.x)\n\n def _encoder(self):\n #inputs = Input(shape=(self.x[0].shape))\n encoded_l1 = Dense(self.encoding_dim+4, activation='relu',input_shape=self.x[0].shape)\n model = Sequential()\n model.add(encoded_l1)\n encoded_l2 = Dense(self.encoding_dim+2,activation='relu')\n model.add(encoded_l2)\n encoded_l3 =Dense(self.encoding_dim,activation='relu')\n model.add(encoded_l3)\n\n\n self.encoder = model\n return model\n\n def _decoder(self):\n #inputs = Input(shape=(self.encoding_dim,))\n encoded_l1 = Dense(7, activation='relu',input_shape=(self.encoding_dim,))\n model = Sequential()\n model.add(encoded_l1)\n encoded_l2 = Dense(9, activation='relu')\n model.add(encoded_l2)\n encoded_l3 = Dense(11, activation='relu')\n model.add(encoded_l3)\n\n self.decoder = model\n return model\n\n def encoder_decoder(self):\n ec = self._encoder()\n dc = self._decoder()\n\n inputs = Input(shape=self.x[0].shape)\n ec_out = ec(inputs)\n dc_out = dc(ec_out)\n model = Model(inputs, dc_out)\n\n self.model = model\n return model\n\n def fit(self, batch_size=10, epochs=300):\n self.model.compile(optimizer='sgd', loss='mse')\n log_dir = './log/'\n tbCallBack = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)\n\n self.model.fit(self.x, self.x,\n epochs=epochs,\n batch_size=batch_size,\n callbacks=[tbCallBack])\n\n def save(self):\n if not os.path.exists(r'./weights'):\n os.mkdir(r'./weights')\n else:\n self.encoder.save(r'./weights/encoder_weights.h5')\n self.decoder.save(r'./weights/decoder_weights.h5')\n self.model.save(r'./weights/ae_weights.h5')\n\n\n# if __name__ == '__main__':\n# (X, Y), feature_names = readers.read_dataset(screening='')\n# seedy(2)\n# ae = AutoEncoder(encoding_dim=16, data=X)\n# ae.encoder_decoder()\n# ae.fit(batch_size=20, epochs=1000)\n# 
ae.save()\n","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"282259475","text":"\"\"\"\nThis module provides views for the Item Catalog App project.\n\"\"\"\n\n#pylint: disable=import-error,no-member,unused-variable\n\nfrom flask import flash, jsonify, make_response\nfrom flask import redirect, render_template, request, url_for\n\nfrom flask import session as login_session\n\nfrom catalog import app\nfrom catalog.models import Category, CategoryItem, User\nfrom catalog.database import db_session\nfrom catalog.database import get_all_objects_of_type, get_last_x_items_of_type, get_all_items\n\nfrom oauth2client.client import flow_from_clientsecrets, OAuth2Credentials\nfrom oauth2client.client import FlowExchangeError\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport httplib2, json, random, requests, string\n\nFACEBOOK_JSON = 'catalog/client_secrets_facebook.json'\nGOOGLE_JSON = 'catalog/client_secrets_google.json'\nCLIENT_ID_GOOGLE = json.loads(open(GOOGLE_JSON, 'r').read())['web']['client_id']\nAPP_ID_FACEBOOK = json.loads(open(FACEBOOK_JSON, 'r').read())['web']['app_id']\nAPP_SECRET_FACEBOOK = json.loads(open(FACEBOOK_JSON, 'r').read())['web']['app_secret']\nAPPLICATION_NAME = 'Catalog App'\n\n\n#-----------------------------------------------------------------------\n# Views\n#-----------------------------------------------------------------------\n\n# Main page\n@app.route('/', methods=['GET'])\n@app.route('/catalog/', methods=['GET'])\ndef index():\n \"\"\"\n Function to return a page listing all categories and most recent items.\n \"\"\"\n\n set_redirect_url()\n\n show_all = True if request.method == 'GET' and\\\n str(request.args.get('show_all', False)).lower() == 'true'\\\n else False\n categories = get_all_objects_of_type(Category)\n if not show_all:\n latest_items = get_last_x_items_of_type(10, CategoryItem)\n num_items = latest_items.count()\n else:\n latest_items = get_all_objects_of_type(CategoryItem)\n latest_items.reverse()\n num_items = len(latest_items)\n user = get_user()\n items = get_all_items()\n\n return render_template('home.html',\n show_all=show_all,\n categories=categories,\n items=items,\n latest_items=latest_items,\n num_items=num_items,\n user=user)\n\n# Category Information\n@app.route('/catalog/category//')\ndef category_info(category_id):\n \"\"\"\n Function to return a page to view items for specified category.\n\n Args:\n category_id: ID value of the category to view.\n \"\"\"\n\n set_redirect_url()\n\n # Retrieve Category object for template rendering.\n # If not found, render error template.\n category = db_session.query(Category)\\\n .filter_by(id=category_id)\\\n .first()\n if not category:\n return render_template('error.html',\n headline_text='Category Not Found',\n error_text='The specified category was not found.')\n\n login_session['last_category_id'] = category.id\n category_items = db_session.query(CategoryItem).filter_by(category_id=category.id).all()\n creator = category.user\n user = get_user()\n\n return render_template('category_info.html',\n categories=get_all_objects_of_type(Category),\n category=category,\n category_items=category_items,\n creator=creator,\n items=get_all_items(),\n user=user)\n\n# Category Item\n@app.route('/catalog/item//')\ndef category_item_info(item_id):\n \"\"\"\n Function to return a page to view a category item.\n\n Args:\n item_id: ID value of the 
category item to view.\n \"\"\"\n\n set_redirect_url()\n\n # Retrieve CategoryItem object for template rendering.\n # If not found, render error template.\n category_item = db_session.query(CategoryItem)\\\n .filter_by(id=item_id)\\\n .first()\n if not category_item:\n return render_template('error.html',\n headline_text='Item Not Found',\n error_text='The specified item was not found.')\n\n creator = category_item.user\n user = get_user()\n\n return render_template('category_item_info.html',\n categories=get_all_objects_of_type(Category),\n category=category_item.category,\n item=category_item,\n items=get_all_items(),\n creator=creator,\n user=user)\n\n# New Category\n#\n# Note: Though not overtly specified as necessary in the project,\n# I put logic for adding a new category to offer the ability for\n# registered users to add their own categories.\n@app.route('/catalog/category/new/', methods=['GET', 'POST'])\ndef new_category():\n \"\"\"\n Function to create a new category.\n \"\"\"\n\n set_redirect_url()\n\n user = get_user()\n if not user:\n return redirect(url_for('login'))\n if request.method == 'POST':\n category = Category(name=request.form['name'],\n user_id=login_session['user_id'])\n db_session.add(category)\n db_session.commit()\n flash('New Category {} Successfully Created!'.format(category.name))\n return redirect(url_for('index'))\n else:\n return render_template('new_category.html',\n user=user)\n\n# New Category Item\n@app.route('/catalog/item/new/', methods=['GET', 'POST'])\ndef new_category_item():\n \"\"\"\n Function to return a page to create a new category item.\n \"\"\"\n\n set_redirect_url()\n\n user = get_user()\n categories = get_all_objects_of_type(Category)\n category = None\n if not user:\n return redirect(url_for('login'))\n if request.method == 'POST':\n if request.form.get('name', '') == '' and request.form.get('category', '') != '':\n category = db_session.query(Category)\\\n .filter_by(id=request.form.get('category'))\\\n .first()\n return render_template('new_category_item.html',\n user=user,\n category=category,\n categories=categories,\n request=request)\n new_item = CategoryItem(name=request.form['name'],\n user_id=login_session['user_id'],\n description=request.form['description'],\n category_id=request.form['category'])\n db_session.add(new_item)\n db_session.commit()\n flash('New Item {} Successfully Created!'.format(new_item.name))\n return redirect(url_for('index'))\n else:\n return render_template('new_category_item.html',\n user=user,\n category=category,\n categories=categories)\n\n# Edit Category\n#\n# Note: Though not overtly specified as necessary in the project,\n# I put logic for editing a category, where users can edit their\n# own categories that they have created.\n@app.route('/catalog/category//edit/', methods=['GET', 'POST'])\ndef edit_category(category_id):\n \"\"\"\n Function to return a page to edit a category.\n\n Args:\n category_id: ID value of the category to edit.\n \"\"\"\n\n user = get_user()\n categories = get_all_objects_of_type(Category)\n edited_item = db_session.query(Category)\\\n .filter_by(id=category_id)\\\n .first()\n if not edited_item:\n return render_template('error.html',\n headline_text='Category Not Found',\n error_text='The specified category was not found.')\n\n # Make sure the user is the creator of the category.\n if not user or user and user.id != edited_item.user.id:\n return render_template('error.html',\n headline_text='Access Denied',\n error_text='Sorry, but you are not the creator of '\\\n 'the 
category \"{}\". As such, you are not authorized '\\\n 'to make edits to it.'.format(edited_item.name))\n\n if request.method == 'POST':\n edited_item.name = request.form['name']\n db_session.add(edited_item)\n db_session.commit()\n flash('Category Successfully Updated!')\n return redirect(url_for('category_info',\n category_id=edited_item.id))\n else:\n return render_template('edit_category.html',\n category=edited_item,\n user=user,\n categories=categories)\n\n# Edit Category Item\n@app.route('/catalog/item//edit/', methods=['GET', 'POST'])\ndef edit_category_item(item_id):\n \"\"\"\n Function to return a page to edit a category item.\n\n Args:\n item_id: ID value of the category item to edit.\n \"\"\"\n\n user = get_user()\n categories = get_all_objects_of_type(Category)\n edited_item = db_session.query(CategoryItem)\\\n .filter_by(id=item_id)\\\n .first()\n if not edited_item:\n return render_template('error.html',\n headline_text='Item Not Found',\n error_text='The specified item was not found.')\n\n # Make sure the user is the creator of the item.\n if not user or user and user.id != edited_item.user.id:\n return render_template('error.html',\n headline_text='Access Denied',\n error_text='Sorry, but you are not the creator of '\\\n 'the item \"{}\". As such, you are not authorized '\\\n 'to make edits to it.'.format(edited_item.name))\n\n\n if request.method == 'POST':\n edited_item.name = request.form['name']\n edited_item.description = request.form['description']\n edited_item.category_id = request.form['category']\n db_session.add(edited_item)\n db_session.commit()\n flash('Item Successfully Updated!')\n category = db_session.query(Category)\\\n .filter_by(id=edited_item.category_id)\\\n .first()\n return redirect(url_for('category_item_info',\n item_id=edited_item.id))\n else:\n return render_template('edit_category_item.html',\n item=edited_item,\n user=user,\n categories=categories)\n\n# Delete Category\n#\n# Note: Though not overtly specified as necessary in the project,\n# I put logic for deleting a category, where users can delete\n# categories that they have created.\n@app.route('/catalog/category//delete/', methods=['GET', 'POST'])\ndef delete_category(category_id):\n \"\"\"\n Function to return a page to delete a category.\n\n Args:\n category_id: ID of the category to delete.\n \"\"\"\n\n user = get_user()\n category = db_session.query(Category)\\\n .filter_by(id=category_id).first()\n if not category:\n return redirect(url_for('index'))\n\n # Make sure the user is the creator of the category.\n if not user or user and user.id != category.user.id:\n return render_template('error.html',\n headline_text='Access Denied',\n error_text='Sorry, but you are not the creator of '\\\n 'the category \"{}\". 
As such, you are not authorized '\\\n 'to delete it.'.format(category.name))\n\n if request.method == 'POST':\n # Get and delete all items associated with this category.\n items = db_session.query(CategoryItem)\\\n .filter_by(category_id=category.id)\\\n .all()\n for item in items:\n db_session.delete(item)\n\n # Delete the category itself and commit everything.\n db_session.delete(category)\n db_session.commit()\n flash(\"Category {} deleted.\".format(category.name))\n return redirect(url_for('index'))\n else:\n return render_template('delete_category.html',\n category=category)\n\n# Delete Category Item\n@app.route('/catalog/item//delete/', methods=['GET', 'POST'])\ndef delete_category_item(item_id):\n \"\"\"\n Function to return a page to delete a category item.\n\n Args:\n item_id: ID of the category item to delete.\n \"\"\"\n\n user = get_user()\n item = db_session.query(CategoryItem)\\\n .filter_by(id=item_id)\\\n .first()\n category_id = ''\n if not item:\n if login_session.get('last_category_id', '') == '':\n return redirect(url_for('index'))\n else:\n category_id = login_session.get('last_category_id')\n else:\n category_id = item.category.id\n\n # Make sure the user is the creator of the item.\n if not user or user and user.id != item.user.id:\n return render_template('error.html',\n headline_text='Access Denied',\n error_text='Sorry, but you are not the creator of '\\\n 'the item \"{}\". As such, you are not authorized '\\\n 'to delete it.'.format(item.name))\n\n if request.method == 'POST':\n db_session.delete(item)\n db_session.commit()\n flash(\"Item {} deleted.\".format(item.name))\n return redirect(url_for('category_info',\n category_id=category_id))\n else:\n return render_template('delete_category_item.html',\n item=item)\n\n\n\n#-----------------------------------------------------------------------\n# Login/Social Connect Functionality\n#-----------------------------------------------------------------------\n\n# Log In\n@app.route('/login/')\ndef login():\n \"\"\"\n Function to return a page for user login with redirect.\n \"\"\"\n\n # Create random number to store in session\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))\n login_session['state'] = state\n if login_session.get('redirect_url', '') == '':\n login_session['redirect_url'] = '/catalog/'\n return render_template('login.html', STATE=state, REDIRECT_URL=login_session[\"redirect_url\"])\n\n# Google\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n \"\"\"\n Function to connect to Google for social login.\n \"\"\"\n\n # If state doesn't match login session state, we know it's an invalid\n # request and should redirect accordingly.\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Set code to the request data for use in retrieving credentials\n code = request.data\n credentials = None\n\n # Upgrade the authorization code into credentials object,\n # specify this is the one-time code server sending off, and\n # initiate exchange, passing in one-time code as input.\n # This exchanges authorization code for a credentials object\n try:\n oauth_flow = flow_from_clientsecrets(GOOGLE_JSON, scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(json.dumps('Failed to upgrade the authorization code.'), 401)\n 
response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that access token is valid.\n # Use UAuth2Credentials.from_json() method, which accepts a data str\n # argument of a JSON string from its own to_json() method.\n access_token = OAuth2Credentials.from_json(credentials.to_json()).access_token\n api_url = 'https://www.googleapis.com/oauth2/v1'\n url = '{}/tokeninfo?access_token={}'.format(api_url, access_token)\n http = httplib2.Http()\n result = json.loads(http.request(url, 'GET')[1])\n\n # If there was an error, abort\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify the access token is used for the intended user\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(json.dumps('Token of user ID does not match given user ID.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify the access token is valid for this app\n if result['issued_to'] != CLIENT_ID_GOOGLE:\n response = make_response(json.dumps('Token of client ID does not match app ID.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store access token and ID in session for later use\n login_session['access_token'] = access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info and store in login session\n userinfo_url = '{}/userinfo'.format(api_url)\n params = {'access_token': access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n\n login_session['provider'] = 'google'\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # See if user exists. 
If not, make a new one.\n user_id = get_user_id(login_session['email'])\n if not user_id:\n user_id = create_user()\n\n # Store user_id in login session\n login_session['user_id'] = user_id\n\n # Create flash message with user-specific info included\n flash(\"Logged in as {}\".format(login_session['username']))\n return ''\n\n# Facebook\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n \"\"\"\n Function to connect to Facebook for social login.\n \"\"\"\n\n # If state doesn't match login session state, we know it's an invalid\n # request and should redirect accordingly.\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n access_token = request.data\n\n # Exchange client token for long-lived server-side token\n url = \"\"\"https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token\n &client_id={}&client_secret={}&fb_exchange_token={}\"\"\"\\\n .format(APP_ID_FACEBOOK, APP_SECRET_FACEBOOK, access_token)\n http = httplib2.Http()\n result = http.request(url, 'GET')[1]\n\n # Use token to get user info from API, stripping expire tag from token.\n userinfo_url = 'https://graph.facebook.com/v2.5/me'\n token = result.split('&')[0]\n url = '{}?{}&fields=name,id,email'.format(userinfo_url, token)\n http = httplib2.Http()\n result = http.request(url, 'GET')[1]\n data = json.loads(result)\n\n # Store data in login_session\n login_session['provider'] = 'facebook'\n login_session['username'] = data['name']\n login_session['email'] = data['email']\n login_session['facebook_id'] = data['id']\n login_session['access_token'] = token\n\n # Get user picture and store in login session\n url = '{}/picture?{}&redirect=0&height=200&width=200'.format(userinfo_url, token)\n http = httplib2.Http()\n result = http.request(url, 'GET')[1]\n data = json.loads(result)\n login_session['picture'] = data['data']['url']\n\n # See if user exists. 
If not, make a new one.\n user_id = get_user_id(login_session['email'])\n if not user_id:\n user_id = create_user()\n login_session['user_id'] = user_id\n\n # Create flash message with user-specific info included\n flash(\"Logged in as {}\".format(login_session['username']))\n return ''\n\n\n#-----------------------------------------------------------------------\n# Logout/Disconnect Functionality\n#-----------------------------------------------------------------------\n\n# Google\n@app.route('/gdisconnect')\ndef gdisconnect():\n \"\"\"\n Function to disconnect from Google.\n \"\"\"\n\n # Only disconnect a connected user\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute request to revoke current user\n url = 'https://accounts.google.com/o/oauth2/revoke?token={}'.format(access_token)\n http = httplib2.Http()\n result = http.request(url, 'GET')[0]\n\n if result['status'] != '200':\n # For some reason, given token was invalid\n response = make_response(json.dumps('Failed to revoke token for given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n# Facebook\n@app.route('/fbdisconnect')\ndef fbdisconnect():\n \"\"\"\n Function to disconnect from Facebook.\n \"\"\"\n\n # Note: Access token must be included to successfully log out\n facebook_id = login_session['facebook_id']\n access_token = login_session['access_token']\n url = \"\"\"https://graph.facebook.com/{}/permissions?access_token={}\"\"\"\\\n .format(facebook_id, access_token)\n http = httplib2.Http()\n result = http.request(url, 'DELETE')[1]\n\n# General\n@app.route('/disconnect')\ndef disconnect():\n \"\"\"\n Function to disconnect from social provider and clear session.\n \"\"\"\n\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n if login_session.get('last_category_id', '') != '':\n del login_session['last_category_id']\n if login_session.get('redirect_url', '') != '':\n del login_session['redirect_url']\n flash('You have successfully logged out.')\n return redirect(url_for('index'))\n else:\n flash(\"You were not logged in to begin with!\")\n return redirect(url_for('index'))\n\n\n#-----------------------------------------------------------------------\n# Convenience functions\n#-----------------------------------------------------------------------\n\ndef user_logged_in():\n \"\"\"\n Function to return whether or not user is logged in.\n \"\"\"\n\n return 'username' in login_session\n\ndef get_user():\n \"\"\"\n Function to retrieve user from email stored in login session.\n \"\"\"\n\n return get_user_info(get_user_id(login_session.get('email', '')))\n\n\ndef get_user_id(email):\n \"\"\"\n Function to return user ID for user with specified email address.\n\n Args:\n email: Email address for user whose ID will be returned.\n \"\"\"\n\n try:\n user = db_session.query(User).filter_by(email=email).one()\n return user.id\n except NoResultFound:\n return None\n\ndef get_user_info(user_id):\n \"\"\"\n Function to return User object for user with 
specified user ID.\n\n Args:\n user_id: User ID for user object to return.\n \"\"\"\n\n try:\n user = db_session.query(User).filter_by(id=user_id).one()\n return user\n except NoResultFound:\n return None\n\ndef create_user():\n \"\"\"\n Function to create a new user with the info from login session.\n \"\"\"\n\n new_user = User(name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n db_session.add(new_user)\n db_session.commit()\n user = db_session.query(User)\\\n .filter_by(email=login_session['email'])\\\n .one()\n return user.id\n\ndef set_redirect_url():\n \"\"\"\n Function to set the redirect_url key in login_session, used\n when a user logs in to the site so they are redirected back\n to the page they were on prior to logging in.\n \"\"\"\n\n login_session['redirect_url'] = request.path\n\n\n\n#-----------------------------------------------------------------------\n# JSON Endpoints\n#-----------------------------------------------------------------------\n\n@app.route('/catalog/json/')\ndef catalog_json():\n \"\"\"\n Function to return JSON of all categories and items.\n \"\"\"\n\n categories = get_all_objects_of_type(Category)\n categories_list = []\n for cat in categories:\n categories_list.append(cat.serialize)\n items = db_session.query(CategoryItem).filter_by(category_id=cat.id).all()\n categories_list[-1]['items'] = [item.serialize for item in items]\n return jsonify(categories=categories_list)\n\n\n@app.route('/catalog/categories/json/')\ndef categories_json():\n \"\"\"\n Function to return JSON of all categories.\n \"\"\"\n\n categories = get_all_objects_of_type(Category)\n return jsonify(categories=[cat.serialize for cat in categories])\n\n@app.route('/catalog/category//json/')\ndef category_info_json(category_id):\n \"\"\"\n Function to return JSON of specified category.\n\n Args:\n category_id: ID value of the category for the item.\n \"\"\"\n\n category = db_session.query(Category).filter_by(id=category_id).first()\n if not category:\n return jsonify({'error': 'The specified category was not found.'})\n return jsonify(category.serialize)\n\n@app.route('/catalog/category//items/json/')\ndef category_items_info_json(category_id):\n \"\"\"\n Function to return JSON of category items for specified category.\n\n Args:\n category_id: ID value of the category for the item.\n \"\"\"\n\n category = db_session.query(Category).filter_by(id=category_id).first()\n if not category:\n return jsonify({'error': 'The specified category was not found.'})\n items = db_session.query(CategoryItem).filter_by(category_id=category_id).all()\n return jsonify(items=[item.serialize for item in items])\n\n@app.route('/catalog/category//item//json/')\ndef category_item_info_json(category_id, item_id):\n \"\"\"\n Function to return JSON of specified category item.\n\n Args:\n category_id: ID value of the category for the item.\n item_id: ID value of the category item to view.\n \"\"\"\n\n category = db_session.query(Category).filter_by(id=category_id).first()\n if not category:\n return jsonify({'error': 'The specified category was not found.'})\n item = db_session.query(CategoryItem).filter_by(id=item_id).first()\n if not item:\n return jsonify({'error': 'The specified category item was not found.'})\n return jsonify(item.serialize)\n","sub_path":"vagrant/catalog/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":27171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"36264802","text":"# based on https://www.section.io/engineering-education/run-length-encoding-algorithm-in-python/\n\n\ndef encode_message(message):\n encoded_string = \"\"\n i = 0\n while (i <= len(message)-1):\n count = 1\n ch = message[i]\n j = i\n while (j < len(message)-1):\n '''if the character at the current index is the same as the character at the next index. If the characters are the same, the count is incremented to 1'''\n if (message[j] == message[j + 1]):\n count = count + 1\n j = j + 1\n else:\n break\n '''the count and the character is concatenated to the encoded string'''\n encoded_string = encoded_string + str(count) + ch\n i = j + 1\n return encoded_string\n\n\ndef decode_message(our_message):\n decoded_message = \"\"\n i = 0\n j = 0\n # splitting the encoded message into respective counts\n while (i <= len(our_message) - 1):\n run_count = int(our_message[i])\n run_word = our_message[i + 1]\n # displaying the character multiple times specified by the count\n for j in range(run_count):\n # concatenated with the decoded message\n decoded_message = decoded_message+run_word\n j = j + 1\n i = i + 2\n return decoded_message\n\n\ndef display_example():\n # the original string\n our_message = \"AuuBBBCCCCCCcccccCCCCCCCCCA\"\n # pass in the original string\n encoded_message = encode_message(our_message)\n # pass in the decoded string\n decoded_message = decode_message(encoded_message)\n print(\"Original string: [\" + our_message + \"]\")\n print(\"Encoded string: [\" + encoded_message +\"]\")\n print(\"Decoded string: [\" + decoded_message +\"]\")\n\n\nif __name__ == '__main__':\n\n # display_example()\n message = input(\"Enter some repeated characters:\")\n encoded_message = encode_message(message)\n # pass in the decoded string\n decoded_message = decode_message(encoded_message)\n print(\"Original string: [\" + message + \"]\")\n print(\"Encoded string: [\" + encoded_message + \"]\")\n print(\"Decoded string: [\" + decoded_message + \"]\")","sub_path":"run_len.py","file_name":"run_len.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"565792795","text":"#!/usr/bin/python\nfrom bs4 import BeautifulSoup\nimport pymysql\nimport requests\nimport json\nimport threading\nimport re\n\n\ndef getDBcon():\n db = pymysql.connect(\n \"localhost\",\n \"root\",\n \"zqt1997\",\n \"21cnjy\",\n use_unicode=True,\n charset=\"utf8\")\n cursor = db.cursor()\n return db, cursor\n\n\ndef work():\n db, cursor = getDBcon()\n cursor.execute('select id,spId,xd,xueduan from msg2')\n data = cursor.fetchall()\n ok=[]\n for line in data:\n gradeId = line[0]\n spId = line[1]\n xd = line[2]\n xueduan = line[3]\n if xueduan in ok:\n continue\n #url = 'https://www.21cnjy.com/{}/{}/'.format(xd, spId)\n url='https://www.21cnjy.com/2/49247/'\n text = requests.get(url).text\n text = text.replace('\\n', '').replace(' ', '')\n p = r'data-param=\"bookversion=(.*?)\">(.*?)'\n res = re.findall(p, text)\n if len(res)==0:\n continue\n ok.append(xueduan)\n print(xueduan)\n for x in res:\n cursor.execute('insert into SpeTm(gradeId,bookversion,name,xueduan) values (%s,%s,%s,%s)',(gradeId,x[0],x[1],xueduan))\n print(x)\n db.commit()\n input()\n db.close()\n print(len(ok))\n\nif __name__ == '__main__':\n work()\n ","sub_path":"教育网站/21cnjy/getSpeTm.py","file_name":"getSpeTm.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"161483195","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User, Permission\n\nfrom majora2 import models\nfrom majora2 import util\nfrom tatl import models as tmodels\nfrom django.utils import timezone\n\nclass Command(BaseCommand):\n help = \"Load a list of MAGs\"\n def add_arguments(self, parser):\n parser.add_argument('filename')\n\n def handle(self, *args, **options):\n su = User.objects.get(is_superuser=True)\n fh = open(options[\"filename\"])\n\n seen_mags = set([])\n for i, line in enumerate(fh):\n if i == 0:\n # Root node\n node = util.mkroot(line.strip())\n\n else:\n mags, created = util.mkmag(line.strip(), sep='/', parents=True, artifact=False, physical=False, root=node)\n if sum(created) > 0:\n for m in [mag for i, mag in enumerate(mags) if created[i]]:\n treq = tmodels.TatlPermFlex(\n user = su,\n substitute_user = None,\n used_permission = \"majora2.management.commands.mkmag\",\n timestamp = timezone.now(),\n content_object = m,\n )\n treq.save()\n for m in mags:\n if m.id in seen_mags:\n continue\n print(\"\\t\".join([\n str(m.id),\n m.group_path\n ]))\n seen_mags.add(m.id)\n","sub_path":"majora2/management/commands/mkmag.py","file_name":"mkmag.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"585516079","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n\n# Author: lixingtie\n# Email: lixingtie@barfoo.com.cn\n# Create Date: 2013-9-10\n\nimport os\nimport codecs\nfrom core.builder.layout import build_layout\n\ndef create_layout(layout):\n \"\"\"\n 创建模板\n \"\"\"\n src = build_layout()\n path = get_layout_path(layout)\n\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n\n f = codecs.open(path, \"w\", 'utf-8')\n f.write(src)\n f.close()\n\n\ndef save_layout(layout, html, design):\n \"\"\"\n 保存布局\n \"\"\"\n html = build_layout(html)\n \n path = get_layout_path(layout)\n \n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n\n f = codecs.open(path, \"w\", 'utf-8')\n f.write(html)\n f.close()\n\n path = get_layout_design_path(layout)\n \n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n \n f = codecs.open(path, \"w\", 'utf-8')\n f.write(design)\n f.close()\n\n\ndef get_layout_html(layout):\n \"\"\"\n 获取模板html\n \"\"\"\n html = \"\"\n path = get_layout_path(layout)\n \n if os.path.exists(path):\n f = codecs.open(path, \"r\", 'utf-8')\n html = f.read()\n f.close()\n\n return html\n\n\ndef get_layout_design(layout):\n \"\"\"\n 获取模板设计html\n \"\"\"\n html = \"\"\n path = get_layout_design_path(layout)\n \n if os.path.exists(path):\n f = codecs.open(path, \"r\", 'utf-8')\n html = f.read()\n f.close()\n\n return html\n\n\ndef remove_layout(layout):\n \"\"\"\n 删除模板\n \"\"\"\n #删除模板文件(html)\n path = get_layout_path(layout)\n\n if os.path.exists(path):\n os.remove(path)\n\n #删除模板设计文件(design)\n path = get_layout_design_path(layout)\n \n if os.path.exists(path):\n os.remove(path)\n \n #删除模板预览图片\n if \"preview\" in layout and layout.preview:\n path = os.path.abspath(os.path.join(\"./static\", layout.preview))\n if os.path.exists(path):\n os.remove(path)\n\n\ndef get_layout_path(layout):\n \"\"\"\n 获取布局文件路径\n \"\"\"\n if \"system\" in layout:\n return os.path.abspath(\"platform/system/{0}/templates/share/layout/{1}.html\").format(layout.system.fetch().name.lower(), layout._id)\n else:\n return 
os.path.abspath(\"platform/layout/{0}.html\").format(layout._id)\n\n\ndef get_layout_design_path(layout):\n \"\"\"\n 获取布局设计文件路径\n \"\"\"\n if \"system\" in layout:\n return os.path.abspath(\"platform/system/{0}/platform/layout/{1}.design\").format(layout.system.fetch().name.lower(), layout._id)\n else:\n return os.path.abspath(\"platform/layout/{0}.design\").format(layout._id)\n\n\ndef get_layout_preview_path(layout, filename):\n \"\"\"\n 获取布局预览图片路径\n \"\"\"\n if \"system\" in layout:\n systemname = layout.system.fetch().name\n return os.path.abspath(\"platform/system/{0}/static/img/layout/{1}{2}\").format(systemname, layout._id, os.path.splitext(filename)[1])\n else:\n return os.path.abspath(\"static/img/layout/{0}{1}\").format(layout._id, os.path.splitext(filename)[1])\n","sub_path":"core/logic/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"572815638","text":"\"\"\"empty message\n\nRevision ID: e2786f25ece2\nRevises: 9d8227315937\nCreate Date: 2020-11-22 16:08:37.494954\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e2786f25ece2'\ndown_revision = '9d8227315937'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('tests', sa.Column('answers', sa.PickleType(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('tests', 'answers')\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/e2786f25ece2_.py","file_name":"e2786f25ece2_.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264933562","text":"from sys import stdin\ndef contador(cadena, number):\n contador = 0\n for i in range(len(cadena)):\n if number in cadena[i]:\n contador += 1\n return contador\ndef cadena(numero,y):\n for i in range(1,numero+1):\n y.append(str(i))\n return y\ndef main():\n casos = int(stdin.readline())\n for i in range(casos):\n lista = []\n numero = int(stdin.readline())\n string = \"\".join(cadena(numero,[]))\n for j in range(10):\n t = contador(string,str(j))\n lista.append(str(t))\n r = \" \".join(lista)\n print(r)\nmain()\n \n \n \n \n","sub_path":"ejercicios/Misc/cantnum.py","file_name":"cantnum.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"297388185","text":"import datetime\r\n\r\nimport telegram\r\nimport time\r\nimport webbrowser\r\nimport random\r\n \r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler\r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler, CallbackQueryHandler, MessageHandler, Filters\r\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup\r\nfrom staticmap import StaticMap, CircleMarker\r\nfrom googletrans import Translator\r\nfrom requests import get\r\nfrom bs4 import BeautifulSoup\r\nfrom geopy.geocoders import Nominatim\r\n\r\n############################### Bot ############################################\r\n\r\ntranslator = Translator() # Create object of Translator.\r\nbase='https://www.orpha.net/consor/cgi-bin/'\r\nurl = 'https://www.orpha.net/consor/cgi-bin/Clinics_ERN.php?lng=EN'\r\nresponse 
= get(url) \r\nhtml_soup = BeautifulSoup(response.text, 'html.parser')\r\ngeolocator = Nominatim(user_agent=\"share4bot\")\r\n\r\nmapa = StaticMap(500, 500)\r\n\r\n\r\ncenter_containers = html_soup.find_all('div', class_ = 'ERN')\r\nfirst=center_containers[0]\r\n\r\ntipus=[]\r\nmal=[]\r\ni=0\r\nfor tag in first.ul.find_all(\"li\", recursive=True): \r\n if tag.a.has_attr('href'):\r\n tipus.append(tag.a.text.lower())\r\n ini=tag.a.text.find('-')\r\n nom=tag.a.text[ini+2:]\r\n mal.append('mal%s'%i)\r\n exec('mal%s={}'%i)\r\n eval('mal%s'%i)['link']=tag.a.attrs['href']\r\n i+=1\r\n\r\nnums=[]\r\nciutats=[]\r\nlinkk=[]\r\ndef check(inf):\r\n global linkk\r\n global mapa\r\n global ciutats\r\n nums=[]\r\n linkk=[]\r\n for i in tipus:\r\n if inf in i:\r\n nums.append(tipus.index(i))\r\n i='mal'+str(nums[0])\r\n url = base+eval(i)['link']\r\n response1 = get(url)\r\n html_soup1= BeautifulSoup(response1.text, 'html.parser')\r\n centers = html_soup1.find_all('div', class_ = 'activityLoc')\r\n first1=centers[0]\r\n for tag in first1.find_all(\"div\", recursive=True):\r\n if tag.strong!=None:\r\n if tag.strong.text not in eval(i):\r\n eval(i)[tag.strong.text]=[]\r\n pais=tag.strong.text\r\n if tag.p!=None:\r\n city=tag.p.text\r\n if tag.a!=None:\r\n #if [tag.a.attrs['href'],city] not in eval(i)[pais]:\r\n if city[:-1] not in eval(i)[pais]:\r\n eval(i)[pais].append(city[:-1])\r\n if pais=='ESPAGNE':\r\n linkk.append(base+tag.a.attrs['href'])\r\n ciutats=[]\r\n for z in eval(i)['ESPAGNE']:\r\n ciutats.append(z)\r\n\r\n\r\n \r\n\r\n\r\nlanguage=\"EN\"\r\n\r\ndef translate(text):\r\n global language\r\n if language == \"ES\":\r\n translated = translator.translate(text, dest='es')\r\n return translated\r\n elif language == \"EN\":\r\n translated = translator.translate(text, dest='en')\r\n return translated\r\n elif language ==\"CAT\":\r\n translated = translator.translate(text, dest='ca')\r\n return translated\r\n elif language =='FR':\r\n translated = translator.translate(text, dest='fr')\r\n return translated\r\n elif language =='EUS':\r\n translated = translator.translate(text, dest='eu')\r\n return translated\r\n elif language =='GAL':\r\n translated = translator.translate(text, dest='gl')\r\n return translated \r\n\r\n\r\n\r\n\r\n\r\ndef start(bot, update):\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Hi! 
Choose your language typing /idioma + (CAT, ES, FR, EN, EUS, GAL)\").text) \r\n\r\n \r\n\r\n\r\ndef main_menu_cat(bot, update):\r\n query = update.callback_query\r\n bot.edit_message_text(chat_id=query.message.chat_id,\r\n message_id=query.message.message_id, #1r param: missatge \r\n text=translate(main_menu_message_cat()).text,\r\n reply_markup=main_menu_keyboard_cat()) #1r param: menú keyboard al que anem al pulsar \r\n\r\ndef link_menu_cat(bot, update):\r\n query = update.callback_query\r\n bot.edit_message_text(chat_id=query.message.chat_id,\r\n message_id=query.message.message_id,\r\n text=translate(link_menu_message_cat()).text,\r\n reply_markup=link_menu_keyboard_cat())\r\n\r\ndef rrss_menu_cat(bot, update):\r\n query = update.callback_query\r\n bot.edit_message_text(chat_id=query.message.chat_id,\r\n message_id=query.message.message_id,\r\n text=translate(rrss_menu_message()).text,\r\n reply_markup=rrss_menu_keyboard_cat()) \r\n\r\n\r\n \r\n\r\n############################ Keyboards #########################################\r\ndef main_menu_keyboard_cat():\r\n keyboard = [[InlineKeyboardButton(translate(\"Recursos\").text + \"♿\", url='http://www.creenfermedadesraras.es/creer_01/recuasoc/recursos/index.htm')],\r\n [InlineKeyboardButton(translate(\"Links d\\'interès\").text + \"🌐\", callback_data='link_menu_keyboard_cat')],\r\n [InlineKeyboardButton(translate(\"Test de concienciación\").text+'📚', url='https://forms.gle/tDh1fiKBdpNjG2S67')],\r\n [InlineKeyboardButton(translate(\"Donatius\").text+'🎉', url='https://www.ccma.cat/tv3/marato/es/2019/230/')]]\r\n return InlineKeyboardMarkup(keyboard)\r\n\r\n\r\ndef link_menu_keyboard_cat():\r\n keyboard = [[InlineKeyboardButton(translate('Xarxes Socials').text + \"📱\", callback_data='rrss_menu_keyboard_cat')],\r\n [InlineKeyboardButton(translate('Associacions').text + \"🚻\", url='http://fecamm.org/portal1/m_index.asp?idioma=1')],\r\n [InlineKeyboardButton(translate('Links d\\'interès').text + \"🌐\", url = \"https://www.share4rare.org/\")],\r\n [InlineKeyboardButton(translate('Libro de la cigüeña añil').text+'📖', url=\"https://weeblebooks.com/es/educacion-emocional/la-ciguena-anil/\")],\r\n [InlineKeyboardButton('BACK 🔙', callback_data='main_menu_cat')]]\r\n return InlineKeyboardMarkup(keyboard)\r\n\r\ndef rrss_menu_keyboard_cat():\r\n keyboard = [[InlineKeyboardButton('Instagram', url=\"https://www.instagram.com/share4rare/\")],\r\n [InlineKeyboardButton('Twitter', url=\"https://twitter.com/share4rare\")],\r\n [InlineKeyboardButton('Facebook', url = \"https://bit.ly/2PHPZr6\")],\r\n [InlineKeyboardButton('LinkedIn', url=\"https://www.linkedin.com/company/share4rare\")],\r\n [InlineKeyboardButton('WhatsApp', url=\"https://bit.ly/36ArtyU\")],\r\n [InlineKeyboardButton('BACK🔙', callback_data='link_menu_keyboard_cat')]]\r\n return InlineKeyboardMarkup(keyboard) \r\n\r\n \r\n\r\n#########################EXTRA#############################\r\n\r\ndef echo(bot, update):\r\n print(update.message.text)\r\n bot.send_message(chat_id=update.message.chat_id, text=translator.translate(update.message.text))\r\n\r\ndef where(bot, update, user_data):\r\n mapa = StaticMap(500, 500)\r\n global ciutats\r\n global linkk\r\n try:\r\n fitxer = \"%d.png\" % random.randint(1000000, 9999999)\r\n lat, lon = update.message.location.latitude, update.message.location.longitude\r\n mapa.add_marker(CircleMarker((lon, lat), 'blue', 10))\r\n for k in ciutats:\r\n location = geolocator.geocode(k)\r\n mapa.add_marker(CircleMarker((location.longitude,location.latitude), 'red', 
10))\r\n \r\n imatge = mapa.render()\r\n imatge.save(fitxer)\r\n bot.send_photo(chat_id=update.message.chat_id, photo=open(fitxer, 'rb'))\r\n for p in linkk:\r\n bot.sendMessage(chat_id=update.message.chat_id, text=p) \r\n except Exception as e:\r\n print(e)\r\n bot.send_message(chat_id=update.message.chat_id, text='💣') \r\n\r\n \r\n\r\nmalaltia=\"\"\r\ndef info(bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text='D\\'acord! Enviam la meva ubicació')\r\n malaltia = update.message.text[6:]\r\n check(malaltia)\r\n\r\n\r\ndef idioma(bot, update): \r\n global language\r\n language=\"\"\r\n languageentrada = update.message.text[8:]\r\n if languageentrada == \"ES\":\r\n language = \"ES\"\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Gracias! El idioma ha sido configurado correctamente\").text) \r\n elif languageentrada == \"CAT\":\r\n language = \"CAT\"\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Gracias! El idioma ha sido configurado correctamente\").text)\r\n elif languageentrada == \"FR\":\r\n language = \"FR\"\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Gracias! El idioma ha sido configurado correctamente\").text)\r\n elif languageentrada == \"EUS\":\r\n language = \"EUS\"\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Gracias! El idioma ha sido configurado correctamente\").text)\r\n elif languageentrada == \"EN\":\r\n language = \"EN\"\r\n bot.sendMessage(chat_id=update.message.chat_id, text=translate(\"Gracias! El idioma ha sido configurado correctamente\").text)\r\n else:\r\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Sorry, I don't speak your language!\")\r\n update.message.reply_text(translate(main_menu_message_cat()).text, #1r param: missatge \r\n reply_markup=main_menu_keyboard_cat())()\r\n\r\n\r\n \r\n\r\n\r\n############################# Messages #########################################\r\ndef main_menu_message_cat():\r\n return \"Hola! Benvingut a RareBot!\\nPots buscar Informació d\\'enfermetats minoritàries, buscar Material, veure els Links d\\'interès, fer un Test de concienciació o fer Donatius!\\nPots buscar informació de la malaltia escrivint /info + el nom de la malaltia!\"\r\n\r\ndef link_menu_message_cat():\r\n return \"Escull què vols!\"\r\n\r\ndef link_menu_message():\r\n return 'Aquí pots trobar diferents links d \\'interès'\r\n\r\n\r\ndef rrss_menu_message():\r\n return 'Xarxes socials... 
aquí en tens unes quantes!'\r\n\r\n############################# Handlers #########################################\r\n\r\nTOKEN = open('token.txt').read().strip()\r\nupdater = Updater(token=TOKEN)\r\ndispatcher = updater.dispatcher\r\n\r\nupdater.dispatcher.add_handler(CommandHandler('start', start))\r\nupdater.dispatcher.add_handler(CallbackQueryHandler(main_menu_cat, pattern='main_menu_cat'))\r\nupdater.dispatcher.add_handler(CallbackQueryHandler(link_menu_cat, pattern='link_menu_keyboard_cat'))\r\nupdater.dispatcher.add_handler(CallbackQueryHandler(rrss_menu_cat, pattern='rrss_menu_keyboard_cat'))\r\nupdater.dispatcher.add_handler(CallbackQueryHandler(sos_menu_cat))\r\nupdater.dispatcher.add_handler(CommandHandler(\"help\", help))\r\nupdater.dispatcher.add_handler(CommandHandler('info', info))\r\nupdater.dispatcher.add_handler(CommandHandler('idioma', idioma))\r\nupdater.dispatcher.add_handler(MessageHandler(Filters.location, where, pass_user_data=True)) \r\n\r\nupdater.start_polling()\r\n################################################################################\r\nupdater.idle()\r\n\r\n\r\n\r\n","sub_path":"src/scrappinghospital.py","file_name":"scrappinghospital.py","file_ext":"py","file_size_in_byte":10749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"24317488","text":"import numpy as np\n\n\ndef handle_zeros_in_scale(scale):\n if np.isscalar(scale):\n if scale == .0:\n scale = 1.\n return scale\n elif isinstance(scale, np.ndarray):\n scale[scale == 0.0] = 1.0\n return scale\n\n\nclass MinMaxBase:\n\n def __init__(self, feature_range=(0, 1), min_=0, max_=1):\n self.feature_range = feature_range\n self.min = min_\n self.max = max_\n self.scale = None\n self.fitted = False\n\n def _reset(self):\n if self.scale:\n self.min = 0\n self.max = 1\n self.scale = None\n self.fitted = False\n\n def fit(self):\n self._reset()\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\"Minimum of desired feature range must be smaller\"\n \" than maximum. 
Got %s.\" % str(feature_range))\n max_min = self.max - self.min\n self.scale = ((feature_range[1] - feature_range[0]) /\n handle_zeros_in_scale(max_min))\n\n self.scale_min = feature_range[0] - self.min * self.scale\n self.fitted = True\n return self\n\n def transform(self, X):\n if self.fitted:\n X *= self.scale\n X += self.scale_min\n return X\n else:\n raise Exception(\"Not fitted\")\n\n\nclass HomoMinMax(MinMaxBase):\n\n def __init__(self, min_li, max_li):\n\n super(HomoMinMax, self).__init__(min_=np.min(min_li, axis=0),\n max_=np.max(max_li, axis=0))\n\n def fit(self):\n super(HomoMinMax, self).fit()\n\n\nclass HeteroMinMax(MinMaxBase):\n\n def __init__(self, min_, max_):\n\n super(HeteroMinMax, self).__init__(min_=min_, max_=max_)\n\n def fit(self):\n super(HeteroMinMax, self).fit()\n\n\nclass TestMinMax:\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n X_train = np.array([[1., -1., 2.], [2., 0., 0.], [2, 1, 0], [0., 1., -1.]])\n scaler.fit(X_train)\n x_transform = scaler.transform(X_train)\n\n x1 = np.array([[1., -1., 2.], [2., 0., 0.]])\n x2 = np.array([[2, 1, 0], [0., 1., -1.]])\n\n x1_min = np.min(x1, axis=0)\n x2_min = np.min(x2, axis=0)\n\n x1_max = np.max(x1, axis=0)\n x2_max = np.max(x2, axis=0)\n\n min_li = np.vstack([x1_min, x2_min])\n max_li = np.vstack([x1_max, x2_max])\n\n homo_scale = HomoMinMax(min_li, max_li)\n homo_scale.fit()\n x1_transform = homo_scale.transform(x1)\n x2_transform = homo_scale.transform(x2)\n\n np.testing.assert_array_almost_equal(x1_transform,\n x_transform[:2,],\n decimal=6)\n np.testing.assert_array_almost_equal(x2_transform,\n x_transform[2:,],\n decimal=6)\n\n x3 = X_train[:, :2]\n x4 = X_train[:, 2:]\n\n hetero_scale = HeteroMinMax(np.min(x3, axis=0), np.max(x3, axis=0))\n hetero_scale.fit()\n x3_transfrom = hetero_scale.transform(x3)\n\n np.testing.assert_array_almost_equal(x3_transfrom,\n x_transform[:, :2],\n decimal=6)\n\n hetero_scale = HeteroMinMax(np.min(x4, axis=0), np.max(x4, axis=0))\n hetero_scale.fit()\n x4_transfrom = hetero_scale.transform(x4)\n\n np.testing.assert_array_almost_equal(x4_transfrom,\n x_transform[:, 2:],\n decimal=6)\n","sub_path":"minmax_scaler.py","file_name":"minmax_scaler.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"4728039","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = 'calm.xia@gmail.com'\n\n'''\n狄克斯特拉算法(Dijkstra’s algorithm)-- 最快路径\n包含4个步骤 \n(1) (从起点开始)找出开销最低的节点\n(2) 对于该节点的邻居,检查是否有前往它们的更短路径,如果有,就更新其开销。 -- 开销指的是从起点开始的花费。 \n(3) 重复这个过程,直到对图中的每个节点都这样做了。-- 终点除外。 \n(4) 计算最终路径。\n'''\n\n# ==================================================\n# 1. 散列表 graph -- 表示整张图 \n# 一级散列表 \"key-value\" \n# --> \"节点-邻居节点\"\n# 二级散列表 \"key-value\" -- 一级散列表中元素的 value 使用散列表构造 \n# --> \"节点-(从父节点过来的)开销\"\n# ==================================================\n\ngraph = {}\n\ngraph[\"start\"] = {}\ngraph[\"start\"][\"a\"] = 6\ngraph[\"start\"][\"b\"] = 2\n# graph[\"start\"].keys() # 获取 start 节点的所有邻居节点\n\ngraph[\"a\"] = {}\ngraph[\"a\"][\"fin\"] = 1\n\ngraph[\"b\"] = {}\ngraph[\"b\"][\"a\"] = 3\ngraph[\"b\"][\"fin\"] = 5\n\ngraph[\"fin\"] = {} # 终点没有任何邻居\n\n# ==================================================\n# 2. 
散列表 costs -- 存储每个节点的开销\n# 节点的开销指的是从起点出发前往该节点的总开销。\n# 对于还不知道的开销,将其设置为无穷大\n# 注意:刚开始设置散列表的时候,只知道起点的邻居节点的开销,其他的节点都是未知的,设置为无穷大\n# ==================================================\n\ninfinity = float(\"inf\")\ncosts = {}\ncosts[\"a\"] = 6\ncosts[\"b\"] = 2\ncosts[\"fin\"] = infinity\n\n# ==================================================\n# 3. 散列表 parents -- 存储父节点\n# ==================================================\n\nparents = {}\nparents[\"a\"] = \"start\"\nparents[\"b\"] = \"start\"\nparents[\"fin\"] = None\n\n# ==================================================\n# 4. 数组 processed -- 记录处理过的节点\n# 对于同一个节点,不用处理多次\n# ==================================================\n\nprocessed = []\n\n\ndef find_lowest_cost_node(costs):\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in costs: # 遍历所有的节点\n cost = costs[node]\n if cost < lowest_cost and node not in processed: # 如果当前节点的开销更低且未处理过\n lowest_cost = cost # 就将其视为开销最低的节点\n lowest_cost_node = node\n return lowest_cost_node\n \ndef main():\n node = find_lowest_cost_node(costs) # 在未处理的节点中找出开销最小的节点\n print(\"First lowest code node: \", node)\n\n while node is not None: # while 循环在所有节点都被处理过后结束\n print(\"--------------\")\n cost = costs[node]\n neighbors = graph[node]\n print(\"node \", node, \": cost \", cost, \" neighbors \", neighbors)\n\n for n in neighbors.keys(): # 遍历当前节点的所有邻居\n new_cost = cost + neighbors[n]\n if costs[n] > new_cost: # 如果经当前节点前往该邻居更近,\n costs[n] = new_cost # 就更新该邻居的开销,\n parents[n] = node # 同时将该邻居的父节点设置为当前节点\n processed.append(node) # 将当前节点标记为处理过\n print(\"processed: \", processed)\n node = find_lowest_cost_node(costs) # 找出接下来要处理的节点, 并循环\n print()\n print(\"Done! Cost of 'fin' is : \", costs['fin'])\n print(\"'fin' <-- \", parents[\"fin\"], \" <-- \", parents[parents[\"fin\"]] )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"算法图解/Dijkstra.py","file_name":"Dijkstra.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"18613126","text":"pokemons = {\r\n 'Pikachu': {\r\n 'Tipo': 'Electrico',\r\n 'Evolucion': 'Raichu',\r\n 'Pokedex': 1,\r\n 'Ataque': 13,\r\n 'Defensa': 20\r\n },\r\n 'Raichu': {\r\n 'Tipo': 'Electrico',\r\n 'Evolucion': None,\r\n 'Pokedex': 2,\r\n 'Ataque': 20,\r\n 'Defensa': 28\r\n }\r\n }\r\n\r\ndef pokedex():\r\n for key in pokemons:\r\n print(\"------------------------------\")\r\n print(\"Nombre:\\t\\t\", key)\r\n print(\"Tipo:\\t\\t\", pokemons[key]['Tipo'])\r\n print(\"Evolucion:\\t\", pokemons[key]['Evolucion']) if pokemons[key]['Evolucion'] != None else 0\r\n print(\"Numero Pokedex:\\t\", pokemons[key]['Pokedex'])\r\n print(\"Ataque:\\t\\t\", pokemons[key]['Ataque'])\r\n print(\"Defensa:\\t\", pokemons[key]['Defensa'])\r\n print(\"------------------------------\")\r\n\r\ndef registro():\r\n name = input('Introduce el nombre del pokemon: ')\r\n if name in pokemons:\r\n choice = input('¿Desea modificar los datos del pokemon? 
[S/N] ').lower()\r\n if choice == 's':\r\n print('Introduzca los valores a modificar\\nEn caso de no querer modificar cierto valor, oprima ENTER')\r\n else:\r\n return None\r\n else:\r\n print('Introduzca los valores correspondientes')\r\n pokemons[name] = {\r\n 'Tipo': None,\r\n 'Evolucion': None,\r\n 'Pokedex': 0,\r\n 'Ataque': 0,\r\n 'Defensa': 0\r\n }\r\n for key in pokemons[name]:\r\n mod = input(f'{key}: ')\r\n if mod != '':\r\n pokemons[name][key] = mod\r\n","sub_path":"[games]-pokemon.py","file_name":"[games]-pokemon.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365100801","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nnetwork_analysis.py\n\nDescription: A brief analysis of the U.S. Congressional Twitter network\n\nCreated by Drew Conway (drew.conway@nyu.edu) on 2011-08-09 \n# Copyright (c) 2011, under the Simplified BSD License. \n# For more information on FreeBSD see: http://www.opensource.org/licenses/bsd-license.php\n# All rights reserved.\n\"\"\"\n\nimport sys\nimport os\nimport networkx as nx\nfrom networkx import core\n\ndef main():\n\t# Load graph\n\ttwitter_graph = nx.read_edgelist('twitter_congress_graph.edgelist', delimiter=\"\\t\", nodetype=str, create_using=nx.DiGraph())\n\ttwitter_graph.remove_edges_from(twitter_graph.selfloop_edges())\n\t\n\t# Find largest connected component\n\ttwitter_mc = nx.weakly_connected_component_subgraphs(twitter_graph)[0]\n\t\n\t# Take 2-core of main component\n\tmc_core = core.k_core(twitter_mc, 2)\n\t\n\t# Output results\n\tnx.write_edgelist(twitter_graph, 'twitter_congress_clean', delimiter='\\t')\n\tnx.write_edgelist(twitter_mc, 'twitter_congress_mc.edgelist', delimiter='\\t')\n\tnx.write_edgelist(mc_core, 'twitter_congress_mc_2core.edgelist', delimiter='\\t')\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"supplemental/congressional_network/network_analysis.py","file_name":"network_analysis.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"184257077","text":"# 오징어게임 댓글 가져오기!\r\n\r\nimport pandas\r\nfrom googleapiclient.discovery import build\r\n\r\napi_key = '_'\r\nvideo_id = 'QcGYo3t82ig'\r\n\r\ncomments = list()\r\napi_obj = build('youtube', 'v3', developerKey=api_key)\r\nresponse = api_obj.commentThreads().list(part='snippet,replies', videoId=video_id, maxResults=450).execute()\r\n \r\nwhile response:\r\n for item in response['items']:\r\n comment = item['snippet']['topLevelComment']['snippet']\r\n comments.append([comment['textDisplay'], comment['authorDisplayName'], comment['publishedAt'], comment['likeCount']])\r\n \r\n if item['snippet']['totalReplyCount'] > 0:\r\n for reply_item in item['replies']['comments']:\r\n reply = reply_item['snippet']\r\n comments.append([reply['textDisplay'], reply['authorDisplayName'], reply['publishedAt'], reply['likeCount']])\r\n \r\n if 'nextPageToken' in response:\r\n response = api_obj.commentThreads().list(part='snippet,replies', videoId=video_id, pageToken=response['nextPageToken'], maxResults=100).execute()\r\n else:\r\n break\r\ndf = pandas.DataFrame(comments)\r\ndf.to_excel('results_오징어게임.xlsx', header=['comment', 'author', 'date', 'num_likes'], index=None)","sub_path":"youtubeapi.py","file_name":"youtubeapi.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"70390464","text":"import os\n\nKERNEL_TEMPLATE = r'''\n{{\n \"display_name\": \"{display_name}\",\n \"language\": \"python\",\n \"argv\": [\n \"python\",\n \"-c\",\n \"{launch_script}\",\n \"{{connection_file}}\",\n \"/mnt/c/Program Files/IDA Pro 7.4/{exe_name}\"\n ],\n \"codemirror_mode\": {{\n \"version\": 2,\n \"name\": \"ipython\"\n }}\n}}\n'''\n\n\ndef write_kernel(out_dir, display_name, exe_name, launch_script):\n kernel = KERNEL_TEMPLATE.format(display_name=display_name, exe_name=exe_name, launch_script=launch_script)\n\n kernel_dir = os.path.join(out_dir, display_name.lower())\n if not os.path.exists(kernel_dir):\n os.makedirs(kernel_dir)\n\n with open(os.path.join(kernel_dir, 'kernel.json'), 'wb') as f:\n f.write(kernel)\n\n\ndef generate_kernels(out_dir):\n with open('launch_ida_wsl.py', 'rb') as f:\n launch_ida_script = f.read()\n\n launch_ida_script = launch_ida_script.replace('\"', r'\\\"').replace('\\n', r'\\n').replace('\\r', r'\\r')\n\n for display_name, exe_name in (('IDA32', 'ida.exe'), ('IDA64', 'ida64.exe')):\n write_kernel(out_dir=out_dir,\n display_name=display_name,\n exe_name=exe_name,\n launch_script=launch_ida_script)\n","sub_path":"generate_kernels_wsl.py","file_name":"generate_kernels_wsl.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"425225619","text":"from google.cloud import storage\n\n# Setting credentials using the downloaded JSON file\n\nclient = storage.Client.from_service_account_json(json_credentials_path='C:\\Terraform\\dev-sa.json')\n\n\n\nbucket = client.bucket(\"dcmbucket\")\n\n# Name of the object to be stored in the bucket\n\nobject_name_in_gcs_bucket = bucket.blob('my_first_gcs_upload.txt')\n\n# Name of the object in local file system\n\nobject_name_in_gcs_bucket.upload_from_filename('C:/Terraform/testblob.txt')\n\nblobs = client.list_blobs(\"dcmbucket\")\nfor blob in blobs:\n #if str(blob) == \"my_first_gcs_upload.txt\":\n print(blob)\n","sub_path":"Upload_object.py","file_name":"Upload_object.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"506753664","text":"#/usr/local/bin/python3\n# -*- coding: utf-8 -*-\nimport os\n\ndef out_data(path, attribute, res, field, file):\n if attribute == 0 : \n print (\"Plz Sorting data before creating out put file... 
\")\n exit(0)\n \n file_name = path = os.getcwd()+\"/output/\"+file+\"_output_\"+str(attribute)+\".csv\"\n \n with open(file_name, \"w\") as fp :\n string = \",\".join(field)\n string += \"\\n\"\n fp.write(string)\n for line in res:\n string = \"\"\n string += \",\".join(line)\n string += \"\\n\"\n fp.write(string)\n \n print (\"Successfully Creating reslut data!\")\n print (\"============================\")\n\n","sub_path":"algorithm/Assignment-1/src/out_data.py","file_name":"out_data.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"137352808","text":"from django.conf.urls import patterns, include, url\nfrom projekt import views\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\turl(r'^accounts/', include('accounts.urls', namespace=\"accounts\")),\n\turl(r'^main/', include('main.urls', namespace=\"main\")),\n\turl(r'^reviews/', include('reviews.urls', namespace=\"reviews\")),\n\turl(r'^admin/', include(admin.site.urls)),\n\turl(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'accounts/login.html'}),\n\turl(r'^$', views.IndexView.as_view()),\n)\n","sub_path":"projekt/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"101746682","text":"# ActivitySim\n# See full license in LICENSE.txt.\nimport logging\n\nfrom activitysim.core import pipeline\nfrom activitysim.core import inject\nfrom activitysim.core import tracing\n\nfrom activitysim.core.input import read_input_table\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_raw_persons(households):\n\n df = read_input_table(\"persons\")\n\n if inject.get_injectable('households_sliced', False):\n # keep all persons in the sampled households\n df = df[df.household_id.isin(households.index)]\n\n return df\n\n\n@inject.table()\ndef persons(households, trace_hh_id):\n\n df = read_raw_persons(households)\n\n logger.info(\"loaded persons %s\" % (df.shape,))\n\n # replace table function with dataframe\n inject.add_table('persons', df)\n\n pipeline.get_rn_generator().add_channel('persons', df)\n\n if trace_hh_id:\n tracing.register_traceable_table('persons', df)\n tracing.trace_df(df, \"raw.persons\", warn_if_empty=True)\n\n return df\n\n\n# another common merge for persons\n@inject.table()\ndef persons_merged(persons, households, land_use, accessibility):\n return inject.merge_tables(persons.name, tables=[\n persons, households, land_use, accessibility])\n","sub_path":"activitysim/abm/tables/persons.py","file_name":"persons.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"143447174","text":"class Solution(object):\n # xor finds sum without carry\n # and finds all the CARRY, but you need to shift it to the left for one place just like what you do in math\n # then add the carry to the xor result, but since this is addition, you can just use recursion and set a = carry_without_carry, b = carry \n\n def sum_using_xor(self, a, b):\n \"\"\"\n :type a: int\n :type b: int\n :rtype: int\n \"\"\"\n while(b != 0):\n sum_without_carry = a ^ b\n carry = ( a & b ) << 1\n a = sum_without_carry\n b = carry\n return a\n\nif __name__ == \"__main__\":\n 
print(Solution().sum_using_xor(13,7))\n","sub_path":"sum_using_xor.py","file_name":"sum_using_xor.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"221811498","text":"import urllib\nimport urllib.request\nimport requests\nimport config\nimport telebot\nimport ssl\nimport sqlite3\nimport time\n \ndef add_card_to_table(user_id, card, link_for_card):\n \"\"\"Функция добавляет карту в базу данных соответствующего пользователя или в КЕШ\"\"\" \n conn = sqlite3.connect(\"id_database.db\") # подключаемся к базе\n cursor = conn.cursor()\n table_name = 'card_base_' + user_id # формулируем название таблицы\n card_name = card # название карты\n link_card = link_for_card # ссылка на карту\n new_card = [(card_name, link_card)] # создаем ячейку для записи в таблицу\n sql = \"INSERT INTO {0} VALUES (?,?)\" # формулируем SQL запрос\n sql = sql.format(table_name) # редактируем SQL запрос\n cursor.executemany(sql, new_card) # добавляем запись в таблицу\n conn.commit()\n \ndef check_card_from_table(user_id, card):\n \"\"\"функция проверяет наличие карты в базе данных пользователя или КЭШе.\n В случае отсутствия возвращает пустой массив.\n В случае наличия возвращает непустой массив\"\"\"\n\n conn = sqlite3.connect(\"id_database.db\") # подключаемся к базе\n cursor = conn.cursor()\n table_name = 'card_base_' + user_id # формулируем название таблицы\n\n search_name = card # задаем имя для поиска\n sql = \"SELECT * FROM {0} WHERE card_name=?\" # ищем записи с заданным именем\n sql = sql.format(table_name)\n cursor.execute(sql, [(search_name)])\n ans = cursor.fetchall() # найденные записи заносим в массив\n return ans\n\ndef get_user_cards(user_id):\n \"\"\"Функция выводит список карт пользователя. Если у пользователя нет карт то выводит соответствующее сообщение\"\"\"\n \n conn = sqlite3.connect(\"id_database.db\") # подключаемся к базе\n cursor = conn.cursor()\n table_name = 'card_base_' + user_id # формулируем название таблицы \n request = \"SELECT rowid, * FROM {0} ORDER BY card_name\" # создаем запрос на карты из заданной таблицы\n request = request.format(table_name) \n res = [] # создаем п��стой массив для результатов\n for row in cursor.execute(request): # фором выбираем в массив названия карт из таблицы\n res.append(row[1]) \n if len(res) == 0: # если массив пустой выводим сообщение что нет карт\n report = 'У вас в наличии нет карт. Для приобретения карты используйте команду /Request главного меню. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n return report\n elif len(res) > 0:\n report = 'У вас есть следующие карты: \\n'\n for element in res:\n report += element + '\\n'\n report += 'Для просмотра изображения карт воспользуйтесь коммандой /Request главного меню. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n return report\n \ndef Card_request(card, user_id):\n \"\"\"\"Функция получает на входе запрашиваемую карту и ID пользователя. Cначала проверяет хватает ли у пользователя\n денег на покупку карты. Если нет возвращает сообщение о том что средств не достаточно. Если денег хватает\n проверяет есть ли данная карта в КЕШе. Если карта есть в КЕШе - добавляем карту в базу карт пользователя,\n списываем стоимость карты с баланса пользователя и выводим на экран изображение карты. Если карты нет в\n КЕШе проверяем наличие карты на сервере SkryFall. Если карты нет на сервере - выводим сообщение о том, что\n такой карты не существует. 
Если карта найдена - сохраняем ее в КЕШ, сохраняем в базе карт пользователя,\n списываем стоимость карты с баланса пользователя и выводим изображение на экран\n \"\"\"\n price = 30 # обозначаем цену за карту равной 30\n flag_1 = check_card_from_table(user_id, card) # проверяем есть ли карта в базе пользователя\n if len(flag_1) > 0: # если длина массива больше 0 то карта есть в базе пользователя. выводим сообщения\n report_1 = 'Эта карта у вас уже есть. Вывожу изображение'\n img = open(flag_1[0][1], 'rb')\n report_2 = 'Для запроса еще 1 карты введите команду /Request. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n return report_1, img, report_2\n\n elif len(flag_1) == 0: # если длина массива равна 0 то карты нет в базе пользователя. проверяем баланс.\n balance = get_balance(user_id) # берем значение баланса из файла\n if balance >= price : # если баланс больше либо равен цене карты - запускаем дальнейшие проверки\n flag_2 = check_card_from_table('cache', card) # сначала проверяем есть ли карта в КЭШе. на выходе получаем массив \n if len(flag_2) > 0: # если длина массива больше 0 то карта есть в КЭШе. добавляем карту в базу пользователя, обновляем баланс и выводим сообщения\n add_card_to_table(user_id, card, flag_2[0][1]) # добавляем карту в базу пользователя\n balance -=price\n update_balance(user_id, balance) # обновляем баланс пользователя\n report_1 = 'Вы приобрели карту {0}. С вашего баланса списано 30$. Вывожу изображение'\n report_1 = report_1.format(card)\n img = open(flag_2[0][1], 'rb') # выводим изображение приобретенной карты\n report_2 = 'Для запроса еще 1 карты введите команду /Request. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n return report_1, img, report_2\n elif len(flag_2) == 0: # если длина массива равна 0 то карты нет в КЭШе. проверяем карту на сервера SkryFall\n try:\n r = requests.get(url='https://api.scryfall.com/catalog/card-names') # через API запрос получаем словарь в формате JSON\n data= r.json() # переменной присваиваем полученный словарь\n external_list_of_maps = data['data'] # присваиваем переменной массив с названиями карт\n \n except Exception as e:\n print('вызван except!')\n report_1 ='Сервер временно недоступен. Повторите попытку позже. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n img = False\n report_2 = False\n return report_1, img, report_2\n \n if card in external_list_of_maps: # если карта найдена во внешней базе\n url = 'https://api.scryfall.com/cards/named?exact={0}' \n url = url.format(card)\n time.sleep(0.1) # ставим задержку в 100 милисекунд на запрос\n r = requests.get(url) # отправляем запрос на сервер\n data = r.json() # получаем JSON с данными по карте\n link_curr = data['image_uris']['large'] # получаем ссылку на карту\n ssl._create_default_https_context = ssl._create_unverified_context # данная строка помогает устранить ошибку SSL_SHAKEHANDS_FAIL\n img = urllib.request.urlopen(link_curr).read()\n path ='images/{0}.jpg'\n path = path.format(card)\n out = open(path, 'wb') # сохраняем карту \n out.write(img)\n out.close \n add_card_to_table('cache', card, path) # добавляем скачанную карту в КЭШ\n add_card_to_table(user_id, card, path) # добавляем скачанную карту в базу карт пользователя\n balance -=price\n update_balance(user_id, balance) # обновляем баланс пользователя\n report_1 = 'Вы приобрели карту {0}. С вашего баланса списано 30$. 
Вывожу изображение'\n report_1 = report_1.format(card)\n img = open(path, 'rb') # выводим изображение приобретенной карты\n report_2 = 'Для запроса еще 1 карты введите команду /Request. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n return report_1, img, report_2 \n elif card not in external_list_of_maps:\n report_1 = 'Такой карты не существует. Для запроса еще 1 карты введите команду /Request. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n img = False\n report_2 = False\n return report_1, img, report_2 \n else: # если баланс меньше цены карты - информируем пользователя о том, что не достаточно средств\n report_1 = 'У вас недостаточно средств для приобретения карты. Для пополнения баланса обратитесь к администратору. Для выхода в меню просмотрщика карт введите команду /MTG_menu'\n img = False\n report_2 = False\n return report_1, img, report_2\n \ndef check_user(user_id):\n \"\"\"Функция проверяет наличие пользователя в базе данных. Если пользователь есть то ничего не делает.\n Если пользователя нет - создает запись пользователя в базе, балланс равный 300 и таблицу карт пользователя\"\"\"\n \n conn = sqlite3.connect(\"id_database.db\") # открываем базу\n cursor = conn.cursor() \n search_name = user_id # задаем имя для поиска(т.е. вводим user_id)\n sql = \"SELECT balance FROM customers WHERE user_id=?\" # ищем запись с заданным именем\n cursor.execute(sql, [(search_name)])\n ans = cursor.fetchone() \n if ans == None: # если результат поиска None значит такого пользователя нет. запускаем процесс добавления пользователя в базу\n balance = 300\n new_id = [(user_id, balance, 'user')] # добавляем user_ID, балансе равный 300 и статус 'юзер'\n cursor.executemany(\"INSERT INTO customers VALUES (?,?,?)\", new_id)\n conn.commit()\n # далее создаем пустую таблицу карт пользователя. в ней будут лежать карты пользователя и ссылки на них\n table_name = 'card_base_' + user_id # создаем название новой таблицы \n new_table = \"\"\"CREATE TABLE {0}\n (card_name text, link_card text)\n \"\"\" # создаем таблицу\n new_table = new_table.format(table_name) \n cursor.execute(new_table)\n conn.commit()\n\ndef get_balance(user_id):\n \"\"\"Функция получает из базы данных значение баланса пользователя по его ID\"\"\"\n conn = sqlite3.connect(\"id_database.db\") # открываем базу\n cursor = conn.cursor() \n search_name = user_id # задаем имя для поиска(т.е. 
вводим user_id)\n sql = \"SELECT balance FROM customers WHERE user_id=?\" # ищем запись с заданным именем\n cursor.execute(sql, [(search_name)])\n ans = cursor.fetchone() # найденную запись заносим в переменную\n balance_report = ans[0] \n return balance_report\n\ndef update_balance(user_id, new_balance):\n \"\"\"Функция записывает в базу данных новое значение баланса по ID пользователя\"\"\"\n conn = sqlite3.connect(\"id_database.db\") # открываем базу\n cursor = conn.cursor() \n user_id_in_quotes = '\"' + user_id + '\"' # создаем кавычки для ID\n sql = \"\"\"\n UPDATE customers \n SET balance ={0} \n WHERE user_id={1} \"\"\" # формулируем запрос \n sql= sql.format(str(new_balance),str(user_id_in_quotes)) # форматируем запрос\n cursor.execute(sql)\n conn.commit() # запускаем запрос\n\n\n\n \n","sub_path":"mag_gath.py","file_name":"mag_gath.py","file_ext":"py","file_size_in_byte":15369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"614941981","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/14 PM3:09\n# @Author : Qiming Zhang\n# @File : LongestSubstringwithAtMostKDistinctCharacters\nimport collections\nclass Solution(object):\n def lengthOfLongestSubstringKDistinct(self, s, k):\n \"\"\"\n :type s: str\n :type k: int\n :rtype: int\n \"\"\"\n def valid(dic, k):\n cnt = 0\n for key in dic:\n if dic[key] > 0:\n cnt += 1\n return True if cnt <= k else False\n ans = 0\n j = 0\n dic = collections.defaultdict(int)\n for i in range(len(s)):\n while j < len(s) + 1 and valid(dic, k):\n ans = max(j - i, ans)\n if j < len(s):\n dic[s[j]] += 1\n j += 1\n dic[s[i]] -= 1\n return ans\n","sub_path":"Array/LongestSubstringwithAtMostKDistinctCharacters.py","file_name":"LongestSubstringwithAtMostKDistinctCharacters.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"357966816","text":"import numpy as np\nimport sys\n\n\ndef sigmoid(Z):\n \"\"\"\n Compute the sigmoid of Z.\n\n Arguments:\n Z -- Output of the linear layer, of any shape.\n\n Return:\n A -- Post-activation parameter, of the same shape as Z.\n \"\"\"\n A = 1.0 / (1.0 + np.exp(-Z))\n assert(A.shape == Z.shape)\n\n return A\n\n\ndef relu(Z):\n \"\"\"\n Implement the Relu function.\n\n Arguments:\n Z -- Output of the linear layer, of any shape.\n\n Returns:\n A -- Post-activation parameter, of the same shape as Z.\n \"\"\"\n A = np.maximum(0, Z)\n assert(A.shape == Z.shape)\n\n return A\n\n\ndef tanh(Z):\n \"\"\"\n Implement the tanh function.\n\n Arguments:\n Z -- Output of the linear layer, of any shape.\n\n Returns:\n A -- Post-activation parameter, of the same shape as Z.\n \"\"\"\n A = np.tanh(Z)\n assert(A.shape == Z.shape)\n\n return A\n\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n\n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n\n np.random.seed(2)\n parameters = {}\n L = len(layer_dims)\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))\n assert(parameters['b' + str(l)].shape == 
(layer_dims[l], 1))\n\n return parameters\n\n\ndef linear_forward(A_prev, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from ***previous*** layer (or input data).\n W -- weights matrix.\n b -- bias vector.\n\n A.shape == (size of previous layer, number of examples).\n W.shape == (size of current layer, size of previous layer).\n b.shape == (size of the current layer, 1).\n\n Returns:\n Z -- the input of the activation function, the \"pre-activation parameter\".\n \"\"\"\n\n Z = np.dot(W, A_prev) + b\n assert(Z.shape == (W.shape[0], A_prev.shape[1]))\n\n return Z\n\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer.\n\n Arguments:\n A_prev -- activations from previous layer (or input data):\n W -- weights matrix.\n b -- bias vector.\n activation -- the activation function: \"sigmoid\", \"tanh\" or \"relu\".\n\n A_prev.shape == (size of previous layer, number of examples).\n W.shape == (size of current layer, size of previous layer).\n b.shape == (size of the current layer, 1).\n\n Returns:\n A -- the post-activation value, which is the output of activation function.\n cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n stored for computing backward propagation.\n\n # A = []\n # linear_cache = []\n # activation_cache = []\n\n # Inputs: \"A[l-1]\", \"W[l]\", b[l]\".\n # Outputs: \"A[l]\", \"activation_cache\".\n # activation_cache == (linear_cache, activation_cache).\n # linear_cache == (A[l-1], W[l], b[l]).\n # activation_cache == Z[l].\n \"\"\"\n\n assert(isinstance(activation, str))\n\n if activation == \"sigmoid\":\n Z = linear_forward(A_prev, W, b)\n A = sigmoid(Z)\n elif activation == \"relu\":\n Z = linear_forward(A_prev, W, b)\n A = relu(Z)\n elif activation == \"tanh\":\n Z = linear_forward(A_prev, W, b)\n A = tanh(Z)\n else:\n print(\"invalid activation function!\")\n sys.exit(1)\n\n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n\n return A\n\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation.\n Our L-layer model is: [LINEAR->RELU/TANH]*(L-1)->LINEAR->SIGMOID.\n\n Arguments:\n X -- data.\n X.shape == (input size, number of examples).\n parameters -- output of initialize_parameters_deep().\n\n Returns:\n AL -- the post-activation value from output(Lth) layer.\n \"\"\"\n\n A = X\n L = len(parameters) // 2\n\n # Propagate through the first L-1 layers.\n for l in range(1, L):\n A_prev = A\n A = linear_activation_forward(A_prev, parameters[\"W\" + str(l)], parameters[\"b\" + str(l)], \"sigmoid\")\n\n # Propagate through the output layer.\n AL = linear_activation_forward(A, parameters[\"W\" + str(L)], parameters[\"b\" + str(L)], \"sigmoid\")\n\n return AL\n\n\ndef predict(X, parameters):\n \"\"\"\n Using the learned parameters, to predict a class for each $x\\in X$.\n\n Arguments:\n parameters -- python dictionary containing your parameters.\n X -- input data of size (n_x, m).\n\n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n\n # Predict based on probabilities obtained by forward propagation.\n AL = L_model_forward(X, parameters)\n\n return AL\n\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function.\n\n Arguments:\n AL -- probability vector corresponding to your label predictions.\n Y -- true \"label\" vector.\n\n AL.shape == (1, number of examples).\n\n Returns:\n cost -- cross-entropy cost.\n \"\"\"\n\n 
assert(AL.shape == Y.shape)\n m = Y.shape[1]\n\n # logprobs = loss matrix.\n logprobs = Y * np.log(AL) + (1.0 - Y) * np.log(1.0 - AL)\n cost = - (1.0 / m) * np.sum(logprobs, axis=None)\n cost = np.squeeze(cost)\n assert(isinstance(cost, float))\n\n return cost\n\n\ndef evaluate(AL, test_data):\n predict_data = np.argmax(AL, axis=0)\n assert(predict_data.shape == (AL.shape[1], ))\n compare_results = list(zip(predict_data, test_data))\n\n return sum(int(x == y) for (x, y) in compare_results)\n","sub_path":"forward_propagation.py","file_name":"forward_propagation.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"151111781","text":"import os\nfrom shutil import copyfile\nfrom typing import Any, Type\n\nimport pytest\n\nfrom demisto_sdk.common.constants import DIR_LIST\nfrom demisto_sdk.common.hook_validations.base_validator import BaseValidator\nfrom demisto_sdk.common.hook_validations.dashboard import DashboardValidator\nfrom demisto_sdk.common.hook_validations.incident_field import IncidentFieldValidator\nfrom demisto_sdk.common.hook_validations.layout import LayoutValidator\nfrom demisto_sdk.common.hook_validations.release_notes import ReleaseNotesValidator\nfrom demisto_sdk.common.hook_validations.reputation import ReputationValidator\nfrom demisto_sdk.common.hook_validations.script import ScriptValidator\nfrom demisto_sdk.common.hook_validations.structure import StructureValidator\nfrom demisto_sdk.common.hook_validations.playbook import PlaybookValidator\nfrom demisto_sdk.common.hook_validations.integration import IntegrationValidator\n\nfrom tests.tests_constants import VALID_LAYOUT_PATH, INVALID_LAYOUT_PATH, \\\n VALID_REPUTATION_PATH, INVALID_REPUTATION_PATH, VALID_WIDGET_PATH, INVALID_WIDGET_PATH, VALID_DASHBOARD_PATH, \\\n VALID_SCRIPT_PATH, INVALID_SCRIPT_PATH, INVALID_DASHBOARD_PATH, VALID_INCIDENT_FIELD_PATH, \\\n INVALID_INCIDENT_FIELD_PATH, VALID_INTEGRATION_TEST_PATH, VALID_ONE_LINE_CHANGELOG_PATH, \\\n VALID_ONE_LINE_LIST_CHANGELOG_PATH, VALID_MULTI_LINE_CHANGELOG_PATH, VALID_MULTI_LINE_LIST_CHANGELOG_PATH, \\\n INVALID_ONE_LINE_1_CHANGELOG_PATH, INVALID_ONE_LINE_2_CHANGELOG_PATH, INVALID_ONE_LINE_LIST_1_CHANGELOG_PATH, \\\n INVALID_ONE_LINE_LIST_2_CHANGELOG_PATH, INVALID_MULTI_LINE_1_CHANGELOG_PATH, INVALID_MULTI_LINE_2_CHANGELOG_PATH, \\\n LAYOUT_TARGET, WIDGET_TARGET, DASHBOARD_TARGET, INTEGRATION_TARGET, \\\n INCIDENT_FIELD_TARGET, SCRIPT_TARGET, SCRIPT_RELEASE_NOTES_TARGET, INTEGRATION_RELEASE_NOTES_TARGET, \\\n VALID_TEST_PLAYBOOK_PATH, PLAYBOOK_TARGET, INVALID_PLAYBOOK_PATH, INVALID_PLAYBOOK_ID_PATH, \\\n VALID_INTEGRATION_ID_PATH, INVALID_INTEGRATION_ID_PATH\n\nfrom demisto_sdk.common.hook_validations.widget import WidgetValidator\n\n\nclass TestValidators:\n CREATED_DIRS = list()\n\n @classmethod\n def setup_class(cls):\n print(\"Setups class\")\n for dir_to_create in DIR_LIST:\n if not os.path.exists(dir_to_create):\n cls.CREATED_DIRS.append(dir_to_create)\n os.mkdir(dir_to_create)\n\n @classmethod\n def teardown_class(cls):\n print(\"Tearing down class\")\n for dir_to_delete in cls.CREATED_DIRS:\n if os.path.exists(dir_to_delete):\n os.rmdir(dir_to_delete)\n\n INPUTS_IS_VALID_VERSION = [\n (VALID_LAYOUT_PATH, LAYOUT_TARGET, True, LayoutValidator),\n (INVALID_LAYOUT_PATH, LAYOUT_TARGET, False, LayoutValidator),\n (VALID_WIDGET_PATH, WIDGET_TARGET, True, WidgetValidator),\n (INVALID_WIDGET_PATH, WIDGET_TARGET, False, WidgetValidator),\n (VALID_DASHBOARD_PATH, 
DASHBOARD_TARGET, True, DashboardValidator),\n (INVALID_DASHBOARD_PATH, DASHBOARD_TARGET, False, DashboardValidator),\n (VALID_INCIDENT_FIELD_PATH, INCIDENT_FIELD_TARGET, True, IncidentFieldValidator),\n (INVALID_INCIDENT_FIELD_PATH, INCIDENT_FIELD_TARGET, False, IncidentFieldValidator),\n (INVALID_DASHBOARD_PATH, DASHBOARD_TARGET, False, DashboardValidator),\n (VALID_SCRIPT_PATH, SCRIPT_TARGET, True, ScriptValidator),\n (INVALID_SCRIPT_PATH, SCRIPT_TARGET, False, ScriptValidator),\n (VALID_TEST_PLAYBOOK_PATH, PLAYBOOK_TARGET, True, PlaybookValidator),\n (INVALID_PLAYBOOK_PATH, PLAYBOOK_TARGET, False, PlaybookValidator)\n ]\n\n @pytest.mark.parametrize('source, target, answer, validator', INPUTS_IS_VALID_VERSION)\n def test_is_valid_version(self, source, target, answer, validator):\n # type: (str, str, Any, Type[BaseValidator]) -> None\n try:\n copyfile(source, target)\n structure = StructureValidator(source)\n validator = validator(structure)\n assert validator.is_valid_version() is answer\n finally:\n os.remove(target)\n\n INPUTS_LOCKED_PATHS = [\n (VALID_REPUTATION_PATH, True, ReputationValidator),\n (INVALID_REPUTATION_PATH, False, ReputationValidator),\n ]\n\n @pytest.mark.parametrize('source, answer, validator', INPUTS_LOCKED_PATHS)\n def test_is_valid_version_locked_paths(self, source, answer, validator):\n \"\"\"Tests locked path (as reputations.json) so we won't override the file\"\"\"\n structure = StructureValidator(source)\n validator = validator(structure)\n assert validator.is_valid_version() is answer\n\n @pytest.mark.parametrize('source, target, answer, validator', INPUTS_IS_VALID_VERSION)\n def test_is_file_valid(self, source, target, answer, validator):\n # type: (str, str, Any, Type[BaseValidator]) -> None\n try:\n copyfile(source, target)\n structure = StructureValidator(source)\n validator = validator(structure)\n assert validator.is_valid_file(validate_rn=False) is answer\n finally:\n os.remove(target)\n\n INPUTS_RELEASE_NOTES_EXISTS_VALIDATION = [\n (VALID_SCRIPT_PATH, SCRIPT_TARGET, VALID_ONE_LINE_CHANGELOG_PATH, SCRIPT_RELEASE_NOTES_TARGET,\n ReleaseNotesValidator, True),\n (VALID_SCRIPT_PATH, SCRIPT_TARGET, VALID_ONE_LINE_CHANGELOG_PATH, INTEGRATION_RELEASE_NOTES_TARGET,\n ReleaseNotesValidator, False),\n (VALID_INTEGRATION_TEST_PATH, INTEGRATION_TARGET, VALID_ONE_LINE_CHANGELOG_PATH,\n INTEGRATION_RELEASE_NOTES_TARGET, ReleaseNotesValidator, True),\n (VALID_INTEGRATION_TEST_PATH, INTEGRATION_TARGET, VALID_ONE_LINE_CHANGELOG_PATH,\n SCRIPT_RELEASE_NOTES_TARGET, ReleaseNotesValidator, False)\n ]\n\n @pytest.mark.parametrize('source_dummy, target_dummy, source_release_notes, target_release_notes, '\n 'validator, answer',\n INPUTS_RELEASE_NOTES_EXISTS_VALIDATION)\n def test_is_release_notes_exists(self, source_dummy, target_dummy,\n source_release_notes, target_release_notes, validator, answer, mocker):\n # type: (str, str, str, str, Type[BaseValidator], Any) -> None\n try:\n copyfile(source_dummy, target_dummy)\n copyfile(source_release_notes, target_release_notes)\n mocker.patch.object(ReleaseNotesValidator, 'get_master_diff', side_effect=self.mock_get_master_diff)\n validator = ReleaseNotesValidator(target_dummy)\n assert validator.validate_file_release_notes_exists() is answer\n finally:\n os.remove(target_dummy)\n os.remove(target_release_notes)\n\n @staticmethod\n def create_release_notes_structure_test_package():\n changelog_needed = [\n (VALID_SCRIPT_PATH, 'Script'),\n (VALID_INTEGRATION_TEST_PATH, 'Integration')\n ]\n\n changelog_files_answer = [\n 
(VALID_ONE_LINE_CHANGELOG_PATH, True),\n (VALID_ONE_LINE_LIST_CHANGELOG_PATH, True),\n (VALID_MULTI_LINE_CHANGELOG_PATH, True),\n (VALID_MULTI_LINE_LIST_CHANGELOG_PATH, True),\n (INVALID_ONE_LINE_1_CHANGELOG_PATH, False),\n (INVALID_ONE_LINE_2_CHANGELOG_PATH, False),\n (INVALID_ONE_LINE_LIST_1_CHANGELOG_PATH, False),\n (INVALID_ONE_LINE_LIST_2_CHANGELOG_PATH, False),\n (INVALID_MULTI_LINE_1_CHANGELOG_PATH, False),\n (INVALID_MULTI_LINE_2_CHANGELOG_PATH, False)\n ]\n\n test_package = list()\n\n for (dummy_file, file_type) in changelog_needed:\n for (release_notes_file, answer) in changelog_files_answer:\n if file_type == 'Script':\n test_package.append((dummy_file, SCRIPT_TARGET, release_notes_file,\n SCRIPT_RELEASE_NOTES_TARGET, ReleaseNotesValidator, answer))\n elif file_type == 'Integration':\n test_package.append((dummy_file, INTEGRATION_TARGET, release_notes_file,\n INTEGRATION_RELEASE_NOTES_TARGET, ReleaseNotesValidator, answer))\n\n return test_package\n\n test_package = create_release_notes_structure_test_package.__func__()\n\n @pytest.mark.parametrize('source_dummy, target_dummy, source_release_notes, target_release_notes, '\n 'validator, answer', test_package)\n def test_valid_release_notes_structure(self, source_dummy, target_dummy,\n source_release_notes, target_release_notes, validator, answer, mocker):\n # type: (str, str, str, str, Type[BaseValidator], Any) -> None\n try:\n copyfile(source_dummy, target_dummy)\n copyfile(source_release_notes, target_release_notes)\n mocker.patch.object(ReleaseNotesValidator, 'get_master_diff', side_effect=self.mock_get_master_diff)\n validator = ReleaseNotesValidator(target_dummy)\n assert validator.is_valid_release_notes_structure() is answer\n finally:\n os.remove(target_dummy)\n os.remove(target_release_notes)\n\n @staticmethod\n def mock_get_master_diff():\n return 'Comment.'\n\n INPUTS_IS_ID_EQUALS_NAME = [\n (VALID_SCRIPT_PATH, SCRIPT_TARGET, True, ScriptValidator),\n (INVALID_SCRIPT_PATH, SCRIPT_TARGET, False, ScriptValidator),\n (VALID_TEST_PLAYBOOK_PATH, PLAYBOOK_TARGET, True, PlaybookValidator),\n (INVALID_PLAYBOOK_ID_PATH, PLAYBOOK_TARGET, False, PlaybookValidator),\n (VALID_INTEGRATION_ID_PATH, INTEGRATION_TARGET, True, IntegrationValidator),\n (INVALID_INTEGRATION_ID_PATH, INTEGRATION_TARGET, False, IntegrationValidator)\n ]\n\n @pytest.mark.parametrize('source, target, answer, validator', INPUTS_IS_ID_EQUALS_NAME)\n def test_is_id_equals_name(self, source, target, answer, validator):\n # type: (str, str, Any, Type[BaseValidator]) -> None\n try:\n copyfile(str(source), target)\n structure = StructureValidator(str(source))\n validator = validator(structure)\n assert validator.is_id_equals_name() is answer\n finally:\n os.remove(target)\n","sub_path":"tests/validators_test.py","file_name":"validators_test.py","file_ext":"py","file_size_in_byte":10261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"563595555","text":"\n\nfrom xai.brain.wordbase.verbs._overrate import _OVERRATE\n\n#calss header\nclass _OVERRATES(_OVERRATE, ):\n\tdef __init__(self,): \n\t\t_OVERRATE.__init__(self)\n\t\tself.name = \"OVERRATES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"overrate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_overrates.py","file_name":"_overrates.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"528388980","text":"from flask_restful import Resource, 
reqparse\nfrom flask_jwt import jwt_required, current_identity\nfrom models.item import ItemModel\n\n\nclass Item(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('price', type=float, required=True, help='Price is required')\n parser.add_argument('store_id', type=int, required=True, help='Store_id is required')\n\n @jwt_required()\n def get(self, name):\n try:\n item = ItemModel.get_item(name)\n except:\n return {'message': 'Could not get item, because of database error'}, 500\n\n if item:\n return item.json(), 200\n return {'message': f'Item {name} is not in the database'}, 404\n \n def post(self, name):\n try:\n item = ItemModel.get_item(name)\n except:\n return {'message': 'Could not create item, because of database error'}, 500\n \n if item:\n return {'message': f'Item {name} is already in the database'}, 400\n \n data = Item.parser.parse_args()\n new_item = ItemModel(name, data['price'], data['store_id'])\n\n try:\n new_item.save_item()\n except:\n return {'message': 'Could not save item, because of database error'}, 500\n\n return {'message': f'Item {name} is successfully saved'}, 201\n\n def put(self, name):\n data = Item.parser.parse_args()\n\n try:\n item = ItemModel.get_item(name)\n except:\n return {'message': 'Could not save item, because of database error'}, 500\n\n if item:\n item.price = data['price']\n item.store_id = data['store_id']\n else:\n item = ItemModel(name, data['price'], data['store_id'])\n \n try:\n item.save_item()\n except:\n return {'message': 'Could not save item, because of database error'}, 500\n \n return {'message': f'Item {name} is successfully saved'}, 201 \n\n def delete(self, name):\n try:\n item = ItemModel.get_item(name)\n except:\n return {'message': 'Could not get item, because of database error'}, 500\n\n if item:\n try:\n item.delete_item()\n except:\n return {'message': 'Could not delete item, because of database error'}, 500\n\n return {'message': f'Item {name} is successfully deleted'}, 201\n else:\n return {'message': f'Item {name} is not in database'}, 400\n\n\nclass Items(Resource):\n \n @jwt_required()\n def get(self):\n try:\n items = ItemModel.get_items()\n except:\n return {'message': 'Could not get items, because of database error'}, 500\n\n return {\"items\": [item.json() for item in items]}, 200","sub_path":"section6/resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"385828422","text":"'''******************************************************************************\r\n*\r\n* FILE NAME: \r\n* bta_serv_bssparam_bssparameter.py\r\n*\r\n* DESCRIPTION: \r\n* The file/module provides the fetching of BSS parameters for\r\n* BCF, BTS, TRX object by executing the required commands.\r\n* \r\n*\tREVISION HISTORY:\r\n*\r\n* Date Author REASON\r\n* 3th Feb 2005 Def Pronto XYZ Fix\r\n* 5th Jan 2005 Abc First Draft\r\n*\r\n* Copyright 2009, ARICENT\r\n*\r\n******************************************************************************'''\r\n\r\n\r\nfrom xml.dom import minidom\r\nimport time\r\nimport os\r\n\r\n#importing user defined modules\r\nimport bta_util_constants as CONSTANTS\r\nfrom bta_util_xml_parser import obj_xml_parser as xmlprsr\r\n\r\n\r\n\r\n###########################################\r\n# MODULE CONSTANTS #\r\n###########################################\r\nMODULE_NAME = \"bssparameters\"\r\nNULL_EVENT = 0\r\n\r\n'''\r\nXML TAGS USED IN CONFIGURATION XML 
FILE\r\n'''\r\nTAG_BSS_PARAMETER_SET = \"bss_patameter_set\"\r\nATT_BSC_VERSION = \"bsc_ver\"\r\nATT_COMMAND_ID = \"command_id\"\r\nATT_RESP_PAR_NAME = \"resp_par_name\"\r\nTAG_PARAMETER = \"parameter\"\r\nATT_NAME = \"name\"\r\nTAG_MOC_CLASS = \"moc_class\"\r\nTAG_DESCRIPTION = \"description\"\r\nTAG_DATA_TYPE = \"data_type\"\r\nTAG_DB_TABLE = \"db_table\"\r\n\r\n\r\nTAG_COMMAND = \"command\"\r\nATT_ID = \"id\"\r\nATT_OBJECT_TYPE = \"object_type\"\r\nATT_PAR_MAPPING_LIST = \"par_map_resp_to_cmd\"\r\n\r\nNULL_STRING = \"\"\r\nPARAM_SEPARATOR = \",\"\r\nMAPPING_SEPARATOR = \":\"\r\n\r\nOBJECT_TYPE_BCF = \"BCF\"\r\nOBJECT_TYPE_BTS = \"BTS\"\r\nOBJECT_TYPE_TRX = \"TRX\"\r\n\r\n#tags used in instrument DB format\r\nTAG_BSS_PARAMETERS = \"BSS_PARAMETERS\"\r\nTAG_PAR_NAME = \"par_name\"\r\nTAG_PAR_VALUE = \"par_value\"\r\nTAG_PAR_TYPE = \"par_type\"\r\n\r\n#tags used in DBSchema for table of BTS, TRX, BCF Level parameters\r\nTAG_DYNAMIC_PARAMETERS = \"dynamic_parameters\"\r\nTAG_LOG_TIME_STAMP = \"log_time\"\r\nTAG_BSC_CNUMBER = \"bsc_cnumber\"\r\nTAG_EXECUTION_ID = \"execution_id\"\r\nTAG_MOC_CLASS = \"moc_class\"\r\n'''\r\nClass : clsBSSParamParameter\r\nThis class reads all the values of bss parameter. e.g. parameter data type, description etc.\r\n'''\r\nclass clsBSSParamParameter:\r\n def __init__( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * __init__( self )\r\n *\r\n * DESCRIPTION: \r\n * The constructor of the class.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n self.command_id = None\r\n self.name = CONSTANTS.EMPTY_STRING\r\n self.resp_name = CONSTANTS.EMPTY_STRING\r\n self.moc_class = CONSTANTS.EMPTY_STRING\r\n self.desc = CONSTANTS.EMPTY_STRING\r\n self.data_type = CONSTANTS.EMPTY_STRING \r\n self.db_table = CONSTANTS.EMPTY_STRING\r\n\r\n def readXMLFileParameters( self, xml_node ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * readXMLFileParameters( self, xml_node )\r\n *\r\n * DESCRIPTION: \r\n * The function reads the properties of the parameter node.\r\n * \r\n * INPUT:\r\n * xml_node - The parameter node containing the parameter properties. \r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n if xml_node:\r\n self.command_id = xmlprsr.getAttributeNodeValue( ATT_COMMAND_ID, xml_node, CONSTANTS.TYPE_INT )\r\n self.name = xmlprsr.getAttributeNodeValue( ATT_NAME, xml_node )\r\n self.resp_name = xmlprsr.getAttributeNodeValue( ATT_RESP_PAR_NAME, xml_node )\r\n self.moc_class = xmlprsr.getFirstElementNodeData( TAG_MOC_CLASS, xml_node )\r\n self.desc = xmlprsr.getFirstElementNodeData( TAG_DESCRIPTION, xml_node )\r\n self.data_type = xmlprsr.getFirstElementNodeData( TAG_DATA_TYPE, xml_node )\r\n self.db_table = xmlprsr.getFirstElementNodeData( TAG_DB_TABLE, xml_node )\r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsBSSParamParameter.readXMLFileParameters, \" + str(detail))\r\n\r\n def printParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * printParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * The function prints the properties of the parameter node.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n print(\"---- Parameter Values -----\")\r\n print(\"MOC : \", self.moc_class)\r\n print(\"Description : \", self.desc)\r\n print(\"Data type : \", self.data_type)\r\n print(\"DB Table : \", self.db_table)\r\n\r\n'''\r\nClass : clsBSSParamParameter\r\nThis class reads all the values of bss parameter. e.g. 
parameter data type, description etc.\r\n'''\r\nclass clsBSSParamCommand:\r\n def __init__( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * __init__( self )\r\n *\r\n * DESCRIPTION: \r\n * The constructor of the class.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n self.id = None\r\n self.name = CONSTANTS.EMPTY_STRING\r\n self.object_type = CONSTANTS.EMPTY_STRING\r\n self.dict_param_map = {} #a dictionary of mapping of Par_Name (used in BSCReponse parsing) to Par_Name (used in MML command)\r\n\r\n def readXMLFileParameters( self, xml_node ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * readXMLFileParameters( self, xml_node )\r\n *\r\n * DESCRIPTION: \r\n * The function parses the command properties.\r\n * \r\n * INPUT:\r\n * xml_node - The command node\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n ''' \r\n try:\r\n if xml_node:\r\n self.id = xmlprsr.getAttributeNodeValue( ATT_ID, xml_node, CONSTANTS.TYPE_INT )\r\n self.name = xmlprsr.getElementNodeData( xml_node )\r\n self.object_type = xmlprsr.getAttributeNodeValue( ATT_OBJECT_TYPE, xml_node )\r\n par_list = xmlprsr.getAttributeNodeValue( ATT_PAR_MAPPING_LIST, xml_node )\r\n if par_list:\r\n list_params = par_list.split( PARAM_SEPARATOR )\r\n for param_map in list_params:\r\n resp_param_name, cmd_param_name = param_map.split( MAPPING_SEPARATOR )\r\n self.dict_param_map[ resp_param_name ] = cmd_param_name\r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsBSSParamCommand.readXMLFileParameters, \" + str(detail))\r\n \r\n def printParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * printParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * The function prints the command properties.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n ''' \r\n print(\"---- BSS Command Values -----\")\r\n print(\"ID : \", self.id)\r\n print(\"name : \", self.name)\r\n print(\"object_type : \", self.object_type)\r\n print(\"list_params : \", str(self.dict_param_map))\r\n\r\n'''\r\nClass : clsBSSParamParameterSet\r\nThis class reads the bss parameters from bssparameters.xml file.\r\n'''\r\nclass clsBSSParamParameterSet:\r\n def __init__( self, xml_file_name ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * __init__( self )\r\n *\r\n * DESCRIPTION: \r\n * The constructor of the class.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n self.file_name = xml_file_name \r\n self.bsc_version = \"\"\r\n self.list_commands = []\r\n self.dict_parameters = {} #dictionary contains the parameter name as key and Object of parameter as Value, {'omu_bit_rate':obj_parameter_class}\r\n\r\n def readXMLFileParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * readXMLFileParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * The function parses the file parameters.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n xml_doc = minidom.parse( self.file_name )\r\n bss_param_set_nodes = xml_doc.getElementsByTagName( TAG_BSS_PARAMETER_SET )\r\n \r\n if bss_param_set_nodes:\r\n self.bsc_version = xmlprsr.getAttributeNodeValue( ATT_BSC_VERSION,\\\r\n bss_param_set_nodes[0] )\r\n \r\n param_node_list = xml_doc.getElementsByTagName( TAG_PARAMETER )\r\n \r\n for param_node in param_node_list:\r\n param_name = xmlprsr.getAttributeNodeValue( ATT_NAME, param_node ) \r\n self.dict_parameters[ param_name ] = clsBSSParamParameter()\r\n 
self.dict_parameters[ param_name ].readXMLFileParameters( param_node )\r\n\r\n cmd_node_list = xml_doc.getElementsByTagName( TAG_COMMAND )\r\n for cmd_node in cmd_node_list:\r\n obj_command = clsBSSParamCommand()\r\n obj_command.readXMLFileParameters( cmd_node )\r\n self.list_commands.append( obj_command ) \r\n except Exception as detail:\r\n raise Exception(MODULE_NAME +\\\r\n \".clsBSSParamParameterSet.readXMLFileParameters, \" +\\\r\n str(detail))\r\n\r\n def printParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * printParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * The function prints the file parameters.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n print(\"----- BSS Paramater Set -----\")\r\n print(\"BSC Version = \", self.bsc_version)\r\n for param_name, obj_param in self.dict_parameters.items():\r\n print(\"Parameter Name : \", param_name) \r\n obj_param.printParameters()\r\n for obj_command in self.list_commands:\r\n print(obj_command.printParameters())\r\n\r\n'''\r\nClass : clsBSSParamParameterSetController\r\nThis class sends the command to BSC and parse the response. Also populates the BSS Parameters into their respective tables.\r\n'''\r\nclass clsBSSParamParameterSetController:\r\n def __init__( self, xml_file_name, bsc_if_handler, xml_creator=None,\\\r\n db_handler=None, logger=None ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * __init__( self, xml_file_name, bsc_if_handler, xml_creator=None,\\\r\n * db_handler=None, logger=None )\r\n *\r\n * DESCRIPTION: \r\n * The constructor of the class.\r\n * \r\n * INPUT:\r\n * xml_file_name - The name of the clsBSSParamParameters.xml fle.\r\n * bsc_if_handler - The object of bscfunctions.BSCCommunicator class.\r\n * xml_creator - The xml creator for creating xml string.\r\n * db_handler - The db_handler for logging the query.\r\n * logger - The logger instance to log into log file.\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n ''' \r\n self.bsc_if_handler = bsc_if_handler \r\n self.obj_bss_param_set = None\r\n self.dict_populate_params = {} #{MOC : [table_fields_values, table_fields_values]}\r\n self.dict_cmd_run_with_params = {} #{Command ID : [{bts_id:10, trx_id:1}, {bts_id:10, trx_id:2}]}\r\n self.dict_command_response = {} #{Command ID : [Parsed_Response, Parsed_Response]}\r\n self.xml_creator = xml_creator\r\n self.db_handler = db_handler\r\n self.execution_id = 0\r\n self.logger = logger\r\n self.file_name = xml_file_name\r\n\r\n def readXMLFileParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * readXMLFileParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * This function reads all the bss parameters from their respective xml file\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n self.obj_bss_param_set = clsBSSParamParameterSet( self.file_name )\r\n self.obj_bss_param_set.readXMLFileParameters()\r\n except Exception as detail:\r\n raise Exception(MODULE_NAME +\\\r\n \".clsBSSParamParameterSetController.readXMLFileParameters, \"\\\r\n + str( detail ))\r\n\r\n def resetToDefaults( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * resetToDefaults( self )\r\n *\r\n * DESCRIPTION: \r\n * This function restores the class variable to default.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n self.dict_populate_params = {}\r\n self.dict_cmd_run_with_params = {}\r\n self.dict_command_response = {}\r\n \r\n def 
runBSSParameterCommands( self, execution_id, obj_bts_site ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * runBSSParameterCommands( self, execution_id, obj_bts_site )\r\n *\r\n * DESCRIPTION: \r\n * This function executes all the commands defined in bssparameters.xml file.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n ''' \r\n try:\r\n self.execution_id = execution_id\r\n self.resetToDefaults()\r\n dict_cmd_params = {}\r\n for obj_command in self.obj_bss_param_set.list_commands: #iterate through all the commands defined in bssparameters.xml\r\n if obj_command.object_type == OBJECT_TYPE_BCF:\r\n dict_cmd_params = self.getCommandParametersDict( obj_command.dict_param_map,\\\r\n obj_bts_site.bcf_object)\r\n self.sendMMLAndParseResponse(obj_command, dict_cmd_params, obj_bts_site) #send command to BSC\r\n elif obj_command.object_type == OBJECT_TYPE_BTS:\r\n for sector_index in range(len(obj_bts_site.bts_list)):\r\n dict_params = obj_bts_site.bts_list[sector_index]\r\n dict_cmd_params = self.getCommandParametersDict( obj_command.dict_param_map,\\\r\n dict_params )\r\n \r\n dict_cmd_params[CONSTANTS.BTS_ID_KEY] = dict_params[CONSTANTS.BTS_ID_KEY]\r\n dict_cmd_params[CONSTANTS.PAR_NAME_SECTOR_INDEX] = sector_index + 1\r\n \r\n self.sendMMLAndParseResponse(obj_command, dict_cmd_params, obj_bts_site)\r\n elif obj_command.object_type == OBJECT_TYPE_TRX:\r\n for sector_index in range(len(obj_bts_site.bts_list)):\r\n bts_id = obj_bts_site.bts_list[sector_index][CONSTANTS.BTS_ID_KEY]\r\n trx_index = 1\r\n for dict_params in obj_bts_site.trx_list: \r\n if dict_params[CONSTANTS.BTS_ID_KEY] == bts_id:\r\n dict_cmd_params = self.getCommandParametersDict(obj_command.dict_param_map, dict_params)\r\n \r\n dict_cmd_params[CONSTANTS.BTS_ID_KEY] = bts_id\r\n dict_cmd_params[CONSTANTS.PAR_NAME_SECTOR_INDEX] = sector_index + 1\r\n dict_cmd_params[CONSTANTS.TRX_ID_KEY] = dict_params[CONSTANTS.TRX_ID_KEY]\r\n dict_cmd_params[CONSTANTS.PAR_NAME_TRX_INDEX] = trx_index\r\n \r\n self.sendMMLAndParseResponse(obj_command, dict_cmd_params, obj_bts_site)\r\n trx_index = trx_index + 1\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.runBSSParameterCommands, \" + str(detail)\r\n\r\n def sendMMLAndParseResponse( self, obj_command, dict_cmd_params, obj_bts_site ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * sendMMLAndParseResponse( self, obj_command, dict_cmd_params, obj_bts_site )\r\n *\r\n * DESCRIPTION: \r\n * This function sends the command to BSC and reads the response.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n action_handle = None\r\n list_parsed_response_object = []\r\n dict_cmd_params[CONSTANTS.BCF_ID_KEY] = obj_bts_site.bcf_object[CONSTANTS.BCF_ID_KEY]\r\n dict_cmd_params[CONSTANTS.PAR_NAME_SITE_INDEX] = self.bsc_if_handler.ta_config.site_index \r\n \r\n self.bsc_if_handler.sendCommand(action_handle, obj_command.name, dict_cmd_params, NULL_EVENT)\r\n if self.bsc_if_handler.bsc_command_response: \r\n self.bsc_if_handler.obj_parser.parseBSCResponse(self.bsc_if_handler.bsc_command_response)\r\n list_parsed_response_object = self.bsc_if_handler.obj_parser.getAllCommandObjects(self.bsc_if_handler.bsc_cmd_cnt.getCommandByKey(obj_command.name).id)\r\n \r\n if not (obj_command.id in self.dict_command_response):\r\n self.dict_command_response[obj_command.id] = []\r\n self.dict_cmd_run_with_params[obj_command.id] = []\r\n\r\n 
self.dict_command_response[obj_command.id].append(list_parsed_response_object)\r\n self.dict_cmd_run_with_params[obj_command.id].append(dict_cmd_params)\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.sendMMLAndParseResponse, \" + str(detail)\r\n\r\n def traverseAllBSSParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * traverseAllBSSParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * This function checks for all the bss pramaters.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n dict_bss_param_xml = {}\r\n for par_name, obj_param in self.obj_bss_param_set.dict_parameters.items():\r\n list_obj_parsed_resp = self.dict_command_response[obj_param.command_id]\r\n for parsed_resp_index in range(len(list_obj_parsed_resp)):\r\n obj_parsed_resp = list_obj_parsed_resp[parsed_resp_index]\r\n for parsed_object in obj_parsed_resp:\r\n for dict_params in parsed_object.objects:\r\n #print dict_params\r\n if obj_param.resp_name in dict_params:\r\n dict_cmd_run_with_param = self.dict_cmd_run_with_params[obj_param.command_id][parsed_resp_index]\r\n \r\n self.makeBSSParametersDict(dict_cmd_run_with_param, obj_param, dict_params[obj_param.resp_name])\r\n break\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.traverseAllBSSParameters, \" + str(detail)\r\n\r\n def makeBSSParametersDict( self, dict_cmd_run_with_param, obj_param, par_value ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * makeBSSParametersDict( self, dict_cmd_run_with_param, obj_param, par_value )\r\n *\r\n * DESCRIPTION: \r\n * This function makes the BSS params dict on the basis of theri MOC.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n par_moc = obj_param.moc_class\r\n if par_moc in self.dict_populate_params:\r\n for i in range(len(self.dict_populate_params[par_moc])):\r\n dict_pop_params = self.dict_populate_params[par_moc][i]\r\n for run_with_par_name, param_value in dict_cmd_run_with_param.items():\r\n if run_with_par_name in dict_pop_params and dict_pop_params[run_with_par_name] == param_value:\r\n match = 1\r\n else:\r\n match = 0\r\n break\r\n if match == 1:\r\n match_index = i\r\n break \r\n if match == 1:\r\n self.dict_populate_params[par_moc][i][TAG_DYNAMIC_PARAMETERS].append(self.getParameterDict(obj_param.name, par_value))\r\n else:\r\n dict_pop_params = self.addDictOfPopulateParameters(dict_cmd_run_with_param, obj_param.name, par_value)\r\n dict_pop_params[TAG_DB_TABLE] = obj_param.db_table\r\n self.dict_populate_params[par_moc].append(dict_pop_params)\r\n else:\r\n self.dict_populate_params[par_moc] = []\r\n dict_pop_params = self.addDictOfPopulateParameters(dict_cmd_run_with_param, obj_param.name, par_value)\r\n dict_pop_params[TAG_DB_TABLE] = obj_param.db_table\r\n self.dict_populate_params[par_moc].append(dict_pop_params)\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.makeBSSParametersDict, \" + str(detail)\r\n\r\n def addDictOfPopulateParameters( self, dict_cmd_run_with_param, par_name, par_value ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * addDictOfPopulateParameters( self, dict_cmd_run_with_param, par_name, par_value )\r\n *\r\n * DESCRIPTION: \r\n * This function adds the populated parameters dict.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n 
'''\r\n try:\r\n dict_pop_param = dict_cmd_run_with_param.copy()\r\n dict_pop_param[TAG_DYNAMIC_PARAMETERS] = []\r\n dict_pop_param[TAG_DYNAMIC_PARAMETERS].append(self.getParameterDict(par_name, par_value))\r\n return dict_pop_param\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.addDictOfPopulateParameters, \" + str(detail)\r\n\r\n def makeSQLForAllBSSParameters( self ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * makeSQLForAllBSSParameters( self )\r\n *\r\n * DESCRIPTION: \r\n * This function makes the SQL queries for BSS params.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try: \r\n log_time = time.strftime( CONSTANTS.DATE_TIME_FORMAT )\r\n if self.db_handler and self.db_handler.sql_query_creator and self.xml_creator: \r\n dict_db_table_params = {}\r\n for moc_class, list_pop_params in self.dict_populate_params.items(): \r\n for dict_params in list_pop_params:\r\n dict_params[TAG_MOC_CLASS] = moc_class\r\n dict_params[TAG_DYNAMIC_PARAMETERS] = self.xml_creator.getXMLNodeStringFromList(TAG_BSS_PARAMETERS, dict_params[TAG_DYNAMIC_PARAMETERS])\r\n \r\n dict_params[TAG_BSC_CNUMBER] = str(self.bsc_if_handler.ta_config.dict_obj_bts_site_config[self.bsc_if_handler.ta_config.site_index].obj_bsc_config.bsc_cnum)\r\n dict_params[TAG_LOG_TIME_STAMP] = log_time\r\n dict_params[TAG_EXECUTION_ID] = self.execution_id\r\n \r\n sql_query = self.db_handler.sql_query_creator.getSQLCommandAddToTable(dict_params[TAG_DB_TABLE], dict_params)\r\n #self.db_handler.executeQuery(CONSTANTS.DB_TYPE_EXECUTION, str(sql_query), CONSTANTS.DB_OPERATION_INSERT)\r\n self.db_handler.writeQueryIntoFile(CONSTANTS.DB_TYPE_EXECUTION, str(sql_query), CONSTANTS.DB_OPERATION_INSERT)\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.makeSQLForAllBSSParameters, \" + str(detail) \r\n\r\n def getParameterDict( self, par_name, par_value ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * getParameterDict( self, par_name, par_value )\r\n *\r\n * DESCRIPTION: \r\n * This function returns the parameter dict.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n param_dict = {}\r\n param_dict[TAG_PAR_NAME] = par_name \r\n if type(par_value) == type(0):\r\n param_dict[TAG_PAR_TYPE] = 'int'\r\n else:\r\n param_dict[TAG_PAR_TYPE] = 'string'\r\n param_dict[TAG_PAR_VALUE] = str(par_value)\r\n \r\n return param_dict\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME +\\\r\n \".clsBSSParamParameterSetController.getParameterDict, \" +\\\r\n str(detail) \r\n \r\n def getCommandParametersDict( self, dict_param_map, dict_params ):\r\n \r\n '''\r\n * FUNCTION NAME: \r\n * getCommandParametersDict(s elf, dict_param_map, dict_params )\r\n *\r\n * DESCRIPTION: \r\n * This function returns the parameter dict on the basis of\r\n * command parameter name and the parameter name in command\r\n * response.\r\n * \r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n * NOTES: \r\n * \r\n '''\r\n try:\r\n dict_cmd_params = {}\r\n for resp_par_name, cmd_par_name in dict_param_map.items():\r\n if resp_par_name in dict_params:\r\n dict_cmd_params[cmd_par_name] = dict_params[resp_par_name]\r\n return dict_cmd_params\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".clsBSSParamParameterSetController.getCommandParametersDict, \" + str(detail)\r\n raise 
Exception(error_detail)\r\n","sub_path":"kk_ADTRAN/Execution_Framework1_build25/Execution_Framework1_build25/bta_serv_bssparam_bssparameters.py","file_name":"bta_serv_bssparam_bssparameters.py","file_ext":"py","file_size_in_byte":28121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"573205146","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reverseBetween(self, head, m, n):\n \"\"\"\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n \"\"\"\n # start coding at 12:40\n if head == None or head.next == None or n - m < 1: return head;\n dummy = ListNode(0);\n dummy.next = head;\n left = dummy;\n i = 1;\n while i < m:\n left = left.next;\n i += 1;\n right = left.next;\n \n temp = right.next;\n before = right;\n while i < n:\n before.next = temp.next;\n left.next = temp;\n temp.next = right;\n right = temp;\n temp = before.next;\n i += 1;\n return dummy.next;\n # submit at 12:54\n # pass at 12:58","sub_path":"1-100/91-100/python/92_reverse_linked_lise_2.py","file_name":"92_reverse_linked_lise_2.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"244431876","text":"# 宏操作\nfrom flask import Flask,render_template\napp = Flask(__name__)\n\n\nclass MyItem:\n def __init__(self,id,name):\n self.id = id\n self.name = name\n@app.route('/')\ndef index():\n return render_template('macro.txt',\n items1=[MyItem(100,'Hello'),\n {'id':2,'name':'John'},\n {'id':3,'name':'Mary'}],\n items2=[MyItem(200,'World'),\n MyItem(400,'New')\n ],\n items3=(MyItem(800,'123'),\n MyItem(1600,'Horse')\n ))\nif __name__ == '__main__':\n app.run(host = '0.0.0.0', port='1234')","sub_path":"src/Jinja2_Template/demo06.py","file_name":"demo06.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403118608","text":"\n# imports arq requirements.txt\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.graph_objs as go\nimport plotly.offline as py\nimport seaborn as sns\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# estilo de visualizacao dos dados plotados\nplt.style.use('ggplot') \n\n\n# diretorio base\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n#print('BASE_DIR', BASE_DIR)\n\n# join para BASE_DIR com diretorio filho\nDATA_DIR = os.path.join(BASE_DIR, 'data')\n#print('DATA_DIR', DATA_DIR)\n\n# list compreensions do dataset\nfile_names = [i for i in os.listdir(DATA_DIR) if i.endswith('.csv')]\n#print('FILE_NAME', file_names)\n\n# df = dataframe - file_names\nfor i in file_names:\n df = pd.read_csv(os.path.join(DATA_DIR, i))\n\n# tratamento dos dados e mapeamento status\n# Remanejamento = 1\n# Entrega Realizada = 2\nmap_data_status = {'R': 1, 'ER': 2}\ndf['STATUS'] = df['STATUS'].map(map_data_status)\nprint('Alteracao de valores categóricos: \\n', df.head(50))\n\n# tratamento dos dados da regiao\n##map_data_region = {'NORTE': 1, 'NORDESTE': 2, 'CENTRO-OESTE': 3, 'SUDESTE': 4, 'SUL': 5,\n## 'NORTE ': 1, 'NORDESTE ': 2, 'CENTRO-OESTE ': 3, 'SUDESTE ': 4, 'SUL ': 5}\n##df['REGIAO'] = 
df['REGIAO'].map(map_data_region)\n##print('Alteracao de valores categóricos: \\n', df.head(50))\n\n# tratamento dos dados do pragama saude\n##map_data_progsaude = {'COVID-19': 1, 'INFLUENZA': 2}\n##df['PROGSAUDE'] = df['PROGSAUDE'].map(map_data_progsaude)\n##print('Alteracao de valores categóricos: \\n', df.head(50))\n\n# tratamento dos dados do item\n##map_data_item = {'DIFOSFATO DE CLOROQUINA 150MG': 1, 'DIFOSFATO DE CLOROQUINA 150MG ': 1, \n## 'FOSFATO DE OSELTAMIVIR 30MG': 2, 'FOSFATO DE OSELTAMIVIR 30MG ': 2, \n## 'FOSFATO DE OSELTAMIVIR 45MG': 3, 'FOSFATO DE OSELTAMIVIR 45MG ': 3,\n## 'FOSFATO DE OSELTAMIVIR 75MG': 4, 'FOSFATO DE OSELTAMIVIR 75MG ': 4,\n## 'HIDROXICLOROQUINA 200MG': 5, 'HIDROXICLOROQUINA 200MG ': 5}\n##df['ITEM'] = df['ITEM'].map(map_data_item)\n##print('Alteracao de valores categóricos: \\n', df.head(50))\n\n# num e pandas\ndef ver_amostras_das_classes_status():\n sample_1 = np.where(df.loc[df['STATUS'] == 1])\n sample_2 = np.where(df.loc[df['STATUS'] == 2])\n print('\\nAmostra da classe 1 - Remanejamento: ', sample_1)\n print('\\nAmostra da classe 2 - Entrega Realizada: ', sample_2)\n\n# num e pandas\ndef ver_amostras_das_classes_regiao():\n sample_1 = np.where(df.loc[df['REGIAO'] == 1])\n sample_2 = np.where(df.loc[df['REGIAO'] == 2])\n sample_3 = np.where(df.loc[df['REGIAO'] == 3])\n sample_4 = np.where(df.loc[df['REGIAO'] == 4])\n sample_5 = np.where(df.loc[df['REGIAO'] == 5])\n print('\\nAmostra da classe 1 - Norte: ', sample_1)\n print('\\nAmostra da classe 2 - Nordeste: ', sample_2)\n print('\\nAmostra da classe 3 - Centro-Oeste: ', sample_3)\n print('\\nAmostra da classe 4 - Sudeste: ', sample_4)\n print('\\nAmostra da classe 5 - Sul: ', sample_5)\n\n#nume pandas\ndef ver_amostras_das_classes_progsaude():\n sample_1 = np.where(df.loc[df['PROGSAUDE'] == 1])\n sample_2 = np.where(df.loc[df['PROGSAUDE'] == 2])\n print('\\nAmostra da classe 1 - COVID-19: ', sample_1)\n print('\\nAmostra da classe 2 - INFLUENZA: ', sample_2)\n\n# qtde de amostras por classe status\ndef ver_qtde_amostras_por_classe_status():\n vl_remanejamento = len(df.loc[df['STATUS'] == 1])\n vl_entrega_realizada = len(df.loc[df['STATUS'] == 2])\n vl_remanejamento_malaria = len(df.loc[df['STATUS'] == 3])\n print('\\nAmostra da classe 1 - Remanejamento: ', vl_remanejamento)\n print('\\nAmostra da classe 2 - Entrega Realizada: ', vl_entrega_realizada)\n\n# qtde das amostras por classe regiao\ndef ver_qtde_amostras_por_classe_regiao():\n vl_norte = len(df.loc[df['REGIAO'] == 1])\n vl_nordeste = len(df.loc[df['REGIAO'] == 2])\n vl_centro_oeste = len(df.loc[df['REGIAO'] == 3])\n vl_sudeste = len(df.loc[df['REGIAO'] == 4])\n vl_sul = len(df.loc[df['REGIAO'] == 5])\n print('\\nAmostra da classe 1 - Norte: ', vl_norte)\n print('\\nAmostra da classe 2 - Nordeste: ', vl_nordeste)\n print('\\nAmostra da classe 3 - Centro-Oeste: ', vl_centro_oeste)\n print('\\nAmostra da classe 4 - Sudeste: ', vl_sudeste)\n print('\\nAmostra da classe 5 - Sul: ', vl_sul)\n\n# qtde das amostras por classe programa saude\ndef ver_qtde_amostras_por_classe_progsaude():\n vl_covid = len(df.loc[df['REGIAO'] == 1])\n vl_influenza = len(df.loc[df['REGIAO'] == 2])\n print('\\nAmostra da classe 1 - COVID-19: ', vl_covid)\n print('\\nAmostra da classe 2 - INFLUENZA: ', vl_influenza)\n\n\n# conjunto de dados\ndt_feature = df.iloc[:, :-1]\ndt_target = df.iloc[:,-1]\n\nprint('DT_FEATURE: ', dt_feature)\nprint('DT_TARGET: ', dt_target)\n\n\n# plotando os dados histograma de classes\ndef plot_hist():\n plt.hist(df.iloc[:,0], color='b', 
width=.1)\n plt.xlabel('Qtde. Amostras por Região')\n plt.ylabel('Hist da Classe')\n plt.show()\n\n# histograma web offline\ndef target_count():\n trace = go.Bar(x = df['PROGSAUDE'].value_counts().values.tolist(),\n y = [1, 2],\n orientation = 'v',\n text = df['PROGSAUDE'].value_counts().values.tolist(),\n textfont = dict(size=15),\n textposition = 'auto',\n opacity = 0.8, marker=dict(color=['lightskyblue', 'gold'],\n line=dict(color='#000000', width=1.5)))\n layout = dict(title='resultado')\n fig = dict(data=[trace], layout = layout)\n py.iplot(fig)\n\n\n# analise de correlacao\ndef correlation(size=5):\n corr = df.corr()\n fig, ax = plt.subplots(figsize=(size, size))\n ax.matshow(corr)\n plt.xticks(range(len(corr.columns)), corr.columns)\n plt.yticks(range(len(corr.columns)), corr.columns)\n plt.show()\n\n\n# bloxsplot\ndef bloxplot():\n f, ax = plt.subplots(figsize=(11, 5))\n ax.set_facecolor('#fafafa')\n ax.set(xlim=(-0.5, 200))\n plt.ylabel('quantidade')\n plt.title('Distribuição dos Medicamentos')\n ax = sns.boxplot(data=df['QTDE'], orient='v', palette='Set2')\n plt.show()\n\n\n\n# lista de armazenamento de acuracia\naccuracy_PC = []\n\n# vetor beisiano Naive Bayes\naccuracy_NB = []\n\ndef split_model():\n for i in range(5):\n x_train, x_test, y_train, y_test = train_test_split(dt_feature, dt_target, test_size=0.3, random_state=i)\n print('Divisão do conjunto de dados\\n')\n print('X_train: %d\\ny_train: %d\\nX_test: %d\\ny_test: %d\\n' %(len(x_train), len(y_train), len(x_test), len(y_test)))\n print('Quantidade de amostras da classe 1: ', len(y_train.loc[y_train == 1]))\n print('Quantidade de amostras da classe 2: ', len(y_train.loc[y_train == 2]))\n\n # Perceptron\n percep = Perceptron()\n percep.fit(x_train, y_train)\n percep.predictions = percep.predict(x_test)\n acc_percep = percep.score(x_test, y_test)\n\n # Naive Bayes\n gnb = GaussianNB() #criado o classificador\n gnb.fit(x_train, y_train) # treinar o classificador\n gnb.predictions = gnb.predict(x_test) #testar o classificador com o conjunto de test\n acc_nb = gnb.score(x_test, y_test) # apresentar o resultado\n\n # Accuracy\n accuracy_PC.append(acc_percep)\n accuracy_NB.append(acc_nb)\n\n print('\\n Resultados Perceptron: \\n Acc_Perceptron: ', acc_percep)\n print('\\n Resultados NB: \\n Acc_Perceptron: ', acc_nb)\n print(metrics.confusion_matrix(y_test, percep.predictions))\n print('\\n Classificacao: \\n', metrics.classification_report(y_test, percep.predictions))\n\n print('\\n Vetor de accuracy Peceptron: ', accuracy_PC)\n print('\\n Vetor de accuracy NB: ', accuracy_NB)\n\n median = np.mean(accuracy_PC)\n print('Vetor accuracy_PC - Media: ', median)\n\n\n# chamada das funcoes\n\n##ver_amostras_das_classes_status()\n##ver_amostras_das_classes_regiao()\n##ver_qtde_amostras_por_classe_status()\n##ver_qtde_amostras_por_classe_regiao()\n##ver_amostras_das_classes_progsaude()\n##ver_qtde_amostras_por_classe_progsaude()\n\n\n\n# chamadas dos graficos plot\n\n##plot_hist()\n##target_count()\n##correlation()\n##bloxplot()\n\n\n# chamada ML\n\nsplit_model()","sub_path":"src/medicamentos.py","file_name":"medicamentos.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630215696","text":"#!/usr/bin/env python\n\n\"\"\"\nTo run conv_data.py, please run:\n\nroslaunch neato_node bringup.launch host:=HOST_NAME\n\"\"\"\n\nfrom geometry_msgs.msg import Twist, Vector3\nfrom sensor_msgs.msg import LaserScan\nfrom neato_node.msg import 
Bump\nimport rospy\nfrom cmdVelPublisher import CmdVelPublisher\nfrom imageSubscriber import ImageSubscriber\nimport cv2\nimport tty\nimport select\nimport sys\nimport termios\n\n\"\"\"\nCompRobo Spring 2017\n\nThis script is a test script meant to collect image data for the convNet as training.\n\"\"\"\n\nclass Control_Robot(CmdVelPublisher, ImageSubscriber, object):\n \"\"\"\n A class for systematically collecting data on various scenarios while following a line\n \"\"\"\n\n def __init__(self):\n # init ROS node\n rospy.init_node('my_teleop')\n\n #super call to parent classes\n super(Control_Robot, self).__init__()\n\n #define states\n self.state = {'i':self.forward,\n 'j':self.leftTurn,\n 'l':self.rightTurn,\n 'k':self.stopState}\n\n # get key interupt things\n self.settings = termios.tcgetattr(sys.stdin)\n self.key = None\n\n # visualization purposes\n cv2.namedWindow('raw_image')\n\n\n def onKeypress(self):\n \"\"\"\n moves the robot based on keypress\n \"\"\"\n try:\n self.state[self.key].__call__()\n except:\n # on any other keypress, stop the robot\n self.state['k'].__call__()\n\n self.sendMessage()\n rospy.sleep(.25) # use desired action for .25 second\n self.state['k'].__call__() # set robot to stop for .25 second\n self.sendMessage()\n rospy.sleep(.25)\n\n\n def getKey(self):\n \"\"\"\n Interupt (I think) that get a non interrupting keypress\n \"\"\"\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)\n\n\n def imageView(self):\n \"\"\"\n Visualize what the robot sees\n \"\"\"\n cv2.imshow('raw_image', self.cv_image)\n cv2.waitKey(1)\n cv2.imshow('video_window', self.binary_image)\n cv2.waitKey(1)\n\n\n def writeImages(self, cv_image, binary_image, time, category):\n \"\"\"\n Writes images into directory for storage\n \"\"\"\n # directory = 'test'\n directory = 'train'\n cv2.imwrite('data/{}/binary/{}/{}.png'.format(directory, category, time), binary_image)\n cv2.imwrite('data/{}/color/{}/{}.png'.format(directory, category, time), cv_image)\n\n\n def writeImagesMirror(self, cv_image, binary_image, time, category):\n \"\"\"\n Doubles image database by mirroring each received image and storing it\n \"\"\"\n # directory = 'test'\n directory = 'train'\n cv2.imwrite('data/{}/binary/{}/{}flip.png'.format(directory, category, time), cv2.flip(binary_image, 1))\n cv2.imwrite('data/{}/color/{}/{}flip.png'.format(directory, category, time), cv2.flip(cv_image, 1))\n\n\n def imageSave(self):\n \"\"\"\n Stores image with appropriate labels\n \"\"\"\n cv_image = self.cv_image\n binary_image = self.binary_image\n time = rospy.Time.now()\n\n if self.key == 'i':\n self.writeImages(cv_image, binary_image, time, 'forward')\n self.writeImagesMirror(cv_image, binary_image, time, 'forward')\n elif self.key == 'j':\n self.writeImages(cv_image, binary_image, time, 'left')\n self.writeImagesMirror(cv_image, binary_image, time, 'right')\n elif self.key == 'l':\n self.writeImages(cv_image, binary_image, time, 'right')\n self.writeImagesMirror(cv_image, binary_image, time, 'left')\n\n\n def run(self):\n \"\"\"\n Main loop\n \"\"\"\n while self.key != '\\x03':\n # continually loops through the 4 steps to find an image, save it, move, and see it\n self.getKey()\n self.imageSave()\n self.onKeypress()\n self.imageView()\n\n\nif __name__ == \"__main__\":\n #Initializes Control_Robot class and runs it\n control = Control_Robot()\n 
control.run()\n","sub_path":"src/scripts/test/conv_data.py","file_name":"conv_data.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"80370400","text":"import datetime\nimport sys\n\nsequence = [3, 7, 21]\n\ndef create_reminders(memo):\n\twith open('reminders.txt', 'a') as fp:\n\t\tfor day in sequence:\n\t\t\tdelta = datetime.timedelta(days=day)\n\t\t\tnextdate = datetime.date.today() + delta\n\t\t\tfp.write('{}:{}\\n'.format(nextdate.strftime('%Y %m %d'), memo))\n\nif __name__ == '__main__':\n\tprint('Welcome to the reviewer. Please enter any notes you would like to be reminded of. Type \"q\" to quit.')\n\n\twhile True:\n\t\tcommand = input()\n\n\t\tif command == 'q':\n\t\t\tbreak\n\n\t\tcreate_reminders(command)","sub_path":"remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"67711863","text":"def selectionsort(listtosort):\n \"\"\"sorts alist iteratively and in-place\"\"\"\n for starting_index in range(len(listtosort)):\n min_elem_index = index_of_min(listtosort, starting_index)\n swap(listtosort, starting_index, min_elem_index)\n return listtosort\n\n\n# And here is index_of_min!:\ndef index_of_min(alist, start_index):\n \"\"\"returns the index if the min element at or after start_index\"\"\"\n min_elem_index = start_index\n for i in range(start_index, len(alist)):\n if alist[i] < alist[min_elem_index]:\n min_elem_index = i\n return min_elem_index\n\n\n# And swap:\ndef swap(alist, i, j):\n \"\"\"swap the values of a alist[i] and alist[j]\"\"\"\n temp = alist[i] # store the value of alist[i] for a moment\n alist[i] = alist[j] # make alist[i] refer to the value of alist[j]\n alist[j] = temp # make alist[j] refer to the value of stored value\n\nlisttosort = [2, 5, 4]\nartist =['Maroon 5', 'Adele', 'Lady Gaga']\n\nprint(listtosort)\nprint(artist)\nselectionsort(listtosort)\nselectionsort(artist)\nprint(listtosort)\nprint(artist)\n\n\ndef standardizeall(storeedprefs):\n \"\"\"Return a new list of a lists of stored user preferences,\n With each artist string in Title Case,\n With leading and trailling whitespace removed.\n \"\"\"\n standardstoredprefs = []\n for storeduser in storeedprefs:\n standardstoreduser = []\n for artist in storeduser:\n standardstoreduser.append(artist.strip().title())\n standardstoredprefs.append(standardstoreduser)\n return standardstoredprefs\n\nprint (standardizeall([['adele', 'laDy GAGA'], ['maROON 5']]))\n","sub_path":"Python/CSforAll/CSforALL_ex5.py","file_name":"CSforALL_ex5.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"609903735","text":"from sdfbuilder import Element, Link\nfrom sdfbuilder.sensor import Sensor as SdfSensor\n\n\nclass Sensor(Element):\n \"\"\"\n Plugin sensor base class. This is used to communicate sensor\n configuration through the SDF plugin to the model controller\n in Gazebo.\n \"\"\"\n # SDF tag name, should not be changed in subclass\n TAG_NAME = 'rv:sensor'\n\n def __init__(self, part_id, link, sensor, sensor_type=None):\n \"\"\"\n :param link:\n :type link: Link\n :param part_id: ID of the part this sensor belongs to, required to identify\n the corresponding input neuron(s).\n :type part_id: str\n :param sensor: SDF element for this sensor\n :type sensor: SdfSensor\n :param sensor_type: Type of the sensor. 
Defaults to the type of the given SDF sensor,\n but it can be overridden to communicate loading a different sensor\n handler.\n :type sensor_type: str\n :return:\n \"\"\"\n super(Sensor, self).__init__()\n self.link = link\n self.type = sensor_type if sensor_type is not None else sensor.type\n self.sensor = sensor\n self.part_id = part_id\n\n def render_attributes(self):\n \"\"\"\n Adds default sensor attributes before render.\n \"\"\"\n attrs = super(Sensor, self).render_attributes()\n attrs.update({\n 'link': self.link.name,\n 'sensor': self.sensor.name,\n 'part_id': self.part_id,\n 'id': '%s__%s' % (self.part_id, self.sensor.name),\n 'type': self.type\n })\n\n return attrs\n","sub_path":"revolve/build/sdf/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"296372289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 19:49:34 2017\n\n@author: AB053658\n\"\"\"\nimport re\npagelist= ['共50页: ', '上一页', '7', '8', '9', '10', '11', '下一页']\npattern=re.compile('共(.*?)页',re.S)\nnumber=re.search(pattern,pagelist[0]).group(1)\nprint(type(number))","sub_path":"pagelist.py","file_name":"pagelist.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"263290361","text":"# coding=utf-8\nimport functools\nfrom bson import ObjectId\nfrom bson.errors import InvalidId\nfrom marshmallow import fields, validate, Schema\nfrom webargs import argmap2schema\nimport aiohttp.web\n\nfrom asynclib.http.error import BaseError\n\nfields = fields\nvalidate = validate\nSchema = Schema\n\n\nclass ErrorBodySchema(Schema):\n description = fields.String()\n code = fields.String()\n body = fields.Dict()\n\n\nasync def error_middleware(app, handler):\n\n async def middleware_handler(request):\n try:\n response = await handler(request)\n return response\n except BaseError as error:\n body = ErrorBodySchema().dumps(error.map()).data\n return aiohttp.web.Response(body=body, status=error.status, content_type='application/json')\n\n return middleware_handler\n\n\ndef dump(schema, data):\n body = schema().dumps(data).data\n return aiohttp.web.json_response(body=body, status=200, content_type='application/json')\n\n\nclass Nested(fields.Nested):\n \"\"\"\n Кастомное поле Nested, для использования с множественными схемами\n Если указан callback - nested должен быть словарем\n callback принимает в качестве аргумента результат и возвращает ключ словаря nested (какую схему использовать)\n \"\"\"\n\n def __init__(self, nested, callback=None, *args, **kwargs):\n self.callback = None\n if callback is not None:\n self.callback = callback\n nested = {a: self.__argmap2schema(b) for a, b in nested.items()}\n self._nested = nested\n else:\n nested = self.__argmap2schema(nested)\n super(Nested, self).__init__(nested, *args, **kwargs)\n\n def __argmap2schema(self, schema):\n if isinstance(schema, dict):\n schema = argmap2schema(schema)\n return schema\n\n def _serialize(self, nested_obj, attr, obj):\n if self.callback is not None:\n self.__schema = None\n self.nested = self._nested[self.callback(nested_obj)]\n result = super(Nested, self)._serialize(nested_obj, attr, obj)\n if self.callback is not None:\n self.nested = self._nested\n return result\n\n\nclass MongoId(fields.String):\n def _serialize(self, value, attr, obj):\n return super(MongoId, self)._serialize(value, attr, obj)\n\n def _deserialize(self, value, 
attr, data):\n value = super(MongoId, self)._deserialize(value, attr, data)\n try:\n return ObjectId(value)\n except InvalidId:\n self.fail('invalid')\n\n\nclass DateTimeReplaced(fields.DateTime):\n def _serialize(self, value, attr, obj):\n value = value.replace(microsecond=0) if value is not None else None\n return super(DateTimeReplaced, self)._serialize(value, attr, obj)\n\n def _deserialize(self, value, attr, obj):\n return super(DateTimeReplaced, self)._serialize(value, attr, obj)\n\n\nclass OKSchema(Schema):\n ok = fields.Boolean()\n\n\ndef make_list_schema(schema, *args, **kwargs):\n class _Shema(Schema):\n total = fields.Integer()\n objects = fields.List(Nested(schema, *args, **kwargs))\n\n return _Shema\n\n\nskip_limit_args = {'skip': fields.Integer(), 'limit': fields.Integer()}\n\nfields.MongoId = MongoId","sub_path":"asynclib/http/marshall.py","file_name":"marshall.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"582925819","text":"from SPARQLWrapper import JSON, SPARQLWrapper, RDFXML, N3\n\nsparql = \"http://206.167.181.124:7200/repositories/cldi-test-7\"\ndbp_sparql = \"http://dbpedia.org/sparql/\"\ndbp_data = SPARQLWrapper(dbp_sparql)\ndbp_data.setReturnFormat(JSON)\nsparqlData = SPARQLWrapper(sparql)\nsparqlData.setCredentials(\"admin\", \"4Metadata!\")\nsparqlData.setReturnFormat(JSON)\n\ngeo = {\"type\": \"FeatureCollection\"}\nfeatures = []\n\nquery = \"\"\"select ?institution ?num_of_docs (count (distinct ?resource) as ?num_of_docs) where {?resource ?institution} group by ?institution order by DESC(?num_of_docs)\"\"\"\nsparqlData.setQuery(query) # set the query\nresults = sparqlData.query().convert()\nfor i, result in enumerate(results['results']['bindings']):\n print (i)\n url =result['institution']['value'] \n uni = url.replace('http://canlink.library.ualberta.ca/institution/', '').replace('_', ' ')\n docs = result['num_of_docs']['value']\n query = \"\"\"select ?lat ?long where {?s rdfs:label \"%s\"@en; geo:lat ?lat; geo:long ?long} LIMIT 10\"\"\" %(uni)\n dbp_data.setQuery(query) # set the query\n results = dbp_data.query().convert()\n for re in results[\"results\"][\"bindings\"]:\n \tfeature = {\"type\": \"Feature\"}\n \tfeature[\"properties\"] = {\n \t\t\"name\": str(uni),\n \t\t\"items\": str(docs),\n \"url\": str(url),\n \"search_link\": \"?page=1?q=\" + url + \"?search_type=institution?l=25?so=Relevance?facet=?facet_type=page?f=\",\n \t\t\"lng\": float(re[\"long\"][\"value\"]),\n \t\t\"lat\": float(re[\"lat\"][\"value\"])\n \t}\n \tfeature[\"geometry\"] = {\n \t\t\"type\": \"Point\",\n \t\t\"coordinates\": [float(re[\"long\"][\"value\"]),float(re[\"lat\"][\"value\"])]\n \t}\n \tfeatures.append(feature)\ngeo[\"features\"] = features\n\nprint (geo)","sub_path":"scripts/canlink-data/code/scripts/pull_from_dbpedia.py","file_name":"pull_from_dbpedia.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"637127137","text":"#!/usr/bin/env python\nimport pika\nimport time\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='task_queue', durable=True)\nprint(' [*] Waiting for messages. 
To exit press CTRL+C')\n\n\ndef callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body)\n time.sleep(20) #在等待过程中停止程序,判断下一次接收此队列消息是否存在\n print(\" [x] Done\")\n ch.basic_ack(delivery_tag=method.delivery_tag) #消息确认符,确认处理完成并返回给rabbitmq\n\n#定义每次rev处理完一次才能再次接收\nchannel.basic_qos(prefetch_count=1)\n\nchannel.basic_consume(queue='task_queue',\n on_message_callback=callback,\n auto_ack=False, #约定处理完消息后,并向rabbitmq确认,否则此消息回传递给另一个接收者\n )\n\nchannel.start_consuming()","sub_path":"消息持久化/rev.py","file_name":"rev.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"296237478","text":"'''Given a list of integers, find the largest product you could make from 3 integers in the list'''\n\n\ndef solution(lst):\n # Start at index 2 (3rd element) and assign highest and lowest\n # based off of first two elements\n\n # Highest Number so far\n high = max(lst[0], lst[1])\n\n # Lowest number so far\n low = min(lst[0], lst[1])\n\n # Initiate Highest and lowest products of two numbers\n high_prod2 = lst[0] * lst[1]\n low_prod2 = lst[0] * lst[1]\n\n # Initiate highest product of 3 numbers\n high_prod3 = lst[0] * lst[1] * lst[2]\n\n # Iterate through list\n for num in lst[2:]:\n # Compare possible highest product of 3 numbers\n high_prod3 = max(high_prod3, num * high_prod2, num * low_prod2) # We are taking num * low_prod2 to consider the negative numbers also\n\n # Check for possible new highest products of 2 numbers\n high_prod2 = max(high_prod2, num * high, num * low) # We are taking the num*low to consider the negative numbers also\n\n # Check for possible new lowest products of 2 numbers\n low_prod2 = min(low_prod2, num * high, num * low) # We are taking num*low to consider the negative numbers also\n\n # Check for new possible high\n high = max(high, num)\n\n # Check for new possible low\n low = min(low, num)\n\n return high_prod3\n\nl = [99,-82,82,40,75,-24,39, -82, 5, 30, -25, -94, 93, -23, 48, 50, 49,-81,41,63]\n\nprint(solution(l))","sub_path":"Riddles&InterviewQuestions/HighestProd3numbers.py","file_name":"HighestProd3numbers.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555462156","text":"import command\nimport manager\nimport xmlBuilder\n\nimport os\n\nif os.path.exists (\"index.json\"):\n\tmng = manager.dataManager (open (\"index.json\", \"r\").readline ())\nelse:\n\tmng = manager.dataManager ()\n\t\ndef export (path):\n\t\"\"\"Saves json\"\"\"\n\tprint (f\"{mng.output ()}\\n\")\n\tmng.save (path)\n\ndef buildXML (path):\n\txml = xmlBuilder.generate (mng.exportData ())\n\topen (path, \"w\").write (xml)\n\ndef printer (toPrint):\n\tif toPrint == \"json\":\n\t\tprint (mng.output ())\n\telif toPrint == \"xml\":\n\t\timport xml.dom.minidom as minidom\n\t\txml = xmlBuilder.generate (mng.exportData ())\n\t\tprint (minidom.parseString (xml).toprettyxml ())\n\ndef saver (toSave, path):\n\tif toSave == \"json\":\n\t\tmng.save (path)\n\telif toSave == \"xml\":\n\t\tbuildXML (path)\n\ndef adder (toAdd, feature, versionNumber, url):\n\tif toAdd == \"feature\":\n\t\tmng.addFeature (feature)\n\telif toAdd == \"version\":\n\t\tmng.addVersion (feature, versionNumber, url)\n\ndef remover (toRemove, name, number):\n\tif toRemove == \"feature\":\n\t\tmng.removeFeature (name)\n\telif toRemove == \"version\":\n\t\tmng.removeVersion (name, number)\n\ninterpreter = command.Interpreter ()\ninterpreter.addCommand 
(\"add\", adder)\ninterpreter.addCommand (\"rm\", remover)\ninterpreter.addCommand (\"print\", printer)\ninterpreter.addCommand (\"save\", saver)\n\nif __name__ == \"__main__\":\n\twhile True:\n\t\tinterpreter.call (input (\"---\"))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"569677061","text":"from flask import Flask, render_template, redirect, request, make_response, session, abort\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField,\\\n SubmitField, ValidationError, TextField\nfrom wtforms.validators import DataRequired\nfrom wtforms.fields.html5 import EmailField\nfrom data import db_session\nfrom data.users import User\nfrom data.news import News\nfrom flask_login import LoginManager, login_user, login_required, logout_user, current_user\nimport datetime\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\ndb_session.global_init(\"db/blogs.sqlite\")\napp.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=365)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\nclass RegisterForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n password_again = PasswordField('Повторите пароль', validators=[DataRequired()])\n name = StringField('Имя пользователя', validators=[DataRequired()])\n about = TextAreaField(\"Немного о себе\")\n submit = SubmitField('Войти')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n remember_me = BooleanField('Запомнить меня')\n submit = SubmitField('Войти')\n\n\nclass NewsForm(FlaskForm):\n title = StringField('Заголовок', validators=[DataRequired()])\n content = TextAreaField(\"Содержание\")\n is_private = BooleanField(\"Личное\")\n submit = SubmitField('Применить')\n\n\ndef main():\n # # ---------------------- тестирование системы --------------------\n # # ----------- добавление пользователя -------------\n # # user = User()\n # # user.name = \"Пользов3атель 2\"\n # # user.about = \"биография пользователя 3\"\n # # user.email = \"ema2i4l@email.ru\"\n #\n # # --------- создание сесии ------------------------\n # session = db_session.create_session()\n # # session.add(user) # ------- добавление инфы\n # session.commit()\n #\n # # --------- вывод инфы ----------------------------\n # print('------------- запрос инфы об об определенном параметре')\n # user = session.query(User).first()\n # print(user.name)\n #\n # print('------------- вывод всего и вся')\n # for user in session.query(User).all():\n # print(user)\n #\n # print('------------- фильтр с операцией AND')\n # for user in session.query(User).filter(User.id > 1, User.email.notilike(\"%1%\")):\n # print(user)\n #\n # print('------------- фильтр с операцией OR')\n # for user in session.query(User).filter((User.id > 1) | (User.email.notilike(\"%1%\"))):\n # print(user)\n #\n # print('--------- изменение инфы')\n # user = session.query(User).filter(User.id == 1).first()\n # print(user)\n # user.name = \"Измененное имя пользователя\"\n # user.created_date = datetime.datetime.now()\n # session.commit()\n #\n # print('---------- удаление инфы по фильтру')\n # session.query(User).filter(User.id >= 999).delete()\n # session.commit()\n #\n # 
print('---------- удаление инфы определенного элемента')\n # # user = session.query(User).filter(User.id == 2).first()\n # # session.delete(user)\n # # session.commit()\n #\n # print(\" ---------- Добавление записи юзеру\")\n # news = News(title=\"Первая новость\", content=\"Привет блог!\",\n # user_id=1, is_private=False)\n # # session.add(news)\n # session.commit()\n #\n # print('----------- Добавление записи вот так')\n # user = session.query(User).filter(User.id == 1).first()\n # news = News(title=\"Вторая новость\", content=\"Уже вторая запись!\",\n # user=user, is_private=False)\n # # session.add(news)\n # session.commit()\n #\n # print('----------- посмотри код, здесь один из лучших спопосбов добавления записи')\n # user = session.query(User).filter(User.id == 1).first()\n # news = News(title=\"Личная запись\", content=\"Эта запись личная\",\n # is_private=True)\n # # user.news.append(news)\n # session.commit()\n #\n # print('----------- тупо все новости юзера')\n # for news in user.news:\n # print(news) # ---------- надо бы добавить __repr__\n #\n # # ------------ запуск приложения\n app.run(port=8080, host='127.0.0.1')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n session = db_session.create_session()\n return session.query(User).get(user_id)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n session = db_session.create_session()\n user = session.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(\"/\")\n return render_template('login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('login.html', title='Авторизация', form=form)\n\n\n@app.route('/news', methods=['GET', 'POST'])\n@login_required\ndef add_news():\n form = NewsForm()\n if form.validate_on_submit():\n session = db_session.create_session()\n news = News()\n news.title = form.title.data\n news.content = form.content.data\n news.is_private = form.is_private.data\n current_user.news.append(news)\n session.merge(current_user)\n session.commit()\n return redirect('/')\n return render_template('news.html', title='Добавление новости',\n form=form)\n\n\n@app.route('/news/', methods=['GET', 'POST'])\n@login_required\ndef edit_news(id):\n form = NewsForm()\n print(current_user)\n if request.method == \"GET\":\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n News.user == current_user).first()\n print(news.title)\n if news:\n form.title.data = news.title\n form.content.data = news.content\n form.is_private.data = news.is_private\n print(form.title)\n else:\n abort(404)\n if form.validate_on_submit():\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n News.user == current_user).first()\n if news:\n news.title = form.title.data\n news.content = form.content.data\n news.is_private = form.is_private.data\n session.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('news.html', title='Редактирование новости', form=form)\n\n\n@app.route('/news_delete/', methods=['GET', 'POST'])\n@login_required\ndef news_delete(id):\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n News.user == current_user).first()\n if news:\n session.delete(news)\n 
session.commit()\n else:\n abort(404)\n return redirect('/')\n\n\n@app.route(\"/cookie_test\")\ndef cookie_test():\n visits_count = int(request.cookies.get(\"visits_count\", 0))\n if visits_count:\n res = make_response(f\"Вы пришли на эту страницу {visits_count + 1} раз\")\n res.set_cookie(\"visits_count\", str(visits_count + 1),\n max_age=20)\n else:\n res = make_response(\n \"Вы пришли на эту страницу в первый раз за последние 2 года\")\n res.set_cookie(\"visits_count\", '1',\n max_age=20)\n return res\n\n\n@app.route('/session_test/')\ndef session_test():\n if 'visits_count' in session:\n session['visits_count'] = session.get('visits_count') + 1\n else:\n session['visits_count'] = 1\n return make_response(f'kolvo {session[\"visits_count\"]}')\n # дальше - код для вывода страницы\n\n\n@app.route(\"/\")\ndef index():\n session = db_session.create_session()\n if current_user.is_authenticated:\n news = session.query(News).filter(\n (News.user == current_user) | (News.is_private != True))\n else:\n news = session.query(News).filter(News.is_private != True)\n return render_template(\"index.html\", news=news)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if form.password.data != form.password_again.data:\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n session = db_session.create_session()\n if session.query(User).filter(User.email == form.email.data).first():\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n user = User(\n name=form.name.data,\n email=form.email.data,\n about=form.about.data\n )\n user.set_password(form.password.data)\n session.add(user)\n session.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\nif __name__ == '__main__':\n main()","sub_path":"FLASK-SQLALCHEMY #2/Veseha/base/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"518129545","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom contextlib import contextmanager\nimport datetime as dt\nimport os\nimport sys\nimport re\nimport shutil\nimport subprocess\n\n\n@contextmanager\ndef cd(newdir):\n '''Context-managed chdir\n\n changes back to original directory on exit or failure\n '''\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n\n\ndef runningInDocker():\n '''Function to check whether script is running in a container\n '''\n with open('/proc/self/cgroup', 'r') as procfile:\n for line in procfile:\n fields = line.strip().split('/')\n if 'docker' in fields:\n return True\n return False\n\n\ndef setup_parser():\n # Create argument parser & set up arguments:\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"-d\", \"--destdir\", default=None, required=True,\n help=\"Destination path for run directory\")\n parser.add_argument(\"configfile\", nargs=1, help=\"Run config\")\n parser.add_argument(\"-o\", \"--overwrite\", action=\"store_true\",\n help=\"Clobber an existing run directory\" +\n \"with the same name/path.\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"Print debug information.\")\n # Handle arguments, noting that 
argparse expands linux wildcards.\n args = parser.parse_args()\n\n return args\n\n\ndef get_ramindices_end(fname='input/RamIndices.txt'):\n # read file and get last date\n with open(fname, 'r') as fh:\n inds = fh.readlines()\n lastline = inds[-1].strip()\n lastdatestr = lastline.split()[0]\n lastdate = dt.datetime.strptime(lastdatestr, '%Y%m%d')\n\n return lastdate\n\n\ndef parse_block(config_file, srch='#STARTTIME'):\n expr = re.compile(srch)\n with open(config_file, 'r') as fh:\n lines = fh.readlines()\n lines = [line.rstrip() for line in lines]\n for idx, line in enumerate(lines):\n searchyn = re.match(expr, line)\n if searchyn: # target line\n useidx = idx\n store = [ll.split()[0] for ll in lines[useidx:useidx+7]]\n year = int(store[1])\n month = int(store[2])\n day = int(store[3])\n hour = int(store[4])\n minute = int(store[5])\n second = int(store[6])\n return dt.datetime(year, month, day, hour, minute, second)\n\n\ndef parse_config(config_file):\n '''Placeholder, pending web interface\n '''\n st_date = parse_block(config_file, srch='#STARTTIME')\n end_date = parse_block(config_file, srch='#STOPTIME')\n print('LAUNCH_RUN: Requested {} to {}'.format(st_date, end_date))\n lastramind = get_ramindices_end()\n print('LAUNCH_RUN: RamIndices ends at {}'.format(lastramind))\n sys.stdout.flush()\n if end_date >= lastramind:\n # if run ends after the last date in the Ramindices file,\n # update it\n with cd(\"Scripts\"):\n subprocess.run(['python', 'updateRamIndices.py'])\n return st_date, end_date\n\n\ndef setup_rundir(args, st_date, end_date, gen_fluxbound=True):\n '''Make, populate, and move RAM-SCB run directory\n '''\n # Now make rundir and copy in everything we need\n stYYMMDD = '{:04d}-{:02d}-{:02d}'.format(st_date.year,\n st_date.month,\n st_date.day)\n enYYMMDD = '{:04d}-{:02d}-{:02d}'.format(end_date.year,\n end_date.month,\n end_date.day)\n if args.overwrite:\n shutil.rmtree('run_ram_ror', ignore_errors=True)\n compl = subprocess.run(['make', 'rundir', 'RUNDIR=run_ram_ror'],\n check=True, capture_output=True)\n # then make flux boundary files\n if gen_fluxbound:\n fmdir = '/SHIELDS/flux-model' if runningInDocker() else '../flux-model'\n cmdline = ' '.join(['python', f'{fmdir}/makeGEOboundary.py',\n f'-s {stYYMMDD}', f'-e {enYYMMDD} -m 0',\n '-r input -o run_ram_ror/input_ram'])\n compl = subprocess.run(cmdline, shell=True,\n check=True, stdout=subprocess.PIPE)\n sys.stdout.flush()\n # add supplied PARAM file\n shutil.copyfile(args.configfile[0], 'run_ram_ror/PARAM.in')\n # and move rundir to final location\n if args.overwrite:\n shutil.rmtree(args.destdir, ignore_errors=True)\n shutil.move('run_ram_ror', args.destdir)\n\n\ndef run_model(args):\n '''Launch RAM-SCB and wait so the launch script won't exit\n '''\n with cd(args.destdir):\n print(os.getcwd())\n if not os.path.isfile('ram_scb.exe'):\n raise RuntimeError(' '.join(['RAM-SCB not found in specified',\n 'directory, or directory not',\n 'created properly']))\n # launch RAM-SCB, requires all relevant data in run dir\n # and appropriate subdirectories\n pram = subprocess.Popen(['./ram_scb.exe'])\n # entrypoint process in docker must be kept in foreground\n # to prevent container stopping\n pram.wait()\n\n\ndef make_plots(args, st_date, en_date):\n '''\n '''\n stdt = st_date.isoformat()[:19]\n endt = end_date.isoformat()[:19]\n with cd('Scripts'):\n cmdline = ' '.join(['python', 'summaryPlots.py', f'-s {stdt}',\n f'-e {endt}', f'-o {args.destdir}',\n f'{args.destdir}'])\n print(cmdline)\n subprocess.run(cmdline, shell=True, 
check=True,\n stdout=subprocess.PIPE)\n\n\nif __name__ == '__main__':\n # get arguments this program was called with\n args = setup_parser()\n\n # get date range and update Kp/F10.7 if req'd\n st_date, end_date = parse_config(args.configfile[0])\n\n setup_rundir(args, st_date, end_date)\n run_model(args)\n make_plots(args, st_date, end_date)\n","sub_path":"launch_run.py","file_name":"launch_run.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"12372359","text":"from setuptools import setup, find_packages\nimport os\nimport pleisthenes\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nif os.path.isfile(os.path.join(os.path.dirname(__file__), 'README.md')):\n from pypandoc import convert\n readme_rst = convert(os.path.join(os.path.dirname(__file__), 'README.md'), 'rst')\n with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'w') as out:\n out.write(readme_rst + '\\n')\n\nsetup(\n name='Pleisthenes',\n version=pleisthenes.version,\n packages=find_packages(),\n license='MIT license',\n long_description=read('README.rst'),\n description=\"Pleisthenes is a weather microservice (connects to a weather site and provides e.g. local sun \"\n \"radiation).\",\n url='https://gitlab.com/pelops/pleisthenes/',\n author='Tobias Gawron-Deutsch',\n author_email='tobias@strix.at',\n keywords='mqtt service weather',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Environment :: No Input/Output (Daemon)\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Utilities\",\n \"Topic :: Home Automation\",\n \"License :: OSI Approved :: MIT License\",\n ],\n python_requires='>=3.5',\n install_requires=[\n \"pelops>=0.2.1\",\n ],\n test_suite=\"tests_unit\",\n entry_points={\n 'console_scripts': [\n 'pleisthenes = pleisthenes.weatherservice:standalone',\n ]\n },\n\n)\n","sub_path":"pypi_install_script/Pleisthenes-0.1a0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181442663","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Project: PyCharm\n# @File : dbsize.py\n# @Author : gaoyuan@infinities.com.cn\n# @Time : 2020/9/3 0012 17:53\nfrom config import db_file\nfrom lib.pub import html\nfrom lib.pub_sqlite import SqliteConnect\nfrom common_enums import YesNoStatus\n\n\nasync def getsize(request):\n return html(request, 'dbsize.html', res={})\n\n\nasync def getrank(request):\n return html(request, 'dbrank.html')\n\n\nasync def getsummary(request):\n sc = SqliteConnect(db_file)\n time_sql = f\"SELECT MAX(update_time) from db_list\"\n host_sql = f\"SELECT COUNT(1) FROM server_list WHERE is_delete={YesNoStatus.NO}\"\n instance_sql = \"SELECT COUNT(1) FROM instance_list WHERE update_time=(SELECT MAX(update_time) from instance_list);\"\n db_sql = \"SELECT COUNT(1) FROM db_list WHERE update_time=(SELECT MAX(update_time) from db_list);\"\n size_sql = \"SELECT SUM(db_size) FROM db_list WHERE update_time=(SELECT MAX(update_time) from db_list);\"\n\n time_res = sc.getresult(time_sql)\n host_res = sc.getresult(host_sql)\n instance_res = sc.getresult(instance_sql)\n db_res = sc.getresult(db_sql)\n size_res = sc.getresult(size_sql)\n\n update_time = time_res[0][0]\n host_count = host_res[0][0]\n instance_count = instance_res[0][0]\n db_count = 
db_res[0][0]\n db_size = size_res[0][0]\n\n return html(request, 'summary.html', update_time=update_time, host_count=host_count, instance_count=instance_count,\n db_count=db_count, db_size=db_size)\n","sub_path":"view/dbsize.py","file_name":"dbsize.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238546061","text":"from .types import *\n\ndef load_integrals(fcidump_path) -> Tuple[int, float, One_electron_integral, Two_electron_integral]:\n \"\"\"Read all the Hamiltonian integrals from the data file.\n Returns: (E0, d_one_e_integral, d_two_e_integral).\n E0 : a float containing the nuclear repulsion energy (V_nn),\n d_one_e_integral : a dictionary of one-electron integrals,\n d_two_e_integral : a dictionary of two-electron integrals.\n \"\"\"\n import glob\n\n if len(glob.glob(fcidump_path)) == 1:\n fcidump_path = glob.glob(fcidump_path)[0]\n elif len(glob.glob(fcidump_path)) == 0:\n print(\"no matching fcidump file\")\n else:\n print(f\"multiple matches for {fcidump_path}\")\n for i in glob.glob(fcidump_path):\n print(i)\n\n # Use an iterator to avoid storing everything in memory twice.\n if fcidump_path.split(\".\")[-1] == \"gz\":\n import gzip\n\n f = gzip.open(fcidump_path)\n elif fcidump_path.split(\".\")[-1] == \"bz2\":\n import bz2\n\n f = bz2.open(fcidump_path)\n else:\n f = open(fcidump_path)\n\n # Only non-zero integrals are stored in the fci_dump.\n # Hence we use a defaultdict to handle the sparsity\n n_orb = int(next(f).split()[2])\n\n for _ in range(3):\n next(f)\n\n from collections import defaultdict\n\n d_one_e_integral = defaultdict(int)\n d_two_e_integral = defaultdict(int)\n\n for line in f:\n v, *l = line.split()\n v = float(v)\n # Transform from Mulliken (ik|jl) to Dirac's notation\n # (swap indices)\n i, k, j, l = list(map(int, l))\n\n if i == 0:\n E0 = v\n elif j == 0:\n # One-electron integrals are symmetric (when real, not complex)\n d_one_e_integral[(i, k)] = v\n d_one_e_integral[(k, i)] = v\n else:\n # Two-electron integrals have many permutation symmetries:\n # Exchange r1 and r2 (indices i,k and j,l)\n # Exchange i,k\n # Exchange j,l\n d_two_e_integral[(i, j, k, l)] = v\n d_two_e_integral[(i, l, k, j)] = v\n d_two_e_integral[(j, i, l, k)] = v\n d_two_e_integral[(j, k, l, i)] = v\n d_two_e_integral[(k, j, i, l)] = v\n d_two_e_integral[(k, l, i, j)] = v\n d_two_e_integral[(l, i, j, k)] = v\n d_two_e_integral[(l, k, j, i)] = v\n\n f.close()\n\n return n_orb, E0, d_one_e_integral, d_two_e_integral\n\n\ndef load_wf(path_wf) -> Tuple[List[float], List[Determinant]]:\n \"\"\"Read the input file :\n Representation of the Slater determinants (basis) and\n vector of coefficients in this basis (wave function).\"\"\"\n\n import glob\n\n if len(glob.glob(path_wf)) == 1:\n path_wf = glob.glob(path_wf)[0]\n elif len(glob.glob(path_wf)) == 0:\n print(f\"no matching wf file: {path_wf}\")\n else:\n print(f\"multiple matches for {path_wf}\")\n for i in glob.glob(path_wf):\n print(i)\n\n if path_wf.split(\".\")[-1] == \"gz\":\n import gzip\n\n with gzip.open(path_wf) as f:\n data = f.read().decode().split()\n elif path_wf.split(\".\")[-1] == \"bz2\":\n import bz2\n\n with bz2.open(path_wf) as f:\n data = f.read().decode().split()\n else:\n with open(path_wf) as f:\n data = f.read().split()\n\n def decode_det(str_):\n for i, v in enumerate(str_, start=1):\n if v == \"+\":\n yield i\n\n def grouper(iterable, n):\n \"Collect data into fixed-length chunks or blocks\"\n args = 
[iter(iterable)] * n\n return zip(*args)\n\n det = []\n psi_coef = []\n for (coef, det_i, det_j) in grouper(data, 3):\n psi_coef.append(float(coef))\n det.append(Determinant(tuple(decode_det(det_i)), tuple(decode_det(det_j))))\n\n # Normalize psi_coef\n from math import sqrt\n\n norm = sqrt(sum(c * c for c in psi_coef))\n psi_coef = [c / norm for c in psi_coef]\n\n return psi_coef, det\n\n\ndef load_eref(path_ref) -> float:\n \"\"\"Read the input file :\n Representation of the Slater determinants (basis) and\n vector of coefficients in this basis (wave function).\"\"\"\n\n import glob\n\n if len(glob.glob(path_ref)) == 1:\n path_ref = glob.glob(path_ref)[0]\n elif len(glob.glob(path_ref)) == 0:\n print(f\"no matching ref file: {path_ref}\")\n else:\n print(f\"multiple matches for {path_ref}\")\n for i in glob.glob(path_ref):\n print(i)\n\n if path_ref.split(\".\")[-1] == \"gz\":\n import gzip\n\n with gzip.open(path_ref) as f:\n data = f.read().decode()\n elif path_ref.split(\".\")[-1] == \"bz2\":\n import bz2\n\n with bz2.open(path_ref) as f:\n data = f.read().decode()\n else:\n with open(path_ref) as f:\n data = f.read()\n\n import re\n\n return float(re.search(r\"E +=.+\", data).group(0).strip().split()[-1])\n\ndef save_wf(psi_coef: List[float], psi_det: List[Determinant], wfpath: str, n_orb: int):\n \"\"\"\n write wf to file with dets in +/- format\n \"\"\"\n\n assert (len(psi_coef) == len(psi_det))\n\n def encode_det(det: Determinant):\n \"\"\"\n input: Determinant\n output: tuple of +/- strings (one for each spindet)\n \"\"\"\n return tuple(map(encode_spindet,det))\n\n def encode_spindet(spindet: Spin_determinant):\n \"\"\"\n translate a Spin_determinant to a +/- string representation\n \"\"\"\n return ''.join( ('+' if i in spindet else '-') for i in range(1,max(spindet)+1))\n\n with open(wfpath,'w') as outfile:\n for ci,di in zip(psi_coef, psi_det):\n outfile.write(f'{ci}\\n')\n for spindet in encode_det(di):\n outfile.write(spindet.ljust(n_orb,'-')+'\\n')\n return\n","sub_path":"qpx/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"309058751","text":"import json\nimport cv2\nimport numpy as np\nfrom progressbar import progressbar\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--N\", type=int, required=True)\nparser.add_argument(\"--unit\", default=\"\", choices=[\"\", \"k\", \"M\"])\nparser.add_argument(\"--crop_res\", type=int, required=True)\nparser.add_argument(\"--min_size\", type=float, default=.5)\nparser.add_argument(\"--max_size\", type=float, default=1.)\n\nargs = parser.parse_args()\n\nmovies = json.load(open(\"movies.json\")) # type: list\n\ncrop_size = (args.min_size, args.max_size) # fac of min(h, w)\ncrop_res = args.crop_res\n\nN = args.N * {\"\": 1, \"k\": int(1e3), \"M\": int(1e6)}[args.unit]\nNM = N // len(movies)\nassert N % NM == 0, \"N has to be a multiple of len(movies)\"\n\ncrops = np.empty((N, crop_res, crop_res, 3), dtype=np.uint8)\n\ncrop_i = 0\nfor movie_i, movie in enumerate(movies):\n name = movie[\"name\"]\n print(\"Movie {}/{}: {}\".format(movie_i, len(movies), name))\n\n cap = cv2.VideoCapture(movie[\"file_name\"])\n fps = cap.get(cv2.CAP_PROP_FPS)\n start, end = movie[\"start\"], movie[\"end\"]\n start_frame, end_frame = start * fps, end * fps\n\n frames = start_frame + (np.arange(NM) / NM) * (end_frame - start_frame)\n frames = frames.astype(np.int)\n\n for frame in progressbar(frames):\n # 
This is faster when sampling dense frames than setting POS_FRAMES\n while cap.get(cv2.CAP_PROP_POS_FRAMES) != frame:\n _, img = cap.read()\n\n # crop\n h, w = img.shape[:2]\n size_r = crop_size[0] + np.random.rand() * (crop_size[1] - crop_size[0])\n size_px = int(min(h, w) * size_r)\n top = np.random.randint(0, h - size_px)\n left = np.random.randint(0, w - size_px)\n img = img[top:top + size_px, left:left + size_px]\n\n # resize\n img = cv2.resize(img, (crop_res, crop_res), interpolation=cv2.INTER_AREA)\n crops[crop_i] = img[..., ::-1] # bgr -> rgb\n crop_i += 1\n\nname = \"crop_{}_{}{}_{}-{}\".format(args.crop_res, args.N, args.unit, int(args.min_size * 100), int(args.max_size * 100))\nnp.save(name, crops)\n","sub_path":"movie_reality_net/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"558676987","text":"#!/usr/bin/env python3\nimport json\nimport subprocess\nimport sys\nimport os.path\nimport shutil\nimport logging\nimport fnmatch\n\n\nfrom python_modules import state_backup\nfrom python_modules import azure_account\nfrom python_modules import storage_creation\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\ndef create_config_file():\n\n git_root = azure_account.get_git_root()\n\n logging.info(\"Creating config.tf.json\")\n\n # Derive the names for key, resource group and storage account from the path\n cwd = os.path.basename(os.getcwd())\n\n appDir = os.path.split(os.path.dirname(os.getcwd()))[1]\n\n keyName = ''.join([appDir, '-', cwd, '.terraform.tfstate'])\n\n storage_account = appDir.replace('-','') + cwd + \"storage\"\n # make sure it's not over the 24 character limit for storage account names\n storage_account = storage_account[:24]\n\n resource_group = appDir + \"-\" + cwd\n\n # Check if this is a prod or dev environment\n prodEnvs = ['prod', 'preprod']\n\n environment = 'devtest'\n\n if cwd in prodEnvs:\n environment = 'prod'\n\n if not os.path.isfile(\"./azure-version-override.json\"):\n logging.info(\"Creating fresh azure-version-override.json\")\n src = ''.join([git_root, \"/tools/config/azure-version-override.json\"])\n dst = \".\"\n shutil.copy2(src, dst)\n\n env_versions = json.load(open(\"./azure-version-override.json\"))\n\n azure_account_ids = json.load(open(git_root + \"/tools/config/azure-account-config.json\"))\n\n configTfJson = {\n 'terraform': {\n 'required_version': env_versions[\"terraform_version\"],\n 'backend': {\n 'azurerm': {\n 'resource_group_name': resource_group,\n 'storage_account_name': storage_account,\n 'container_name': 'terraform',\n 'key': keyName\n }\n }\n },\n 'provider': {\n 'azurerm': {\n 'tenant_id': azure_account_ids[environment][\"tenant_id\"],\n 'subscription_id': azure_account_ids[environment][\"subscription_id\"],\n 'version': env_versions[\"azurerm_version\"]\n }\n }\n }\n\n\n\n jsonFile = json.dumps(configTfJson, indent=2)\n\n with open(\"config.tf.json\", \"w\") as f:\n f.write(jsonFile)\n\n logging.info(\"config.tf.json created\")\n\n\ndef check_first_time_terraform_init():\n\n if os.path.exists(\"./.terraform\"):\n\n try:\n state_exists = subprocess.run(\n [\"terraform\", \"show\",\n ],\n stdout=subprocess.PIPE,\n check=True\n ).stdout.decode()\n except:\n logging.warn(\"There is a problem with .terraform\")\n logging.warn(\"You may need to delete .terraform before running this script. 
Exiting.\")\n sys.exit()\n\n if \"No state\" in state_exists:\n logging.info(\"There is no Terraform state to backup\")\n return False\n else:\n logging.info(\"Terraform state exists\")\n return True\n else:\n logging.info(\"There is no .terraform directory\")\n return False\n\n\n\nif len(fnmatch.filter(os.listdir('.'), '*.tf')) < 1:\n logging.warn(\"There are no terraform config files. Exiting.\")\n sys.exit(1)\n\nif check_first_time_terraform_init():\n logging.info(\"Backing up the state\")\n state_backup.backup()\n\ncreate_config_file()\n\nconfig = json.load(open(\"./config.tf.json\"))\n\nsubscription_id = config[\"provider\"][\"azurerm\"][\"subscription_id\"]\nresource_group = config[\"terraform\"][\"backend\"][\"azurerm\"][\"resource_group_name\"]\nstorage_account = config[\"terraform\"][\"backend\"][\"azurerm\"][\"storage_account_name\"]\n\nlogging.info(\"Authorizing in Azure\")\nazure_account.azure_access_token(subscription_id)\n\nazure_account.azure_set_subscription(subscription_id)\n\nstorage_creation.create_storage_account(resource_group, storage_account)\n\nkey = subprocess.run(\n [\"az\", \"storage\", \"account\", \"keys\", \"list\",\n \"--resource-group\", resource_group,\n \"--account-name\", storage_account,\n \"--query\", \"[0].value\",\n \"--output\", \"tsv\",\n ],\n stdout=subprocess.PIPE,\n check=True\n).stdout.decode()\n\nlogging.info(\"Running terraform init\")\nsubprocess.run(\n [\"terraform\", \"init\", \"-backend-config\", \"access_key=%s\" % key],\n check=True\n)\n","sub_path":"tools/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"375099643","text":"\nfrom ASTBaseElement import ASTBaseElement\nimport __init__\nfrom util import make_list\n\nclass ASTIfElseStatement(ASTBaseElement):\n\n def __init__(self, tree):\n self.predicate = self.build(tree.predicate)\n self.true_statement = self.build(tree.if_true)\n self.false_statement = self.build(tree.if_false)\n\n def traverse(self, scope, builder):\n\n builder.add_string(\"if (\")\n self.predicate.traverse(scope, builder)\n builder.add_string(\") {\")\n builder.push_indent()\n builder.newline()\n builder.make_indent()\n self.true_statement.traverse(scope, builder)\n builder.add_string(\";\")\n builder.newline()\n builder.pop_indent()\n builder.make_indent()\n builder.add_string(\"}\")\n\n if self.false_statement:\n\n # Nested if else, dont add braces there\n if isinstance(self.false_statement, ASTIfElseStatement):\n builder.add_string(\" else \")\n self.false_statement.traverse(scope, builder)\n\n return\n\n builder.add_string(\" else {\")\n builder.newline()\n builder.push_indent()\n builder.make_indent()\n self.false_statement.traverse(scope, builder)\n builder.add_string(\";\")\n builder.newline()\n builder.pop_indent()\n builder.make_indent()\n builder.add_string(\"}\")\n\n","sub_path":"Source/ASTIfElseStatement.py","file_name":"ASTIfElseStatement.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"134223118","text":"from TestModel.models import Test, Tag, Contact, thing_info\nfrom django.shortcuts import render\nfrom funcation_py.common import *\n\n\n# def search_show(request):\n# return render_to_response('search_form.html')\n#\n\ndef insert_data_choice():\n sql_order = \"select WUPIN_NAME from mysql_thing_lh\"\n result = mysql_execute(sql_order)\n\n wupin_name_list = []\n for i in result:\n 
wupin_name_list.append(i[0])\n\n return wupin_name_list\n\n\ndef search_mysql(thing_txm):\n sql_order = \"select Location_OK,Location_NG from mysql_thing_lh where TXM='%s'\" % thing_txm\n\n result = mysql_execute(sql_order)\n\n print(result)\n\n return result\n\n\ndef search_info(request):\n ctx = {}\n print(\"头\", request, ctx)\n print('post', request.POST)\n\n try:\n txm = request.POST['q']\n\n result = search_mysql(txm)\n\n ctx['thing_okaddress'] = result[0][0]\n ctx['thing_ngaddress'] = result[0][1]\n\n print(\"thing_address\", result)\n\n obj = thing_info.objects.get(TXM=txm)\n print('22222', obj)\n ctx['txm'] = obj.TXM\n ctx['typeof'] = obj.TYPEOF\n ctx['wupin_name'] = obj.WUPIN_NAME\n # message = '你搜索的内容为: ' + request.POST['q']\n\n\n\n except Exception:\n print_exc()\n # message = '没有此结果'\n ctx['result'] = '查询无结果'\n # return HttpResponse(message)\n\n ctx['list'] = insert_data_choice()\n print(\"request\", request, ctx)\n\n return render(request, \"search_form.html\", ctx)\n\n # return HttpResponse(obj.TYPEOF)\n\n\ndef search_address(request):\n try:\n ctx = {}\n list_choise = request.POST['list_choice']\n print('选择的东西', list_choise)\n sql_order = \"select TXM,TYPEOF,WUPIN_NAME,Location_OK,Location_NG from mysql_thing_lh where WUPIN_NAME='%s'\" % list_choise\n result = mysql_execute(sql_order)\n\n print('选择得到的详细的地址', result)\n if len(result)!=0:\n ctx['txm'] = result[0][0]\n ctx['typeof'] = result[0][1]\n ctx['wupin_name'] = result[0][2]\n ctx['thing_okaddress'] = result[0][3]\n ctx['thing_ngaddress'] = result[0][4]\n\n\n\n\n\n except Exception:\n print_exc()\n\n ctx['list'] = insert_data_choice()\n return render(request, \"search_form.html\", ctx)\n","sub_path":"Web_Django/search_sql.py","file_name":"search_sql.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"4874674","text":"from wordcloud import WordCloud,ImageColorGenerator\nfrom PIL import Image\nimport numpy as np\nfrom os import path\nimport jieba\n#根据文本文件生成云图\n\n\n#待生成文本路径\ntxtpath= '../data/flag.txt'\n\n#设置背景图片\nbackimage = '../data/qi.jpg'\n\n#设置保存路径\nsavepath = '../data/flagall.png'\n\nlj=path.dirname(__file__) #当前文件路径\ntext=open(path.join(lj,txtpath),encoding='utf-8').read() #读取的文本\n# jieba.add_word('小水')\n# jieba.add_word('松冈') #添加结巴分辨不了的词汇\njbText=' '.join(jieba.cut(text))\n\n\n\nimgMask=np.array(Image.open(path.join(lj, backimage))) #读入背景图片\nwc=WordCloud(\n background_color='white',\n max_words=500,\n font_path='msyh.ttc', #默认不支持中文\n mask=imgMask, #设置背景图片\n random_state=30 #生成多少种配色方案\n).generate(jbText)\n\nImageColorGenerator(imgMask) #根据图片生成词云颜色\n# plt.imshow(wc)\n# plt.axis('off')\n# plt.show()\nwc.to_file(path.join(lj, savepath))\nprint('成功保存词云图片!')\n\n\n","sub_path":"july/crawler/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"387663912","text":"import argparse\nimport random\nimport numpy as np\nimport time\nimport torch\nfrom torch import optim\n# from lf_evaluator import *\nfrom models import *\nfrom data import *\nfrom utils import *\nimport math\nfrom os.path import join\nfrom gadget import *\nimport os\nimport shutil\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n\n parser.add_argument('dataset', help='specified dataset')\n parser.add_argument('model_id', help='specified model id')\n \n parser.add_argument('--split', type=str, default='test', 
help='test split')\n parser.add_argument('--do_eval', dest='do_eval', default=False, action='store_true', help='only output')\n # parser.add_argument('--outfile', dest='outfile', default='beam_output.txt', help='output file of beam')\n # Some common arguments for your convenience\n\n parser.add_argument('--gpu', type=str, default=None, help='gpu id')\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--beam_size', type=int, default=20, help='beam size')\n\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=170, help='output length limit of the decoder')\n parser.add_argument('--input_dim', type=int, default=100, help='input vector dimensionality')\n parser.add_argument('--output_dim', type=int, default=100, help='output vector dimensionality')\n parser.add_argument('--hidden_size', type=int, default=200, help='hidden state dimensionality')\n\n # Hyperparameters for the encoder -- feel free to play around with these!\n parser.add_argument('--no_bidirectional', dest='bidirectional', default=True, action='store_false', help='bidirectional LSTM')\n parser.add_argument('--reverse_input', dest='reverse_input', default=False, action='store_true')\n parser.add_argument('--emb_dropout', type=float, default=0.2, help='input dropout rate')\n parser.add_argument('--rnn_dropout', type=float, default=0.2, help='dropout rate internal to encoder RNN')\n args = parser.parse_args()\n return args\n\ndef make_input_tensor(exs, reverse_input):\n x = np.array(exs.x_indexed)\n len_x = len(exs.x_indexed)\n if reverse_input:\n x = np.array(x[::-1])\n # add batch dim\n x = x[np.newaxis, :]\n len_x = np.array([len_x])\n x = torch.from_numpy(x).long()\n len_x = torch.from_numpy(len_x)\n return x, len_x\n\ndef decode(model_path, test_data, input_indexer, output_indexer, args):\n device = config.device\n if 'cpu' in str(device):\n checkpoint = torch.load(model_path, map_location=device)\n else:\n checkpoint = torch.load(model_path)\n \n # Create model\n model_input_emb = EmbeddingLayer(args.input_dim, len(input_indexer), args.emb_dropout)\n model_enc = RNNEncoder(args.input_dim, args.hidden_size, args.rnn_dropout, args.bidirectional)\n model_output_emb = EmbeddingLayer(args.output_dim, len(output_indexer), args.emb_dropout)\n model_dec = AttnRNNDecoder(args.input_dim, args.hidden_size, 2 * args.hidden_size if args.bidirectional else args.hidden_size,len(output_indexer), args.rnn_dropout)\n\n # load dict\n model_input_emb.load_state_dict(checkpoint['input_emb'])\n model_enc.load_state_dict(checkpoint['enc'])\n model_output_emb.load_state_dict(checkpoint['output_emb'])\n model_dec.load_state_dict(checkpoint['dec'])\n\n # map device\n model_input_emb.to(device)\n model_enc.to(device)\n model_output_emb.to(device)\n model_dec.to(device)\n\n # switch to eval\n model_input_emb.eval()\n model_enc.eval()\n model_output_emb.eval()\n model_dec.eval()\n\n pred_derivations = []\n with torch.no_grad():\n for i, ex in enumerate(test_data):\n if i % 50 == 0:\n print(\"Done\", i)\n x, len_x = make_input_tensor(ex, args.reverse_input)\n x, len_x = x.to(device), len_x.to(device)\n\n enc_out_each_word, enc_context_mask, enc_final_states = \\\n encode_input_for_decoder(x, len_x, model_input_emb, model_enc)\n \n pred_derivations.append(beam_decoder(enc_out_each_word, enc_context_mask, enc_final_states,\n output_indexer, model_output_emb, model_dec, args.decoder_len_limit, args.beam_size))\n\n\n output_derivations(test_data, 
pred_derivations, args)\n\ndef beam_decoder(enc_out_each_word, enc_context_mask, enc_final_states, output_indexer,\n model_output_emb, model_dec, decoder_len_limit, beam_size):\n ders, scores = batched_beam_sampling(enc_out_each_word, enc_context_mask, enc_final_states, output_indexer,\n model_output_emb, model_dec, decoder_len_limit, beam_size)\n pred_tokens = [[output_indexer.get_object(t) for t in y] for y in ders]\n return pred_tokens\n\ndef output_derivations(test_data, pred_derivations, args):\n outfile = get_decode_file(args.dataset, args.split, args.model_id)\n with open(outfile, \"w\") as out:\n for i, pred_ders in enumerate(pred_derivations):\n out.write(\" \".join([\"\".join(x[1]) for x in enumerate(pred_ders)]) + \"\\n\")\n\nif __name__ == '__main__':\n args = _parse_args()\n print(args)\n # global device\n set_global_device(args.gpu)\n \n print(\"Pytroch using device \", config.device)\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Load the training and test data\n test, input_indexer, output_indexer = load_test_dataset(args.dataset, args.split)\n test_data_indexed = index_data(test, input_indexer, output_indexer, args.decoder_len_limit)\n # test_data_indexed = tricky_filter_data(test_data_indexed)\n\n model_path = get_model_file(args.dataset, args.model_id)\n decode(model_path, test_data_indexed, input_indexer, output_indexer, args)","sub_path":"code/decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"429021702","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('resources', '0003_auto_20161129_1950'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='resource',\n name='language',\n field=models.CharField(max_length=7, choices=[('en', 'English'), ('as', 'Assamese'), ('bn', 'Bengali'), ('gu', 'Gujarati'), ('hi', 'Hindi'), ('kn', 'Kannada'), ('ml', 'Malayalam'), ('mr', 'Marathi'), ('lus', 'Mizo'), ('or', 'Odia'), ('pa', 'Punjabi'), ('te', 'Telugu'), ('ta', 'Tamil'), ('ur', 'Urdu')]),\n ),\n ]\n","sub_path":"resources/migrations/0004_auto_20170124_1531.py","file_name":"0004_auto_20170124_1531.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"155199117","text":"# coding: utf-8\r\n# 设损失函数loss = (w + 1)^2 , 令 w 是常数 5。反向传播就是求最小\r\n# loss 对应的 w 值\r\n\r\nimport tensorflow as tf\r\n# 定义待优化参数 w 初值赋予5\r\nw = tf.Variable(tf.constant(5, dtype=tf.float32))\r\n# 定义损失函数 loss\r\nloss = tf.square(w+1)\r\n# 定义反向传播方法\r\n# 学习率为:0.2\r\ntrain_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\r\n# 生成会话,训练40轮\r\nwith tf.Session() as sess:\r\n init_op = tf.global_variables_initializer()\r\n sess.run(init_op)\r\n for i in range(40):\r\n sess.run(train_step)\r\n W_val = sess.run(w)\r\n loss_val = sess.run(loss)\r\n print(\"After %s steps: w: is %f, loss: is %f.\" %(i, W_val, loss_val))","sub_path":"TensorFlow/tf08learn.py","file_name":"tf08learn.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"303352248","text":"import numpy as np\nimport plotly.graph_objects as go\nfrom pythermalcomfort import psychrometrics as psy\nfrom math import ceil, floor\nimport dash_bootstrap_components as dbc\nfrom dash import dcc\nfrom dash 
import html\nfrom my_project.global_scheme import (\n container_row_center_full,\n container_col_center_one_of_three,\n)\nfrom my_project.utils import generate_chart_name\n\nfrom my_project.global_scheme import (\n dropdown_names,\n sun_cloud_tab_dropdown_names,\n more_variables_dropdown,\n sun_cloud_tab_explore_dropdown_names,\n)\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\n\nfrom app import app\n\nfrom my_project.global_scheme import (\n template,\n mapping_dictionary,\n tight_margins,\n)\n\npsy_dropdown_names = {\n \"None\": \"None\",\n \"Frequency\": \"Frequency\",\n}\npsy_dropdown_names.update(dropdown_names.copy())\npsy_dropdown_names.update(sun_cloud_tab_dropdown_names.copy())\npsy_dropdown_names.update(more_variables_dropdown.copy())\npsy_dropdown_names.update(sun_cloud_tab_explore_dropdown_names.copy())\n\n\ndef inputs():\n \"\"\"\"\"\"\n return html.Div(\n className=\"container-row full-width three-inputs-container\",\n children=[\n html.Div(\n className=container_col_center_one_of_three,\n children=[\n html.Div(\n className=container_row_center_full,\n children=[\n html.H6(\n children=[\"Color By:\"],\n style={\"flex\": \"30%\"},\n ),\n dcc.Dropdown(\n id=\"psy-color-by-dropdown\",\n options=[\n {\"label\": i, \"value\": psy_dropdown_names[i]}\n for i in psy_dropdown_names\n ],\n value=\"Frequency\",\n style={\"flex\": \"70%\"},\n ),\n ],\n ),\n ],\n ),\n html.Div(\n className=container_col_center_one_of_three,\n children=[\n dbc.Button(\n \"Apply month and hour filter\",\n color=\"primary\",\n id=\"month-hour-filter\",\n className=\"mb-2\",\n n_clicks=0,\n ),\n html.Div(\n className=\"container-row full-width justify-center mt-2\",\n children=[\n html.H6(\"Month Range\", style={\"flex\": \"20%\"}),\n html.Div(\n dcc.RangeSlider(\n id=\"psy-month-slider\",\n min=1,\n max=12,\n step=1,\n value=[1, 12],\n marks={1: \"1\", 12: \"12\"},\n tooltip={\n \"always_visible\": False,\n \"placement\": \"top\",\n },\n allowCross=False,\n ),\n style={\"flex\": \"50%\"},\n ),\n dcc.Checklist(\n options=[\n {\"label\": \"Invert\", \"value\": \"invert\"},\n ],\n value=[],\n id=\"invert-month-psy\",\n labelStyle={\"flex\": \"30%\"},\n ),\n ],\n ),\n html.Div(\n className=\"container-row align-center justify-center\",\n children=[\n html.H6(\"Hour Range\", style={\"flex\": \"20%\"}),\n html.Div(\n dcc.RangeSlider(\n id=\"psy-hour-slider\",\n min=1,\n max=24,\n step=1,\n value=[1, 24],\n marks={1: \"1\", 24: \"24\"},\n tooltip={\n \"always_visible\": False,\n \"placement\": \"topLeft\",\n },\n allowCross=False,\n ),\n style={\"flex\": \"50%\"},\n ),\n dcc.Checklist(\n options=[\n {\"label\": \"Invert\", \"value\": \"invert\"},\n ],\n value=[],\n id=\"invert-hour-psy\",\n labelStyle={\"flex\": \"30%\"},\n ),\n ],\n ),\n ],\n ),\n html.Div(\n className=container_col_center_one_of_three,\n children=[\n dbc.Button(\n \"Apply filter\",\n color=\"primary\",\n id=\"data-filter\",\n className=\"mb-2\",\n n_clicks=0,\n ),\n html.Div(\n className=container_row_center_full,\n children=[\n html.H6(\n children=[\"Filter Variable:\"], style={\"flex\": \"30%\"}\n ),\n dcc.Dropdown(\n id=\"psy-var-dropdown\",\n options=[\n {\"label\": i, \"value\": dropdown_names[i]}\n for i in dropdown_names\n ],\n value=\"RH\",\n style={\"flex\": \"70%\"},\n ),\n ],\n ),\n html.Div(\n className=container_row_center_full,\n children=[\n html.H6(children=[\"Min Value:\"], style={\"flex\": \"30%\"}),\n dbc.Input(\n id=\"psy-min-val\",\n placeholder=\"Enter a number for the min val\",\n 
type=\"number\",\n step=1,\n value=0,\n style={\"flex\": \"70%\"},\n ),\n ],\n ),\n html.Div(\n className=container_row_center_full,\n children=[\n html.H6(children=[\"Max Value:\"], style={\"flex\": \"30%\"}),\n dbc.Input(\n id=\"psy-max-val\",\n placeholder=\"Enter a number for the max val\",\n type=\"number\",\n value=100,\n step=1,\n style={\"flex\": \"70%\"},\n ),\n ],\n ),\n ],\n ),\n ],\n )\n\n\ndef layout_psy_chart():\n return (\n dcc.Loading(\n type=\"circle\",\n children=html.Div(\n className=\"container-col\",\n children=[inputs(), html.Div(id=\"psych-chart\")],\n ),\n ),\n )\n\n\n# psychrometric chart\n@app.callback(\n Output(\"psych-chart\", \"children\"),\n [\n Input(\"psy-color-by-dropdown\", \"value\"),\n Input(\"month-hour-filter\", \"n_clicks\"),\n Input(\"data-filter\", \"n_clicks\"),\n Input(\"global-local-radio-input\", \"value\"),\n ],\n [\n State(\"df-store\", \"data\"),\n State(\"psy-month-slider\", \"value\"),\n State(\"psy-hour-slider\", \"value\"),\n State(\"psy-min-val\", \"value\"),\n State(\"psy-max-val\", \"value\"),\n State(\"psy-var-dropdown\", \"value\"),\n State(\"meta-store\", \"data\"),\n State(\"invert-month-psy\", \"value\"),\n State(\"invert-hour-psy\", \"value\"),\n ],\n)\n# @cache.memoize(timeout=TIMEOUT)\ndef update_psych_chart(\n colorby_var,\n time_filter,\n data_filter,\n global_local,\n df,\n month,\n hour,\n min_val,\n max_val,\n data_filter_var,\n meta,\n invert_month,\n invert_hour,\n):\n df = pd.read_json(df, orient=\"split\")\n start_month, end_month = month\n if invert_month == [\"invert\"] and (start_month != 1 or end_month != 12):\n month = month[::-1]\n start_hour, end_hour = hour\n if invert_hour == [\"invert\"] and (start_hour != 1 or end_hour != 24):\n hour = hour[::-1]\n time_filter_info = [time_filter, month, hour]\n data_filter_info = [data_filter, data_filter_var, min_val, max_val]\n\n time_filter = time_filter_info[0]\n start_month = time_filter_info[1][0]\n end_month = time_filter_info[1][1]\n start_hour = time_filter_info[2][0]\n end_hour = time_filter_info[2][1]\n\n data_filter = data_filter_info[0]\n data_filter_var = data_filter_info[1]\n min_val = data_filter_info[2]\n max_val = data_filter_info[3]\n\n if time_filter:\n if start_month <= end_month:\n mask = (df[\"month\"] < start_month) | (df[\"month\"] > end_month)\n df[mask] = None\n else:\n mask = (df[\"month\"] >= end_month) & (df[\"month\"] <= start_month)\n df[mask] = None\n\n if start_hour <= end_hour:\n mask = (df[\"hour\"] < start_hour) | (df[\"hour\"] > end_hour)\n df[mask] = None\n else:\n mask = (df[\"hour\"] >= end_hour) & (df[\"hour\"] <= start_hour)\n df[mask] = None\n\n if data_filter:\n if min_val <= max_val:\n mask = (df[data_filter_var] < min_val) | (df[data_filter_var] > max_val)\n df[mask] = None\n else:\n mask = (df[data_filter_var] >= max_val) & (df[data_filter_var] <= min_val)\n df[mask] = None\n\n if df.dropna().shape[0] == 0:\n return (\n dbc.Alert(\n \"No data is available in this location under these conditions. 
Please \"\n \"either change the month and hour filters, or select a wider range for \"\n \"the filter variable\",\n color=\"danger\",\n style={\"text-align\": \"center\", \"marginTop\": \"2rem\"},\n ),\n )\n\n var = colorby_var\n if var == \"None\":\n var_color = \"darkorange\"\n elif var == \"Frequency\":\n var_color = [\"rgba(255,255,255,0)\", \"rgb(0,150,255)\", \"rgb(0,0,150)\"]\n else:\n var_unit = mapping_dictionary[var][\"unit\"]\n\n var_name = mapping_dictionary[var][\"name\"]\n\n var_color = mapping_dictionary[var][\"color\"]\n\n if global_local == \"global\":\n # Set Global values for Max and minimum\n var_range_x = mapping_dictionary[\"DBT\"][\"range\"]\n hr_range = [0, 0.03]\n var_range_y = hr_range\n\n else:\n # Set maximum and minimum according to data\n data_max = 5 * ceil(df[\"DBT\"].max() / 5)\n data_min = 5 * floor(df[\"DBT\"].min() / 5)\n var_range_x = [data_min, data_max]\n\n data_max = (5 * ceil(df[\"hr\"].max() * 1000 / 5)) / 1000\n data_min = (5 * floor(df[\"hr\"].min() * 1000 / 5)) / 1000\n var_range_y = [data_min, data_max]\n\n title = \"Psychrometric Chart\"\n\n if colorby_var != \"None\" and colorby_var != \"Frequency\":\n title = title + \" colored by \" + var_name + \" (\" + var_unit + \")\"\n\n dbt_list = list(range(-60, 60, 1))\n rh_list = list(range(10, 110, 10))\n\n rh_df = pd.DataFrame()\n for i, rh in enumerate(rh_list):\n hr_list = np.vectorize(psy.psy_ta_rh)(dbt_list, rh)\n hr_df = pd.DataFrame.from_records(hr_list)\n name = \"rh\" + str(rh)\n rh_df[name] = hr_df[\"hr\"]\n\n fig = go.Figure()\n\n # Add traces\n for i, rh in enumerate(rh_list):\n name = \"rh\" + str(rh)\n fig.add_trace(\n go.Scatter(\n x=dbt_list,\n y=rh_df[name],\n showlegend=False,\n mode=\"lines\",\n name=\"\",\n hovertemplate=\"RH \" + str(rh) + \"%\",\n line=dict(width=1, color=\"lightgrey\"),\n )\n )\n if var == \"None\":\n fig.add_trace(\n go.Scatter(\n x=df[\"DBT\"],\n y=df[\"hr\"],\n showlegend=False,\n mode=\"markers\",\n marker=dict(\n size=6,\n color=var_color,\n showscale=False,\n opacity=0.2,\n ),\n hovertemplate=mapping_dictionary[\"DBT\"][\"name\"]\n + \": %{x:.2f}\"\n + mapping_dictionary[\"DBT\"][\"name\"],\n name=\"\",\n )\n )\n elif var == \"Frequency\":\n fig.add_trace(\n go.Histogram2d(\n x=df[\"DBT\"],\n y=df[\"hr\"],\n name=\"\",\n colorscale=var_color,\n hovertemplate=\"\",\n autobinx=False,\n xbins=dict(start=-50, end=60, size=1),\n )\n )\n # fig.add_trace(\n # go.Scatter(\n # x=dbt_list,\n # y=rh_df[\"rh100\"],\n # showlegend=False,\n # mode=\"none\",\n # name=\"\",\n # fill=\"toself\",\n # fillcolor=\"#fff\",\n # )\n # )\n\n else:\n fig.add_trace(\n go.Scatter(\n x=df[\"DBT\"],\n y=df[\"hr\"],\n showlegend=False,\n mode=\"markers\",\n marker=dict(\n size=5,\n color=df[var],\n showscale=True,\n opacity=0.3,\n colorscale=var_color,\n colorbar=dict(thickness=30, title=var_unit + \"
\"),\n                ),\n                customdata=np.stack((df[\"RH\"], df[\"h\"], df[var], df[\"t_dp\"]), axis=-1),\n                hovertemplate=mapping_dictionary[\"DBT\"][\"name\"]\n                + \": %{x:.2f}\"\n                + mapping_dictionary[\"DBT\"][\"unit\"]\n                + \"<br>\"\n                + mapping_dictionary[\"RH\"][\"name\"]\n                + \": %{customdata[0]:.2f}\"\n                + mapping_dictionary[\"RH\"][\"unit\"]\n                + \"<br>\"\n                + mapping_dictionary[\"h\"][\"name\"]\n                + \": %{customdata[1]:.2f}\"\n                + mapping_dictionary[\"h\"][\"unit\"]\n                + \"<br>\"\n                + mapping_dictionary[\"t_dp\"][\"name\"]\n                + \": %{customdata[3]:.2f}\"\n                + mapping_dictionary[\"t_dp\"][\"unit\"]\n                + \"<br>\"\n                + \"
\"\n + var_name\n + \": %{customdata[2]:.2f}\"\n + var_unit,\n name=\"\",\n )\n )\n\n fig.update_layout(template=template, margin=tight_margins)\n fig.update_xaxes(\n title_text=\"Temperature, °C\",\n range=var_range_x,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n mirror=True,\n )\n fig.update_yaxes(\n title_text=\"Humidity Ratio, kg_water/kg_dry air\",\n range=var_range_y,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n mirror=True,\n )\n\n return dcc.Graph(config=generate_chart_name(\"psy\", meta), figure=fig)\n","sub_path":"my_project/tab_psy_chart/app_psy_chart.py","file_name":"app_psy_chart.py","file_ext":"py","file_size_in_byte":16130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"480372439","text":"import pandas as pd\nimport numpy as np\n\n\ndef prep_data(dataframe):\n\n #make gold a float\n dataframe['gold_team1'] = dataframe['gold_team1'].apply(lambda x: float(x.replace('k', '')))\n dataframe['gold_team2'] = dataframe['gold_team2'].apply(lambda x: float(x.replace('k', '')))\n\n #add additonal difference columns to the dataset: dragon, baron, towers, gold, kills\n dataframe['dragon_diff_1'] = dataframe['dragons_team1'] - dataframe['dragons_team2']\n dataframe['dragon_diff_2'] = dataframe['dragons_team2'] - dataframe['dragons_team1']\n\n dataframe['baron_diff_1'] = dataframe['barons_team1'] - dataframe['barons_team2']\n dataframe['baron_diff_2'] = dataframe['barons_team2'] - dataframe['barons_team1']\n\n dataframe['tower_diff_1'] = dataframe['towers_team1'] - dataframe['towers_team2']\n dataframe['tower_diff_2'] = dataframe['towers_team2'] - dataframe['towers_team1']\n\n dataframe['gold_diff_1'] = dataframe['gold_team1'] - dataframe['gold_team2']\n dataframe['gold_diff_2'] = dataframe['gold_team2'] - dataframe['gold_team1']\n\n dataframe['kills_diff_1'] = dataframe['kills_team1'] - dataframe['kills_team2']\n dataframe['kills_diff_2'] = dataframe['kills_team2'] - dataframe['kills_team1']\n\n return(dataframe)\n\ndef arange_teamwise(dataframe_games):\n \"\"\"\n input:\n dataframe_games: input in a form that shows stats of games, i.e. 
of two teams\n\n output:\n dataframe_team: outputs dataframe with all games for all teams stacked on top of each other\n \"\"\"\n dataframe_games.sort_values(['week', 'gamenumber'], ascending=[True, True], inplace=True)\n\n #remove opponent\n games1 = dataframe_games[['date', 'week', 'team1', 'gamenumber', 'gametime_in_s', 'outcome_team1', 'gold_team1', 'kills_team1',\t'towers_team1',\t'dragons_team1',\n 'barons_team1', 'gold_diff_1', 'dragon_diff_1', 'baron_diff_1', 'tower_diff_1', 'kills_diff_1']]\n games2 = dataframe_games[['date', 'week', 'team2', 'gamenumber', 'gametime_in_s', 'outcome_team2', 'gold_team2', 'kills_team2',\t'towers_team2',\t'dragons_team2',\n 'barons_team2', 'gold_diff_2', 'dragon_diff_2', 'baron_diff_2', 'tower_diff_2', 'kills_diff_2']]\n games1.columns = [['date', 'week', 'team', 'gamenumber', 'gametime_in_s', 'outcome_team', 'gold_team', 'kills_team',\t'towers_team',\t'dragons_team',\n 'barons_team', 'gold_diff', 'dragon_diff', 'baron_diff', 'tower_diff', 'kills_diff']]\n games2.columns = [['date', 'week', 'team', 'gamenumber', 'gametime_in_s', 'outcome_team', 'gold_team', 'kills_team',\t'towers_team',\t'dragons_team',\n 'barons_team', 'gold_diff', 'dragon_diff', 'baron_diff', 'tower_diff', 'kills_diff']]\n df_teams = pd.concat([games1, games2], ignore_index=True)\n\n return(df_teams)\n\ndef get_teams(df_teams):\n \"\"\"\n input:\n dataframe_team\n\n output:\n list of dataframe_teams\n \"\"\"\n\n list_of_df_teams = []\n teamnames = pd.unique(df_teams['team'].squeeze())\n print(teamnames)\n for name in teamnames:\n df_team = df_teams[df_teams['team'].values == name]\n list_of_df_teams.append(df_team)\n\n return(list_of_df_teams)\n\ndef prep_for_training(list_of_df_teams, min_steps=5, step_size=3, target_col='dragons_team', test_teams=2):\n \"\"\"\n input:\n list_of_df_teams:\n min_steps: number of timesteps that a series must have at least\n step_size: number of steps to start a new series\n target_col: target column; either a feature (i.e. 
dragons) or outcome_team\n test_teams: number of teams that are held off for testing purposes\n output:\n X_train: multi-dimensional time series of predictive values\n y_train: targets of given\n X_test:\n y_test:\n \"\"\"\n X_train = []\n y_train = []\n X_test = []\n y_test = []\n\n def serialize(df_team, min_steps=5, step_size=3, target_col='dragons_team'):\n \"\"\"\n takes a df_team and splits it up into series\n \"\"\"\n #drop all columns that have no predictive value:\n df_team = df_team.drop(['date', 'week', 'team', 'gamenumber'], axis=1)\n y = df_team[target_col]\n X = df_team.drop([target_col], axis=1)\n\n #split into series\n X_return = []\n y_return = []\n for i in np.arange(start=min_steps-1, stop=len(df_team) - 1, step=step_size):\n #add every series to their respective list\n X_return.append(X.head(n=i).as_matrix())\n y_return.append(y.as_matrix()[i+1])\n\n return(X_return, y_return)\n\n for i in range(len(list_of_df_teams)):\n #do the training data first\n if i < test_teams:\n X_ , y_ = serialize(list_of_df_teams[i])\n X_test.extend(X_)\n y_test.extend(y_)\n\n #do the training data second\n else:\n X_ , y_ = serialize(list_of_df_teams[i])\n X_train.extend(X_)\n y_train.extend(y_)\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n\n return(X_train, y_train, X_test, y_test)\n\ndef save_data_frame(X_train, y_train, X_test, y_test, filename):\n dict = {\n 'X_train': X_train,\n 'y_train': y_train,\n 'X_test': X_test,\n 'y_test': y_test\n }\n np.save('data/' + filename, dict)\n\nif __name__ == '__main__':\n\n dataset = pd.read_csv('/home/marius/Desktop/BayesLoLPredictions/Websearch_gamesoflegends/NA_spring_2017.csv')\n dataset = prep_data(dataset)\n dataset = arange_teamwise(dataset)\n print(dataset.head())\n list_of_df_teams = get_teams(dataset)\n print(list_of_df_teams[0])\n X_train, y_train, X_test, y_test = prep_for_training(list_of_df_teams)\n print(\"shape of X_train: \", np.shape(X_train))\n print(\"shape of y_train: \", np.shape(y_train))\n print(\"X_test: \", np.shape(X_test))\n print(\"y_test: \", np.shape(y_test))\n\n save_data_frame(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, filename='NA_spring_2017')\n","sub_path":"LeagueOfLegends/2018-12-Predict_Features_LSTM/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"340527240","text":"#\n# Copyright (c) 2021 Incisive Technology Ltd\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom os import getcwd\nfrom pathlib import Path\nfrom threading import Thread\nimport time\nfrom kubernetes import config\nfrom hikaru import set_global_default_release, load_full_yaml\nfrom hikaru.model.rel_1_16 import Pod, Namespace, ObjectMeta\nfrom hikaru.watch import Watcher, MultiplexingWatcher, WatchEvent\n\nset_global_default_release('rel_1_16')\n\nconfig.load_kube_config(config_file=\"/etc/rancher/k3s/k3s.yaml\")\n\ncwd = getcwd()\nif cwd.endswith('/e2e'):\n # then we're running in the e2e directory itself\n base_path = Path('../test_yaml')\nelse:\n # assume we're running in the parent directory\n base_path = Path('test_yaml')\ndel cwd\n\n\ndef test01():\n \"\"\"\n test01: simple watcher test; timeout after load\n \"\"\"\n w = Watcher(Pod, timeout_seconds=1)\n count = 0\n for we in w.stream(manage_resource_version=True, quit_on_timeout=True):\n assert isinstance(we.obj, Pod), f'got a {we.obj.__name__}, not a Pod'\n count += 1\n assert count > 0, 'got no Pod events'\n\n\ndef make_namespace(name):\n def do_it(nsname):\n time.sleep(0.1)\n ns = Namespace(metadata=ObjectMeta(name=nsname))\n ns.create()\n time.sleep(0.1)\n ns.delete()\n\n t = Thread(target=do_it, args=(name,))\n t.start()\n\n\ndef make_pod(name, nsname):\n def do_it(podname, ns):\n time.sleep(0.1)\n path = base_path / \"core-pod.yaml\"\n pod: Pod = load_full_yaml(path=str(path))[0]\n pod.metadata.name = podname\n pod.metadata.namespace = ns\n pod.create()\n time.sleep(0.1)\n pod.delete()\n\n t = Thread(target=do_it, args=(name, nsname))\n t.start()\n\n\ndef drain(w: Watcher) -> int:\n highest_rv = 0\n for we in w.stream(manage_resource_version=True, quit_on_timeout=True):\n rv = int(we.obj.metadata.resourceVersion)\n if rv > highest_rv:\n highest_rv = rv\n return highest_rv\n\n\ndef test02():\n \"\"\"\n test02: watch for namespace events, create/delete a namespace\n \"\"\"\n w = Watcher(Namespace)\n ns_name = \"test02-watch\"\n highest_rv = drain(w)\n w = Watcher(Namespace, resource_version=highest_rv)\n make_namespace(ns_name)\n for we in w.stream(manage_resource_version=True, quit_on_timeout=True):\n assert isinstance(we.obj, Namespace)\n if we.obj.metadata.name == ns_name and we.etype == \"DELETED\":\n w.stop()\n\n\ndef test03():\n \"\"\"\n test03: check we get all the events we expect for a create/delete\n \"\"\"\n w = Watcher(Namespace)\n highest_rv = drain(w)\n w.update_resource_version(highest_rv)\n ns_name = 'test03-watcher'\n expected_types = {'ADDED', 'MODIFIED', 'DELETED'}\n make_namespace(ns_name)\n seen_types = set()\n for we in w.stream(manage_resource_version=True, quit_on_timeout=False):\n assert isinstance(we.obj, Namespace)\n if we.obj.metadata.name != ns_name:\n continue\n seen_types.add(we.etype)\n if we.etype == 'DELETED':\n w.stop()\n assert expected_types == seen_types\n\n\ndef dump(we: WatchEvent):\n print(f\"e:{we.etype} t:{we.obj.kind} n:{we.obj.metadata.name} ns:\"\n f\"{we.obj.metadata.namespace}\")\n\n\ndef test04():\n \"\"\"\n test04: check basic mux operation\n \"\"\"\n ns_name = 'test04-watch'\n podname = 'test04-pod'\n\n nsw = Watcher(Namespace)\n hns = drain(nsw)\n nsw.update_resource_version(hns)\n\n pw = Watcher(Pod, namespace=ns_name)\n hp = drain(pw)\n pw.update_resource_version(hp)\n\n mux = 
MultiplexingWatcher()\n mux.add_watcher(nsw)\n mux.add_watcher(pw)\n expected = {'ADDED', 'MODIFIED', 'DELETED'}\n pod_seen = set()\n ns_seen = set()\n make_namespace(ns_name)\n make_pod(podname, ns_name)\n stopped_mux = False\n for we in mux.stream(manage_resource_version=True, quit_on_timeout=False):\n if we.obj.kind == 'Pod' and we.obj.metadata.namespace == ns_name:\n pod_seen.add(we.etype)\n elif we.obj.kind == 'Namespace' and we.obj.metadata.name == ns_name:\n ns_seen.add(we.etype)\n if 'DELETED' in pod_seen and 'DELETED' in ns_seen:\n stopped_mux = True\n mux.stop()\n assert stopped_mux, \"the mux exited via timeout or loss of watchers\"\n assert expected == ns_seen, f'Not enough namespace events: {expected-ns_seen}'\n assert expected == pod_seen, f'Not enough pod events: {expected-pod_seen}'\n\n\ndef test05():\n \"\"\"\n test05: check adding a Watcher on the fly to the mux\n \"\"\"\n ns_name = 'test05-watch'\n podname = 'test05-pod'\n\n nsw = Watcher(Namespace)\n hns = drain(nsw)\n nsw.update_resource_version(hns)\n\n pw = Watcher(Pod, namespace=ns_name)\n hp = drain(pw)\n pw.update_resource_version(hp)\n\n mux = MultiplexingWatcher()\n mux.add_watcher(nsw)\n expected = {'ADDED', 'MODIFIED', 'DELETED'}\n pod_seen = set()\n ns_seen = set()\n make_namespace(ns_name)\n make_pod(podname, ns_name)\n stopped_mux = False\n first = True\n for we in mux.stream(manage_resource_version=True, quit_on_timeout=False):\n if first:\n first = False\n mux.add_watcher(pw)\n if we.obj.kind == 'Pod' and we.obj.metadata.namespace == ns_name:\n pod_seen.add(we.etype)\n elif we.obj.kind == 'Namespace' and we.obj.metadata.name == ns_name:\n ns_seen.add(we.etype)\n if 'DELETED' in pod_seen and 'DELETED' in ns_seen:\n stopped_mux = True\n mux.stop()\n assert stopped_mux, \"the mux exited via timeout or loss of watchers\"\n assert expected == ns_seen, f'Not enough namespace events: {expected-ns_seen}'\n assert expected == pod_seen, f'Not enough pod events: {expected-pod_seen}'\n\n\n\nif __name__ == \"__main__\":\n for k, v in dict(globals()).items():\n if callable(v) and k.startswith('test'):\n print(f'running {k}')\n try:\n v()\n except Exception as e:\n print(f'{k} failed with {e}')\n","sub_path":"tests/e2e/watch_rel_1_16.py","file_name":"watch_rel_1_16.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"521071238","text":"# -*- coding: utf-8 -*-\nfrom PySide import QtGui, QtCore\n\n\nclass ConfigureCommand(QtGui.QDialog):\n def __init__(self, parent=None):\n super(ConfigureCommand, self).__init__(parent)\n self.resize(500, 100)\n self.result = None\n main_layout = QtGui.QGridLayout(self)\n name_label = QtGui.QLabel(\"Name\")\n self.name_le = QtGui.QLineEdit()\n command_label = QtGui.QLabel(\"Command\")\n self.command_le = QtGui.QLineEdit()\n btn_layout = QtGui.QHBoxLayout()\n self.ok_btn = QtGui.QPushButton(\"OK\")\n self.cancel_btn = QtGui.QPushButton(\"Cancel\")\n btn_layout.addStretch()\n btn_layout.addWidget(self.ok_btn)\n btn_layout.addWidget(self.cancel_btn)\n main_layout.addWidget(name_label, 0, 0, 1, 1)\n main_layout.addWidget(self.name_le, 0, 1, 1, 6)\n main_layout.addWidget(command_label, 1, 0, 1, 1)\n main_layout.addWidget(self.command_le, 1, 1, 1, 6)\n main_layout.addLayout(btn_layout, 2, 1, 1, 6)\n self.set_signals()\n\n def set_signals(self):\n self.cancel_btn.clicked.connect(self.do_close)\n self.ok_btn.clicked.connect(self.get_result)\n\n def do_close(self):\n self.close()\n 
self.deleteLater()\n\n def get_result(self):\n name = str(self.name_le.text())\n command = str(self.command_le.text())\n if not all((name, command)):\n return\n self.result = {name: command}\n self.close()\n\n\ndef main():\n import sys\n app = QtGui.QApplication(sys.argv)\n sc = ConfigureCommand()\n sc.show()\n app.exec_()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"miraScripts/pipeTools/port_operation/configure_command.py","file_name":"configure_command.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"73100004","text":"'''\nCopyright (c) 2015 SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nALL RIGHTS RESERVED.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nNeither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nnor the names of its contributors may be used to endorse or promote \nproducts derived from this software without specific prior written \npermission.\n\nThis work has been performed in the framework of the SONATA project,\nfunded by the European Commission under Grant number 671517 through \nthe Horizon 2020 and 5G-PPP programmes. The authors would like to \nacknowledge the contributions of their colleagues of the SONATA \npartner consortium (www.sonata-nfv.eu).\n'''\n\nfrom django.conf.urls import url, include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom api import views\n\n# API endpoints\n\n\n\nurlpatterns = [\n\turl(r'^api/v1/users$', views.SntUsersList.as_view()),\n\turl(r'^api/v1/user/(?P[0-9]+)/$', views.SntUsersDetail.as_view()),\n\n\turl(r'^api/v1/services$', views.SntServicesList.as_view()),\n\turl(r'^api/v1/services/user/(?P[^/]+)/$', views.SntServicesPerUserList.as_view()),\n url(r'^api/v1/service/(?P[0-9]+)/$', views.SntServicesDetail.as_view()),\n\turl(r'^api/v1/service/new$', views.SntNewServiceConf.as_view()),\n #url(r'^api/v1/serviceconf$', views.SntServiceConfList.as_view()),\n\n\turl(r'^api/v1/functions$', views.SntFunctionsList.as_view()),\n\turl(r'^api/v1/function/(?P[0-9]+)/$', views.SntFunctionsDetail.as_view()),\n\turl(r'^api/v1/functions/service/(?P[^/]+)/$', views.SntFunctionsPerServiceList.as_view()),\n\n\turl(r'^api/v1/metrics$', views.SntMetricsList.as_view()),\n\turl(r'^api/v1/metric/(?P[0-9]+)/$', views.SntMetricsDetail.as_view()),\n\turl(r'^api/v1/metrics/function/(?P[^/]+)/$', views.SntMetricsPerFunctionList1.as_view()),\n\n\turl(r'^api/v1/alerts/rules$', views.SntRulesList.as_view()),\n\turl(r'^api/v1/alerts/rule/(?P[0-9]+)/$', views.SntRulesDetail.as_view()),\n\n\turl(r'^api/v1/notification/types$', views.SntNotifTypesList.as_view()),\n\turl(r'^api/v1/notification/type/(?P[0-9]+)/$', views.SntNotifTypesDetail.as_view()),\n\n\turl(r'^api/v1/prometheus/metrics/list$', views.SntPromMetricList.as_view()),\n\turl(r'^api/v1/prometheus/metrics/data$', views.SntPromMetricData.as_view()),\n\n\turl(r'^docs/', include('rest_framework_swagger.urls')),\n\t\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n\n\n\t\n 
#url(r'^api/test/$', views.TestList.as_view()),\n #url(r'^api/test/(?P[0-9]+)/$', views.TestDetail.as_view()),\n #url(r'^users/$', views.UserList.as_view()),\n\t#url(r'^users/(?P[0-9]+)/$', views.UserDetail.as_view()),\n\t","sub_path":"manager/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"647762470","text":"import scenes as scn\nfrom sys import exit\nfrom world import levels as lvl\nfrom world import parser as prs\n\n\nclass Engine:\n \"\"\"The Engine class runs the map levels in a continuous loop,\n while receiving and setting the next levels begin() function\"\"\"\n def __init__(self, level):\n self._level = level\n\n def parse_level(self, level):\n \"\"\"This function runs the parser with the correct\n variables\"\"\"\n prs.GameParser(level).cmdloop()\n\n def play(self):\n \"\"\" function for the game loop\"\"\"\n cur_level = self._level.begin_game()\n\n while True:\n self.parse_level(cur_level)\n next_lvl_name = cur_level.nextlvl\n if next_lvl_name is None:\n exit(1)\n cur_level = self._level.next_level(next_lvl_name)\n\n\nclass Map:\n \"\"\"The Map class stores the character, starting level, scenes\n and the dictionary of levels\"\"\"\n def __init__(self, character, level):\n self._plyr = character\n self._level = level\n msg = scn.Scenes()\n outside_door = lvl.Level(\n self._plyr,\n msg.scene02,\n False,\n [],\n 'north',\n 'south',\n 'east',\n 'west'\n )\n self._levels = {\n 'door': outside_door,\n 'start': outside_door,\n 'home': lvl.Level(\n self._plyr,\n msg.scene01,\n False,\n ['helmet', 'prism'],\n 'door'\n ),\n 'north': lvl.Level(\n self._plyr,\n msg.north,\n False,\n [],\n 'home'\n ),\n 'south': lvl.Level(\n self._plyr,\n msg.south,\n False,\n [],\n 'start'\n ),\n 'east': lvl.Level(\n self._plyr,\n msg.east,\n False,\n ['edelweiss'],\n 'up'\n ),\n 'west': lvl.Level(\n self._plyr,\n msg.west,\n False,\n [],\n 'home'\n ),\n 'cave': lvl.Level(\n self._plyr,\n msg.cave,\n True,\n [],\n 'exit cave'\n ),\n 'up': lvl.Level(\n self._plyr,\n msg.caveEntry,\n False,\n [],\n 'cave'\n ),\n 'exit cave': lvl.Level(\n self._plyr,\n msg.caveExit,\n False,\n [],\n 'north',\n 'south',\n 'west'\n )\n }\n\n def next_level(self, level_name):\n \"\"\"Retrieves the game key from the levels dictionary\"\"\"\n return self._levels.get(level_name, 'Level not found')\n\n def begin_game(self):\n \"\"\"Initations the game loop with the beginning level\"\"\"\n return self.next_level(self._level)\n\n def __str__(self):\n output = \"\\tself._plyr: {0}\\n\".format(self._plyr.name)\n output += \"\\tself._level: {0}\\n\".format(self._levelStart)\n return output\n","sub_path":"world/game_engine.py","file_name":"game_engine.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"3714382","text":"# -*- coding:utf-8 -*-\n\"\"\"\nCode modified on March 2021 by Maria Teresa Parreira @Instituto Superior Tecnico, Lisboa\n\n GLCM\n Copyright (c) 2016 Tetsuya Shinaji\n This software is released under the MIT License.\n http://opensource.org/licenses/mit-license.php\n\n Date: 2016/01/29\n\"\"\"\n\nimport numpy as np\nfrom skimage.feature import greycomatrix, greycoprops\nfrom skimage.measure import shannon_entropy\n\n\n\nclass GLCM:\n \"\"\"\n Gray-Level Co-occurrence Matrix\n \"\"\"\n\n def __init__(self, img, level_min=1, level_max=256, threshold=None):\n \"\"\"\n initialize\n :param img: 
normalized image\n :param theta: definition of neighbor\n :param level_min: min intensity of normalized image\n :param level_max: max intensity of normalized image\n :param threshold: threshold of the minimal value\n \"\"\"\n\n \n self.img = img\n self.n_level = (level_max - level_min) + 1\n self.level_min = level_min\n self.level_max = level_max\n #self.theta = theta\n self.glcm = greycomatrix(img, distances=[1], angles=[0, np.pi/4, np.pi/2, 3*np.pi/4], levels=256, normed = True)\n #The grey-level co-occurrence histogram. The value P[i,j,d,theta] is the number of times that grey-level j \n #occurs at a distance d and at an angle theta from grey-level i. If normed is False, the output is of type \n #uint32, otherwise it is float64. The dimensions are: levels x levels x number of distances x number of angles.\n \n #here, we have 4 matrices\n\n matrix = np.sum(self.glcm, axis = 3) / 4 #makes the matrix invariant as it sums all the elements for different angles\n self.matrix = np.ndarray((256,256,1,1))#keeps it 4-dimensional\n self.matrix[:,:,:,0] = matrix\n\n\n self.features = self._calc_features()\n\n def _calc_features(self):\n \"\"\"\n calculate feature values\n :return: feature values\n \"\"\"\n \n \n features = {}\n unif = []\n ent = []\n for i in np.arange(self.glcm.shape[3]):\n mat = self.glcm[:,:,0,i]\n feature_unif = (mat ** 2).sum()\n unif.append(feature_unif)\n feature_ent = shannon_entropy(mat)\n ent.append(feature_ent)\n \n matrix = self.matrix[:,:,0,0]\n features['Uniformity'] = list(unif) #uniformity for each matrix/angle\n features['Invariant Uniformity'] = (matrix ** 2).sum()\n \n features['GLCM Entropy'] = list(ent) #same, but for entropy\n features['GLCM Invariant Entropy'] = shannon_entropy(matrix)\n \n features['Correlation'] = greycoprops(self.glcm, 'correlation')[0]\n aux_corr = greycoprops(self.matrix, 'correlation')\n features['Invariant Correlation'] = float(aux_corr[0][0])\n \n features['Dissimilarity'] = greycoprops(self.glcm, 'dissimilarity')[0]\n aux_diss = greycoprops(self.matrix, 'dissimilarity')\n features['Invariant Dissimilarity'] = float(aux_diss[0][0])\n \n features['Contrast'] = greycoprops(self.glcm, 'contrast')[0]\n aux_cont = greycoprops(self.matrix, 'contrast')\n features['Invariant Contrast'] = float(aux_cont[0][0])\n \n features['Homogeneity'] = greycoprops(self.glcm, 'homogeneity')[0]\n aux_hom = greycoprops(self.matrix, 'homogeneity')\n features['Invariant Homogeneity'] = float(aux_hom[0][0])\n \n features['Energy'] = greycoprops(self.glcm, 'energy')[0]\n aux_eng = greycoprops(self.matrix, 'energy')\n features['Invariant Energy'] = float(aux_eng[0][0])\n \n return features\n\n \n \n def print_features(self, print_values = True):\n \"\"\"\n print features\n \"\"\"\n \n if print_values:\n print(\"----GLCM-----\")\n feature_labels = []\n feature_values = []\n for key in self.features.keys():\n if print_values:\n print(\"{}: {}\".format(key, self.features[key]))\n feature_labels.append(key)\n feature_values.append(self.features[key])\n \n return feature_labels, feature_values\n\n","sub_path":"GLCM.py","file_name":"GLCM.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"306324213","text":"import keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nimport numpy as np\r\nimport pickle\r\nnp.random.seed(1000)\r\n\r\n# 
Load in data from PA3.py pickle\r\n\r\npickle_in = open(\"X.pickle\",\"rb\")\r\nX = pickle.load(pickle_in)\r\n\r\npickle_in = open(\"y.pickle\",\"rb\")\r\ny = pickle.load(pickle_in)\r\n\r\nX = X/255.0\r\n\r\n# AlexNet Model Implementation\r\n\r\n#Instantiate an empty model\r\nmodel = Sequential()\r\n\r\n# 1st Convolutional Layer\r\nmodel.add(Conv2D(filters=96, input_shape=(360,240,1), kernel_size=(11,11), strides=(4,4), padding='valid'))\r\nmodel.add(Activation('relu'))\r\n# Max Pooling\r\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))\r\n\r\n# 2nd Convolutional Layer\r\nmodel.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))\r\nmodel.add(Activation('relu'))\r\n# Max Pooling\r\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))\r\n\r\n# 3rd Convolutional Layer\r\nmodel.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))\r\nmodel.add(Activation('relu'))\r\n\r\n# 4th Convolutional Layer\r\nmodel.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))\r\nmodel.add(Activation('relu'))\r\n\r\n# 5th Convolutional Layer\r\nmodel.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))\r\nmodel.add(Activation('relu'))\r\n# Max Pooling\r\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))\r\n\r\n# Passing it to a Fully Connected layer\r\nmodel.add(Flatten())\r\n\r\n# 1st Fully Connected Layer\r\nmodel.add(Dense(4096, input_shape=(224*224*3,)))\r\nmodel.add(Activation('relu'))\r\n\r\n# Add Dropout to prevent overfitting\r\nmodel.add(Dropout(0.4))\r\n\r\n# 2nd Fully Connected Layer\r\nmodel.add(Dense(4096))\r\nmodel.add(Activation('relu'))\r\n# Add Dropout\r\nmodel.add(Dropout(0.4))\r\n\r\n# 3rd Fully Connected Layer\r\nmodel.add(Dense(1000))\r\nmodel.add(Activation('relu'))\r\n# Add Dropout\r\nmodel.add(Dropout(0.4))\r\n\r\n# Output Layer\r\nmodel.add(Dense(10))\r\nmodel.add(Activation('softmax'))\r\n\r\nmodel.summary()\r\n\r\n# Compile the model\r\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=[\"accuracy\"])\r\n\r\nmodel.fit(X, y, batch_size=32, epochs=20, validation_split=0.20)","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"177389657","text":"import os\nimport sys\nimport django\nimport requests\nimport csv\nsys.path.append(r'C:\\Users\\kevin\\OneDrive\\Desktop\\Data Science\\DataDisca\\module3')\nos.environ['DJANGO_SETTINGS_MODULE'] = 'django_api.settings'\ndjango.setup()\nfrom locations.models import Location\n\n\n# function to get geo-codes for locations using Google API\ndef get_geo_coordinates_from_google(location_address: str, connection_params: dict):\n \"\"\"\n Method to geo-code human readable address into latitudes and longitude codes using Google API.\n\n :param location_address: Physical address to geocode\n :param connection_params: Google API credentials for authorisation\n :return : Geocode information for the location\n \"\"\"\n\n base_url = 'https://maps.googleapis.com/maps/api/geocode'\n endpoint = '{}/{}?address={}&key={}'.format(\n base_url,\n connection_params['output_format'],\n location_address,\n connection_params['api_key']\n )\n\n # make the GET request\n results = requests.get(endpoint).json()\n # print(address, results)\n\n # check if codes were successfully obtained or not\n if results['status'] == 'ZERO_RESULTS':\n return 
None\n\n location = results['results'][0]['geometry']['location']\n return {\n 'longitude': location['lng'],\n 'latitude': location['lat']\n }\n\n\ndef get_locations(path: str) -> list:\n \"\"\"\n Method to retrieve unique locations from 'product_a' csv file.\n\n :param path: Path to 'product_a' csv file\n :return list: Unique locations as a list\n \"\"\"\n unique_locations = set()\n\n with open(path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n for row in csv_reader:\n unique_locations.add(row[13])\n\n return list(unique_locations)\n\n\nif __name__ == '__main__':\n # NOTE: I am using models ONLY to save data longitude and latitude data in the SQLite table\n\n # get unique locations to save to database\n path_to_csv = r'C:\\Users\\kevin\\OneDrive\\Desktop\\Data Science\\DataDisca\\module3\\locations\\data\\product_a.csv'\n locations = get_locations(path_to_csv)\n\n # api key credentials\n GOOGLE_API_KEY = 'Your key'\n connection_params_ = {\n 'output_format': 'json',\n 'api_key': GOOGLE_API_KEY\n }\n\n # saving locations data in SQLite database\n for loc in locations:\n lat_lng = get_geo_coordinates_from_google('{},+US'.format(loc), connection_params_)\n\n if lat_lng is not None:\n latitude = lat_lng['latitude']\n longitude = lat_lng['longitude']\n\n location_instance = Location(lat=latitude, lon=longitude, loc=loc)\n location_instance.save()\n","sub_path":"locations/save_locations.py","file_name":"save_locations.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"561793068","text":"import pandas as pd\nimport numpy as np\nimport re\nimport glob\nimport math\nfrom math import log\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.offsetbox import AnchoredText\nfrom math import log\nfrom datetime import time\nfrom datetime import datetime\nfrom scipy import signal\nfrom scipy.interpolate import interp1d\n\n# 3rd code of MLS analysis\n# write the Uccle data to a df that matches with MLS\n\nproblem = open(\"ProblematicFiles.txt\", \"a\")\n\n# file = open(\"/home/poyraden/Analysis/AURA_MLS/MatchedDates.txt\", \"r\")\nfile = open(\"/home/poyraden/Analysis/AURA_MLS/Codes/MLSUccle_MatchedDates.txt\", \"r\")\n\n\nall_lines = file.readlines()\nmatcheddates = []\nfor il in all_lines:\n tmp = il.split(\"\\n\")[0]\n matcheddates.append(tmp)\nprint(len(matcheddates))\n\n#fileu = open(\"/home/poyraden/Analysis/AURA_MLS/UccleData_2004_2018.txt\", \"r\")\nfileu = open(\"/home/poyraden/Analysis/AURA_MLS/UccleData_2004_2018.txt\", \"r\")\n\ntest_lines = fileu.readlines()\n\nuccledata_list = ['']*len(test_lines)\n\nfor il in range(len(test_lines)):\n test_lines[il] = test_lines[il].split(\"\\n\")[0]\n tmp = test_lines[il].split(\".\")[0].split(\"uc\")[1]\n uccledata_list[il] = str('20'+tmp)\n\ncommon_dates = set(uccledata_list) & set(matcheddates)\nprint('common dates', len(common_dates))\ncommon_dates = list(common_dates)\n\nfile_toread = [] \n\nfor ib in test_lines:\n ibfull = '20'+ib.split(\".\")[0].split(\"uc\")[1]\n # print('ibfull', ibfull)\n for d in common_dates:\n # print('d', d)\n if(ibfull.find(d) != -1):\n # print('ib next', ib)\n #file_toread.append('/home/poyraden/Analysis/AURA_MLS/UccleData/' + ib)\n file_toread.append('/home/poyraden/Analysis/AURA_MLS/UccleData/' + ib)\n\n\ncolumnString = \"Time Altitude Pair Tair Humidity TPump PO3 WindDir WindSp AccumO3\"\ncolumnStr = columnString.split(\" \")\n\n#mls data frame to read\n# dfm = 
pd.read_csv(\"/home/poyraden/Analysis/AURA_MLS/AURA_MLSData_MatchedUccle.csv\")\ndfm = pd.read_hdf(\"/home/poyraden/Analysis/AURA_MLS/New/AURA_MLSData_MatchedUccle.h5\")\n\n\nlist_data = []\nlistall_data = []\n\nfile_test = file_toread[300:800]\n\n\nfor filename in file_toread:\n#for filename in file_test:\n print('filename', filename)\n file = open(filename,'r')\n file.readline()\n header_tmp = file.readline().split()\n header_date = header_tmp[1]\n header_date = datetime.strptime(header_date, '%Y%m%d')\n header_date = header_date.strftime('%Y%m%d')\n\n header_time = header_tmp[2]\n header_time = datetime.strptime(header_time, '%H:%M')\n header_time = header_time.strftime('%H:%M:%S')\n\n print(header_date, header_time)\n file.readline()\n header_flowarate = file.readline().split()[1]\n header_bkg = file.readline().split()[1]\n header_PAvg = file.readline().split()[1]\n header_PCorr = file.readline().split()[1]\n header_TCorr = file.readline().split()[1]\n header_HumCorr = file.readline().split()[1]\n header_TotO3 = file.readline().split()[1]\n header_TotO3Corr = file.readline().split()[1]\n header_PumpCorr = file.readline().split()[1]\n header_O3burst = file.readline().split()[1]\n \n df = pd.read_csv(filename, sep = \"\\s *\", engine=\"python\", skiprows=13, names=columnStr)\n\n df = df.join(pd.DataFrame(\n [[header_date, header_time, header_flowarate, header_bkg, header_PAvg,header_PCorr, header_TCorr,\n header_HumCorr, header_TotO3, header_TotO3Corr, header_PumpCorr,header_O3burst ]],\n index=df.index, \n columns=['Header_Date','Header_Time','Header_FlowRate','Header_Bkg','Header_PAvg','Header_PCorr','Header_TCorr',\n 'Header_HumCorr','Header_TotO3','Header_TotO3Corr','Header_PumpCorr','Header_O3Burst']\n ))\n\n print('len df', len(df))\n # df for all uccle data\n list_data.append(df) \n\n # now downsample the uccle data \n # now downsample the uccle data \n # dfn = df[df.Altitude > 0 ]\n # dfn['Descent'] = dfn.Altitude < dfn.Altitude.shift(2)\n # descent_list = dfn.index[dfn['Descent'] == True].tolist()\n # # ascent df\n # dfa = dfn.drop(descent_list)\n\n### another method for this 08/06/2020\n dfn = df[df.Altitude > 0]\n maxh = dfn.Altitude.max()\n index = dfn[dfn[\"Altitude\"] == maxh].index[0]\n\n descent_list = dfn[dfn.index > index].index.tolist()\n dfa = dfn.drop(descent_list)\n\n\n## for the frzoen solutions\n dfa = dfa.drop(dfa[ (dfa.PO3 <= 2) & (dfa.Pair <= 10) ].index)\n\n \n # skimming for the mls data\n dfas = dfa[(dfa.Pair >=3) & (dfa.Pair <= 400 )]\n #dfas = dfa[(dfa.Pair >= 10) & (dfa.Pair < 422 )]\n\n ymls = [1000.000, 825.404,681.292,562.341,464.159,383.119,316.228,261.016,215.443,177.828,146.780,121.153,100.000,\n 82.5404,68.1292,56.2341,46.4159,38.3119,31.6228,26.1016,21.5443,17.7828,14.6780,12.1153,10.0000,8.25404,\n 6.81292, 5.62341,4.64159,3.83119,3.16228,2.61016,2.15443,1.77828,1.46780,1.21153, 1.00000, 0.681292,\n 0.464159,0.316228,0.215443,0.146780,0.100000,0.0464159,0.0215443,0.0100000, 4.64159e-03 ,2.15443e-03,\n 1.00000e-03 ,4.64159e-04 ,2.15443e-04 ,1.00000e-04 ,4.64159e-05 ,2.15443e-05 ,1.00000e-05]\n\n # string to get pressure values of the mls\n st = ['']*55\n for p in range(55):\n st[p] = 'Pressure_' + str(p+1)\n \n\n # previous\n yref = [383.119 ,316.228, 261.016, 215.443, 177.828, 146.78, 121.153, 100.0, 82.5404, 68.1292, 56.2341, 46.4159,\n 38.3119, 31.6228, 26.1016, 21.5443, 17.7828, 14.678, 12.1153,10.0000,8.25404,6.81292,5.62341,4.64159]\n uybin = []\n\n for y in range(len(yref)-1):\n tmp = (log(yref[y]) + log(yref[y+1]))/2\n 
uybin.append(math.exp(tmp))\n #print(uybin)\n #means will be calculated between uybin[i] and uybin[i+1]\n\n size = len(uybin) -1\n xmean = [0]*size; xmedian = [0]*size\n \n\n for xb in range(size):\n tmp = dfas[(dfas.Pair < uybin[xb]) & ( dfas.Pair >= uybin[xb+1])]['PO3'].tolist()\n \n if(len(tmp) < 10):\n continue\n \n if( (np.mean(tmp) > 0)):\n #print(xb, 'posifitf tmp',tmp)\n\n xmean[xb] = np.mean(tmp)\n xmedian[xb] = np.median(tmp)\n \n if(xmean[xb] == 0): print('one here ?')\n\n if( (np.mean(tmp) < 0) ):\n tmp = np.array(tmp)\n ind = np.where(tmp == -9999.0)[0]\n new = np.delete(tmp, ind)\n\n if (int(len(tmp)/len(ind)) < 3 ):\n continue\n if(len(new) < 10 ):\n continue\n\n xmean[xb] = np.mean(new)\n xmedian[xb] = np.median(new)\n if(xmean[xb] == 0): print('two here ?')\n\n if( (np.mean(tmp) == 0) ):\n xmean[xb] = np.nan\n\n\n for aa in range(size):\n if(xmean[aa] ==0):\n #print(dfas.iloc[0]['Header_Date'],uybin[aa], aa, xmean[aa])\n xmean[aa] = np.nan\n \n #print('Last', dfas.iloc[0]['Header_Date'], 'xmean', xmean) \n \n #Method 2: scipy interpolation\n #xuccle = dfas[(dfas.Pair <= 349)]['PO3'].tolist()\n #yuccle = dfas[(dfas.Pair <= 349)]['Pair'].tolist()\n ## test debug\n xuccle = dfas['PO3'].tolist()\n yuccle = dfas['Pair'].tolist()\n\n # xuccle = dfas[(dfas.Pair <= 260)]['PO3'].tolist()\n # yuccle = dfas[(dfas.Pair <= 260)]['Pair'].tolist()\n\n xuccle = np.array(xuccle)\n yuccle = np.array(yuccle)\n if( (len(xuccle) < 15) | (len(xuccle) ==0)):\n # print('Problem here ? ', header_date)/\n problem.write(header_date + '\\n')\n continue\n \n indu = np.where(xuccle < 0)[0]\n \n xuccle = np.delete(xuccle, indu)\n yuccle = np.delete(yuccle, indu)\n if( (len(xuccle) < 10) | (len(xuccle) ==0)):\n continue\n\n #print('yuccle', yuccle)\n #print('xuccle', xuccle)\n\n if(header_date == '20150731'):continue\n if(header_date == '20170201'):continue\n if(header_date == '20171006'):continue\n\n \n #ymain = [316.228, 261.016, 215.443, 177.828, 146.78, 121.153, 100.0, 82.5404, 68.1292, 56.2341, 46.4159, 38.3119, 31.6228, 26.1016, 21.5443, 17.7828, 14.678]\n #ymain = [215.443, 177.828, 146.78, 121.153, 100.0, 82.5404, 68.1292, 56.2341, 46.4159, 38.3119, 31.6228, 26.1016, 21.5443, 17.7828, 14.678]\n ymain = [316.228, 261.016, 215.443, 177.828, 146.78, 121.153, 100.0, 82.5404, 68.1292, 56.2341, 46.4159, 38.3119,\n 31.6228, 26.1016, 21.5443, 17.7828, 14.678,12.1153,10.0000,8.25404,6.81292,5.62341]\n\n if(max(yuccle) < max(ymain)):continue\n # if(min(yuccle) > min(ymain)):continue\n\n #ymain = np.array(ymain)\n\n #indym = np.where(min(yuccle) > ymain)[0]\n #ymain[indym] = 0\n # if(len(xmeanvec) != len(ymain)): print(header_date)\n\n #print(len(xuccle), len(yuccle))\n # 5 different linear interpolations\n fl = interp1d(yuccle, xuccle)\n #fc = interp1d(yuccle, xuccle, kind='cubic')\n fn = interp1d(yuccle, xuccle, kind='nearest')\n fp = interp1d(yuccle, xuccle, kind='previous')\n fne = interp1d(yuccle, xuccle, kind='next')\n\n \n # xinter_linear = fl(ymain)\n # #xinter_cubic = fc(ymain)\n # xinter_nearest = fn(ymain)\n # xinter_previous = fp(ymain)\n # xinter_next = fne(ymain)\n\n ## try except part\n xinter_linear = [0]* len(ymain); xinter_nearest = [0]* len(ymain)\n xinter_previous = [0]* len(ymain)\n xinter_next = [0]* len(ymain)\n\n for ix in range(len(ymain)):\n try:\n xinter_linear[ix] = fl(ymain[ix])\n xinter_nearest[ix] = fn(ymain[ix])\n xinter_previous[ix] = fp(ymain[ix])\n xinter_next[ix] = fne(ymain[ix])\n except ValueError:\n xinter_linear[ix] = np.nan\n xinter_nearest[ix] = np.nan\n 
xinter_previous[ix] = np.nan\n xinter_next[ix] = np.nan\n #print(header_date, ix, xinter_linear[ix])\n\n\n for ir in range(len(xinter_linear)):\n if(xinter_linear[ir] <= 0): xinter_linear[ir] = np.nan\n if(xinter_nearest[ir] <= 0): xinter_nearest[ir] = np.nan\n if(xinter_previous[ir] <= 0): xinter_previous[ir] = np.nan\n if(xinter_next[ir] <= 0): xinter_next[ir] = np.nan\n \n ddate = [header_date] * len(ymain)\n #im = 23\n im = 28\n ##ib = 8\n ib = 6\n \n dl = dfm.index[dfm.Date == int(header_date)].tolist()\n #mlsdate = \n mlspo3 = list(dfm.loc[dl[0],st[ib:im]])\n tim = dfm.loc[dl[0],'Time']\n mlstime = [tim] * len(ymain)\n #print(header_date,'and', mlstime)\n dis = dfm.loc[dl[0],'Dis']\n mlsdis = [dis]*len(ymain)\n if(len(dl) == 2 ):\n mlspo3_two = list(dfm.loc[dl[1],st[ib:im]])\n tim2 = dfm.loc[dl[1],'Time']\n mlstime_two = [tim2]* len(ymain)\n dis2 = dfm.loc[dl[1],'Dis'] \n mlsdis_two = [dis2] * len(ymain)\n \n for il in range(len(mlspo3)):\n if(xmean[il] == 0 ): print('WHY')\n if(mlspo3[il] < 0):\n mlspo3[il]= np.nan\n xmean[il] = np.nan\n xmedian[il] = np.nan\n xinter_linear[il] = np.nan\n xinter_nearest[il] = np.nan\n xinter_previous[il] = np.nan\n xinter_next[il] = np.nan\n if(len(dl) == 2):\n if((mlspo3_two[il] < 0)):\n mlspo3_two[il]= np.nan\n xmean[il] = np.nan\n xmedian[il] = np.nan\n xinter_linear[il] = np.nan\n xinter_nearest[il] = np.nan\n xinter_previous[il] = np.nan\n xinter_next[il] = np.nan\n\n dfl = pd.DataFrame(columns=['Date', 'Time', 'Dis', 'PreLevel','PO3_MLS', 'PO3_UcMean', 'PO3_UcMedian', 'PO3_UcIntLin', 'PO3_UcIntNearest', 'PO3_UcIntPre','PO3_UcIntNe'])\n\n \n if(len(dl) == 1 ):\n dfl['Date'] = ddate\n dfl['Time'] = np.asarray(mlstime)\n dfl['Dis'] = np.asarray(mlsdis)\n dfl['PreLevel'] = np.asarray(ymain)\n dfl['PO3_MLS'] = mlspo3\n dfl['PO3_UcMean'] = xmean\n dfl['PO3_UcMedian'] = xmedian\n dfl['PO3_UcIntLin'] = xinter_linear\n dfl['PO3_UcIntNearest'] = xinter_nearest\n dfl['PO3_UcIntPre'] = xinter_previous\n dfl['PO3_UcIntNe'] = xinter_next\n \n if(len(dl) == 2 ):\n dfl['Date'] = np.concatenate((ddate,ddate))\n dfl['Time'] = np.concatenate((mlstime, mlstime_two))\n dfl['Dis'] = np.concatenate((mlsdis, mlsdis_two))\n dfl['PreLevel'] = np.concatenate((ymain,ymain))\n dfl['PO3_MLS'] = np.concatenate((mlspo3,mlspo3_two))\n #dfl['PO3_UcMean'] = np.concatenate((xmeanvec,xmeanvec))\n #dfl['PO3_UcMedian'] = np.concatenate((xmedianvec,xmedianvec))\n dfl['PO3_UcMean'] = np.concatenate((xmean,xmean))\n dfl['PO3_UcMedian'] = np.concatenate((xmedian,xmedian))\n dfl['PO3_UcIntLin'] = np.concatenate((xinter_linear,xinter_linear))\n dfl['PO3_UcIntNearest'] = np.concatenate((xinter_nearest,xinter_nearest))\n dfl['PO3_UcIntPre'] = np.concatenate((xinter_previous,xinter_previous))\n dfl['PO3_UcIntNe'] = np.concatenate((xinter_next, xinter_next))\n\n\n \n listall_data.append(dfl) \n\n# Merging all the data files to df\n\ndf = pd.concat(list_data,ignore_index=True)\ndfall = pd.concat(listall_data,ignore_index=True)\n\n\n\n# df.to_csv(\"/home/poyraden/Analysis/AURA_MLS/Ucclematched_2004_2018_db_DC.csv\")\n# dfall.to_csv(\"/home/poyraden/Analysis/AURA_MLS/MLS_UccleInterpolated_2004-2018_final_DC.csv\")\ndf.to_csv(\"/home/poyraden/Analysis/AURA_MLS/Ucclematched_2004_2018_presto.csv\")\ndfall.to_csv(\"/home/poyraden/Analysis/AURA_MLS/MLS_UccleInterpolated_2004-2018_presto.csv\")\nprint('write dif')\n\ndfcp = dfall.copy() \n\ndfcp['Dif_UcMean'] = np.asarray(dfall.PO3_UcMean) - np.asarray(dfall.PO3_MLS) \ndfcp['Dif_UcMedian'] = np.asarray(dfall.PO3_UcMedian) - np.asarray(dfall.PO3_MLS) 
\ndfcp['Dif_UcIntLin'] = np.asarray(dfall.PO3_UcIntLin) - np.asarray(dfall.PO3_MLS)\n\n\ndfcp['RDif_UcMean'] = 100 * (np.asarray(dfall.PO3_UcMean) - np.asarray(dfall.PO3_MLS)) / np.asarray(dfall.PO3_MLS)\ndfcp['RDif_UcMedian'] = 100 * (np.asarray(dfall.PO3_UcMedian) - np.asarray(dfall.PO3_MLS)) / np.asarray(dfall.PO3_MLS)\ndfcp['RDif_UcIntLin'] = 100 * (np.asarray(dfall.PO3_UcIntLin) - np.asarray(dfall.PO3_MLS)) / np.asarray(dfall.PO3_MLS)\n\ndfcp['Dif_UcMean2'] = np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcMean) \ndfcp['Dif_UcMedian2'] = np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcMedian) \ndfcp['Dif_UcIntLin2'] = np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcIntLin) \n\ndfcp['RDif_UcMean2'] = 100 * (np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcMean)) / np.asarray(dfall.PO3_UcMean)\ndfcp['RDif_UcMedian2'] = 100 * (np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcMedian)) / np.asarray(dfall.PO3_UcMedian)\ndfcp['RDif_UcIntLin2'] = 100 * (np.asarray(dfall.PO3_MLS) - np.asarray(dfall.PO3_UcIntLin)) / np.asarray(dfall.PO3_UcIntLin)\n\n# dfcp.to_csv(\"/home/poyraden/Analysis/AURA_MLS/MLS_UccleInterpolated_2004-2018_Dif_final_DC.csv\")\ndfcp.to_csv(\"/home/poyraden/Analysis/AURA_MLS/MLS_UccleInterpolated_2004-2018_Dif_prestov2.csv\")\n\n","sub_path":"ReadUccleData_backup.py","file_name":"ReadUccleData_backup.py","file_ext":"py","file_size_in_byte":14908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"324712655","text":"#!/usr/bin/python\n\nimport sys\nimport xmltodict\nimport urllib2\n\ndef getWunderWeather(station):\n # gets wunderground weather and prints it\n # used for conky and i3\n url = \"http://api.wunderground.com/weatherstation/WXCurrentObXML.asp?ID=%s\"%(station)\n\n file = urllib2.urlopen(url)\n data = file.read()\n file.close()\n\n data = xmltodict.parse(data)\n\n curTemp = data['current_observation']['temp_f']\n curHumi = data['current_observation']['relative_humidity']\n print(\"%s %s\") % (curTemp+'F', curHumi+'%')\n\ngetWunderWeather(sys.argv[1])\n","sub_path":"wunderground.py","file_name":"wunderground.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"629141584","text":"'''\nCreated on May 30, 2013\n\n@author: gczajkow\n'''\n\nfrom Filter import Filter\nfrom LoanEnum import LOAN_ENUM_grade\n\nclass CreditGrade(Filter):\n '''\n '''\n\n def __init__(self, args, current=None):\n '''\n Construct a set similar to this based on the passed in grades\n 'A',\n 'AB',\n 'ABC',\n 'ABCD',\n 'ABCDE',\n 'ABCDEF',\n 'ABCDEFG',\n 'B',\n 'BC',\n 'BCD',\n 'BCDE',\n 'BCDEF',\n 'BCDEFG',\n 'C',\n 'CD',\n 'CDE',\n 'CDEF',\n 'CDEFG',\n 'D',\n 'DE',\n 'DEF',\n 'DEFG',\n 'E',\n 'EF',\n 'EFG',\n 'F',\n 'FG',\n 'G'\n '''\n\n options = []\n grades_bitmap = {'A': 1 << 0,\n 'B': 1 << 1,\n 'C': 1 << 2,\n 'D': 1 << 3,\n 'E': 1 << 4,\n 'F': 1 << 5,\n 'G': 1 << 6,\n }\n\n self.conversion_table = grades_bitmap.copy()\n self.reverse_table = {v:k for k, v in grades_bitmap.items()}\n num_grades = len(args.grades)\n for i in range(1, num_grades + 1):\n for j in range(num_grades):\n if (j + i) <= num_grades:\n grades = args.grades[j:j + i]\n grades_bit_value = 0\n for grade in grades:\n grades_bit_value += grades_bitmap[grade]\n options.append(grades_bit_value)\n self.conversion_table[grades] = grades_bit_value\n self.reverse_table[grades_bit_value] = grades\n\n Filter.__init__(self, args, options, current)\n\n def convert(self, 
str_grades):\n # Convert a string of grades to a bit value\n # A = 1 << 0\n # B = 1 << 1\n # C = 1 << 2\n # ...\n return self.conversion_table[str(str_grades)]\n\n def __str__(self):\n return self.reverse_table[self.getCurrent()]\n\n def apply(self, loan):\n current = self.getCurrent()\n return loan[LOAN_ENUM_grade] & current > 0\n","sub_path":"CreditGrade.py","file_name":"CreditGrade.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"472328086","text":"from turtle import *\ncolor('red', 'yellow')\nbegin_fill()\ni = 10\nt = 5\nx = -400\ny = -200\npenup()\nsetx(x)\nsety(y)\nwhile True:\n\tpendown()\n\tcircle(i)\n\tpenup()\n\tforward(i*2+3)\n\ti += 1\n\tt += 5\n\tif t%50 == 0:\n\t\tpenup()\n\t\tsetx(x)\n\t\ty += 50\n\t\tsety(y)\n\t\ti = 10\n #if abs(pos()) < 1:\n # break\nend_fill()\ndone()","sub_path":"009 turtle/trtl.py","file_name":"trtl.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"620478574","text":"import requests\nimport json\n\nRPC_SERVER_ENDPOINT = 'http://localhost:3000/jsonrpc'\nAPI_SERVER_ENDPOINT = 'http://40.120.61.169:8080/'\n\ndata = {\n \"jsonrpc\": \"2.0\",\n \"method\": None,\n \"id\": 1,\n \"params\": []\n}\n\n\ndef rpcCall(method, parameters=None):\n\n data[\"method\"] = method\n if parameters == None:\n data[\"params\"] = []\n else:\n data[\"params\"] = [parameters]\n\n print(data)\n\n response = requests.post(\n RPC_SERVER_ENDPOINT, data=json.dumps(data))\n return response.json()\n\ndef apiCall(verb, data, pickHeader=None):\n\n response = requests.get(\n API_SERVER_ENDPOINT + verb, data=data)\n \n responseJSON = response.json()\n if pickHeader != None:\n responseJSON[pickHeader] = response.headers.get(pickHeader)\n\n return responseJSON","sub_path":"utilities/communicationToRPC.py","file_name":"communicationToRPC.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"21765915","text":"import queue\n\nH, W = map(int, input().split())\nF = []\nfor _ in range(H):\n F.append(list(input()))\n\ndxy_list = [(-1, 0), (0, 1), (1, 0), (0, -1)]\nq = queue.Queue()\nINF = 1000000\ncost = [[INF] * W for _ in range(H)]\n\nfor i in range(H):\n for j in range(W):\n if F[i][j] == '#':\n q.put((i, j))\n cost[i][j] = 0\n\nwhile not q.empty():\n \n x, y = q.get()\n for dxy in dxy_list:\n nx = x + dxy[0]\n ny = y + dxy[1]\n\n if 0 <= nx < H and 0 <= ny < W and F[nx][ny] == '.':\n F[nx][ny] = '#'\n cost[nx][ny] = cost[x][y] + 1\n q.put((nx, ny))\n\n\nmax_c = 0\nfor i in range(H):\n for j in range(W):\n max_c = max(max_c, cost[i][j])\n \nprint(max_c)\n \n \n ","sub_path":"AGC033/a_daker_and_darker.py","file_name":"a_daker_and_darker.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"564316554","text":"\"\"\"\r\neasy 2021-12-07 二维dp基础题\r\n左对齐,dp[i][j]=dp[i][j-1]+dp[i-1][j-1]\r\n注意遍历条件\r\n\"\"\"\r\nclass Solution:\r\n def getRow(self, rowIndex: int):\r\n # 定义二维dp\r\n dp = [[0]*(rowIndex+1) for _ in range(rowIndex+1)]\r\n dp[0][0] = 1\r\n\r\n for i in range(1, rowIndex+1):\r\n rowlist = []\r\n for j in range(i+1): # j<=i\r\n if j==0: dp[i][j] = 1\r\n else:dp[i][j] = dp[i-1][j] + dp[i-1][j-1] # 状态转移方程\r\n\r\n rowlist.append(dp[i][j]) # 每行的结果\r\n return dp[-1]\r\n # print(dp[-1])\r\n\r\nif __name__ == 
'__main__':\r\n rowIndex = 3\r\n print(Solution().getRow(rowIndex))","sub_path":"07_动态规划/2维DP/119-杨辉三角 II.py","file_name":"119-杨辉三角 II.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91346933","text":"#! /bin/env python\n# _'*_ coding:UTF-8 _*_\n\"\"\" 日志模块 \"\"\"\nimport logging\nimport os\n\n\ndef init_config(log_path, level=logging.INFO, backup=7,\n format=\"%(levelname)s: %(asctime)s: %(filename)s:\"\n \"%(lineno)d pid-%(thread)d: %(message)s\",\n datefmt=\"%m-%d %H:%M:%S\"):\n \"\"\"\n @log_path:log_path is full log path\n \"\"\"\n # if not exists , mkdir\n dir = os.path.dirname(log_path)\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n logging.basicConfig(filename=log_path, level=logging.INFO, format=format, datefmt=datefmt)\n\n # for local debug , print to console\n logger = logging.getLogger()\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter(format, datefmt))\n logger.addHandler(console)\n\n# def info(msg):\n# logging.info(msg)\n#\n# def warn(msg):\n# logging.warn(msg)\n#\n# def error(msg):\n# logging.error(msg)\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"281383099","text":"#!/usr/bin/env python -u\nimport logging\nimport sys\nimport common\nimport time\n\nfrom kubernetes import client\nfrom kubernetes.client.rest import ApiException\nfrom os import environ\n\nlogging.basicConfig(\n stream=sys.stderr,\n level=logging.INFO,\n format=\"%(levelname)s: %(name)s: %(message)s\"\n)\nlog = logging.getLogger(\"kubernetes-wait-job\")\n\n\ndef wait():\n try:\n name = environ.get(\"RD_CONFIG_NAME\")\n namespace = environ.get(\"RD_CONFIG_NAMESPACE\")\n retries = int(environ.get(\"RD_CONFIG_RETRIES\"))\n sleep = float(environ.get(\"RD_CONFIG_SLEEP\"))\n show_log = environ.get(\"RD_CONFIG_SHOW_LOG\") == \"true\"\n\n batch_v1 = client.BatchV1Api()\n core_v1 = client.CoreV1Api()\n\n # Poll for completion if retries\n retries_count = 0\n completed = False\n while True:\n api_response = batch_v1.read_namespaced_job_status(\n name,\n namespace,\n pretty=\"True\"\n )\n log.debug(api_response)\n\n retries_count = retries_count + 1\n if retries_count > retries:\n log.error(\"Number of retries exceeded\")\n completed = True\n\n if api_response.status.conditions:\n for condition in api_response.status.conditions:\n if condition.type == \"Failed\":\n completed = True\n\n if api_response.status.completion_time:\n completed = True\n\n if completed:\n break\n\n log.info(\"Waiting for job completion\")\n time.sleep(sleep)\n\n if show_log:\n log.debug(\"Searching for pod associated with job\")\n pod_list = core_v1.list_namespaced_pod(\n namespace,\n label_selector=\"job-name==\" + name\n )\n first_item = pod_list.items[0]\n pod_name = first_item.metadata.name\n log.debug(\"Fetching logs from pod: {0}\".format(pod_name))\n pod_log = core_v1.read_namespaced_pod_log(pod_name, namespace)\n\n log.info(\"========================== job log start ==========================\")\n log.info(pod_log)\n log.info(\"=========================== job log end ===========================\")\n\n if api_response.status.succeeded:\n log.info(\"Job succeeded\")\n sys.exit(0)\n else:\n log.info(\"Job failed\")\n sys.exit(1)\n\n except ApiException as e:\n log.error(\"Exception waiting for job: %s\\n\" % e)\n sys.exit(1)\n\n\ndef 
main():\n if environ.get(\"RD_CONFIG_DEBUG\") == \"true\":\n log.setLevel(logging.DEBUG)\n log.debug(\"Log level configured for DEBUG\")\n\n common.connect()\n wait()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"contents/job-wait.py","file_name":"job-wait.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"456775737","text":"#!/usr/bin/python3\r\n\r\nimport requests,time,random,os,re,sys,base64,urllib.parse\r\nfrom pyquery import PyQuery as pq\r\n\r\nbanner = '''\r\n __ __ ______ __ \r\n \\ \\ / / | ____| / _| \r\n \\ V / ______ | |__ ___ | |_ __ _ \r\n > < |______| | __| / _ \\ | _| / _` |\r\n / . \\ | | | (_) | | | | (_| |\r\n /_/ \\_\\ |_| \\___/ |_| \\__,_|\r\n \r\n by 斯文\r\n'''\r\n\r\ndef usera():\r\n #user_agent 集合\r\n user_agent_list = [\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',\r\n 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\r\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\r\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',\r\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',\r\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\r\n 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',\r\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\r\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',\r\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',\r\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\r\n ]\r\n #随机选择一个\r\n user_agent = random.choice(user_agent_list)\r\n #传递给header\r\n headers = { 'User-Agent': user_agent }\r\n return headers\r\n \r\ndef getPage(cookie,search):\r\n\r\n url='https://classic.fofa.so/result?page=1&qbase64={}'.format(search)\r\n cookies = {'_fofapro_ars_session':cookie}\r\n req = requests.get(url=url,headers=usera(),cookies=cookies)\r\n pageHtml = pq(req.text)\r\n page = (pageHtml('div.list_jg')).text()\r\n # page = page.find('')\r\n \r\n pattern = re.compile(u'获得 (.*?) 
条匹配结果')\r\n result = re.findall(pattern,page)\r\n result = result[0].replace(',','')\r\n\r\n if (int(result) % 10) >0:\r\n allPage = int(result) // 10 + 1\r\n else:\r\n allPage = int(result) // 10\r\n\r\n return allPage\r\n\r\ndef start(search,file,cookie):\r\n \r\n search=search.encode(encoding=\"utf-8\")\r\n search=base64.b64encode(search).decode()\r\n search=urllib.parse.quote(search)\r\n # if os.path.exists(\"result.txt\"): #删除存在的文件\r\n # os.remove(\"result.txt\")\r\n # cookie = input(\"请输入Fofa的Cookie的_fofapro_ars_session值:\")\r\n allPage = getPage(cookie,search)\r\n print(banner)\r\n startPage = input(\"[+ 搜索结果共有{}页,请输入从第几页开始收集地址(例:5):\".format(allPage))\r\n page = input(\"[+ 搜索结果共有{}页,请输入准备收集页数(例:20):\".format(allPage))\r\n endPage = int(startPage) + int(page)\r\n\r\n cookies={'_fofapro_ars_session':cookie}#这里是你的fofa账号登录后的cookie值\r\n url='https://fofa.so/result?qbase64={}'.format(search)\r\n # doc=pq(url)\r\n print(\"[+ 正在向{}.txt文件写入结果\".format(file))\r\n with open('%s.txt'%file,'a+',encoding='utf-8') as f:\r\n for i in range(int(startPage),endPage):\r\n url='https://classic.fofa.so/result?page={}&qbase64={}'.format(i,search)\r\n req = requests.get(url=url,headers=usera(),cookies=cookies)\r\n if '游客使用高级语法' in req.text:\r\n print('[- Cookie已失效,请重新填写https://classic.fofa.so的Cookie,不是https://fofa.so的Cookie')\r\n break\r\n print(\"[+ 正在读取第{}页 状态码:{}\".format(i,req.status_code))\r\n doc=pq(req.text)\r\n\r\n url=doc('div.results_content .list_mod_t').items()\r\n title=doc('div.list_mod_c ul').items()\r\n\r\n for u,t in zip(url,title):\r\n t.find('i').remove()\r\n relUrl = u.find('a').eq(0).attr.href\r\n relTitle = t.find('li').eq(0).text()\r\n\r\n if 'result?qbase64=' in relUrl:\r\n relDoc = pq(u)\r\n relIp = relDoc('.ip-no-url').text()\r\n relPort = (relDoc('.span')).find('a').eq(0).text()\r\n relUrl = 'http://{}:{}'.format(str(relIp),relPort)\r\n if relTitle == '':\r\n relTitle = '空'\r\n print(\"Url: %s Title: %s\"%(relUrl, relTitle))\r\n f.write(\"%s\\n\"%(relUrl))\r\n f.flush()\r\n\r\n time.sleep(3)\r\n\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv)==1:\r\n print(banner)\r\n print('''Usage:请输入参数\\n例如:python X-Fofa.py 'app=\"Solr\"' Solr 94bbbb177c4a564feddb8c7d413d5d61\\n例如:python FofaCrawler.py 'app=\"Solr\"'(Fofa搜索语法) Solr(搜索结果文件名) 94bbbb177c4a564feddb8c7d413d5d61(Fofa的Cookie的_fofapro_ars_session值)''')\r\n sys.exit(0)\r\n \r\n search=sys.argv[1]\r\n file=sys.argv[2]\r\n cookie = sys.argv[3]\r\n start(search,file,cookie)\r\n\r\n\r\n","sub_path":"X-Fofa.py","file_name":"X-Fofa.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"316137044","text":"# 1.随机生成分数列表 2.对分数列表排序 3.将排序后列表转换为五级制\r\n\r\nfrom random import randint\r\n\r\n\r\ndef listSc(): # 随机生成100个成绩并输出\r\n a = [randint(0, 100) for i in range(100)]\r\n print(a)\r\n return a\r\n\r\n\r\ndef bs(): # 冒泡排序并输出\r\n b = listSc()\r\n c = len(b)\r\n for i in range(c):\r\n for j in range(0, c - i - 1):\r\n if b[j] > b[j + 1]:\r\n b[j], b[j + 1] = b[j + 1], b[j]\r\n print(b)\r\n return b\r\n\r\n\r\nclass Transfer(): # 转换类\r\n\r\n def __init__(self, ilist): # 输入列表参数\r\n self.ilist = ilist\r\n\r\n def sortG(self): # 转换五级制\r\n d = self.ilist\r\n for i in range(100):\r\n if i >= 80:\r\n d[i] = 'A'\r\n elif i >= 60:\r\n d[i] = 'B'\r\n elif i >= 40:\r\n d[i] = 'C'\r\n elif i >= 20:\r\n d[i] = 'D'\r\n else:\r\n d[i] = 'E'\r\n print(d)\r\n return d\r\n\r\n\r\n# 
执行\r\nTransfer(bs()).sortG()\r\n\r\n","sub_path":"自己打的1/实习1.py","file_name":"实习1.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"209481285","text":"import multiprocessing\nimport threading\nimport os,sys\nimport time\n\nfrom BearPY.GlobalBear import *\nfrom BearPY.Libraries.Time import *\n\nclass ThreadPool:\n def __init__(self, Mode, Limit=None, Timer=False, Progress=False):\n self.TaskList = []\n self.Mode = Mode\n\n self.Limit = Limit\n self.Timer = Timer\n self.Progress = Progress\n\n def NewTask(self, Function, Arguments):\n self.TaskList.append([Function, tuple(Arguments)])\n\n def NewTasks(self, Group, Function, Arguments):\n for Item in Group:\n self.TaskList.append([Function, tuple([Item]+Arguments)])\n\n\n def Start(self, ParallelNumber):\n StartTime = time.time()\n WorkSpace = []\n for Item in range(ParallelNumber):\n WorkSpace.append(None)\n\n ProgressCounter = 0\n FinishLine = len(self.TaskList)\n\n LimitCounter = 0\n LimitTimer = Date()\n\n for Task in self.TaskList:\n if self.Progress:\n ProgressCounter += 1\n print('='* 80 +str(round(ProgressCounter*100/ FinishLine, 2))+ '%')\n\n CONTINUE = True\n AvailableThread = None\n\n LimitCounter += 1\n\n if self.Limit:\n while(LimitCounter > self.Limit):\n if Date() - LimitTimer > 60:\n LimitCounter = 1\n LimitTimer = Date()\n Chronus.Sleep(1)\n\n if self.Mode == 'Thread':\n while(CONTINUE):\n for Item in range(ParallelNumber):\n if (WorkSpace[Item] == None) or (WorkSpace[Item].isAlive() == False):\n CONTINUE = False\n AvailableThread = Item\n break\n \n WorkSpace[AvailableThread] = threading.Thread(target = Task[0], args = Task[1])\n WorkSpace[AvailableThread].start() \n\n elif self.Mode == 'Process':\n while(CONTINUE):\n for Item in range(ParallelNumber):\n if (WorkSpace[Item] == None) or (WorkSpace[Item].is_alive() == False):\n CONTINUE = False\n AvailableThread = Item\n break\n Chronus.Sleep(1)\n \n WorkSpace[AvailableThread] = threading.Thread(target = Task[0], args = Task[1])\n WorkSpace[AvailableThread].start() \n\n for Item in WorkSpace:\n if Item != None:\n Item.join() \n\n if self.Timer: \n TimeConsumed = round(float(time.time() - StartTime), 3)\n print('\\n\\t***Time-Consuming: ***\\n\\t' + str(TimeConsumed) + 's')\n\n\n def GetList(self):\n return multiprocessing.Manager().list() \n\nclass ThreadMatix:\n def __init__(self, Core, Thread, Limit = None):\n self.Core = Core\n self.Thread = Thread\n self.TaskList = []\n self.Limit = Limit\n\n for Item in range(self.Core):\n self.TaskList.append([])\n self.AutoArrangedTaskList = []\n\n\n def NewTask(self, Core, Function, Arguments):\n self.TaskList[Core-1].append([Function, tuple(Arguments)])\n\n def NewTasks(self, Core, Group, Function, Arguments):\n for Item in Group:\n self.TaskList[Core-1].append([Function, tuple([Item]+Arguments)])\n\n\n def NewTaskAutoArrange(self, Function, Arguments):\n self.AutoArrangedTaskList.append([Function, tuple(Arguments)])\n\n def NewTasksAutoArrange(self, Group, Function, Arguments):\n for Item in Group:\n self.AutoArrangedTaskList.append([Function, tuple([Item]+Arguments)])\n\n\n def NewProcess(self, Core):\n TP = ThreadPool('Thread', Limit = self.Limit, Progress=True)\n TP.TaskList = self.TaskList[Core]\n TP.Start(self.Thread)\n\n def Start(self):\n TaskPointer = 0\n Corepointer = 0\n TaskNumber = len(self.AutoArrangedTaskList)\n while(TaskPointer= 4500:\r\n print('-----------[Info]-----------\\n','Username: {} Level: 
{}\\n'.format(name,level),'Rank: Diamond | Score: {}\\n'.format(rate),'---------------------------\\n')\r\n input('Press any key to continue')\r\n elif rate >= 4100:\r\n print('-----------[Info]-----------\\n','Username: {} \\n'.format(name),'Rank: Platinum I | Score: {}\\n'.format(rate),'---------------------------\\n')\r\n input('Press any key to continue')\r\nexcept Exception:\r\n print('{} Username not found'.format(inp))\r\n input('Press any key to continue and try to do again')","sub_path":"Api calling.py","file_name":"Api calling.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337711062","text":"#!/usr/bin/env python\n\n\"\"\"Recording script for a Raspberry Pi powered motorcycle helmet camera.\n\"\"\"\n\nimport os\nimport json\nimport datetime\nimport subprocess\nimport logging\nimport logging.handlers\nimport time\nimport pickle\nimport multiprocessing\nimport googleapiclient.discovery\nimport googleapiclient.http\nimport googleapiclient.model\nimport httplib2\nimport functools\nimport socket\ntry:\n import picamera\nexcept ImportError:\n print('Couldn\\'t import picamera: running as is for debug purposes.')\n\n\nformatter = logging.Formatter('%(asctime)s [%(processName)s] [%(levelname)-5.5s] %(message)s')\n\nrootLogger = logging.getLogger()\nrootLogger.setLevel(logging.DEBUG)\nfileHandler = logging.handlers.RotatingFileHandler(\n filename=os.path.join(os.path.dirname(__file__), 'camera.log'),\n maxBytes=1 * (10 ** 6), backupCount=1)\nfileHandler.setFormatter(formatter)\nrootLogger.addHandler(fileHandler)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(formatter)\nrootLogger.addHandler(consoleHandler)\n\nlogging.getLogger('googleapiclient.discovery').setLevel(logging.CRITICAL)\nlogging.getLogger('googleapiclient.discovery_cache').setLevel(logging.CRITICAL)\n\nVIDEO_DIR = os.path.join(os.path.dirname(__file__), 'video')\nUPLOADS_DIR = os.path.join(os.path.dirname(__file__), 'uploads')\nCREDENTIALS = os.path.join(os.path.dirname(__file__), '.credentials')\nFORMAT = 'h264'\nMAX_VIDEO_SIZE = 5000 * (10 ** 6) # ~45 minutes\nMIN_VIDEO_SIZE = 50 * (10 ** 6) # ~30 seconds\nVIDEO_MIN_INTERVALS = 12\n\nUPLOAD_CHUNK_SIZE = 50 * (10 ** 6)\nUPLOAD_MAX_WORKERS = 1\n\n# how many 0s to put in front of counter number\nZFILL_DECIMAL = 3\n\n# 8mp V2 camera\nRESOLUTION = (1640, 1232)\nFRAMERATE = 30\nSTABILIZATION = False\n\n# number of seconds to flush on disk\nINTERVAL = 5\n\n# check for enough disk space every N seconds\nSPACE_CHECK_INTERVAL = 30\n\n# what % of disk space must be free to start a new video\nREQUIRED_FREE_SPACE_PERCENT = 15 # about an hour with 64gb card\n\nYOUTUBE_TITLE_PREFIX = 'Helmet Camera'\n\nDATE_FORMAT = '%Y-%m-%d_%H-%M'\n\nSTART_TIME = time.time()\n\nqueue = []\n\n\nclass throttle(object):\n \"\"\"Decorator that prevents a function from being called more than once every\n time period.\n\n To create a function that cannot be called more than once a minute:\n @throttle(minutes=1)\n def my_fun():\n pass\n \"\"\"\n def __init__(self, seconds=0, minutes=0, hours=0):\n self.throttle_period = datetime.timedelta(\n seconds=seconds, minutes=minutes, hours=hours)\n self.time_of_last_call = datetime.datetime.min\n\n def __call__(self, fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n now = datetime.datetime.now()\n time_since_last_call = now - self.time_of_last_call\n if time_since_last_call > self.throttle_period:\n self.time_of_last_call = now\n 
self.last_result = fn(*args, **kwargs)\n return self.last_result\n else:\n return self.last_result\n return wrapper\n\n\ndef use_led(status):\n \"\"\"Control on-board green LED status, True for on, False for off.\n \"\"\"\n with open('/sys/class/leds/led0/brightness', 'w') as f:\n f.write('%s\\n' % int(not status))\n\n\n@throttle(seconds=5)\ndef is_connected(host='8.8.8.8', port=53, timeout=1):\n \"\"\"Returns True if we have internet connection.\n \"\"\"\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n result = True\n except socket.error:\n result = False\n finally:\n socket.setdefaulttimeout(None)\n return result\n\n\ndef make_room():\n \"\"\"Clear oldest video.\n \"\"\"\n sorted_videos = sorted(os.listdir(VIDEO_DIR))\n if sorted_videos:\n oldest_video = sorted_videos[0]\n logging.info('Removing oldest video: %s', oldest_video)\n # may not have permission if running as pi and video was created by root\n try:\n os.remove(os.path.join(VIDEO_DIR, oldest_video))\n except OSError:\n logging.error('Must run as root otherwise script cannot clear out old videos')\n else:\n logging.error('No videos in directory %s, cannot make room', VIDEO_DIR)\n time.sleep(SPACE_CHECK_INTERVAL)\n\n\ndef measure_temp():\n temp = os.popen('vcgencmd measure_temp').readline()\n return temp.replace('temp=', '').strip()\n\n\ndef enough_disk_space():\n \"\"\"Return true if we have enough space to start a new video.\n \"\"\"\n df = subprocess.Popen(['df', '/'], stdout=subprocess.PIPE)\n output = df.communicate()[0]\n percent_used_str = output.split(\"\\n\")[1].split()[4]\n percent_used = int(percent_used_str.replace('%', ''))\n enough = 100 >= REQUIRED_FREE_SPACE_PERCENT + percent_used\n logging.debug('%s%% of disk space used. Enough: %s', percent_used, enough)\n logging.debug('Device temperature: %s', measure_temp())\n return enough\n\n\ndef upload(filename):\n \"\"\"Upload given filename on YouTube using saved credentials.\n\n For each video file we will create a JSON file in `uploads` dir with upload\n progress. 
This way we can resume upload if it was interrupted and avoid\n duplicates.\n \"\"\"\n try:\n credentials = pickle.load(open(CREDENTIALS))\n except IOError:\n logging.error('Unable to read .credentials file to perform youtube upload.')\n return\n service = googleapiclient.discovery.build(\n 'youtube', 'v3', credentials=credentials)\n name_parts = os.path.split(filename)[1].split('.')\n title = '%s %s' % (\n YOUTUBE_TITLE_PREFIX,\n ':'.join(name_parts[0].replace('_', ' ').rsplit('-', 1)))\n part_num = int(filename.split('.')[1])\n if part_num:\n title = '%s Part %s' % (title, part_num + 1)\n body = dict(snippet=dict(title=title, tags=['helmet'], categoryId=2),\n status=dict(privacyStatus='unlisted'))\n logging.info('Preparing to upload %s...', filename)\n request = service.videos().insert(\n part=','.join(body.keys()),\n body=body,\n media_body=googleapiclient.http.MediaFileUpload(\n filename, chunksize=UPLOAD_CHUNK_SIZE, resumable=True)\n )\n progress_filename = os.path.join(UPLOADS_DIR, '%s.json' % os.path.split(\n filename)[1])\n try:\n with open(progress_filename) as f:\n progress = json.load(f)\n except IOError:\n progress = None\n except ValueError:\n logging.warning('Invalid upload progress in %s', progress_filename)\n os.remove(progress_filename)\n progress = None\n if progress is not None:\n logging.info('Resuming existing upload from %s...', progress_filename)\n request.resumable_progress = progress['resumable_progress']\n request.resumable_uri = progress['resumable_uri']\n response = None\n try:\n _prev_percent = 0\n while response is None:\n status, response = request.next_chunk(num_retries=3)\n if status:\n with open(progress_filename, 'w') as f:\n json.dump({\n 'resumable_progress': request.resumable_progress,\n 'resumable_uri': request.resumable_uri}, f)\n _percent = status.progress()\n if _percent - _prev_percent > 0.01:\n logging.info('Uploading at [%s]', '{:.2%}'.format(_percent))\n _prev_percent = _percent\n except googleapiclient.errors.HttpError as e:\n if 'The number of bytes uploaded' in e.content or e.resp.status == 404:\n logging.warning(e.content)\n logging.info('Removing upload progress and starting again.')\n os.remove(progress_filename)\n else:\n raise googleapiclient.errors.HttpError(e.resp, e.content)\n except IOError as e:\n logging.exception('Removing upload progress and starting again.')\n os.remove(progress_filename)\n except httplib2.ServerNotFoundError:\n logging.debug('Couldn\\'t upload %s since no connection is available.')\n else:\n logging.info('Successfully uploaded %s', response)\n try:\n os.remove(progress_filename)\n except OSError:\n pass\n os.remove(filename)\n\n\ndef watch():\n \"\"\"Background watcher which removes old videos and tries to perform an upload.\n \"\"\"\n while True:\n while not enough_disk_space():\n make_room()\n time.sleep(1)\n for i in reversed([i for i, p in enumerate(queue) if not p.is_alive()]):\n queue.pop(i)\n if queue:\n logging.debug('Upload queue: %s', queue)\n\n if is_connected():\n for video in sorted(os.listdir(VIDEO_DIR)):\n filename = os.path.join(VIDEO_DIR, video)\n if filename in [i.name for i in queue]:\n continue\n if os.stat(filename).st_size < MIN_VIDEO_SIZE:\n continue\n if len(queue) < UPLOAD_MAX_WORKERS:\n p = multiprocessing.Process(target=upload, name=filename, args=[filename])\n logging.debug('Starting background process %s', p)\n p.start()\n queue.append(p)\n time.sleep(SPACE_CHECK_INTERVAL)\n\n\nclass OutputShard(object):\n def __init__(self, filename):\n self.filename = filename\n self.is_new = 
self.size == 0\n self.stream = open(filename, 'ab')\n\n def __repr__(self):\n return '' % self.filename\n\n def write(self, buf):\n self.stream.write(buf)\n\n def close(self):\n self.stream.close()\n\n def remove(self):\n os.remove(self.filename)\n\n @property\n def size(self):\n try:\n return os.stat(self.filename).st_size\n except OSError:\n return 0\n\n\ndef uptime():\n \"\"\"Get current uptime in seconds.\n \"\"\"\n return time.time() - START_TIME\n\n\ndef record():\n \"\"\"Start recording if/after no connection is avilable and stop when connected.\n\n The idea is to stop recording so the upload can be completed without\n generating any new videos, and you are very unlikely to need a recordding when\n you are near WiFi anyway.\n \"\"\"\n with picamera.PiCamera() as camera:\n # make sure that camera is connected\n pass\n should_log = True\n is_led_on = False\n while is_connected():\n if should_log:\n logging.debug('Still connected to the network...')\n # blink the LED once in a while to know that we are ready to record\n # blink every second for first 5 minutes, then once in 5 seconds\n if uptime() < 60 * 5:\n for i in range(5):\n use_led(bool(i % 2))\n time.sleep(1)\n else:\n use_led(is_led_on)\n is_led_on = not is_led_on\n time.sleep(5)\n\n should_log = False\n\n now = datetime.datetime.now()\n # guard against writing into old files if system time is incorrect\n old_videos = [datetime.datetime.strptime(\n i.split('.')[0], DATE_FORMAT) for i in sorted(os.listdir(VIDEO_DIR))]\n for old_video in old_videos:\n if old_video >= now:\n shards = len([i for i in old_videos if i == old_video])\n if shards > 1:\n logging.critical('Existing video file %s is newer from current time %s. This is likely caused by incorrent system time. Trying again shortly...', old_video, now)\n time.sleep(10)\n return record()\n\n with picamera.PiCamera() as camera:\n camera.resolution = RESOLUTION\n camera.framerate = FRAMERATE\n camera.video_stabilization = STABILIZATION\n logging.debug('Recording with %s@%s FPS', RESOLUTION, FRAMERATE)\n camera.annotate_background = picamera.Color('black')\n counter = 0\n timestamp = now.strftime(DATE_FORMAT)\n filename = os.path.join(VIDEO_DIR, '%s.{}.%s' % (timestamp, FORMAT))\n shard = OutputShard(filename.format(str(counter).zfill(ZFILL_DECIMAL)))\n is_new = shard.is_new\n camera.start_recording(shard, format=FORMAT, intra_period=INTERVAL * FRAMERATE)\n intervals_recorded = 0\n use_led(True)\n while True:\n camera.annotate_text = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')\n camera.split_recording(shard)\n camera.wait_recording(INTERVAL)\n intervals_recorded += 1\n if intervals_recorded % 10 == 0:\n logging.debug('Recorded %s intervals...', intervals_recorded)\n if shard.size > MAX_VIDEO_SIZE:\n counter += 1\n logging.debug('Using next shard %s for video file', counter)\n if is_connected():\n use_led(False)\n logging.info('Connected to WiFi. 
Not recording anymore.')\n camera.stop_recording()\n shard.close()\n if is_new and intervals_recorded < VIDEO_MIN_INTERVALS:\n logging.debug('Cleaning up short video %s (%s intervals)', shard, intervals_recorded)\n shard.remove()\n break\n shard = OutputShard(filename.format(str(counter).zfill(ZFILL_DECIMAL)))\n logging.info('Trying to start recording again...')\n record()\n\n\ndef main():\n logging.info('Powered on at %s', datetime.datetime.now())\n\n # Set the PWR LED to GPIO mode (set 'off' by default)\n with open('/sys/class/leds/led0/trigger', 'w') as f:\n f.write('gpio\\n')\n\n if not os.path.isdir(VIDEO_DIR):\n logging.debug('Creating directory %s', VIDEO_DIR)\n os.mkdir(VIDEO_DIR)\n if not os.path.isdir(UPLOADS_DIR):\n logging.debug('Creating directory %s', UPLOADS_DIR)\n os.mkdir(UPLOADS_DIR)\n p = multiprocessing.Process(target=watch, name='watcher')\n logging.debug('Starting background process %s', p)\n p.start()\n record()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit('Command killed by keyboard interrupt')\n except Exception as e:\n logging.exception(e)\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":12965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455086158","text":"'''Handy utility functions for testing'''\nfrom contextlib import contextmanager\nimport mock\n\nimport luigi.mock\n\n\n@contextmanager\ndef mock_targets(task):\n '''Replaces both input and output targets for a Luigi task with\n luigi.mock.MockTarget\n\n This makes the targets in-memory targets instead of their original type\n to facilitate/speed up testing\n\n '''\n\n def _update(target):\n try:\n return luigi.mock.MockTarget(target.path)\n except AttributeError:\n try:\n return {t: _update(target[t]) for t in target}\n except (KeyError, TypeError):\n return [_update(t) for t in target]\n\n task._orig_input = task.input\n task._orig_output = task.output\n\n task.input = mock.MagicMock(\n spec=task.input,\n return_value=_update(task.input())\n )\n task.output = mock.MagicMock(\n spec=task.output,\n return_value=_update(task.output())\n )\n\n yield task\n\n task.input = task._orig_input\n task.output = task._orig_output\n del task._orig_input\n del task._orig_output\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"351120008","text":"from bottle import route, run, template, request\nimport psycopg2\n\nconn = psycopg2.connect(\"dbname=project user=postgres\")\ncur = conn.cursor()\n\n@route('/') \ndef index():\n return template(\"view/main.tpl\")\n\n@route('/search')\ndef search():\n name = request.query['name']\n cur.execute(\"SELECT * FROM test WHERE first_name ILIKE %s\", (name,)) \n data = cur.fetchall()\n return template(\"view/search_result.tpl\", rows=data)\n\nrun(host='localhost', port=3000)\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"430817723","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/17 15:46\n# @Author : Linder\n# @Email : lmj2018666@gmail.com\n# @Software: PyCharm\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass TreeNode:\n\tdef 
__init__(self,x):\n\t\tself.val=x\n\t\tself.left=None\n\t\tself.right=None\n\nclass Solution(object):\n\tdef buildTree(self, inorder, postorder):\n\t\t\"\"\"\n\t\t:type inorder: List[int]\n\t\t:type postorder: List[int]\n\t\t:rtype: TreeNode\n\t\t\"\"\"\n\t\tif not postorder:\n\t\t\treturn None\n\t\troot = TreeNode(postorder[-1])\n\t\tn = inorder.index(root.val)\n\t\troot.left = self.buildTree(inorder[:n], postorder[:n])\n\t\troot.right = self.buildTree(inorder[n + 1:], postorder[n:-1])\n\t\treturn root\n\nif __name__=='__main__':\n\tinorder=[9,3,15,20,7]\n\tpostorder=[9,15,7,20,3]\n\tprint(Solution().buildTree(inorder,postorder))","sub_path":"leetcode/1217从中序与后序遍历序列构造二叉树.py","file_name":"1217从中序与后序遍历序列构造二叉树.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"175552234","text":"\"\"\"Faça um programa em Python que solicite a quantidade de alunos de uma turma.\r\n\r\nApós esta informação, o usuário deve digitar a média de cada aluno da turma, e o programa apresentará a mensagem PARABÉNS VOCÊ ESTÁ APROVADO aos alunos com média maior ou igual a 6.0.\r\n\r\nO programa deve calcular e mostrar, no final da entrada de dados, a média geral da turma.\r\n\r\nObs.: Caso a quantidade informada de alunos da turma for igual a zero, o programa deve emitir a seguinte mensagem: NÃO HOUVE PROCESSAMENTO\r\n\"\"\"\r\n\r\nqtdalunos = int(input())\r\nif qtdalunos != 0:\r\n contador = 0\r\n notaDeTodos = 0\r\n while contador < qtdalunos:\r\n media = float(\r\n input())\r\n if(media >= 6.0):\r\n print(\"PARABÉNS VOCÊ ESTÁ APROVADO\")\r\n contador += 1\r\n notaDeTodos += media\r\n mediaTurma = notaDeTodos/qtdalunos\r\n print(mediaTurma)\r\nelse:\r\n print(\"NÃO HOUVE PROCESSAMENTO\")\r\n\r\n\"\"\"\r\nwhile qtdalunos != 0:\r\n media = qtdalunos\r\n media = int(input(\"digitar a média de cada aluno da turma\"))\r\n\r\n if media > 6:\r\n print(\"PARABÉNS VOCÊ ESTÁ APROVADO\")\r\n if qtdalunos == 0:\r\n print(\"NÃO HOUVE PROCESSAMENTO\")\r\n mediaalunos = media / qtdalunos\r\n\"\"\"\r\n","sub_path":"app_conhecimento_aula5.py","file_name":"app_conhecimento_aula5.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"566847362","text":"# 题目1\nimport re\n\nbest_language = \"PHP is the best programming language in the world!\"\nprint(best_language.replace(\"PHP\",\"Python\"))\n\n# 题目2\n\nnum = int(input('请输入1-7之间的数字:'))\nday = ['一','二','三','四','五','六','4日']\nif num in range(1, 8):\n print(\"今天是周{}\".format(day[num - 1]))\n\n# 题目3\n\nline3 = \"Python is the BEST programming Language!\"\nif re.match('[a-z]+$',line3):\n print(\"给出的字符串全为小写\")\nelse:\n print(\"给出的字符串不全为小写\")\n\n# 题目4\n\nline4 = \"xxx的电话号码是123-456-7890或(123) 456-7890\"\nphonenum = re.findall(r\"\\(\\d{3}\\)\\s\\d{3}-\\d{4}|\\d{3}-\\d{3}-\\d{4}\",line4)\nprint(\"电话号码为:\"+\"\".join(phonenum))\n\n# 题目5\n\nline5 = ['今天是2022/9/24', '今天是2017/09/25', '今天是2012-07-25', '今天是2020年04月25']\nfor date in line5:\n datetime = re.findall(r\"\\d{4}.*\\d{1,2}.*\\d{1,2}\",date)\n datetime = re.sub(r'\\D','-',str(datetime[0]))\n print(\"其中的日期有:\"+\"\".join(datetime))","sub_path":"HW6/Group12/hw6_1720327.py","file_name":"hw6_1720327.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"490948389","text":"if __name__ == '__main__':\n num_test_cases = int(input())\n for _ in range(num_test_cases):\n 
input()\n arr = list(map(int, input().split(' ')))\n arr_min = arr.index(min(arr)) + 1\n if arr_min == 0 or arr_min > len(arr):\n print('N o')\n continue\n left_side, right_side = True, True\n for i in range(1, arr_min):\n if arr[i] > arr[i - 1]:\n left_side = False\n break\n if left_side:\n for i in range(arr_min, len(arr)):\n if arr[i] < arr[i - 1]:\n right_side = False\n break\n print('Yes' if left_side and right_side else 'No')\n","sub_path":"HackerRank/Python/collections/cont_pilingUp/solution_nn.py","file_name":"solution_nn.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"485161140","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and / or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\n# LOAD MODUL # \nimport bpy, random\nfrom bpy import *\nfrom bpy.props import *\n\n\nclass VIEW3D_TP_Visual_Set_Color(bpy.types.Operator):\n \"\"\"switch material color by choosen material id\"\"\" \n bl_idname = \"tp_mat.set_colors\"\n bl_label = \"Set Color\"\n bl_options = {'REGISTER', 'UNDO'} \n\n @classmethod\n def poll(self, context):\n if context.object and context.object.type == 'MESH':\n return len(context.object.data.materials)\n\n new_swatch = FloatVectorProperty(name = \"Color\", default=[0.0,1.0,1.0], min = 0, max = 1, subtype='COLOR')\n index_count_sw = bpy.props.IntProperty(name=\"MAT-ID\", description=\"set material index\", min=0, max=100, default=0) \n\n # DRAW REDO LAST PROPS [F6] # \n def draw(self, context):\n layout = self.layout\n col = layout.column(align = True)\n \n box = col.box().column(1) \n\n row = box.row()\n row.prop(self,\"index_count_sw\")\n row.prop(self,\"new_swatch\", text=\"\")\n\n box.separator() \n\n\n # LOAD CUSTOM SETTTINGS #\n def invoke(self, context, event): \n settings_load(self)\n return self.execute(context)\n\n\n def execute(self, context):\n\n settings_write(self)\n\n ob = bpy.context.object\n try:\n mat = ob.data.materials[self.index_count_sw]\n except IndexError:\n print(self)\n self.report({'INFO'}, \"No further Material!\") \n pass\n else: \n if bpy.context.scene.render.engine == 'BLENDER_RENDER': \n words = self.new_swatch\n color = (float(words[0]), float(words[1]), float(words[2])) \n mat.diffuse_color = color\n\n else:\n node=mat.node_tree.nodes['Diffuse BSDF'] \n words = self.new_swatch\n RGB = (float(words[0]), float(words[1]), float(words[2]),1) \n node.inputs['Color'].default_value = RGB\n \n return{'FINISHED'} \n\n\n\n\n\nclass VIEW3D_TP_Visual_Set_Color_Contrast(bpy.types.Operator):\n \"\"\"switch material color by choosen material id\"\"\" \n bl_idname = \"tp_mat.set_color_constrast\"\n bl_label = \"Set Color Constrast\"\n bl_options = {'REGISTER', 'UNDO'} \n\n @classmethod\n def poll(self, context):\n if context.object and context.object.type 
== 'MESH':\n return len(context.object.data.materials)\n\n mat_mode = bpy.props.StringProperty(default=\"\")\n index_count = bpy.props.IntProperty(name=\"MAT-ID\", description=\"set material index\", min=0, max=100, default=0) \n mat_switch = bpy.props.EnumProperty(\n items = [(\"tp_mat_00\", \"Light\", \"\", 1),\n (\"tp_mat_01\", \"Darken\", \"\", 2)],\n name = \"Contrast\",\n default = \"tp_mat_00\", \n description=\"material index switch\") \n\n # DRAW REDO LAST PROPS [F6] # \n def draw(self, context):\n layout = self.layout\n col = layout.column(align = True)\n \n box = col.box().column(1) \n \n row = box.row(1)\n row.prop(self, \"index_count\") \n row.prop(self, \"mat_switch\", text=\"\") \n\n row = box.row(1)\n row.operator('tp_mat.set_color_constrast', text='Invert', icon =\"FILE_REFRESH\").mat_mode = 'INVERT'\n row.operator('tp_mat.set_color_constrast', text='Repeat', icon =\"COLOR\")\n\n box.separator() \n \n\n # LOAD CUSTOM SETTTINGS #\n def invoke(self, context, event): \n settings_load(self)\n return self.execute(context)\n\n\n def execute(self, context):\n\n settings_write(self) \n\n ob = context.object\n if self.mat_switch == \"tp_mat_00\":\n try:\n \n mat = ob.data.materials[self.index_count]\n except IndexError:\n print(self)\n self.report({'INFO'}, \"No further Material!\") \n pass\n else:\n if bpy.context.scene.render.engine == 'BLENDER_RENDER': \n for i in range(3):\n mat.diffuse_color[i] = random.random()\n else:\n node=mat.node_tree.nodes['Diffuse BSDF']\n #r = random.randint(0, 20)\n #g = random.randint(0, 20)\n #b = random.randint(0, 20)\n #RGB = (r/255, g/255, b/255, 1) \n RGB = (random.random(),random.random(),random.random(),1)\n node.inputs['Color'].default_value = RGB\n\n\n if self.mat_switch == \"tp_mat_01\":\n try:\n mat = ob.data.materials[self.index_count]\n except IndexError:\n print(self)\n self.report({'INFO'}, \"No further Material!\") \n pass\n else:\n if bpy.context.scene.render.engine == 'BLENDER_RENDER':\n for i in range(3):\n mat.diffuse_color[i] *= random.random() \n else:\n node=mat.node_tree.nodes['Diffuse BSDF']\n for i in range(3):\n node.inputs['Color'].default_value[i] *= random.random() \n\n \n if \"INVERT\" in self.mat_mode: \n try:\n mat = ob.data.materials[self.index_count]\n except IndexError:\n print(self)\n self.report({'INFO'}, \"No further Material!\") \n pass\n else:\n for i in range(3):\n mat.diffuse_color[i] = 1 - mat.diffuse_color[i]\n\n return{'FINISHED'} \n\n\n\n\n# LOAD CUSTOM TOOL SETTINGS #\ndef settings_load(self):\n tp = bpy.context.window_manager.tp_props_visual\n tool = self.name.split()[0].lower()\n keys = self.as_keywords().keys()\n for key in keys:\n setattr(self, key, getattr(tp, key))\n\n\n\n# STORE CUSTOM TOOL SETTINGS #\ndef settings_write(self):\n tp = bpy.context.window_manager.tp_props_visual\n tool = self.name.split()[0].lower()\n keys = self.as_keywords().keys()\n for key in keys:\n setattr(tp, key, getattr(self, key))\n \n\n\n# REGISTRY #\ndef register(): \n bpy.utils.register_module(__name__)\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"2.79/Sets/toolplus_visuals/ops_visuals/matswitch.py","file_name":"matswitch.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"477223564","text":"# -*- coding: utf-8 -*-\n\"\"\"ALGORYTM WYZNACZANIA NAJBLIŻSZEJ PARY PUNKTÓW NA PŁASZCZYŹNIE\"\"\"\nfrom math import hypot\n\n\ndef 
find_closest_points(points):\n \"\"\"FUNKCJA OBSŁUGUJĄCA DO WYSZUKIWANIA PUNKTÓW\n :param points: lista punktów\n :returns: para najbliższych punktów\"\"\"\n points_x = sorted(points)\n points_y = [(pt[0], pt[1], i) for i, pt in enumerate(points_x)]\n points_y.sort(key=lambda p: (p[1], p[0], p[2]), reverse=True)\n\n return _search_closest(points_x, points_y)\n\n\ndef _search_closest(points_x, points_y, index_begin=0, index_end=-1):\n \"\"\"ZNAJDOWANIE NAJBLIŻSZEJ PARY PUNKTÓW\n :param points_x: lista punktów posortowana po współrzędnej x\n :param points_y: lista punktów posortowana po współrzędnej y\n :param index_begin: początek fragmentu listy punktów po x\n :param index_end: koniec fragmentu listy punktów po x\n :returns: para najbliższych punktów\"\"\"\n def distance(pt1, pt2):\n return hypot(pt1[0] - pt2[0], pt1[1] - pt2[1])\n\n index_begin %= len(points_x)\n index_end %= len(points_x)\n\n if index_end - index_begin == 1:\n return (points_x[index_begin][0], points_x[index_end][0])\n\n if index_end - index_begin == 2:\n index_middle = index_begin + 1\n distance12 = distance(points_x[index_begin], points_x[index_middle])\n distance23 = distance(points_x[index_middle], points_x[index_end])\n distance31 = distance(points_x[index_begin], points_x[index_end])\n\n if distance12 <= distance23 and distance12 <= distance31:\n return (points_x[index_begin][0], points_x[index_middle][0])\n elif distance23 <= distance12 and distance23 <= distance31:\n return (points_x[index_middle][0], points_x[index_end][0])\n else:\n return (points_x[index_begin][0], points_x[index_end][0])\n\n index_middle = (index_begin + index_end) // 2\n middle_x = (points_x[index_middle][0] + points_x[index_middle + 1][0]) // 2\n points_yl = [p for p in points_y if p[2] <= index_middle]\n points_yr = [p for p in points_y if p[2] > index_middle]\n\n closest_l = _search_closest(points_x, points_yl, index_begin, index_middle)\n closest_r = _search_closest(\n points_x, points_yr, index_middle + 1, index_end)\n min_distance = min(distance(closest_l[0], closest_l[1]),\n distance(closest_r[0], closest_r[1]))\n belt_width = min_distance\n belt_points = [(i, pt[2] <= index_middle) for i, pt in enumerate(points_y)\n if middle_x - belt_width <= pt[0] <= middle_x + belt_width]\n\n for i in range(1, len(belt_points)):\n for j in range(i - 1, -1, -1):\n pt1 = belt_points[i][0]\n pt2 = belt_points[j][0]\n\n if points_y[pt2][1] < points_y[pt1][1] + belt_width:\n break\n\n if belt_points[i][1] != belt_points[j][1]:\n points_distance = distance(points_y[pt1], points_y[pt2])\n\n if points_distance < min_distance:\n min_distance = points_distance\n closest_points = (points_y[pt1], points_y[pt2])\n\n return closest_points\n","sub_path":"AlgoLib_Python/algolib/closest_points.py","file_name":"closest_points.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"39376267","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n'''\n@date: 2019-04-27\n@author: Shell.Xu\n@copyright: 2019, Shell.Xu \n@license: MIT\n'''\nimport os\nimport sys\nimport json\nimport time\nimport base64\nimport hashlib\nimport logging\nimport argparse\nimport binascii\nfrom os import path\ntry:\n from urllib.request import urlopen, Request # Python 3\nexcept ImportError:\n from urllib2 import urlopen, Request # Python 2\n\nfrom cryptography import x509\nfrom cryptography.x509.extensions import _key_identifier_from_public_key\nfrom cryptography.x509.oid import NameOID, ExtensionOID, 
ExtendedKeyUsageOID\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa, ec, padding\nfrom cryptography.hazmat.primitives.serialization import \\\n Encoding, PrivateFormat, PublicFormat\n\n\nRETRY_LIMIT = 100\nDEFAULT_INTERVAL = 2\nDEFAULT_DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory'\n# DEFAULT_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory'\nHEADERS = {\n 'Content-Type': 'application/jose+json',\n 'User-Agent': 'acme-tiny'}\nBACKEND = default_backend()\nPADALGO = padding.PKCS1v15()\nDGSTALGO = hashes.SHA256()\n\n\ndef _b64(b):\n s = base64.urlsafe_b64encode(b)\n return s.decode('utf8').replace('=', '')\n\n\ndef _d64(s):\n s += '=' * (4 - len(s)%4)\n return base64.urlsafe_b64decode(s)\n\n\ndef int2b64(i):\n s = hex(i)[2:]\n if len(s) % 2:\n s = '0' + s\n return _b64(binascii.unhexlify(s))\n\n\ndef b642int(s):\n return int(binascii.hexlify(_d64(s)), 16)\n\n\ndef read_privatekey(pemfile, password=None):\n with open(pemfile, 'rb') as fi:\n bkey = fi.read()\n return serialization.load_pem_private_key(\n bkey, password=password, backend=BACKEND)\n\n\nclass BadNonceError(Exception):\n pass\n\n\nclass WellKnownCheckError(Exception):\n pass\n\n\nclass ChallengeError(Exception):\n pass\n\n\nclass OrderError(Exception):\n pass\n\n\nclass ValidateError(Exception):\n pass\n\n\ndef httpget(url, data=None, err_msg='error'):\n logging.info('req: %s', url)\n try:\n req = Request(url, data=data, headers=HEADERS)\n resp = urlopen(req)\n code, headers = resp.getcode(), resp.headers\n logging.debug('resp: %d, headers:\\n%s',\n code, json.dumps(dict(headers), indent=4))\n respdata = resp.read().decode('utf8')\n except IOError as e:\n respdata = e.read().decode('utf8') if hasattr(e, 'read') else str(e)\n code, headers = getattr(e, 'code', None), {}\n try:\n respdata = json.loads(respdata) # try to parse json results\n logging.debug('json data:\\n%s', json.dumps(respdata, indent=4))\n except (ValueError, TypeError):\n pass # ignore json parsing errors\n if code == 400 and respdata['type'] == 'urn:ietf:params:acme:error:badNonce':\n raise BadNonceError(url)\n if code not in [200, 201, 204]:\n raise ValueError(err_msg)\n return respdata, code, headers\n\n\nclass Account(object):\n\n alg = 'RS256'\n\n def __init__(self, directory_url, contact=None):\n logging.info('get directory')\n self.directory, _, _ = httpget(directory_url,\n err_msg='get directory error')\n self.contact = contact\n self.kid = None\n self.nonce = None\n\n def read_pem(self, pemfile, password=None):\n try:\n pkey = read_privatekey(pemfile, password)\n except ValueError:\n return\n logging.info('load account key from pemfile: %s', pemfile)\n if not isinstance(pkey, rsa.RSAPrivateKey):\n raise ValueError('only support rsa key')\n pn = pkey.public_key().public_numbers()\n return pkey, {'kty': 'RSA', 'n': int2b64(pn.n), 'e': int2b64(pn.e)}\n\n def read_json(self, jsonfile):\n with open(jsonfile, 'r') as fi:\n data = fi.read()\n try:\n jkey = json.loads(data)\n except ValueError:\n return\n logging.info('load account key from jsonfile: %s', jsonfile)\n return self.load_json(jkey)\n\n def load_json(self, jkey):\n pubkey = rsa.RSAPublicNumbers(b642int(jkey['e']), b642int(jkey['n']))\n prikey = rsa.RSAPrivateNumbers(\n b642int(jkey['p']), b642int(jkey['q']), b642int(jkey['d']),\n b642int(jkey['dp']), b642int(jkey['dq']), b642int(jkey['qi']),\n pubkey)\n pkey = 
prikey.private_key(BACKEND)\n return pkey, {k: jkey[k] for k in ['kty', 'n', 'e']}\n\n def load_key(self, key):\n self.pkey, self.jwk = self.load_json(key)\n logging.debug('jwk:\\n%s', json.dumps(self.jwk, indent=4))\n\n def read_key(self, keyfile):\n if not path.isfile(keyfile):\n raise ValueError('%s not exist or not a file' % keyfile)\n for f in [self.read_pem, self.read_json]:\n r = f(keyfile)\n if r:\n self.pkey, self.jwk = r\n break\n else:\n raise ValueError(\"can't identity key file format\")\n logging.debug('jwk:\\n%s', json.dumps(self.jwk, indent=4))\n\n def sign(self, data):\n return self.pkey.sign(data, PADALGO, DGSTALGO)\n\n def get_thumbprint(self):\n accountkey_json = json.dumps(\n self.jwk, sort_keys=True, separators=(',', ':'))\n return _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())\n\n def get_nonce(self): # CAUTION: not thread safe\n if self.nonce is None:\n logging.info('get nonce')\n _, _, headers = httpget(self.directory['newNonce'])\n return headers['Replay-Nonce']\n nonce, self.nonce = self.nonce, None\n return nonce\n\n def signed_get(self, url, payload, err_msg):\n payload64 = '' if payload is None else _b64(json.dumps(payload).encode('utf8'))\n protected = {'url': url, 'alg': self.alg}\n if self.kid:\n protected['kid'] = self.kid\n else:\n protected['jwk'] = self.jwk\n for _ in range(RETRY_LIMIT):\n protected['nonce'] = self.get_nonce()\n protected64 = _b64(json.dumps(protected).encode('utf8'))\n protected_input = '{0}.{1}'.format(protected64, payload64).encode('utf8')\n data = json.dumps({\n 'protected': protected64,\n 'payload': payload64,\n 'signature': _b64(self.sign(protected_input)),\n }).encode('utf8')\n try:\n data, code, headers = httpget(url, data=data, err_msg=err_msg)\n except BadNonceError:\n continue\n if 'Replay-Nonce' in headers:\n self.nonce = headers['Replay-Nonce']\n return data, code, headers\n\n def wait(self, url, statuses, err_msg, interval=DEFAULT_INTERVAL):\n logging.info('waiting for status not in statuses: %s', statuses)\n while True:\n rslt, _, _ = self.signed_get(url, None, err_msg=err_msg)\n if rslt['status'] not in statuses:\n return rslt\n time.sleep(interval)\n\n def register(self):\n logging.info('register account')\n reg_payload = {'termsOfServiceAgreed': True}\n account, code, acct_headers = self.signed_get(\n self.directory['newAccount'], reg_payload, 'register error')\n logging.info('registered!' 
if code == 201 else 'already registered!')\n self.kid = acct_headers['Location']\n if self.contact:\n logging.info('update contact')\n account, _, _ = self.signed_get(\n self.kid, {'contact': self.contact}, 'update contact error')\n\n def make_order(self, domains, pem_prikey, password=None):\n logging.info('make order')\n od = Order(self, domains, pem_prikey, password)\n payload = od.gen_order()\n od.order, _, od.headers = self.signed_get(\n self.directory['newOrder'], payload, 'make order error')\n return od\n\n\nclass Order(object):\n\n def __init__(self, acct, domains, pem_prikey, password=None):\n self.acct = acct\n logging.info('domains: %s', domains)\n self.domains = domains\n logging.info('load domain key from pemfile: %s', pem_prikey)\n self.pkey = read_privatekey(pem_prikey, password)\n self.order, self.header = None, None\n\n def gen_order(self):\n return {'identifiers': [{'type': 'dns', 'value': d} for d in self.domains]}\n\n def gen_csr(self, fmt=Encoding.DER):\n name = x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, self.domains[0]),\n ])\n csr = x509.CertificateSigningRequestBuilder().subject_name(name)\n if len(self.domains) > 1:\n alternative = [x509.DNSName(d.strip()) for d in domains[1:]]\n csr = csr.add_extension(\n x509.SubjectAlternativeName(alternative), critical=False)\n csr = csr.sign(self.pkey, hashes.SHA256(), BACKEND)\n return csr.public_bytes(fmt)\n\n def finalize(self):\n logging.debug('pem csr:\\n' + self.gen_csr(Encoding.PEM).decode('utf-8'))\n logging.warning('sign cert')\n payload = {'csr': _b64(self.gen_csr())}\n self.order, _, _= self.acct.signed_get(\n self.order['finalize'], payload, 'finalize order error')\n self.order = self.acct.wait(\n self.headers['Location'], {'pending', 'processing'},\n 'check order status error')\n if self.order['status'] != 'valid':\n raise OrderError('order failed: %s' % order)\n logging.warning('cert signed.')\n\n def download_cert(self):\n logging.info('download cert.')\n pem, _, _ = self.acct.signed_get(self.order['certificate'], None,\n err_msg='certificate download failed')\n return pem\n \n\ndef read_config_ini(configpath):\n try:\n import configparser\n cp = configparser.ConfigParser()\n except ImportError:\n import ConfigParser as configparser\n cp = configparser.SafeConfigParser()\n try:\n cp.read(configpath)\n except configparser.MissingSectionHeaderError:\n return\n cfg = dict(cp['main'])\n if 'domains' in cfg:\n cfg['domain'] = cfg.pop('domains').split(',')\n if 'contacts' in cfg:\n cfg['contact'] = cfg.pop('contacts').split(',')\n for n in cp.sections():\n if n == 'main':\n continue\n v = dict(cp[n])\n v['name'] = n\n cfg.setdefault('validator', []).append(v)\n return cfg\n\n\ndef read_config_jsonyaml(configpath):\n with open(configpath, 'rb') as fi:\n data = fi.read()\n try:\n return json.loads(data)\n except (ValueError, TypeError):\n pass\n try:\n import yaml\n return yaml.safe_load(data)\n except ValueError:\n pass\n\n\ndef read_config():\n parser = argparse.ArgumentParser()\n parser.add_argument('--account-key', '-a',\n help='path to your account pem key')\n parser.add_argument('--config', '-c',\n help='path to config file')\n parser.add_argument('--domain', '-d', action='append',\n help='domains')\n parser.add_argument('--domain-key', '-k',\n help='path to your csr pem key')\n parser.add_argument('--acme-path', '-p',\n help='path to the .well-known/acme-challenge/ directory')\n parser.add_argument('--loglevel', '-l',\n help='log level (e.g. 
DEBUG/INFO/WARNING/ERROR)')\n parser.add_argument('--logfile', '-f',\n help='log file')\n parser.add_argument('--nocheck', default=False, action='store_true',\n help='disable checking of the challenge file')\n parser.add_argument('--directory-url', '-u',\n help='certificate authority directory url')\n parser.add_argument('--contact', action='append',\n help='contact details (e.g. mailto:aaa@bbb.com)')\n args = parser.parse_args()\n\n if args.config:\n cfg = read_config_ini(args.config)\n if not cfg:\n cfg = read_config_jsonyaml(args.config)\n else:\n cfg = {}\n\n for n in ['account_key', 'domain', 'domain_key',\n 'loglevel', 'logfile', 'directory_url']:\n if getattr(args, n):\n cfg[n.replace('_', '-')] = getattr(args, n)\n if args.acme_path:\n v = {\n 'name': 'file',\n 'path': args.acme_path,\n 'nocheck': args.nocheck}\n cfg.setdefault('validator', []).append(v)\n\n cfg.setdefault('loglevel', 'WARNING')\n cfg.setdefault('directory-url', DEFAULT_DIRECTORY_URL)\n\n for n in ['account-key', 'domains', 'domain-key', 'validator']:\n if not cfg.get(n):\n raise ValueError('no %s' % n)\n return cfg\n\n\ndef main():\n cfg = read_config()\n\n logging.basicConfig(level=cfg['loglevel'], filename=cfg.get('logfile'),\n format='%(asctime)s [%(levelname)s] %(message)s')\n logging.debug('config:\\n%s', json.dumps(cfg, indent=4))\n\n acct = Account(cfg['directory-url'], cfg.get('contact'))\n if cfg['account-key'].startswith('-----BEGIN RSA PRIVATE KEY-----'):\n acct.load_key(cfg['account-key'])\n else:\n acct.read_key(cfg['account-key'])\n \n validators = []\n for v in cfg['validator']:\n mod = __import__(v.pop('name'))\n validators.append(mod.Validator(**v))\n\n acct.register()\n od = acct.make_order(cfg['domains'], cfg['domain-key'])\n for validator in validators:\n try:\n validator(od)\n break\n except Exception as e:\n logging.error(e)\n else:\n raise ValidateError('no validator works')\n od.finalize()\n sys.stdout.write(od.download_cert())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"acme.py","file_name":"acme.py","file_ext":"py","file_size_in_byte":13883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"541636404","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Author : leeyoshinari\r\nimport os\r\nimport re\r\nimport time\r\nimport json\r\nimport random\r\n\r\nimport redis\r\nimport scrapy\r\nfrom scrapy.utils.project import get_project_settings\r\n\r\nfrom zhihu_spider.login import Login\r\nfrom zhihu_spider.MySQL import MySQL\r\nfrom zhihu_spider.items import SoulmateAnswerItem, SoulmateCommentItem\r\nfrom zhihu_spider.SaveImageAndGetBeauty import get_image_and_beauty\r\n\r\n\r\nclass SoulmateSpider(scrapy.Spider):\r\n name = 'soulmate'\r\n allowed_domains = ['www.zhihu.com']\r\n start_urls = ['https://www.zhihu.com/question/275359100/answers/updated']\r\n all_urls = []\r\n\r\n question_url = 'https://www.zhihu.com/api/v4/questions/{}/answers?include=data%5B*%5D.is_normal%2Cadmin_clo' \\\r\n 'sed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reaso' \\\r\n 'n%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_' \\\r\n 'content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2C' \\\r\n 'review_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvotin' \\\r\n 'g%2Cis_thanked%2Cis_nothelp%2Cis_labeled%2Cis_recognized%2Cpaid_info%3Bdata%5B*%5D.mark_infos%5B*%5' \\\r\n 
'D.url%3Bdata%5B*%5D.author.follower_count%2Cbadge%5B*%5D.topics&offset=20&limit=20&sort_by=updated'\r\n\r\n comment_url = 'https://www.zhihu.com/api/v4/answers/{}/root_comments?include=data%5B*%5D.author%2Ccollapse' \\\r\n 'd%2Creply_to_author%2Cdisliked%2Ccontent%2Cvoting%2Cvote_count%2Cis_parent_author%2Cis_autho' \\\r\n 'r&order=normal&limit=20&offset=0&status=open'\r\n\r\n setting = get_project_settings()\r\n cookies = None\r\n headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Language': 'en',\r\n 'User-Agent': random.choice(setting.get('USER_AGENT')),\r\n }\r\n\r\n IS_REDIS = setting.get('IS_REDIS')\r\n IS_BAIDU = setting.get('IS_BAIDU')\r\n\r\n MySQL()\r\n\r\n \"\"\"连接redis\"\"\"\r\n if IS_REDIS:\r\n pool = redis.ConnectionPool(host=setting.get('REDIS_HOST'), port=setting.get('REDIS_PORT'),\r\n password=setting.get('REDIS_PASSWORD'), decode_responses=True)\r\n r_0 = redis.Redis(connection_pool=pool, db=0)\r\n r_1 = redis.Redis(connection_pool=pool, db=1)\r\n\r\n def start_requests(self):\r\n self.is_login()\r\n self.all_urls = self.read_start_urls()\r\n # yield scrapy.Request(url=self.question_url, cookies=self.cookies, callback=self.parse)\r\n for datas in self.all_urls:\r\n url = self.question_url.format(datas['questionId'])\r\n yield scrapy.Request(url=url, cookies=self.cookies, callback=self.parse, meta={'code': datas['code'], 'parentCode': datas['parentCode']})\r\n\r\n def parse(self, response):\r\n answer_res = json.loads(response.text)\r\n items = SoulmateAnswerItem()\r\n code = response.meta['code']\r\n\r\n answers = answer_res.get('data', None)\r\n if answers:\r\n for answer in answers:\r\n contents = answer.get('content')\r\n if len(contents) < 30: # 如果回答的字数太少,则直接跳过\r\n pass\r\n\r\n updateTime = str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(answer.get('updated_time'))))\r\n if self.r_0.get(str(answer.get('id'))) == updateTime or str(answer.get('id'))=='550465108': # 如果这个答案爬过,直接跳过,防止爬虫中断后重新爬取\r\n print('{}:此回答未更新,跳过'.format(answer.get('id')))\r\n time.sleep(random.randint(1, 2))\r\n yield scrapy.Request(url=self.comment_url.format(answer.get('id')), headers=self.headers,\r\n cookies=self.cookies, callback=self.parser_comment, meta=response.meta)\r\n else:\r\n if self.r_0.get(str(answer.get('id'))):\r\n is_save = False\r\n else:\r\n is_save = True\r\n self.r_0.set(str(answer.get('id')), updateTime)\r\n\r\n items['answer_id'] = answer.get('id') # 答案id\r\n items['answerer_id'] = answer.get('author').get('id') # 回答者id\r\n items['beauty'] = -1 # 回答者颜值\r\n items['gender'] = answer.get('author').get('gender') # 回答者性别,知乎的性别\r\n items['face_shape'] = -1 # 回答者脸型,默认值\r\n\r\n user_url = answer.get('author').get('id')\r\n if user_url == '0': # 如果是匿名用户,id为0,则取答案的id命名图片\r\n user_url = answer.get('id')\r\n image_urls = self.get_imageurl(contents) # 获取回答中的所有图片的url\r\n result = get_image_and_beauty(user_url, image_urls, is_save=is_save, is_baidu=self.IS_BAIDU) # 计算图片中人脸的颜值和性别,以及有效图片(有人脸的)数\r\n if result['code'] == 0:\r\n items['beauty'] = result['beauty']\r\n elif result['code'] in [17, 18, 19, 222207]: # 异常返回错误码,表明颜值检测不可用,终止爬虫\r\n self.crawler.engine.close_spider(self, 'response msg error {}, job done!\\n'\r\n 'code is {}'.format(response.text, result['code']))\r\n break\r\n\r\n if result['counter']:\r\n items['gender'] = result['gender'] # 如果有人脸,则用人脸识别的性别修改从知乎上获取的性别\r\n items['face_shape'] = result['face_shape'] # 如果有人脸,则用人脸识别的脸型修改默认值\r\n\r\n items['pic_num'] = result['counter'] # 有效图片数,有人脸的图片\r\n\r\n user_info = 
self.get_user_info(contents)\r\n if user_info['gender'] != -1:\r\n items['gender'] = user_info['gender'] # 如果回答中明确说明自己的性别,则替换已有的值\r\n\r\n if user_info['height'] >= 180 and user_info['weight'] >= 65:\r\n items['gender'] = 1\r\n\r\n if 165 >= user_info['height'] > 135 and 50 >= user_info['weight'] > 30:\r\n items['gender'] = 0\r\n\r\n items['age'] = user_info['age'] # 回答者年龄\r\n items['height'] = user_info['height'] # 回答者身高\r\n items['weight'] = user_info['weight'] # 回答者体重\r\n items['url_token'] = answer.get('author').get('url_token') # 回答者token\r\n items['name'] = answer.get('author').get('name') # 回答者名字\r\n items['follower_count'] = answer.get('author').get('follower_count') # 回答者粉丝数\r\n items['headline'] = answer.get('author').get('headline') # 回答者签名\r\n items['content'] = answer.get('content') # 回答内容\r\n items['voteup_count'] = answer.get('voteup_count') # 答案点赞数\r\n items['comment_count'] = answer.get('comment_count') # 答案评论数\r\n items['create_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(answer.get('created_time')))\r\n items['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(answer.get('updated_time')))\r\n items['code'] = code\r\n\r\n yield items\r\n\r\n time.sleep(random.randint(1, 2))\r\n yield scrapy.Request(url=self.comment_url.format(answer.get('id')), headers=self.headers,\r\n cookies=self.cookies, callback=self.parser_comment, meta=response.meta)\r\n\r\n if answer_res.get('paging'):\r\n if not answer_res.get('paging').get('is_end'):\r\n next_url = answer_res.get('paging').get('next')\r\n time.sleep(random.randint(1, 3))\r\n yield scrapy.Request(url=next_url, headers=self.headers, cookies=self.cookies,\r\n callback=self.parse, meta=response.meta)\r\n\r\n def parser_comment(self, response):\r\n comment_res = json.loads(response.text)\r\n comment_items = SoulmateCommentItem()\r\n comment_son = SoulmateCommentItem()\r\n code = response.meta['code']\r\n parentCode = response.meta['parentCode']\r\n\r\n comments = comment_res.get('data', None)\r\n for comment in comments:\r\n commentId = comment.get('id')\r\n createTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(comment.get('created_time')))\r\n answerId = re.compile('answers/(\\d+)/root_comments').findall(response.url)[-1] # 答案id\r\n\r\n if self.r_1.get(str(commentId)) == createTime:\r\n print('{}:该评论已入库,跳过'.format(commentId))\r\n else:\r\n comment_items['answer_id'] = answerId\r\n comment_items['comment_id'] = commentId # 评论id\r\n comment_items['parent_id'] = ''\r\n comment_items['comment_content'] = comment.get('content') # 评论内容\r\n comment_items['vote_count'] = comment.get('vote_count') # 点赞数\r\n comment_items['commenter_id'] = comment.get('author').get('member').get('id') # 评论人id\r\n comment_items['commenter_token'] = comment.get('author').get('member').get('url_token') # 评论人token\r\n comment_items['commenter_name'] = comment.get('author').get('member').get('name') # 评论人名字\r\n comment_items['commenter_gender'] = comment.get('author').get('member').get('gender') # 评论人性别\r\n comment_items['commenter_headline'] = comment.get('author').get('member').get('headline') # 评论人签名\r\n comment_items['create_time'] = createTime\r\n comment_items['code'] = code\r\n comment_items['parentCode'] = parentCode % 10\r\n\r\n self.r_1.set(str(commentId), createTime)\r\n yield comment_items\r\n\r\n child_comments = comment.get('child_comments')\r\n for child in child_comments:\r\n comment_son['answer_id'] = answerId\r\n child_comment_id = child.get('id') # 评论id\r\n child_create_time = time.strftime(\"%Y-%m-%d 
%H:%M:%S\", time.localtime(child.get('created_time')))\r\n if self.r_1.get(str(child_comment_id)) == child_create_time:\r\n print('{}:该评论已入库,跳过'.format(child_comment_id))\r\n else:\r\n self.r_1.set(str(child_comment_id), child_create_time)\r\n comment_son['comment_id'] = child_comment_id\r\n comment_son['parent_id'] = commentId\r\n comment_son['comment_content'] = child.get('content') # 评论内容\r\n comment_son['vote_count'] = child.get('vote_count') # 点赞数\r\n comment_son['commenter_id'] = child.get('author').get('member').get('id') # 评论人id\r\n comment_son['commenter_token'] = child.get('author').get('member').get('url_token') # 评论人token\r\n comment_son['commenter_name'] = child.get('author').get('member').get('name') # 评论人名字\r\n comment_son['commenter_gender'] = child.get('author').get('member').get('gender') # 评论人性别\r\n comment_son['commenter_headline'] = child.get('author').get('member').get('headline') # 评论人签名\r\n comment_son['create_time'] = child_create_time\r\n comment_son['code'] = code\r\n comment_son['parentCode'] = parentCode % 10\r\n\r\n yield comment_son\r\n\r\n if comment_res.get('paging'):\r\n if not comment_res.get('paging').get('is_end'):\r\n next_url = '{}{}'.format('https://www.zhihu.com/api/v4/answers',\r\n comment_res.get('paging').get('next').split('answers')[1])\r\n time.sleep(random.randint(1, 2))\r\n yield scrapy.Request(url=next_url, headers=self.headers, cookies=self.cookies,\r\n callback=self.parser_comment, meta=response.meta)\r\n\r\n def is_login(self, res=None):\r\n if res:\r\n if '有问题,上知乎' in res.text or res.status != 200:\r\n login = Login()\r\n cookie_path = os.path.join(self.setting.get('COOKIE_PATH'), 'cookies.txt')\r\n if os.path.exists(cookie_path):\r\n os.remove(cookie_path)\r\n\r\n self.cookies = login.get_cookie()\r\n del login\r\n else:\r\n login = Login()\r\n cookie_path = os.path.join(self.setting.get('COOKIE_PATH'), 'cookies.txt')\r\n if os.path.exists(cookie_path):\r\n self.cookies = login.read_cookie()\r\n else:\r\n self.cookies = login.get_cookie()\r\n del login\r\n\r\n def get_imageurl(self, content):\r\n if content:\r\n res = re.compile('data-original=\"(.*?)\"').findall(content)\r\n return list(set(res))\r\n\r\n def read_start_urls(self):\r\n start_urls_path = os.path.join(self.setting.get('COOKIE_PATH'), 'start_urls.txt')\r\n return json.load(open(start_urls_path, 'r', encoding='utf-8'))\r\n\r\n def get_user_info(self, content):\r\n age = -1\r\n gender = -1\r\n height = -1\r\n weight = -1\r\n\r\n def filte(s):\r\n return list(filter(None, list(s)))[0]\r\n\r\n if content:\r\n ages = re.compile('(\\d\\d)岁|年龄(\\d\\d)').findall(content)\r\n years = re.compile('(\\d\\d)年').findall(content)\r\n if '女' in content[:7]:\r\n gender = 0\r\n elif '男' in content[:7]:\r\n gender = 1\r\n\r\n genders = re.compile('性别([男|女])|本人([男|女])|\\d\\d年([男|女])').findall(content)\r\n heights = re.compile('身高.*?([1][4-9]\\d)|([1][4-9]\\d)cm|([1][4-9]\\d)CM').findall(content)\r\n h = re.compile('身高.*?([1]\\.\\d{1,2})|([1]\\.\\d{1,2})米|([1]\\.\\d{1,2})m|([1]\\.\\d{1,2})M').findall(content)\r\n weights = re.compile('体重.*?([3-9]\\d)|([3-9]\\d)kg|([3-9]\\d)KG|([3-9]\\d)Kg|([3-9]\\d)公斤').findall(content)\r\n w = re.compile('体重.*?(\\d{2,3})斤|(\\d{2,3})斤').findall(content)\r\n\r\n if ages:\r\n age = int(filte(ages[0]))\r\n if years:\r\n year = int(years[0])\r\n if year > 50:\r\n age = 121 - year\r\n elif year < 21:\r\n age = 21 - year\r\n if age < 16 or age > 48:\r\n age = -1\r\n\r\n if heights:\r\n height = int(filte(heights[0]))\r\n elif h:\r\n height = float(filte(h[0])) * 
100\r\n\r\n if weights:\r\n weight = int(filte(weights[0]))\r\n elif w:\r\n weight = float(filte(w[0])) / 2\r\n\r\n if genders:\r\n try:\r\n gender = ['女', '男'].index(filte(genders[0]))\r\n except:\r\n with open('error.txt', 'w') as f:\r\n f.write(content)\r\n\r\n if '希望他' in content or '老阿姨' in content or '男的,' in content or ',女,' in content or '爱好男' in content \\\r\n or '我闺蜜' in content or '找个小哥哥' in content or '远嫁' in content or '嫁不出去' in content or '嘻嘻嘻' in content \\\r\n or '男朋友' in content or '可盐可甜' in content or '比我高' in content or '身高控' in content or '冷暴力' in content \\\r\n or '化妆' in content or '追剧' in content or '爱豆' in content or '喜欢的男孩' in content or '大龄女青年' in content or '独女' in content \\\r\n or '独生女' in content or '肤白' in content or '180以上' in content or '175以上' in content or '175及以上' in content or '男生较少' in content \\\r\n or '遇到他' in content or '颜值尚可' in content or '对男方' in content or '176以上' in content or '178以上' in content or '177以上' in content \\\r\n or '单身小哥哥' in content or '妆后' in content or '梨形身材' in content or '男生追过' in content or '陪我逛' in content \\\r\n or '年姑娘' in content or '夸可爱' in content or '本人女' in content or '身材苗条' in content or '高跟' in content \\\r\n or '优秀的小哥哥' in content or '甜甜的恋爱' in content or '小哥哥看这里' in content or '性别:女' in content \\\r\n or ' 女生 ' in content or 'p>女,' in content or '征男友' in content:\r\n gender = 0\r\n if '希望她' in content or '女的,' in content or ',男,' in content or '爱好女' in content or '找个小姐姐' in content \\\r\n or '到女朋友' in content or '有女朋友' in content or '遇到她' in content or '女生追过' in content or '不帅' in content \\\r\n or '对女方' in content or '本人男' in content or '老男孩' in content or '独生子,' in content or ',男生,' in content \\\r\n or '希望的她' in content or '独子' in content or '小姐姐看这里' in content or '性别:男' in content or ' 男生 ' in content \\\r\n or 'p>男,' in content or '征女友' in content:\r\n gender = 1\r\n\r\n if gender == 0 and height > 179:\r\n height = -1\r\n if gender == 1 and 160 > height > 100:\r\n height = -1\r\n\r\n if weight > 70 and gender == 0:\r\n weight = int(weight / 2)\r\n if 50 > weight > 1 and gender == 1:\r\n weight = -1\r\n\r\n return {'age': age, 'height': height, 'weight': weight, 'gender': gender}\r\n","sub_path":"zhihu_spider/spiders/soulmate.py","file_name":"soulmate.py","file_ext":"py","file_size_in_byte":18572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"180204715","text":"import time\nimport numpy as np\nimport pickle as pkl\nimport sys\nclass Generator:\n \"\"\"\n this is part of application level.\n don't have to worry about until we implement transport level.\n \"\"\"\n def __init__(self):\n # Check if it's time to generate another iframe/pframe\n self.iFrameList = []\n self.pFrameList = []\n self.CONST_dtype = 'int64'\n self.CONST_pFrameSize = 153633\n self.CONST_iFrameSize = 9830433\n self.pFrameShape = (80, 80, 3)\n self.iFrameShape = (640, 640, 3)\n\n def get_iframe(self):\n res = np.random.randint(10,240, size=self.iFrameShape)\n self.iFrameList.append(res)\n byteRes = res.tobytes()\n return byteRes\n\n def get_pframe(self):\n res = np.random.randint(0,11, size=self.pFrameShape)\n self.pFrameList.append(res)\n byteRes = res.tobytes()\n return byteRes\n\n def byteToFrame(self,frameInput):\n res = ''\n if sys.getsizeof(frameInput) == self.CONST_pFrameSize:\n res = np.frombuffer(frameInput,dtype=self.CONST_dtype).reshape(self.pFrameShape)\n elif sys.getsizeof(frameInput) == self.CONST_iFrameSize:\n res = 
np.frombuffer(frameInput,dtype=self.CONST_dtype).reshape(self.iFrameShape)\n return res\n \n # # integer to byte\n # def conv2Bytes(self, frame):\n # res = b''\n # for i in frame:\n # for j in i:\n # for k in j:\n # res += k.to_bytes(1, byteorder='big', signed=False)\n # return res\n\n def store(self):\n with open('iframePKL.pkl', 'wb') as f:\n pkl.dump(self.iFrameList, f)\n with open('pframePKL.pkl', 'wb') as f:\n pkl.dump(self.pFrameList, f)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"79847738","text":"# Pygame/PyopenGL example by Bastiaan Zapf, Apr 2009\n### From http://python-opengl-examples.blogspot.sg/\n#\n# Draw an helix, wiggle it pleasantly\n#\n# Keywords: Alpha Blending, Textures, Animation, Double Buffer\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nfrom math import * # trigonometry\n\nimport pygame # just to get a display\n\n# get an OpenGL surface\n\npygame.init() \npygame.display.set_mode((800,600), pygame.OPENGL|pygame.DOUBLEBUF)\n\n# How to catch errors here?\n\ndone = False\n\nt=0\n\nwhile not done:\n\n t=t+1\n \n # for fun comment out these two lines\n\n glClearColor(0.0, 0.0, 0.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n # Get a perspective at the helix\n\n glMatrixMode(GL_PROJECTION); \n\n glLoadIdentity()\n gluPerspective(90,1,0.01,1000)\n gluLookAt(sin(t/200.0)*3,sin(t/500.0)*3,cos(t/200.0)*3,0,0,0,0,1,0)\n\n # Draw the helix (this ought to be a display list call)\n\n glMatrixMode(GL_MODELVIEW)\n\n # get a texture (this ought not to be inside the inner loop)\n\n texture=glGenTextures( 1 )\n\n glBindTexture( GL_TEXTURE_2D, texture );\n glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );\n\n # set sane defaults for a plethora of potentially uninitialized\n # variables\n\n glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,\n GL_REPEAT);\n glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,\n GL_REPEAT );\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n # a texture\n\n #pulse = sin(t/30)*0.5+0.5 # try this one\n pulse = 0\n\n texdata=[[[0.0,0,1,1],\n [0.0,0,0,0],\n [0.0,1,0,1],\n [0.0,0,0,0]],\n [[0.0,0,0,0],\n [pulse,pulse,pulse,1],\n [pulse,pulse,pulse,1],\n [0.0,0,0,0]],\n [[0.0,1,0,1],\n [1,pulse,pulse,1],\n [pulse,pulse,0,1],\n [0.0,0,0,0]],\n [[0.0,0,0,0],\n [0.0,0,0,0],\n [0.0,0,0,0],\n [0.0,0,0,0]]];\n\n glTexImage2Df(GL_TEXTURE_2D, 0,4,0,GL_RGBA,\n texdata)\n\n glEnable(GL_BLEND);\n glBlendFunc (GL_SRC_ALPHA, GL_ONE); # XXX Why GL_ONE?\n# alternatively:\n# glEnable(GL_DEPTH_TEST);\n\n glEnable( GL_TEXTURE_2D );\n # use the texture\n glBindTexture( GL_TEXTURE_2D, texture );\n\n # vertices & texture data\n\n glBegin(GL_TRIANGLE_STRIP);\n #pulse2 = 0.5\n\n for i in range(0,100):\n\n r=5.0 # try other values - integers as well\n d=1 # try other values\n\n\t#pulse2 += 0.5\n if (i%3==0):\n \tglTexCoord2f(0,i);\n \tglVertex3f( cos(i/r), -2.5+i*0.05, sin(i/r)); \n elif (i%3==1):\n \tglTexCoord2f(1,i);\n \tglVertex3f( cos(i/r+3.14/2), -2.5+i*0.05+d, sin(i/r+3.14/2));\n else:\n \tglTexCoord2f(2,i);\n \tglVertex3f( cos(i/r+3.14), -2.5+i*0.05+d, sin(i/r+3.14));\n# glVertex3f( cos(i/r+3.14)*pulse2, -2.5+i*0.05+d+pulse2*1, sin(i/r+3.14)*pulse2);\n \n\n glEnd();\n\n glFlush()\n\n glDeleteTextures(texture)\n 
pygame.display.flip()\n","sub_path":"blurry_beautiful_helix.py","file_name":"blurry_beautiful_helix.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"409974844","text":"#! /usr/bin/env python3\n\nimport os\nimport clip\nimport torch\n\nclass CLIPTransformerExporter(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, text):\n return self.encode_text(text)\n\n def encode_text(self, text):\n x = self.model.token_embedding(text).type(self.model.dtype) # [batch_size, n_ctx, d_model]\n x = x + self.model.positional_embedding.type(self.model.dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.model.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.model.ln_final(x).type(self.model.dtype)\n\n # Need the cast to IntTensor for ORT support\n x = x[torch.arange(x.shape[0]), text.type(torch.IntTensor).argmax(dim=-1)] @ self.model.text_projection\n return x\n\n\ndevice = \"cpu\"\nmodel, preprocess = clip.load(\"ViT-B/32\", device=device)\ntext = clip.tokenize([\"dummy text to tokenize\"]).to(device)\nexporter = CLIPTransformerExporter(model)\n\nos.makedirs(\"src/main/application/models\", exist_ok=True)\ntorch.onnx.export(exporter, text,\n \"src/main/application/models/transformer.onnx\",\n input_names=[\"input\"],\n output_names=[\"output\"],\n dynamic_axes={\"input\":{0:\"batch\"}, \"output\":{0:\"batch\"}})","sub_path":"text-image-search/src/python/clip_export.py","file_name":"clip_export.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"393930283","text":"import begin\nimport psutil\nimport subprocess\nfrom pathlib import Path\n\nimport logging\n\nfrom scheduled.active_processes import active_processes\n\n\n__author__ = \"cw-andrews\"\n\n\n# SCRIPT_PATH = 'C:/Users/chris.andrews/Dropbox/code_projects/python/scheduled/scheduled_sanitizer.py'\n\n\n@begin.start\n@begin.logging\ndef script_launcher(env_path, script_path):\n \"\"\"\n Runs the script using the passed environment path, script path,\n and how often to repeat the script (in minutes).\n \"\"\"\n\n env_path = Path(env_path)\n script_path = Path(script_path)\n script_name = script_path.name\n\n if script_name in ' '.join(active_processes(ignore='script_launcher.py')):\n logging.info(\"Process already running: '{!s}'\".format(script_name))\n elif script_name not in ' '.join(active_processes(ignore='script_launcher.py')):\n logging.info(script_name)\n logging.info(\"Process is not running already so launching: '{!s}'\".format(\n script_name))\n subprocess.run([str(env_path), str(script_path)])\n if script_name in ' '.join(active_processes(ignore='script_launcher.py')):\n logging.info(\"Process launched successfully: '{!s}'\".format(script_name))\n else:\n logging.error(\"Process not launched successfully: '{!s}'\".format(script_name))\n else:\n logging.error(\"Something went wrong...\")\n\n","sub_path":"script_launcher.py","file_name":"script_launcher.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"462885381","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2021\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nDescription:\n\n\"\"\"\nimport socket\nimport select\n\nimport asyncio\nimport logging\nimport sys\nimport os\n\n\nimport patterns\nimport view\nimport argparse\n\nlogging.basicConfig(filename='view.log', level=logging.DEBUG)\nlogger = logging.getLogger()\n\n\nclass IRCClient(patterns.Subscriber):\n\n def __init__(self):\n super().__init__()\n self.username = str()\n self._run = True\n self.is_connected = False\n\n def __init__(self, nickname, host, port):\n super().__init__()\n self.nickname = nickname\n self.host = host\n self.port = port\n self._run = True\n self.is_connected = False\n\n def set_view(self, view):\n self.view = view\n\n def update(self, msg):\n # Will need to modify this\n if not isinstance(msg, str):\n raise TypeError(f\"Update argument needs to be a string\")\n elif not len(msg):\n # Empty string\n return\n logger.info(f\"IRCClient.update -> msg: {msg}\")\n self.process_input(msg)\n\n def process_input(self, msg):\n # Will need to modify this\n if msg.lower().startswith('/connect '):\n self.add_msg(msg)\n split_string = msg.split(\" \")\n if len(split_string) > 4:\n self.username = split_string[1]\n self.server_host = split_string[2]\n self.server_port = split_string[3]\n self.real_name = split_string[4]\n self.connect()\n\n if msg.lower().startswith('/msg '):\n msg = msg[5:]\n self.add_msg(msg)\n self.send_message(msg)\n\n if msg.lower().startswith('/quit'):\n # Command that leads to the closure of the process\n raise KeyboardInterrupt\n\n def add_msg(self, msg):\n self.view.add_msg(self.nickname, msg)\n\n def add_msg_from_other(self, nickname, msg):\n self.view.add_msg(nickname, msg)\n\n async def run(self):\n \"\"\"\n Driver of your IRC Client\n \"\"\"\n try:\n while True:\n if hasattr(self, 'server_socket'):\n socket_list = [sys.stdin, self.server_socket]\n read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])\n for sock in read_sockets:\n # incoming message from remote server\n if sock == self.server_socket:\n data = sock.recv(4096).decode('utf-8')\n if not data:\n print('\\nDisconnected from chat server')\n else:\n if data.startswith(\"NICK\"):\n nickname = data.split(\":\")[0][5:]\n message = data[len(nickname)+6:]\n self.add_msg_from_other(nickname, message)\n elif data.startswith(\"NOTICE\"):\n err_msg = data[7:]\n self.add_msg_from_other(\"server\", err_msg)\n elif data.endswith(\"joined the #Global channel\"):\n self.add_msg_from_other(\"server\", data)\n else:\n self.add_msg(data)\n else:\n pass\n await asyncio.sleep(1)\n\n except KeyboardInterrupt:\n self.add_msg(f\"\\nServer interrupted, closing socket connections\")\n self.close()\n except RuntimeError:\n self.add_msg(f\"\\nConnection interrupted, closing socket connections\")\n self.close()\n\n def close(self):\n # Terminate connection\n logger.debug(f\"Closing IRC Client object\")\n pass\n\n def connect(self):\n if hasattr(self, 'nickname'):\n nick_msg = \" \".join([\"NICK\", self.nickname])\n\n if hasattr(self, 'username') and hasattr(self, 'server_host') and hasattr(self, 'server_port'):\n user_msg = \" \".join([\"USER\", self.username, self.server_host, self.server_port, self.real_name])\n\n if not(hasattr(self, 'server_socket')):\n self.connect_to_server()\n\n logger.info(f\"Nick: {nick_msg} User: {user_msg}\")\n msg = \";\".join([nick_msg, user_msg])\n logger.info(f\"Msg: {msg}\")\n self.server_socket.send(msg.encode())\n 
logger.info(\"NICK USER sent\")\n self.is_connected = True\n\n def send_message(self, msg):\n if self.is_connected:\n msg = \"\".join([\"PRIVMSG #Global :\", msg])\n logger.info(f\"Sending message {msg} to #Global on server {self.server_socket}\")\n self.server_socket.send(msg.encode())\n\n def connect_to_server(self):\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n logger.info(f\"connecting to socket at host:{self.server_host}:{self.server_port}\")\n self.server_socket.connect((str(self.server_host), int(self.server_port)))\n logger.info(f\"connected to server\")\n\n\ndef set_parser(): \n parser = argparse.ArgumentParser()\n parser.add_argument('--nickname', action=\"store\", dest=\"nickname\", type=lambda x: x if len(x) <= 9 else False,\n default=\"client_01\")\n parser.add_argument('--host', action=\"store\", dest=\"host\", default=\"localhost\", help=\"server hostname\")\n parser.add_argument('--port', action=\"store\", dest=\"port\", default=8081, help=\"server port number\")\n return parser \n\ndef main(args):\n # Pass your arguments where necessary\n client = IRCClient(args.nickname, args.host, args.port)\n logger.info(f\"Client object created\")\n with view.View() as v:\n logger.info(f\"Entered the context of a View object\")\n client.set_view(v)\n client.add_msg_from_other(\"info\",\n \"Type /connect to connect to a \"\n \"server!\")\n logger.debug(f\"Passed View object to IRC Client\")\n v.add_subscriber(client)\n logger.debug(f\"IRC Client is subscribed to the View (to receive user input)\")\n async def inner_run():\n await asyncio.gather(\n v.run(),\n client.run(),\n return_exceptions=True,\n )\n try:\n asyncio.run( inner_run() )\n except KeyboardInterrupt as e:\n logger.debug(f\"Signifies end of process\")\n client.close()\n\nif __name__ == \"__main__\":\n parser = set_parser()\n args = parser.parse_args()\n if not args.nickname:\n sys.exit('Error: nickname can not be greater than 9 characters long')\n main(args)\n","sub_path":"irc_code/a2/Include/irc_client.py","file_name":"irc_client.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"148663034","text":"# 素因数分解 O(√N)\n# 試し割り法\n# nより小さい数で順に割っていき割り切れるかどうかで素因数の判定を行う\n# 素因数が見つかった場合、その素因数で割り切れなくなるまでnを更新する\n# Nの素因数は最大でも√NであることよりO(√N)で計算可能\n# (素因数とその個数はcollections のCounterを使うと手軽)\n\nfrom collections import Counter, defaultdict\n\n\n# 各素因数の個数を辞書で返す\ndef prime_factrize_d(n):\n p_factors_d = defaultdict(int)\n if n < 2:\n return p_factors_d\n # 2だけ先に取り分けておく\n while n % 2 == 0:\n p_factors_d[2] += 1\n n //= 2\n # 残りの奇数の素因数を探す\n f = 3\n while f * f <= n:\n if n % f == 0:\n p_factors_d[f] += 1\n n //= f\n else:\n f += 2\n # nが素数の場合\n if n != 1:\n p_factors_d[n] += 1\n return p_factors_d\n\n\n# 素因数のリストを返す\ndef prime_factrize(n):\n if n < 2:\n return []\n p_factors = []\n # 2だけ先に取り分けておく\n while n % 2 == 0:\n p_factors.append(2)\n n //= 2\n # 残りの奇数の素因数を探す\n f = 3\n while f * f <= n:\n if n % f == 0:\n p_factors.append(f)\n n //= f\n else:\n f += 2\n # nが素数の場合\n if n != 1:\n p_factors.append(n)\n return p_factors\n\n\n# 約数の個数を返す\ndef count_divisor(n):\n res = 1\n d = prime_factrize_d(n)\n for v in d.values():\n res *= v + 1\n return res\n\n\n# 素因数とその個数のタプルのリストを返す\ndef get_prime_counts(n):\n p_factors = prime_factrize(n)\n counter = Counter(p_factors)\n return 
list(counter.items())\n","sub_path":"snippets/prime-factrizations/prime_factrization.py","file_name":"prime_factrization.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"124835589","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 10 11:04:32 2017\n\n@author: reidkostenuk\n\"\"\"\n\nimport nltk\nimport pandas as pd\nimport numpy as np\nimport time\n\nstartTime = time.time()\n\ndfTI = pd.read_csv('train_input.csv')\ndfTO = pd.read_csv('train_output.csv')\n\nframes = [dfTI, dfTO]\ndf = pd.concat(frames, axis=1)\ndf.drop(df.columns[2], axis=1, inplace=True)\n\nhockey = []\nmovies = []\nnba = []\nnews = []\nnfl = [] \npolitics = []\nsoccer = [] \nworldnews = []\n\ntokenizer = nltk.RegexpTokenizer(r'\\w+')\nword_lemma = nltk.WordNetLemmatizer()\ncachedStopWords = set(nltk.corpus.stopwords.words('english'))\ncachedStopWords.update(('number', 'com', 'speaker_1', 'speaker_2', 'speaker_3', 'speaker_4', 'speaker_5', 'speaker_6', 'speaker_7', 'speaker_8', 'speaker_9', 'speaker_10'))\n\nfor i in range(len(df)):\n token = tokenizer.tokenize(df.loc[i, 'conversation'])\n token = [word for word in token if word not in cachedStopWords]\n tokenLem = [None]*len(token)\n for w in range(len(token)):\n tokenLem[w] = word_lemma.lemmatize(token[w])\n freq = nltk.FreqDist(tokenLem)\n \n if df.loc[i,'category'] == 'hockey':\n hockey.extend(freq.keys())\n elif df.loc[i,'category'] == 'movies':\n movies.extend(freq.keys())\n elif df.loc[i,'category'] == 'nba':\n nba.extend(freq.keys())\n elif df.loc[i,'category'] == 'news':\n news.extend(freq.keys())\n elif df.loc[i,'category'] == 'nfl':\n nfl.extend(freq.keys())\n elif df.loc[i,'category'] == 'politics':\n politics.extend(freq.keys())\n elif df.loc[i,'category'] == 'soccer':\n soccer.extend(freq.keys())\n elif df.loc[i,'category'] == 'worldnews':\n worldnews.extend(freq.keys())\n \nhockeyTag = nltk.pos_tag(hockey, tagset='universal')\nhockeyNV = []\nfor word, pos in hockeyTag:\n if pos == 'NOUN':# or pos == 'VERB':\n hockeyNV.append(word)\nhockeyDist = nltk.FreqDist(hockeyNV)\nhockeyWord = []\nhockeyProb = []\nfor key, value in hockeyDist.items():\n total = len(hockeyDist)\n hockeyWord.append(key)\n hockeyProb.append(float(value)/total)\nhockeyWord = np.asarray(hockeyWord)\nhockeyProb = np.asarray(hockeyProb)\nhockeyWordPd = pd.DataFrame(hockeyWord)\nhockeyProbPd = pd.DataFrame(hockeyProb)\nhockeyFrame = [hockeyWordPd, hockeyProbPd]\ndfHockey = pd.concat(hockeyFrame, axis=1)\ndfHockey.columns = ['word', 'probability']\ndfHockey = dfHockey.sort_values(by='probability', ascending=0)\n\nmoviesTag = nltk.pos_tag(movies, tagset='universal')\nmoviesNV = []\nfor word, pos in moviesTag:\n if pos == 'NOUN':# or pos == 'VERB':\n moviesNV.append(word)\nmoviesDist = nltk.FreqDist(moviesNV)\nmoviesWord = []\nmoviesProb = []\nfor key, value in moviesDist.items():\n total = len(moviesDist)\n moviesWord.append(key)\n moviesProb.append(float(value)/total)\nmoviesWord = np.asarray(moviesWord)\nmoviesProb = np.asarray(moviesProb)\nmoviesWordPd = pd.DataFrame(moviesWord)\nmoviesProbPd = pd.DataFrame(moviesProb)\nmoviesFrame = [moviesWordPd, moviesProbPd]\ndfMovies = pd.concat(moviesFrame, axis=1)\ndfMovies.columns = ['word', 'probability']\ndfMovies = dfMovies.sort_values(by='probability', ascending=0)\n\nnbaTag = nltk.pos_tag(nba, tagset='universal')\nnbaNV = []\nfor word, pos in nbaTag:\n if pos == 'NOUN':# or pos == 'VERB':\n 
nbaNV.append(word)\nnbaDist = nltk.FreqDist(nbaNV)\nnbaWord = []\nnbaProb = []\nfor key, value in nbaDist.items():\n total = len(nbaDist)\n nbaWord.append(key)\n nbaProb.append(float(value)/total)\nnbaWord = np.asarray(nbaWord)\nnbaProb = np.asarray(nbaProb)\nnbaWordPd = pd.DataFrame(nbaWord)\nnbaProbPd = pd.DataFrame(nbaProb)\nnbaFrame = [nbaWordPd, nbaProbPd]\ndfNba = pd.concat(nbaFrame, axis=1)\ndfNba.columns = ['word', 'probability']\ndfNba = dfNba.sort_values(by='probability', ascending=0)\n\nnewsTag = nltk.pos_tag(news, tagset='universal')\nnewsNV = []\nfor word, pos in newsTag:\n if pos == 'NOUN':# or pos == 'VERB':\n newsNV.append(word)\nnewsDist = nltk.FreqDist(newsNV)\nnewsWord = []\nnewsProb = []\nfor key, value in newsDist.items():\n total = len(newsDist)\n newsWord.append(key)\n newsProb.append(float(value)/total)\nnewsWord = np.asarray(newsWord)\nnewsProb = np.asarray(newsProb)\nnewsWordPd = pd.DataFrame(newsWord)\nnewsProbPd = pd.DataFrame(newsProb)\nnewsFrame = [newsWordPd, newsProbPd]\ndfNews = pd.concat(newsFrame, axis=1)\ndfNews.columns = ['word', 'probability']\ndfNews = dfNews.sort_values(by='probability', ascending=0)\n\nnflTag = nltk.pos_tag(nfl, tagset='universal')\nnflNV = []\nfor word, pos in nflTag:\n if pos == 'NOUN':# or pos == 'VERB':\n nflNV.append(word)\nnflDist = nltk.FreqDist(nflNV)\nnflWord = []\nnflProb = []\nfor key, value in nflDist.items():\n total = len(hockeyDist)\n nflWord.append(key)\n nflProb.append(float(value)/total)\nnflWord = np.asarray(nflWord)\nnflProb = np.asarray(nflProb)\nnflWordPd = pd.DataFrame(nflWord)\nnflProbPd = pd.DataFrame(nflProb)\nnflFrame = [nflWordPd, nflProbPd]\ndfNfl = pd.concat(nflFrame, axis=1)\ndfNfl.columns = ['word', 'probability']\ndfNfl = dfNfl.sort_values(by='probability', ascending=0)\n\npoliticsTag = nltk.pos_tag(politics, tagset='universal')\npoliticsNV = []\nfor word, pos in politicsTag:\n if pos == 'NOUN':# or pos == 'VERB':\n politicsNV.append(word)\npoliticsDist = nltk.FreqDist(politicsNV)\npoliticsWord = []\npoliticsProb = []\nfor key, value in politicsDist.items():\n total = len(politicsDist)\n politicsWord.append(key)\n politicsProb.append(float(value)/total)\npoliticsWord = np.asarray(politicsWord)\npoliticsProb = np.asarray(politicsProb)\npoliticsWordPd = pd.DataFrame(politicsWord)\npoliticsProbPd = pd.DataFrame(politicsProb)\npoliticsFrame = [politicsWordPd, politicsProbPd]\ndfPolitics = pd.concat(politicsFrame, axis=1)\ndfPolitics.columns = ['word', 'probability']\ndfPolitics = dfPolitics.sort_values(by='probability', ascending=0)\n\nsoccerTag = nltk.pos_tag(soccer, tagset='universal')\nsoccerNV = []\nfor word, pos in soccerTag:\n if pos == 'NOUN':# or pos == 'VERB':\n soccerNV.append(word)\nsoccerDist = nltk.FreqDist(soccerNV)\nsoccerWord = []\nsoccerProb = []\nfor key, value in soccerDist.items():\n total = len(soccerDist)\n soccerWord.append(key)\n soccerProb.append(float(value)/total)\nsoccerWord = np.asarray(soccerWord)\nsoccerProb = np.asarray(soccerProb)\nsoccerWordPd = pd.DataFrame(soccerWord)\nsoccerProbPd = pd.DataFrame(soccerProb)\nsoccerFrame = [soccerWordPd, soccerProbPd]\ndfSoccer = pd.concat(soccerFrame, axis=1)\ndfSoccer.columns = ['word', 'probability']\ndfSoccer = dfSoccer.sort_values(by='probability', ascending=0)\n\nworldnewsTag = nltk.pos_tag(worldnews, tagset='universal')\nworldnewsNV = []\nfor word, pos in worldnewsTag:\n if pos == 'NOUN':# or pos == 'VERB':\n worldnewsNV.append(word)\nworldnewsDist = nltk.FreqDist(worldnewsNV)\nworldnewsWord = []\nworldnewsProb = []\nfor key, 
value in worldnewsDist.items():\n total = len(worldnewsDist)\n worldnewsWord.append(key)\n worldnewsProb.append(float(value)/total)\nworldnewsWord = np.asarray(worldnewsWord)\nworldnewsProb = np.asarray(worldnewsProb)\nworldnewsWordPd = pd.DataFrame(worldnewsWord)\nworldnewsProbPd = pd.DataFrame(worldnewsProb)\nworldnewsFrame = [worldnewsWordPd, worldnewsProbPd]\ndfWorldnews = pd.concat(worldnewsFrame, axis=1)\ndfWorldnews.columns = ['word', 'probability']\ndfWorldnews = dfWorldnews.sort_values(by='probability', ascending=0)\n\ndfHockey.to_csv('condProbHockey.csv')\ndfMovies.to_csv('condProbMovies.csv')\ndfNba.to_csv('condProbNba.csv')\ndfNews.to_csv('condProbNews.csv')\ndfPolitics.to_csv('condProbPolitics.csv')\ndfNfl.to_csv('condProbNfl.csv')\ndfSoccer.to_csv('condProbSoccer.csv')\ndfWorldnews.to_csv('condProbWorldnews.csv')\n\nstopTime = time.time()\nprint(\"--- %s seconds ---\" % (stopTime - startTime))","sub_path":"conv_contain_word_data.py","file_name":"conv_contain_word_data.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"530677657","text":"from turtle import Turtle, screensize\r\nALIGNMENT=\"center\"\r\nFONT=(\"ROBOTO\", 18, \"normal\")\r\nclass ScoreBoard(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.score=0\r\n self.penup()\r\n self.color(\"white\")\r\n self.hideturtle() \r\n self.goto(-20,260) \r\n self.printscore()\r\n \r\n def printscore(self):\r\n self.write(f\"score={self.score}\",align=ALIGNMENT,move=False,font=FONT)\r\n\r\n def gameover(self):\r\n self.clear()\r\n self.goto(-10,30)\r\n self.write(f\"GAME OVER\\nFinal Score={self.score}\",align=ALIGNMENT,move=False,font=(\"Algerian\", 18, \"normal\"))\r\n \r\n \r\n def increse_score(self):\r\n self.score+=1\r\n self.clear()\r\n self.printscore() ","sub_path":"DAY-20-SNAKE_1/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"173052102","text":"#Extract Phone\nimport pandas as pd\nimport re\nfrom flashtext import KeywordProcessor\nfrom nltk import sent_tokenize\n\nclass extract_phone_details:\n \n def __init__(self, input_text):\n self.input_text = input_text\n self.df = pd.read_csv(\"country_code.csv\")\n \n \n def getPhone(self, inputString, debug=False):\n number = None\n try:\n pattern = re.compile(r'([+(]?\\d+[)\\-]?[ \\t\\r\\f\\v]*[(]?\\d{2,}[()\\-]?[ \\t\\r\\f\\v]*\\d{2,}[()\\-]?[ \\t\\r\\f\\v]*\\d*[ \\t\\r\\f\\v]*\\d*[ \\t\\r\\f\\v]*)')\n match = pattern.findall(inputString)\n match = [re.sub(r'[,.]', '', el) for el in match if len(re.sub(r'[()\\-.,\\s+]', '', el))>6]\n match = [re.sub(r'\\D$', '', el).strip() for el in match]\n match = [el for el in match if len(re.sub(r'\\D','',el)) <= 15]\n # Remove number strings that are greater than 15 digits\n try:\n for el in list(match):\n # Create a copy of the list since you're iterating over it\n if len(el.split('-')) > 3: continue # Year format YYYY-MM-DD\n for x in el.split(\"-\"):\n try:\n # Error catching is necessary because of possibility of stray non-number characters\n # if int(re.sub(r'\\D', '', x.strip())) in range(1900, 2100):\n if x.strip()[-4:].isdigit():\n if int(x.strip()[-4:]) in range(1900, 2100):\n # Don't combine the two if statements to avoid a type conversion error\n match.remove(el)\n except:\n pass\n except:\n pass\n number = match\n except:\n pass\n return(number)\n \n def country_code_number(self, 
code):\n result_dict = {}\n if(self.number[:len(code)] == code):\n if(code ==\"91\"):\n if(len(self.number) == 10):\n result_dict[\"country_code\"]= \"\"\n result_dict[\"number\"] = self.number\n if(len(self.number) == 12):\n result_dict[\"country_code\"]= code\n result_dict[\"number\"] = self.number[len(code):]\n else:\n result_dict[\"country_code\"]= code\n result_dict[\"number\"] = self.number[len(code):]\n return(result_dict)\n \n def extract_phone(self):\n result_dict = {}\n try:\n self.number = ''.join([n for n in self.getPhone(self.input_text)[0] if n.isdigit()])\n for i in range(self.df.shape[0]):\n result_dict = self.country_code_number(str(self.df.loc[i,'Code']))\n if(len(result_dict) > 1):\n return(result_dict)\n break\n elif(i == self.df.shape[0]):\n result_dict[\"country_code\"]= \"\"\n result_dict[\"number\"] = self.number\n return(result_dict)\n except:\n return(result_dict)\n \n\nclass extract_education_details:\n \n def __init__(self, input_text):\n self.input_text = input_text\n self.degree_file_path = \"doc_qualification/highest_qualification.txt\"\n self.college_file_path = \"doc_qualification/college.txt\"\n self.board_file_path = \"doc_qualification/education_board.txt\"\n self.specialization_file_path = \"doc_qualification/highest_specialization.txt\"\n \n def line_count(self,start):\n for k, v in self.line_index_dict.items():\n if(v > start):\n return(k)\n return(k+1)\n \n def degree_list(self):\n degree_dict ={}\n with open(self.degree_file_path) as fp:\n lines = fp.read().splitlines()\n processor = KeywordProcessor()\n processor.add_keywords_from_list(lines)\n found = processor.extract_keywords(self.document, span_info=True)\n for count, value in enumerate(found):\n line_number = self.line_count(value[1])\n degree_dict[value[0]] = line_number\n return(degree_dict)\n \n def board_list(self):\n board_dict = {}\n with open(self.board_file_path) as fp:\n lines = fp.read().splitlines()\n processor = KeywordProcessor()\n processor.add_keywords_from_list(lines)\n found = processor.extract_keywords(self.document, span_info=True)\n for count, value in enumerate(found):\n line_number = self.line_count(value[1])\n board_dict[value[0]] = line_number\n return(board_dict)\n \n def college_list(self):\n college_dict = {}\n with open(self.college_file_path) as fp:\n lines = fp.read().splitlines()\n processor = KeywordProcessor()\n processor.add_keywords_from_list(lines)\n found = processor.extract_keywords(self.document, span_info=True)\n for count, value in enumerate(found):\n line_number = self.line_count(value[1])\n college_dict[value[0]] = line_number\n return(college_dict)\n \n def specialization_list(self):\n specialization_dict = {}\n with open(self.specialization_file_path) as fp:\n lines = fp.read().splitlines()\n processor = KeywordProcessor()\n processor.add_keywords_from_list(lines)\n found = processor.extract_keywords(self.document, span_info=True)\n for count, value in enumerate(found):\n line_number = self.line_count(value[1])\n specialization_dict[value[0]] = line_number\n return(specialization_dict)\n \n def year_list(self):\n year_dict = {}\n for count, line in enumerate(self.lines):\n if re.search(r'\\b[21][09][8901][0-9]', line.lower()):\n value = re.findall(r'\\b[21][09][8901][0-9]',line.lower())\n year_dict[max(value)] = str(count)\n # year_dict[value[0]] = str(count) + \":\" + str(re.search(r'\\b[21][09][8901][0-9]', line.lower()).start())\n return(year_dict)\n \n def match_line_dict(self,line_index, comp_dict):\n maxi = 0\n temp_dict = {}\n for k,v in 
comp_dict.items():\n if(int(v) >= line_index):\n temp_dict[v] = 1/(abs(line_index - int(v))+1)\n if (maxi < 1/(abs(line_index - int(v))+1)):\n maxi = 1/(abs(line_index - int(v))+1)\n for k , v in temp_dict.items():\n if v == maxi and maxi > 0.05:\n for key,value in comp_dict.items():\n if(k == value):\n return(key)\n \n \n def extract_qualification(self):\n result = list()\n lines = []\n line_index_dict = {}\n for count, line in enumerate(sent_tokenize(self.input_text)):\n if (count > 0):\n line_index_dict[count] = len(''.join(line)) + (line_index_dict[int(count)-1])\n else:\n line_index_dict[count] = len(''.join(line))\n lines.append(line)\n self.lines = lines\n self.line_index_dict = line_index_dict\n self.document = ''.join(lines)\n d_dict = self.degree_list()\n for k,v in d_dict.items():\n result_dict = {}\n result_dict[\"degree\"] = k\n result_dict[\"year\"] = self.match_line_dict(int(v), self.year_list())\n result_dict[\"college\"] = self.match_line_dict(int(v), self.college_list())\n result_dict[\"board\"] = self.match_line_dict(int(v), self.board_list())\n result.append(result_dict) \n return(result)\n \n\n# result = extract_phone_details(\"My number is 917755057892\")\n# print(result.extract_phone())\nimport time\nst = time.time()\nresult_q = extract_education_details('''College/Institutes Board/ University Year Aggregate B.Tech(CSE) A.I.E.T Lucknow U.P.T.U. Lucknow 2009-2013 68.92% M.Sc(Math) B.S Mehta Bharwari Kausambi C.S.J.M.U. Kanpur 2005-2007 53.80% B.Sc(PCM) R R P G Amethi Dr.R M L Avadh University Faizabad 2003-2005 51.50% H.S.C RRIC Amethi UP UP Board 2001-2002 58.40% S.S.C SSPIC Amethi UP UP Board 1999-2000 60.00% Aggregate 68.92% Branch Computer Science.''')\nprint(result_q.extract_qualification())\n \nprint(time.time()-st)\n\n\n","sub_path":"src/extract_details.py","file_name":"extract_details.py","file_ext":"py","file_size_in_byte":8358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215471583","text":"import sphinx_autobuild\nimport os\nimport sys\nfrom contextlib import contextmanager\nimport base64\nfrom livereload import Server\nimport BaseHTTPServer\nfrom SimpleHTTPServer import SimpleHTTPRequestHandler\n\nkey = \"\"\nauth_file = '.credentials'\nbuild_folder = \"_build/html\"\n\nclass AuthHandler(SimpleHTTPRequestHandler):\n def do_HEAD(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_AUTHHEAD(self):\n self.send_response(401)\n self.send_header('WWW-Authenticate', 'Basic realm=\\\"Restricted area\\\"')\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n global key\n if self.headers.getheader('Authorization') == None:\n self.do_AUTHHEAD()\n self.wfile.write('Credentials required.')\n pass\n elif self.headers.getheader('Authorization') == 'Basic '+key:\n SimpleHTTPRequestHandler.do_GET(self)\n pass\n else:\n self.do_AUTHHEAD()\n self.wfile.write('Credentials required.')\n pass\n\ndef test(HandlerClass = AuthHandler,\n ServerClass = BaseHTTPServer.HTTPServer):\n BaseHTTPServer.test(HandlerClass, ServerClass)\n\n\n@contextmanager\ndef pushd(newDir):\n previousDir = os.getcwd()\n os.chdir(newDir)\n yield\n os.chdir(previousDir)\n\nif __name__ == \"__main__\":\n\n source_dir = os.path.realpath(\".\")\n dest_dir = os.path.realpath(build_folder)\n\n\n if os.environ.get(\"ENV\") is not None :\n\n ignored_files = []\n\n with open(\".gitignore\", \"r\") as ins:\n for line in ins:\n 
ignored_files.append(os.path.abspath(line.rstrip()))\n\n ins.close()\n\n builder = sphinx_autobuild.SphinxBuilder(outdir=build_folder,\n args=[\"-b\",\"html\",source_dir,dest_dir],\n ignored=ignored_files)\n server = Server(\n watcher=sphinx_autobuild.LivereloadWatchdogWatcher(),\n )\n\n server.watch(source_dir, builder)\n\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n server.watch(dest_dir)\n\n builder.build()\n\n server.serve(port=8000, host='0.0.0.0', root=dest_dir)\n\n else:\n builder = sphinx_autobuild.SphinxBuilder(outdir=build_folder,\n args=[\"-b\",\"html\",source_dir,dest_dir])\n\n\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n builder.build()\n\n sys.argv = [\"nouser\", \"8000\"]\n auth = \"\"\n\n with open(auth_file) as f:\n auth = f.readlines()\n auth = auth[0].rstrip()\n\n key = base64.b64encode(auth)\n with pushd(dest_dir):\n test()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"227148650","text":"import pygame\nfrom pygame.locals import MOUSEBUTTONDOWN, Rect, QUIT\nfrom sys import exit\n\ndef desenhar_tabu():\n pygame.draw.line(tela, (255, 255, 255), (200, 0), (200, 600), 10)\n pygame.draw.line(tela, (255, 255, 255), (400, 0), (400, 600), 10)\n pygame.draw.line(tela, (255, 255, 255), (0, 200), (600, 200), 10)\n pygame.draw.line(tela, (255, 255, 255), (0, 400), (600, 400), 10)\n\ndef desenhar_peca(pos):\n global VEZ\n x, y = pos\n if VEZ == 'JOGADOR2':\n pygame.draw.circle(tela, (0, 0, 255), pos, 50)\n else:\n img = pygame.image.load('x.png').convert_alpha()\n imgR = pygame.transform.scale(img, (100, 100))\n tela.blit(imgR, (x - 50, y - 50))\n\ndef testa_pos():\n for p in rec:\n if e.type == MOUSEBUTTONDOWN and p.collidepoint(mouse_pos):\n if p == rect1:\n confimar(0, [100, 100])\n if p == rect2:\n confimar(1, [300, 100])\n if p == rect3:\n confimar(2, [500, 100])\n if p == rect4:\n confimar(3, [100, 300])\n if p == rect5:\n confimar(4, [300, 300])\n if p == rect6:\n confimar(5, [500, 300])\n if p == rect7:\n confimar(6, [100, 500])\n if p == rect8:\n confimar(7, [300, 500])\n if p == rect9:\n confimar(8, [500, 500])\n\ndef confimar(indice, pos):\n global ESCOLHA, VEZ, espaco\n if marca_tabu[indice] == 'X':\n print('X')\n elif marca_tabu[indice] == 'O':\n print('O')\n else:\n marca_tabu[indice] = ESCOLHA\n desenhar_peca(pos)\n print(marca_tabu)\n if VEZ == 'JOGADOR1':\n VEZ = 'JOGADOR2'\n else:\n VEZ = 'JOGADOR1'\n espaco +=1\n\ndef teste_vitoria(l):\n return ((marca_tabu[0] == l and marca_tabu[1] == l and marca_tabu[2] == l) or\n (marca_tabu[3] == l and marca_tabu[4] == l and marca_tabu[5] == l) or\n (marca_tabu[6] == l and marca_tabu[7] == l and marca_tabu[8] == l) or\n (marca_tabu[0] == l and marca_tabu[3] == l and marca_tabu[6] == l) or\n (marca_tabu[1] == l and marca_tabu[4] == l and marca_tabu[7] == l) or\n (marca_tabu[2] == l and marca_tabu[5] == l and marca_tabu[8] == l) or\n (marca_tabu[0] == l and marca_tabu[4] == l and marca_tabu[8] == l) or\n (marca_tabu[2] == l and marca_tabu[4] == l and marca_tabu[6] == l))\n\ndef texto_vitoria(v):\n arial = pygame.font.SysFont('arial', 58)\n mensagem = 'JOGADOR {} VENCEU'.format(v)\n\n if v == 'EMPATE':\n mens_vitoria = arial.render('DEU VELHA', True, (0, 255, 0), 0)\n tela.blit(mens_vitoria, (115, 265))\n else:\n mens_vitoria = arial.render(mensagem, True, (0, 255, 0), 0)\n tela.blit(mens_vitoria, (0, 265))\n\ndef reset():\n global ESCOLHA, ESTADO, 
VEZ, marca_tabu, espaco\n ESTADO = 'JOGANDO'\n VEZ = 'JOGADOR1'\n ESCOLHA = 'X'\n espaco = 0\n marca_tabu = [\n 0, 1, 2,\n 3, 4, 5,\n 6, 7, 8\n ]\n tela.fill(0)\n\ndef pontos(pontos1, pontos2):\n arial = pygame.font.SysFont('mingliuextbpmingliuextbmingliuhkscsextb', 30)\n jogador1 = 'Jogador1 = {}'.format(pontos1)\n jogador2 = 'Jogador2 = {}'.format(pontos2)\n\n jd1 = arial.render(jogador1, True, (188, 186, 186))\n jd2 = arial.render(jogador2, True, (188, 186, 186))\n tela.blit(jd1, (0, 0))\n tela.blit(jd2, (420, 0))\n\npygame.init()\n\ntela = pygame.display.set_mode((600, 600), 0, 32)\npygame.display.set_caption('Jogo da velha')\npygame.mixer.music.load('Tetris Theme A.ogg')\npygame.mixer.music.set_volume(0.5)\n\nESTADO = 'JOGANDO'\nVEZ = 'JOGADOR1'\nESCOLHA = 'X'\nespaco = 0\nmarca_tabu = [\n 0, 1, 2,\n 3, 4, 5,\n 6, 7, 8\n]\n\nrect1 = Rect((0, 0), (200, 200))\nrect2 = Rect((200, 0), (200, 200))\nrect3 = Rect((400, 0), (200, 200))\nrect4 = Rect((0, 200), (200, 200))\nrect5 = Rect((200, 200), (200, 200))\nrect6 = Rect((400, 200), (200, 200))\nrect7 = Rect((0, 400), (200, 200))\nrect8 = Rect((200, 400), (200, 200))\nrect9 = Rect((400, 400), (200, 200))\n\nrec = [\n rect1,rect2,rect3,rect4,\n rect5,rect6,rect7,rect8,rect9,\n]\n\npontos1, pontos2 = 0, 0\n\nprint(pygame.font.get_fonts())\npygame.mixer.music.play(-1, 0.0)\n\nwhile True:\n mouse_pos = pygame.mouse.get_pos()\n if ESTADO == 'JOGANDO':\n desenhar_tabu()\n pontos(pontos1, pontos2)\n\n for e in pygame.event.get():\n if e.type == QUIT:\n pygame.quit()\n exit()\n if e.type == MOUSEBUTTONDOWN:\n if VEZ == 'JOGADOR1':\n ESCOLHA = 'X'\n testa_pos()\n else:\n ESCOLHA = 'O'\n testa_pos()\n\n if teste_vitoria('X'):\n print('X VENCEU')\n texto_vitoria('X')\n ESTADO = 'RESET'\n pontos1 += 1\n\n elif teste_vitoria('O'):\n print('O VENCEU')\n texto_vitoria('O')\n ESTADO = 'RESET'\n pontos2 +=1\n\n elif espaco >= 9:\n print('EMPATE')\n texto_vitoria('EMPATE')\n ESTADO = 'RESET'\n\n else:\n for u in pygame.event.get():\n if u.type == QUIT:\n pygame.quit()\n exit()\n if u.type == MOUSEBUTTONDOWN:\n reset()\n desenhar_tabu()\n\n pygame.display.flip()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"582803681","text":"import requests\n\n#send a basic request first and get some basic information we need:\n#video total count, video per page\n#api request: http://api.bilibili.com/archive_rank/getarchiverankbypartion\n#params: type(json), tid, pn(should be page number)\n\napiAddr = 'http://api.bilibili.com/archive_rank/getarchiverankbypartion'\nbasicParams = {'type':'json', 'tid':'27', 'pn':'1'} \nresp = requests.get(apiAddr, params=basicParams)\nrespJson = resp.json()\nprint('count=%s, size=%s' % (respJson['data']['page']['count'], respJson['data']['page']['size']))\n\n#record total count of video, video count per page, the request count we will send\ntotalCount = int(respJson['data']['page']['count'])\ncountPerPage = int(respJson['data']['page']['size'])\n\nrequestCount = -1\nif totalCount%countPerPage == 0:\n\trequestCount = totalCount/countPerPage\nelse:\n\trequestCount = (totalCount/countPerPage) + 1\n\nprint('request count = %d' % requestCount)\n\n\n#create video object and sqlalchemy opration related\n#video column: id, author, title, create date, play.\n\nimport sqlalchemy\nfrom datetime import datetime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom 
sqlalchemy import Column, Integer, String, DateTime\n\nengine = create_engine('sqlite:///blog.db', echo=False)\nBase = declarative_base()\n\n#some time the play count may be '--', so set it to Integer is not very make sense\n#some time create date is '--', and script will crush down, do some enhance\nclass Video(Base):\n\t__tablename__='videos'\n\n\tid = Column(Integer, primary_key=True)\n\ttitle = Column(String)\n\tauthor = Column(String)\n\tcreate_date = Column(DateTime, default=datetime.now)\n\tplay = Column(Integer)\t\n\n\tdef __rper__(self):\n\t\treturn 'video: \\n title=%s, author=%s, create_date=%s, play=%d' % (self.title, self.author, self.create_date, self.play)\n\nBase.metadata.create_all(engine)\n\n#print start time for time calculate\nprint('Start time ---------> %s' % datetime.now())\n\n#this part is for session management\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n#create a new video object\ndef createVideoObject(archive):\n\ttimeformat = '%Y-%m-%d %H:%M'\n\ttitle = archive['title']\n\tauthor = archive['author']\n\tif (archive['create'] == '--'):\n\t\tcreate_date = datetime.now()\n\telse:\n\t\tcreate_date = datetime.strptime(archive['create'], timeformat)\n\tif (archive['play'] == '--'):\n\t\tplay = 0\n\telse:\n\t\tplay = int(archive['play'])\n\t#print('play count = %s, title=%s, author=%s, create=%s' % (play, title, author, create_date))\n\treturn Video(title=title, author=author, create_date=create_date, play=play)\n\t\n\n\n#loop for create video object and insert them into db\ndef writeToDB(json, pageSize):\n\tstartCount = 0\n\twhile (startCount < pageSize):\n\t\tarchive = json['data']['archives'][startCount]\n\t\tvideo = createVideoObject(archive)\n\t\tsession.add(video)\t \n\t\tstartCount += 1\n\n\tsession.commit()\n\n#loop fetch data from bilibili api\ndef fetchVideoList(pageNumber):\n\tbasicParams['pn'] = str(pageNumber)\n\tresp = requests.get(apiAddr, basicParams)\n\twriteToDB(resp.json(), countPerPage)\n\n#fetch all video information\ncounter = 1\nwhile (counter < requestCount):\n\tfetchVideoList(counter)\n\tcounter += 1\n\n#print stop time for time calculate\nprint('Stop time ---------> %s' % datetime.now())\n","sub_path":"biliSpider_v0.1.py","file_name":"biliSpider_v0.1.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"590098930","text":"from flask import Flask, jsonify, request, redirect, render_template, g, url_for, send_file, Response\nfrom flask_cors import CORS\nimport sqlite3\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/eleve/*\": {\"origins\": \"*\"}})\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndef connect_db():\n sql = sqlite3.connect('gestioneleves.sqlite3')\n sql.row_factory = dict_factory # sqlite3.Row avoir un dict plutot qu'un tuple pour une ligne\n return sql\n\n\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\n@app.teardown_appcontext\ndef close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n# route pour voir les eleves : get @/eleve\n@app.route('/eleve', methods=['GET'])\ndef get_eleves():\n db = get_db()\n eleve_cur = db.execute('select id, nom, prenom, adresse, idclasse from eleve')\n eleves = eleve_cur.fetchall()\n if len(eleves) == 0:\n return 
Response(response=None, status=403, mimetype='application/json')\n else:\n return jsonify(eleves)\n\n# route pour voir 1 eleve : get @/eleve/id\n@app.route('/eleve/')\ndef getOneEleve(eleve_id):\n db = get_db()\n\n req = f'select e.id, e.nom, e.prenom, e.adresse, e.idclasse,\\\n m.id as idMatiere, m.nom as nomMatiere \\\n from eleve e inner join matiere m on e.id = m.ideleve \\\n where e.id = {eleve_id}'\n\n eleve_cur = db.execute(req)\n eleves = eleve_cur.fetchall()\n\n return jsonify(eleves)\n\n@app.route('/eleve', methods=['POST'])\n # Methodes avec recuper un json dans postman\ndef post_eleve():\n recup = request.get_json()\n\n if 'nom' not in recup or 'prenom' not in recup or 'adresse' not in recup or 'idclasse' not in recup:\n return jsonify({'status': 'no data in the request'})\n nomR = recup['nom']\n prenomR = recup['prenom']\n adresseR = recup['adresse']\n idclasseR = recup['idclasse']\n\n db = get_db()\n\n req = f\"insert into eleve(nom, prenom, adresse, idclasse) values('{nomR}','{prenomR}','{adresseR}', '{idclasseR}')\"\n db.execute(req)\n db.commit()\n\n return jsonify({'status': 'ok'})\n\n\n# route pour modifier un eleve\n@app.route('/eleve', methods=['PUT'])\ndef put_eleve(eleve_id):\n # http: // 127.0.0.1: 5000 / eleve /2?nom = Potter\n nom = request.args.get('nom')\n prenom = request.args.get('prenom')\n adresse = request.args.get('adresse')\n\n if nom is None and prenom is None and adresse is None:\n return jsonify({'status' : 'no data in the request'})\n\n db = get_db()\n if nom is not None:\n db.execute(f\"UPDATE eleve set nom = '{nom}' where id= {eleve_id}\")\n db.commit()\n return 'This updates a eleve by ID.'\n if prenom is not None:\n db.execute(f\"UPDATE eleve set prenom = '{prenom}' where id= '{eleve_id}'\")\n db.commit()\n return 'This updates a eleve by ID.'\n if adresse is not None:\n db.execute(f\"UPDATE eleve set adresse = '{adresse}' where id= '{eleve_id}'\")\n db.commit()\n return 'This updates a eleve by ID.'\n return jsonify({'status': 'ok'})\n\n\n# route pour supprimer un eleve\n@app.route('/eleve/', methods=['DELETE'])\ndef delete_eleve(eleve_id):\n db = get_db()\n\n eleve_cur = db.execute(f\"select * from eleve e where e.id = {eleve_id}\")\n eleves = eleve_cur.fetchall()\n\n mat_cur = db.execute(f\"select * from matiere where ideleve = {eleve_id}\")\n matieres = mat_cur.fetchall()\n\n if len(eleves) == 0:\n # return Response(response=json.dumps('No student'), status=403, mimetype='application/json')\n return jsonify({'message':'No such student'}),403\n elif len(matieres) != 0:\n return Response(response=json.dumps('No authorization'), status=500, mimetype='application/json')\n else:\n db.execute(f\"delete from eleve where id = '{eleve_id}'\")\n db.commit()\n return Response(response=json.dumps('Deleted'), status=200, mimetype='application/json')\n # return jsonify({'message':'Student deleted'}), 200\n\n\n# route pour voir les eleves et leurs matieres\n@app.route('/eleveM', methods=['GET'])\ndef get_elevesM():\n db = get_db()\n eleve_cur = db.execute('select e.nom, e.prenom, c.nom as nomClasse ,\\\n m.nom as nomMatiere, m.id as idMatiere , n.nom as nomNote, n.valeur \\\n from classe c join eleve e on c.id = e.idclasse \\\n join matiere m on e.id = m.ideleve \\\n join note n on m.id = n.idmatiere ')\n eleves = eleve_cur.fetchall()\n\n return jsonify(eleves)\n\n\n# route pour voir les notes de l'eleve dans une matiere\n # get @eleve/id/matiere\n@app.route('/eleve//', methods=['GET'])\ndef eleveMatiere(eleve_id,matiere_id):\n db = get_db()\n eleve_cur = 
db.execute(f\"select e.nom, e.prenom, c.nom as nomClasse , m.nom as nomMatiere, n.nom as nomNote, n.valeur \\\n from classe c join eleve e on c.id = e.idclasse \\\n join matiere m on e.id = m.ideleve \\\n join note n on m.id = n.idmatiere where e.id = {eleve_id} and m.id = {matiere_id}\")\n eleves = eleve_cur.fetchall()\n\n return jsonify({'eleves': eleves})\n\n\n# faire le radar d'un eleve avec sa moyenne\n@app.route('/eleve/radar/', methods=['GET'])\ndef eleve_radar(eleve_id):\n db = get_db()\n req = f\" SELECT MoyenneMatieres.eleveid, MoyenneMatieres.matierenom,MoyenneMatieres.matierecoeff,MoyenneMatieres.moyenne,\\\n eleve.nom, eleve.prenom \\\n FROM MoyenneMatieres JOIN eleve ON MoyenneMatieres.eleveid=eleve.id \\\n WHERE eleve.id={eleve_id}\"\n eleve_cur = db.execute(req)\n moyenneR = eleve_cur.fetchall()\n\n listMatElev = []\n listMoyElev = []\n listNomElev = []\n listPrenomElev = []\n for m in moyenneR:\n matNom = m['matierenom']\n listMatElev.append(matNom)\n moy = m['moyenne']\n listMoyElev.append(moy)\n nomElev = m['nom']\n listNomElev.append(nomElev)\n prenomElev = m['prenom']\n listPrenomElev.append(prenomElev)\n\n listMatElev = np.concatenate((listMatElev, [listMatElev[0]]))\n listMoyElev = np.concatenate((listMoyElev, [listMoyElev[0]]))\n\n eleveN = np.unique(listNomElev)[0]\n eleveP = np.unique(listPrenomElev)[0]\n print(eleveN)\n\n angles = np.linspace(0, 2 * np.pi, len(moyenneR), endpoint=False)\n angles2 = np.concatenate((angles, [angles[0]]))\n fig = plt.figure()\n ax = fig.add_subplot(111, polar=True)\n ax.plot(angles2, listMoyElev, 'o-', linewidth=2, label=\"Elève\")\n ax.fill(angles2, listMoyElev, alpha=0.2)\n ax.set_thetagrids(angles2 * 180 / np.pi, listMatElev)\n plt.yticks([2, 4, 6, 8, 10, 12, 14, 16, 18], color=\"grey\", size=7)\n plt.ylim(0, 20)\n ax.set_title(f'{eleveN}, {eleveP}')\n ax.grid(True)\n plt.legend(loc='upper right')\n plt.savefig(\"radar_tmp.png\")\n\n return send_file(\"radar_tmp.png\", attachment_filename='fig_eleve{}.png'.format(eleve_id))\n\n\n# route pour ajouter une matiere d'un eleve\n# route pour modifier une matiere d'un eleve\n# route pour supprimer une matiere d'un eleve\n\n# route pour ajouter une note\n# route pour modifier une note\n# route pour supprimer une note\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"flask_api.py","file_name":"flask_api.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455439148","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 23 21:50:53 2017\n\n@author: Martin Kamp Dalgaard\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\nM = 92\nl = M+1\nFs = 10\n\nN = 2**15\ntd = 1/float(Fs)\nt = td*N\n\nn = np.linspace(0,M,M+1)\nx = np.linspace(-np.pi,np.pi,len(n))\nxf = np.linspace(0,10*np.pi,len(n))\nsamples = 2*np.pi*np.linspace(0,t,N)\nbins = np.linspace(0,Fs/2,N/2)\n\ndelta = (np.pi/15)\n\nf1 = (np.pi/2.) - delta\nf2 = (np.pi/2.) + delta\n\ndef h(n,M,f1,f2):\n hd = np.zeros(len(n))\n for i in range(len(n)):\n if n[i] == M/2:\n hd[i] = 1 - (f2 - f1)/np.pi\n else:\n hd[i] = (np.sin(f1*(n[i] - M/2.)) / (np.pi*(n[i] - M/2.))) \\\n - (np.sin(f2*(n[i] - M/2.)) / (np.pi*(n[i] - M/2.)))\n return hd\n\ndef ha(n,M,a): # Hanning window if a = 0.5. 
Hamming window if a = 0.54.\n w = np.zeros(len(n))\n for i in range(len(n)):\n if abs(x[i]) <= M/2.:\n w[i] = a - (1 - a)*np.cos((2*np.pi*n[i])/M)\n else:\n w[i] = 0\n return w\n\ndef blackman(n,M):\n w = np.zeros(len(n))\n for i in range(len(n)):\n w[i] = 0.42 - 0.5*np.cos((2*np.pi*n[i])/M) + 0.8*np.cos((4*np.pi*n[i])/M)\n return w \n\nw = ha(n,M,0.54)\nhd = h(n,M,f1,f2)\n\ndef fft(x):\n return np.fft.fft(x)\n\n#hn = np.pad(hd * w,(0,N-M),'constant',constant_values=0)\nhn = hd * w\n\nH = np.abs(fft(hn))\n\n\na = 2*np.pi\n\ndef sig(x):\n return np.sin(np.pi/3.*x) + np.sin(np.pi/2.*x+2*np.pi/3.) + np.sin(3*np.pi/4.*x+4*np.pi/3.)\n\ns = sig(samples)\nideal = sig(samples) - np.sin(np.pi/2.*samples+2*np.pi/3.)\n\n#s_f = np.convolve(s,hn)\n#S_F = fft(s)*fft(hn[:N])\n#s_f2 = np.fft.ifft(S_F)\n\ntid_inter = 300\n\n#plt.plot(np.abs(np.fft.fft(hd)))\n\n#plt.plot(samples[M/2.:tid_inter],s[0:tid_inter-M/2])\n#plt.plot(samples[M/2:tid_inter],ideal[0:tid_inter-M/2])\n#plt.plot(samples[M/2.:tid_inter],s_f[M/2.:tid_inter])\n\n#plt.plot(samples[:tid_inter],s_f2[:tid_inter])\n\n#plt.plot(bins,np.abs(fft(s)[0:N/2]/np.max(np.abs(fft(s)))))\n#plt.plot(bins,np.abs(fft(hn)[0:N/2]))\n\nplt.figure(2)\nplt.plot(x, H)\nplt.axis([0,np.pi,0,2])\nplt.title('Our bandstop filter frequency response')\nplt.xlabel('Frequency [radians / second]')\nplt.ylabel('Amplitude')\nplt.axvline(f1*(2*np.pi), color='yellow') # lower cutoff frequency\nplt.axvline(f2*(2*np.pi), color='yellow') # upper cutoff frequency\nplt.axvline(np.pi/2, color='red') # frequency to be eliminated\nplt.axvline(np.pi/3, color='green') # frequency to keep\nplt.axvline(3*np.pi/4, color='green') # frequency to keep\n\n#==============================================================================\n# Scipy \n#==============================================================================\n\n#omega1_scp = (5*np.pi/12)\n#omega2_scp = (5*np.pi/8)\n#\n#N = [omega1_scp,omega2_scp]\n#plt.figure(3)\n#b, a = signal.butter(15, N, 'bandstop', analog=True)\n#w, h = signal.freqs(b, a)\n#plt.plot(w, abs(h), \"b-\", label = \"Bandstop filter\")\n#plt.title('Scipys bandstop filter frequency response')\n#plt.xlabel('Frequency [radians / second]')\n#plt.ylabel('Amplitude')\n#plt.legend(loc = \"lower left\")\n#plt.margins(0, 0.1)\n#plt.axis([0,np.pi,0,2])\n#plt.grid(which='both', axis='both')\n#plt.axvline(omega1_scp, color='yellow') # lower cutoff frequency\n#plt.axvline(omega2_scp, color='yellow') # upper cutoff frequency\n#plt.axvline(np.pi/2, color='red') # frequency to be eliminated\n#plt.axvline(np.pi/3, color='green') # frequency to keep\n#plt.axvline(3*np.pi/4, color='green') # frequency to keep\n#plt.show()","sub_path":"PythonScripts/4. 
semester/P4/FIR_Filter_Martin.py","file_name":"FIR_Filter_Martin.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"601642743","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\n\nimport sys\nimport pickle\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\n#config.gpu_options.per_process_gpu_memory_fraction=0.333\nconfig.intra_op_parallelism_threads=1\nconfig.inter_op_parallelism_threads=2\n\nset_session(tf.Session(config=config))\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers import Dropout, Reshape, Masking\nfrom keras.layers import SimpleRNN, LSTM, GRU\nfrom keras.layers import Conv1D\nfrom keras.layers.wrappers import TimeDistributed, Bidirectional\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.initializers import Orthogonal\nfrom keras.callbacks import EarlyStopping\nfrom keras import regularizers\n\nfrom keras import initializers\nfrom keras.utils import np_utils\nfrom keras.optimizers import RMSprop, Adam\nfrom keras.models import load_model\n\n# For reproducibilty\nnp.random.seed(7)\ntf.set_random_seed(7)\nimport os, random\nos.environ['PYTHONHASHSEED'] = '0'\nrandom.seed(7)\n\n# General parameter settings\ndata_path=sys.argv[1]\nread_raw_data=False\n\n# Create phone mapping\nsys.stderr.write('Create phone mappings...\\n')\nmapping={} # 48 -> 39 label mapping\nwith open(data_path+'phones/48_39.map','r') as m:\n for line in m:\n line=line.strip('\\n').split('\\t')\n mapping[line[0]]=line[1]\n\nchar_mapping={} # label -> char mapping\nwith open(data_path+'48phone_char.map','r') as m:\n for line in m:\n line=line.strip('\\n').split('\\t')\n char_mapping[line[0]]=line[2]\n\n# Create index mappings\nphone_index={} # label -> 48\nindex_phone=[] # 48 -> label\n'''\n# using 39 phones\nwith open('phone_index.map','r') as m:\n for line in m:\n line=line.strip('\\n').split('\\t')\n phone_index[line[0]]=line[1]\n index_phone.append(line[0])\n'''\n# using 48 phones\nwith open(data_path+'48phone_char.map','r') as m:\n for line in m:\n line=line.strip('\\n').split('\\t')\n phone_index[line[0]]=line[1]\n index_phone.append(line[0])\n\n# Read raw data if needed\nif read_raw_data:\n # Should have smarter approach here?\n sys.stderr.write('Load mfcc/train.ark...\\n')\n mfcc_df=pd.read_csv(data_path+'mfcc/train.ark',\n delim_whitespace=True,\n header=None,\n index_col=0)\n \n sys.stderr.write('Load fbank/train.ark...\\n')\n fbank_df=pd.read_csv(data_path+'fbank/train.ark',\n delim_whitespace=True,\n header=None,\n index_col=0)\n \n sys.stderr.write('Load label/train.lab...\\n')\n label_df=pd.read_csv(data_path+'label/train.lab',\n header=None,\n index_col=0)\n #label_df=label_df.replace(mapping)\n \n # pd.concat will align data automatically yet losing original order\n #mfcc_df=pd.concat([label_df,mfcc_df],axis=1,index=None)\n \n # list names of instances (sentences)\n sent_name=mfcc_df.index[mfcc_df.index.str.endswith('_1')].values\n sent_name=list(sent_name)\n for i in range(len(sent_name)):\n sent_name[i] = sent_name[i][:-2]\n \n sys.stderr.write('%d sentences to process\\n' % len(sent_name))\n \n # Preparing x_train, y_train\n # len(x_train)==len(y_train)== #sentences\n # x_train[0].shape would be (#frames, #dims==39+69)\n # 
y_train[0].shape would be (#frames,)\n \n x_train=[]\n y_train=[]\n i=0\n for name in sent_name:\n sys.stderr.write('Processing instance #%d...\\n' % i)\n i+=1\n \n mfcc=mfcc_df.iloc[mfcc_df.index.str.startswith(name)]\n mfcc=mfcc.as_matrix().astype(np.float32)\n \n fbank=fbank_df.iloc[fbank_df.index.str.startswith(name)]\n fbank=fbank.as_matrix().astype(np.float32)\n \n # Concatenate two features directly\n sent=np.concatenate((mfcc,fbank), axis=1)\n sys.stderr.write('shape=(%d,%d)...\\n' % (sent.shape[0],sent.shape[1]))\n \n x_train.append(sent)\n \n label=label_df.iloc[label_df.index.str.startswith(name)]\n label=label.as_matrix()\n y_train.append(label)\n \n # Free DataFrame memory\n del mfcc_df\n del label_df\n del fbank_df\n \n # Save data with pickle\n sys.stderr.write('Saving data...\\n')\n with open(data_path+'x_train.pickle', 'wb') as x_f,\\\n open(data_path+'y_train.pickle', 'wb') as y_f:\n pickle.dump(x_train, x_f)\n pickle.dump(y_train, y_f)\n\nelse:\n # Load data with pickle\n sys.stderr.write('Loading data...\\n')\n with open(data_path+'x_train.pickle', 'rb') as x_f,\\\n open(data_path+'y_train.pickle', 'rb') as y_f:\n x_train = pickle.load(x_f)\n y_train = pickle.load(y_f)\n\n# len(x_train)==len(y_train)==num_sentences\nif len(x_train)!=len(y_train):\n raise Exception\nnum_sent = len(x_train)\n\n# x_train[0].shape would be (num_frames, data_dim==39+69)\n# y_train[0].shape would be (num_frames,)\n\n# Model parameter settings\nframe_size=400 # padding size\nepochs=300\n\ndata_dim=108 # take both features into consideration\ndummy_class=48\nnum_classes=48+1 # +1 for dummy class\n\n# CNN settings\nnum_filters=512\nkernel_size=9\nkernel_size2=9\n\n# Pad 0. / Truncate x_train into timesteps=400\nsys.stderr.write('Processing x_train...\\n')\nfor i in range(num_sent):\n padding=np.zeros((frame_size,data_dim))\n index=-min(frame_size,x_train[i].shape[0])\n \n padding[index:] = (x_train[i][index:])[:,-data_dim:]\n # take only fbank feature\n x_train[i] = padding.copy()\n\n# x_train.shape should be (3696,400,69)\nx_train=np.array(x_train)\n\n# Pad / Truncate y_train to confirm dim.\n# use 48th as the index of \"dummy class\"\nsys.stderr.write('Processing y_train...\\n')\nfor i in range(num_sent):\n sent = y_train[i].flatten().tolist()\n labels = [phone_index[l] for l in sent]\n labels = np.array(labels)\n \n padding = np.full(frame_size,dummy_class)\n index=-min(frame_size,len(sent))\n padding[index:] = labels[index:]\n y_train[i] = np_utils.to_categorical(padding, num_classes=num_classes)\n\ny_train=np.array(y_train)\n\n# y_train should be list of (400,49) with len=3692\n#print(y_train[0].tolist(), y_train[1].tolist())\n#print(y_train.shape)\n#print(len(y_train))\n\n# Build models\nsys.stderr.write('Building NN model...\\n')\n\n#optimizer = RMSprop(clipnorm=1.)\n#optimizer = Adam(clipnorm=1.)\noptimizer = RMSprop()\nearly_stopping = EarlyStopping(patience=2,verbose=1)\nbatch_size = 64\n\nmodel = Sequential()\nmodel.add(TimeDistributed(Dense(512,activation='relu'),input_shape=(frame_size, data_dim)))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\nmodel.add(Conv1D(num_filters,\n kernel_size,\n padding='same',\n activation='relu',\n strides=1))\nmodel.add(BatchNormalization())\nmodel.add(Dense(num_filters, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv1D(num_filters,\n kernel_size2,\n padding='same',\n activation='relu',\n strides=1))\nmodel.add(BatchNormalization())\nmodel.add(Dense(num_filters, 
activation='linear'))\nmodel.add(BatchNormalization())\nmodel.add(LeakyReLU(alpha=.001))\nmodel.add(Dropout(0.3))\nmodel.add(Bidirectional(GRU(256,\n return_sequences=True,\n activation='relu')))\nmodel.add(BatchNormalization())\nmodel.add(Dense(256, activation='linear'))\nmodel.add(BatchNormalization())\nmodel.add(LeakyReLU(alpha=.001))\nmodel.add(Dropout(0.3))\n\nmodel.add(Dense(128, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.2))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(64, activation='sigmoid'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\nmodel.summary()\n\ntry:\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n #callbacks=[early_stopping],\n validation_split=0.1)\nexcept:\n model.save(sys.argv[2])\n \nmodel.save(sys.argv[2])","sub_path":"hw1/model_best.py","file_name":"model_best.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"62619356","text":"\"\"\"gongda URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom didi import views as didi_views\nfrom django.contrib.auth import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^photo', didi_views.photo),\n url(r'^manage', didi_views.manage),\n url(r'^slogin', didi_views.studentlogin),\n url(r'^tlogin', didi_views.teacherlogin),\n url(r'^mlogin', didi_views.managerlogin),\n url(r'^rupdate', didi_views.roomupdate),\n url(r'^rdel', didi_views.roomdelete),\n url(r'^student', didi_views.studentdown),\n url(r'^teacher', didi_views.teacherdown),\n url(r'^$', didi_views.login),\n\n\n # url(r'^teacherlogin', didi_views.teacherlogin),\n\n # url(r'^$', views.login),\n # url(r'^login/', didi_views.login),\n # url(r'^download$', didi_views.down),\n]\n","sub_path":"android/guaduatedesign/gongda/gongda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"342314274","text":"# coding=utf-8\nimport random\nfrom sklearn import preprocessing\n\n\ndef randomSplit(examples, num):\n remain = examples[:]\n groupnum = len(examples) / num\n groups = []\n for i in range(num - 1):\n group = random.sample(remain, groupnum)\n for g in group:\n remain.remove(g)\n groups.append(group)\n groups.append(remain)\n return groups\n\n\ndef randomSplit2(examples, ratio):\n remain = examples[:]\n num = int(len(examples) * ratio)\n trainset = random.sample(remain, num)\n for t in trainset:\n remain.remove(t)\n return [trainset, remain]\n\n\n# 有放回采样\ndef randomDraw(examples, num):\n samples = []\n for i in range(num):\n samples.append(examples[random.randint(0, len(examples) - 1)])\n return samples\n\n\n# 轮盘赌\ndef roulette(examples, num, P):\n samples = []\n for i in range(num):\n pointer = random.random()\n sum = 0.0\n for j in range(len(P)):\n sum += P[j]\n if sum > pointer:\n samples.append(examples[j])\n break\n return samples\n\n\ndef normalize(trainset):\n X = [e[0] for e in trainset]\n X = preprocessing.scale(X)\n # X = preprocessing.normalize(X, norm='l2')\n return [[X[i], trainset[i][1]] for i in range(len(trainset))]\n\n\ndef balance(examples):\n positive = [e for e in examples if e[1] == 1]\n negative = [e for e in examples if e[1] == 0]\n avelength = (len(positive) + len(negative)) / 2\n\n if len(positive) < avelength:\n copy = random.sample(positive, avelength - len(positive))\n positive = positive + copy\n negative = random.sample(negative, avelength)\n trainset = positive + negative\n elif len(positive) > avelength:\n copy = random.sample(negative, avelength - len(negative))\n negative = negative + copy\n positive = random.sample(positive, avelength)\n trainset = positive + negative\n return trainset\n","sub_path":"randomdata.py","file_name":"randomdata.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"178251705","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\nfrom django.contrib import admin\nadmin.autodiscover()\nimport se.views\nimport video.views\nimport friendpair.views\n\n\nurlpatterns = patterns(\"\",\n #url(r\"^$\", TemplateView.as_view(template_name=\"homepage.html\"), name=\"home\"),\n url(r\"^$\",video.views.homepage_video_list, name=\"home\"),\n 
url(r\"^homepage$\",se.views.personalPage),\n \n \n \n \n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^account/signup/$\", se.views.SignupView.as_view(), name=\"account_signup\"),\n url(r\"^account/settings/$\", se.views.SettingsView.as_view(), name=\"account_settings\"),\n url(r\"^account/\", include(\"account.urls\")),\n# url(r\"^ratings/\", include(\"agon_ratings.urls\")),\n\n url(r\"^upload/$\",video.views.upload),\n url(r\"^upload/success/$\",video.views.uploadSuccess),\n \n url(r\"^videoplay/(\\d+)$\",video.views.video_play),\n\n url(r\"^videoplay/(\\d+)/ratings$\",video.views.rate_video),\n url(r\"^videoplay/(\\d+)/comment$\",video.views.comment_video),\n url(r\"^videoModify/(\\d+)$\",video.views.video_modify),\n url(r\"^videoDelete/(\\d+)$\",video.views.video_delete),\n url(r\"^videoShare/(\\d+)$\",video.views.video_share),\n \n \n url(r\"^personalPage/(\\d+)$\",se.views.personalPage, name=\"home\"),\n url(r\"^personalPage/(\\d+)/unfollow$\",se.views.personalPageUnfollow),\n \n url(r\"^timeline$\",se.views.timeline,name=\"timeLine\"),\n url(r\"^timeline/all$\",se.views.timelineall,name=\"timeLine\"),\n \n url(r\"friend-manage\",friendpair.views.friendManage, name=\"friend-manage\")\n)\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"22140567","text":"from poker_play import *\r\n\r\n\r\ndef startAccount(players):\r\n money = {}\r\n for key in players:\r\n money[key] = 200\r\n return money,blinds\r\n\r\ndef smallBlinds(smallBlind,n):\r\n if n % 2 == 0:\r\n smallBlind = 2 + (2 * n)\r\n n += 1\r\n return smallBlind,n\r\n'''\r\ndef bettingRound(currentBid,playersAccount):\r\n print 'Current bid is ' + currentBid\r\n ans = raw_input('Do you want to raise, call, throw or all-in: ')\r\n'''\r\n","sub_path":"Poker/poker_betting.py","file_name":"poker_betting.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"420167571","text":"\"\"\"\nContains all signals related to SLO models\n\"\"\"\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\nfrom makeReports.models import (\n SLOInReport\n)\n\n@receiver(post_save,sender=SLOInReport)\ndef post_save_slo_update_numbering(sender,instance,created,**kwargs):\n \"\"\"\n Post save receiver that triggers numbers to be updated\n\n Args:\n sender (type): model type sending hook\n instance (SLOInReport): SLO updated\n created (bool): whether model was newly created\n \"\"\"\n if created:\n instance.report.numberOfSLOs +=1\n instance.report.save()\n instance.slo.numberOfUses += 1\n instance.slo.save()\n\n@receiver(post_delete,sender=SLOInReport)\ndef post_delete_slo_update_numbering(sender,instance,**kwargs):\n \"\"\"\n Updates the numbering of SLOs in the same report\n\n Args:\n sender (type): model type sending hook\n instance (SLOInReport): SLO deleted\n \"\"\"\n oldNum = instance.number\n if instance.slo.numberOfUses <= 1:\n instance.slo.delete()\n else:\n instance.slo.numberOfUses -= 1\n instance.slo.save()\n slos = SLOInReport.objects.filter(report=instance.report).order_by(\"number\")\n for slo in slos:\n if slo.number > oldNum:\n slo.number -= 1\n slo.save()\n instance.report.numberOfSLOs -= 1\n 
instance.report.save()","sub_path":"AACForm/makeReports/signals/slo_signals.py","file_name":"slo_signals.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"633795076","text":"from collections import defaultdict\nimport itertools\n\nwith open(\"../../../data/2018/1/data.txt\") as f:\n dfreqs = [int(line.strip()) for line in f.readlines()]\n\nfreqs = defaultdict(lambda: 0)\nfreq = 0\nfor dfreq in itertools.cycle(dfreqs):\n freq += dfreq\n if freqs[freq] == 1:\n print(freq)\n break\n else:\n freqs[freq] = 1\n","sub_path":"python/2018/1/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"154540435","text":"import threading\nimport functools\nfrom collections import deque\n\nimport events\nimport utils\nfrom utils import put, each, bunch\n\ndef check_started(f):\n @functools.wraps(f)\n def f_(self, *args, **kwargs):\n if not self.started:\n self.start()\n setattr(self, f.__name__, f.__get__(self, type(self)))\n return f(self, *args, **kwargs)\n return f_\n\nclass Environment(object):\n\n # step granularity\n Time = 2\n Priority = 1\n Thread = 0\n\n def __init__(self):\n self.threads = []\n self.timeline = build_timeline()\n self.now = 0\n self.started = False\n\n @property\n def finished(self):\n if not self.started:\n return False\n return all(each(self.threads).finished)\n\n @finished.setter\n def finished(self, val):\n self._finished = val\n\n @check_started\n def run(self):\n while not self.finished:\n self.step()\n\n @check_started\n def step(self, by=None):\n if by is None:\n by = self.Time\n threads = self.timeline.get(depth=by)\n self.now = threads[0].current_event.now\n each(threads).resume()\n\n def start(self):\n each(self.threads).start()\n self.started = True\n\n def process(self, *f, **kwargs):\n def process_(f):\n thread = Thread(self, f, **kwargs)\n self.threads.append(thread)\n return process_(f[0]) if f else process_\n\n def priority(self, val):\n return self.process(priority=val)\n\n def timeout(self, laps):\n self.schedule(events.Timeout(self, laps)).wait()\n\n def event(self):\n return events.Event(self)\n\n def schedule(self, event):\n if event.now is None:\n event.now = self.now\n self.timeline.put(event)\n event.scheduled = True\n return event\n\nclass Thread(object):\n\n def __init__(self, env, f, name=None, priority=0, daemon=False):\n if name is None:\n name = f.__name__\n self.name = name\n self.priority = priority\n self.current_event = None\n self.blocked = threading.Event()\n self.resumed = threading.Event()\n self.thread = threading.Thread(target=Thread.wrapped(env, f))\n # the actual threading.Thread is all daemon\n self.thread.daemon = True\n # Thread.daemon means not accounting in env.finished status\n # i.e. 
when all non-daemon Thread is finished, env.finished == True\n self.daemon = daemon\n self.thread.master = self\n self.ending = False\n\n @property\n def finished(self):\n return self.daemon or not self.thread.is_alive()\n\n def start(self):\n self.thread.start()\n self.until_blocked()\n\n def block(self, ending=False):\n self.blocked.set()\n self.ending = ending\n if not ending:\n self.resumed.wait()\n self.resumed.clear()\n\n def resume(self):\n self.resumed.set()\n self.until_blocked()\n\n def until_blocked(self):\n self.blocked.wait()\n self.blocked.clear()\n if self.ending:\n self.thread.join()\n\n @staticmethod\n def wrapped(env, f):\n def f_():\n env.schedule(events.Initialize(env, env.now)).wait()\n f()\n threading.current_thread().master.block(ending=True)\n return f_\n\n def __repr__(self):\n return '{} {}'.format(self.name, self.current_event.name)\n\nfrom batchheap import BatchHeap\nfrom containerstack import ContainerStack\ndef build_timeline():\n\n def cvt(events, waiters):\n for event in events.get():\n event.trigger()\n for waiter in event.waiters:\n waiters.put(waiter)\n\n r = ContainerStack()\n r.add_layer(container=BatchHeap(lambda x,y: x.now < y.now),\n get=BatchHeap.pop,\n put=BatchHeap.push,\n broker=cvt)\n r.add_layer(container=BatchHeap(lambda x,y: x.priority > y.priority),\n get=BatchHeap.pop,\n put=BatchHeap.push)\n r.add_layer(container=deque(),\n get=deque.popleft,\n put=deque.extend)\n return r\n\nenv = Environment()\n","sub_path":"mint/_versions/20151129183818 gui switch/mint/timpy/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"336521294","text":"#!/usr/bin/env python\n#!@Author:lgx\n#!@时间:2018-11-22 13:49\n#!@Filename:studyOfParamiko.py\n\nimport paramiko\n'''\nparamiko是一个用于做远程控制的模块,使用该模块可以对远程服务器进行命令或文件操作,\n值得一说的是,fabric和ansible内部的远程管理就是使用的paramiko来现实。\n\n'''\n\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nssh.connect('192.168.1.128',22,'lgx','liao0121')\nstdin,stdout,stderr=ssh.exec_command('df')\nprint (stdout.read())\nssh.close()\n\n","sub_path":"newStudyOfPython/2018-11-21/studyOfModule/studyOfParamiko.py","file_name":"studyOfParamiko.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"96551098","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Based on\n# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641\n\n\nclass DepthSmoothnessLoss(nn.Module):\n r\"\"\"Criterion that computes image-aware depth smoothness loss.\n\n .. 
math::\n\n \\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|\n \\partial_x I_{ij} \\right \\|} + \\left |\n \\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}\n\n\n Shape:\n - Depth: :math:`(N, 1, H, W)`\n - Image: :math:`(N, 3, H, W)`\n - Output: scalar\n\n Examples::\n\n >>> depth = torch.rand(1, 1, 4, 5)\n >>> image = torch.rand(1, 3, 4, 5)\n >>> smooth = tgm.losses.DepthSmoothnessLoss()\n >>> loss = smooth(depth, image)\n \"\"\"\n\n def __init__(self) -> None:\n super(DepthSmoothnessLoss, self).__init__()\n\n @staticmethod\n def gradient_x(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :, :-1] - img[:, :, :, 1:]\n\n @staticmethod\n def gradient_y(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :-1, :] - img[:, :, 1:, :]\n\n def forward(self, depth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(depth):\n raise TypeError(\"Input depth type is not a torch.Tensor. Got {}\"\n .format(type(depth)))\n if not torch.is_tensor(image):\n raise TypeError(\"Input image type is not a torch.Tensor. Got {}\"\n .format(type(image)))\n if not len(depth.shape) == 4:\n raise ValueError(\"Invalid depth shape, we expect BxCxHxW. Got: {}\"\n .format(depth.shape))\n if not len(image.shape) == 4:\n raise ValueError(\"Invalid image shape, we expect BxCxHxW. Got: {}\"\n .format(image.shape))\n if not depth.shape[-2:] == image.shape[-2:]:\n raise ValueError(\"depth and image shapes must be the same. Got: {}\"\n .format(depth.shape, image.shape))\n if not depth.device == image.device:\n raise ValueError(\n \"depth and image must be in the same device. Got: {}\" .format(\n depth.device, image.device))\n if not depth.dtype == image.dtype:\n raise ValueError(\n \"depth and image must be in the same dtype. 
Got: {}\" .format(\n depth.dtype, image.dtype))\n # compute the gradients\n depth_dx: torch.Tensor = self.gradient_x(depth)\n depth_dy: torch.Tensor = self.gradient_y(depth)\n image_dx: torch.Tensor = self.gradient_x(image)\n image_dy: torch.Tensor = self.gradient_y(image)\n\n # compute image weights\n weights_x: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dx), dim=1, keepdim=True))\n weights_y: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dy), dim=1, keepdim=True))\n\n # apply image weights to depth\n smoothness_x: torch.Tensor = torch.abs(depth_dx * weights_x)\n smoothness_y: torch.Tensor = torch.abs(depth_dy * weights_y)\n return torch.mean(smoothness_x) + torch.mean(smoothness_y)\n\n\n######################\n# functional interface\n######################\n\n\ndef depth_smoothness_loss(\n depth: torch.Tensor,\n image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computes image-aware depth smoothness loss.\n\n See :class:`~torchgeometry.losses.DepthSmoothnessLoss` for details.\n \"\"\"\n return DepthSmoothnessLoss()(depth, image)\n","sub_path":"torchgeometry/losses/depth_smooth.py","file_name":"depth_smooth.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"488534378","text":"'''\nWe have created the seven folder containing the shape files (using ism_bbox_jsonToshp.py module), related to Lat-Lon scoop.\nWe store the folder names such as:\n\nWhy we do this: Because the shape files are very big in size and performing read operation for each record would not be feasible.\nHence we would like to arrange the lat lon in such a way that we select all records belonging to a particular Lat-Lon and\nread the shp file from disk and process all the records.\n'''\n\n\nimport os\nimport cv2\nimport numpy as np\nimport shapely.geometry as geom\nimport geopandas as gpd\n\n\nfrom config import pathDict\nfrom semantic_segmentation import utils as utl\nfrom semantic_segmentation.latlon_to_pixelXY import lonlatToPixel, get_image_corner_latlon_and_pxls_vals\n\nbbox_shape_folder_path = '/Users/sam/All-Program/App-DataSet/HouseClassification/shape_files/building_bbox/'\nscoop_arr = ['scoop_738_768', 'scoop_768_798', 'scoop_798_848', 'scoop_848_908', 'scoop_908_938', 'scoop_938_968', 'scoop_968_028']\n\nlat_scoop = dict(scoop_738_768 = '41.738, -87.803, 41.768, -87.510',\n scoop_768_798 = '41.768, -87.803, 41.798, -87.510',\n scoop_798_848 = '41.798, -87.803, 41.848, -87.510',\n scoop_848_908 = '41.848, -87.803, 41.908, -87.510',\n scoop_908_938 = '41.908, -87.803, 41.938, -87.510',\n scoop_938_968 = '41.938, -87.803, 41.968, -87.510',\n scoop_968_028 = '41.968, -87.803, 42.028, -87.510')\nzoom = 19\n\n\ndef image_corner_polygon_wraper(lat, lon, zoom):\n pxls_corner_arr, lonlat_corner_arr = get_image_corner_latlon_and_pxls_vals(lat, lon, zoom)\n tl_ll, tr_ll, bl_ll, br_ll = lonlat_corner_arr\n tl_pxl, tr_pxl, bl_pxl, br_pxl = pxls_corner_arr\n image_corner_polygon = geom.Polygon(np.array([tl_ll, tr_ll, br_ll, bl_ll, tl_ll]))\n return image_corner_polygon, tl_pxl\n\n\ndef get_search_polygons(df, scoop_latlon):\n query = ''\n for num, (i, j) in enumerate(scoop_latlon):\n query += '((lon_scoop==\"%s\") & (lat_scoop==\"%s\"))' % (str(i), (j))\n if num != (len(scoop_latlon) - 1):\n query += ' | '\n return df[df.eval(query)]\n\n\ndef get_parcels_inside_image(df, image_polygon):\n list_parcel_ids = []\n for parcel_id, lon_cent, lat_cent in np.array(df[['parcel_id', 'lon_center', 'lat_center']]):\n 
lon_cent = float(lon_cent)\n lat_cent = float(lat_cent)\n is_contains = image_polygon.contains(geom.Point([lon_cent, lat_cent]))\n if is_contains:\n # print (parcel_id)\n list_parcel_ids.append(parcel_id)\n \n if len(list_parcel_ids) > 0:\n return df[df['parcel_id'].isin(list_parcel_ids)]\n else:\n return []\n\n\ndef draw_polygons(img, xy_arr):\n alpha = 0.5\n xy_arr = np.array(xy_arr, np.int32)\n layer = img.copy()\n output = img.copy()\n layer = cv2.fillPoly(layer, [xy_arr], (0,0,255))#, offset=0.4)\n output = cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)\n return output\n\n\ndef get_overlayed_image(parcels_in_image, tl_pxl, img):\n a = []\n obj_ll_to_pxl = lonlatToPixel(zoom=19)\n for parcel_id, polygon_geom in np.array(parcels_in_image[['parcel_id', 'geometry']]):\n points_tuple = polygon_geom.exterior.coords.xy\n lon_arr, lat_arr = list(points_tuple[0]), list(points_tuple[1])\n each_parcel_xy_arr = []\n for lon, lat in zip(lon_arr, lat_arr):\n mx, my = obj_ll_to_pxl.lonlat_to_meters(lon,lat)\n px, py = obj_ll_to_pxl.meters_to_pixels(mx, my)\n # print (tl_pxl)\n # print (px, py)\n img_x, img_y = obj_ll_to_pxl.convert_map_pxl_to_img_pxl(tl_pxl, [px, py])\n each_parcel_xy_arr.append([int(np.round(img_x)), int(np.round(img_y))])\n # print ('each_parcel_xy_arr ', each_parcel_xy_arr)\n # print('img ', img)\n img = draw_polygons(img, xy_arr=each_parcel_xy_arr)\n return img\n\n\n\ndef overlay_parcel_on_images(data_to_model):\n for sc_cnt,scoop in enumerate(scoop_arr):\n # if sc_cnt == 0:\n # continue\n min_lat, max_lon, max_lat, min_lon = [float(i.strip()) for i in lat_scoop[scoop].split(',')]\n # print(min_lat, max_lon, max_lat, min_lon)\n data_ = data_to_model[\n (data_to_model['lat'] >= min_lat) & (data_to_model['lat'] < max_lat) & (data_to_model['lon'] >= max_lon) & (\n data_to_model['lon'] < min_lon)]\n property_parcel = gpd.read_file(os.path.join(bbox_shape_folder_path, lat_scoop[scoop]))\n print('Initiating Run for scoop %s : DATA SHAPE %s'%(scoop, str(property_parcel.shape)))\n \n for rcnt, (pin, lat, lon, label) in enumerate(np.array(data_[['pin', 'lat', 'lon', 'indicator']])):\n # if rcnt<700:\n # continue\n # Get the Google map static images using Pin from the disk\n if label == 'Likely Land':\n output_path = os.path.join(pathDict['google_overlayed_image_path'], 'land', '%s.jpg'%str(pin))\n static_image_path = os.path.join(pathDict['google_aerial_image_path'], 'land', '%s.jpg'%str(pin))\n img = cv2.imread(static_image_path)\n elif label == 'Likely House':\n output_path = os.path.join(pathDict['google_overlayed_image_path'], 'house', '%s.jpg' % str(pin))\n static_image_path = os.path.join(pathDict['google_aerial_image_path'], 'house', '%s.jpg' % str(pin))\n img = cv2.imread(static_image_path)\n else:\n raise ValueError('The image is not indicated as \"Likely Land\" or \"Likely House\"')\n \n \n # If the image is not present and None is returned handle such cases here\n if type(img).__module__ != np.__name__:\n continue\n \n lat = float(lat)\n lon = float(lon)\n image_polygon, tl_pxl = image_corner_polygon_wraper(lat, lon, zoom)\n # print(image_polygon)\n \n scoop_lon, scooop_lat = utl.getscoopLonLat(lonIN=lon, latIN=lat, decimalPlaces=1000)\n scoop_latlon = utl.getscoopSearchItems(scoopLon=scoop_lon, scoopLat=scooop_lat, decimalPlaces=1000)\n \n search_polygons = get_search_polygons(property_parcel, scoop_latlon)\n # print(search_polygons.shape)\n \n parcels_in_image = get_parcels_inside_image(search_polygons, image_polygon)\n # print(len(parcels_in_image))\n\n if 
len(parcels_in_image) >0:\n overlayed_image = get_overlayed_image(parcels_in_image, tl_pxl, img)\n else:\n overlayed_image = img\n # print (os.path.join(pathDict['google_overlayed_image_path'], '%s.jpg'%str(pin)))\n\n cv2.imwrite(output_path, overlayed_image)\n\n if ((rcnt+1) % 100) == 0:\n print(\"TOTAL IMAGES PARSED ======== %s\"%(str(rcnt)))\n\n print('')\n # break\n #\n \n \n \ndebugg = False\nif debugg:\n google_stats_data = utl.collateData(pathDict['google_aerial_stats_path'])\n print('Shape: Google Stats images ', google_stats_data.shape)\n\n data_to_model = google_stats_data[(google_stats_data['loc_type'] != 'RANGE_INTERPOLATED') & (\n google_stats_data['city'].str.lower().str.strip().str.match('chicago')) & (\n ~google_stats_data['address'].str.lower().str.strip().str.match('0'))]\n print('Shape: Images for Overlaying: ', data_to_model.shape)\n overlay_parcel_on_images(data_to_model)","sub_path":"semantic_segmentation/overlay_building_bbox.py","file_name":"overlay_building_bbox.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"325320633","text":"# VERSION OCTOBER 7, 2018\n\nfrom hashlib import sha256\nfrom time import time\n\nimport labs109\nimport itertools\nimport random\nimport re\nimport sys\n\n# Make sure that the source code file does not contain even one\n# print or input statement, except after the part that is run when\n# the module is run as a standalone application.\n\ndef has_banned_stuff(filename, banned = ['input']):\n file = open(filename, encoding='utf-8')\n for line in file:\n if line.strip().startswith(\"if __name__ ==\"):\n break # anything goes after that\n for word in banned:\n if line.find(word) > -1:\n return True\n file.close()\n return False\n\n# Runs the function f for all testcases, calculating SHA256 checksum\n# of the results. If the checksum matches the expected, return the\n# running time, otherwise return -1. If expected == None, print out\n# the computed checksum instead.\n\ndef test_one_function(f, testcases, expected = None):\n print(f.__name__ + \": \", end=\"\", flush = True)\n chk = sha256()\n starttime = time()\n crashed = False\n for elem in testcases:\n try:\n result = f(*elem)\n except: # catch any exception thrown by the function to be tested\n crashed = True\n break\n # If the result is a dictionary, canonize its representation first.\n if type(result) == type({}):\n result = [(key, result[key]) for key in result]\n result.sort(key = lambda x: x[0])\n # Use the result to update the checksum.\n chk.update(str(result).encode('utf-8'))\n totaltime = time() - starttime\n digest = chk.hexdigest()\n if not crashed and not expected:\n print(digest[:50])\n return totaltime\n elif not crashed and digest[:len(expected)] == expected:\n print(\"Success in %.3f seconds.\" % totaltime)\n return totaltime\n elif crashed:\n print(f\"CRASH: {sys.exc_info()[0]}\")\n else:\n print(\"Failed the test with checksum mismatch.\".upper())\n return -1\n\n# Runs the tests for all functions in the suite, returning the\n# count of how many of those were implemented and passed the test.\n\ndef test_all_functions(module, suite, modulename = None):\n if modulename:\n if has_banned_stuff(modulename):\n print(\"The file %s contains banned stuff in functions. 
Exiting.\" %\n modulename)\n return 0\n count = 0\n total = 0\n for (fname, testcases, expected) in suite:\n try:\n f = module.__dict__[fname]\n except KeyError:\n #print(\"Function [%s] not implemented, skipping...\" % fname)\n continue\n total += 1\n result = test_one_function(f, testcases, expected)\n if result >= 0:\n count += 1\n print(\"%d out of %d implemented functions (of %d total) pass the tester.\"\n % (count, total, len(suite)))\n return count\n\ndef ryerson_letter_grade_generator():\n for i in range(0, 150):\n yield (i,)\n \ndef is_ascending_generator(n):\n for i in range(n):\n for seq in itertools.permutations(range(n)):\n yield [seq]\n\ndef double_until_all_digits_generator():\n for i in range(3000):\n yield (i,)\n\ndef group_equal_generator(seed):\n random.seed(seed)\n for i in range(1000):\n items = []\n ilen = random.randint(1, 20)\n for j in range(ilen):\n burst = random.randint(1, 10)\n it = random.randint(0, 1000)\n for k in range(burst):\n items.append(it)\n yield (items,)\n\ndef longest_palindrome_generator(seed):\n random.seed(seed)\n for i in range(1000):\n m = random.randint(5, 50)\n text = \"\"\n for j in range(m):\n text += random.choice([\"a\",\"b\",\"c\",\"d\"])\n yield (text, )\n\ndef caps_lock_stuck_generator():\n wap = open(\"warandpeace.txt\", encoding='utf-8')\n text = list(wap)\n wap.close()\n for line in text:\n yield (line,)\n \ndef values_to_keys_generator(seed):\n random.seed(seed)\n for i in range(1000):\n used_values = set()\n used_keys = set()\n dic = {}\n size = random.randint(5, 100)\n while len(dic) < size:\n key = random.randint(-1000, 1000)\n value = random.randint(-1000, 1000)\n if key in used_keys or value in used_values:\n continue\n used_keys.add(key)\n used_values.add(value)\n dic[key] = value\n yield (dic,)\n \ndef paragraph_lengths_generator():\n wap = open(\"warandpeace.txt\", encoding='utf-8')\n text = list(wap)\n wap.close()\n yield (text,)\n \ndef reverse_ascending_sublists_generator(seed):\n random.seed(seed)\n for i in range(1000):\n curr = []\n n = random.randint(0, 20)\n for j in range(n):\n curr.append(random.randint(0, 10000))\n yield (curr, )\n \ndef give_change_generator(seed):\n random.seed(seed)\n for i in range(100000):\n coins = [1]\n curr = 1\n c = random.randint(2, 5)\n for j in range(c):\n curr = curr + random.randint(3, 30)\n coins.append(curr)\n coins.reverse()\n yield (random.randint(1, 500), coins)\n \n\nsuits = ['clubs', 'diamonds', 'hearts', 'spades']\nranks = {'deuce' : 2, 'trey' : 3 , 'four' : 4, 'five' : 5,\n 'six' : 6, 'seven' : 7, 'eight' : 8, 'nine' : 9,\n 'ten' : 10, 'jack' : 11, 'queen' : 12, 'king' : 13,\n 'ace' : 14 }\n\ndeck = [ (rank, suit) for suit in suits for rank in ranks.keys() ]\n\ndef hand_is_badugi_generator(seed):\n random.seed(seed)\n for i in range(100000):\n yield (random.sample(deck, 4),)\n\ndef bridge_hand_shape_generator(seed):\n random.seed(seed)\n for i in range(20000):\n yield (random.sample(deck, 13),)\n\ndef winning_card_generator(seed):\n random.seed(seed)\n for i in range(10000):\n hand = random.sample(deck, 4)\n for trump in [\"spades\", \"hearts\", \"diamonds\", \"clubs\", None]: \n yield (hand, trump)\n\ndef hand_shape_distribution_generator(seed):\n random.seed(seed)\n hands = [random.sample(deck, 13) for i in range(10000)]\n yield [hands]\n\ndef milton_work_point_count_generator(seed):\n random.seed(seed)\n strains = suits + ['notrump']\n for i in range(50000):\n st = random.choice(strains)\n hand = random.sample(deck, 13)\n yield (hand, st)\n\ndef limited_alphabet(words, 
chars):\n pat = re.compile('^[' + chars + ']+$')\n result = []\n for word in words:\n if pat.match(word):\n result.append(word)\n return result\n\ndef sort_by_typing_handedness_generator():\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n words = limited_alphabet(words, \"abcdefghijklmnopqrstuvwxyz\")\n yield [words]\n\ndef letter_pair_freqs_generator():\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n words = limited_alphabet(words, \"abcdefghijklmnopqrstuvwxyz\")\n yield [words]\n\ndef possible_words_generator(seed):\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n random.seed(seed)\n for i in range(100):\n patword = random.choice(words)\n pat = \"\"\n for ch in patword:\n if random.randint(0, 99) < 60:\n pat += '*'\n else:\n pat += ch\n yield (words, pat)\n \ndef word_salad_generator(seed):\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n random.seed(seed)\n for i in range(100):\n leng = random.randint(3, 10)\n num = random.randint(3, 6)\n ingredients = []\n while len(ingredients) < num:\n word = random.choice(words)\n if len(word) == leng:\n ingredients.append(word)\n yield (words, ingredients) \n\ndef postfix_evaluate_generator(seed):\n for i in range(1000):\n exp = []\n count = 0\n while len(exp) < 5 or count != 1:\n if count > 1 and (count > 10 or random.randint(0, 99) < 50):\n exp.append(random.choice(['+', '-', '*', '/']))\n count -= 1\n else:\n exp.append(random.randint(1, 10))\n count += 1\n yield (exp, )\n\ndef create_list(d):\n if d < 1:\n return random.randint(1, 100)\n else:\n n = random.randint(0, 10 - d)\n return [create_list(d - random.randint(1, 3)) for i in range(n)]\n\ndef reverse_reversed_generator(seed):\n random.seed(seed)\n for i in range(1000):\n items = create_list(1 + (i % 8))\n yield (items, )\n\ndef brick_wall_generator(seed):\n random.seed(seed)\n for i in range(1000):\n ht = random.randint(1, 100)\n n = random.randint(5, 100)\n wall = []\n for j in range(ht):\n total = 0\n row = []\n while total < n:\n if total == n - 1:\n brick = 1\n else:\n brick = random.randint(1, min(20, n - total))\n row.append(brick)\n total += brick\n wall.append(row)\n yield (wall, ) \n\ndef flatten_generator(seed):\n random.seed(seed)\n for i in range(10000):\n items = create_list(1 + i % 8)\n yield (items, )\n\ndef __create_random_word__(n):\n result = \"\"\n for i in range(n):\n result += chr(ord('a') + random.randint(0, 25))\n return result\n\ndef break_into_syllables_generator(seed):\n random.seed(seed)\n for i in range(1000):\n word = __create_random_word__(5 + (i % 15))\n splits = [random.randint(0, len(word) - 1) for i in range(1 + len(word)//2)]\n syllables = [word[i:i+random.randint(1, 6)] for i in splits]\n syllables.extend([__create_random_word__(1+random.randint(0, 4)) for i in range(1 + len(word)//2)])\n random.shuffle(syllables)\n yield (word, syllables)\n\ndef scrabble_value_generator(seed):\n random.seed(seed)\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n words = limited_alphabet(words, \"abcdefghijklmnopqrstuvwxyz\")\n for word in words:\n yield[word, [random.randint(1, 3) for i in range(len(word))]]\n\ndef expand_intervals_generator(seed):\n random.seed(seed)\n for j in range(1000):\n curr = 0\n result = \"\"\n first = True\n n = random.randint(1, 20)\n for i in 
range(n):\n if not first:\n result += \",\"\n first = False\n if random.randint(0, 99) < 20:\n result += str(curr)\n curr += random.randint(1, 10)\n else:\n end = curr + random.randint(1, 30)\n result += str(curr) + \"-\" + str(end)\n curr = end + random.randint(1, 10)\n yield (result,)\n\ndef collapse_intervals_generator(seed):\n random.seed(seed)\n for i in range(1000):\n items = []\n curr = 1\n n = random.randint(1, 20)\n for j in range(n):\n m = random.randint(1, 5)\n for k in range(m):\n items.append(curr)\n curr += 1\n curr += random.randint(1, 10)\n yield (items,)\n\ndef recaman_generator():\n yield (1000000,)\n\ndef __no_repeated_digits__(n, allowed):\n n = str(n)\n for i in range(4):\n if n[i] not in allowed:\n return False\n for j in range(i+1, 4):\n if n[i] == n[j]:\n return False\n return True \n\ndef bulls_and_cows_generator(seed):\n random.seed(seed)\n for i in range(100):\n result = []\n n = random.randint(1, 4)\n allowed = random.sample(\"123456789\", 6)\n while len(result) < n:\n guess = random.randint(1000, 9999)\n if __no_repeated_digits__(guess, allowed):\n bulls = random.randint(0, 3)\n cows = random.randint(0, 3)\n cows = min(cows, 4 - bulls)\n if not(bulls == 3 and cows == 1):\n result.append( (guess, bulls, cows) )\n yield (result,)\n\ndef __manhattan__(p1, p2):\n total = 0\n for i in range(len(p1)):\n total += abs(p1[i] - p2[i])\n return total\n\nfrom math import sqrt\n\ndef __euclidean__(p1, p2):\n total = 0\n for i in range(len(p1)):\n total += (p1[i] - p2[i]) * (p1[i] - p2[i])\n return int(sqrt(total))\n\ndef farthest_points_distance_generator(seed):\n random.seed(seed)\n dfs = [__euclidean__, __manhattan__]\n for i in range(10000):\n d = random.randint(1, 10)\n n = random.randint(5, 10)\n points = []\n for j in range(n):\n point = tuple([random.randint(-100,100) for k in range(d)])\n points.append(point)\n df = random.choice(dfs)\n yield (points, df)\n\ndef contains_bingo_generator(seed):\n random.seed(seed)\n nums = range(1, 99)\n for i in range(10000):\n card = random.sample(nums, 25)\n card = [card[i:i+5] for i in range(0, 25, 5)]\n m = random.randint(20, 80)\n numbers = random.sample(nums, m)\n numbers.sort()\n centerfree = [True, False][random.randint(0,1)]\n yield (card, numbers, centerfree)\n\ndef can_balance_generator(seed):\n random.seed(seed)\n for i in range(10000):\n n = random.randint(1, 30)\n items = [random.randint(1,10) for i in range(n)]\n yield (items, )\n\ndef calkin_wilf_generator():\n for v in [10, 42, 255, 987, 7654, 12356]:\n yield (v,)\n\ndef fibonacci_sum_generator(seed):\n random.seed(seed)\n curr = 1\n while curr < 10 ** 100:\n yield (curr,)\n curr = curr * 2\n curr += random.randint(0, min(curr, 1000))\n\ndef create_zigzag_generator(seed):\n random.seed(seed)\n for i in range(10000):\n rows = random.randint(1, 20)\n cols = random.randint(1, 20)\n start = random.randint(1, 100)\n yield (rows, cols, start)\n\ndef fibonacci_word_generator(seed):\n random.seed(seed)\n curr = 0\n for i in range(10000):\n yield (curr,)\n curr += random.randint(1, 10)\n curr = curr * 2\n\ndef duplicate_word_generator():\n wap = open(\"warandpeace.txt\", encoding='utf-8')\n text = list(wap)\n wap.close()\n for line in text:\n for n in range(1, 14):\n yield (line, n)\n\ndef all_cyclic_shifts_generator():\n f = open('words.txt', 'r', encoding='utf-8')\n words = [x.strip() for x in f if x.islower()]\n f.close()\n words = limited_alphabet(words, \"abcdefghijklmnopqrstuvwxyz\")\n for word in words:\n yield (word,)\n\ndef aliquot_sequence_generator():\n for i in 
range(1, 100):\n yield (i, 10)\n yield (i, 100)\n\ndef josephus_generator():\n for n in range(2, 100):\n for k in range(1, n):\n yield (n, k)\n\ndef balanced_ternary_generator(seed):\n random.seed(seed)\n curr = 1\n for i in range(1, 1000):\n yield (curr,)\n yield (-curr,)\n curr += random.randint(1, max(3, curr // 10))\n\n__names__ = [\"brad\", \"ben\", \"britain\", \"donald\", \"bill\", \"ronald\",\n \"george\", \"laura\", \"barbara\",\n \"barack\", \"angelina\", \"jennifer\", \"ross\", \"rachel\",\n \"monica\", \"phoebe\", \"joey\", \"chandler\",\n \"hillary\", \"michelle\", \"melania\", \"nancy\", \"homer\",\n \"marge\", \"bart\", \"lisa\", \"maggie\", \"waylon\", \"montgomery\",\n \"california\", \"canada\", \"germany\", \"sheldon\", \"leonard\",\n \"rajesh\", \"howard\", \"penny\", \"amy\", \"bernadette\"]\n\ndef brangelina_generator():\n for i in range(len(__names__)):\n for j in range(len(__names__)):\n yield (__names__[i], __names__[j])\n \ndef frequency_sort_generator(seed):\n random.seed(seed)\n for i in range(1000):\n ln = random.randint(1, 1000)\n elems = [random.randint(1, 2 + ln // 2) for x in range(ln)]\n yield(elems,)\n\ndef count_consecutive_summers_generator():\n for i in range(1, 1000):\n yield(i,)\n\ndef detab_generator(seed):\n wap = open(\"warandpeace.txt\", encoding='utf-8')\n text = list(wap)\n wap.close()\n random.seed(seed)\n for line in text:\n n = random.randint(1, 7)\n yield (line, n, ' ')\n\ndef running_median_of_three_generator(seed):\n random.seed(seed)\n yield ([],)\n yield ([42],)\n for i in range(100):\n n = random.randint(2, 1000)\n items = [random.randint(1, 100) for x in range(n)]\n yield (items,)\n \ndef iterated_remove_pairs_generator(seed):\n random.seed(seed)\n for k in range(1000):\n n = random.randint(0, 100)\n vals = [random.randint(1, 10000) for i in range(7)]\n items = [vals[random.randint(0, 6)] for i in range(n)]\n yield (items,)\n\ndef is_perfect_power_generator(seed):\n random.seed(seed)\n for k in range(500):\n base = random.randint(2, 10)\n exp = random.randint(2, 13 - base)\n off = random.randint(0, 1)\n yield (base ** exp - off, )\n\ndef sort_by_digit_count_generator(seed):\n random.seed(seed)\n for k in range(1000):\n n = random.randint(1, 1000)\n yield ([random.randint(1, 10**6) for i in range(n)],)\n\ndef count_divisors_in_range_generator(seed):\n random.seed(seed)\n v = 3\n step = 1\n up = 10\n for i in range(100000):\n start = random.randint(-v, v)\n end = random.randint(0, v) + start\n n = random.randint(1, v)\n yield (start, end, n)\n v += step\n if i == up:\n up = 10 * up\n step = step * 10\n\n__players__ = ['anita', 'suzanne', 'suzy', 'tom', 'steve', 'ilkka', 'rajesh',\n 'amy', 'penny', 'sheldon', 'leonard', 'bernadette', 'howard']\n\ndef highest_n_scores_generator(seed):\n random.seed(seed)\n for i in range(10000):\n scores = [(name, random.randint(1, 100)) for name in __players__\\\n for k in range(random.randint(0, 20))]\n n = random.randint(1, 10)\n yield (scores, n)\n\n# Let the good times roll!\n\ntest_all_functions(labs109, [\n (\n \"milton_work_point_count\",\n milton_work_point_count_generator(12345),\n \"478b4f9abb802dcd9c175851e5de90febea421622b851dbb54\" \n ), \n (\n \"highest_n_scores\",\n highest_n_scores_generator(12345),\n \"978ce1599544e991c1cdc5824a762ffbed54ebcee76ca87821\" \n ),\n (\n \"count_divisors_in_range\", \n count_divisors_in_range_generator(12345),\n \"046f15a3e3a38735d04736da74262a54f7c6882c61b3e4db5a\"\n ),\n (\n \"sort_by_digit_count\",\n sort_by_digit_count_generator(12345),\n 
\"faa4547a1a4fc27a0e8c16c1f1d4f8d6385587ab08e9c9d0c5\" \n ),\n (\n \"is_perfect_power\", \n is_perfect_power_generator(12345),\n \"5c396434e95e5899055195e80660137588f6d81c3cf6594d32\"\n ), \n (\n \"iterated_remove_pairs\",\n iterated_remove_pairs_generator(12345),\n \"f3d6588ec3c251abfc024698c2a7371dcc7e175af1e41bb0aa\"\n ),\n (\n \"detab\",\n detab_generator(12345),\n \"d3e7eea790490fd172a01cdf48639aad2462d7f440fe68cba4\"\n ),\n (\n \"running_median_of_three\",\n running_median_of_three_generator(12345),\n \"4325b7bb7172d5a4f7e478174661d109aea0de9bba3480536d\" \n ),\n (\n \"frequency_sort\",\n frequency_sort_generator(12345),\n \"608f5351a1e77413aff8779d4586ca536eb5314e686892b391\"\n ),\n (\n \"count_consecutive_summers\", \n count_consecutive_summers_generator(),\n \"3ade63a194b40ff5aa1b53642eee754d30f2ab48ef77330540\"\n ),\n (\n \"brangelina\",\n brangelina_generator(),\n \"fdbbfd7aa2ebcb989862f4e23defc6cafd4aca55ce3235a463\"\n ), \n (\n \"balanced_ternary\",\n balanced_ternary_generator(12345),\n \"08dcda71f136c16362cc53e62f98d49b28bb45c43ddee4ea32\" \n ),\n (\n \"josephus\",\n josephus_generator(),\n \"3ff6a944f6f48e41cc53a7013e785da77be27c7372b4a4cdbb\"\n ),\n (\n \"aliquot_sequence\",\n aliquot_sequence_generator(),\n \"5942bb5b3dc190eaddff33df990de03666441706387cde0d7e\" \n ), \n (\n \"all_cyclic_shifts\",\n all_cyclic_shifts_generator(),\n \"0890c1b6077f0ec28642ab7723ba49e6453f0d7251a25e9e5a\" \n ), \n (\n \"duplicate_word\",\n duplicate_word_generator(),\n \"aa182f216a2d696bdebd03eb2f01400dcc9322e31e302471d7\" \n ), \n (\n \"fibonacci_word\",\n fibonacci_word_generator(12345),\n \"275ac5dc13b0bf5364bb25fca249b2115357fc7666154d1cd6\" \n ), \n (\n \"create_zigzag\",\n create_zigzag_generator(12345),\n \"e3376a7132fe7ed1b04f38215dea836d70e8cf8d0e316868cf\" \n ), \n (\n \"fibonacci_sum\",\n fibonacci_sum_generator(12345),\n \"c4052229fe7b1abf54e3d0757ed2d27777b9323fb753127cf9\"\n ), \n (\n \"calkin_wilf\",\n calkin_wilf_generator(),\n \"e5ff0851c0830b72802a818eeaec66711b6e3b91a004263674\" \n ), \n (\n \"can_balance\",\n can_balance_generator(12345),\n \"0d79528d49fc77f06d98f3d2672306097a1aacfcb65e050f6a\" \n ),\n (\n \"contains_bingo\",\n contains_bingo_generator(12345),\n \"c352ce01918d0d47ca13adedf25556e5fd4ab1f672e07bc52f\"\n ), \n (\n \"farthest_points_distance\",\n farthest_points_distance_generator(12345),\n \"2bc835828a4967b35bf1c022fc4c204e2fdc955c03947f3a2b\"\n ),\n (\n \"bulls_and_cows\",\n bulls_and_cows_generator(12345),\n \"e00ca4cd1996a51ef5cd5588a7facd0a00f2e3f3946d5f4e96\" \n ), \n (\n \"recaman\",\n recaman_generator(),\n \"48f7b14610fe8f54ab2b1d81265847eec47d450d13e4a4c6c5\"\n ),\n (\n \"collapse_intervals\",\n collapse_intervals_generator(12345),\n \"bb95484119b5e00b704121baa1f7ef5312154ad542cf9da828\"\n ), \n (\n \"expand_intervals\",\n expand_intervals_generator(12345),\n \"9fecebbd937380814f804508ed3f491a6a0c353050e60a3d60\"\n ), \n (\n \"scrabble_value\", \n scrabble_value_generator(12345),\n \"398a3bd44dbc4cf3116e25e52db44809c7cad86ddb03eb0186\"\n ),\n (\n \"break_into_syllables\", \n break_into_syllables_generator(12345),\n \"d3b84c8f7387becbb82a17b15989066231983a7947745581f9\"\n ), \n (\n \"reverse_ascending_sublists\",\n reverse_ascending_sublists_generator(12345),\n \"78fed45a9925dd87964e1433e1db5451900de41a491f2b8144\"\n ), \n (\n \"flatten\",\n flatten_generator(12345),\n \"965fda78b5ad7ae3924edf4b545c84b6d5a78158d92b234f65\" \n ), \n (\n \"brick_wall\",\n brick_wall_generator(12345),\n \"1927b926398675551013143c95658f8f9d8123d1990fc1d117\" \n ),\n (\n 
\"reverse_reversed\", \n reverse_reversed_generator(12345),\n \"c3ec2d6688cc38e8ad384ed5cbf5dabc663dbf9e97d7608367\"\n ),\n (\n \"longest_palindrome\",\n longest_palindrome_generator(12345),\n \"3dd73f155d4e4debbcaba8a2815479ecf42f528ec577173a63\" \n ),\n (\n \"group_equal\",\n group_equal_generator(12345),\n \"242fac179412d7ad82bebadbd74ac7d0044b33942a714870b9\"\n ), \n (\n \"postfix_evaluate\",\n postfix_evaluate_generator(99),\n \"6a37236b142ad06ab0e3f97cf2733c831eeab1f9463c819e97\"\n ), \n (\n \"letter_pair_freqs\",\n letter_pair_freqs_generator(),\n \"e18e2c32dc977e5647fc11af704cc98bfe8e994782349c4cd4\" \n ),\n (\n \"paragraph_lengths\",\n paragraph_lengths_generator(),\n \"df303673536dce13da7c626b2a07ba949cd62388761d98b68d\" \n ),\n (\n \"ryerson_letter_grade\", \n ryerson_letter_grade_generator(),\n \"b9b86a019c4502be825b0ed52c187f9a29106a08fbbb1ffcc6\"\n ),\n (\n \"is_ascending\", \n is_ascending_generator(7),\n \"4c5f0dbf663f3350b7cf3d16f0589fc7dc5168ca17e4aefd3f\"\n ),\n (\n \"double_until_all_digits\",\n double_until_all_digits_generator(),\n \"7c4ba46364765cb0679f609d428bbbae8ba0df440b001c4162\"\n ),\n (\n \"caps_lock_stuck\",\n caps_lock_stuck_generator(),\n \"4aad29b868200851957f4e59a51321d5be077c1217f66ca1b9\" \n ),\n (\n \"give_change\",\n give_change_generator(12345),\n \"e8419a56ab09d1cf1effb2bb9c45802ae21a2304793cc8a892\"\n ),\n (\n \"winning_card\",\n winning_card_generator(12345),\n \"32c7fee1415a8095db6f318ad293dd08dec4e6904f304c4a73\"\n ),\n (\n \"hand_is_badugi\",\n hand_is_badugi_generator(987),\n \"d37917aab58ce06778d3f667f6c348d1e30ee67271d9d1de60\"\n ),\n (\n \"bridge_hand_shape\",\n bridge_hand_shape_generator(12345),\n \"61cfd31019c2838780311603caee80a9c57fae37d4f5b561ce\" \n ),\n (\n \"hand_shape_distribution\",\n hand_shape_distribution_generator(12345),\n \"0a34b7e0409552587469623bd8609dae1218f909c178c592db\" \n ),\n (\n \"sort_by_typing_handedness\",\n sort_by_typing_handedness_generator(),\n \"c093675bb9814e5a2a761c829e8fb5b3a714e93ea2031fd1c3\" \n ),\n (\n \"word_salad\",\n word_salad_generator(12345),\n \"9f4b64f15b92814a5bba3e3c422e0ed74fc8fb802ccb9b004b\" \n ), \n (\n \"possible_words\",\n possible_words_generator(999), \n \"55e494a37554d8f8b2c98bd7451de2b05728aa66be210478cd\"\n ), \n], \"labs109.py\")","sub_path":"tester109.py","file_name":"tester109.py","file_ext":"py","file_size_in_byte":26275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"156209773","text":"from pybrain.rl.learners import NFQ\nfrom scipy import r_\nimport numpy as np\nfrom pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner\nfrom pybrain.datasets import SupervisedDataSet\nfrom pybrain.supervised.trainers.rprop import RPropMinusTrainer\nfrom pybrain.supervised.trainers import BackpropTrainer\nfrom pybrain.utilities import one_to_n\n\n\nclass Knee_NFQ(NFQ):\n def __init__(self, batch_size=5):\n super(Knee_NFQ, self).__init__(maxEpochs=20)\n self._batch_size = batch_size\n\n\n def learn(self):\n # convert reinforcement dataset to NFQ supervised dataset\n supervised = SupervisedDataSet(self.module.network.indim, 1)\n states = self.dataset['state']\n actions = self.dataset['action']\n rewards = self.dataset['reward']\n next_states = self.dataset['next_state']\n library_size = states.shape[0]\n if library_size < 4:\n return\n batch_size = min(library_size, self._batch_size)\n for _ in range(batch_size):\n experience_index = np.random.randint(0, library_size)\n state = states[experience_index, :]\n action = 
actions[experience_index, :]\n reward = rewards[experience_index, :]\n next_state = next_states[experience_index, :]\n Q = self.module.getValue(state, action[0])\n inp = r_[state, one_to_n(action[0], self.module.numActions)]\n tgt = Q + 0.5*(reward + self.gamma * max(self.module.getActionValues(next_state)) - Q)\n supervised.addSample(inp, tgt)\n # train module with backprop/rprop on dataset\n #trainer = RPropMinusTrainer(self.module.network, dataset=supervised, batchlearning=True, verbose=False)\n #trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)\n\n # alternative: backprop, was not as stable as rprop\n trainer = BackpropTrainer(self.module.network, dataset=supervised, learningrate=0.5, batchlearning=True, verbose=False)\n trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)\n\n \"\"\"\n for seq in self.dataset:\n lastexperience = None\n for state, action, reward in seq:\n if not lastexperience:\n # delay each experience in sequence by one\n lastexperience = (state, action, reward)\n continue\n\n # use experience from last timestep to do Q update\n (state_, action_, reward_) = lastexperience\n\n Q = self.module.getValue(state_, action_[0])\n\n inp = r_[state_, one_to_n(action_[0], self.module.numActions)]\n tgt = Q + 0.5*(reward_ + self.gamma * max(self.module.getActionValues(state)) - Q)\n supervised.addSample(inp, tgt)\n\n # update last experience with current one\n lastexperience = (state, action, reward)\n\n # train module with backprop/rprop on dataset\n trainer = RPropMinusTrainer(self.module.network, dataset=supervised, batchlearning=True, verbose=False)\n trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)\n\n # alternative: backprop, was not as stable as rprop\n # trainer = BackpropTrainer(self.module.network, dataset=supervised, learningrate=0.005, batchlearning=True, verbose=True)\n # trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)\n \"\"\"\n","sub_path":"knee_NFQ.py","file_name":"knee_NFQ.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"604639935","text":"bucketName = 'org.cicsnc.albedo'\nbasePath = 'Input/area/'\nsatellite = 'goes13'\nyear = '2017'\nstartDay = 1\nendDay = 10\nfilterBand = 'BAND_01'\ndryrun = False\n\nimport re\nfrom os import fdopen, remove\nfrom shutil import move\nfrom tempfile import mkstemp\n\nimport boto3\n\n\ndef replace(file_path, pattern, subst):\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open(file_path) as old_file:\n for line in old_file:\n new_file.write(re.sub(pattern, subst, line))\n remove(file_path)\n move(abs_path, file_path)\n\n\ndef removePrefix(text, prefix):\n return text[text.startswith(prefix) and len(prefix):]\n\n\ndef getS3FileNames(bucket, folder):\n keys = []\n kwargs = {'Bucket': bucket, 'Prefix': folder}\n while True:\n resp = s3.list_objects_v2(**kwargs)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n try:\n kwargs['ContinuationToken'] = resp['NextContinuationToken']\n except KeyError:\n break\n return keys\n\n\nif satellite == 'goes08' or 'goes12' or 'goes13' or 'goes14':\n startTimeA = 0\n endTimeA = 130\n startTimeB = 730\n endTimeB = 2359\n replace('../ancillary.src/AlgorithmConfigurationFile_docker', 'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_075_VIS02')\nelif satellite == 'goes09' or 'goes10' or 'goes11' or 'goes15':\n startTimeA = 0\n endTimeA = 530\n startTimeB = 1130\n endTimeB = 2359\n replace('../ancillary.src/AlgorithmConfigurationFile_docker', 
'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_135_VIS02')\nelse:\n print(\"Invalid satellite setting\")\n exit(-1)\n\ns3 = boto3.client('s3')\ns3.upload_file(\"../ancillary.src/AlgorithmConfigurationFile_docker\", bucketName, \"AlgorithmConfigurationFile_docker\")\n\nfileNames = getS3FileNames(bucketName, basePath)\nfileNames = map(lambda d: removePrefix(d, basePath), fileNames)[1:]\nfileNames = filter(lambda f: f.startswith(satellite), fileNames)\nfileNames = filter(lambda f: f.endswith(filterBand), fileNames)\nfileNames = filter(lambda f: f[7:11] == year, fileNames)\ndays = list(\"%03d\" % day for day in range(startDay, endDay + 1))\nfileNames = [x for x in fileNames if x[12:15] in days]\ntimesA = list(\"%04d\" % time for time in range(startTimeA, endTimeA + 1))\ntimesB = list(\"%04d\" % time for time in range(startTimeB, endTimeB + 1))\nfileNames = [x for x in fileNames if x[16:20] in timesA + timesB]\n\nclient = boto3.client('sqs', region_name='us-east-1')\nqueues = client.list_queues(QueueNamePrefix='AlbedoPh1')\nqueueURL = queues['QueueUrls'][0]\n\nfor fileName in fileNames:\n print(fileName)\n if not dryrun:\n enqueueResponse = client.send_message(QueueUrl=queueURL, MessageBody=fileName)\n\nprint(len(fileNames))\n","sub_path":"Albedo/AlbedoPh1Queue.py","file_name":"AlbedoPh1Queue.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"143450379","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom botany.choices import EASTING_CHOICES, NORTHING_CHOICES, RECOVERY_METHODS, MATERIALS, FRACTIONS\n\nclass Sample(models.Model):\n sample_id = models.AutoField(primary_key=True)\n area_easting = models.IntegerField(choices = EASTING_CHOICES)\n area_northing = models.IntegerField(choices = NORTHING_CHOICES)\n context_number = models.IntegerField()\n sample_number = models.IntegerField()\n # material_type = models.CharField(max_length=200, default='', blank=True, null=True, choices = MATERIALS)\n sample_type = models.CharField(max_length=200, default='', blank=True, null=True, choices = MATERIALS)\n weight = models.DecimalField(max_digits=6, decimal_places=2)\n description = models.CharField(max_length=500, default='', blank=True, null=True)\n recovery_method = models.CharField(max_length=200, default='', blank=True, null=True, choices = RECOVERY_METHODS)\n taken_by = models.ForeignKey(settings.AUTH_USER_MODEL, db_column='taken_by', on_delete = models.PROTECT)\n # taken_by = models.ForeignKey(public.auth_user, db_column='taken_by', on_delete = models.PROTECT)\n comments = models.CharField(max_length=1000, default='', blank=True, null=True)\n\n def __str__(self):\n # return self.taken_by.first_name\n return str(self.sample_id)\n # return str(self.firstname)+ '-' +str(self.lastname)\n # return u'%s %s' % (self.first_name, self.last_name)\n\n\n class Meta:\n db_table = 'kap\\\".\\\"sample'\n ordering = [\"sample_id\"]\n managed = False\n #verbose_name_plural = \"samples\"\n\n\nclass Flotation(models.Model):\n flotation_id = models.AutoField(primary_key=True)\n sample_id = models.ForeignKey(Sample, db_column='sample_id', on_delete = models.PROTECT)\n # area_easting = models.IntegerField(choices = EASTING_CHOICES)\n # area_northing = models.IntegerField(choices = NORTHING_CHOICES)\n # context_number = models.IntegerField(blank=True, null=True)\n # sample_number = models.IntegerField(blank=True, null=True)\n flotation_date = 
models.DateTimeField(auto_now=False)\n entry_date = models.DateTimeField(auto_now_add=False)\n analyst_id = models.ForeignKey(settings.AUTH_USER_MODEL, db_column='analyst_id', on_delete = models.PROTECT)\n notes = models.CharField(max_length=600, default='', blank=True, null=True)\n\n def __str__(self):\n return str(self.flotation_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"flotation'\n #ordering = [\"orderby\"]\n verbose_name_plural = \"Flotation\"\n\n\nclass LightResidue(models.Model):\n lightresidue_id = models.AutoField(primary_key=True)\n flotation_id = models.ForeignKey(Flotation, db_column='flotation_id', on_delete = models.PROTECT)\n proportion_analysed = models.DecimalField(max_digits=5, decimal_places=3)\n soil_volume = models.DecimalField(max_digits=15, decimal_places=4)\n sample_volume = models.DecimalField(max_digits=15, decimal_places=4)\n sample_weight = models.DecimalField(max_digits=15, decimal_places=4)\n sediment = models.BooleanField()\n stone = models.BooleanField()\n roots = models.BooleanField()\n leaves = models.BooleanField()\n insect_parts = models.BooleanField()\n charred_dung = models.BooleanField()\n bone = models.BooleanField()\n shell = models.BooleanField()\n\n def __str__(self):\n return str(self.lightresidue_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"lightresidue'\n #ordering = [\"orderby\"]\n verbose_name_plural = \"LightResidue\"\n\nclass Composition(models.Model):\n composition_id = models.AutoField(primary_key=True)\n lightresidue_id = models.ForeignKey(LightResidue, db_column='lightresidue_id', on_delete = models.PROTECT)\n material_type = models.CharField(max_length=50, default='')\n type_count = models.DecimalField(max_digits=15, decimal_places=4)\n whole_weight = models.DecimalField(max_digits=15, decimal_places=4)\n fragment_weight = models.DecimalField(max_digits=15, decimal_places=4)\n\n def __str__(self):\n return str(self.composition_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"composition'\n #ordering = [\"orderby\"]\n verbose_name_plural = \"Composition\"\n\nclass Fraction(models.Model):\n fraction_id = models.AutoField(primary_key=True)\n composition_id = models.ForeignKey(Composition, db_column='composition_id', on_delete = models.PROTECT)\n fraction = models.CharField(max_length=20, choices = FRACTIONS)\n def __str__(self):\n return str(self.fraction_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"fraction'\n #ordering = [\"orderby\"]\n verbose_name_plural = \"Fraction\"\n\nclass Species(models.Model):\n species_id = models.AutoField(primary_key=True)\n taxon = models.CharField(max_length=50, blank=True, null=True)\n common_name = models.CharField(max_length=50, blank=True, null=True)\n species = models.CharField(max_length=50, blank=True, null=True)\n genus = models.CharField(max_length=50, blank=True, null=True)\n # family_name = models.CharField(max_length=50, blank=True, null=True)\n\n def __str__(self):\n return str(self.species)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"plant_species'\n ordering = [\"genus\",\"species\"]\n verbose_name_plural = \"species\"\n\nclass PlantPart(models.Model):\n plantpart_id = models.AutoField(primary_key=True)\n fraction_id = models.ForeignKey(Fraction, db_column='fraction_id', on_delete = models.PROTECT)\n species_id = models.ForeignKey(Species, db_column='species_id', on_delete = models.PROTECT, related_name='plant_species', blank=True, null=True)\n part = models.CharField(max_length=100)\n weight = 
models.DecimalField(max_digits=10, decimal_places=3)\n quantity = models.DecimalField(max_digits=10, decimal_places=3)\n\n\n def __str__(self):\n return str(self.plantpart_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"plant_part'\n #ordering = [\"orderby\"]\n verbose_name_plural = \"plant parts\"\n\nclass Seed(models.Model):\n seed_id = models.AutoField(primary_key=True)\n fraction_id = models.ForeignKey(Fraction, db_column='fraction_id', blank=True, null=True, on_delete = models.PROTECT)\n species_id = models.ForeignKey(Species, db_column='species_id', blank=True, null=True, on_delete = models.PROTECT, related_name='seed_species')\n weight_type = models.CharField(max_length=50)\n weight = models.DecimalField(max_digits=10, decimal_places=3)\n quantity_type = models.CharField(max_length=50)\n quantity = models.IntegerField()\n\n\n def __str__(self):\n return str(self.seed_id)\n\n class Meta():\n managed=False\n db_table = 'kap\\\".\\\"seed'\n ordering = [\"species_id\",\"fraction_id\"]\n verbose_name_plural = \"seeds\"\n","sub_path":"botany/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"438699334","text":"#!/bin/python3\nfrom shorthand import *\nfrom amyhead import *\n\ndef _bfs(obj,step=8,turn=0,stateLimit=4095,notViolate=None,info={}):\n\t#if \"h\" in info: print(info[\"h\"]) # debug\n\t#hvv=info[\"hvv\"] if \"hvv\" in info else []\n\t#hv=[ h for hv in hvv for h in hv ]\n\thv=info['hv']\n\tstateCnt=0\n\trtv={}\n\tt=(obj.copy(),0,(-1,None)) # ( ; , total_puts , ((turn,last_put_loc) , lastStatHash) )\n\t#q=queue()\n\t#q.push(t)\n\torderNum=0\n\t#hInfo=tuple([0 for _ in range(len(hv))])\n\thInfo=tuple([0 for _ in range(len(hv))])\n\thDistinct,hDF=[[],[]],[min,max] # [ min_arr , max_arr ]\n\tcmpInfo=(hInfo,orderNum)\n\theap=[]\n\theappush(heap,(cmpInfo,t))\n\torderNum+=1\n\tdel t\n\t#while q.size()!=0:\n\twhile len(heap)!=0:\n\t\t#t=q.pop()\n\t\tt=heappop(heap)[1]\n\t\tcurrstat=t[0]\n\t\tcurrstep=t[1]\n\t\tlast_put=t[2][0]\n\t\tcurrstatNum=currstat.hash()\n\t\tif currstatNum in rtv: continue\n\t\trtv[currstatNum]=t\n\t\tdel t\n\t\tstateCnt+=1\n\t\tif stateCnt>stateLimit: break\n\t\tnear1=currstat.near1(info=info)\n\t\tfor near in near1:\n\t\t\tstat=near[2]\n\t\t\t#if stat.turn()==-1 and not (isNone(notViolate) or matchGoaltree_find_inSet(stat,notViolate)):\n\t\t\t#\tcontinue\n\t\t\tactinfo=near[:2] # (who does, does what)\n\t\t\tif currstep Building model..')\n global best_acc0\n global start_epoch\n if work == True:\n net = MobileNet_V1()\n net = net.to(device)\n # if device == 'cuda':\n # net = torch.nn.DataParallel(net)\n # cudnn.benchmark = True\n # optimizer = optim.Adam(net.parameters(), lr=args.lr)\n optimizer0 = optim.SGD(net.parameters(), lr=args.lr0)\n optimizer1 = optim.SGD(net.parameters(), lr=args.lr1)\n # torch.optim.lr_scheduler.StepLR(optimizer, 60, gamma=0.1, last_epoch=-1)\n # torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 50, 100, 200, 200, 300], gamma=0.1, last_epoch=-1)\n scheduler0 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer0, mode='min', factor=0.1, patience=20, verbose=True, threshold=1e-4, threshold_mode='rel')\n scheduler1 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer1, mode='min', factor=0.1, patience=20, verbose=True, threshold=1e-4, threshold_mode='rel')\n criterion0 = nn.CrossEntropyLoss()\n criterion1 = nn.CrossEntropyLoss()\n # checkpoint = 
torch.load('./checkpoint/flod.t7')\n # net.load_state_dict(checkpoint['net'])\n return net, optimizer0, optimizer1, scheduler0, criterion0, scheduler1, criterion1\n\n\ndef train(epoch, dataloader, net, optimizer0, criterion0, optimizer1, criterion1, vali=True):\n \"\"\"Train the network\"\"\"\n print('\\nEpoch: %d' % epoch)\n global tr_loss0, tr_loss1\n net.train()\n num_id0 = 1e-10\n num_id1 = 1e-10\n train_loss0 = 0\n correct0 = 0\n total0 = 1e-10\n train_loss1 = 0\n correct1 = 0\n total1 = 1e-10\n gate = 0\n for batch_id, (inputs, targets) in enumerate(dataloader):\n # if batch_id < (12800 / args.batch_size):\n optimizer0.zero_grad()\n optimizer1.zero_grad()\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, gate_prt = net(inputs, epoch)\n if epoch % 2 == 0:\n gate = gate_prt\n num_id0 += 1\n loss0 = criterion0(outputs, targets.long())\n loss0.backward()\n optimizer0.step()\n\n train_loss0 += loss0.item()\n _, predicted0 = outputs.max(1)\n total0 += targets.size(0)\n correct0 += predicted0.eq(targets).sum().item()\n progress_bar(batch_id, len(dataloader), 'Loss: %.3f | Acc: %.3f (%d/%d)'\n % (train_loss0 / num_id0, 100. * correct0 / total0, correct0, total0))\n elif epoch % 2 != 0:\n gate = gate_prt\n num_id1 += 1\n loss1 = criterion1(outputs, targets.long())\n loss1.backward()\n optimizer1.step()\n\n train_loss1 += loss1.item()\n _, predicted1 = outputs.max(1)\n total1 += targets.size(0)\n correct1 += predicted1.eq(targets).sum().item()\n progress_bar(batch_id, len(dataloader), 'Loss: %.3f | Acc: %.3f (%d/%d)'\n % (train_loss1 / num_id1, 100. * correct1 / total1, correct1, total1))\n # else:\n # print('End of the train')\n # break\n if vali is True:\n if epoch % 2 == 0:\n tr_loss0 = train_loss0 / num_id0\n elif epoch % 2 != 0:\n tr_loss1 = train_loss1 / num_id1\n return train_loss0 / num_id0, 100. * correct0 / total0, train_loss1 / num_id1, 100. * correct1 / total1, \\\n gate\n\n\ndef test(epoch, dataloader, net, criterion0, vali=True):\n \"\"\"Validation and the test.\"\"\"\n global best_acc0\n net.eval()\n num_id = 0\n test_loss = 0\n correct = 0\n total = 0\n acc = 0\n with torch.no_grad():\n for batch_id, (inputs, targets) in enumerate(dataloader):\n # if batch_id < (2560 / args.batch_size):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, gate_prt = net(inputs, epoch)\n num_id += 1\n loss0 = criterion0(outputs, targets.long())\n\n test_loss += loss0.item()\n _, predicted = outputs.max(1) # judge max elements in predicted`s Row(1:Row 0:Column)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item() # judge how many elements same in predicted and targets\n if (100. * correct / total) < 90.49:\n acc = 90.49\n else:\n acc = (100. 
* correct / total)\n progress_bar(batch_id, len(dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss / num_id, acc, correct, total))\n # else:\n # print('End of the test')\n # break\n return test_loss / num_id, acc\n\n\nif __name__ == '__main__':\n epoch0 = 0\n epoch1 = 0\n trainloader, testloader = data_prepare()\n net, optimizer0, optimizer1, scheduler0, criterion0, scheduler1, criterion1 = model_prepare(work)\n train_loss_list0, train_acc_list0, train_loss_list1, train_acc_list1 = [], [], [], []\n for epoch in range(start_epoch, start_epoch+args.max_epoch):\n train_loss0, train_acc0, train_loss1, train_acc1, gate = train(epoch, trainloader, net, optimizer0, criterion0, optimizer1, criterion1)\n scheduler0.step(tr_loss0)\n scheduler1.step(tr_loss1)\n lr0 = optimizer0.param_groups[0]['lr']\n lr1 = optimizer1.param_groups[0]['lr']\n if epoch % 2 == 0:\n epoch0 += 1\n train_loss_list0.append(train_loss0)\n train_acc_list0.append(train_acc0)\n elif epoch % 2 != 0:\n epoch1 += 1\n train_loss_list1.append(train_loss1)\n train_acc_list1.append(train_acc1)\n train_loss_array0 = numpy.array(train_loss_list0)\n train_acc_array0 = numpy.array(train_acc_list0)\n train_loss_array1 = numpy.array(train_loss_list1)\n train_acc_array1 = numpy.array(train_acc_list1)\n if lr0 < 5e-3 and lr1 < 5e-3:\n test_loss, test_acc = test(epoch, testloader, net, criterion0)\n print('Saving:')\n state1 = {\n 'net': net.state_dict(),\n 'acc': test_acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('acc'):\n os.mkdir('acc')\n torch.save(state1, './acc/flod''.t7')\n acc = open('MobileNet_V1.txt', 'w')\n acc.write(str(test_acc))\n acc.close()\n print('Saving:')\n plt.figure(1)\n plt.subplot(2, 2, 1)\n plt.xlabel('epoch')\n plt.ylabel('train loss')\n plt.plot([i for i in range(epoch0)], train_loss_array0, '-')\n plt.subplot(2, 2, 2)\n plt.xlabel('epoch')\n plt.ylabel('train acc')\n plt.plot([i for i in range(epoch0)], train_acc_array0, '-')\n plt.savefig(\"MobileNet_V1_First_CE.jpg\")\n plt.figure(2)\n plt.subplot(2, 2, 1)\n plt.xlabel('epoch')\n plt.ylabel('train loss')\n plt.plot([i for i in range(epoch1)], train_loss_array1, '-')\n plt.subplot(2, 2, 2)\n plt.xlabel('epoch')\n plt.ylabel('train acc')\n plt.plot([i for i in range(epoch1)], train_acc_array1, '-')\n plt.savefig(\"MobileNet_V1_Second_CE.jpg\")\n plt.show()\n print('OVER')\n break\n else:\n pass","sub_path":"Mining_Knowledge_of_Weights/CIFAR10/MobileNet_V1_Optimal_Model_Connection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"646582712","text":"#!/usr/bin/python\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel,info\nfrom mininet.node import RemoteController, OVSSwitch\nfrom mininet.cli import CLI\n\ndef emptyNet():\n\tnet = Mininet(topo=None, build=False)\n\n\tnet.addController('c0', controller=RemoteController, ip=\"10.0.0.1\", port=6633)\n\th0 = net.addHost('h0', ip='10.0.0.1')\n\th1 = net.addHost('h1', ip='10.0.2.1')\n\th2 = net.addHost('h2', ip='10.0.2.2')\n\th3 = net.addHost('h3', ip='10.0.2.3')\n\th4 = net.addHost('h4', ip='10.0.2.4')\n\th5 = net.addHost('h5', ip='10.0.2.5')\n\th6 = net.addHost('h6', ip='10.0.2.6')\n\th7 = net.addHost('h7', ip='10.0.2.7')\n\th8 = net.addHost('h8', ip='10.0.2.8')\n\th9 = net.addHost('h9', ip='10.0.2.9')\n\th10 = net.addHost('h10', ip='10.0.2.10')\n\tswitches=[]\n\tfor i in 
range(11):\n\t\tswitches.append(net.addSwitch('s%s' % (i+1), cls=OVSSwitch))\n\tnet.addLink(h0,switches[10])\n\thosts=[h1,h2,h3,h4,h5,h6,h7,h8,h9,h10]\n\tfor i in range(10):\n\t\tnet.addLink(hosts[i],switches[i])\n\tfor i in range(11):\n\t\tfor j in range(i+1,11):\n\t\t\tnet.addLink(switches[i],switches[j])\n\n\tnet.start()\n\t\n\tfor i in range(4):\n\t\tswitches[i].cmd('ifconfig s%s 10.0.1.%s' % (i+1,i+1))\n\n\tfor i in range(4):\n\t\tswitches[i].cmd('ovs-vsctl set bridge s%s stp-enable=true' % (i+1))\n\tCLI(net)\n\tnet.stop()\n\t\nif __name__ == '__main__':\n\tsetLogLevel('info')\n\temptyNet()\n","sub_path":"mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"568428739","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#*** nmeta - Network Metadata - TC Identity Class and Methods\n\n\"\"\"\nThis module is part of the nmeta suite running on top of Ryu SDN controller\nto provide network identity and flow (traffic classification) metadata\n\"\"\"\n\nimport logging\nimport logging.handlers\nimport struct\nimport time\nimport re\n\nimport socket\n\n#*** Ryu imports:\nfrom ryu.lib import addrconv\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import lldp\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import ipv6\nfrom ryu.lib.packet import tcp\n\n#*** nmeta imports:\nimport nmisc\n\nclass IdentityInspect(object):\n \"\"\"\n This class is instantiated by tc_policy.py\n (class: TrafficClassificationPolicy) and provides methods to\n ingest identity updates and query identities\n \"\"\"\n def __init__(self, _config):\n #*** Get logging config values from config class:\n _logging_level_s = _config.get_value \\\n ('tc_identity_logging_level_s')\n _logging_level_c = _config.get_value \\\n ('tc_identity_logging_level_c')\n _syslog_enabled = _config.get_value('syslog_enabled')\n _loghost = _config.get_value('loghost')\n _logport = _config.get_value('logport')\n _logfacility = _config.get_value('logfacility')\n _syslog_format = _config.get_value('syslog_format')\n _console_log_enabled = _config.get_value('console_log_enabled')\n _console_format = _config.get_value('console_format')\n #*** Set up Logging:\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.DEBUG)\n self.logger.propagate = False\n #*** Syslog:\n if _syslog_enabled:\n #*** Log to syslog on host specified in config.yaml:\n self.syslog_handler = logging.handlers.SysLogHandler(address=(\n _loghost, _logport),\n facility=_logfacility)\n syslog_formatter = logging.Formatter(_syslog_format)\n self.syslog_handler.setFormatter(syslog_formatter)\n self.syslog_handler.setLevel(_logging_level_s)\n #*** Add syslog log handler to logger:\n self.logger.addHandler(self.syslog_handler)\n #*** Console logging:\n if _console_log_enabled:\n #*** Log to the console:\n self.console_handler = logging.StreamHandler()\n console_formatter = 
logging.Formatter(_console_format)\n self.console_handler.setFormatter(console_formatter)\n self.console_handler.setLevel(_logging_level_c)\n #*** Add console log handler to logger:\n self.logger.addHandler(self.console_handler)\n\n #*** Instantiate the System and NIC Identity Tables (Legacy):\n self._sys_identity_table = nmisc.AutoVivification()\n self._nic_identity_table = nmisc.AutoVivification()\n #*** Identity Dictionaries\n #*** Let these be accessed directly to avoid overhead of getters:\n self.id_mac = {}\n self.id_ip = {}\n self.id_node = {}\n self.id_service = {}\n #*** Initialise Identity Tables unique reference numbers:\n #*** Start at 1 so that value 0 can be used for boolean\n #*** false on checks\n self._sys_id_ref = 1\n self._nic_id_ref = 1\n #*** Get config values for tidy up of dynamic data:\n self.max_age_nic = _config.get_value('identity_nic_table_max_age')\n self.max_age_sys = _config.\\\n get_value('identity_system_table_max_age')\n self.arp_max = _config.get_value('identity_arp_max_age')\n\n def check_identity(self, policy_attr, policy_value, pkt, ctx):\n \"\"\"\n Passed an identity attribute, value and packet and\n return True or False based on whether or not the packet strongly\n correlates to the identity attribute/value\n \"\"\"\n pkt_eth = pkt.get_protocol(ethernet.ethernet)\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n pkt_ip6 = pkt.get_protocol(ipv6.ipv6)\n pkt_tcp = pkt.get_protocol(tcp.tcp)\n if policy_attr == \"identity_lldp_chassisid\":\n sys_ref = self._get_sys_ref_by_chassisid(policy_value)\n if sys_ref:\n #*** Have matched a chassis ID record, now check if the packet\n #*** relates to that system:\n nic_ref = self._get_sys_nic_ref(sys_ref)\n if nic_ref:\n if pkt_eth.src == self._get_nic_MAC_addr(nic_ref):\n #*** Source MAC addr matches the NIC MAC address\n return True\n if pkt_eth.dst == self._get_nic_MAC_addr(nic_ref):\n #*** Dest MAC addr matches the NIC MAC address\n return True\n if pkt_ip4:\n if pkt_ip4.src == self._get_nic_ip4_addr(nic_ref):\n #*** Source IP addr matches the NIC identity IP\n return True\n if pkt_ip4.dst == self._get_nic_ip4_addr(nic_ref):\n #*** Dest IP addr matches the NIC identity IP\n return True\n else:\n #*** Didn't match that LLDP Chassis ID so return false:\n return False\n\n elif ((policy_attr == \"identity_lldp_systemname\") or\n (policy_attr == \"identity_lldp_systemname_re\")):\n sys_ref = self._get_sys_ref_by_systemname(policy_attr,\n policy_value)\n if sys_ref:\n #*** Have matched a record with that system name, now check\n #*** if the packet relates to that system:\n nic_ref = self._get_sys_nic_ref(sys_ref)\n if nic_ref:\n if pkt_eth.src == self._get_nic_MAC_addr(nic_ref):\n #*** Source MAC addr matches the NIC MAC address\n return True\n if pkt_eth.dst == self._get_nic_MAC_addr(nic_ref):\n #*** Dest MAC addr matches the NIC MAC address\n return True\n if pkt_ip4:\n if pkt_ip4.src == self._get_nic_ip4_addr(nic_ref):\n #*** Source IP addr matches the NIC identity IP\n return True\n if pkt_ip4.dst == self._get_nic_ip4_addr(nic_ref):\n #*** Dest IP addr matches the NIC identity IP\n return True\n else:\n #*** Didn't match that LLDP system name so return false:\n return False\n\n elif policy_attr == \"identity_service_dns\":\n #*** Look up service in id_ip structure:\n ips = []\n if pkt_ip4:\n #*** turn the src and dst IPs into a list so can iterate:\n ips = [pkt_ip4.src, pkt_ip4.dst]\n if pkt_ip6:\n #*** turn the src and dst IPs into a list so can iterate:\n ips = [pkt_ip6.src, pkt_ip6.dst]\n if ctx in self.id_ip:\n 
ip_ctx = self.id_ip[ctx]\n for ip in ips:\n if ip in self.id_ip[ctx]:\n ip_ctx_ip = ip_ctx[ip]\n if 'service' in ip_ctx_ip:\n for service in ip_ctx_ip['service']:\n if service == policy_value:\n #*** Matched service but is it valid?:\n if self.valid_id_ip_service(ctx, ip,\n service):\n return True\n\n elif policy_attr == \"identity_service_dns_re\":\n #*** Look up service in id_ip structure:\n ips = []\n if pkt_ip4:\n #*** turn the src and dst IPs into a list so can iterate:\n ips = [pkt_ip4.src, pkt_ip4.dst]\n if pkt_ip6:\n #*** turn the src and dst IPs into a list so can iterate:\n ips = [pkt_ip6.src, pkt_ip6.dst]\n if ctx in self.id_ip:\n ip_ctx = self.id_ip[ctx]\n for ip in ips:\n if ip in self.id_ip[ctx]:\n ip_ctx_ip = ip_ctx[ip]\n if 'service' in ip_ctx_ip:\n for service in ip_ctx_ip['service']:\n if (re.match(policy_value, service)):\n #*** Matched service but is it valid?:\n if self.valid_id_ip_service(ctx, ip,\n service):\n return True\n\n else:\n self.logger.error(\"Policy attribute %s did not match\", policy_attr)\n return False\n\n def valid_id_ip_service(self, ctx, ip, service):\n \"\"\"\n Passed variables to look up a service in id_ip structure.\n Check that this service is valid (i.e. not stale)\n Return boolean\n \"\"\"\n _time = time.time()\n svc = self.id_ip[ctx][ip]['service'][service]\n if 'source' in svc:\n if svc['source'] == 'dns' or svc['source'] == 'dns_cname':\n last_seen = svc['last_seen']\n ttl = svc['ttl']\n if (last_seen + ttl) > _time:\n #*** TTL is current, so service is valid:\n return True\n return False\n\n def lldp_in(self, pkt, dpid, inport):\n \"\"\"\n Passed an lldp packet, a Data Path ID (dpid) and in port\n and update identity tables (if required) with this identity\n information\n \"\"\"\n _pkt_lldp = pkt.get_protocol(lldp.lldp)\n if (_pkt_lldp):\n _tlv_chassis_id = _pkt_lldp.tlvs[0]\n _chassis_id = _tlv_chassis_id.chassis_id\n _chassis_id_text = addrconv.mac.bin_to_text(_chassis_id)\n _tlv_system_name = _pkt_lldp.tlvs[3]\n _system_name = _tlv_system_name.tlv_info\n _table_ref = self._get_sys_ref_by_chassisid(_chassis_id_text)\n if _table_ref:\n #*** Update the last seen timestamp on the System table entry:\n self._sys_identity_table[_table_ref]['time_last'] = time.time()\n else:\n #*** Add a new record to the System table:\n self._set_sys_record_new_chassisid(_chassis_id_text,\n _system_name, pkt, dpid, inport)\n else:\n self.logger.warning(\"Passed an LLDP packet that did not parse \"\n \"properly\")\n self.logger.debug(\"Problem LLDP pkt=%s\", pkt)\n return(0)\n\n def dhcp_in(self, mac, ip, hostname, ctx):\n \"\"\"\n Passed a MAC address, IP address, DHCP host name\n (from option 12), and a context\n and add to relevant metadata\n \"\"\"\n #*** TBD - add lease time and ensure that request was properly acked\n\n #*** If ip is 0.0.0.0 then just return, as not useful info:\n if ip == '0.0.0.0':\n return\n #*** Add to the id_ip structure:\n self.logger.debug(\"Adding dhcp hostname=%s ip=%s mac=%s ctx=%s to \"\n \"id_ip structure\",\n hostname, ip, mac, ctx)\n #*** Make sure keys exist:\n self.id_ip.setdefault(ctx, {})\n if not ip in self.id_ip[ctx]:\n #*** IP not in table, add it:\n self.id_ip[ctx].setdefault(ip, {})\n #*** Ensure 'node' key exists:\n self.id_ip[ctx][ip].setdefault('node', {})\n #*** Default the hostname:\n self.id_ip[ctx][ip]['node'].setdefault(hostname, {})\n #*** Default the source key:\n self.id_ip[ctx][ip]['node'][hostname].setdefault('source', 'dhcp')\n\n def dns_reply_in(self, queries, answers, ctx):\n \"\"\"\n Passed a DNS 
parameters and a context\n and add to relevant metadata\n \"\"\"\n #*** TBD: Need to add security to this... Checks are\n #*** needed to ensure that the answer is a response\n #*** to a query, and that the relevant fields match\n #*** to ensure response is not spoofed.\n for qname in queries:\n self.logger.debug(\"dns_query=%s\", qname.name)\n for answer in answers:\n if answer.type == 1:\n #*** DNS A Record:\n answer_ip = socket.inet_ntoa(answer.rdata)\n answer_name = answer.name\n answer_ttl = answer.ttl\n self.logger.debug(\"dns_answer_name=%s dns_answer_A=%s \"\n \"answer_ttl=%s\",\n answer_name, answer_ip, answer_ttl)\n #*** Make sure context key exists:\n self.id_ip.setdefault(ctx, {})\n if not answer_ip in self.id_ip[ctx]:\n #*** IP not in table, add it:\n self.id_ip[ctx].setdefault(answer_ip, {})\n #*** Ensure 'service' key exists:\n self.id_ip[ctx][answer_ip].setdefault('service', {})\n #*** Check if know mapping to service:\n if not answer_name in self.id_ip[ctx][answer_ip]['service']:\n #*** Add service name to this IP:\n self.id_ip[ctx][answer_ip]['service'][answer_name] = {}\n #*** Update time last seen and set source attribution:\n svc = self.id_ip[ctx][answer_ip]['service'][answer_name]\n svc['last_seen'] = time.time()\n svc['ttl'] = answer_ttl\n svc['source'] = 'dns'\n #*** Check if service is a CNAME for another domain:\n #*** Make sure context key exists:\n self.id_service.setdefault(ctx, {})\n if answer_name in self.id_service[ctx]:\n #*** Add the original domain to the IP so that\n #*** rules can be written for services without\n #*** needing to understand CNAMES\n #*** Update the service that is the cname to ref this:\n svc['source'] = 'dns_cname'\n #*** Could be multiple original domains for the cname:\n odom_dict = self.id_service[ctx][answer_name]['domain']\n for odom_value in odom_dict:\n ipsvcodom = self.id_ip[ctx][answer_ip]['service'] \\\n .setdefault(odom_value, {})\n ipsvcodom['last_seen'] = time.time()\n ipsvcodom['ttl'] = answer.ttl\n ipsvcodom['source'] = 'dns'\n elif answer.type == 5:\n #*** DNS CNAME Record:\n answer_cname = answer.cname\n answer_name = answer.name\n self.logger.debug(\"dns_answer_name=%s dns_answer_CNAME=%s\",\n answer_name, answer_cname)\n svc_ctx = self.id_service.setdefault(ctx, {})\n svc_cname = svc_ctx.setdefault(answer_cname, {})\n svc_cname['type'] = 'dns_cname'\n svc_cname_dom = svc_cname.setdefault('domain', {})\n svc_cname_dom_a = svc_cname_dom.setdefault(answer.name, {})\n svc_cname_dom_a['last_seen'] = time.time()\n svc_cname_dom_a['ttl'] = answer.ttl\n else:\n #*** Not a type that we handle yet\n pass\n\n def arp_reply_in(self, arped_ip, arped_mac, ctx):\n \"\"\"\n Passed an IPv4 ARP reply MAC and IPv4 address and a context\n and add to relevant metadata\n \"\"\"\n #*** Make sure context key exists:\n self.id_mac.setdefault(ctx, {})\n if not arped_mac in self.id_mac[ctx]:\n #*** MAC not in table, add it:\n self.id_mac[ctx].setdefault(arped_mac, {})\n #*** Ensure 'ip' key exists:\n self.id_mac[ctx][arped_mac].setdefault('ip', {})\n #*** Check if know mapping to IPv4 addr:\n if not arped_ip in self.id_mac[ctx][arped_mac]['ip']:\n #*** Add IP to this MAC:\n self.id_mac[ctx][arped_mac]['ip'][arped_ip] = {}\n #*** Update time last seen and set source attribution:\n self.id_mac[ctx][arped_mac]['ip'][arped_ip]['last_seen'] = time.time()\n self.id_mac[ctx][arped_mac]['ip'][arped_ip]['source'] = 'arp'\n\n def ip4_in(self, pkt):\n \"\"\"\n Passed an IPv4 packet\n and update NIC identity table (if required) with the IPv4\n address if 
the MAC address matches an entry\n \"\"\"\n pkt_eth = pkt.get_protocol(ethernet.ethernet)\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n if pkt_ip4:\n #*** Get the NIC identity table reference for the source\n #*** MAC address (if it exists):\n _nic_table_ref = self._get_nic_ref_by_MAC(pkt_eth.src)\n if _nic_table_ref:\n #*** Write the IP address to this table row:\n self._set_nic_record_add_IP4_addr(_nic_table_ref, pkt_ip4.src)\n else:\n self.logger.warning(\"Passed an IPv4 packet that did not parse\"\n \"properly\")\n return(0)\n\n def get_identity_nic_table(self):\n \"\"\"\n Return the Identity NIC table\n \"\"\"\n return self._nic_identity_table\n\n def get_identity_system_table(self):\n \"\"\"\n Return the Identity System table\n \"\"\"\n return self._sys_identity_table\n\n def get_augmented_fm_table(self, _flows):\n \"\"\"\n Return the flow metadata table augmented with\n appropriate identity metadata\n \"\"\"\n _result_dict = {}\n for idx in _flows:\n flow = _flows[idx]\n if 'ip_A' in flow:\n ip = flow['ip_A']\n #self.logger.debug(\"checking ip_A=%s\", ip)\n for ctx in self.id_ip:\n ip_ctx = self.id_ip[ctx]\n if ip:\n if ip in ip_ctx:\n ip_ctx_ip = ip_ctx[ip]\n #*** Found IP in id_ip, add any metadata to flow:\n if 'service' in ip_ctx_ip:\n flow['ip_A_services'] = ip_ctx_ip['service']\n if 'ip_B' in flow:\n ip = flow['ip_B']\n #self.logger.debug(\"checking ip_B=%s\", ip)\n for ctx in self.id_ip:\n ip_ctx = self.id_ip[ctx]\n if ip:\n if ip in ip_ctx:\n ip_ctx_ip = ip_ctx[ip]\n #*** Found IP in id_ip, add any metadata to flow:\n if 'service' in ip_ctx_ip:\n flow['ip_B_services'] = ip_ctx_ip['service']\n #*** Accumulate updated flows into results dict\n _result_dict[idx] = flow\n return _result_dict\n\n def maintain_identity_tables(self):\n \"\"\"\n Deletes old entries from Identity NIC and\n System tables.\n This function is passed maximum age values\n and deletes any entries in the\n tables that have a time_last that is\n older than that when compared to\n current time\n \"\"\"\n _time = time.time()\n _for_deletion = []\n for _table_ref in self._nic_identity_table:\n if self._nic_identity_table[_table_ref]['time_last']:\n _last = self._nic_identity_table[_table_ref]['time_last']\n if (_time - _last > self.max_age_nic):\n self.logger.debug(\"Deleting NIC\"\n \" table ref id=%s\", _table_ref)\n #*** Can't delete while iterating dictionary so just note\n #*** the table ref:\n _for_deletion.append(_table_ref)\n #*** Now iterate over the list of references to delete:\n for _del_ref in _for_deletion:\n del self._nic_identity_table[_del_ref]\n #*** Now do same for system identity table:\n _for_deletion = []\n for _table_ref in self._sys_identity_table:\n if self._sys_identity_table[_table_ref]['time_last']:\n _last = self._sys_identity_table[_table_ref]['time_last']\n if (_time - _last > self.max_age_sys):\n self.logger.debug(\"Deleting \"\n \"System table ref id=%s\", _table_ref)\n #*** Can't delete while iterating dictionary so just note\n #*** the table ref:\n _for_deletion.append(_table_ref)\n #*** Now iterate over the list of references to delete:\n for _del_ref in _for_deletion:\n del self._sys_identity_table[_del_ref]\n\n #*** Maintain the id_mac structure:\n _for_deletion = []\n self.logger.debug(\"Maintaining the id_mac structure\")\n for ctx in self.id_mac:\n mac_ctx = self.id_mac[ctx]\n for mac in mac_ctx:\n mac_ctx_mac = mac_ctx[mac]\n for ip in mac_ctx_mac['ip']:\n mac_ctx_mac_ip = mac_ctx_mac['ip'][ip]\n last_seen = mac_ctx_mac_ip['last_seen']\n #*** Has the ARP not been seen for 
more than max age?:\n if (last_seen + self.arp_max) < _time:\n #*** Mark for deletion:\n del_dict = {'ctx': ctx, 'mac': mac, 'ip': ip}\n _for_deletion.append(del_dict)\n age = _time - last_seen\n self.logger.debug(\"marking ARP ip=%s mac=%s age=%s \"\n \"seconds for deletion\", ip, mac, age)\n #*** Now iterate over the list of references to delete:\n for _del_ref in _for_deletion:\n ctx = _del_ref['ctx']\n mac = _del_ref['mac']\n ip = _del_ref['ip']\n del self.id_mac[ctx][mac]['ip'][ip]\n #*** TBD: check if that was the only IP for that MAC and if so\n #*** delete the MAC:\n if self.id_mac[ctx][mac]['ip'] == {}:\n del self.id_mac[ctx][mac]['ip']\n if self.id_mac[ctx][mac] == {}:\n del self.id_mac[ctx][mac]\n\n #*** Maintain the id_ip structure:\n _for_deletion = []\n self.logger.debug(\"Maintaining the id_ip structure\")\n for ctx in self.id_ip:\n ip_ctx = self.id_ip[ctx]\n for ip in ip_ctx:\n ip_ctx_ip = ip_ctx[ip]\n if 'service' in ip_ctx_ip:\n for service in ip_ctx_ip['service']:\n ip_ctx_ip_svc = ip_ctx_ip['service'][service]\n self.logger.debug(\"service is %s\", service)\n if ip_ctx_ip_svc['source'] == 'dns' or \\\n ip_ctx_ip_svc['source'] == 'dns_cname':\n self.logger.debug(\"source is dns or dns_cname\")\n last_seen = ip_ctx_ip_svc['last_seen']\n ttl = ip_ctx_ip_svc['ttl']\n if (last_seen + ttl) < _time:\n #*** Mark for deletion:\n del_dict = {'ctx': ctx, 'ip': ip,\n 'service': service}\n _for_deletion.append(del_dict)\n self.logger.debug(\"marking IP del_dict=%s \"\n \"for deletion\", del_dict)\n #*** Now iterate over the list of references to delete:\n for _del_ref in _for_deletion:\n ctx = _del_ref['ctx']\n ip = _del_ref['ip']\n service = _del_ref['service']\n del self.id_ip[ctx][ip]['service'][service]\n #*** also delete the IP address if no other services or other keys\n #*** exist:\n if self.id_ip[ctx][ip]['service'] == {}:\n del self.id_ip[ctx][ip]['service']\n if self.id_ip[ctx][ip] == {}:\n self.logger.debug(\"struct=id_ip deleting ip=%s\", ip)\n del self.id_ip[ctx][ip]\n\n def _get_sys_ref_by_chassisid(self, chassis_id_text):\n \"\"\"\n Passed a Chassis ID in text format and check to\n see if it already exists in the system identity table.\n If it does, return the table reference otherwise\n return 0\n \"\"\"\n for table_ref in self._sys_identity_table:\n if (chassis_id_text == self._sys_identity_table[table_ref] \\\n ['chassis_id']):\n return(table_ref)\n return(0)\n\n def _get_sys_ref_by_systemname(self, policy_attr, systemname):\n \"\"\"\n Passed a system name in text format and check to\n see if it already exists in the system identity table.\n If it does, return the table reference otherwise\n return 0\n \"\"\"\n if policy_attr == 'identity_lldp_systemname':\n for table_ref in self._sys_identity_table:\n if (systemname == self._sys_identity_table[table_ref] \\\n ['system_name']):\n return(table_ref)\n return(0)\n elif policy_attr == 'identity_lldp_systemname_re':\n for table_ref in self._sys_identity_table:\n if (re.match(systemname, self._sys_identity_table[table_ref] \\\n ['system_name'])):\n return(table_ref)\n return(0)\n else:\n return(0)\n\n def _get_nic_ref_by_MAC(self, mac_addr):\n \"\"\"\n Check for a matching NIC record in NIC identity table.\n Passed a MAC address\n Check if the MAC address is recorded in the\n table and if so, return the table reference.\n \"\"\"\n for table_ref in self._nic_identity_table:\n if (mac_addr == self._nic_identity_table[table_ref]['mac_addr']):\n self.logger.debug(\"Matched on nic table_ref id=%s\", table_ref)\n 
return(table_ref)\n return(0)\n\n def _get_nic_MAC_addr(self, table_ref):\n \"\"\"\n Check for existance of an IPv4 address in NIC identity table\n record as per passed reference. If an IPv4 address is recorded\n return it otherwise return 0.\n \"\"\"\n result = self._nic_identity_table[table_ref]['mac_addr']\n return(result)\n\n def _get_nic_ip4_addr(self, table_ref):\n \"\"\"\n Check for existence of an IPv4 address in NIC identity table\n record as per passed reference. If an IPv4 address is recorded\n return it otherwise return 0.\n \"\"\"\n result = self._nic_identity_table[table_ref]['ip4_addr']\n return(result)\n\n def _get_sys_nic_ref(self, sys_ref):\n \"\"\"\n Return reference to a NIC table (if it exists) from a\n system identity table entry, otherwise return 0\n \"\"\"\n result = self._sys_identity_table[sys_ref]['nic_table_ref']\n return(result)\n\n def _set_sys_record_new_chassisid(self, chassis_id_text, system_name, pkt,\n dpid, inport):\n \"\"\"\n Record a new system identity into the system identity table.\n Passed an LLDP Chassis ID in text format, an LLDP system name,\n a packet, a Data Path ID (dpid)\n and in port and write a row describing this identity into the\n system identity table. Check the NIC identity table and update\n this too if required.\n \"\"\"\n eth = pkt.get_protocol(ethernet.ethernet)\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n #*** Check to see if a NIC identity table record exists\n #*** and if not create one:\n _nic_table_ref = self._get_nic_ref_by_MAC(eth.src)\n if not _nic_table_ref:\n _nic_table_ref = self._set_nic_record_new(pkt, dpid, inport)\n #*** Write a new row into the system identity table:\n self._sys_identity_table[self._sys_id_ref] = \\\n {\n 'chassis_id' : chassis_id_text,\n 'system_name' : system_name,\n 'nic_table_ref' : _nic_table_ref,\n 'time_first' : time.time(),\n 'time_last' : time.time()\n }\n #*** Update the NIC table ref with a reference back to the system\n #*** identity table:\n self._set_nic_record_add_sys_ref(_nic_table_ref, self._sys_id_ref)\n self.logger.debug(\"Adding new sys identity table entry: %s ref: %s\",\n self._sys_identity_table[self._sys_id_ref],\n self._sys_id_ref)\n #*** increment table ref:\n self._sys_id_ref += 1\n\n def _set_nic_record_new(self, pkt, dpid, inport):\n \"\"\"\n Create a new NIC identity record and return\n the table reference\n \"\"\"\n eth = pkt.get_protocol(ethernet.ethernet)\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n #*** add the source MAC address:\n self._nic_identity_table[self._nic_id_ref]['mac_addr'] = eth.src\n #*** add the source IP (if we have one):\n if (pkt_ip4):\n self._nic_identity_table[self._nic_id_ref]['ip4_addr'] = pkt_ip4.src\n #*** add details about the switch port:\n self._nic_identity_table[self._nic_id_ref]['dpid'] = dpid\n self._nic_identity_table[self._nic_id_ref]['inport'] = inport\n #*** add timestamps:\n self._nic_identity_table[self._nic_id_ref]['time_first'] = time.time()\n self._nic_identity_table[self._nic_id_ref]['time_last'] = time.time()\n #*** record table ref:\n table_ref = self._nic_id_ref\n self.logger.debug(\"Adding new NIC identity table entry: %s ref: %s\",\n self._nic_identity_table[table_ref], table_ref)\n #*** increment table ref:\n self._nic_id_ref += 1\n #*** return a reference to the table row:\n return(table_ref)\n\n def _set_nic_record_add_sys_ref(self, nic_ref, sys_ref):\n \"\"\"\n Update an existing NIC identity record with a sys identity\n table reference\n \"\"\"\n self._nic_identity_table[nic_ref]['sys_ref'] = sys_ref\n 
self.logger.debug(\"Adding sys_ref: %s to nic_ref: %s\",\n sys_ref, nic_ref)\n #*** Update timestamp:\n self._nic_identity_table[nic_ref]['time_last'] = time.time()\n\n def _set_nic_record_add_IP4_addr(self, nic_ref, ip4_addr):\n \"\"\"\n Update an existing NIC identity record with an IPv4\n address\n \"\"\"\n self._nic_identity_table[nic_ref]['ip4_addr'] = ip4_addr\n self.logger.debug(\"Adding ip4_addr: %s to nic_ref: %s\", ip4_addr, nic_ref)\n #*** Update timestamp:\n self._nic_identity_table[nic_ref]['time_last'] = time.time()\n\n","sub_path":"tc_identity.py","file_name":"tc_identity.py","file_ext":"py","file_size_in_byte":31866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"542893459","text":"import PySimpleGUI as sg\nimport Gameplay as gp\nimport pygame\nfrom pygame import mixer\n\n\ndef playerTurn():\n if g.currentPlayer.getName() != \"AI\":\n event, values = setupWin.read()\n else:\n event, values = setupWin.read(timeout =900)\n event = g.currentPlayer.checkMoves(g)\n setupWin.close()\n return event, values\n\npygame.init()\n\ng = gp.GamePlay()\ng.loading()\nmixer.music.load(\"background.wav\")\nmixer.music.play(-1)\n\ng.gameSetup()\nsetupWin = sg.Window(\"CHOMP\", g.updateBoard())\nevent, value = playerTurn()\n\nwhile event != (1, 1):\n g.loading()\n g.getPlay(event)\n setupWin = sg.Window(\"CHOMP\", g.updateBoard())\n event, value = playerTurn()\n\nd = g.playAgain()\n\nwhile d and event != (1,1):\n g.loading()\n g.gameSetup()\n setupWin = sg.Window(\"CHOMP\", g.updateBoard())\n event, value = playerTurn()\n\n while event != (1, 1):\n g.loading()\n g.getPlay(event)\n setupWin = sg.Window(\"CHOMP\", g.updateBoard())\n\n event, value = playerTurn()\n\n d = g.playAgain()\n\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"13480448","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nimport splitfolders\nimport os\nfrom os import path\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom data.const import IMG_SIZE, BATCH_SIZE, CLASSES, SYS_PATH, STUDENT_ANNOTATIONS, NEW_DATASET_ANNOTATIONS\nfrom data.randaugment import Rand_Augment\n\nfrom PIL import Image\nimport numpy as np\n\nsys.path.append(SYS_PATH)\n\ndef loadTeacherDatabase():\n # loadLabels() #TODO #FIX\n train = pd.read_csv(r'../misc/train.csv', dtype=str, index_col=[0])\n test = pd.read_csv(r'../misc/test.csv', dtype=str, index_col=[0])\n\n train_labeled = train[train.label != 0]\n train_unlabeled = train[train.label == 0]\n\n return createGenerators()\n\ndef loadStudentDatabase():\n # loadLabels() #TODO #FIX\n train = pd.read_csv(r'../misc/'+STUDENT_ANNOTATIONS, dtype=str, index_col=[0])\n test = pd.read_csv(r'../misc/test.csv', dtype=str, index_col=[0])\n\n return createGenerators()\n\n\ndef loadNewDatabase():\n # loadLabels() #TODO #FIX\n # train = pd.read_csv(r'../misc/train'+NEW_DATASET_ANNOTATIONS, dtype=str, index_col=[0])\n # test = pd.read_csv(r'../misc/test.csv', dtype=str, index_col=[0])\n #\n # train.drop(train.filter(regex=\"Unname\"), axis=1, inplace=True)\n return createGenerators()\n\ndef loadDatabaseUnlabeled():\n train = pd.read_csv(r'../misc/train.csv', dtype=str, index_col=[0])\n train_unlabeled = train[train.label == '0']\n return createTestGenerator(train_unlabeled, False, False)\n\ndef loadTESTDatabase(tt = r'../misc/test.csv'):\n test = pd.read_csv(tt, 
dtype=str, index_col=[0])\n\n return createTESTGenerators(test)\n\ndef loadTESTDatabase2(n=10):\n test = pd.read_csv(r'../misc/test.csv', dtype=str, index_col=[0], nrows=n)\n\n return createTESTGenerators(test)\n\ndef createTestGenerator(test: pd.DataFrame, shuffle=False, to_fit=False):\n generator = ImageDataGenerator(rescale=1.0 / 255)\n\n test_generator = generator.flow_from_dataframe(\n dataframe=test,\n x_col=\"path\",\n y_col=None,\n # y_col=\"label\",\n shuffle=shuffle,\n # class_mode=\"categorical\",\n class_mode=None,\n target_size=(IMG_SIZE, IMG_SIZE),\n batch_size=BATCH_SIZE,\n classes=CLASSES,\n to_fit=to_fit\n )\n return test_generator\n\n\nrandaugment = Rand_Augment()\ndef preprocessing_function(image):\n\n image = Image.fromarray(image.astype(np.uint8))\n image = np.array(randaugment(image))\n return image.astype(np.float64)\n\ndef createGenerators():\n\n # main_dir = '/media/kenny/Extra/downloads/1mil/train_by_columns_new'\n # output_dir = '/media/kenny/Extra/downloads/1mil/train_by_columns_new_output'\n\n main_dir = '/media/kenny/Extra/downloads/1mil/train_cat_cols2/1/1_1'\n output_dir = '/media/kenny/Extra/downloads/1mil/train_cat_cols2/1/1_1_output'\n splitfolders.ratio(main_dir, output=output_dir, seed=1337, ratio=(.8, .2))\n\n\n train_generator = ImageDataGenerator(\n rescale=1.0 / 255,\n rotation_range=5,\n width_shift_range=0.1,\n height_shift_range=0.1,\n brightness_range=(0.75, 1),\n shear_range=0.1,\n zoom_range=[0.75, 1],\n horizontal_flip=True,\n )\n validation_generator = ImageDataGenerator(rescale=1.0 / 255) # except for rescaling, no augmentations are needed for validation and testing generators\n\n train_generator = train_generator.flow_from_directory(\n os.path.join(output_dir,'train'),\n shuffle=True,\n class_mode=\"categorical\",\n target_size=(IMG_SIZE, IMG_SIZE),\n batch_size=BATCH_SIZE\n )\n\n validation_generator = validation_generator.flow_from_directory(\n os.path.join(output_dir,'val'),\n shuffle=True,\n class_mode=\"categorical\",\n target_size=(IMG_SIZE, IMG_SIZE),\n batch_size=BATCH_SIZE\n )\n\n return train_generator, validation_generator\n\n\ndef createTESTGenerators(test: pd.DataFrame):\n test_generator = ImageDataGenerator(rescale=1.0 / 255)\n\n test_generator = test_generator.flow_from_dataframe(\n dataframe=test,\n x_col=\"path\",\n # y_col=None,\n y_col=\"label\",\n shuffle=False,\n class_mode=\"categorical\",\n # class_mode=None,\n target_size=(IMG_SIZE, IMG_SIZE),\n batch_size=BATCH_SIZE,\n classes=CLASSES,\n to_fit=False\n )\n return test_generator\n\n\n\n\ndef visualizeAugmentations(data_generator: ImageDataGenerator, df: pd.DataFrame):\n \"\"\"Visualizes the keras augmentations with matplotlib in 3x3 grid. 
This function is part of create_generators() and\n can be accessed from there.\n\n Parameters\n ----------\n data_generator : Iterator\n The keras data generator of your training data.\n df : pd.DataFrame\n The Pandas DataFrame containing your training data.\n \"\"\"\n # super hacky way of creating a small dataframe with one image\n series = df.iloc[2]\n\n print(\"!!AA\")\n print(series)\n\n # for index, row in series.iterrows():\n # print(row['path'], row['label'], path.exists(row['path']))\n\n print(series['path'], series['label'], path.exists(series['path']))\n\n df_augmentation_visualization = pd.concat([series, series], axis=1).transpose()\n\n iterator_visualizations = data_generator.flow_from_dataframe( # type: ignore\n dataframe=df_augmentation_visualization,\n x_col=\"path\",\n y_col=\"label\",\n # class_mode=\"raw\",\n target_size=(IMG_SIZE, IMG_SIZE), # size of the image\n batch_size=1, # use only one image for visualization\n )\n\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1) # create a 3x3 grid\n batch = next(iterator_visualizations) # get the next image of the generator (always the same image)\n img = batch[0] # type: ignore\n print('!!!! img:', img)\n print('!!!! img.shape:', img.shape)\n img = img[0, :, :, :] # remove one dimension for plotting without issues\n plt.imshow(img)\n plt.show()\n plt.close()\n\n\n# train, val, b5 = loadDatabase(True)","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91388127","text":"from __future__ import print_function\nimport sys\nimport os\nimport argparse\nimport random\n\nfrom data import VOCAnnotationTransform, VOCDetection, BaseTransform, VOC_ROOT, VOC_CLASSES, CUSTOM_CLASSES, MEANS\nfrom ssd import build_ssd\n\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport torch.utils.data as data\n\nparser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')\nparser.add_argument('--trained-model', dest='trained_model', default='weights/ssd_300_VOC0712.pth', type=str, help='Trained state_dict file path to open')\nparser.add_argument('--visual-threshold', dest='visual_threshold', default=0.6, type=float, help='Final confidence threshold')\nparser.add_argument('--voc-root', dest='voc_root', default=VOC_ROOT, help='Location of VOC root directory')\n\nparser.add_argument('--cuda', dest='cuda', action='store_true', help='Use CUDA to train model (default)')\nparser.add_argument('--no-cuda', dest='cuda', action='store_false', help='Do not use CUDA to train model')\nparser.set_defaults(cuda=True)\n\nparser.add_argument('--custom-voc', dest='use_custom', action='store_true', help='Use a custom VOC-like dataset')\nparser.add_argument('--standard-voc', dest='use_custom', action='store_false', help='Use the standard VOC dataset (default)')\nparser.set_defaults(use_custom=False)\n\nargs = parser.parse_args()\n\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't using \\\n CUDA. 
Run with --cuda for optimal eval speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\n\nif args.use_custom:\n from data import CUSTOM_CLASSES as labelmap\nelse:\n from data import VOC_CLASSES as labelmap\n\n\ndef test_random_img(net, cuda, testset, transform, thresh):\n i = random.randint(0, len(testset))\n img = testset.pull_image(i)\n height, width = img.shape[:2]\n img_id, annotation = testset.pull_anno(i)\n x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)\n\n img_copy = img.copy()\n\n print(f'GROUND TRUTH FOR: {img_id}')\n for box in annotation:\n print('label: '+' || '.join(str(b) for b in box[:4])+'\\n')\n\n cv2.rectangle(img_copy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 3)\n\n if cuda:\n x = x.cuda()\n\n x = Variable(x.unsqueeze(0))\n y = net(x) # forward pass\n detections = y.data\n\n # scale each detection back up to the image\n scale = torch.Tensor([width, height, width, height])\n\n i = 1\n j = 0\n score = detections[0, i, j, 0]\n pt = (detections[0, i, j, 1:] * scale).cpu().numpy()\n coords = (pt[0], pt[1], pt[2], pt[3])\n\n cv2.rectangle(img_copy, (int(pt[0]), int(pt[1])), (int(pt[2]), int(pt[3])), (255, 0, 0), 3)\n print('score: ' + str(score) + ' ' + ' || '.join(str(c) for c in coords))\n\n cv2.imshow('output', img_copy)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef test_voc():\n # load net\n num_classes = len(CUSTOM_CLASSES if args.use_custom else VOC_CLASSES) + 1 # +1 background\n net = build_ssd('test', 300, num_classes) # initialize SSD\n\n if args.cuda:\n net.load_state_dict(torch.load(args.trained_model, map_location=torch.device('cuda')))\n else:\n net.load_state_dict(torch.load(args.trained_model, map_location=torch.device('cpu')))\n\n net.eval()\n print('Finished loading model!')\n\n # load data\n if args.use_custom:\n custom_class_to_ind = dict(zip(CUSTOM_CLASSES, range(len(CUSTOM_CLASSES))))\n testset = VOCDetection(\n root=args.voc_root,\n image_sets=[('2019', 'test')],\n dataset_name='VOC2019',\n transform=BaseTransform(300, MEANS),\n target_transform=VOCAnnotationTransform(class_to_ind=custom_class_to_ind))\n else:\n testset = VOCDetection(\n root=args.voc_root,\n image_sets=[('2007', 'test')],\n dataset_name='VOC0712',\n transform=BaseTransform(300, MEANS),\n target_transform=VOCAnnotationTransform())\n\n if args.cuda:\n net = net.cuda()\n cudnn.benchmark = True\n\n # evaluation\n test_random_img(net, args.cuda, testset,\n BaseTransform(300, MEANS),\n thresh=args.visual_threshold)\n\nif __name__ == '__main__':\n test_voc()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"245278684","text":"\"\"\" utility for migrating github -> assembla \"\"\"\nimport argparse\nfrom datetime import datetime, timezone\nimport logging\nimport json\nimport string\nimport sys\nimport git\nimport pathlib\nfrom tabulate import tabulate\nimport requests\nimport time\nimport github\nimport re\nimport itertools\nimport colorama\nimport functools\n\n# Ensure colored output on win32 platforms\ncolorama.init()\n\n# Map Assembla field values to GitHub lables. 
The value 'None' indicates that\n# the field will be omitted.\nASSEMBLA_TO_GITHUB_LABELS = {\n 'status': {\n 'New': 'new',\n 'Accepted': 'accepted',\n 'Test': 'test',\n 'Invalid': 'invalid',\n 'Fixed': 'fixed',\n 'Duplicate': 'duplicate',\n 'WontFix': 'wontfix',\n 'WorksForMe': 'worksforme',\n },\n 'priority': {\n 'Highest (1)': 'p1',\n 'High (2)': 'p2',\n 'Normal (3)': 'p3',\n 'Low (4)': 'p4',\n 'Lowest (5)': 'p5',\n },\n 'tags': {\n 'osx': 'osx',\n 'linux': 'linux',\n 'docs': 'docs',\n 'windows': 'windows',\n 'git': 'git',\n 'qa': 'qa',\n },\n 'component': {\n },\n 'keywords': {\n }\n}\n\n# New GitHub labels to create. The value is the RGB hex color for that label.\n# For reference, GitHub comes with the following default labels:\n# 'bug': 'd73a4a',\n# 'documentation': '0075ca',\n# 'duplicate': 'cfd3d7',\n# 'enhancement': 'a2eeef',\n# 'good first issue': '7057ff',\n# 'help wanted': '008672',\n# 'invalid': 'e4e669',\n# 'question': 'd876e3',\n# 'wontfix': 'ffffff',\nNEW_GITHUB_LABELS = {\n 'accepted': '70aa23',\n 'worksforme': 'e6ed6d',\n}\n\n# User mapping from assembla to github\n# - login: Assembla user name. Used to match tickets \"assigned to\" fields\n# - name: Presented name. Used for wiki git commits and tickets (if no github id exists)\n# - email: Used for wiki git commits\n# - github: GitHub user name. Used for tickets as @mentions\nASSEMBLA_USERID = {\n}\n\n# Settings for Wiki conversions\nWIKI_FIXUP_AUTHOR_NAME = \"Wiki converter\"\nWIKI_FIXUP_AUTHOR_EMAIL = \"none@localhost\"\nWIKI_FIXUP_MESSAGE = \"Updated Wiki to GitHub formatting\"\nWIKI_UNKNOWN_EMAIL = \"none@localhost\"\n\n# URLs to replace when converting Wiki\nWIKI_URL_REPLACE = [\n ('https://www.assembla.com/spaces/portaudio/tickets/', '#'),\n ('https://app.assembla.com/spaces/portaudio/tickets/', '#'),\n ('https://app.assembla.com/spaces/portaudio/git/commits/', ''),\n]\n\nASSEMBLA_MILESTONES = []\nASSEMBLA_TICKETS = []\nASSEMBLA_TICKET_STATUSES = []\nASSEMBLA_TICKET_COMMENTS = []\nGITHUB_ISSUES = []\nGITHUB_USERS = []\nGITHUB_MILESTONES = []\n\n\nclass UnsetMeta(type):\n def __repr__(self):\n return \"\"\n\n\nclass Unset(metaclass=UnsetMeta):\n \"\"\" Unset class \"\"\"\n\n\n# Inheriting dict isn't recommended, but this is a small mixin so it is probably ok for this use\nclass DictPlus(dict):\n \"\"\" dict mixin class with extra convenience methods \"\"\"\n\n def find(self, table, id, default=Unset):\n if default is Unset:\n return self['_index'][table][id]\n return self['_index'][table].get(id, default)\n\n\ndef nameorid(user):\n \"\"\" Return the name or the id of the user \"\"\"\n return user.get('name', user.get('id'))\n\n\ndef githubuser(user):\n \"\"\" Return the github user if present, otherwise return the name or id \"\"\"\n if 'github' in user:\n return f\"@{user['github']}\"\n return user.get('name', user.get('id'))\n\n\ndef transpose(data, keys=None):\n \"\"\"\n Transpose the given dict.\n :param data: Dict indexed by id containing rows as values, where each row is\n a dictionary with columns as keys\n :param keys: List of keys to include in transpose\n :returns: Transposed dictionary. 
Dict indexed by keys/columns containing\n arrays of rows.\n \"\"\"\n if not data:\n return {}\n if not keys:\n keys = list(data[0])\n rawlist = [[v.get(k) for k in keys] for v in data]\n transposed = list(map(list, zip(*rawlist)))\n return {k: transposed[i] for i, k in enumerate(keys)}\n\n\ndef printtable(data, keys=None, exclude=None, include=None, filter=None, slice=None):\n \"\"\"\n Print the data formatted in tables.\n :param data: Dict or list containing rows.\n :param keys: List of keys to include in transpose\n :param exclude: List of keys to omit from output\n :param include: List of keys to include in output\n :param filter: Callback function fn(row) to filter rows to print\n :param slice: Pass a slice object to limit the number of lines\n \"\"\"\n if isinstance(data, dict):\n data = list(data.values())\n if filter:\n data = [v for v in data if filter(v)]\n if slice:\n data = data[slice]\n data = transpose(data, keys)\n if not exclude:\n exclude = []\n if not include:\n include = []\n for k in list(data.keys()):\n if k in include:\n continue\n if k in exclude or k.startswith('_'):\n del data[k]\n print(tabulate(data, headers=\"keys\"))\n\n\ndef mapjsonlinetoassembblaobject(jsonstring, fieldlist, linenum, linetype):\n \"\"\"\n converts json string -> dict\n :param jsonstring: string array \"['a', 123, ...]\"\n :param fieldlist: expected ordered list of fields expected in json array\n :param linenum: current line num\n :param linetype: for the error message report if needed. tells us the type of line we are trying to read\n :returns: a dict with the values from the jsonstring and the keys from the fieldlist\n \"\"\"\n logging.debug('attempting to parse line #{0} as a {1}'.format(linenum, linetype))\n arr = json.loads(jsonstring)\n if len(arr) != len(fieldlist):\n raise AssertionError('Assertion fail: {3} line [{0}] actual fields [{1}] != expected fields [{2}]'.format(linenum, len(arr), len(fieldlist), linetype))\n return {field: value for field, value in zip(fieldlist, arr)}\n\n\ndef findgithubobjectbyassemblaid(assemblaid, githubobjectcollection):\n \"\"\"\n :param assemblaid: the assembla id [#ID] assumed to be at the beginning of the title of the github object\n :param githubobjectcollection: the github objects to search\n :returns: return the first match or None\n \"\"\"\n return next(iter(filter(lambda x: x.title.startswith(assemblaid), githubobjectcollection)), None)\n\n\ndef filereadertoassemblaobjectgenerator(filereader, fieldmap):\n \"\"\"\n File reader to assembla object generator\n :param filereader: File object which is read line by line\n :returns: Generator which yields tuple (linenum, line, linetype, assemblaobject)\n \"\"\"\n\n # for each line determine the assembla object type, read all attributes to dict using the mappings\n # assign a key for each object which is used to link github <-> assembla objects to support updates\n for linenum, line in enumerate(filereader.readlines()):\n\n # Remove all non printable characters from the line\n _line = line.rstrip()\n line = ''.join(x for x in _line if x in string.printable)\n if line != _line:\n logging.debug(f\"line #{linenum}: Unprintable chars in '{line}'\")\n logging.debug(f\"line #{linenum}: {line}\")\n\n # Parse the field definition if present\n fields = line.split(':fields, ')\n if len(fields) > 2:\n logging.error(f\"line #{linenum}: Unexpected field count in '{line}'\")\n continue\n if len(fields) > 1:\n key = fields[0]\n fieldmap[key] = json.loads(fields[1])\n continue\n\n # Parse the table entry\n heading = 
line.split(', [')\n if len(heading) < 2:\n logging.error(f\"line #{linenum}: Unexpected syntax in '{line}'\")\n continue\n table = heading[0]\n if table not in fieldmap:\n logging.error(\"line #{linenum}: Table '{table}' not defined before '{line}'\")\n continue\n currentline = line.replace(table + ', ', '').strip()\n row = mapjsonlinetoassembblaobject(currentline, fieldmap[table], linenum, table)\n\n yield (linenum, line, table, row)\n\n\ndef indexassembladata(data, keymap):\n \"\"\"\n Convert each table in data dict from list of rows to dict indexed by key\n specified in keymap.\n :param data: Dict indexed by tablename containing list of rows\n :param keymap: A dict indexed by tablename containing the key field.\n :returns: Dict indexed by tablename containing a dict indexed by keys.\n \"\"\"\n\n # keymap[None] contains the default key field name\n default = keymap.get(None)\n\n index = {}\n for table, objects in data.items():\n\n # Get the key field name. If None, keep skip the table\n key = keymap.get(table, default)\n if key is None or table.startswith('_'):\n continue\n\n ids = [k[key] for k in objects]\n # if not ids: # Skip empty tables\n # continue\n if len(ids) != len(set(ids)):\n logging.warning(f\"Non unique id in table '{table}', {len(set(ids))} unique of {len(ids)} rows\")\n\n # Append the table data into a dict\n index[table] = {k[key]: k for k in objects}\n\n return index\n\n\ndef wikiparser(data):\n \"\"\"\n Parse the wiki tables\n :param data: assembla dataset\n :returns: A list of sorted wiki pages in presentation order\n \"\"\"\n\n # wiki_pages\n # ==========\n # change_comment, contents, created_at, id, page_name, parent_id, position, space_id, status,\n # updated_at, user_id, version, wiki_format\n wikitree = {}\n for v in data['wiki_pages']:\n\n # Add the reference to the parent and children\n # v['_parent'] = data.find('wiki_pages', v['parent_id'], None)\n v.setdefault('_children', [])\n\n # Add the reference to the user\n v['_user'] = data.find('_users', v['user_id'])\n\n # Convert dates\n v['_created_at'] = datetime.fromisoformat(v['created_at'])\n v['_updated_at'] = datetime.fromisoformat(v['updated_at'])\n\n # Append element to the wiki directory list\n parent = v['parent_id']\n wikitree.setdefault(parent, [])\n wikitree[parent].append(v)\n\n if parent:\n # Link parent to child list and increse the level on this row\n parentobj = data.find('wiki_pages', parent)\n parentobj['_children'] = wikitree[parent]\n v['_level'] = parentobj.get('_level', 0) + 1\n else:\n v['_level'] = 0\n\n # DEBUG\n # printtable(data['wiki_pages'], include=('_level', ))\n\n # wiki_page_blobs\n # ===============\n # blob_id, version_id\n\n # wiki_page_versions\n # ==================\n # change_comment, contents, created_at, id, updated_at, user_id, version, wiki_page_id\n for v in data['wiki_page_versions']:\n\n # Add reference to the blob\n # v['_blob_id'] = data.find('wiki_page_blobs', v['id']).get('blob_id')\n\n # Add reference to the wiki page object\n v['_wiki_page'] = data.find('wiki_pages', v['wiki_page_id'])\n\n # Add the user\n v['_user'] = data.find('_users', v['user_id'])\n\n # Convert dates\n v['_created_at'] = datetime.fromisoformat(v['created_at'])\n v['_updated_at'] = datetime.fromisoformat(v['updated_at'])\n\n # DEBUG\n # printtable(data['wiki_page_versions'], include=('_blob_id', ))\n\n def _wikitraverse(tree):\n \"\"\" Generator to produce all wiki pages in order from top to bottom \"\"\"\n for v in sorted(tree, key=lambda v: v['position']):\n yield v\n if '_children' in 
v:\n yield from _wikitraverse(v['_children'])\n\n return list(_wikitraverse(wikitree[None]))\n\n\ndef mergewikidata(wikidata, wiki_page_versions):\n \"\"\"\n Merge incoming wikidata with the main data dict\n :param wikidata: imported wiki page dataset from file fetched with wikidump\n :param wiki_page_versions: dict of all wiki page version which the data will be\n inserted into.\n \"\"\"\n\n # Data is arranged as [PAGE1,PAGE2,...] where PAGE is [VER1,VER2,...]\n # which itertools.chain() will flatten\n count = 0\n for v in itertools.chain(*wikidata):\n count += 1\n # Get the corresponding wiki page data from the dump\n w = wiki_page_versions.get(v['id'])\n if not w:\n logging.warning(f\"Skipping wiki page '{v['id']}'. Not found in main dump file\")\n continue\n\n # Ensure the data contains the same keys\n vkeys = set(v.keys())\n wkeys = set(w.keys())\n\n k = vkeys.difference(wkeys)\n if k:\n logging.warning(f\"Wiki page '{v['id']}' contains keys not in main dump file {k}\")\n k = wkeys.difference(vkeys)\n if k:\n logging.warning(f\"Wiki page '{v['id']}' missing keys {k}\")\n\n for k in v:\n if k in ('contents', ):\n continue\n left, right = v[k], w[k]\n if k in ('created_at', 'updated_at'):\n if left.endswith('Z'):\n left = left[:-1] + '+00:00'\n if left != right:\n logging.warning(f\"Difference in key '{k}' for '{v['id']}': '{left}' vs '{right}'\")\n\n # Get the page contents\n contents = v.get('contents')\n if not contents:\n logging.warning(f\"Wiki page '{v['id']}' missing 'contents'\")\n continue\n\n # Update the wiki page data\n w['contents'] = contents\n w['_merged'] = True\n\n # Print all pages that have missing data after load\n missing = [v['id'] for v in wiki_page_versions.values() if '_merged' not in v]\n if missing:\n logging.warning(f\"Missing wiki contents data for {missing}\")\n\n logging.info(f\" Found {count} wiki page entries\")\n\n\ndef wikicommitgenerator(wikiversions, order):\n \"\"\"\n A generator producing a dict of git commits data containing wiki edits\n \"\"\"\n\n # Collect all the latest current versions of the wiki pages\n pages = {}\n missing_authors = set()\n\n for v in sorted(wikiversions, key=lambda v: v['_updated_at']):\n p = v['_wiki_page']\n now = v['_updated_at']\n\n # Make ordered list of wiki pages that are present at this time\n indexpages = filter(lambda w: w['_created_at'] <= now and w['status'] == 1, order)\n\n fname = p['page_name'] + '.md'\n author = v['_user']\n\n # Warn if we don't have the data for the user\n if v['user_id'] not in missing_authors and (not author.get('name') or not author.get('email')):\n logging.warning(f\"Missing name or email for user '{v['user_id']}'\")\n missing_authors.add(v['user_id'])\n\n pages[fname] = v['contents'] or None\n\n yield {\n 'name': p['page_name'] + ':' + str(v['version']),\n 'files': {\n '_Sidebar.md': wikiindexproducer(indexpages),\n fname: v['contents'] or None,\n },\n 'author_name': nameorid(author),\n 'author_email': author.get('email', WIKI_UNKNOWN_EMAIL),\n 'message': v['change_comment'] or '',\n 'date': now,\n }\n\n # Convert the repo to GitHub format\n page_names = set(v['page_name'] for v in order)\n files = {}\n for k, v in pages.items():\n if not v:\n continue\n logging.debug(f\"Migrating page '{k}'\")\n contents = migratetexttomd(v, k, page_names)\n if contents == v:\n continue\n files[k] = contents\n\n if files:\n yield {\n 'name': 'ALL',\n 'files': files,\n 'author_name': WIKI_FIXUP_AUTHOR_NAME,\n 'author_email': WIKI_FIXUP_AUTHOR_EMAIL,\n 'message': WIKI_FIXUP_MESSAGE,\n 'date': 
datetime.now().replace(microsecond=0),\n }\n\n\ndef wikiindexproducer(index):\n \"\"\" Produce the index menu \"\"\"\n\n out = '''# PortAudio\n\n'''\n for v in index:\n out += (' ' * v['_level']) + f\"* [[{v['page_name']}]]\\n\"\n return out\n\n\ndef scrapeusers(data):\n \"\"\"\n Find all users reference in all tables\n \"\"\"\n\n # Copy the predefined user database\n users = {k: v.copy() for k, v in ASSEMBLA_USERID.items()}\n\n for table, entries in data.items():\n if table.startswith('_'):\n continue\n for v in entries:\n for t in ('user_id', 'created_by', 'updated_by', 'reporter_id', 'assigned_to_id'):\n if t in v:\n uid = v[t]\n if not uid:\n continue\n u = users.setdefault(uid, {})\n u.setdefault('id', uid)\n u.setdefault('tables', set())\n u['tables'].add(table)\n\n return users\n\n\ndef mergeuserdata(userdata, users):\n \"\"\"\n Merge incoming user data with the main data dict\n :param userdata: imported user data from file fetched with userdump\n :param users: dict of all users which the imported data will update\n \"\"\"\n\n count = 0\n for v in userdata:\n count += 1\n w = users.get(v['id'])\n if not w:\n logging.warning(f\"Skipping user '{v['id']}'. Not mentioned in main dump file\")\n continue\n\n # The redacted emails in file will interfere with preset emails. Its better to remove\n # it altogether\n if v.get('email') == 'name@domain':\n del v['email']\n\n w.update(v)\n w['_merged'] = True\n\n missing = [v['id'] for v in users.values() if '_merged' not in v]\n if missing:\n logging.warning(f\"Missing user data for {missing}\")\n\n logging.info(f\" Found {count} user entries\")\n\n\n# To find old lists. Variants:\n# # List\nRE_LIST = re.compile(r'^# (.*)$', re.M)\n\n# To find '** line'\nRE_LIST2 = re.compile(r'^\\*\\*([^\\*]*)$', re.M)\n\n# To find '** line'\nRE_LIST3 = re.compile(r'^([ \\t]*)\\*\\*([^\\*]*)$', re.M)\n\ndef sub_list2(m):\n print(f\"MATCH: xxx[{m[0]}]xxx\")\n return m[0]\n\n# To find old headers. Variants:\n# .h1 Title .h2 Title\nRE_HEADING = re.compile(r'^h(\\d). ', re.M)\n\n# To find !text!. Variants:\n# ! blocks\nRE_PRECODE = re.compile(r'
<pre><code>(.*?)</code></pre>', re.M|re.S)\n\n# To find <pre> blocks\nRE_PRE = re.compile(r'<pre>(.*?)</pre>
', re.M|re.S)\n\ndef sub_preformat(m):\n \"\"\" Substitute as preformatted text \"\"\"\n line = m[1].splitlines()\n pre = '\\n' if line[0] else ''\n post = '\\n' if m[1][-1] not in ('\\n\\r') else ''\n return f\"```{pre}{m[1]}{post}```\"\n\n# To find table headers (|_. col1 |_. col2 |_. ... |)\n# 1=pre indent, 2=headers excluding opening '|_.' and closing '|'\nRE_TABLEHEADER = re.compile(r'^([ \\t]+)?\\|_\\.(.*?)\\|\\s*$', re.M)\n\ndef sub_tableheader(m):\n \"\"\" Substitute table header format \"\"\"\n columns = m[2].split('|_.')\n return f'| {\" | \".join([c.strip() for c in columns])} |\\n|{\" --- |\" * len(columns)}'\n\n# Find whole table (indicated by lines of |something|)\nRE_TABLE = re.compile(r'(^[ \\t]*\\|.*\\|[ \\t]*$\\n)+', re.M)\n\ndef sub_tableaddheader(m):\n \"\"\" Ensure table has header \"\"\"\n if '| --- |' in m[0]:\n return m[0]\n lines = m[0].split('\\n')\n columns = len(lines[0].split('|')) - 2\n return f'|{\" |\"*columns}\\n|{\" --- |\"*columns}\\n{m[0]}'\n\n# To find [[links]]. Variants:\n# [[Wiki]] [[Wiki|Printed name]] [[url:someurl]] [[url:someurl|Printed name]]\n# 1=pre indent, 2=prefix:, 3=first group, 4=| second group, 5=second group\nRE_LINK = re.compile(r'(^[ \\t]+)?\\[\\[(\\w+?:)?(.+?)(\\|(.+?))?\\]\\]', re.M)\n\ndef sub_link(m, page_names, ref):\n \"\"\" Subsitute [[link]] blocks with MD links \"\"\"\n m3 = m[3]\n if not m[2]:\n # Is a wiki link (no prefix:)\n # Special fixups\n if m3 == 'tips/index':\n m3 = 'Tips'\n if m3 == 'platforms/index':\n m3 = 'Platforms'\n if m3 not in page_names:\n logging.warning(f\"{ref}: Wiki links to unknown page '{m3.strip()}'\")\n if not m[5]:\n # Bare wiki link\n return f\"[[{m3}]]\"\n # Wiki link with name\n return f\"[[{m[5].strip()}|{m3}]]\"\n if m[2] == 'url:':\n # Plain link\n if not m[5]:\n return m3\n # Assembla git reference\n #url = 'https://app.assembla.com/spaces/portaudio/git/commits/'\n #if m3.startswith(url):\n # m3 = m3.replace(url, '')\n # return m3\n # Link with name\n return f\"[{m[5].strip()}]({m3})\"\n if m[2] in ('http:', 'https:'):\n return f\"[{m[5].strip()}]({m[2]}{m3})\"\n # Fallthrough\n logging.warning(f\"{ref}: Unknown wiki link '{m[2]}'\")\n return f\"[[{m[2] or ''}{m3 or ''}{m[4] or ''}]]\"\n\n# To find URLs\nRE_URL = re.compile(r'\\b(http|https)://([\\w\\.]+)([\\w\\./]*)')\n\n\ndef migratetexttomd(text, ref, page_names):\n if not text:\n return text\n\n # Convert to unix line endings\n text = \"\\n\".join(text.splitlines())\n\n # Replace # lines with bullet points\n text = RE_LIST.sub(lambda m: '* ' + m[1], text)\n\n # Replace '** line' with ' * line'\n text = RE_LIST2.sub(lambda m: ' * ' + m[1], text)\n\n # Replace ' ** line' with ' * line'\n text = RE_LIST3.sub(lambda m: m[1] + '* ' + m[2], text)\n\n # Replacing .h1 .h2 headers\n text = RE_HEADING.sub(lambda m: '#' * int(m[1]) + ' ', text)\n\n # Replacing !image!\n text = RE_IMAGE.sub(lambda m: f'![{m[2].split(\"/\")[-1]}]({m[2]})', text)\n\n # Replacing @quote@\n text = RE_QUOTE.sub(lambda m: f'`{m[1]}`', text)\n\n # Replacing
<pre><code> text </code></pre>\n    text = RE_PRECODE.sub(sub_preformat, text)\n\n    # Replacing <pre> text </pre>\n    text = RE_PRE.sub(sub_preformat, text)\n\n    # Replacing <hr>\n    text = text.replace('<hr>
', '---')\n\n # Replace table headers\n text = RE_TABLEHEADER.sub(sub_tableheader, text)\n\n # Ensure tables have table headers\n text = RE_TABLE.sub(sub_tableaddheader, text)\n\n # Replacing [[links]]\n text = RE_LINK.sub(functools.partial(sub_link, ref=ref, page_names=page_names), text)\n\n # Replace URLs in list\n for a, b in WIKI_URL_REPLACE:\n text = text.replace(a, b)\n\n # Inform about remaining assembla links\n for m in RE_URL.finditer(text):\n if 'assembla' not in m[2]:\n continue\n logging.warning(f\"{ref}: Link to Assembla: '{m[0]}'\")\n\n return text\n\n\ndef check_config(auth, parser, required):\n\n # Ensure we have auth data and the fields needed\n if not auth:\n parser.error(\"Authentication config --auth is required\")\n missing = [\n k for k in required\n if k not in auth or not auth[k] or (auth[k].startswith('**') and auth[k].endswith('**'))\n ]\n if missing:\n parser.error(f\"Missing auth fields: {' '.join(missing)}\")\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\" Logger for formatting colored console output \"\"\"\n def format(self, record):\n # Replace the original format with one customized by logging level\n self._style._fmt = {\n logging.ERROR: f'{colorama.Fore.RED}%(levelname)s:{colorama.Style.RESET_ALL} %(msg)s',\n logging.WARNING: f'{colorama.Fore.YELLOW}%(levelname)s:{colorama.Style.RESET_ALL} %(msg)s',\n }.get(record.levelno, '%(levelname)s: %(msg)s')\n return super().format(record)\n\n\n# -----------------------------------------------------------------------------\n# MAIN\n#\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--verbose', '-v', action=\"count\", default=0, help='verbose logging')\n parser.add_argument('--dumpfile', '-f', metavar=\"FILE\", required=True, help='assembla dumpfile')\n parser.add_argument('--wikidump', '-w', metavar=\"FILE\", help=\"wiki dumpfile\")\n parser.add_argument('--userdump', '-u', metavar=\"FILE\", help=\"user dumpfile\")\n parser.add_argument('--auth', '-a', help='Authentication config')\n subparser = parser.add_subparsers(dest=\"command\", required=True, title=\"command\", help=\"Command to execute\")\n\n subcmd = subparser.add_parser('dump', help=\"Dump assembla tables\")\n subcmd.add_argument('table', nargs='?', help=\"Table to dump\")\n subcmd.add_argument('--headers', action=\"store_true\", help=\"Dump header fields\")\n subcmd.add_argument('--include', '-i', action=\"append\", help=\"Fields to include\")\n subcmd.add_argument('--exclude', '-x', action=\"append\", help=\"Fields to exclude\")\n subcmd.add_argument('--limit', '-l', type=int, help=\"Limit the number of lines\")\n subcmd.set_defaults(func=cmd_dump)\n\n subcmd = subparser.add_parser('lsusers', help=\"List users\")\n subcmd.add_argument('--table', '-t', action=\"append\", help=\"Show only users from this table\")\n subcmd.set_defaults(func=cmd_lsusers)\n\n subcmd = subparser.add_parser('lswiki', help=\"List wiki pages\")\n subcmd.add_argument('--changes', action=\"store_true\", help=\"Show page changes\")\n subcmd.set_defaults(func=cmd_lswiki)\n\n subcmd = subparser.add_parser('userscrape', help=\"Scrape users from Assembla\")\n subcmd.add_argument('dump', help=\"Output file to store users scrape\")\n subcmd.set_defaults(func=cmd_userscrape)\n\n subcmd = subparser.add_parser('wikiconvert', help=\"Convert to GitHub wiki repo\")\n subcmd.add_argument('repo', help='cloned git wiki repo directory')\n subcmd.add_argument('--dry-run', '-n', action=\"store_true\", help=\"Do not commit any data\")\n 
subcmd.add_argument('--no-convert', action=\"store_true\", help=\"Do not commit conversion change\")\n subcmd.set_defaults(func=cmd_wikiconvert)\n\n subcmd = subparser.add_parser('wikiscrape', help=\"Scrape wiki from Assembla\")\n subcmd.add_argument('dump', help=\"Output file to store wiki scrape\")\n subcmd.set_defaults(func=cmd_wikiscrape)\n\n runoptions = parser.parse_args()\n\n # log to stdout\n logging_level = logging.DEBUG if runoptions.verbose > 1 else logging.INFO\n root = logging.getLogger()\n root.setLevel(logging_level)\n channel = logging.StreamHandler(sys.stdout)\n channel.setLevel(logging_level)\n # channel.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n channel.setFormatter(ColorFormatter())\n root.addHandler(channel)\n\n # -------------------------------------------------------------------------\n # Read auth file\n\n auth = {}\n if runoptions.auth:\n logging.info(f\"Reading authentication data from '{runoptions.auth}'\")\n with open(runoptions.auth, 'r') as f:\n auth = json.load(f)\n\n # -------------------------------------------------------------------------\n # Read the dump file\n\n logging.info(f\"Parsing dumpfile '{runoptions.dumpfile}'\")\n with open(runoptions.dumpfile, encoding='utf8') as filereader:\n data = DictPlus()\n tablefields = {}\n\n # for each line determine the assembla object type, read all attributes to dict using the mappings\n # assign a key for each object which is used to link github <-> assembla objects to support updates\n for linenum, line, table, row in filereadertoassemblaobjectgenerator(filereader, tablefields):\n\n # Collect the file data\n data.setdefault(table, [])\n data.get(table).append(row)\n\n logging.info(f\" Parsed {linenum} lines\")\n\n # -------------------------------------------------------------------------\n # Index the data\n\n logging.info(\"Indexing the data\")\n\n # Store the fields for the tables\n data['_fields'] = tablefields\n\n # Convert table list to dicts indexed by key using keymap\n data['_index'] = indexassembladata(data, {\n\n # None key specified index key for all unlisted tables.\n # None: 'id',\n\n # Tables to index\n 'wiki_pages': 'id',\n 'milestones': 'id',\n 'ticket_statuses': 'id',\n 'workflow_property_defs': 'id',\n 'wiki_page_versions': 'id',\n })\n\n # -------------------------------------------------------------------------\n # Read the wiki dump data\n\n if runoptions.wikidump:\n\n logging.info(f\"Parsing wiki dumpfile '{runoptions.wikidump}'\")\n\n with open(runoptions.wikidump, encoding='utf8') as filereader:\n wikidata = json.load(filereader)\n\n # Merge the file data with the main assembla database\n mergewikidata(wikidata, data['_index']['wiki_page_versions'])\n\n # -------------------------------------------------------------------------\n # UserID scrape\n\n logging.info(\"Scraping for user IDs\")\n\n users = scrapeusers(data)\n data[\"_index\"][\"_users\"] = users\n data[\"_users\"] = list(users.values())\n\n # -------------------------------------------------------------------------\n # Read the user dump data\n\n if runoptions.userdump:\n\n logging.info(f\"Parsing user dumpfile '{runoptions.userdump}'\")\n\n with open(runoptions.userdump, encoding='utf8') as filereader:\n userdata = json.load(filereader)\n\n # Merge the file data with the main assembla database\n mergeuserdata(userdata, data['_index']['_users'])\n\n # -------------------------------------------------------------------------\n # Run the command\n\n # Set the verbosity\n logging_level = logging.DEBUG if 
runoptions.verbose else logging.INFO\n root.setLevel(logging_level)\n channel.setLevel(logging_level)\n\n logging.info(f\"Executing command '{runoptions.command}'\")\n runoptions.func(parser, runoptions, auth, data)\n\n\n# -----------------------------------------------------------------------------\n# Dump table command\ndef cmd_dump(parser, runoptions, auth, data):\n\n if not runoptions.table:\n\n tables = sorted(data.keys())\n if runoptions.headers:\n print(\"Assembla table fields:\")\n headers = [\n {\n 'table': t,\n 'fields': sorted(data['_fields'].get(t, [])),\n }\n for t in tables\n ]\n printtable(headers)\n return\n\n print(\"Assembla tables:\")\n printtable([{'table': t} for t in tables])\n return\n\n table = data.get(runoptions.table)\n if not table:\n parser.error(f\"No such table: '{runoptions.table}'\")\n\n srange = None\n if runoptions.limit:\n srange = slice(0, runoptions.limit)\n\n print(f\"Table '{runoptions.table}':\")\n printtable(table, include=runoptions.include, exclude=runoptions.exclude, slice=srange)\n\n\n# -----------------------------------------------------------------------------\n# Print users\ndef cmd_lsusers(parser, runoptions, auth, data):\n users = data[\"_index\"][\"_users\"]\n tables = set(runoptions.table or [])\n if runoptions.table:\n logging.info(f\"Showing users present in tables: {' '.join(tables)}\")\n users = list(filter(lambda v: any(v['tables'].intersection(tables)), users.values()))\n\n printtable(users, exclude=('tables', ))\n\n\n# -----------------------------------------------------------------------------\n# User scrape from Assembla\ndef cmd_userscrape(parser, runoptions, auth, data):\n\n # Check for required auth fields\n check_config(auth, parser, ('assembla_key', 'assembla_secret'))\n\n headers = {\n 'X-Api-Key': auth['assembla_key'],\n 'X-Api-Secret': auth['assembla_secret'],\n }\n\n # Fetch all user info\n out = []\n for v in data[\"_index\"][\"_users\"].values():\n\n # Brute force to ensure to not hit any rate limits\n time.sleep(0.1)\n\n logging.info(f\"Fetching user '{v['id']}'\")\n\n req = requests.get(\n f\"https://api.assembla.com/v1/users/{v['id']}.json\",\n headers=headers,\n )\n if req.status_code != 200:\n logging.error(f\" Failed to fetch: Error code {req.status_code}\")\n continue\n jsdata = req.json()\n\n out.append(jsdata)\n\n # Save the entries to disk\n with open(runoptions.dump, 'w') as f:\n json.dump(out, f)\n\n\n# -----------------------------------------------------------------------------\n# List wiki pages\ndef cmd_lswiki(parser, runoptions, auth, data):\n\n # Parse the wiki entries (making rich additions to objects in data) and\n # return the order of wiki pages\n wikiorder = wikiparser(data)\n\n if not runoptions.changes:\n printtable(wikiorder, exclude=('space_id', 'contents'))\n else:\n printtable(data['wiki_page_versions'], exclude=('contents',))\n\n\n# -----------------------------------------------------------------------------\n# WIKI scrape from Assembla\ndef cmd_wikiscrape(parser, runoptions, auth, data):\n\n # Check for required auth fields\n check_config(auth, parser, ('assembla_key', 'assembla_secret'))\n\n headers = {\n 'X-Api-Key': auth['assembla_key'],\n 'X-Api-Secret': auth['assembla_secret'],\n }\n\n # Parse the wiki entries (making rich additions to objects in data) and\n # return the order of wiki pages\n wikiorder = wikiparser(data)\n\n # Fetch all wiki pages\n out = []\n for v in wikiorder:\n\n # Brute force to ensure to not hit any rate limits\n time.sleep(0.1)\n\n logging.info(f\"Fetching 
wiki page '{v['page_name']}'\")\n\n req = requests.get(\n f\"https://api.assembla.com/v1/spaces/{v['space_id']}/wiki_pages/{v['id']}/versions.json?per_page=40\",\n headers=headers,\n )\n if req.status_code != 200:\n logging.error(f\" Failed to fetch: Error code {req.status_code}\")\n continue\n jsdata = req.json()\n\n out.append(jsdata)\n\n # Save the entries to disk\n logging.info(f\"Saving wiki scrape data in '{runoptions.dump}'\")\n with open(runoptions.dump, 'w') as f:\n json.dump(out, f)\n\n\n# -----------------------------------------------------------------------------\n# WIKI conversion\ndef cmd_wikiconvert(parser, runoptions, auth, data):\n\n live = not runoptions.dry_run\n\n # Check arguments\n wikirepo = pathlib.Path(runoptions.repo)\n if not wikirepo.is_dir():\n parser.error(f\"{str(wikirepo)}: Not a directory\")\n\n # Open git repo\n repo = git.Repo(wikirepo)\n workdir = pathlib.Path(repo.working_tree_dir)\n\n # Parse the wiki entries (making rich additions to objects in data) and\n # return the order of wiki pages\n wikiorder = wikiparser(data)\n\n # DEBUG\n # printtable(wikiorder, include=('_level', ))\n\n # Iterate over each wiki page version in order from old to new and get\n # the data required for git commit\n for commit in wikicommitgenerator(data['wiki_page_versions'], wikiorder):\n\n logging.debug(f\"Converting page '{commit['name']}'\")\n\n files = []\n for name, contents in commit['files'].items():\n if not contents:\n logging.warning(f\"Missing page data for {commit['name']}\")\n continue\n fname = pathlib.Path(workdir, name)\n fname.write_bytes(contents.encode())\n files.append(str(fname))\n\n # Add the files\n repo.index.add(files)\n\n actor = git.Actor(commit['author_name'], commit['author_email'])\n date = commit['date'].astimezone(timezone.utc).replace(tzinfo=None).isoformat()\n\n # Skip commit of convert if --no-convert is used\n if commit['name'] == 'ALL' and runoptions.no_convert:\n continue\n\n if live:\n repo.index.commit(\n commit['message'],\n author=actor,\n author_date=date,\n committer=actor,\n commit_date=date,\n )\n\n\n# -----------------------------------------------------------------------------\n# Tickets conversion\ndef cmd_tickets(parser, runoptions, auth, data):\n\n # Check for required auth fields\n check_config(auth, parser, ('username', 'password'))\n\n # Parse the dump file data\n for milestone in data['milestones']:\n milestone['githubtitle'] = '[#{0}] - {1}'.format(milestone['id'], milestone['title'])\n milestone['assemblakey'] = '[#{0}]'.format(milestone['id'])\n ASSEMBLA_MILESTONES.append(milestone)\n\n for ticket in data['tickets']:\n ticket['githubtitle'] = '[#{0}] - {1}'.format(ticket['number'], ticket['summary'])\n ticket['assemblakey'] = '[#{0}]'.format(ticket['number'])\n ASSEMBLA_TICKETS.append(ticket)\n\n for ticketstatus in data['ticket_status']:\n ticketstatus['githubtitle'] = '[#{0}] - {1}'.format(ticketstatus['id'], ticketstatus['name'])\n ticketstatus['assemblakey'] = '[#{0}]'.format(ticketstatus['id'])\n ASSEMBLA_TICKET_STATUSES.append(ticketstatus)\n\n for ticketcomment in data['ticket_comments']:\n ticketcomment['assemblakey'] = '[#{0}]'.format(ticketcomment['id'])\n ticketcomment['createdate'] = datetime.fromisoformat(ticketcomment['created_on']).strftime('%Y-%m-%d %H:%M')\n ASSEMBLA_TICKET_COMMENTS.append(ticketcomment)\n\n # establish github connection\n ghub = github.Github(auth['username'], auth['password'])\n\n repo = ghub.get_repo(runoptions.repo)\n GITHUB_ISSUES = [x for x in repo.get_issues()]\n 
GITHUB_MILESTONES = [x for x in repo.get_milestones()]\n GITHUB_USERS = [x for x in repo.get_collaborators()]\n\n logging.info('Refreshing milestones->milestones...')\n for assemblamilestone in ASSEMBLA_MILESTONES:\n githubmilestone = findgithubobjectbyassemblaid(assemblamilestone['assemblakey'], GITHUB_MILESTONES)\n if not githubmilestone:\n logging.info('creating milestone: [{0}]'.format(assemblamilestone['githubtitle']))\n githubmilestone = repo.create_milestone(assemblamilestone['githubtitle'])\n else:\n logging.info('found existing milestone [{0}]'.format(assemblamilestone['githubtitle']))\n githubmilestone.edit(assemblamilestone['githubtitle'], description=assemblamilestone['description'])\n GITHUB_MILESTONES = repo.get_milestones()\n\n logging.info('Refreshing tickets->issues...')\n for assemblaticket in ASSEMBLA_TICKETS:\n assemblakey = assemblaticket['assemblakey']\n logging.info('Working on assembla ticket #{0}'.format(assemblakey))\n githubissue = findgithubobjectbyassemblaid(assemblakey, GITHUB_ISSUES)\n\n # create or find github issue using assembla key\n if not githubissue:\n logging.debug('Creating new issue: [{0}]'.format(assemblakey))\n githubissue = repo.create_issue(assemblaticket['githubtitle'], body=(assemblaticket['description'] or '(no description)'))\n else:\n logging.debug('Found existing issue: [{0}]'.format(assemblaticket['githubtitle']))\n\n logging.debug('Attempting to locate the milestone for assembla ticket #{0}'.format(assemblakey))\n assemblamilestone = next(iter(filter(lambda x: x['id'] == assemblaticket['milestone_id'], ASSEMBLA_MILESTONES)), None)\n\n # create or find github milestone using assembla key\n if assemblamilestone:\n logging.debug('Found assembla milestone for assembla ticket #{0}. Finding associated milestone.'.format(assemblakey))\n githubmilestone = findgithubobjectbyassemblaid(assemblamilestone['assemblakey'], GITHUB_MILESTONES) or github.GithubObject.NotSet\n\n logging.debug('Attempting to locate ticket status for assembla ticket #{0}'.format(assemblakey))\n assemblaticketstatus = next(iter(filter(lambda x: x['id'] == assemblaticket['ticket_status_id'], ASSEMBLA_TICKET_STATUSES)))\n githubissuestatus = ASSEMBLA_TICKET_STATUS_TO_GITHUB_ISSUE_STATUS.get(assemblaticketstatus['name'], 'open')\n\n logging.debug('Attempting to locate assigned user for assembla ticket #{0}'.format(assemblakey))\n githubuserid = ASSEMBLA_USERID_TO_GITHUB_USERID.get(assemblaticket['assigned_to_id'], None)\n githubuser = next(iter(filter(lambda x: x.login == githubuserid, GITHUB_USERS)), github.GithubObject.NotSet)\n\n logging.debug('Updating github issue for ticket #{0}'.format(assemblakey))\n assemblaticket['description'] = assemblaticket['description'] or '(no description)'\n githubissue.edit(assemblaticket['githubtitle'], body=assemblaticket['description'], milestone=githubmilestone, state=githubissuestatus, assignee=githubuser)\n\n # assembla ticket comments -> github issue comments\n logging.debug('Rebuilding issue comments for issue #{0}'.format(assemblaticket['assemblakey']))\n assemblaticketcomments = filter(lambda x: x['ticket_id'] == assemblaticket['id'], ASSEMBLA_TICKET_COMMENTS)\n\n # wipe out all the github issue comments and rebuild every time.\n # probably a better way but the github api has limited support for comment modification.\n for githubissuecomment in githubissue.get_comments():\n githubissuecomment.delete()\n for assemblaticketcomment in assemblaticketcomments:\n if assemblaticketcomment['comment']:\n githubissue.create_comment('({}) - 
{}'.format(assemblaticketcomment['createdate'], assemblaticketcomment['comment']))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"assembla2github.py","file_name":"assembla2github.py","file_ext":"py","file_size_in_byte":40857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"249425669","text":"#!python3\n\nfrom stats import text_in_dir_stats\nimport xlsxwriter\n\n\n# Root directory with all texts\nroot_dir='texts'\n\n# Check Zipf's law\nprint('Working on Zipf\\'s law...')\ndictionary = text_in_dir_stats(root_dir)\nsorted_dict = sorted(dictionary.items(), key=lambda x: (x[1],x[0]), reverse=True)\nworkbook = xlsxwriter.Workbook('Zipf.xlsx')\nworksheet = workbook.add_worksheet()\nworksheet.write(0, 0, 'Rank')\nworksheet.write(0, 1, 'Count')\nworksheet.write(0, 2, 'Word')\nrow = 1\ntotal_words = 0\nfor [key, value] in sorted_dict:\n\tworksheet.write(row, 0, row)\n\tworksheet.write(row, 1, value)\n\tworksheet.write(row, 2, key)\n\ttotal_words += value\n\trow += 1\nworkbook.close()\nprint('Done! Total words count: ' + str(total_words) + '\\n')\n\n# Check Heaps' law\nprint('Working on Heaps\\' law...')\nfiles = 500 # eat 500 files\nworkbook = xlsxwriter.Workbook('Heaps.xlsx')\nworksheet = workbook.add_worksheet()\nworksheet.write(0, 0, 'Texts analyzed')\nworksheet.write(0, 1, 'Unique Word Count')\nworksheet.write(0, 2, 'Total words in texts')\nfor max_files in range (1, files):\n\tdictionary = text_in_dir_stats(root_dir, max_files)\n\tworksheet.write(max_files, 0, max_files)\n\tworksheet.write(max_files, 1, len(dictionary))\n\tworksheet.write(max_files, 2, sum(dictionary.values()))\n\tprint('Progress: ' + str(round(100 * max_files / files)) + '%', end='\\r')\nworkbook.close()\nprint('\\nDone!')","sub_path":"Tugas2 Heap Lap Zip Law/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493778346","text":"# importing the required libraries\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\r\nfrom tensorflow import keras\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.models import Model, Sequential\r\nfrom keras.layers import Dense, Flatten, Dropout\r\n# from tensorflow.keras import datasets, layers, models\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n# Importing dataset\r\ndataset = pd.read_csv(\r\n r\"C:\\Users\\ChampWk38\\Desktop\\Deep_Learning_A_Z\\Volume 1 - Supervised Deep Learning\\Part 1 - Artificial Neural Networks (ANN)\\Section 4 - Building an ANN\\Artificial_Neural_Networks\\Churn_Modelling.csv\")\r\nX = dataset.iloc[:, 3: 13].values\r\ny = dataset.iloc[:, 13].values\r\n\r\n# Encoding the categorical data\r\nlabelencoder_X_1 = LabelEncoder()\r\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\r\n\r\nlabelencoder_X_2 = LabelEncoder()\r\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\r\nonehotencoder = ColumnTransformer(transformers=[('Test', OneHotEncoder(), [1])], remainder='passthrough')\r\n# 
onehotencoder = OneHotEncoder(categories=\"auto\")\r\nX = onehotencoder.fit_transform(X)\r\n# print(X[0])\r\nX = X[:, 1:]\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n# Feature scaling\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.fit_transform(X_test)\r\n\r\n\r\n#\r\n# # fitting the ANN to the training\r\n# classifier.fit(X_train, y_train, batch_size=10, epochs=100, verbose=2)\r\n#\r\n# # making the predictions and evaluation\r\n# y_pred = classifier.predict(X_test)\r\n#\r\n# y_pred = (y_pred > 0.5)\r\n#\r\n# cm = confusion_matrix(y_test, y_pred)\r\n#\r\n# print(cm)\r\n# acc = (cm[0][0] + cm[1][1]) / 2000\r\n# print(acc)\r\n\r\ndef build_classifier(optimizer):\r\n classifier = Sequential()\r\n classifier.add(Dense(output_dim=6, init=\"uniform\", activation=\"relu\", input_dim=11))\r\n classifier.add(Dropout(rate=0.1))\r\n classifier.add(Dense(output_dim=6, init=\"uniform\", activation=\"relu\"))\r\n classifier.add(Dropout(rate=0.1))\r\n classifier.add(Dense(output_dim=1, init=\"uniform\", activation=\"sigmoid\"))\r\n classifier.compile(optimizer=optimizer, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\r\n return classifier\r\n\r\n\r\nclassifier = KerasClassifier(build_fn=build_classifier)\r\n\r\n# Grid search implementation\r\nparameters = {'batch_size': [25, 32],\r\n 'epochs': [100, 500],\r\n 'optimizer': ['adam', 'rmsprop']}\r\n\r\ngrid_search = GridSearchCV(estimator=classifier,\r\n param_grid=parameters,\r\n scoring=\"accuracy\",\r\n cv=10\r\n )\r\n\r\ngrid_search = grid_search.fit(X_train, y_train)\r\nbest_parameters = grid_search.best_estimator_\r\nbest_accuracy = grid_search.best_score_\r\n\r\nprint(best_parameters)\r\nprint(best_accuracy)\r\n# accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10, n_jobs=-1)\r\n# mean = accuracies.mean()\r\n# variance = accuracies.std()\r\n#\r\n# print(mean)\r\n# print(variance)\r\n","sub_path":"Supervised Deep Learning/Artificial Neural Networks (ANN)/grid_Search.py","file_name":"grid_Search.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"247298819","text":"import numpy as np\n\nfrom magpie.config import EMBEDDING_SIZE\n\nFEATURE_VECTOR = {\n # Candidate features\n 'tf_mean': 'float64',\n 'tf_sum': 'float64',\n 'tf_min': 'float64',\n 'tf_max': 'float64',\n 'idf_mean': 'float64',\n 'idf_sum': 'float64',\n 'idf_min': 'float64',\n 'idf_max': 'float64',\n 'tfidf': 'float64',\n 'first_occurrence_mean': 'float64',\n 'first_occurrence_min': 'float64',\n 'first_occurrence_max': 'float64',\n 'last_occurrence_mean': 'float64',\n 'last_occurrence_min': 'float64',\n 'last_occurrence_max': 'float64',\n 'spread_means': 'float64',\n 'spread_minmax': 'float64',\n 'no_of_words': 'uint8',\n 'no_of_letters': 'uint16',\n 'hops_from_anchor': 'uint16',\n # 'word2vec': 'float64', # N dimensional\n\n # Document features\n 'total_words_in_doc': 'uint32',\n 'unique_words_in_doc': 'uint32',\n}\n\n\ndef preallocate_feature_matrix(n_samples):\n \"\"\"\n Create an empty feature matrix represented as a dictionary of arrays\n :param n_samples: number of samples/rows in the matrix\n\n :return: dictionary of numpy arrays\n \"\"\"\n X = {k: np.zeros(n_samples, dtype=v)\n for k, v in FEATURE_VECTOR.items()}\n X['word2vec'] = np.zeros((n_samples, EMBEDDING_SIZE), dtype='float32')\n\n return 
X\n","sub_path":"magpie/linear_classifier/feature_extraction/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"336882982","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime, timedelta\n\n\ndefault_args = {\n \"owner\": \"healz\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2020, 2, 14),\n \"email\": [\"healyt22@gmail.com.com\"],\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 1,\n \"retry_delay\": timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG(\n dag_id = \"Tweets\",\n default_args = default_args,\n schedule_interval = \"0 */1 * * *\"\n)\n\nt1 = BashOperator(\n task_id = \"GetTweetDataFromAPI\",\n bash_command = \"python ~/git/politopics/politopics/twitter_api.py\",\n dag = dag\n)\n","sub_path":"dags/Tweets/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"513964639","text":"# coding=utf-8\nimport json\nimport math\nimport time\nimport urllib.request\nimport urllib.response\nfrom datetime import datetime, timedelta\nfrom pprint import pprint\n\nimport schedule\n\nordertemp = [0] * 50\norderlast = [0] * 50\ndingurl = 'https://oapi.dingtalk.com/robot/send?access_token=549961be9efed1a4a1c56318e3480834521ad64920f9d7e29263dbb4bb0d30a8'\nurlcomment = 'https://app-api.shop.ele.me/buttonwood/invoke/?method=GadgetzanAPIService.getAppraisalListByServiceNO'\norderurl = 'https://app-api.shop.ele.me/buttonwood/invoke/?method=ClassifyService.getServicesByClassifyCode'\nheader = {'Content-Type': 'application/json'}\nnewxpgid = \"266c3696a385377e647a9ac28f2bc1db\"\nchainxpgid = \"52f0b0f2bd8b19061654ba10608ab17a\"\nsqid = \"3cd8c323d6f4e2baa6107158802b49bb\"\ndkdid = \"749ff8fc717c4426e825e8d42ac6d4ce\"\ncjdzid = \"7c1fccc89fd9c3a356ab276fdf9e4403\"\nxpgid = \"6d4fdd6db6c4c2a0507599e5c29efdfb\"\ncjjpid = \"4cef596097ddf479b2cc16b0df3aedf2\"\nnewxpgname = \"小评果新店版\"\nchainxpgname = \"小评果连锁版\"\nxpgname = \"小评果正式版\"\nsqname = \"商圈排名\"\nglobal ksid\ntest = 1\n\n\ndef updateksid():\n global ksid\n file_temp = open('ksid.txt', 'r')\n ksid = file_temp.readline()\n # ksid = \"MTUwMDFiZGMtNjVkNC00NmY01fXI9pYmNlMW\"\n return ksid\n\n\n# 实时抓取评价\n\n\ndef jobgetcomment():\n\n try:\n file_temp = open('getcomment.txt', 'r')\n file_temp.close()\n except:\n pass\n\n file_object = open('getcomment.txt', 'a')\n sent = 0\n content = \"\"\n\n for service in [xpgid, newxpgid, chainxpgid, sqid]:\n if service == xpgid:\n tempname = xpgname\n elif service == newxpgid:\n tempname = newxpgname\n elif service == chainxpgid:\n tempname = chainxpgname\n elif service == sqid:\n tempname = sqname\n\n data = {\n \"id\": \"2C0DE4DBA2E8400DBCCF8AE4F779CCF2|1526630263938\",\n \"metas\": {\n \"appName\": \"melody\",\n \"appVersion\": \"4.4.0\",\n \"ksid\": ksid,\n \"key\": \"1.0.0\",\n },\n \"ncp\": \"2.0.0\",\n \"service\": \"GadgetzanAPIService\",\n \"method\": \"getAppraisalListByServiceNO\",\n \"params\": {\"offset\": 0, \"limit\": 5, \"serviceNO\": service},\n }\n\n params = json.dumps(data).encode('utf8')\n req = urllib.request.Request(urlcomment, data=params, headers=header)\n try:\n res = urllib.request.urlopen(req)\n d1 = json.load(res)\n d1 = 
d1['result']['result']\n except IOError:\n file_object.write(\"\\nError\\n\")\n print(\"Error, will try again\")\n jobgetcomment()\n except TypeError:\n content = '被饿了么反爬了,请更新ksid'\n else:\n for index in range(len(d1)):\n try:\n if datetime.now() - timedelta(minutes=10) > datetime.strptime(\n d1[index]['createTime'], '%Y-%m-%d %H:%M:%S'\n ):\n print(tempname + \"Nothing New --getcomment\")\n elif int(d1[index]['compositionalScore']) < 5:\n file_object.write(\n \"\\n\"\n + tempname\n + \" \"\n + str(d1[index]['orderNO'])\n + \" \"\n + str(d1[index]['compositionalScore'])\n + str(d1[index]['valuator'])\n + str(d1[index]['createTime'])\n + \"\\n\"\n )\n content += (\n tempname\n + \" \"\n + str(d1[index]['createTime'])\n + \"有新的差评,\"\n + str(d1[index]['compositionalScore'])\n + \"分来自于\"\n + str(d1[index]['valuator'])\n + \"用户说\"\n + str(d1[index]['content'] + \"\\n\")\n )\n print(\"New Comment\" + \"\\n--getcomment\")\n sent = 1\n except TypeError:\n pass\n\n file_object.write(\n str(datetime.now()) + \"\\n===========================================\"\n )\n file_object.close()\n\n mobilelist = \"18600536524\"\n dingdata = {\n \"msgtype\": \"text\",\n \"text\": {\"content\": content},\n \"at\": {\"atMobiles\": [mobilelist], \"isAtAll\": False},\n }\n json_str = json.dumps(dingdata).encode('utf8')\n dingreq = urllib.request.Request(dingurl, data=json_str, headers=header)\n\n if test == 0:\n if sent == 1:\n dingres = urllib.request.urlopen(dingreq)\n print(str(dingres.read()) + \"\\nSent --getcomment\")\n sent = 0\n elif sent == 1:\n print(\"Test\\nSent --getcomment\")\n sent = 0\n\n\n# 评分计算\n\n\ndef jobgetallcomment():\n\n content = \"\"\n file_object = open('getallcomment.txt', 'w')\n\n res = query(\"xpg\")\n # print (\"\\n小评果\"+res[0])\n file_object.write(\"\\n小评果\" + res[2])\n content += \"\\n小评果\" + res[0]\n\n res = query(\"dkd\")\n # print (\"\\n店客多\"+res[0])\n file_object.write(\"\\n店客多\" + res[2])\n content += \"\\n店客多\" + res[0]\n\n res = query(\"cjdz\")\n # print (\"\\n超级店长\"+res[0])\n file_object.write(\"\\n超级店长\" + res[2])\n content += \"\\n超级店长\" + res[0]\n\n file_object.write(\n \"\\n\" + str(datetime.now()) + \"\\n===========================================\"\n )\n file_object.close()\n print(str(datetime.now()))\n\n mobilelist = \"18600536524\"\n dingdata = {\n \"msgtype\": \"text\",\n \"text\": {\"content\": content},\n \"at\": {\"atMobiles\": [mobilelist], \"isAtAll\": False},\n }\n json_str = json.dumps(dingdata).encode('utf8')\n dingreq = urllib.request.Request(dingurl, data=json_str, headers=header)\n if test == 0:\n dingres = urllib.request.urlopen(dingreq)\n print(str(dingres.read()) + \"\\n评分计算完成,发送成功\")\n else:\n print(content)\n print(\"评分计算完成,发送成功(测试)\")\n\n\ndef query(request):\n\n if request == \"dkd\":\n tempid = dkdid\n elif request == \"cjdz\":\n tempid = cjdzid\n elif request == \"xpg\":\n tempid = xpgid\n elif request == \"cjjp\":\n tempid = cjjpid\n\n offset = 0\n limit = 200\n count = 0\n scoresum = 0\n remain = 0\n filetext = \"\"\n antispyder = 0\n\n while True:\n data = {\n \"id\": \"008DBE4D482D431BBAC8ECC11E7EABE4|1528683444787\",\n \"metas\": {\n \"appName\": \"melody\",\n \"appVersion\": \"4.4.0\",\n \"ksid\": ksid,\n \"key\": \"1.0.0\",\n },\n \"ncp\": \"2.0.0\",\n \"service\": \"GadgetzanAPIService\",\n \"method\": \"getAppraisalListByServiceNO\",\n \"params\": {\"offset\": offset, \"limit\": limit, \"serviceNO\": tempid},\n }\n params = json.dumps(data).encode('utf8')\n req = urllib.request.Request(urlcomment, data=params, headers=header)\n try:\n 
res = urllib.request.urlopen(req)\n d1 = json.load(res)\n if d1['result']['result'] == None:\n break\n try:\n if d1['error']['code'] == 'SERVER_ERROR':\n print(\"ServerError, will try again\")\n jobgetallcomment()\n except:\n pass\n except TypeError:\n content = '被饿了么反爬了,请更新ksid'\n antispyder = 1\n break\n else:\n d1 = d1['result']['result']\n for index in range(len(d1)):\n t1 = datetime.strptime(d1[index]['createTime'], '%Y-%m-%d %H:%M:%S')\n d = datetime.now() - timedelta(days=900)\n if t1 > d:\n if (\n (\"i**1\" in d1[index]['valuator'])\n | (\"i**2\" in d1[index]['valuator'])\n | (\"i**v\" in d1[index]['valuator'])\n ):\n filetext += (\n \"\\n\"\n + str(d1[index]['orderNO'])\n + \" \"\n + str(d1[index]['compositionalScore'])\n + \" \"\n + str(d1[index]['createTime'])\n )\n count += 1\n scoresum += int(d1[index]['compositionalScore'])\n # pass\n else:\n filetext += (\n \"\\n\"\n + str(d1[index]['orderNO'])\n + \" \"\n + str(d1[index]['compositionalScore'])\n + \" \"\n + str(d1[index]['createTime'])\n )\n count += 1\n scoresum += int(d1[index]['compositionalScore'])\n else:\n break\n\n offset += limit\n\n # print(count)\n if antispyder == 0:\n scorenow = (round_up(scoresum / count * 10000)) / 10000\n content = \"目前总共\" + str(count) + \"条评价\\n评分:\" + str(scorenow) + \"\\n\"\n for score in range(math.ceil(scoresum / count * 10), 51, 1):\n while round_up(scoresum / count) * 10 < score:\n scoresum += 5.0\n count += 1\n remain += 1\n content += \"距离\" + str(score / 10) + \"分还差\" + str(remain) + \"条好评\" + \"\\n\"\n else:\n print(content)\n pass\n return content, count, filetext\n\n\n# 销量统计\n\n\ndef jobsendPostDing():\n\n data = {\n \"id\": \"E8EA1587AC8040F29AF64EC30294E399|1528683065007\",\n \"metas\": {\n \"appName\": \"melody\",\n \"appVersion\": \"4.4.0\",\n \"ksid\": ksid,\n \"key\": \"1.0.0\",\n },\n \"ncp\": \"2.0.0\",\n \"service\": \"ClassifyService\",\n \"method\": \"getServicesByClassifyCode\",\n \"params\": {\"classifyCode\": \"1\", \"offset\": 0, \"limit\": 99},\n }\n\n file_object = open('ordercount.txt', 'a')\n\n params = json.dumps(data).encode('utf8')\n req = urllib.request.Request(orderurl, data=params, headers=header)\n res = urllib.request.urlopen(req)\n\n d1 = json.load(res)\n # print(d1)\n try:\n d1 = d1['result']['result']\n content = ''\n for index in range(len(d1)):\n ordertemp[index] = int(d1[index]['orderCount']) - orderlast[index]\n orderlast[index] = int(d1[index]['orderCount'])\n content += (\n str(d1[index]['serviceName'])\n + str(d1[index]['orderCount'])\n + \"/\"\n + str(ordertemp[index])\n + \"\\n\"\n )\n file_object.write(\n str(d1[index]['serviceName'])\n + str(d1[index]['orderCount'])\n + \"/\"\n + str(ordertemp[index])\n + \"\\n\"\n )\n except TypeError:\n content = '被饿了么反爬了,请更新ksid'\n\n file_object.write(\n str(datetime.now()) + \"\\n===========================================\\n\"\n )\n file_object.close()\n\n mobilelist = \"18513249383\"\n dingdata = {\n \"msgtype\": \"text\",\n \"text\": {\"content\": content + str(datetime.now())},\n \"at\": {\"atMobiles\": [mobilelist], \"isAtAll\": False},\n }\n json_str = json.dumps(dingdata).encode('utf8')\n dingreq = urllib.request.Request(dingurl, data=json_str, headers=header)\n\n # appid = '22547'\n # to = '18511067574'\n # signature = 'dded839a7db21a859155793987c46c85'\n # submaildata = 'appid='+appid+'&to='+to+'&content=【小评果】'+content+'退订回N &signature='+signature\n # submailurl = 'https://api.mysubmail.com/message/send.json'\n # submailparam = submaildata.encode('utf8')\n # subreq = 
urllib.request.Request(submailurl, data=submailparam)\n if int(datetime.now().hour) in range(0, 8):\n print(str(datetime.now().hour) + \" Pass\" + \"\\n销量统计完成,不发送\")\n elif test == 0:\n # subres = urllib.request.urlopen(subreq)\n # d2 = json.load(subres)\n dingres = urllib.request.urlopen(dingreq)\n print(str(dingres.read()) + \"\\n销量统计完成,发送成功\")\n else:\n print(\"销量统计完成,发送成功(测试)\")\n\n\n# 四舍五入,小数点后一位\n\n\ndef round_up(value):\n return round(value * 10) / 10.0\n\n\nupdateksid()\njobsendPostDing()\njobgetcomment()\njobgetallcomment()\n\n# 定时任务\nschedule.every().day.at(\"10:00\").do(jobgetallcomment)\nschedule.every(10).minutes.do(updateksid)\nschedule.every(10).minutes.do(jobgetcomment)\nfor runtime in range(8, 24):\n schedule.every().day.at(str(runtime) + \":59\").do(jobsendPostDing)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"runjobs.py","file_name":"runjobs.py","file_ext":"py","file_size_in_byte":13101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"23805805","text":"numbers = [1,1,1,1,1]\ntarget = 3\n\na = [0]\n\nfor i in numbers:\n b = []\n for j in a:\n b.append(j+i)\n b.append(j-i)\n a=b\n\nprint(a.count(target))\n","sub_path":"ALGORITHM/PROGRAMMERS/ALGORITHM_TEST/11. 타겟 넘버.py","file_name":"11. 타겟 넘버.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"134169069","text":"from pymmortals.datatypes.serializable import Serializable\nfrom typing import List\n\n\n# noinspection PyPep8Naming\nclass AndroidEmulatorRequirement(Serializable):\n _validator_values = dict()\n\n _types = dict()\n\n def __init__(self,\n androidVersion: int = None,\n externallyAccessibleUrls: List[str] = None,\n superuserAccess: bool = None,\n uploadBandwidthLimitKilobitsPerSecond: int = None):\n super().__init__()\n self.androidVersion = androidVersion\n self.externallyAccessibleUrls = externallyAccessibleUrls\n self.superuserAccess = superuserAccess\n self.uploadBandwidthLimitKilobitsPerSecond = uploadBandwidthLimitKilobitsPerSecond\n\n\n# noinspection PyPep8Naming\nclass ChallengeProblemRequirements(Serializable):\n _validator_values = dict()\n\n _types = dict()\n\n def __init__(self,\n androidEmulators: List[AndroidEmulatorRequirement] = None,\n challengeProblemUrl: str = None):\n super().__init__()\n self.androidEmulators = androidEmulators\n self.challengeProblemUrl = challengeProblemUrl\n\n\n# noinspection PyPep8Naming\nclass DASPrerequisites(Serializable):\n _validator_values = dict()\n\n _types = dict()\n\n def __init__(self,\n cp1: ChallengeProblemRequirements = None,\n cp2: ChallengeProblemRequirements = None,\n cp3: ChallengeProblemRequirements = None):\n super().__init__()\n self.cp1 = cp1\n self.cp2 = cp2\n self.cp3 = cp3\n","sub_path":"phase02/immortals_repo/harness/pymmortals/generated/mil/darpa/immortals/core/api/ll/phase2/dasprerequisites.py","file_name":"dasprerequisites.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475544186","text":"# 4. Odd or Even\n\n# Write a function f(x) that returns 1 if x is odd and -1 if x is even. Plot it for x values of -5 to 5 in increments of 1. 
This time, instead of using plot.plot, use plot.bar instead to make a bar graph.\n\nimport matplotlib.pyplot as plot \n\ndef f(x): \n if x % 2 != 0:\n return 1\n else:\n return -1\n\nxs = list(range(-5, 6)) \nys = [] \n\nfor x in xs: \n ys.append(f(x))\n\nplot.bar(xs, ys) \nplot.show()","sub_path":"FunctionExercises/FunctionExercise4.py","file_name":"FunctionExercise4.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"115881847","text":"from pygame import image as pyimage\nfrom pygame.sprite import Sprite\n\nclass Cookie(Sprite):\n \"\"\"A class to handle single cookie from a cookie bag\"\"\"\n def __init__(self, game):\n \"\"\" Initalize cookie and set it's position \"\"\"\n super().__init__()\n\n self.game = game\n self.settings = game.settings\n\n self.screen = game.screen\n self.screen_rect = game.screen.get_rect()\n\n # Load the image for a cookie\n self.image = pyimage.load('images/cookie1.bmp')\n self.rect = self.image.get_rect()\n\n # Set starting position and speed\n self.rect.x = 0\n self.rect.y = 0\n self.cookie_velocity = 0\n\n\n\n\n\n","sub_path":"cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"400507628","text":"if __debug__:\n from .dprint import dprint\n from .member import Member\n\n def is_address(address):\n assert isinstance(address, tuple), type(address)\n assert len(address) == 2, len(address)\n assert isinstance(address[0], str), type(address[0])\n assert address[0], address[0]\n assert not address[0] == \"0.0.0.0\", address\n assert isinstance(address[1], int), type(address[1])\n assert address[1] >= 0, address[1]\n return True\n\n# update version information directly from SVN\nfrom .revision import update_revision_information\nupdate_revision_information(\"$HeadURL$\", \"$Revision$\")\n\n# delay and lifetime values are chosen to ensure that a candidate will not exceed 60.0 or 30.0\n# seconds. However, taking into account round trip time and processing delay we to use smaller\n# values without conflicting with the next 5.0 walk cycle. 
Hence, we pick 2.5 seconds below the\n# actual cutoff point.\nCANDIDATE_ELIGIBLE_DELAY = 27.5\nCANDIDATE_ELIGIBLE_BOOTSTRAP_DELAY = 57.5\nCANDIDATE_WALK_LIFETIME = 57.5\nCANDIDATE_STUMBLE_LIFETIME = 57.5\nCANDIDATE_INTRO_LIFETIME = 27.5\nCANDIDATE_LIFETIME = 180.0\nassert isinstance(CANDIDATE_ELIGIBLE_DELAY, float)\nassert isinstance(CANDIDATE_ELIGIBLE_BOOTSTRAP_DELAY, float)\nassert isinstance(CANDIDATE_WALK_LIFETIME, float)\nassert isinstance(CANDIDATE_STUMBLE_LIFETIME, float)\nassert isinstance(CANDIDATE_INTRO_LIFETIME, float)\nassert isinstance(CANDIDATE_LIFETIME, float)\n\nclass Candidate(object):\n def __init__(self, sock_addr, tunnel):\n assert is_address(sock_addr), sock_addr\n assert isinstance(tunnel, bool), type(tunnel)\n self._sock_addr = sock_addr\n self._tunnel = tunnel\n\n # @property\n def __get_sock_addr(self):\n return self._sock_addr\n # @sock_addr.setter\n def __set_sock_addr(self, sock_addr):\n self._sock_addr = sock_addr\n # .setter was introduced in Python 2.6\n sock_addr = property(__get_sock_addr, __set_sock_addr)\n\n @property\n def tunnel(self):\n return self._tunnel\n\n def get_destination_address(self, wan_address):\n assert is_address(wan_address), wan_address\n return self._sock_addr\n\n def get_members(self, community):\n # preferably use the WalkerCandidate directly\n candidate = community.dispersy.get_candidate(self._sock_addr)\n if candidate:\n return candidate.get_members(community)\n else:\n return []\n\n def __str__(self):\n return \"{%s:%d}\" % self._sock_addr\n\nclass WalkCandidate(Candidate):\n \"\"\"\n A Candidate instance represents a communication endpoint with one or more member/community\n pairs.\n\n A WalkCandidate is added and removed by the Dispersy random walker when events occur. These\n events results in the following marks:\n\n - WALK: we sent an introduction-request. Viable up to CANDIDATE_WALK_LIFETIME seconds after the\n message was sent.\n\n - STUMBLE: we received an introduction-request. Viable up to CANDIDATE_STUMBLE_LIFETIME seconds\n after the message was received.\n\n - INTRO: we know about this candidate through hearsay. 
Viable up to CANDIDATE_INACTIVE seconds\n after the introduction-response message (talking about the candidate) was received.\n \"\"\"\n class Timestamps(object):\n __slots__ = [\"timeout_adjustment\", \"last_walk\", \"last_stumble\", \"last_intro\"]\n\n def __init__(self):\n self.timeout_adjustment = 0.0\n self.last_walk = 0.0\n self.last_stumble = 0.0\n self.last_intro = 0.0\n\n def merge(self, other):\n assert isinstance(other, WalkCandidate.Timestamps), other\n self.timeout_adjustment = max(self.timeout_adjustment, other.timeout_adjustment)\n self.last_walk = max(self.last_walk, other.last_walk)\n self.last_stumble = max(self.last_stumble, other.last_stumble)\n self.last_intro = max(self.last_intro, other.last_intro)\n\n def __init__(self, sock_addr, tunnel, lan_address, wan_address, connection_type):\n assert is_address(sock_addr), sock_addr\n assert isinstance(tunnel, bool), type(tunnel)\n assert is_address(lan_address)\n assert is_address(wan_address)\n assert isinstance(connection_type, unicode) and connection_type in (u\"unknown\", u\"public\", u\"symmetric-NAT\")\n\n super(WalkCandidate, self).__init__(sock_addr, tunnel)\n self._lan_address = lan_address\n self._wan_address = wan_address\n self._connection_type = connection_type\n self._associations = set()\n self._timestamps = dict()\n self._global_times = dict()\n\n if __debug__:\n if not (self.sock_addr == self._lan_address or self.sock_addr == self._wan_address):\n dprint(\"Either LAN \", self._lan_address, \" or the WAN \", self._wan_address, \" should be SOCK_ADDR \", self.sock_addr, level=\"error\", stack=True)\n assert False\n\n @property\n def lan_address(self):\n return self._lan_address\n\n @property\n def wan_address(self):\n return self._wan_address\n\n @property\n def connection_type(self):\n return self._connection_type\n\n def get_destination_address(self, wan_address):\n assert is_address(wan_address), wan_address\n return self._lan_address if wan_address[0] == self._wan_address[0] else self._wan_address\n\n def merge(self, other):\n assert isinstance(other, WalkCandidate), other\n self._associations.update(other._associations)\n \n for cid, timestamps in other._timestamps.iteritems():\n if cid in self._timestamps:\n self._timestamps[cid].merge(timestamps)\n else:\n self._timestamps[cid] = timestamps\n \n #TODO: this should be improved\n from .dispersy import Dispersy\n dispersy = Dispersy.get_instance()\n community = dispersy._communities.get(cid, None)\n community.add_candidate(self)\n \n for cid, global_time in self._global_times.iteritems():\n self._global_times[cid] = max(self._global_times.get(cid, 0), global_time)\n \n def set_global_time(self, community, global_time):\n self._global_times[community.cid] = max(self._global_times.get(community.cid, 0), global_time)\n\n def get_global_time(self, community):\n return self._global_times.get(community.cid, 0)\n\n def _get_or_create_timestamps(self, community):\n if __debug__:\n from .community import Community\n assert isinstance(community, Community)\n timestamps = self._timestamps.get(community.cid)\n if not timestamps:\n self._timestamps[community.cid] = timestamps = self.Timestamps()\n return timestamps\n\n def associate(self, community, member):\n \"\"\"\n Once it is confirmed that the candidate is represented by a member, i.e. 
though a 3-way\n handshake, the member can be associated with the candidate.\n \"\"\"\n if __debug__:\n from .community import Community\n assert isinstance(community, Community)\n assert isinstance(member, Member)\n self._associations.add((community.cid, member))\n\n def is_associated(self, community, member):\n \"\"\"\n Check if the (community, member) pair is associated with this candidate.\n \"\"\"\n if __debug__:\n from .community import Community\n assert isinstance(community, Community)\n assert isinstance(member, Member)\n return (community.cid, member) in self._associations\n\n def disassociate(self, community, member):\n \"\"\"\n Remove the association with a member.\n \"\"\"\n if __debug__:\n from .community import Community\n assert isinstance(community, Community)\n assert isinstance(member, Member)\n self._associations.remove((community.cid, member))\n if community.cid in self._global_times:\n del self._global_times[community.cid]\n\n def get_members(self, community):\n \"\"\"\n Returns all unique Member instances in COMMUNITY associated to this candidate.\n \"\"\"\n return set(member for cid, member in self._associations if community.cid == cid)\n\n def in_community(self, community, now):\n \"\"\"\n Returns True if SELF is either walk, stumble, or intro in COMMUNITY.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n return (timestamps.last_walk + timestamps.timeout_adjustment <= now < timestamps.last_walk + CANDIDATE_WALK_LIFETIME or\n now < timestamps.last_stumble + CANDIDATE_STUMBLE_LIFETIME or\n now < timestamps.last_intro + CANDIDATE_INTRO_LIFETIME)\n else:\n return False\n\n def is_active(self, community, now):\n \"\"\"\n Returns True if SELF is either walk or stumble in COMMUNITY.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n return (timestamps.last_walk + timestamps.timeout_adjustment <= now < timestamps.last_walk + CANDIDATE_WALK_LIFETIME or\n now < timestamps.last_stumble + CANDIDATE_STUMBLE_LIFETIME)\n return False\n\n def is_any_active(self, now):\n \"\"\"\n Returns True if SELF is either walk or stumble in any of the associated communities.\n\n This is used when deciding if this candidate can be used for communication, the assumption\n is that if any community is still active, that all will still be active. 
The exception to\n this rule is when a node decides to leave one or more communities while remaining active in\n one or more others.\n \"\"\"\n return any(timestamps.last_walk + timestamps.timeout_adjustment <= now < timestamps.last_walk + CANDIDATE_WALK_LIFETIME or now < timestamps.last_stumble + CANDIDATE_STUMBLE_LIFETIME\n for timestamps\n in self._timestamps.itervalues())\n\n def is_all_obsolete(self, now):\n \"\"\"\n Returns True if SELF exceeded the CANDIDATE_LIFETIME of all the associated communities.\n \"\"\"\n return all(max(timestamps.last_walk, timestamps.last_stumble, timestamps.last_intro) + CANDIDATE_LIFETIME < now\n for timestamps\n in self._timestamps.itervalues())\n\n def age(self, now):\n \"\"\"\n Returns the time between NOW and the most recent walk or stumble or any of the associated communities.\n \"\"\"\n return now - max(max(timestamps.last_walk, timestamps.last_stumble) for timestamps in self._timestamps.itervalues())\n\n def inactive(self, community, now):\n \"\"\"\n Called to set SELF to inactive for COMMUNITY.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n timestamps.last_walk = now - CANDIDATE_WALK_LIFETIME\n timestamps.last_stumble = now - CANDIDATE_STUMBLE_LIFETIME\n timestamps.last_intro = now - CANDIDATE_INTRO_LIFETIME\n\n def obsolete(self, community, now):\n \"\"\"\n Called to set SELF to obsolete for all associated communities.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n timestamps.last_walk = now - CANDIDATE_LIFETIME\n timestamps.last_stumble = now - CANDIDATE_LIFETIME\n timestamps.last_intro = now - CANDIDATE_LIFETIME\n\n def all_inactive(self, now):\n \"\"\"\n Called to set SELF to inactive (or keep it at OBSOLETE) for all associated communities.\n\n This is used when a timeout occurs while waiting for an introduction-response. We choose to\n set all communities to inactive to improve churn handling. Setting the entire candidate to\n inactive will not remove it and any associated 3-way handshake information. 
This is\n retained until the entire candidate becomes obsolete.\n \"\"\"\n for timestamps in self._timestamps.itervalues():\n timestamps.last_walk = now - CANDIDATE_WALK_LIFETIME\n timestamps.last_stumble = now - CANDIDATE_STUMBLE_LIFETIME\n timestamps.last_intro = now - CANDIDATE_INTRO_LIFETIME\n\n def is_eligible_for_walk(self, community, now):\n \"\"\"\n Returns True when the candidate is eligible for taking a step.\n\n A candidate is eligible when:\n - SELF is either walk, stumble, or intro in COMMUNITY; and\n - the previous step is more than CANDIDATE_ELIGIBLE_DELAY ago.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n return (timestamps.last_walk + CANDIDATE_ELIGIBLE_DELAY <= now and\n (timestamps.last_walk + timestamps.timeout_adjustment <= now < timestamps.last_walk + CANDIDATE_WALK_LIFETIME or\n now < timestamps.last_stumble + CANDIDATE_STUMBLE_LIFETIME or\n now < timestamps.last_intro + CANDIDATE_INTRO_LIFETIME))\n else:\n return False\n\n def last_walk(self, community):\n assert community.cid in self._timestamps\n return self._timestamps[community.cid].last_walk\n\n def last_stumble(self, community):\n assert community.cid in self._timestamps\n return self._timestamps[community.cid].last_stumble\n\n def last_intro(self, community):\n assert community.cid in self._timestamps\n return self._timestamps[community.cid].last_intro\n\n def get_category(self, community, now):\n \"\"\"\n Returns the category (u\"walk\", u\"stumble\", u\"intro\", or u\"none\") depending on the current\n time NOW.\n \"\"\"\n timestamps = self._timestamps.get(community.cid)\n if timestamps:\n if timestamps.last_walk + timestamps.timeout_adjustment <= now < timestamps.last_walk + CANDIDATE_WALK_LIFETIME:\n return u\"walk\"\n \n if now < timestamps.last_stumble + CANDIDATE_STUMBLE_LIFETIME:\n return u\"stumble\"\n \n if now < timestamps.last_intro + CANDIDATE_INTRO_LIFETIME:\n return u\"intro\"\n\n return u\"none\"\n\n def walk(self, community, now, timeout_adjustment):\n \"\"\"\n Called when we are about to send an introduction-request to this candidate.\n \"\"\"\n timestamps = self._get_or_create_timestamps(community)\n timestamps.timeout_adjustment = timeout_adjustment\n timestamps.last_walk = now\n \n if not isinstance(self, BootstrapCandidate):\n community.add_candidate(self)\n\n def walk_response(self, community):\n \"\"\"\n Called when we received an introduction-response to this candidate.\n \"\"\"\n self._get_or_create_timestamps(community).timeout_adjustment = 0.0\n\n def stumble(self, community, now):\n \"\"\"\n Called when we receive an introduction-request from this candidate.\n \"\"\"\n self._get_or_create_timestamps(community).last_stumble = now\n \n if not isinstance(self, BootstrapCandidate):\n community.add_candidate(self)\n\n def intro(self, community, now):\n \"\"\"\n Called when we receive an introduction-response introducing this candidate.\n \"\"\"\n self._get_or_create_timestamps(community).last_intro = now\n \n if not isinstance(self, BootstrapCandidate):\n community.add_candidate(self)\n\n def update(self, tunnel, lan_address, wan_address, connection_type):\n assert isinstance(tunnel, bool)\n assert lan_address == (\"0.0.0.0\", 0) or is_address(lan_address), lan_address\n assert wan_address == (\"0.0.0.0\", 0) or is_address(wan_address), wan_address\n assert isinstance(connection_type, unicode), type(connection_type)\n assert connection_type in (u\"unknown\", u\"public\", \"symmetric-NAT\"), connection_type\n self._tunnel = tunnel\n if lan_address != 
(\"0.0.0.0\", 0):\n self._lan_address = lan_address\n if wan_address != (\"0.0.0.0\", 0):\n self._wan_address = wan_address\n # someone can also reset from a known connection_type to unknown (i.e. it now believes it is\n # no longer public nor symmetric NAT)\n self._connection_type = u\"public\" if connection_type == u\"unknown\" and lan_address == wan_address else connection_type\n\n if __debug__:\n if not (self.sock_addr == self._lan_address or self.sock_addr == self._wan_address):\n dprint(\"Either LAN \", self._lan_address, \" or the WAN \", self._wan_address, \" should be SOCK_ADDR \", self.sock_addr, level=\"error\", stack=True)\n\n def __str__(self):\n if self._sock_addr == self._lan_address == self._wan_address:\n return \"{%s:%d}\" % self._lan_address\n elif self._sock_addr in (self._lan_address, self._wan_address):\n return \"{%s:%d %s:%d}\" % (self._lan_address[0], self._lan_address[1], self._wan_address[0], self._wan_address[1])\n else:\n # should not occur\n return \"{%s:%d %s:%d %s:%d}\" % (self._sock_addr[0], self._sock_addr[1], self._lan_address[0], self._lan_address[1], self._wan_address[0], self._wan_address[1])\n\nclass BootstrapCandidate(WalkCandidate):\n def __init__(self, sock_addr, tunnel):\n super(BootstrapCandidate, self).__init__(sock_addr, tunnel, sock_addr, sock_addr, connection_type=u\"public\")\n\n def in_community(self, community, now):\n \"\"\"\n Bootstrap nodes are, by definition, in every possible community.\n \"\"\"\n if not community.cid in self._timestamps:\n self._timestamps[community.cid] = self.Timestamps()\n return True\n\n def is_eligible_for_walk(self, community, now):\n \"\"\"\n Bootstrap nodes are, by definition, always online, hence the timeouts do not apply.\n \"\"\"\n assert community.cid in self._timestamps\n timestamps = self._timestamps[community.cid]\n return now >= timestamps.last_walk + CANDIDATE_ELIGIBLE_BOOTSTRAP_DELAY\n\n def is_associated(self, community, member):\n \"\"\"\n Bootstrap nodes are, by definition, always associated hence we return true.\n \"\"\"\n return True\n\n def __str__(self):\n return \"B!\" + super(BootstrapCandidate, self).__str__()\n\nclass LoopbackCandidate(Candidate):\n def __init__(self):\n super(LoopbackCandidate, self).__init__((\"localhost\", 0), False)\n","sub_path":"candidate.py","file_name":"candidate.py","file_ext":"py","file_size_in_byte":18492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"516815357","text":"#-*- encoding:UTF-8 -*-\n__author__ = 'kejun'\n# create Datetime: 15-7-7 上午9:12\nimport wx\n\nclass HA_Panel(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n self.countdown1 = wx.StaticText(self, -1, \"高可用总体概述: \\n\".decode(\"utf-8\"), (0, 20))\n self.countdown2 = wx.StaticText(self, -1, \"高可用的考核的具体目标、方向: 提高可用演练和测试的质量,提高应急手册的准确性。\".decode(\"utf-8\"), size=(600, 80),pos=(0,40))\n self.countdown3 = wx.StaticText(self, -1, \"高可用评分标准: \\n\".decode(\"utf-8\"), (0, 80))\n self.countdown4 = wx.StaticText(self, -1, \"高可用评分的判断标准由客观指标和主观评价构成,具体细则可以查看首页说明手册。\\n\"\n .decode(\"utf-8\"), (0, 100))\n self.font = wx.Font(8,wx.DEFAULT,wx.NORMAL,wx.BOLD)\n self.countdown1.SetFont(self.font)\n self.countdown3.SetFont(self.font)\n","sub_path":"AssessTool/GUI/Show/HAMain.py","file_name":"HAMain.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"443436684","text":"from django.test import TestCase\n\nfrom django.contrib.auth.models 
import User\nfrom user_profile.models import UserProfile\n\nfrom ..models import Topic\nfrom ..models import Tag\nfrom ..models import Debate\nfrom ..models import Vote\nfrom ..models import Comment\nfrom ..models import Admire\n\n\nclass FixtureTestCaseBase(TestCase):\n def setUp(self):\n self.user = User.objects.create(username='a', email='a').user_profile\n self.user2 = User.objects.create(username='b', email='b').user_profile\n self.tag = Tag.objects.create(name='a', description='a')\n self.topic = Topic.objects.create(name='a', description='a')\n self.debate = Debate.objects.create(\n topic=self.topic,\n created_user=self.user,\n title='a', abstract='a', detail='a'\n )\n self.debate.tags.add(self.tag)\n self.vote = Vote.objects.create(\n user=self.user, debate=self.debate, type=Vote.PRO\n )\n self.vote2 = Vote.objects.create(\n user=self.user2, debate=self.debate, type=Vote.CON\n )\n self.comment = Comment.objects.create(\n user=self.user, debate=self.debate,\n type=Comment.OPINION, text='a'\n )\n self.comment2 = Comment.objects.create(\n user=self.user2, debate=self.debate, parent=self.comment,\n type=Comment.REFUTATION, text='a'\n )\n self.admire = Admire.objects.create(\n user=self.user, comment=self.comment2\n )\n\n def tearDown(self):\n Admire.objects.all().delete()\n Vote.objects.all().delete()\n Comment.objects.all().delete()\n Debate.objects.all().delete()\n UserProfile.objects.all().delete()\n Topic.objects.all().delete()\n Tag.objects.all().delete()\n\n\nclass OneTimeFixtureTestCaseBase(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.user = User.objects.create(username='a', email='a').user_profile\n cls.user2 = User.objects.create(username='b', email='b').user_profile\n cls.tag = Tag.objects.create(name='a', description='a')\n cls.topic = Topic.objects.create(name='a', description='a')\n cls.debate = Debate.objects.create(\n topic=cls.topic,\n created_user=cls.user,\n title='a', abstract='a', detail='a'\n )\n cls.debate.tags.add(cls.tag)\n cls.vote = Vote.objects.create(\n user=cls.user, debate=cls.debate, type=Vote.PRO\n )\n cls.vote2 = Vote.objects.create(\n user=cls.user2, debate=cls.debate, type=Vote.CON\n )\n cls.comment = Comment.objects.create(\n user=cls.user, debate=cls.debate,\n type=Comment.OPINION, text='a'\n )\n cls.comment2 = Comment.objects.create(\n user=cls.user2, debate=cls.debate, parent=cls.comment,\n type=Comment.REFUTATION, text='a'\n )\n cls.admire = Admire.objects.create(\n user=cls.user, comment=cls.comment2\n )\n\n @classmethod\n def tearDownClass(cls):\n Admire.objects.all().delete()\n Vote.objects.all().delete()\n Comment.objects.all().delete()\n Debate.objects.all().delete()\n UserProfile.objects.all().delete()\n Topic.objects.all().delete()\n Tag.objects.all().delete()\n","sub_path":"core/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"242864990","text":"def is_slow_zombie(speed):\n if (speed < 5):\n return True\n else:\n return False\n\ndef take_action(mutation, speed):\n if (is_slow_zombie(speed) == True):\n print(\"This \" + mutation + \" zombie is a slow zombie. You can run around it!\")\n else:\n print(\"This \" + mutation + \" zombie is a fast zombie. 
You better hide!\")\n\ndef run():\n print(\"What is the mutation type of the zombie?\")\n mutation = input()\n\n print(\"what is the speed of the zombie?\")\n speed = int(input())\n\n print(\"what do you wish to do (identify or action)?\")\n reply = input()\n\n if (reply == \"identify\"):\n print(\"A slow zombie: \" + str(is_slow_zombie(speed)) ) \n elif (reply == \"action\"):\n print(take_action(mutation, speed))\n else:\n print(\"Unknown zombie!\")\n\nrun()\n\n\n\n\n\n","sub_path":"practice/AE1/TCA 4/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"82368218","text":"from typing import Any\n\nimport proto\n\nclass KeywordPlanCompetitionLevelEnum(proto.Message):\n class KeywordPlanCompetitionLevel(proto.Enum):\n UNSPECIFIED = 0\n UNKNOWN = 1\n LOW = 2\n MEDIUM = 3\n HIGH = 4\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v13/enums/types/keyword_plan_competition_level.pyi","file_name":"keyword_plan_competition_level.pyi","file_ext":"pyi","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"628122062","text":"import os\nimport re\nimport lxml\n# import cchardet\nimport numpy as np\nfrom collections import OrderedDict\nimport gensim\nimport gensim.corpora as corpora\nfrom ranker import load_ranker\nfrom bs4 import BeautifulSoup\n\n\nCORPUS_FILE = '/data/expertsearch/lda_corpus'\nTOPIC_MODEL_FILE = '/data/expertsearch/lda_mallet_model'\nCORPUS_DICTIONARY_FILE = '/data/expertsearch/corpus_dictionary'\nBIOS_FILE = '/data/compiled_bios'\nMODEL_FILES = '/data/expertsearch/model_files/'\nINFERENCES = '/data/expertsearch/model_files/doctopics.txt'\n\n\ndef get_topics_from_many_documents(doc_names):\n \"\"\" A function that extracts a list of terms associated with each document's inferred topic.\n\n Parameters\n ----------\n doc_names: list\n A list of document filenames\n \"\"\"\n relevant_terms = []\n for doc_name in doc_names:\n relevant_terms.append(get_topic_from_single_document(doc_name))\n\n return relevant_terms\n\ndef get_topic_from_single_document(doc_name):\n \"\"\" A function that extracts a list of terms associated with a document's inferred topic.\n\n Parameters\n ----------\n doc_name: str\n The document's filename\n \"\"\"\n\n current_dir = os.getcwd()\n model = get_model()\n corpus = corpora.MmCorpus(current_dir + CORPUS_FILE)\n doc_id = int(doc_name.replace('.txt', ''))\n print(doc_id)\n print(doc_id >= len(corpus))\n if doc_id >= len(corpus): # Do inference on new document\n\n doc = get_document(current_dir + BIOS_FILE, doc_name)\n corpus_dictionary = load_dictionary(current_dir + CORPUS_DICTIONARY_FILE)\n\n topic_num = max(model[corpus_dictionary.doc2bow(doc)], key=lambda x: x[1])[0]\n else: \n\n with open(current_dir + INFERENCES) as f:\n for i, line in enumerate(f):\n if i == doc_id:\n topic_distribution = list(map(float,line.replace('\\n','').split('\\t')[2:]))\n topic_num = topic_distribution.index(max(topic_distribution))\n\n return clean_topic_terms(model.show_topic(topic_num))\n\ndef get_top_words_from_query_topic(query):\n \"\"\" A function that extracts a list of terms associated with a query's inferred topic.\n\n Parameters\n ----------\n query: str\n The query\n \"\"\"\n\n current_dir = os.getcwd()\n \n model = get_model()\n\n corpus_dictionary = 
load_dictionary(current_dir + CORPUS_DICTIONARY_FILE)\n\n split_query = query.split(' ')\n\n topic_num = max(model[corpus_dictionary.doc2bow(split_query)], key=lambda x: x[1])[0]\n topic = model.show_topic(topic_num)\n top_terms = clean_topic_terms(topic)\n\n yield top_terms\n\ndef clean_topic_terms(topic):\n \"\"\" A function that replaces '_' with ' ' for each term in a list of top-10 terms associated with a topic cluster.\n\n Parameters\n ----------\n topic: list\n The top-10 terms associated with a topic\n \"\"\"\n return[termscore[0].replace('_', ' ') for termscore in topic]\n\ndef get_document(top_directory, doc_name):\n \"\"\" Processes specified document to yield a list of utf-8 tokens.\n\n Parameters\n ----------\n top_directory: str\n Path to folder containing documents\n\n doc_name: str\n Filename for document\n \"\"\"\n\n fp = open(os.path.join(top_dir, file))\n document = fp.read().lower() # read the entire document, as one big string\n \n #soup = BeautifulSoup(document, \"lxml\")\n #document = soup.get_text(separator='\\n').split(' ')\n document = gensim.utils.simple_preprocess(' '.join(document))\n document = [word for word in document if len(word) > 4]\n\n fp.close()\n return document\n\ndef get_model():\n \"\"\" Gets a loaded topic model. \"\"\"\n current_dir = os.getcwd()\n model = load_topic_model(current_dir + TOPIC_MODEL_FILE)\n model.prefix = current_dir + MODEL_FILES\n return model\n\ndef load_topic_model(file_name):\n \"\"\" Attempts to load a topic model from disk.\n\n Parameters\n ----------\n file_name: str\n Name of the file that holds the topic model\n \"\"\"\n try :\n return gensim.utils.SaveLoad.load(file_name)\n except IOError as err:\n print(err)\n except:\n print(\"Something went wrong:\", sys.exc_info()[0])\n\ndef load_dictionary(file_name):\n \"\"\" Attempts to load a gensim dictionary from disk.\n\n Parameters\n ----------\n file_name: str\n Name of the file that holds the dictionary\n \"\"\"\n\n try:\n return gensim.utils.SaveLoad.load(file_name)\n except IOError as err:\n print(err)\n except:\n print(\"Something went wrong:\", sys.exc_info()[0])\n\n\n","sub_path":"ExpertSearch/data/expertsearch/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"491809367","text":"#! 
/usr/bin/env python\n\"\"\"\n Name:\n robot_name.py\n Purpose:\n exercism, python track, robot_name\n Written by:\n Z Knight, 2019.10.09\n\"\"\"\nimport string\nimport random\n\nclass Robot(object):\n # class level list to store all robot names\n name_store = []\n\n def __init__(self):\n self.reset()\n\n def _randUPPER(self):\n return random.choice(string.ascii_uppercase)\n\n def _randint(self):\n return str(random.randint(0,9))\n\n def reset(self):\n unique_name = False\n while not unique_name:\n name = \"\"\n name += self._randUPPER()\n name += self._randUPPER()\n name += self._randint()\n name += self._randint()\n name += self._randint()\n \n # is this name taken?\n if name not in Robot.name_store:\n Robot.name_store.append(name)\n unique_name = True\n self.name = name\n\n","sub_path":"python/robot-name/robot_name.py","file_name":"robot_name.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"372197711","text":"import os\nimport docker\nfrom dock.util import split_repo_img_name_tag, join_repo_img_name_tag, get_baseimage_from_dockerfile, \\\n join_repo_img_name, join_img_name_tag, wait_for_command, clone_git_repo, LazyGit, figure_out_dockerfile\nfrom tests.constants import DOCKERFILE_GIT, INPUT_IMAGE\n\n\nTEST_DATA = [\n (\"repository.com/image-name\", (\"repository.com\", \"image-name\", \"\")),\n (\"repository.com/prefix/image-name:1\", (\"repository.com\", \"prefix/image-name\", \"1\")),\n (\"image-name\", (\"\", \"image-name\", \"\")),\n (\"registry:5000/image-name:latest\", (\"registry:5000\", \"image-name\", 'latest')),\n (\"fedora:20\", (\"\", \"fedora\", \"20\")),\n]\n\n\nTEST_DATA_IMG_TAG = [\n (\"image-name\", (\"image-name\", \"\")),\n (\"prefix/image-name:1\", (\"prefix/image-name\", \"1\")),\n (\"fedora:20\", (\"fedora\", \"20\")),\n]\n\n\nTEST_DATA_REG_IMG = [\n (\"repository.com/image-name\", (\"repository.com\", \"image-name\")),\n (\"repository.com/prefix/image-name\", (\"repository.com\", \"prefix/image-name\")),\n (\"image-name\", (\"\", \"image-name\")),\n (\"registry:5000/image-name\", (\"registry:5000\", \"image-name\")),\n]\n\n\ndef test_split_image_repo_name():\n global TEST_DATA\n for chain, chunks in TEST_DATA:\n result = split_repo_img_name_tag(chain)\n assert result == chunks\n\n\ndef test_join_repo_img_name_tag():\n global TEST_DATA\n for chain, chunks in TEST_DATA:\n result = join_repo_img_name_tag(*chunks)\n assert result == chain\n\n\ndef test_join_reg_img():\n global TEST_DATA_REG_IMG\n for chain, chunks in TEST_DATA_REG_IMG:\n result = join_repo_img_name(*chunks)\n assert result == chain\n\n\ndef test_join_img_tag():\n global TEST_DATA_IMG_TAG\n for chain, chunks in TEST_DATA_IMG_TAG:\n result = join_img_name_tag(*chunks)\n assert result == chain\n\n\ndef test_wait_for_command():\n d = docker.Client()\n logs_gen = d.pull(INPUT_IMAGE, stream=True)\n assert wait_for_command(logs_gen) is not None\n\n\ndef test_clone_git_repo(tmpdir):\n tmpdir_path = str(tmpdir.realpath())\n clone_git_repo(DOCKERFILE_GIT, tmpdir_path)\n assert os.path.isdir(os.path.join(tmpdir_path, '.git'))\n\n\ndef test_get_baseimg_from_df(tmpdir):\n tmpdir_path = str(tmpdir.realpath())\n clone_git_repo(DOCKERFILE_GIT, tmpdir_path)\n base_img = get_baseimage_from_dockerfile(tmpdir_path)\n assert base_img.startswith('fedora')\n\n\ndef test_figure_out_dockerfile(tmpdir):\n tmpdir_path = str(tmpdir.realpath())\n clone_git_repo(DOCKERFILE_GIT, tmpdir_path)\n path, dir = 
figure_out_dockerfile(tmpdir_path)\n assert os.path.isfile(path)\n assert os.path.isdir(dir)\n\n\ndef test_lazy_git():\n lazy_git = LazyGit(git_url=DOCKERFILE_GIT)\n with lazy_git:\n assert lazy_git.git_path is not None\n\n\ndef test_lazy_git_with_tmpdir(tmpdir):\n t = str(tmpdir.realpath())\n lazy_git = LazyGit(git_url=DOCKERFILE_GIT, tmpdir=t)\n assert lazy_git._tmpdir == t\n assert lazy_git.git_path is not None\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"611435487","text":"from os import path\r\nimport pygame\r\nimport json\r\nimport tkinter as tk\r\nimport shutil\r\nfrom tkinter.filedialog import askopenfile\r\n\r\nfrom pygame.image import load\r\npygame.font.init()\r\n\r\n\r\n\r\npathToSave = \"\"\r\nactiveIndex = 0\r\n\r\n\r\ndef importFile(oPath,type,quit):\r\n \"\"\"Imports the file from the given path to the given new path \\n\r\n Keyword arguments: \\n\r\n oPath -- original path of the file given as a raw string \\n\r\n nPath -- new path of the file given as a raw string \\n\r\n \"\"\"\r\n global pathToSave\r\n nPath = f\"data/graphics/{type}.png\"\r\n rOPath = r\"{}\".format(oPath)\r\n shutil.move(rOPath,nPath)\r\n quit.destroy()\r\n \r\n pathToSave = nPath\r\ndef getName(path):\r\n path = path.replace(\"data/graphics/\",'')\r\n path = path.replace(\".png\",'')\r\n return path\r\ndef formatPath(path):\r\n \"\"\"Formats a path gotten from the open_file() function so that it can be used in the importFile() function \\n\r\n Keyword arguments: \\n\r\n path -- the string to be formatted\r\n \"\"\"\r\n temp = str(path)\r\n temp = temp.replace(\"<_io.TextIOWrapper name='\",'')\r\n temp = temp.replace(\"' mode='r' encoding='cp1252'>\",'')\r\n return temp\r\ndef getFile():\r\n global pathToSave\r\n file_path = askopenfile(mode='r',filetypes=[('Image Files', '*png')])\r\n file_path = formatPath(file_path)\r\n pathToSave = file_path\r\n\r\nws = tk.Tk()\r\ntk.Label(ws,text=\"type\").grid(row = 0)\r\ntk.Label(ws,text=\"file\").grid(row=1)\r\ntt = tk.Entry(ws)\r\ntt.grid(row = 0,column= 1)\r\nfile = tk.Button(ws,\r\n text=\"import\",\r\n command=lambda: getFile())\r\nfile.grid(row=1,column=1)\r\ndone = tk.Button(ws,\r\n text='add',\r\n command=lambda:importFile(pathToSave,tt.get(),ws)).grid(row=2)\r\n\r\nclass Button:\r\n def __init__(self,x,y,texture,text,buttonData):\r\n self.x = x \r\n self.y = y \r\n self.text = text\r\n self.selectTexture = texture\r\n\r\n self.buttonData = buttonData\r\n\r\n self.font = pygame.font.Font(\"data/graphics/ROADSTORE Dafont.ttf\", 16)\r\n\r\n self.textToRender = self.font.render(text,True,(0,0,0),(255,253,208))\r\n self.textRect = self.textToRender.get_rect()\r\n\r\n print(texture)\r\n self.image = pygame.image.load(texture)\r\n self.imageRect = self.image.get_rect() \r\n\r\n if(self.textRect.width > self.imageRect.width):\r\n self.masterSurface = pygame.Surface((self.textRect.width+5,self.imageRect.height+self.textRect.height+1))\r\n else:\r\n self.masterSurface = pygame.Surface((self.imageRect.width+5, self.imageRect.height+self.textRect.height+1))\r\n\r\n self.imageRect.midtop = (self.masterSurface.get_width()//2,0)\r\n self.textRect.midtop = (self.masterSurface.get_width()//2,self.imageRect.bottom)\r\n\r\n self.masterSurface.fill((255,253,208))\r\n\r\n self.masterRect = self.masterSurface.get_rect(topleft=(x,y))\r\n \r\n def render(self,viewport):\r\n 
self.masterSurface.blit(self.image,self.imageRect)\r\n self.masterSurface.blit(self.textToRender,self.textRect)\r\n viewport.blit(self.masterSurface,self.masterRect)\r\n \r\n def selectTile(self):\r\n for i in range(len(self.buttonData[\"text\"])):\r\n if self.text == self.buttonData[\"text\"][i]:\r\n return i \r\n\r\n\r\n\r\n def onClick(self,position):\r\n \r\n if self.masterRect.collidepoint(position):\r\n return True\r\n \r\n \r\n\r\n\r\n def getWidth(self):\r\n return self.masterSurface.get_width()\r\n\r\n def getHeight(self):\r\n return self.masterSurface.get_height()\r\n\r\n\r\n\r\nclass Panel:\r\n\r\n SIZE_X=400\r\n SIZE_Y=640\r\n\r\n BOTTOM_X=400+640\r\n BOTTOM_Y=200\r\n\r\n def __init__(self):\r\n self.area = pygame.Surface((Panel.SIZE_X,Panel.SIZE_Y))\r\n self.areaRect = self.area.get_rect(topleft = (640,0))\r\n self.area.fill((255,253,208))\r\n \r\n self.buttonX = 10\r\n self.buttonY = 0\r\n\r\n self.buttonArray = []\r\n\r\n self.bottomArea = pygame.Surface((Panel.BOTTOM_X,Panel.BOTTOM_Y))\r\n self.bottomAreaRect = self.bottomArea.get_rect(topleft=(0,Panel.SIZE_Y))\r\n self.bottomArea.fill((255,253,208))\r\n\r\n def addButtons(self,buttonData):\r\n self.buttonX = 0\r\n self.buttonY = 0\r\n self.indexToPass = 0\r\n self.buttonArray = []\r\n for i in range(len(buttonData[\"text\"])):\r\n self.buttonArray.append(Button(self.buttonX + 640,self.buttonY,buttonData[\"path\"][i],buttonData[\"text\"][i],buttonData))\r\n \r\n self.buttonX += self.buttonArray[len(self.buttonArray)-1].getWidth()*2\r\n if self.buttonX + self.buttonArray[len(self.buttonArray)-1].getWidth()*2 >= 400:\r\n self.buttonX = 0\r\n self.buttonY += self.buttonArray[len(self.buttonArray)-1].getHeight() * 2\r\n\r\n def addAButton(self):\r\n global pathToSave\r\n ws.mainloop()\r\n\r\n dict = {}\r\n with open(\"types.json\",'r') as types:\r\n dict = json.load(types)\r\n dict[\"text\"].append(getName(pathToSave))\r\n dict[\"path\"].append(pathToSave)\r\n with open(\"types.json\",'w') as types:\r\n json.dump(dict,types)\r\n self.load(\"types.json\")\r\n \r\n def load(self,file):\r\n with open(file) as toLoad:\r\n dict = json.load(toLoad)\r\n self.addButtons(dict)\r\n \r\n \r\n def onClick(self,position):\r\n\r\n for b in range(len(self.buttonArray)):\r\n if self.areaRect.collidepoint(position):\r\n if self.buttonArray[b].onClick(position):\r\n self.indexToPass = self.buttonArray[b].selectTile()\r\n break\r\n return self.indexToPass\r\n \r\n \r\n def renderButtons(self,viewport):\r\n for b in range(len(self.buttonArray)):\r\n self.buttonArray[b].render(viewport)\r\n \r\n \r\n\r\n def render(self,viewport):\r\n viewport.blit(self.bottomArea,self.bottomAreaRect)\r\n viewport.blit(self.area,self.areaRect)\r\n self.renderButtons(viewport)\r\n\r\n \r\n\r\n \r\n","sub_path":"MapEditor/improvedGui.py","file_name":"improvedGui.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"146956930","text":"from flask import Flask, render_template, request, redirect, url_for, g, send_from_directory, flash\nfrom flask_login import LoginManager, current_user, login_user, login_required, logout_user\nfrom flask_sqlalchemy import SQLAlchemy # pip3 install flask-sqlalchemy\nfrom werkzeug.urls import url_parse\nfrom datetime import datetime\n# librerias para crear las categorias\nfrom werkzeug.utils import secure_filename\nimport os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 
'\\xd5\\xeb\\xb16\\x1e79\\xd6[\\xcb\\x9fBX\\xc0x\\xa3K~d\\x9d\\x02\\xdc\\xc2FX\\x9a\\xe5)\\xc4\\n\\x97Q\\xef\\xba\\x07\\x82n\\x0b\\x1a\\xa7'\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+mysqlconnector://kevinguzman:kevinguzman@127.0.0.1:3306/subir_archivos\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'index'\n\ndb = SQLAlchemy(app)\n\nfrom models import *\n\nEXTENSIONES_PERMITIDAS = set([\"png\", \"jpg\", \"gif\", \"jpeg\"])\n\ndef extensiones_permitidas(filename):\n return \".\" in filename and filename.rsplit(\".\", 1)[1] in EXTENSIONES_PERMITIDAS\n\n@app.before_request\ndef before_request_for_user():\n\tg.usuario = current_user\n\n@app.route('/')\ndef index():\n if current_user.is_authenticated:\n return render_template(\"index.html\", categorias=Categoria.obtener_categorias(current_user.id))\n else:\n return render_template(\"index.html\")\n\n@app.route('/inicio-sesion/', methods=['GET', 'POST'])\ndef inicio_sesion():\n if current_user.is_authenticated:\n return redirect(url_for('crear_categoria'))\n if request.method == \"POST\":\n usuario = Usuario.get_by_email(request.form.get(\"correo\"))\n print(\"paso el post\")\n if usuario is not None and usuario.check_password(request.form.get(\"contraseña\")):\n \n login_user(usuario, remember=True)\n\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n categorias = Categoria.obtener_categorias(current_user.id) # no se usa...\n next_page = url_for(\"crear_categoria\")\n # print(\"Bienvenido \", usuario.nombre_usuario)\n flash(\"Bienvenido \" + usuario.nombre_usuario)\n return redirect(next_page)\n flash(\"Usuario inválido.\")\n print(\"usuario: \" + str(usuario))\n return render_template(\"inicio_sesion.html\")\n\n@app.route('/registro/', methods=['GET', 'POST'])\ndef registro():\n if request.method == \"POST\":\n usuario = Usuario.get_by_email(request.form.get(\"correo\"))\n if usuario is not None:\n flash(\"Ya existe una cuenta asociada a este correo.\")\n else:\n nombre = request.form.get(\"nom-usuario\")\n correo = request.form.get(\"correo\")\n contrasenia = request.form.get(\"contraseña\")\n\n try:\n usuario = Usuario(nombre_usuario=nombre, correo=correo, id_rol=None)\n usuario.set_password(contrasenia)\n usuario.save()\n next_page = request.args.get('next', None)\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('inicio_sesion')\n flash(\"el usuario ha sido creado con exito.\")\n return redirect(next_page)\n except:\n flash(\"Error en el servidor, intentelo más tarde.\")\n if current_user.is_authenticated:\n return render_template(\"registro.html\", categorias=Categoria.obtener_categorias(current_user.id))\n else:\n return render_template(\"registro.html\")\n\n@app.route('/crear-categoria/', methods=['GET', 'POST'])\ndef crear_categoria():\n if request.method == \"POST\":\n if request.form.get(\"nombre_categoria\"):\n nom_categoria = request.form.get(\"nombre_categoria\").upper()\n nombre_categoria = secure_filename(nom_categoria)\n print(nombre_categoria)\n # Crear la carpeta de la categoria:\n if not os.path.exists(\"./static/images/\" + nombre_categoria):\n try:\n categoria = Categoria.verificar_categoria(nombre_categoria)\n if categoria is not None:\n flash(\"Ya tienes una categoria con el mismo nombre.\")\n else:\n categoria = Categoria(nombre=nombre_categoria.upper(), id_usuario=current_user.id)\n categoria.save()\n os.mkdir(\"./static/images/\" + nombre_categoria)\n categorias 
= Categoria.obtener_categorias(current_user.id)\n flash(\"Categoria creada con exito.\")\n # return redirec(url_for(\"subir_archivo\"))\n return render_template(\"crear_categoria.html\", categorias=Categoria.obtener_categorias(current_user.id))\n\n except:\n flash(\"Ha habido un error 500 en el servidor, vuelva e intentarlo más tarde.\")\n else:\n categoria = Categoria.verificar_categoria(nombre_categoria)\n if categoria is not None:\n flash(\"Ya tienes una categoria con el mismo nombre.\")\n else:\n categoria = Categoria(nombre=nombre_categoria.upper(), id_usuario=current_user.id)\n categoria.save()\n flash(\"Categoria creada con exito.\")\n else:\n flash(\"Ingrese nombre de la categoria.\")\n return render_template(\"crear_categoria.html\", categorias=Categoria.obtener_categorias(current_user.id))\n\n@app.route('/subir-archivos/', methods=['GET', 'POST'])\ndef subir_archivo():\n # categorias = Categoria.obtener_categorias(current_user.id)\n if request.method == \"POST\":\n nombre_categoria = request.form.get(\"opciones\")\n # if Categoria.query.filter_by(nombre=nombre_categoria).first():\n categoria = Categoria.query.filter_by(nombre=nombre_categoria, id_usuario=current_user.id).first()\n print(categoria.nombre)\n UPLOAD_FOLDER = os.path.abspath(\"./static/images/\" + nombre_categoria)\n app.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\n if \"cargar_archivo\" not in request.files:\n flash(\"El formulario no tiene la parte que corresponde al archivo.\")\n f = request.files[\"cargar_archivo\"]\n if f.filename == \"\":\n flash(\"No ha seleccionado un archivo.\")\n \"\"\"\n LAS IMAGENES YA ESTAN GUARDADAS EN LA CARPETA DE SU CATEGORIA CORRESPONDIENTE,\n HACE FALTA GUARDARLAS EN LA CATEGORIA CORRESPONDIENTE EN LA BASE DE DATOS.\n \"\"\"\n if f and extensiones_permitidas(f.filename):\n filename = secure_filename(f.filename)\n\n if request.form.get(\"renombrar_archivo\"):\n filename = request.form.get(\"renombrar_archivo\")\n filename = secure_filename(filename)\n\n f.save(os.path.join(app.config[\"UPLOAD_FOLDER\"], filename))\n flash(\"archivo guardado correctamente en la categoria: \" + nombre_categoria)\n \n fecha = datetime.now()\n fecha = fecha.strftime(\"%Y-%m-%d\")\n imagen = Imagen(nombre=filename, descripcion=request.form.get(\"descripcion_imagen\"), \\\n fecha=fecha, id_categoria=categoria.id)\n imagen.save()\n flash(\"archivo guardado correctamente\")\n return render_template(\"subir_archivo.html\", categorias=Categoria.obtener_categorias(current_user.id))\n else:\n flash(\"El archivo tiene una extensión no permitida.\")\n return render_template(\"subir_archivo.html\", categorias=Categoria.obtener_categorias(current_user.id))\n\n# @app.route('/mis-archivos/')\n@app.route('/mis-archivos//')\ndef mis_archivos(categoria=None):\n if categoria:\n print(\"antes categoria\")\n categoria = Categoria.query.filter_by(nombre=categoria, id_usuario=current_user.id).first()\n # categoria = db.session.query(Categoria).filter(Categoria.nombre==categoria, Categoria.id_usuario==current_user.id).first()\n # categoria = Categoria.query.filter_by(nombre=categoria).filter(Categoria.id_usuario==current_user.id)\n\n\n print(categoria)\n\n # categorias = Categoria.obtener_categorias(current_user.id).join(imagen, imagen.id_categoria==categoria.id).all()\n categoria_imagen = db.session.query(Imagen).join(Categoria).join(Usuario).filter(Usuario.id==current_user.id). 
\\\n filter(Imagen.id_categoria==categoria.id)\n lista_imagenes = []\n for cat in categoria_imagen:\n lista_imagenes.append(cat)\n \n for l in lista_imagenes:\n print(l)\n print(l.nombre)\n print(l.fecha)\n\n\n context = {\"categorias\": Categoria.obtener_categorias(current_user.id),\n \"categoria\": categoria.nombre,\n \"imagenes\": lista_imagenes}\n\n return render_template(\"mis_archivos.html\", **context)\n # return render_template(\"mis_archivos.html\", categorias=Categoria.obtener_categorias(current_user.id))\n \n\n # imagenes = Imagen.query.join(categoria).join(usuario).add_columns(usuario.id, usuario.nombre_usuario, categoria.id, categoria.nombre, \n # imagen.id, imagen.nombre).filter_by(usuario.id=current_user.id).filter_by(categoria.id=categoria.id).paginate(page, 1, False)\n # imagenes = Post.query.join(followers, (followers.c.followed_id == Post.user_id))\n # imagenes = Imagen.query.join(categoria, (Imagen.id_categoria == categoria.id))\n # imagenes = Imagen.query.join(categoria).join(usuario).filter_by(categoria.id=categoria.id, usuario.id=current_user.id).all()\n # game = Game.query.join(Round).join(League, Round.league_id == League.id).filter(Game.utc_time < datetime.utcnow(),League.id == league.id \\\n # ).order_by(Game.utc_time.desc()).first() \n # imagenes = Imagen.query.join(Categoria).join(Usuario).filter(Imagen.id_categoria==Categoria.id, Categoria.id_usuario == current_user.id \\\n # ).all()\n # return imagenes\n return render_template(\"mis_archivos.html\", categorias=Categoria.obtener_categorias(current_user.id))\n\n@app.route('/mis-archivos///')\ndef ver_imagen(categoria, nombre_archivo):\n UPLOAD_FOLDER = os.path.abspath(\"./static/images/\" + categoria)\n app.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\n return send_from_directory(app.config[\"UPLOAD_FOLDER\"], nombre_archivo)\n\n@app.route('///')\ndef borrar_imagen(categoria, imagen_nombre):\n print(categoria + \" \" + imagen_nombre)\n categoria = Categoria.query.filter_by(nombre=categoria, id_usuario=current_user.id).first()\n # print(str(categoria.id))\n nombre_imagen = Imagen.query.filter_by(nombre=imagen_nombre, id_categoria=categoria.id).first()\n print(str(nombre_imagen.id))\n db.session.delete(nombre_imagen)\n db.session.commit()\n return redirect(url_for('mis_archivos', categoria=categoria.nombre))\n\n# CARGAR AL USUARIO:\n@login_manager.user_loader\ndef load_user(user_id):\n\treturn Usuario.get_by_id(int(user_id))\n# ---\n\n# LOGOUT:\n@app.route(\"/salir\")\n@login_required\ndef salir():\n\tusuario = current_user.nombre_usuario\n\tlogout_user()\n\t# flash(\"Come back soon, \" + user)\n\tprint(\"Vuelve pronto, \" + usuario)\n\treturn redirect(url_for('index'))\n# ---\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"534676785","text":"import datetime\nimport xlsxwriter\n# Create a workbook and add a worksheet.\nfileName = input(\"파일명을 입력하세요(확장자포함) ::\")\nworkbook = xlsxwriter.Workbook(fileName+'_RESULT.xlsx')\nworksheet = workbook.add_worksheet()\nworksheet.write_row(0,0,['카드번호','트랜잭션ID','사용자구분코드','출발ID','출발_호선','출발수단','출발시간','도착ID','도착_호선','도착수단','도착시간','환승시간(초)'])\nrow = 1\ncol = 0\n\n# Start from the first cell. 
Rows and columns are zero indexed.\ndef get_second(first, last):\n try:\n first_time = datetime.datetime(int(first[0:4]), int(first[4:6]), int(first[6:8]), int(first[8:10]), int(first[10:12]), int(first[12:]))\n last_time = datetime.datetime(int(last[0:4]), int(last[4:6]), int(last[6:8]), int(last[8:10]), int(last[10:12]), int(last[12:]))\n td = last_time - first_time\n return td.seconds\n except:\n return \"안찍고내림\"\n\nf = open(fileName,'r',encoding='utf8').readlines()\nf2 = open(fileName+'_RESULT.csv','w')\nf2.writelines(['카드번호',',','트랜잭션ID',',','사용자구분코드',',','출발ID',',','출발_호선',',','출발수단',',','출발시간',',','도착ID',',','도착_호선',',','도착수단',',','도착시간',',','환승시간(초)',',','\\n'])\n#번호,일련번호,카드번호,승차일시,트랜잭션ID,교통수단코드,환승횟수,표준노선ID,교통사업자ID,표준차량ID,사용자구분코드,운행출발일시,표준승차정류장ID,하차일시,표준하차정류장ID,이용객수_다인승,승차금액,하차금액,임시필드\nlines = f\noldCardNo = ''\noldTranjectionId = 0\noldLineIdx = 1\nisFirst = True\nfor line in lines[1:]:\n datas = line.split(',')\n #\n if isFirst:\n oldCardNo = datas[2]\n oldTranjectionId = datas[4]\n isFirst = False\n else:\n newCardNo = datas[2]\n newTranjectionId = datas[4]\n newLineIdx = oldLineIdx + 1\n if oldCardNo == newCardNo and oldTranjectionId == newTranjectionId: #환승\n lateIdx = 0\n firstIdx = 0\n print(\"{}줄과 {}줄 환승\".format(oldLineIdx,newLineIdx))\n #print(\"\\t old는 {} 그리고 new는 {} \".format(str(lines[oldLineIdx].split(',')[3]),str(lines[newLineIdx].split(',')[3])))\n if lines[oldLineIdx].split(',')[3] > lines[newLineIdx].split(',')[3]:\n #print(\"나중에 내린거 {}\".format(lines[oldLineIdx].split(',')[3]))\n firstIdx = newLineIdx\n lateIdx = oldLineIdx\n else:\n #print(\"나중에 내린거 {}\".format(lines[newLineIdx].split(',')[3]))\n firstIdx = oldLineIdx\n lateIdx = newLineIdx\n\n #출발수단코드 계산\n if len(str(lines[firstIdx].split(',')[14]).strip()) <= 4: #지하철코드\n startCode = '0'\n else:#버스코드\n startCode = '1'\n #도착수단코드계산\n if len(str(lines[lateIdx].split(',')[12]).strip()) <= 4: #지하철코드\n endCode = '0'\n else:#버스코드\n endCode = '1'\n print(lines[0].split(','))\n print(lines[firstIdx].split(','))\n print(lines[lateIdx].split(','))\n\n #print(\"카드번호 : {} \\n 트랜잭션ID : {} \\n 사용자구분코드 : {} \\n 출발ID : {} \\n 출발호선 : {} \\n 출발수단 : {} \\n 도착 ID : {} \\n 도착호선 : {} \\n 도착수단 : {} \\n 환승시간(초) : {}\"\n # .format( oldCardNo, oldTranjectionId, datas[10], lines[firstIdx].split(',')[14], lines[firstIdx].split(',')[14],\n # startCode, lines[lateIdx].split(',')[12],lines[lateIdx].split(',')[12],endCode,get_second(lines[firstIdx].split(',')[13],lines[lateIdx].split(',')[3])))\n worksheet.write_row(row,col, [oldCardNo, oldTranjectionId, datas[10], lines[firstIdx].split(',')[14], lines[firstIdx].split(',')[14],startCode, lines[firstIdx].split(',')[13],lines[lateIdx].split(',')[12],lines[lateIdx].split(',')[12],endCode,lines[lateIdx].split(',')[3],get_second(lines[firstIdx].split(',')[13],lines[lateIdx].split(',')[3])])\n f2.writelines([oldCardNo, ',',oldTranjectionId,',',datas[10], ',',lines[firstIdx].split(',')[14], ',',lines[firstIdx].split(',')[14],',',startCode, ',',lines[firstIdx].split(',')[13],',',lines[lateIdx].split(',')[12],',',lines[lateIdx].split(',')[12],',',endCode, ',',lines[lateIdx].split(',')[3],',',str ( get_second(lines[firstIdx].split(',')[13],lines[lateIdx].split(',')[3]) ),',','\\n'])\n row+=1\n oldCardNo = newCardNo\n oldTranjectionId = newTranjectionId\n oldLineIdx += 1\nworkbook.close()\nf2.close()\n# 데이터셋 보는곳\n\"\"\"\nfor line in lines:\n datas = line.split(',')\n 
print(datas)\n\"\"\"\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"17284868","text":"import numpy as np\r\n\r\n\r\ndef sigmoid(z):\r\n s = 1 / (1 + np.exp(-z))\r\n return s\r\n\r\n\r\ndef identity(z):\r\n return z\r\n\r\n\r\ndef initialize_with_zeros(dim):\r\n w = np.zeros(shape=dim)\r\n b = np.zeros(shape=(dim[0], 1))\r\n # assert (w.shape == dim)\r\n # assert (isinstance(b, float) or isinstance(b, int))\r\n return w, b\r\n\r\n\r\ndef logLikehood_cost_grad(m, Y, A, X):\r\n cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A))) # compute cost\r\n dw = (1 / m) * np.dot(X, (A - Y).T).T\r\n db = (1 / m) * np.sum(A - Y).T\r\n return cost, dw, db\r\n\r\n\r\ndef optimize_sgd(model, X, Y, num_iterations, learning_rate, print_cost=False, epsilion=0.0001):\r\n \"\"\"\r\n This function optimizes w and b by running a gradient descent algorithm\r\n\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of shape (num_px * num_px * 3, number of examples)\r\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\r\n num_iterations -- number of iterations of the optimization loop\r\n learning_rate -- learning rate of the gradient descent update rule\r\n print_cost -- True to print the loss every 100 steps\r\n\r\n Returns:\r\n params -- dictionary containing the weights w and bias b\r\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\r\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\r\n\r\n Tips:\r\n You basically need to write down two steps and iterate through them:\r\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\r\n 2) Update the parameters using gradient descent rule for w and b.\r\n \"\"\"\r\n\r\n costs = []\r\n\r\n for i in range(num_iterations):\r\n\r\n grads, cost = model.propagate(X, Y)\r\n\r\n # Retrieve derivatives from grads\r\n dw = grads[\"dw\"]\r\n db = grads[\"db\"]\r\n\r\n model.w = model.w - learning_rate * dw # need to broadcast\r\n model.b = model.b - learning_rate * db\r\n\r\n costs.append(cost)\r\n\r\n # Print the cost every 100 training examples\r\n if print_cost and i % 100 == 0:\r\n print(\"Cost after iteration %i: %f\" % (i, cost))\r\n\r\n grads = {\"dw\": dw,\r\n \"db\": db}\r\n\r\n return grads, costs\r\n\r\n\r\nclass OneLayer:\r\n def __init__(self, number_of_neurons, number_of_outputs=1, act_func=identity, init_func=initialize_with_zeros,\r\n cost_func=logLikehood_cost_grad):\r\n self.w, self.b = init_func((number_of_outputs, number_of_neurons))\r\n self.number_of_neurons = number_of_neurons\r\n self.number_of_outputs = number_of_outputs\r\n self.act_func = act_func\r\n self.cost_func = cost_func\r\n\r\n if number_of_outputs == 1:\r\n self.classes = 2\r\n else:\r\n self.classes = number_of_outputs\r\n\r\n def re_init(self, init_func):\r\n self.w, self.b = init_func((self.number_of_outputs, self.number_of_neurons))\r\n\r\n def propagate(self, X, Y, type_of_y='0'):\r\n \"\"\"\r\n Implement the cost function and its gradient for the propagation explained above\r\n\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of size (num_px * num_px * 3, number of examples)\r\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\r\n\r\n Return:\r\n cost -- negative log-likelihood cost for logistic regression\r\n dw -- gradient of the loss with respect to w, thus same shape as w\r\n db -- gradient of the loss with respect to b, thus same shape as b\r\n\r\n Tips:\r\n - Write your code step by step for the propagation\r\n \"\"\"\r\n m = X.shape[1]\r\n\r\n Z = np.dot(self.w, X) + self.b\r\n A = self.act_func(Z)\r\n\r\n cost, dw, db = self.cost_func(m, Y, A, X)\r\n\r\n grads = {\"dw\": dw,\r\n \"db\": db}\r\n\r\n return grads, cost\r\n\r\n def predict(self, X, threshold=0.5, z_value=False):\r\n '''\r\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\r\n\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of size (num_px * num_px * 3, number of examples)\r\n\r\n Returns:\r\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\r\n '''\r\n\r\n # todo should a class start with a one or zero for the first class\r\n # num_of_classes = [i for i in range(self.classes)]\r\n m = X.shape[1]\r\n Y_prediction = np.zeros((1, m))\r\n # w = w.reshape(X.shape[0], 1)\r\n\r\n Z = np.dot(self.w, X) + self.b\r\n\r\n if z_value == True:\r\n return Z\r\n\r\n A = self.act_func(Z)\r\n\r\n if self.classes == 2:\r\n for i in range(A.shape[1]):\r\n # todo check if should be 0 or -1\r\n Y_prediction[0, i] = 1 if A[0, i] > threshold else 0\r\n\r\n else:\r\n for i in range(A.shape[1]):\r\n # todo check this later\r\n Y_prediction[0, i] = np.argmax(A[:, i])\r\n\r\n return Y_prediction\r\n\r\n def train(self, X_train, Y_train, num_iterations=2000, learning_rate=0.5, print_cost=False):\r\n \"\"\"\r\n Builds the logistic regression model by calling the function you've implemented previously\r\n\r\n Arguments:\r\n X_train -- training set represented by a 
numpy array of shape (num_px * num_px * 3, m_train)\r\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\r\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\r\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\r\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\r\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\r\n print_cost -- Set to true to print the cost every 100 iterations\r\n\r\n Returns:\r\n d -- dictionary containing information about the model.\r\n \"\"\"\r\n\r\n # Gradient descent (≈ 1 line of code)\r\n grads, costs = optimize_sgd(self, X_train, Y_train, num_iterations, learning_rate, print_cost)\r\n\r\n # Predict test/train set examples (≈ 2 lines of code)\r\n\r\n # Y_prediction_test = self.predict(X_test)\r\n # Y_prediction_train = self.predict(X_train)\r\n\r\n\r\n\r\n # print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\r\n # print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\r\n\r\n d = {\"costs\": costs,\r\n \"w\": self.w,\r\n \"b\": self.b,\r\n \"learning_rate\": learning_rate,\r\n \"num_iterations\": num_iterations}\r\n\r\n return d\r\n\r\n def accuracy(self, X, Y):\r\n prediction = self.predict(X)\r\n accuracy = 100 - np.mean(np.abs(prediction - Y)) * 100\r\n return accuracy\r\n\r\n def test(self, X_test, Y_test):\r\n Y_prediction_test = self.predict(X_test)\r\n accuracy = 100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100\r\n return accuracy\r\n\r\n\r\n# model = OneLayer(2, act_func=sigmoid)\r\n# X, Y = np.array([[1,2], [3,4]]), np.array([[1, 0]])\r\n# model.w = np.array([[1], [2]]).T\r\n# model.b = 2\r\n# grad ,cost = model.propagate(X, Y)\r\n#\r\n# print(cost)\r\n# print(grad)\r\n#\r\n# print(model.predict(X))\r\n#\r\n# grads, costs = optimize_sgd(model, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\r\n#\r\n# print(\"w = \" + str(model.w))\r\n# print(\"b = \" + str(model.b))\r\n# print(\"dw = \" + str(grads[\"dw\"]))\r\n# print(\"db = \" + str(grads[\"db\"]))\r\n\r\ndef cross_entropy(m, A, Y):\r\n cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A))) # compute cost\r\n return cost\r\n\r\n\r\ndef cross_entropy_der(m, A, Y):\r\n return ((-1 * Y) / A) + ((1 - Y) / (1 - A))\r\n\r\n\r\ndef sigmoid_der(A):\r\n return A * (1 - A)\r\n\r\n\r\ndef tanh(z):\r\n return np.tanh(z)\r\n\r\n\r\ndef tanh_der(A):\r\n return 1 - A ** 2\r\n\r\n\r\n\r\ndef relu(z):\r\n A = np.maximum(0, z)\r\n return A\r\n\r\ndef relu_der(A):\r\n Z = np.array(A, copy=True)\r\n der = np.array(A, copy=True)\r\n\r\n der[Z <= 0] = 0\r\n der[Z > 0] = 1\r\n\r\n return der\r\n\r\ndef random_init_zero_bias(n_2, n_1, mult=0.01):\r\n return np.random.randn(n_2, n_1) * 0.01, np.zeros(shape=(n_2, 1))\r\n\r\n\r\ndef determine_der_act_func(func):\r\n if func == sigmoid:\r\n return sigmoid_der\r\n elif func == tanh:\r\n return tanh_der\r\n elif func == relu:\r\n return relu_der\r\n\r\ndef determine_der_cost_func(func):\r\n if func == cross_entropy:\r\n return cross_entropy_der\r\n\r\n\r\nclass MultiLayer:\r\n def __init__(self, number_of_neurons=0, cost_func=cross_entropy):\r\n self.w, self.b = [], []\r\n self.parameters = {}\r\n self.layer_size = []\r\n\r\n self.number_of_input_neurons = number_of_neurons\r\n self.number_of_outputs = 0\r\n\r\n self.act_func = []\r\n 
self.derivative_act_func = []\r\n\r\n self.cost_func = cost_func\r\n self.cost_func_der = determine_der_cost_func(self.cost_func)\r\n\r\n self.cache = {}\r\n self.prev = []\r\n\r\n def addLayerInput(self, size):\r\n self.number_of_input_neurons = size\r\n self.layer_size.append(size)\r\n\r\n def addHidenLayer(self, size, act_func=sigmoid):\r\n self.layer_size.append(size)\r\n self.act_func.append(act_func)\r\n self.derivative_act_func.append(determine_der_act_func(act_func))\r\n\r\n def addOutputLayer(self, size, act_func=sigmoid):\r\n self.number_of_outputs = size\r\n self.layer_size.append(size)\r\n self.act_func.append(act_func)\r\n self.derivative_act_func.append(determine_der_act_func(act_func))\r\n\r\n def initialize_parameters(self, seed=2, init_func=random_init_zero_bias):\r\n \"\"\"\r\n Argument:\r\n n_x -- size of the input layer\r\n n_h -- size of the hidden layer\r\n n_y -- size of the output layer\r\n\r\n Returns:\r\n params -- python dictionary containing your parameters:\r\n W1 -- weight matrix of shape (n_h, n_x)\r\n b1 -- bias vector of shape (n_h, 1)\r\n W2 -- weight matrix of shape (n_y, n_h)\r\n b2 -- bias vector of shape (n_y, 1)\r\n \"\"\"\r\n\r\n #todo very important check later\r\n\r\n np.random.seed(seed) # we set up a seed so that your output matches ours although the initialization is random.\r\n\r\n # for i in range(len(self.layer_size) - 1):\r\n # out = init_func(self.layer_size[i + 1], self.layer_size[i])\r\n # self.w.append(out[0])\r\n # self.b.append(out[1])\r\n L = len(self.layer_size) # number of layers in the network\r\n\r\n for l in range(1, L):\r\n self.w.append(np.random.randn(self.layer_size[l], self.layer_size[l - 1]) / np.sqrt(\r\n self.layer_size[l - 1])) # *0.01\r\n self.b.append(np.zeros((self.layer_size[l], 1)))\r\n\r\n for i in range(len(self.layer_size)-1):\r\n self.parameters[\"W\" + str(i + 1)] = self.w[i]\r\n self.parameters[\"b\" + str(i + 1)] = self.b[i]\r\n\r\n return self.parameters\r\n\r\n def forward_propagation(self, X):\r\n \"\"\"\r\n Argument:\r\n X -- input data of size (n_x, m)\r\n parameters -- python dictionary containing your parameters (output of initialization function)\r\n\r\n Returns:\r\n A2 -- The sigmoid output of the second activation\r\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\r\n \"\"\"\r\n self.prev = []\r\n self.prev.append((1, X))\r\n for i in range(len(self.layer_size) - 1):\r\n Zi = np.dot(self.w[i], self.prev[i][1]) + self.b[i]\r\n Ai = self.act_func[i](Zi)\r\n self.prev.append((Zi, Ai))\r\n\r\n A_last = self.prev[-1][1]\r\n\r\n for i in range(len(self.layer_size) - 1):\r\n self.cache[\"Z\" + str(i + 1)] = self.prev[i + 1][0]\r\n self.cache[\"A\" + str(i + 1)] = self.prev[i + 1][1]\r\n\r\n # todo sould i compute cost in here\r\n\r\n return A_last, self.cache\r\n\r\n def set_cost(self, cost_func):\r\n self.cost_func = cost_func\r\n self.cost_func_der = determine_der_cost_func(cost_func)\r\n\r\n def compute_cost(self, Alast, Y):\r\n m = Alast.shape[1]\r\n return self.cost_func(m, Alast, Y)\r\n\r\n def backward_propagation(self, X, Y):\r\n \"\"\"\r\n Implement the backward propagation using the instructions above.\r\n\r\n Arguments:\r\n parameters -- python dictionary containing our parameters\r\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\r\n X -- input data of shape (2, number of examples)\r\n Y -- \"true\" labels vector of shape (1, number of examples)\r\n\r\n Returns:\r\n grads -- python dictionary containing your gradients with respect to different 
parameters\r\n \"\"\"\r\n m = X.shape[1]\r\n\r\n # just for testing\r\n # temp = []\r\n # if self.prev[0][0] != 1:\r\n # temp.append((1, X))\r\n # for i in range(len(self.prev)):\r\n # temp.append(self.prev[i])\r\n #\r\n # self.prev = temp\r\n\r\n # todo all depends on the type of function in cost and actviation function\r\n grad_list1_w = []\r\n grad_list1_b = []\r\n\r\n Alast = self.prev[-1][1]\r\n final_act = self.derivative_act_func[-1]\r\n dzi = self.cost_func_der(m, Alast, Y) * final_act(Alast)\r\n\r\n if self.cost_func == cross_entropy:\r\n if self.act_func[-1] == sigmoid:\r\n pass\r\n\r\n for i in range(len(self.w), 0, -1):\r\n A = self.prev[i-1][1]\r\n dwi = (1 / m) * np.dot(dzi, self.prev[i-1][1].T)\r\n dbi = (1 / m) * np.sum(dzi, axis=1, keepdims=True)\r\n if i != 1:\r\n der_func = self.derivative_act_func[i - 2]\r\n A = self.prev[i - 1][1]\r\n dzi = np.multiply(np.dot((self.w[i - 1]).T, dzi), der_func(A))\r\n\r\n grad_list1_w.append(dwi)\r\n grad_list1_b.append(dbi)\r\n\r\n # reverse grad list\r\n grad_list_w = []\r\n grad_list_b = []\r\n\r\n for i in range(len(grad_list1_w) - 1, -1, -1):\r\n grad_list_w.append(grad_list1_w[i])\r\n grad_list_b.append(grad_list1_b[i])\r\n\r\n grads = {}\r\n\r\n for i in range(len(grad_list_w)):\r\n grads['dW' + str(i + 1)] = grad_list_w[i]\r\n grads['db' + str(i + 1)] = grad_list_b[i]\r\n\r\n return grads\r\n\r\n def set_cashe(self, cache,X):\r\n self.cache = cache\r\n self.prev = []\r\n self.prev.append((1, X))\r\n for i in range(int(len(cache.keys()) / 2)):\r\n A, Z = cache[\"A\" + str(i + 1)], cache[\"Z\" + str(i + 1)]\r\n self.prev.append((Z, A))\r\n\r\n def set_parameters(self, para):\r\n self.parameters = para\r\n self.w = []\r\n self.b = []\r\n for i in range(int(len(para.keys()) / 2)):\r\n W, b = para[\"W\" + str(i + 1)], para[\"b\" + str(i + 1)]\r\n self.w.append(W)\r\n self.b.append(b)\r\n\r\n def set_parameters_internal(self):\r\n self.parameters = {}\r\n for i in range(len(self.w)):\r\n self.parameters[\"W\"+str(i+1)] = self.w[i]\r\n self.parameters[\"b\" + str(i + 1)] = self.b[i]\r\n\r\n def update_parameters(self,grads, learning_rate=1.2):\r\n \"\"\"\r\n Updates parameters using the gradient descent update rule given above\r\n\r\n Arguments:\r\n parameters -- python dictionary containing your parameters\r\n grads -- python dictionary containing your gradients\r\n\r\n Returns:\r\n parameters -- python dictionary containing your updated parameters\r\n \"\"\"\r\n # Retrieve each parameter from the dictionary \"parameters\"\r\n\r\n\r\n # Retrieve each gradient from the dictionary \"grads\"\r\n ### START CODE HERE ### (≈ 4 lines of code)\r\n\r\n for i in range(len(self.w)):\r\n self.w[i] = self.w[i] - learning_rate * grads[\"dW\"+str(i+1)]\r\n self.b[i] = self.b[i] - learning_rate * grads[\"db\" + str(i+1)]\r\n\r\n\r\n\r\n self.set_parameters_internal()\r\n\r\n return self.parameters\r\n\r\n def train(self,X, Y, num_iterations=10000, print_cost=False, init_func=random_init_zero_bias ,cont=0 ,learning_rate=1):\r\n \"\"\"\r\n Arguments:\r\n X -- dataset of shape (2, number of examples)\r\n Y -- labels of shape (1, number of examples)\r\n n_h -- size of the hidden layer\r\n num_iterations -- Number of iterations in gradient descent loop\r\n print_cost -- if True, print the cost every 1000 iterations\r\n\r\n Returns:\r\n parameters -- parameters learnt by the model. 
They can then be used to predict.\r\n \"\"\"\r\n\r\n # if cont == 0:\r\n # self.initialize_parameters(init_func=init_func,seed=3)\r\n # print(self.w)\r\n\r\n for i in range(0, num_iterations):\r\n\r\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\r\n Alast, cache = self.forward_propagation(X)\r\n\r\n # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\r\n cost = self.compute_cost(Alast, Y)\r\n\r\n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\r\n grads = self.backward_propagation(X, Y)\r\n\r\n # Gradient descent parameter update. Inputs: \"parameters, grads\". Outputs: \"parameters\".\r\n parameters = self.update_parameters(grads,learning_rate=learning_rate)\r\n\r\n if print_cost and i % 100 == 0:\r\n print(\"Cost after iteration %i: %f\" % (i, cost))\r\n\r\n return parameters","sub_path":"num2/frameWork1.py","file_name":"frameWork1.py","file_ext":"py","file_size_in_byte":17989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45058039","text":"# -*- coding: utf-8 -*-\n# @Time : 2/28/19 10:14 AM\n# @Author : zhoujun\nimport os\nimport pathlib\nimport shutil\nfrom tqdm import tqdm\nimport multiprocessing as mp\nfrom multiprocessing import Pool\n\ndef chunkIt(seq, num):\n \"\"\"\n 将list进行等分\n :param seq: list\n :param num: num\n :return:\n \"\"\"\n len_seq = len(seq)\n avg = len_seq / float(num)\n out = []\n last = 0.0\n\n while last < len_seq:\n out.append(seq[int(last):int(last + avg)])\n last += avg\n return [x for x in out if len(x)]\n\n\n# def copy_list(output_path,txt_path,file_list):\n# with open(txt_path,mode='w',encoding='utf8') as fw:\n# for line in file_list:\n# line = line.strip('\\n').replace('.jpg ', '.jpg\\t').split('\\t')\n# src_path = pathlib.Path(line[0])\n# if not src_path.exists():\n# continue\n# new_path = str(pathlib.Path(output_path) / src_path.parents._parts[-2])\n# if not os.path.exists(new_path):\n# os.makedirs(new_path)\n# le = len(os.listdir(new_path))\n# new_path = os.path.join(new_path, str(le) + src_path.suffix)\n# shutil.copy(str(src_path),new_path)\n# fw.write(new_path + '\\t' + line[1] + '\\n')\n\n\n# def copy_all(output_path,txt_path):\n# num = mp.cpu_count()\n# with open(txt_path,mode='r',encoding='utf8') as fr:\n# lists = chunkIt(fr.readlines(),num)\n# pool = Pool(processes=num)\n# pbar = tqdm(total=num)\n# for idx in range(len(lists)):\n# pool.apply_async(func=copy_list,args=(output_path,\"{}/train_{}.txt\".format(output_path,idx),lists[idx]))\n# pbar.update(1)\n# pbar.close()\n# pool.close()\n# pool.join()\n\n\ndef copy_all(output_path,txt_path):\n with open(output_path + '/train.txt',mode='w',encoding='utf8') as fw:\n with open(txt_path,mode='r',encoding='utf8') as fr:\n i = 0\n for line in tqdm(fr.readlines()):\n line = line.strip('\\n').replace('.jpg ', '.jpg\\t').split('\\t')\n src_path = pathlib.Path(line[0])\n if not os.path.exists(str(src_path)):\n continue\n new_path = str(pathlib.Path(output_path) / src_path.parents._parts[-2])\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n new_path = os.path.join(new_path, str(i) + src_path.suffix)\n shutil.copy(str(src_path),new_path)\n fw.write(new_path + '\\t' + line[1] + '\\n')\n i+=1\n\nif __name__ == '__main__':\n import config\n output_path = '/data1/zj/data/crnn/all_test'\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n s = config.testfile\n 
copy_all(output_path,s)\n","sub_path":"save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"489920484","text":"\n\nclass Website:\n\n __SITE = \"{website_name}\"\n\n __WEBSITES = {1: \"official\", 2: \"wikia\", 3: \"wikipedia\", 4: \"facebook\",\n 5: \"twitter\", 6: \"twitch\", 8: \"instagram\", 9: \"youtube\",\n 10: \"iphone\", 11: \"ipad\", 12: \"android\", 13: \"steam\"}\n\n def __init__(self):\n self.category = 0\n self.url = \"\"\n self.name = \"\"\n\n @staticmethod\n def as_website(d):\n w = Website()\n w.__dict__.update(d)\n w.name = Website.__WEBSITES[d[\"category\"]]\n return w\n\n def __str__(self):\n s = Website.__SITE.format(\n website_name=self.name.capitalize(),\n website_url=self.url)\n\n return s\n","sub_path":"igdb/objects/Website.py","file_name":"Website.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"106977183","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 1 09:45:27 2021\r\n\r\n@author: 2210104029 kondo iori\r\n\"\"\"\r\n\r\nimport math\r\nimport itertools\r\nimport copy\r\n\r\n#100以下の素数を求める\r\nS=[]\r\n\r\nfor i in range(2,100):\r\n flag = 0\r\n for j in range(2,i//2+1):\r\n if (i%j==0):\r\n flag=1\r\n break\r\n if flag == 0:\r\n S.append(i)\r\nprint(S)\r\nprint(len(S))\r\n\r\nS_sum=sum(S)\r\nprint(S_sum)\r\n\r\n#集合SAとSBの差の最小値を求める\r\nvalue=0\r\nmin_value=2000\r\nvalue_abs=0\r\nSA=[]\r\nSB=[]\r\nSA_sum=0\r\nSA_fin=[]\r\n\r\nfor i in range(1,len(S)//2+1):\r\n for pair in itertools.combinations(S,i): #Sの要素i個による組み合わせ全パターン取得\r\n SA=copy.copy(list(pair))\r\n SA_sum=sum(SA)\r\n value=2*SA_sum-S_sum #SA-SB=SA-(S-SA)=2SA-S\r\n value_abs=abs(value)\r\n if min_value>value_abs:\r\n min_value=value_abs\r\n SA_fin=copy.copy(SA)\r\n\r\nfor i in S:\r\n flag=0\r\n for j in SA_fin:\r\n if(i==j):\r\n flag=1\r\n break\r\n if flag == 0:\r\n SB.append(i)\r\n \r\nprint(\"SAとSBの差の絶対値の最小値\")\r\nprint(min_value)\r\nprint(\"その時のSAとSB\")\r\nprint(\"SA:\",SA_fin)\r\nprint(\"SB:\",SB)\r\n","sub_path":"0527/2210104029/shinka_7_1_2210104029.py","file_name":"shinka_7_1_2210104029.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"646904758","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\nfrom .helpers import *\n\nsns.set(style='darkgrid', font_scale=1.5)\nsns.set_palette('cubehelix')\n\n\ndef draw_chip_usage(raw_data, save_path=None):\n \"\"\"Draw chip usage in vertical bar plot\"\"\"\n data = prepare_data(raw_data, 'name', 'usage', 10)\n draw_bar_plot(data, 'name', 'usage', 'Chip', 'Usage %', False, True, save_path)\n\n\ndef draw_captaincy_stats(raw_data, save_path=None):\n \"\"\"Draw captaincy stats in horizontal bar plot\"\"\"\n data = prepare_data(raw_data, 'name', 'captaincy', 10)\n draw_bar_plot(data, 'captaincy', 'name', 'Captaincy %', 'Name', True, False, save_path)\n\n\ndef draw_ownership_stats(raw_data, save_path=None):\n \"\"\"Draw ownership stats in horizontal bar plot\"\"\"\n data = prepare_data(raw_data, 'name', 'ownership', 10)\n draw_bar_plot(data, 'ownership', 'name', 'Ownership %', 'Name', True, False, save_path)\n\n\ndef draw_effective_ownership_stats(raw_data, save_path=None):\n \"\"\"Draw effective ownership stats in horizontal bar plot\"\"\"\n data = prepare_data(raw_data, 'name', 
'effective_ownership', 10)\n draw_bar_plot(data, 'effective_ownership', 'name', 'Effective Ownership %', 'Name', True, False, save_path)\n\n\ndef draw_template_team(template_team, game='FPL', save_path=None):\n \"\"\"Draw template team on subplot with player portraits or team kits\"\"\"\n data = prepare_template_team_data(template_team)\n images = prepare_images(data, game)\n\n fig = plt.figure(figsize=(12, 8))\n\n for index, image in enumerate(images):\n ax = fig.add_subplot(4, 5, image['index'] + 1)\n ax.title.set_text(data[index]['label'])\n plt.axis('off')\n plt.imshow(image['image'])\n\n plt.tight_layout(pad=0.5, w_pad=1, h_pad=1.0)\n\n if save_path is None:\n plt.show()\n else:\n plt.savefig(save_path)\n\n\ndef draw_bar_plot(data, key_name, value_name, xlabel, ylabel, x_percentage=False, y_percentage=False, save_path=None):\n \"\"\"Wrapper function for bar plots.\"\"\"\n plt.figure(figsize=(20, 10))\n\n ax = sns.barplot(x=key_name, y=value_name, data=pd.DataFrame(data))\n ax.set(xlabel=xlabel, ylabel=ylabel)\n\n if y_percentage:\n values = ax.get_yticks()\n ax.set_yticklabels(['{:,.2%}'.format(y) for y in values])\n\n if x_percentage:\n values = ax.get_xticks()\n ax.set_xticklabels(['{:,.2%}'.format(x) for x in values])\n\n if save_path is None:\n plt.show()\n else:\n plt.savefig(save_path)\n","sub_path":"fantasy_premier_league/visualization/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"239913774","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Calib1\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\n#from PhysicsTools.PatAlgos.tools.coreTools import *\n## Geometry and Detector Conditions (needed for a few patTuple production steps)\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n## Standard PAT Configuration File\n#process.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n\n## global tag for data\nprocess.GlobalTag.globaltag = 'GR_R_42_V12::All' ## needed for CMSSW_3_8_0 due to changes in the DB access for JEC ## process.GlobalTag.globaltag = cms.string('GR_R_35X_V8B::All')\n\n\n#from PhysicsTools.PatAlgos.tools.metTools import *\n#removeMCMatching(process, ['All'], outputInProcess = False)\n\n#process.selectedPatElectrons.cut = 'pt > 20. 
'\n\n#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(110000) )\n\nprocess.source = cms.Source(\"PoolSource\",\n# eventsToProcess = cms.untracked.VEventRange('1:1-148655:max'),\n fileNames = cms.untracked.vstring( )\n )\n\nprocess.es_ascii = cms.ESSource(\"HcalTextCalibrations\",\n input = cms.VPSet(\n cms.PSet(\n object = cms.string('RespCorrs'),\n file = cms.FileInPath('Work/HFRescaler/data/corrHFfactors2011.txt')\n )\n )\n )\nprocess.prefer(\"es_ascii\")\n\nprocess.hfrecalib=cms.EDProducer('HFRescaler',\n input = cms.InputTag('hfreco'),\n invert = cms.bool(False)\n)\nprocess.load(\"RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff\")\n\nprocess.hfEMClusters.hits=cms.InputTag(\"hfrecalib\")\nprocess.hfRecoEcalCandidate.intercept2DCut=0.42\n\nprocess.calib = cms.EDFilter('HFZCalib',\n hfClusterShapes = cms.untracked.InputTag(\"hfEMClusters\"),\n hfRecoEcalCandidate = cms.untracked.InputTag(\"hfRecoEcalCandidate\"),\n hfHits = cms.untracked.InputTag(\"hfrecalib\"),\n selectedPatElectrons = cms.untracked.string('patElectrons')\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('HFZCalib_from_data.root')\n)\n\n\n#process.p = cms.Path(process.patDefaultSequence*process.calib)\nprocess.p = cms.Path(process.hfrecalib*process.hfEMClusteringSequence*process.calib)\n","sub_path":"HFZCalib/python/hfzcalib_datasingle_phic_cfg.py","file_name":"hfzcalib_datasingle_phic_cfg.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"635435736","text":"import numpy as np\nimport itertools\nimport csv\nimport random\nfrom IPython import embed\n\nnumRW = 4\ndatasetSize = 1000 #per scenario\nmaxTime = 60 #seconds\n#startIncpetionRange = [0,10] #case 1 and 2\nstartIncpetionRange = [5,55] #case 3\n\nktNominal = 0.029\nktFaultDeviation = 0.002\nktSigDig = 4\n\nvbusNominal = 6\nvbusFaultDeviation = 2\nvbusFaultDeviationStep = 0.25\n\ndef generateFaultScenarioDict(numRW=numRW, singleFaults=False):\n\trw = np.arange(numRW)+1\n\tfaultDict = [[0]]\n\tfor i in range(1,len(rw)+1):\n\t\tfaultDict += [list(i) for i in list(itertools.combinations(rw,i))]\n\t\tif singleFaults:\n\t\t\tbreak\n\treturn {idx:i for idx,i in enumerate(faultDict)}\n\ndef randStartTime(startIncpetionRange=startIncpetionRange, N=datasetSize):\n\treturn np.random.randint(*startIncpetionRange, size=N)\n\t#return 5.0*np.ones(datasetSize)\n\ndef randDuration(startTimes, N=datasetSize):\n\t#return np.array([np.random.randint(1,(maxNum+1-timeOffset)-t) for t in startTimes]) #removal of +1 allows min 1 sec duration\n\t#return maxTime*np.ones(len(startTimes))\n\t#return np.array([int(maxTime-t) for t in startTimes]) #case 1\n\t#return np.random.randint(10,20,size=N) #case 2\n\treturn np.array([max(5,np.random.randint(maxTime-t)) for t in startTimes]) #case 3\n\ndef randKtSeverity(ktNominal=ktNominal, ktFaultDeviation=ktFaultDeviation, N=datasetSize):\n\treturn (2*ktFaultDeviation)*np.random.random_sample(N)+(ktNominal-ktFaultDeviation)\n\t#return np.ones(N)*(ktNominal+ktFaultDeviation)\n\ndef randVbusSeverity(vbusNominal=vbusNominal, dev=vbusFaultDeviation, step=vbusFaultDeviationStep, N=datasetSize):\n\treturn np.random.choice(np.arange(vbusNominal-dev,vbusNominal+dev+step,vbusFaultDeviationStep),N)\n\t#return np.ones(N)*(vbusNominal+vbusFaultDeviation)\n\nfaultScenarioDict = generateFaultScenarioDict(singleFaults=True)\ntotalResults = np.array([\"scenario\",\"kt\", \"vbus\", \"ktInception\", 
\"vbusInception\",\"ktDuration\", \"vbusDuration\", \"ktSeverity\", \"vbusSeverity\"]).T\nfor scenario in faultScenarioDict.keys():\n\tif scenario == 0:\n\t\tnomValues = np.array([[ktNominal,vbusNominal]]*datasetSize).T\n\t\tzeroScenario = np.vstack((np.zeros((totalResults.shape[0]-nomValues.shape[0],datasetSize)),nomValues)).T\n\t\ttotalResults = np.vstack((totalResults,zeroScenario))\n\t\tcontinue\n\tscenarioArr = np.array([scenario]*datasetSize)\n\tktBinary, vbusBinary = np.array([random.choice([[1,1],[1,0],[0,1]]) for i in range(datasetSize)]).T\n\t\n\ttime = randStartTime()\n\tduration = randDuration(time)\n\tvbusFaultStartTime = time #randStartTime()*vbusBinary\n\tvbusFaultDuration = duration #randDuration(vbusFaultStartTime)*vbusBinary\n\tvbusFaultSeverity = randVbusSeverity()*vbusBinary\n\n\tktFaultStartTime = time #randStartTime()*ktBinary\n\tktFaultDuration = duration #randDuration(ktFaultStartTime)*ktBinary\n\tktFaultSeverity = np.around(randKtSeverity()*ktBinary, ktSigDig)\n\n\tvbusFaultSeverity = [vbusNominal if vbusBinary[idx]==0 else v for idx,v in enumerate(vbusFaultSeverity)]\n\tktFaultSeverity = [ktNominal if ktBinary[idx]==0 else kt for idx,kt in enumerate(ktFaultSeverity)]\n\n\t#from IPython import embed; embed()\n\tscenarioResults = np.array([scenarioArr,ktBinary, vbusBinary, ktFaultStartTime, vbusFaultStartTime,ktFaultDuration, vbusFaultDuration, ktFaultSeverity, vbusFaultSeverity]).T\n\ttotalResults = np.vstack((totalResults,scenarioResults))\n\nnums = np.hstack(([\"num\"],np.arange(totalResults.shape[0]-1)))\ntotalResults = np.vstack((nums,totalResults.T)).T\n\nwith open(\"adcs_fdi_inputs_{}_randomSeverity_singleFaults_5to55Inception_randRemainDuration.csv\".format(datasetSize), 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(totalResults)\n\ncsvFile.close()\n\n#results = np.array(list(itertools.product(*params)))","sub_path":"generateInputs/generateDataset.py","file_name":"generateDataset.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"281226862","text":"from middlewared.service import private, Service\n\n\nclass PoolService(Service):\n\n @private\n def find_disk_from_topology(self, label, pool):\n check = []\n found = None\n for root, children in pool['topology'].items():\n check.append((root, children))\n\n while check:\n root, children = check.pop()\n for c in children:\n if c['type'] == 'DISK':\n if label in (c['path'].replace('/dev/', ''), c['guid']):\n found = (root, c)\n break\n if c['children']:\n check.append((root, c['children']))\n return found\n","sub_path":"src/middlewared/middlewared/plugins/pool_/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"274190394","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 3 21:01:13 2018\r\n\r\n@author: chenyushao\r\n\"\"\"\r\n\r\nimport sys, getopt\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport time\r\n\r\ndef argmax(mylist):\r\n return mylist.index(max(mylist))\r\ndef argmin(mylist):\r\n return mylist.index(max(mylist))\r\n\r\ndef loadData(fileName):\r\n \"\"\"\r\n global xmax\r\n global y1max\r\n global y2max\r\n global y1maxIndex\r\n global y2maxIndex\r\n \"\"\"\r\n inFile = open(fileName, 'r') #read-only way\r\n \r\n outFile = open(\"testOutFile\", 'w')\r\n \r\n times = []\r\n loss = []\r\n entropy = []\r\n # references for easier and 
more explicit usage of variables\r\n x = times\r\n y1 = loss\r\n y2 = entropy\r\n \"\"\"\r\n xmax = 0\r\n y1max = 0\r\n y2max = 0\r\n \"\"\"\r\n count = 0\r\n for line in inFile:\r\n if line is '\\n':\r\n continue\r\n \r\n count += 1\r\n trainingSet = line.split(',')\r\n if count == 1:\r\n continue\r\n xtemp = int(trainingSet[0])\r\n x.append(xtemp)\r\n y1temp = float(trainingSet[1])\r\n y1.append(y1temp)\r\n y2temp = float(trainingSet[2])\r\n y2.append(y2temp)\r\n# xmax = xtemp\r\n \r\n # test output\r\n \"\"\"\r\n outFile.write(str(xtemp))\r\n outFile.write(' ')\r\n outFile.write(str(y1temp))\r\n outFile.write(' ')\r\n outFile.write(str(y2temp))\r\n outFile.write('\\n')\r\n \"\"\"\r\n# print(x[len(x) - 1], y1[len(y1) - 1], y2[len(y2) - 1])\r\n \"\"\"\r\n y1num = [float(y1[i]) for i in range(len(y1))]\r\n y1max = max(y1num)\r\n y1maxIndex = argmax(y1num)\r\n y2num = [float(y2[i]) for i in range(len(y2))]\r\n y2max = max(y2num)\r\n y2maxIndex = argmax(y2num)\r\n \"\"\"\r\n \"\"\"\r\n print(y1max)\r\n print(y2max)\r\n \"\"\"\r\n inFile.close()\r\n outFile.close()\r\n return (x, y1, y2)\r\n\r\ndef plotData(x, y, s = 'Loss'):\r\n \r\n plt.figure(figsize=(20,5))\r\n plt.title(\"Conclusion\")\r\n plt.xlabel(\"Times of Self-Plays\")\r\n plt.ylabel(\"Value of Loss and Entropy\")\r\n# plt.xticks(np.linspace(0,xmax,10),(x))\r\n xrange = range(len(x))\r\n# yrange = np.linspace(0,max(y1max, y2max),10)\r\n# plt.yticks(yrange)\r\n plt.plot(xrange, y, '-', label = s)\r\n plt.grid(True)\r\n plt.legend()\r\n \r\n global order\r\n order += 1\r\n savename = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\r\n plt.savefig(s + savename + str(order) + '.png')\r\n \r\n plt.show()\r\n \r\n \r\n return []\r\n\r\ndef main(argv):\r\n inputfile = ''\r\n outputfile = ''\r\n try:\r\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\r\n except getopt.GetoptError:\r\n print('test.py -i -o ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('txt_plot.py -i -o ')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--ifile\"):\r\n inputfile = arg\r\n elif opt in (\"-o\", \"--ofile\"):\r\n outputfile = arg\r\n \r\n global order\r\n order = 0\r\n if inputfile is not '':\r\n (x, y1, y2) = loadData(inputfile)\r\n plotData(x, y1, 'Loss')\r\n plotData(x, y2, 'Entropy')\r\n \r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n \r\n \r\n\r\n\r\n","sub_path":"info/txt_plot.py","file_name":"txt_plot.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555615605","text":"def sumDigits(s):\n \"\"\"\n :param s: A string\n :return: The sum of the deimals digits in S\n For example, if s is 'a2b3c' it returns 5\n Checks for an exception in case user doesn't enter a string\n \"\"\"\n try:\n ord(s[0])\n except TypeError:\n print(\"sumDigits wasn't called with a string input\")\n\n result = 0\n for c in s:\n if '0' <= c <= '9':\n result += ord(c)-48\n return print(\"The sum of the decimal digits in the string is\", result)\n","sub_path":"Week4/Week/Ex7_1_Book.py","file_name":"Ex7_1_Book.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"241587911","text":"import re\nimport operator\nimport time\n\n\ndef time_measure(f):\n def wrapped_with_timer_func(*args, **kwargs):\n t0 = time.perf_counter()\n func_result = f(*args, **kwargs)\n t1 = time.perf_counter()\n return t1 - t0, func_result\n return 
wrapped_with_timer_func\n\n\ndef gen_file_lines(file_name, number_lines_to_read):\n try:\n for i in range(number_lines_to_read):\n read_line = next(file_name)\n yield read_line\n except StopIteration:\n pass\n\n\nclass LinesContainer:\n n = 500\n\n def __init__(self, file_name):\n self.line_list = [line for line in gen_file_lines(file_name, self.n)]\n #self.line_list = file_name.readlines()\n\n self.chunk_size = len(self.line_list)\n self.is_last_chunk = (self.chunk_size != self.n)\n\n def __str__(self):\n output_str = \"\"\n\n for line in self.line_list:\n output_str += line\n return output_str\n\n\nclass FileStatistic:\n def __init__(self):\n self.links_stat = dict()\n self.bad_strings = list()\n self.bad_string_counter = 0\n #self.pattern = r'.*?(\\[.*?].*?){4}\\[(.*?)\\].*'\n self.pattern = r'.*\\[(/.*?)\\].*'\n self.re_pattern = re.compile(self.pattern)\n\n def check_file_chunk(self, file_chunk):\n for line in file_chunk.line_list:\n obj = self.re_pattern.match(line)\n if obj:\n found_url = obj.group(1)\n if found_url in self.links_stat:\n self.links_stat[found_url] += 1\n else:\n self.links_stat[found_url] = 1\n else:\n self.bad_string_counter += 1\n #self.bad_strings.append(line)\n\n def get_sorted_list(self):\n return sorted(self.links_stat.items(), key=operator.itemgetter(1), reverse=True)\n\n\n@time_measure\ndef parse_log_file(file_name):\n file_statistics = FileStatistic()\n with open(file_name, 'r', encoding=\"utf8\") as log_file:\n read_chunk = LinesContainer(log_file)\n file_statistics.check_file_chunk(read_chunk)\n\n while not read_chunk.is_last_chunk:\n read_chunk = LinesContainer(log_file)\n file_statistics.check_file_chunk(read_chunk)\n\n return file_statistics, file_statistics.get_sorted_list()\n\n\nif __name__ == \"__main__\":\n execution_time, url_stat_dict = parse_log_file(\"logfile.log\")\n for value in url_stat_dict[1][0:5]:\n print(value)\n\n print(\"Number of bad points {}\".format(url_stat_dict[0].bad_string_counter))\n print(\"The function worked {} sec\".format(execution_time))\n\n","sub_path":"src/script006/kzheronkin/parser_log_file.py","file_name":"parser_log_file.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"323456591","text":"# =============================================================================\n# Group Number: 30\n#\n# PROGRAMMER1: Tomas Ortega\n# PANTHER\tID1: 5677483\n\n# PROGRAMMER2: Pablo Mueller\n# \tPANTHER\tID2: 3283876\n\n# \tCLASS: CAP4506\n# \tSECTION: U01\n# \tSEMESTER: Spring 2019\n# \tCLASSTIME: M/W\t6:25-7:45 PM\n\n# \tProject: This program will alow the user to find nash equilibriums and calculate expected payoffs for each player.\n# \tDUE: Sunday, Apruk\t7,\t2019 at midnight.\n\n# \tCERTIFICATION: I certify\tthat\tthis\twork\tis\tmy own\tand\tthat none\tof it is the work of any other\tperson.\n# =============================================================================\n\nimport random\nimport numpy as np \n\nclass NormalForm():\n\n def __init__(self, mode, rows, columns, lower_limit=-99, upper_limit=99):\n \"\"\" Initialize a grid that represents the normal form of a game\n\n Keyword Arguments:\n\n mode: random or manual. 
If 'random' then we generate values in the range (lower_limit, upper_limit)\n rows: number of rows in the normal form grid (the number of strategies for player 2)\n columns: number of columsn in the normal for grid (the number of strategies for player 1)\n lower_limit: lower limit for the random values for payoffs if the mode is set to random\n upper_limit: upper limit for the random values for payoffs if the mode is set to random\n \"\"\"\n self.rows = rows\n self.columns = columns\n self.mode = mode # except if the mode is not 'random' or 'manual'\n self.lower_limit = lower_limit\n self.upper_limit = upper_limit\n # a list of lists for the rows and columns in the normal form\n self.grid = [[(0, 0) for i in range(self.columns)]\n for j in range(self.rows)]\n self.grid_pure_nash = [[(0, 0) for i in range(self.columns)]\n for j in range(self.rows)]\n # a list of tuples that represent the x and y coordinates each nash equilibrium\n self.nash_equilibria = []\n self.p1_br = [] # the set of best responses for player 1\n self.p2_br = [] # the set of best responses for player 2\n\n # We need to fix the format in this function.\n def print_payoffs(self, player):\n payoffs = \"\"\n count = 1\n num_rows = 7\n if player == 1:\n for rows in self.grid:\n for col in rows:\n value = str(col[0])\n if len(value) == 1:\n value = \" \" + value\n elif len(value) == 2:\n value = \" \" + value\n if count == num_rows:\n payoffs += str(value) + \"\\n\"\n count = 0\n else:\n payoffs += str(value) + \" \"\n count += 1\n elif player == 2:\n for rows in self.grid:\n for col in rows:\n value = str(col[1])\n if len(value) == 1:\n value = \" \" + value\n elif len(value) == 2:\n value = \" \" + value\n if count == num_rows:\n payoffs += str(value) + \"\\n\"\n count = 0\n else:\n payoffs += str(value) + \" \"\n count += 1\n else:\n raise ValueError(\"There are only two players\")\n print(payoffs)\n\n def print_strategies(self, player):\n # player 1 are rows and player 2 are the columns\n strategies = \"{\"\n if player == 1:\n for i in range(self.rows):\n if i == self.rows - 1:\n strategies += f\"A{i + 1}\"\n else:\n strategies += f\"A{i + 1}, \"\n elif player == 2:\n for i in range(self.columns):\n if i == self.columns - 1:\n strategies += f\"B{i + 1}\"\n else:\n strategies += f\"B{i + 1}, \"\n else:\n raise ValueError(\"There are only 2 players\")\n strategies += \"}\"\n print(strategies)\n\n def print_normal_form(self):\n columns = \"\\t\"\n for i in range(self.columns):\n columns += f\" B{i + 1}\\t\\t\\t\"\n print(columns)\n r = 1\n for row in self.grid:\n row_string = f\"A{r}\\t\"\n for c in row:\n new_value_x = str(c[0])\n new_value_y = str(c[1])\n \n while len(new_value_x) < 3:\n new_value_x = \" \"+ new_value_x\n while len(new_value_y) < 3:\n new_value_y = \" \"+ new_value_y\n row_string += f\"({new_value_x}, {new_value_y})\\t\\t\"\n r += 1\n print(row_string)\n\n def add_payoffs(self):\n # this is just a counter for displaying the cells in the normal form as A1, A2, ...\n r = 0\n for row in self.grid:\n c = 0\n for column in row:\n if self.mode == 'r':\n p1 = random.randint(self.lower_limit, self.upper_limit)\n p2 = random.randint(self.lower_limit, self.upper_limit)\n self.grid[r][c] = (p1, p2)\n self.grid_pure_nash[r][c] = (p1, p2)\n elif self.mode == 'm':\n \n payoff = input(\n f\"Enter payoff for ( A{r + 1}, B{c + 1} ) = \")\n values = payoff.split(',')\n self.grid[r][c] = (int(values[0]), int(values[1]))\n self.grid_pure_nash[r][c] = (\n int(values[0]), int(values[1]))\n else:\n raise ValueError\n c += 1\n r 
+= 1\n\n\n # Maybe use for i in range(self.row) instead of for l in self.grid\n def find_br(self, player, mixing=False, beliefs=None):\n \"\"\" Finds all the best responses of the specified player. This function only supports \n the scenario where one player uses a mixed strategy and the other player plays a pure strategy.\n br stands for best responses and opm stands for one player mixing. Later we will have to make a function for both players mixing (bpm?)\n\n Keyword Arguments:\n player : (1 or 2) it specifies the number of the player that we are going to analyze \n mixing : (boolean) \n\n Returns:\n A List containing the coordinates of the best strategies for the specifies player.\n \"\"\"\n if (player is not 1) and (player is not 2):\n raise ValueError(\"player must be an int with the value of 1 or 2\")\n if not mixing:\n if player is 1:\n for i in range(self.columns):\n br_coordinates = best = None\n counter = 0\n multiple_br_values = []\n for l in self.grid:\n current_value = l[i][player-1]\n # in x, y format (columns, row)\n current_value_coordinates = (i, counter)\n if best is None or current_value > best:\n best = current_value\n br_coordinates = current_value_coordinates\n elif best == current_value:\n if current_value_coordinates not in multiple_br_values:\n multiple_br_values.append(\n current_value_coordinates)\n counter += 1\n # we should have the highest value in column i for player 1 in the variable best\n # and the coordinates for this cell in the variable br_coordinates (if there are multiple tuples with the same\n # value then br_coordinates contains the position of the first tuple that was found with this value and the rest are in multiple_best_values)\n if br_coordinates not in multiple_br_values:\n multiple_br_values.append(br_coordinates)\n\n for coordinates in multiple_br_values:\n c = self.grid_pure_nash[coordinates[1]][coordinates[0]]\n self.grid_pure_nash[coordinates[1]\n ][coordinates[0]] = ('H', c[1])\n for value in multiple_br_values:\n if value in self.p1_br:\n continue\n else:\n self.p1_br.append(value)\n return self.p1_br\n elif player is 2:\n counter = 0\n for l in self.grid:\n br_coordinates = best = None\n multiple_br_values = []\n for i in range(self.columns):\n current_value = l[i][player-1]\n # in x, y format (columns, row)\n current_value_coordinates = (i, counter)\n if best is None or current_value > best:\n best = current_value\n br_coordinates = current_value_coordinates\n elif best == current_value:\n if current_value_coordinates not in multiple_br_values:\n multiple_br_values.append(\n current_value_coordinates)\n counter += 1\n\n if br_coordinates not in multiple_br_values:\n multiple_br_values.append(br_coordinates)\n\n for coordinates in multiple_br_values:\n c = self.grid_pure_nash[coordinates[1]][coordinates[0]]\n self.grid_pure_nash[coordinates[1]\n ][coordinates[0]] = (c[0], 'H')\n for value in multiple_br_values:\n if value in self.p2_br:\n continue\n else:\n self.p2_br.append(value)\n return self.p2_br\n else:\n expected_payoffs = {}\n if player is 1:\n for i in range(self.rows):\n result = 0\n result_string = ''\n for j in range(self.columns):\n result += beliefs[j] * self.grid[i][j][player - 1]\n result_string += f\"({beliefs[j]} * {self.grid[i][j][player - 1]})\"\n # print(f\"A{i + 1}: {result_string} = {result}\")\n key = f\"A{i + 1}\"\n expected_payoffs[key] = result\n return expected_payoffs\n elif player is 2:\n for i in range(self.columns):\n counter = 0\n result = 0\n result_string = \"\"\n for l in self.grid:\n p2_payoff = 
l[i][1]\n b = beliefs[counter]\n result += b * p2_payoff\n result_string += f\"({b} * {p2_payoff}) \"\n counter += 1\n # print(f\"B{i + 1} : {result_string} = {result}\")\n key = f\"B{i + 1}\"\n expected_payoffs[key] = result\n return expected_payoffs\n\n def find_pure_nash_equi(self):\n player1 = self.find_br(player=1)\n player2 = self.find_br(player=2)\n self.nash_equilibria = [value for value in player1 if value in player2]\n return self.nash_equilibria\n\n def create_random_beliefs(self, mode='dirichlet'):\n if mode == 'dirichlet':\n # We can use the Dirichlet distribution https://en.wikipedia.org/wiki/Dirichlet_distribution\n p1_beliefs = np.random.dirichlet(np.ones(self.rows),size=1).tolist()[0] # we need the [0] because it is a list of lists\n p2_beliefs = np.random.dirichlet(np.ones(self.columns),size=1).tolist()[0]\n elif mode == 'sum':\n # Or we can create a random array of numbers, then get the sum and divide every number by the sum\n p1_rand_numbers = [random.random() for i in range(self.rows)]\n s = sum(p1_rand_numbers)\n p1_beliefs = [round(i/s, 3) for i in p1_rand_numbers]\n\n p2_rand_numbers = [random.random() for i in range(self.columns)]\n s = sum(p2_rand_numbers)\n p2_beliefs = [round(i/s, 3) for i in p2_rand_numbers]\n return [p1_beliefs, p2_beliefs]\n\n def print_pure_nash(self):\n columns = \"\\t\"\n for i in range(self.columns):\n columns += f\" B{i + 1}\\t\\t\\t\"\n print(columns)\n r = 1\n for row in self.grid_pure_nash:\n row_string = f\"A{r}\\t\"\n for c in row:\n new_value_x = str(c[0])\n new_value_y = str(c[1])\n\n while len(new_value_x) < 3:\n new_value_x = \" \"+ new_value_x\n while len(new_value_y) < 3:\n new_value_y = \" \"+ new_value_y\n \n row_string += f\"({new_value_x}, {new_value_y})\\t\\t\"\n r += 1\n print(row_string)\n\n\n def ep_bpm(self, p1_beliefs, p2_beliefs):\n # We need to create another grid with the product of the beliefsf\n beliefs = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(p1_beliefs[j] * p2_beliefs[i])\n beliefs.append(row)\n # Now we need to do some kind of matrix multiplication between beliefs and self.grid\n # We start with player 1\n p1_ep = 0\n for i in range(self.rows):\n for j in range(self.columns):\n x = beliefs[i][j]\n y = self.grid[i][j][0]\n p1_ep += x * y\n\n p2_ep = 0\n for i in range(self.rows):\n for j in range(self.columns):\n p2_ep += beliefs[i][j] * self.grid[i][j][1]\n return p1_ep, p2_ep\n\n def get_indifference_probabilities(self):\n if len(self.nash_equilibria) == 0:\n # Starting with player 1. We need to find a strategy that makes player 2 indiferent\n # we can make a formula to find p\n # p | (?, x) | (?, z) |\n # |--------|--------|\n # 1-p | (?, y) | (?, w) |\n # Now to make the expected payoff of player 2 the same regardless of the strategy that he plays\n # p(x) + (1-p)(y) = p(z) + (1-p)(w)\n # px + y - yp = pz + w - wp\n # px - py + wp - pz = w - y\n # p(x - y + w - z) = w - y\n # p = (w - y) / (x - y + w - z)\n x = self.grid[0][0][1]\n y = self.grid[1][0][1]\n z = self.grid[0][1][1]\n w = self.grid[1][1][1]\n if (x - y + w - z) != 0:\n p = (w - y) / (x - y + w - z)\n if p < 0 or (1-p) < 0:\n print(\"There are negative probabilities. One or more strategies are be dominated\")\n else:\n print(\"There is a problem (division by 0). One or more strategies are be dominated\")\n return []\n p1_strategy = [p, 1-p]\n\n # We can make a similar case with player 2\n # q 1-q\n # p | (x, ?) | (z, ?) |\n # |--------|--------|\n # 1-p | (y, ?) | (w, ?) 
|\n # q(x) + (1-q)(z) = q(y) + (1-q)(w)\n # qx + z - qz = qy + w - qw\n # ...\n # q = (w - z) / (x - z - y + w)\n x = self.grid[0][0][0]\n y = self.grid[1][0][0]\n z = self.grid[0][1][0]\n w = self.grid[1][1][0]\n if (x - y + w - z) != 0:\n q = (w - z) / (x - y + w - z)\n if q < 0 or (1-q) < 0:\n print(\"There are negative probabilities. One or more strategies are be dominated\")\n else:\n print(\"There is a problem (division by 0). One or more strategies are be dominated\")\n return []\n p2_strategy = [q, 1-q]\n\n return [p1_strategy, p2_strategy]","sub_path":"normal_form/NormalForm.py","file_name":"NormalForm.py","file_ext":"py","file_size_in_byte":16492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"140191154","text":"#!/usr/bin/env python\nfrom contextlib import redirect_stdout\nimport io\nimport os\n\nfrom flask import Flask, request\n\nfrom handler import handler\n\napp = Flask(__name__)\n\ndef touch(fname, mode=0o666, dir_fd=None, **kwargs):\n flags = os.O_CREAT | os.O_APPEND\n with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f:\n os.utime(f.fileno() if os.utime in os.supports_fd else fname,\n dir_fd=None if os.supports_fd else dir_fd, **kwargs)\n\n@app.route('/', methods=['GET', 'POST'])\ndef main():\n print(request)\n\n data = request.get_data()\n f = io.StringIO()\n with redirect_stdout(f):\n handler(request.get_data())\n return f.getvalue()\n\n# mark the container as healthy\ntouch(\"/tmp/.lock\")\n# start the server\napp.run(host='0.0.0.0', port=8080)\n","sub_path":"template/pyclassic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"467899883","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport csv\r\nimport urllib\r\nimport pandas as pd\r\n\r\n\r\ndef download_file(id, soup):\r\n soup2 = soup.find(\"div\", class_=\"m-slider\")\r\n image = \"https:\" + soup2.find('img').get('src')\r\n try:\r\n image_file = open(\"images/\" + str(id) + \".jpeg\", 'wb')\r\n image_file.write(urllib.request.urlopen(image).read())\r\n image_file.close()\r\n except:\r\n print(\"urllib error\")\r\n\r\n\r\ndef delete_duplicates():\r\n data = pd.read_csv('products.csv', sep=';', header=0, encoding='utf8', engine='python')\r\n data.sort_values(\"Name\", inplace=True)\r\n data.drop_duplicates(subset=\"Name\", keep=\"first\", inplace=True)\r\n data.to_csv('products.csv', sep=';', index=False)\r\n\r\n\r\ndef create_products_file():\r\n\r\n categories = [\"filmy-dvd\", \"filmy-blu-ray\"]\r\n subcategories = [\"animowanefamilijne\", \"dokumentalne\", \"dramat\", \"fantasysci-fi\", \"horrorthriller\",\r\n \"komediakomedia-romantyczna\", \"muzycznemusicale\", \"sensacyjneprzygodowe\"]\r\n subsubcategories_names = [[\"Animowany\", \"Familijny\"], [\"Dokumentalny\", \"\"], [\"Dramat\", \"\"],\r\n [\"Fantasy\", \"Sciencefiction\"],\r\n [\"Horror\", \"Thriller\"], [\"Komedia\", \"Romans\"], [\"Muzyczny\", \"Musical\"],\r\n [\"Sensacja\", \"Przygodowy\"]]\r\n id = 1\r\n\r\n # Save to file\r\n with open('products.csv', mode='w', encoding=\"utf8\", newline='') as csvfile:\r\n fieldnames = [\"Id\", \"Name\", \"Link\", \"Categories\", \"Price\", \"Wholesale price\", \"Features\", \"Description\"]\r\n\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=\";\")\r\n writer.writeheader()\r\n\r\n for i, category in enumerate(categories):\r\n for j, subcategory in 
enumerate(subcategories):\r\n a = 0\r\n # Download html\r\n response = requests.get(\r\n \"https://mediamarkt.pl/filmy/\" + category + \"/\" + subcategory + \"?sort=0&limit=100&page=1\")\r\n\r\n # Parse html\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n # Search for products\r\n for product in soup.find_all(\"div\", class_=\"m-offerBox_box\"):\r\n\r\n url = product.find('a', href=True)\r\n\r\n # Go to the product page\r\n response_2 = requests.get(\"https://mediamarkt.pl\" + url['href'])\r\n\r\n # Parse html\r\n soup2 = BeautifulSoup(response_2.text.replace(\"
\", \"\\n\"), 'html.parser')\r\n\r\n # Name\r\n name = soup2.find(\"h1\", class_=\"b-ofr_headDataTitle\").text.replace('\\n', '').replace(' ', '')\r\n\r\n # Attributes\r\n specifications_names = soup2.find_all(\"dt\", class_=\"m-offerShowData_name js-offerShowData_row\")\r\n specifications_params = soup2.find_all(\"dd\", class_=\"m-offerShowData_param js-offerShowData_row\")\r\n\r\n if specifications_params[0] is None:\r\n features = \"\"\r\n else:\r\n features = \"Producent@\" + specifications_params[0].text.replace('\\n', '') + \"|\"\r\n\r\n for k in range(len(specifications_names)):\r\n if specifications_names[k].text.replace('\\n', '') == \"Gatunek\":\r\n subsubcategories = specifications_params[k].text.replace('\\n', '')\r\n\r\n params = specifications_params[k].find_all(\"span\")\r\n\r\n if specifications_names[k].text.replace('\\n', '') == \"Dodatkowo na płycie\" and len(\r\n params[0].text) > 255:\r\n continue\r\n\r\n for param in params:\r\n features +=specifications_names[k].text.replace('\\n', '') + \"@\" + param.text\\\r\n .replace('\\n', '').replace(' ', '').replace('\\r', ' ')+ \"|\"\r\n features = features[:-1]\r\n\r\n # Price\r\n price = soup2.find(\"div\", class_=\"m-priceBox_price\").text.replace('zł', '').replace(' ', '').replace('\\n', '').replace(',', '.').replace('-', '0')\r\n\r\n # Description\r\n description = soup2.find(\"div\", class_=\"widget text_editor\")\r\n\r\n if description is None:\r\n description = soup2.find(\"span\", itemprop=\"description\")\r\n if description is None:\r\n description = soup2.find(\"div\",\r\n class_=\"b-offerRWD_descriptionInner js-offerRWD_descriptionInner\")\r\n if description is None:\r\n description = \"Description\"\r\n else:\r\n description = \"

\" + description.find(\"p\").text.replace('\\n', '
').replace('\\t', ' ').replace('\\r', ' ') + \"

\"\r\n else:\r\n description = \"

\" + description.text.replace('\\n', '
').replace('\\t', ' ').replace('\\r', ' ') + \"

\"\r\n else:\r\n description = \"

\" + description.text.replace('\\n', '
').replace('\\t', ' ').replace('\\r', ' ') + \"

\"\r\n\r\n # Categories\r\n film_category = str(i * 8 + j + 1003) + \"|\" + str(1000 + i + 1) + \"|\" + str(1000)\r\n for subsubcategory in subsubcategories.split(','):\r\n subsubcategory = subsubcategory.replace('\\n', '').replace(' ', '')\r\n for k, subsubcategory_name in enumerate(subsubcategories_names):\r\n if subsubcategory == subsubcategory_name[0] or subsubcategory == \\\r\n subsubcategory_name[1]:\r\n film_category += \"|\" + str(i * 8 + k + 1003)\r\n film_category_list = list(dict.fromkeys(film_category.split('|')))\r\n film_category = '|'.join(film_category_list)\r\n\r\n # Image\r\n download_file(id, soup2)\r\n\r\n # Save product in products.csv\r\n writer.writerow({\"Id\": id,\r\n \"Name\": name,\r\n \"Link\": url['href'].split(\"/\")[-1],\r\n \"Categories\": film_category,\r\n \"Price\": str(round(float(price) / 1.23, 4)),\r\n \"Wholesale price\": round((float(price) / 1.23) * 0.9, 2),\r\n \"Features\": features,\r\n \"Description\": description,\r\n })\r\n id += 1\r\n a += 1\r\n print(id - 1)\r\n # Only 50 products per category\r\n if a > 50:\r\n break\r\n\r\n\r\ndef main():\r\n \r\n create_products_file()\r\n delete_duplicates()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"261243739","text":"##########################################################\n#\n# Licensed under the terms of the GNU Public License\n# (see docs/LICENSE.GPL)\n#\n# Copyright (c) 2005:\n# - The Open Planning Project (http://www.openplans.org/)\n# - Whit Morriss \n# - Rob Miller (RaFromBRC)\n# - and contributors\n#\n##########################################################\nfrom interfaces import IAmWickedField, IAmWicked, IFieldEvent\nfrom interfaces import ICacheManager, IValueToString, IScope\nfrom interfaces import IWickedFilter, IWickedQuery, IBacklinkManager\nfrom normalize import titleToNormalizedId as normalize\nfrom wicked import utils\nfrom wicked.fieldevent.interfaces import EndFiltrationException\nfrom wicked.fieldevent.interfaces import ITxtFilterList, IFieldRenderEvent\nfrom wicked.fieldevent.interfaces import IFieldValueSetter, IFieldStorageEvent\nfrom wicked.fieldevent.interfaces import IFieldEvent\nfrom zope.component.interfaces import ComponentLookupError\nfrom wicked.fieldevent.txtfilter import TxtFilter\nfrom zope.component import getMultiAdapter, adapts, adapter\nfrom zope.interface import implements, implementer, Interface, alsoProvides\n\nimport re\n\n_marker = object()\n\npattern1 = re.compile(r'\\(\\(([\\w\\W]+?)\\)\\)') # matches ((Some Text To link 123))\npattern2 = re.compile(r'\\[\\[([\\w\\W]+?)\\]\\]') # matches [[Some Text To link 123]]\n\ndef removeParens(wikilink):\n wikilink.replace('((', '')\n wikilink.replace('))', '')\n wikilink.replace('[[', '')\n wikilink.replace(']]', '')\n return wikilink\n\nclass WickedFilter(TxtFilter):\n implements(IWickedFilter)\n adapts(IAmWickedField, IAmWicked, IFieldEvent)\n\n name = 'Wicked Filter'\n\n #pattern = [pattern1, pattern2]\n query_iface = IWickedQuery\n _encoding = 'UTF8'\n\n def __init__(self, field, instance, event):\n super(WickedFilter, self).__init__(field, instance, event)\n self.section = field.__name__\n self.pattern = None\n\n @utils.memoizedproperty\n def scope(self):\n try:\n return getMultiAdapter((self.field, self.context), IScope)\n except ComponentLookupError:\n return ''\n\n # avoid global lookup\n getMatch 
= staticmethod(utils.getMatch)\n _normalize = staticmethod(normalize)\n\n # optimization\n @utils.memoize\n def normalize(self, value):\n return self._normalize(value)\n\n @utils.memoizedproperty\n def encoding(self):\n \"\"\"AT hack\"\"\"\n try:\n encoding = self.context.getCharset()\n except AttributeError:\n encoding = self._encoding\n return encoding\n\n def _filterCore(self, chunk, **kwargs):\n normalled = self.normalize(chunk)\n links=self.getLinks(chunk, normalled)\n self.renderer.load(links, chunk)\n return self.renderer().encode(self.encoding)\n\n @property\n def filtered_text(self):\n \"\"\"syntax preprocessing\"\"\"\n return super(WickedFilter, self).filtered_text\n\n @utils. memoize\n @utils.linkcache\n def getLinks(self, chunk, normalled):\n self.resolver.configure(chunk, normalled, self.scope)\n brains = self.resolver.search\n if not brains:\n brains = self.resolver.scopedSearch\n links = [utils.packBrain(b) for b in brains if b]\n return links\n\n @utils.memoizedproperty\n def resolver(self):\n \"\"\"\n @return query object\n \"\"\"\n return self.query_iface(self.context)\n\n @utils.memoizedproperty\n def backlinker(self):\n return getMultiAdapter((self, self.context), IBacklinkManager)\n\n def manageLink(self, obj, link):\n self.backlinker.manageLink(obj, link)\n\n def unlink(self, uid):\n self.backlinker.unlink(uid)\n\n def manageLinks(self, links):\n self.backlinker.manageLinks(links)\n\n @utils.memoizedproperty\n def cache(self):\n return getMultiAdapter((self, self.context), ICacheManager)\n\n @utils.memoizedproperty\n def renderer(self):\n # @@ better way to get request? maybe a txtfilter should be a view?\n renderer = getMultiAdapter((self.context, self.context.REQUEST), Interface, 'link_renderer')\n renderer.section = self.section\n # hook for zope2 aq wrapper\n if hasattr(renderer, '__of__'):\n return renderer.__of__(self.context)\n return renderer\n\n def __call__(self):\n if self.event.kwargs.get('raw', False):\n raise EndFiltrationException('Kwargs flag for raw return')\n super(WickedFilter, self).__call__()\n\n## def removeParens(wikilink):\n## wikilink.replace('((', '')\n## wikilink.replace('))', '')\n## return wikilink\n removeParens=staticmethod(removeParens)\n\n\nclass BrackettedWickedFilter(WickedFilter):\n \"\"\"media wiki style bracket matching\"\"\"\n pattern=pattern2\n def removeParens(wikilink):\n wikilink.replace('[[', '')\n wikilink.replace(']]', '')\n return wikilink\n removeParens=staticmethod(removeParens)\n\nNAME = WickedFilter.name\n\n\n## event handlers ##\n\nclass WickedListener(object):\n\n def __init__(self, pattern):\n self.pattern = pattern\n\n def render(self, field, instance, event):\n \"\"\"standalone wicked filter (ie not as a txtfilter). 
Optimal if\n not using txtfilters\"\"\"\n\n if event.kwargs.get('raw', False):\n return\n\n wicked = getMultiAdapter((field, instance, event), IWickedFilter)\n wicked.pattern = self.pattern\n try:\n wicked()\n except EndFiltrationException:\n pass\n\n def store(self, field, event):\n try:\n wicked = utils.getWicked(field, event.instance, event)\n except ComponentLookupError:\n # no adapter registered for this type currently\n # @@ This might be handle better by redispatch\n return\n\n wicked.pattern = self.pattern\n if not event.value:\n return\n\n value = event.value\n value_str = value\n\n try:\n # this block handle conversions for file uploads and\n # atapi.BaseUnit or any other not quite plain text \"value objects\"\n value_str = getMultiAdapter((value, field), IValueToString)\n except ComponentLookupError:\n pass\n\n found = wicked.findall(value_str)\n\n if not len(found):\n return\n\n new_links = [wicked.removeParens(link) for link in found]\n wicked.manageLinks(new_links)\n\nfrom zope.interface import classImplements\n\n@implementer(IFieldValueSetter)\n@adapter(IAmWickedField, IFieldStorageEvent)\ndef backlink_handler(field, event):\n try:\n wicked = utils.getWicked(field, event.instance, event)\n except ComponentLookupError:\n # no adapter registered for this type currently\n # @@ This might be handle better by redispatch\n return\n\n if not event.value:\n return\n\n value = event.value\n value_str = value\n\n try:\n # this block handle conversions for file uploads and\n # atapi.BaseUnit or any other not quite plain text \"value objects\"\n value_str = getMultiAdapter((value, field), IValueToString)\n except ComponentLookupError:\n pass\n\n\n found = wicked.findall(value_str)\n\n if not len(found):\n return\n\n new_links = [wicked.removeParens(link) for link in found]\n wicked.manageLinks(new_links)\n\npattern1_listeners = WickedListener(pattern1)\npattern2_listeners = WickedListener(pattern2)\n\n## hack around fact that functions can not be pickled ##\n\nclass wicked_listener(object):\n __init__=staticmethod(pattern1_listeners.render)\n\nclass bracketted_wicked_listener(object):\n __init__=staticmethod(pattern2_listeners.render)\n\nclass backlink(object):\n implements(IFieldValueSetter)\n adapts(IAmWickedField, IFieldStorageEvent)\n\n __init__ = staticmethod(pattern1_listeners.store)\n\nBacklinkRegistrationProxy = backlink\n\nclass brackettedbacklink(object):\n implements(IFieldValueSetter)\n adapts(IAmWickedField, IFieldStorageEvent)\n\n __init__ = staticmethod(pattern2_listeners.store)\n\n\n## toy example code ##\n\n@adapter(IAmWickedField, IAmWicked, IFieldRenderEvent)\n@implementer(ITxtFilterList)\ndef filter_list(field, context, event):\n \"\"\"example adapter for a one item list for ordering a txtfilter\n pipeline involving wicked only. Practically useless, for example\n only\"\"\"\n return [NAME]\n","sub_path":"wicked/txtfilter.py","file_name":"txtfilter.py","file_ext":"py","file_size_in_byte":8401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"150206508","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^index', views.index, name='index'),\n url(r'^location', views.location, name='location'),\n url(r'^community', views.community, name='community'),\n url(r'^schedule', views.schedule, name='schedule'),\n url(r'^test_api',views.test_api, name=\"test_api\"),\n url(r'^login',views.login, name=\"login\"),\n\n url(r'^show_write_form', views.show_write_form, name=\"show_write_form\"),\n url(r'^DoWriteBoard', views.DoWriteBoard),\n url(r'^viewWork', views.viewWork),\n url(r'^viewForUpdate', views.viewForUpdate),\n url(r'^updateBoard', views.updateBoard),\n url(r'^viewForDelete', views.viewForDelete),\n url(r'^searchWithSubject', views.searchWithSubject),\n\n\n\n url(r'^listSearchedSpecificPageWork/$', views.listSearchedSpecificPageWork),\n\n\n]\n","sub_path":"recPoint/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"375028843","text":"#!/usr/bin/env python2\nfrom __future__ import print_function\n\ndef check(file_stream):\n lines = file_stream.readlines()\n current_defs = set()\n for line in lines:\n # print(current_defs)\n s_line = line.strip()\n if s_line.startswith('#define'):\n name = s_line.split(' ')[1].partition('(')[0]\n # print('Define: {0}'.format(name))\n current_defs.add(name)\n elif s_line.startswith('#ifdef'):\n name = s_line.split(' ')[1].partition('(')[0]\n # print('Ifdef: {0}'.format(name))\n current_defs.add(name)\n elif s_line.startswith('#ifndef'):\n name = s_line.split(' ')[1].partition('(')[0]\n # print('Ifndef: {0}'.format(name))\n current_defs.add(name)\n elif s_line.startswith('#undef'):\n try:\n name = s_line.split(' ')[1]\n # print('Undef: {0}'.format(name))\n current_defs.remove(name)\n except KeyError:\n print ('#undefed unknown macro {}'\n .format(s_line.split(' ')[1]))\n pass\n if current_defs:\n print('Style error, {0} still #defined.'.format(', '.join(current_defs)))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Check a C file for specific errors')\n parser.add_argument('file')\n args = parser.parse_args()\n with open(args.file) as the_file:\n check(the_file)\n","sub_path":"src/style_check.py","file_name":"style_check.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"167188651","text":"# 2018-11-05 gangdeng@yufuid.com\nimport unittest\nimport time\nfrom api.auth import Auth\nfrom api.appcategory import AppCategory\nfrom api.logger import logger\nfrom api.yufuapi import RunEnv\n\nclass AppCategoryTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n re = RunEnv()\n self.auth = Auth(re)\n self.api = AppCategory(re)\n\n def setUp(self):\n self.auth.login('gangdeng@yufuid.com', 'iphone@5S')\n self.auth.switch_to_admin()\n\n def tearDown(self):\n self.auth.logout()\n \n def test_api_get_categories(self):\n \"\"\"GET /api/v1/{tenantId}/categories\"\"\"\n result = self.api.get_categories()\n logger.info(result.text)\n self.assertEqual(result.status_code, 200)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"apitest/apitest/appcategory_test.py","file_name":"appcategory_test.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"279935758","text":"import numpy as 
np\nfrom flask import Flask,request,jsonify,render_template\n\nfrom download_vids import download_videos\nfrom email_send import mail_user\n\napp=Flask(__name__)\n\n\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\n\ndef main_page():\n if request.method == \"POST\":\n singer_name = request.form['singer_name'].replace(' ','').lower()\n email_id = request.form['email_id']\n num_vids = int(request.form['num_vids'])\n download_videos(singer_name,num_vids)\n mail_user(email_id)\n # print(keyword,emailid,limit)\n return render_template(\"index.html\")\n\n\nif __name__==\"__main__\":\n app.run(debug=True)\n","sub_path":"Mini_Proj_11-Download Youtube videos/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"374775278","text":"\"\"\"\nCode for Assignment #1: try Singular Value Decomposition (SVD)\n\nCalculate SVD with two methods:\n - Method #1: use eigenvalue and eigenvector for SVD, minor component is not accurate, not sure why\n - Method #2: use svd function directly\n\n\"\"\"\n\n\n#\nimport numpy as np\nfrom numpy import array\nfrom numpy import diag\nfrom numpy import dot\nfrom numpy import zeros\nfrom scipy.linalg import svd\n# define a 5x4 matrix\nA = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7], [5, 6, 7, 8]])\n\n# Method #1: use eigenvalue and eigenvector for SVD\nB = np.dot(np.transpose(A), A)\nC = np.dot(A, np.transpose(A))\n\n# print(\"A.shape: \", A.shape)\nprint(\"A: \\n\", A)\n\nw, v = np.linalg.eig(B)\nidx = w.argsort()[::-1]\nw = w[idx]\nw = np.sqrt(w)\nvT = np.transpose(v)\nSigma = zeros((A.shape[1], A.shape[1]))\n# populate Sigma with n x n diagonal matrix\nSigma[:A.shape[1], :A.shape[1]] = diag(w)\n\nprint(\"B = AT A: \\n\", B)\n# print(\"EigenValue: \\n\", w)\nprint(\"EigenVector-v \\n\", vT)\nprint(\"Sigma: \\n\", Sigma)\n\nr, u = np.linalg.eig(C)\nidx = r.argsort()[::-1]\nr = r[idx]\nr = np.sqrt(r)\nu = u[:, idx]\nSigma = zeros((A.shape[0], A.shape[0]))\n# populate Sigma with n x n diagonal matrix\n# Sigma[:A.shape[0], :A.shape[0]] = diag(r)\nSigma = diag(r)\n\nprint(\"\\n\\nC = A AT: \\n\", C)\n# print(\"EigenValue: \\n\", r)\nprint(\"EigenVector-u: \\n\", u)\n# print(\"Sigma: \\n\", Sigma)\n\n# # Another way to calculate\n# u = np.linalg.eig(A.dot(A.T))[1]\n# print(\"EigenVector-u \\n\", u)\n#\n# v = np.linalg.eig(A.T.dot(A))[1]\n# print(\"EigenVector-v \\n\", v)\n#\n# sigma = np.sqrt(np.linalg.eig(A.T.dot(A))[0])\n# print(\"sigma \\n\", sigma)\n\n\n# Method #2: use SVD library directly\nprint(\"\\n\\n\")\nU, s, VT = svd(A)\nprint(\"Left Singular Vector (U): \\n\", U)\nprint(\"Singular Values (Sigma): \\n\", s)\nprint(\"Right Singular Vectors (VT): \\n\", VT)\n","sub_path":"GraduateStudy/ENGG6500_MachineLearning/A1-MySVD.py","file_name":"A1-MySVD.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475536634","text":"\r\n'''\r\n*****************************************************************************************\r\n*\r\n* \t\t===============================================\r\n* \t\tRapid Rescuer (RR) Theme (eYRC 2019-20)\r\n* \t\t===============================================\r\n*\r\n* This script is to implement Task 1C of Rapid Rescuer (RR) Theme (eYRC 2019-20).\r\n* \r\n* This software is made available on an \"AS IS WHERE IS BASIS\".\r\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\r\n* any and all claim(s) 
that emanate from the use of the Software or \r\n* breach of the terms of this agreement.\r\n* \r\n* e-Yantra - An MHRD project under National Mission on Education using ICT (NMEICT)\r\n*\r\n*****************************************************************************************\r\n'''\r\n\r\n\r\n# Team ID:\t\t\t[ Team-ID ]\r\n# Author List:\t\t[ Names of team members worked on this file separated by Comma: Name1, Name2, ... ]\r\n# Filename:\t\t\ttask_1c.py\r\n# Functions:\t\tcomputeSum\r\n# \t\t\t\t\t[ Comma separated list of functions in this file ]\r\n# Global variables:\tNone\r\n# \t\t\t\t\t[ List of global variables defined in this file ]\r\n\r\n\r\n# Import necessary modules\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport sys\r\n\r\n\r\n#############\tYou can import other modules here\t#############\r\n\r\n# edit \"generalize it later\" lines\r\nimport tensorflow as tf\r\nimport requests\r\nmodel_name = 'my_first_ml'\r\n\r\n########### heavily refrenced code start ###########\r\n\r\n#following code for direct download from cloud storage\r\n#taken from this StackOverflow answer: https://stackoverflow.com/a/39225039\r\ndef download_file_from_google_drive(id, destination):\r\n URL = \"\"\r\n\r\n session = requests.Session()\r\n\r\n response = session.get(URL, params = { 'id' : id }, stream = True)\r\n token = get_confirm_token(response)\r\n\r\n if token:\r\n params = { 'id' : id, 'confirm' : token }\r\n response = session.get(URL, params = params, stream = True)\r\n\r\n save_response_content(response, destination) \r\n#taken from this StackOverflow answer: https://stackoverflow.com/a/39225039\r\ndef get_confirm_token(response):\r\n for key, value in response.cookies.items():\r\n if key.startswith('download_warning'):\r\n return value\r\n\r\n return None\r\n#taken from this StackOverflow answer: https://stackoverflow.com/a/39225039\r\ndef save_response_content(response, destination):\r\n CHUNK_SIZE = 32768\r\n\r\n with open(destination, \"wb\") as f:\r\n for chunk in response.iter_content(CHUNK_SIZE):\r\n if chunk: # filter out keep-alive new chunks\r\n f.write(chunk)\r\n\r\n########### heavily refrenced code end #############\r\n\r\ntry:\r\n\tfile_id = ''\r\n\tdir_path = os.getcwd()\r\n\tmodel_path = dir_path + '/' + model_name\r\n\tdestination = model_path\r\n\tdownload_file_from_google_drive(file_id, destination)\r\n\tmodel = tf.keras.models.load_model(model_name)\r\nexcept:\r\n\timport model_maker\r\n\tx_train, y_train, x_test, y_test = model_maker.loadData()\r\n\tx_train, y_train, x_test, y_test = model_maker.dataReady(x_train, y_train, x_test, y_test)\r\n\tmodel = model_maker.createModel()\r\n\tmodel = model_maker.trainModel(model, x_train, y_train, x_test, y_test)\r\n\r\n#################################################################\r\n\r\n\r\n# Function Name:\tcomputeSum\r\n# Inputs: \t\t\timg_file_path [ file path of image ]\r\n# \t\t\t\t\tshortestPath [ list of coordinates of shortest path from initial_point to final_point ]\r\n# Outputs:\t\t\tdigits_list [ list of digits present in the maze image ]\r\n# \t\t\t\t\tdigits_on_path [ list of digits present on the shortest path in the maze image ]\r\n# \t\t\t\t\tsum_of_digits_on_path [ sum of digits present on the shortest path in the maze image ]\r\n# Purpose: \t\t\tthe function takes file path of original image and shortest path in the maze image\r\n# \t\t\t\t\tto return the list of digits present in the image, list of digits present on the shortest\r\n# \t\t\t\t\tpath in the image and sum of digits present on the 
shortest\tpath in the image\r\n# Logic:\t\t\t[ write the logic in short of how this function solves the purpose ]\r\n# Example call: \tdigits_list, digits_on_path, sum_of_digits_on_path = computeSum(img_file_path, shortestPath)\r\n\r\ndef computeSum(img_file_path, shortestPath):\r\n\r\n\t\"\"\"\r\n\tPurpose:\r\n\t---\r\n\tthe function takes file path of original image and shortest path as argument and returns list of digits, digits on path and sum of digits on path\r\n\tInput Arguments:\r\n\t---\r\n\t`img_file_path` :\t\t[ str ]\r\n\t\tfile path of image\r\n\t`shortestPath` :\t\t[ list ]\r\n\t\tlist of coordinates of shortest path from initial_point to final_point\r\n\tReturns:\r\n\t---\r\n\t`digits_list` :\t[ list ]\r\n\t\tlist of all digits on image\r\n\t`digits_on_path` :\t[ list ]\r\n\t\tlist of digits adjacent to the path from initial_point to final_point\r\n\t`sum_of_digits_on_path` :\t[ int ]\r\n\t\tsum of digits on path\r\n\tExample call:\r\n\t---\r\n\toriginal_binary_img = readImage(img_file_path)\r\n\t\"\"\"\r\n\r\n\tdigits_list = []\r\n\tdigits_on_path = []\r\n\tsum_of_digits_on_path = 0\r\n\r\n\t############# Add your Code here ###############\r\n\r\n\t# the code is simple\r\n\t# 1. it read the image convert it into binary and gray scale form\r\n\t# 2. then it checks each cell for PRESENCE of num\r\n\t# 3. then the it checks the cell neighbourhood for presence of shortestpath\r\n\t# results of the above checks is stored as coordinated and not images\r\n\t# 4. the coordinates in helper lists are used to make an array of images\r\n\t# 5. this array of images is send to model.predict\r\n\t# 6. finally results are stored in respective lists\r\n\r\n\t#print('hello', task_1a.CELL_SIZE)\r\n\t# image ready\r\n\tgiven_img = cv2.imread(img_file_path)\r\n\tgray_img = cv2.cvtColor(given_img, cv2.COLOR_BGR2GRAY)\r\n\tret, binary_img= cv2.threshold(gray_img,127,255,cv2.THRESH_BINARY)\r\n\t\r\n\t# cell_img used to predict presence and value of num\r\n\tcell_img = np.zeros((task_1a.CELL_SIZE, task_1a.CELL_SIZE), dtype=int)\r\n\r\n\t# some declarations\r\n\theight_given, width_given = gray_img.shape\r\n\tnum_cells_height = int(height_given / task_1a.CELL_SIZE)\r\n\tnum_cells_width = int(width_given / task_1a.CELL_SIZE)\r\n\r\n\t# some more declarations\r\n\tprobableCoordinates = []\r\n\tprobableImages = []\r\n\tvalidNum = []\r\n\twallWidth = 4 # generalize it later\r\n\r\n\t# points to remember\r\n\t# wall = 0 = black\r\n\t# path = 255 = white\r\n\r\n\t# this for loops are for cell traversal\r\n\tfor i in range(num_cells_height):\r\n\t\tfor j in range(num_cells_width):\r\n\t\t\t# will be used to predict presence of num\r\n\t\t\tcell_img = binaryCell(i, j, binary_img, cell_img)\r\n\t\t\tcell_img_noWall = removeWalls(cell_img, wallWidth)\r\n\r\n\t\t\t# will be used to predict value of num\r\n\t\t\tcell_img2 = grayCell(i, j, gray_img, cell_img)\r\n\t\t\tcell_img_noWall2 = removeWalls(cell_img2, wallWidth)\r\n\r\n\t\t\t# check num predicts presence of num\r\n\t\t\tif(checkNum(cell_img_noWall) == True):\r\n\t\t\t\tprobableCoordinates.append((i, j))\r\n\r\n\t\t\t\t# invert! 
because training was done on black background\r\n\t\t\t\ttemp = invertImg(cell_img_noWall2)\r\n\t\t\t\tprobableImages.append(temp)\r\n\t\t\t\t\r\n\t\t\t\t# is in path checks all 4 directions of the num \r\n\t\t\t\t# for presence of shortest path\r\n\t\t\t\tif(isInPath(i, j, shortestPath, cell_img) == True):\r\n\t\t\t\t\tvalidNum.append((i, j))\r\n\r\n\t# this loop converts shape of images to be predicted\r\n\t# into the shape of input layer\r\n\timg_rows, img_cols = 28, 28\r\n\tfor i in range(len(probableImages)):\r\n\t\tprobableImages[i] = cv2.resize(probableImages[i], (img_rows, img_cols))\r\n\t\tprobableImages[i] = probableImages[i].reshape(img_rows, img_cols, 1)\r\n\t\tprobableImages[i] = probableImages[i].astype('float32')\r\n\t\tprobableImages[i] = probableImages[i] / 255.0\r\n\r\n\t# Finally, this lines predicts value of num \r\n\t# with help of loaded model\r\n\tprobableImages = np.asarray(probableImages)\r\n\tpredictions = model.predict(probableImages)\r\n\r\n\t# this loop considers max value of probablity\r\n\tfor i in range(len(probableCoordinates)):\r\n\t\tj = np.argmax(predictions[i])\r\n\t\tdigits_list.append(j)\r\n\t\t## some useful print statements\r\n\t\t#print('predicted val', j)\r\n\t\t#print('percentage max', str(predictions[i][j]*100))\r\n\t\t#print('remaining percentage', predictions[i])\r\n\r\n\t# this loop finds the num that are adjecent to shortest path\r\n\t# it compares coordinates in helper lists declared\r\n\tfor i in range(len(validNum)):\r\n\t\tj = probableCoordinates.index(validNum[i])\r\n\t\tdigits_on_path.append(digits_list[j])\r\n\r\n\t# Finally, the SUM! :)\r\n\tsum_of_digits_on_path = sum(digits_on_path)\r\n\r\n\t## some useful prints\r\n\t#print('probcoorr', probableCoordinates)\r\n\t#print('validnum',validNum)\r\n\r\n\t# delete the used variables\r\n\tprobableCoordinates.clear()\r\n\tvalidNum.clear()\r\n\tdel probableImages\r\n\tdel predictions\r\n\r\n\t###################################################\r\n\r\n\treturn digits_list, digits_on_path, sum_of_digits_on_path\r\n\r\n\r\n#############\tYou can add other helper functions here\t\t#############\r\n\r\ndef binaryCell(i, j, img, cell_img):\r\n\ti = i*task_1a.CELL_SIZE \r\n\tj = j*task_1a.CELL_SIZE \r\n\tcell_img = img[i:i + task_1a.CELL_SIZE, j:j + task_1a.CELL_SIZE]\r\n\treturn cell_img\r\n\r\ndef grayCell(i, j, img, cell_img):\r\n\ti = i*task_1a.CELL_SIZE \r\n\tj = j*task_1a.CELL_SIZE \r\n\tcell_img = img[i:i + task_1a.CELL_SIZE, j:j + task_1a.CELL_SIZE]\r\n\treturn cell_img\r\n\r\ndef removeWalls(cell_img, wallWidth):\r\n\tcell_img = cell_img[wallWidth : task_1a.CELL_SIZE - wallWidth, \r\n\t\t\t\t\t\t\twallWidth : task_1a.CELL_SIZE - wallWidth]\r\n\treturn cell_img\r\n\r\ndef checkNum(cell_img):\r\n\ttemp_h, temp_w = cell_img.shape\r\n\tfor k in range(temp_h):\r\n\t\tfor l in range(temp_w):\r\n\t\t\tif(cell_img[k][l] < 255):\r\n\t\t\t\treturn True\r\n\treturn False\r\n\r\ndef isWall(cell_img, k):\r\n\tcell_h, cell_w = cell_img.shape\r\n\tif(k == 0 and cell_img[0][cell_w//2] == 255):\r\n\t\treturn False\r\n\r\n\telif(k == 1 and cell_img[cell_h - 1][cell_w//2] == 255):\r\n\t\treturn False\r\n\r\n\telif(k == 2 and cell_img[cell_h//2][0] == 255):\r\n\t\treturn False\r\n\r\n\telif(k == 3 and cell_img[cell_h//2][cell_w - 1] == 255):\r\n\t\treturn False\r\n\r\n\treturn True\r\n\r\ndef isInPath(i, j, shortestPath, cell_img):\r\n\tcol = [-1, 1, 0, 0] # up, down, left, right\r\n\trow = [0, 0, -1, 1]\r\n\tfor k in range(4):\r\n\t\tif(isWall(cell_img, k) == False):\r\n\t\t\ti, j = i + col[k], j + 
row[k]\r\n\t\t\tif (i, j) in shortestPath:\r\n\t\t\t\t#print(i, j)\r\n\t\t\t\treturn True\r\n\t\t\ti, j = i - col[k], j - row[k]\r\n\treturn False\r\n\r\ndef invertImg(img):\r\n\th, w = img.shape\r\n\tfor i in range(h):\r\n\t\tfor j in range(w):\r\n\t\t\timg[i][j] = 255 - img[i][j]\r\n\t\t\tif(img[i][j] <= 15):\r\n\t\t\t\timg[i][j] = 0\r\n\treturn img\r\n\r\n#########################################################################\r\n\r\n\r\n# NOTE:\tYOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION\r\n# \r\n# Function Name:\tmain\r\n# Inputs:\t\t\tNone\r\n# Outputs: \t\t\tNone\r\n# Purpose: \t\t\tthe function first takes 'maze00.jpg' as input and solves the maze by calling computeSum\r\n# \t\t\t\t\tfunction, it then asks the user whether to repeat the same on all maze images\r\n# \t\t\t\t\tpresent in 'task_1c_images' folder or not\r\n\r\nif __name__ != '__main__':\r\n\t\r\n\tcurr_dir_path = os.getcwd()\r\n\r\n\t# Importing task_1a and image_enhancer script\r\n\ttry:\r\n\r\n\t\ttask_1a_dir_path = curr_dir_path + '/../../Task 1A/codes'\r\n\t\tsys.path.append(task_1a_dir_path)\r\n\r\n\t\timport task_1a\r\n\t\timport image_enhancer\r\n\r\n\texcept Exception as e:\r\n\r\n\t\tprint('\\ntask_1a.py or image_enhancer.pyc file is missing from Task 1A folder !\\n')\r\n\t\texit()\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\tcurr_dir_path = os.getcwd()\r\n\timg_dir_path = curr_dir_path + '/../task_1c_images/'\t\t\t\t# path to directory of 'task_1c_images'\r\n\t\r\n\tfile_num = 0\r\n\timg_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'\t\t# path to 'maze00.jpg' image file\r\n\r\n\t# Importing task_1a and image_enhancer script\r\n\ttry:\r\n\r\n\t\ttask_1a_dir_path = curr_dir_path + '/../../Task 1A/codes'\r\n\t\tsys.path.append(task_1a_dir_path)\r\n\r\n\t\timport task_1a\r\n\t\timport image_enhancer\r\n\r\n\texcept Exception as e:\r\n\r\n\t\tprint('\\n[ERROR] task_1a.py or image_enhancer.pyc file is missing from Task 1A folder !\\n')\r\n\t\texit()\r\n\r\n\t# modify the task_1a.CELL_SIZE to 40 since maze images\r\n\t# in task_1c_images folder have cell size of 40 pixels\r\n\ttask_1a.CELL_SIZE = 40\r\n\r\n\tprint('\\n============================================')\r\n\r\n\tprint('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n\ttry:\r\n\t\t\r\n\t\toriginal_binary_img = task_1a.readImage(img_file_path)\r\n\t\theight, width = original_binary_img.shape\r\n\r\n\texcept AttributeError as attr_error:\r\n\t\t\r\n\t\tprint('\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n\t\texit()\r\n\r\n\t\r\n\tno_cells_height = int(height/task_1a.CELL_SIZE)\t\t\t\t\t# number of cells in height of maze image\r\n\tno_cells_width = int(width/task_1a.CELL_SIZE)\t\t\t\t\t# number of cells in width of maze image\r\n\tinitial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n\tfinal_point = ((no_cells_height-1),(no_cells_width-1))\t\t\t# end point coordinates of maze\r\n\r\n\ttry:\r\n\r\n\t\tshortestPath = task_1a.solveMaze(original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n\t\tif len(shortestPath) > 2:\r\n\r\n\t\t\timg = image_enhancer.highlightPath(original_binary_img, initial_point, final_point, shortestPath)\r\n\t\t\t\r\n\t\telse:\r\n\r\n\t\t\tprint('\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n\t\t\texit()\r\n\t\r\n\texcept TypeError as type_err:\r\n\t\t\r\n\t\tprint('\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format 
!\\n')\r\n\t\texit()\r\n\r\n\tprint('\\nShortest Path = %s \\n\\nLength of Path = %d' % (shortestPath, len(shortestPath)))\r\n\r\n\tdigits_list, digits_on_path, sum_of_digits_on_path = computeSum(img_file_path, shortestPath)\r\n\r\n\tprint('\\nDigits in the image = ', digits_list)\r\n\tprint('\\nDigits on shortest path in the image = ', digits_on_path)\r\n\tprint('\\nSum of digits on shortest path in the image = ', sum_of_digits_on_path)\r\n\r\n\tprint('\\n============================================')\r\n\r\n\tcv2.imshow('canvas0' + str(file_num), img)\r\n\tcv2.waitKey(0)\r\n\tcv2.destroyAllWindows()\r\n\r\n\tchoice = input('\\nWant to run your script on all maze images ? ==>> \"y\" or \"n\": ')\r\n\r\n\tif choice == 'y':\r\n\r\n\t\tfile_count = len(os.listdir(img_dir_path))\r\n\r\n\t\tfor file_num in range(file_count):\r\n\r\n\t\t\timg_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'\t\t# path to 'maze00.jpg' image file\r\n\r\n\t\t\tprint('\\n============================================')\r\n\r\n\t\t\tprint('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n\t\t\ttry:\r\n\t\t\t\t\r\n\t\t\t\toriginal_binary_img = task_1a.readImage(img_file_path)\r\n\t\t\t\theight, width = original_binary_img.shape\r\n\r\n\t\t\texcept AttributeError as attr_error:\r\n\t\t\t\t\r\n\t\t\t\tprint('\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n\t\t\t\texit()\r\n\r\n\t\t\t\r\n\t\t\tno_cells_height = int(height/task_1a.CELL_SIZE)\t\t\t\t\t# number of cells in height of maze image\r\n\t\t\tno_cells_width = int(width/task_1a.CELL_SIZE)\t\t\t\t\t# number of cells in width of maze image\r\n\t\t\tinitial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n\t\t\tfinal_point = ((no_cells_height-1),(no_cells_width-1))\t\t\t# end point coordinates of maze\r\n\r\n\t\t\ttry:\r\n\r\n\t\t\t\tshortestPath = task_1a.solveMaze(original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n\t\t\t\tif len(shortestPath) > 2:\r\n\r\n\t\t\t\t\timg = image_enhancer.highlightPath(original_binary_img, initial_point, final_point, shortestPath)\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\r\n\t\t\t\t\tprint('\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n\t\t\t\t\texit()\r\n\t\t\t\r\n\t\t\texcept TypeError as type_err:\r\n\t\t\t\t\r\n\t\t\t\tprint('\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format !\\n')\r\n\t\t\t\texit()\r\n\r\n\t\t\tprint('\\nShortest Path = %s \\n\\nLength of Path = %d' % (shortestPath, len(shortestPath)))\r\n\r\n\t\t\tdigits_list, digits_on_path, sum_of_digits_on_path = computeSum(img_file_path, shortestPath)\r\n\r\n\t\t\tprint('\\nDigits in the image = ', digits_list)\r\n\t\t\tprint('\\nDigits on shortest path in the image = ', digits_on_path)\r\n\t\t\tprint('\\nSum of digits on shortest path in the image = ', sum_of_digits_on_path)\r\n\r\n\t\t\tprint('\\n============================================')\r\n\r\n\t\t\tcv2.imshow('canvas0' + str(file_num), img)\r\n\t\t\tcv2.waitKey(0)\r\n\t\t\tcv2.destroyAllWindows()\r\n\r\n\telse:\r\n\r\n\t\tprint('')\r\n\r\n\r\n","sub_path":"task_1c.py","file_name":"task_1c.py","file_ext":"py","file_size_in_byte":15931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"171814758","text":"class Solution(object):\r\n def plusOne(self, digits):\r\n \"\"\"\r\n :type digits: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n is_carry = 1\r\n n = len(digits)\r\n 
for i in range(n-1, -1, -1):\r\n digits[i] += is_carry\r\n if digits[i] >= 10:\r\n is_carry = 1\r\n digits[i] %= 10\r\n else:\r\n is_carry = 0\r\n break\r\n if is_carry:\r\n digits.insert(0, 1)\r\n return digits\r\n \r\n#############################################################\r\n\"\"\"\r\nhttps://leetcode.com/discuss/14616/is-it-a-simple-code-c\r\n\"\"\"\r\nclass Solution(object):\r\n def plusOne(self, digits):\r\n for i in range(len(digits)-1, -1, -1):\r\n if digits[i] == 9:\r\n digits[i] = 0\r\n else:\r\n digits[i] += 1\r\n return digits\r\n digits.append(0)\r\n digits[0] = 1\r\n return digits","sub_path":"src/066_PlusOne.py","file_name":"066_PlusOne.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"503850086","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 17 23:28:59 2017\r\n\r\n@author: Dhebar\r\n\"\"\"\r\nimport nsga2_classes\r\nimport math\r\nimport global_vars\r\nimport numpy as np\r\nimport sys\r\n#import evaluate_pytorch\r\n#from ednn.evaluator import Evaluator\r\n#.....problem definitions....\r\n\r\n#prob_name = global_vars.params.prob_name\r\nprob_name = 'ednn' \r\ndef compute_fitness_ind(indiv):\r\n #...bbob...\r\n if prob_name == 'bbob':\r\n f1, f2 = global_vars.params.fun(indiv.xreal)\r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n return\r\n \r\n #...ZDT1....\r\n if prob_name == 'ZDT1':\r\n f1 = indiv.xreal[0]\r\n c = np.sum(indiv.xreal[1:])\r\n g = 1 + 9.0*c/(global_vars.params.n_var - 1)\r\n f2 = g*(1 - pow((f1*1.0/g),0.5))\r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n return\r\n \r\n if prob_name == 'ZDT2':\r\n f1 = indiv.xreal[0]\r\n c = np.sum(indiv.xreal[1:])\r\n g = 1.0 + 9.0*(c)/(global_vars.params.n_var - 1)\r\n f2 = g*(1 - math.pow((f1*1.0/g),2))\r\n \r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n return\r\n \r\n if prob_name == 'ZDT3':\r\n f1 = indiv.xreal[0]\r\n c = np.sum(indiv.xreal[1:])\r\n g = 1.0 + 9.0*(c)/(global_vars.params.n_var - 1)\r\n f2 = g*(1 - math.pow(f1*1.0/g,0.5) - (f1*1.0/g)*math.sin(10*math.pi*f1))\r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n return\r\n \r\n if prob_name == 'ZDT4':\r\n f1 = indiv.xreal[0]\r\n c = 0\r\n for i in range(1,global_vars.params.n_var):\r\n c += math.pow(indiv.xreal[i],2) - 10*math.cos(4*math.pi*indiv.xreal[i])\r\n g = 1 + 10*(global_vars.params.n_var - 1) + c\r\n f2 = g*(1 - math.sqrt(f1*1.0/g))\r\n \r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n \r\n return\r\n \r\n if prob_name == 'ZDT6':\r\n f1 = 1 - math.exp(-4*indiv.xreal[0])*math.pow(math.sin(6*math.pi*indiv.xreal[0]),6)\r\n g = 1 + 9.0*math.pow(sum(indiv.xreal[1:])/(global_vars.params.n_var - 1.0),0.25)\r\n f2 = g*(1 - math.pow(f1*1.0/g,2))\r\n \r\n indiv.fitness[0] = f1\r\n indiv.fitness[1] = f2\r\n return\r\n \r\n if prob_name == 'DTLZ1':\r\n n_obj = global_vars.params.n_obj\r\n n_var = global_vars.params.n_var \r\n g = 0\r\n for j in range(n_obj - 1, n_var):\r\n g += 100.0*(1.0 + math.pow((indiv.xreal[j]-0.5),2) - math.cos(20.0*math.pi*(indiv.xreal[j] - 0.5)))\r\n \r\n indiv.fitness[0] = 0.5*indiv.xreal[0]*indiv.xreal[1]*(1+g)\r\n indiv.fitness[1] = 0.5*indiv.xreal[0]*(1 - indiv.xreal[1])*(1+g)\r\n indiv.fitness[2] = 0.5*(1 - indiv.xreal[0])*(1 + g)\r\n \r\n return\r\n \r\n \r\n if prob_name == 'DTLZ1_g':\r\n n_obj = global_vars.params.n_obj\r\n n_var = global_vars.params.n_var \r\n g = 0\r\n for j in range(n_obj - 1, n_var):\r\n g += 100.0*(1.0 + math.pow((indiv.xreal[j]-0.5),2) - 
math.cos(20.0*math.pi*(indiv.xreal[j] - 0.5)))\r\n \r\n indiv.fitness[0] = 0.5*np.prod(indiv.xreal[:(n_obj - 1)])*(1.0 + g)\r\n for j in range(1,n_obj-1):\r\n indiv.fitness[j] = 0.5*np.prod(indiv.xreal[:(n_obj - j - 1)])*(1 - indiv.xreal[n_obj - j])*(1 + g)\r\n \r\n indiv.fitness[n_obj - 1] = 0.5*(1 - indiv.xreal[0])*(1 + g)\r\n \r\n return\r\n \r\n if prob_name == 'DTLZ2':\r\n n_obj = global_vars.params.n_obj\r\n n_var = global_vars.params.n_var \r\n g = 0\r\n for j in range(n_obj-1, n_var):\r\n g = g + ((indiv.xreal[j] - 0.5)*(indiv.xreal[j] - 0.5))\r\n \r\n indiv.fitness[0] = (1 + g)*math.cos(indiv.xreal[0]*(math.pi)/2.0)*math.cos(indiv.xreal[1]*(math.pi)/2.0)\r\n indiv.fitness[1] = (1 + g)*math.cos(indiv.xreal[0]*(math.pi)/2.0)*math.sin(indiv.xreal[1]*(math.pi)/2.0)\r\n indiv.fitness[2] = (1 + g)*math.sin(indiv.xreal[0]*(math.pi)/2.0)\r\n return\r\n \r\n if prob_name == 'DTLZ2_c':\r\n n_obj = global_vars.params.n_obj\r\n n_var = global_vars.params.n_var \r\n g = 0\r\n for j in range(n_obj-1, n_var):\r\n g = g + ((indiv.xreal[j] - 0.5)*(indiv.xreal[j] - 0.5))\r\n \r\n for j in reversed(range(n_obj)):\r\n prod = 1\r\n for k in range(n_obj - j):\r\n prod *= math.cos(indiv.xreal[k]*math.pi/2.0) if (k>=0) else 1.0\r\n if j == 2:\r\n indiv.fitness[j] = (1.0+g)*prod*(math.sin(indiv.xreal[n_obj - j -1]*math.pi/2.0) if (j>0) else 1.0)\r\n else:\r\n indiv.fitness[j] = (1.0+g)*prod*(math.sin(indiv.xreal[n_obj - j -1]*math.pi/2.0) if (j>0) else 1.0)\r\n \r\n if prob_name == 'ednn':\r\n #n_obj = global_vars.params.n_obj\r\n #n_var = global_vars.params.n_var\r\n #indiv.fitness[0] = np.sum(indiv.xreal)\r\n #indiv.fitness[1] = evluator.evaluate([inputs])[0]\r\n #indiv.fitness[1] = -indiv.fitness[1]\r\n #evaluate_pytorch.demo(indiv.gpu_id)\r\n fitness1 = 1\r\n fitness1 = -fitness1\r\n return (fitness1)\r\n \r\n else:\r\n print('supply problem name')\r\n sys.exit()\r\n\r\n","sub_path":"test_problems.py","file_name":"test_problems.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"42985447","text":"import sys\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Conv1D, MaxPooling1D, LSTM, Dense, Activation, TimeDistributed, Flatten\n\nsys.path.append('..')\nfrom run_experiments import Experiment, run_experiments, save_experiment_results\n\ndef D():\n last = l0 = Input(shape=(512,256))\n last = Flatten()(last)\n last = Dense(4)(last)\n last = Activation('softmax')(last)\n model = tf.keras.Model([l0], last)\n myfuncname = sys._getframe().f_code.co_name\n return Experiment(myfuncname, model, 'first')\n\ndef LD():\n last = l0 = Input(shape=(512,256))\n last = LSTM(32)(last)\n last = Dense(4)(last)\n last = Activation('softmax')(last)\n model = tf.keras.Model([l0], last)\n myfuncname = sys._getframe().f_code.co_name\n return Experiment(myfuncname, model, 'first')\n\ndef CL():\n last = l0 = Input(shape=(512,256))\n last = Conv1D(3, (32,), strides=32)(last)\n last = LSTM(4)(last)\n last = Activation('softmax')(last)\n model = tf.keras.Model([l0], last)\n myfuncname = sys._getframe().f_code.co_name\n return Experiment(myfuncname, model, 'first')\n\nexperiments = [\n D(),\n LD(),\n CL(),\n]\n\nresults = []\nfor d in run_experiments(experiments,\n batch_size=100,\n validation_batch_size=10,\n validation_steps=10,\n steps_per_epoch=10,\n epochs=150,\n val_acc_limit=0.9):\n print(d)\n results.append(d)\n\nsave_experiment_results('experiments.tsv', 
results)\n","sub_path":"carving-experiments-16/1-first/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"633348007","text":"import sys\nimport os\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nimport numpy as np\n\nlabel = ''\nfor char in open('labels.txt', 'r'):\n label += char\nlabel = label.split('\\n')\n\nif len(sys.argv) != 4:\n print('python cnn-model-evaluation.py model-name folder-name image-input-size')\nelse:\n model_name = sys.argv[1]\n root_folder = sys.argv[2]\n image_input_size = int(sys.argv[3])\n\n model = load_model(model_name)\n\n correct_call = 0\n for folder in os.listdir(root_folder):\n temp_correct_call = 0\n index = label.index(folder)\n for immg in os.listdir(root_folder + '/' + folder):\n img = image.load_img(root_folder+'/'+folder+'/'+immg, color_mode='grayscale', target_size=(image_input_size, image_input_size))\n img = image.img_to_array(img)\n img = img / 255\n img = np.where(img < 0.3, 0, 1)\n img = img.reshape(1, image_input_size, image_input_size, 1)\n prediction = model.predict_classes(img)\n if prediction == index:\n temp_correct_call += 1\n\n print(folder + ':', temp_correct_call, ' out of 50')\n correct_call += temp_correct_call\n print('total correct call: ', correct_call, 'out of', 50*88, 'i.e', (correct_call*100)/(50*88), '%')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# end\n","sub_path":"testing-cnn-models/cnn-model-evaluation.py","file_name":"cnn-model-evaluation.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"24417626","text":"# scrapy parse --spider=popadancy_com -d 3 'http://popadancy.com/'\n# scrapy crawl popadancy_com\n\nimport scrapy\nimport requests\nimport re\n\n\nclass popadancy_com(scrapy.Spider):\n name = 'popadancy_com'\n headers = {'cookie': 'beget=begetok'}\n page = requests.get('https://popadancy.com/', headers=headers)\n total_pages = re.search(r'https://popadancy.com/page/(\\d){3,4}', page.text).group()\n fin_page = int(re.search(r'\\d+', total_pages).group())\n start_urls = ['https://popadancy.com/page/{0}'.format(page) for page in range(2, fin_page+1)]\n start_urls.insert(0, 'https://popadancy.com/')\n allowed_domains = ['popadancy.com']\n\n def start_requests(self):\n for url in self.start_urls:\n yield self.request(url, self.get_sitemap)\n\n def request(self, url, callback):\n request = scrapy.Request(url=url, callback=callback)\n request.cookies['beget'] = 'begetok'\n return request\n\n def get_sitemap(self, response):\n self.book_hrefs = list()\n selected_hrefs = response.xpath('//h2[@class=\"entry-title\"]/a/@href')\n for href in selected_hrefs:\n self.book_hrefs.append(href.extract())\n for book_href in self.book_hrefs:\n yield self.request(book_href, self.book)\n\n def book(self, response):\n book = dict()\n book['URL'] = response.url\n\n book['Название'] = response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first()\n\n authot = response.xpath('//div[@id=\"content\"]//p/strong/text()').extract_first()\n if authot:\n book['Автор'] = authot.replace('Автор:', '').strip()\n\n pirat_1 = response.xpath('//div[@class=\"entry-content\"]//a[@class=\"download-link\"]/@href').extract_first()\n if pirat_1:\n book['Пиратка 1'] = pirat_1\n litres = response.xpath('//div[@class=\"entry-content\"]/p/a[contains(@href, \"litres.ru\")]').extract()\n if litres:\n book['Литрес (старая 
ссылка)'] = litres[1]\n lock = response.xpath('//div[@class=\"entry-content\"]//div[@class=\"onp-sl-header onp-sl-strong\"]')\n if lock:\n book['Заглушка'] = 'Ссылка заблокирована'\n\n yield book\n","sub_path":"popadancy_com.py","file_name":"popadancy_com.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"553725686","text":"'''\nprint all prime numbers in a given range\n'''\n\ndef all_prime_numbers(l, u):\n for num in range(l, u + 1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n\n else:\n return num\n\n\nfor i in range(4):\n print(f\"running for the {i + 1}th time\")\n lower = int(input(\"Enter lower range : \"))\n upper = int(input(\"Enter upper range : \"))\n num = all_prime_numbers(lower, upper)\n print(\"List of prime numbers\", num)\n print(\"\\n\")","sub_path":"Assignments/Aug292020/all_prime_numbers.py","file_name":"all_prime_numbers.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"471146592","text":"import pyspark\r\nfrom pyspark import SparkContext\r\nSparkContext.setSystemProperty('spark.executor.memory', '4g')\r\nSparkContext.setSystemProperty('spark.driver.memory', '4g')\r\n\r\ndef cleanVIN(line):\r\n try:\r\n fields = line.split(',')\r\n if len(fields)!=3:\r\n return False\r\n\r\n return True\r\n except:\r\n return False\r\n\r\ndef cleanVOUT(line):\r\n try:\r\n fields = line.split(',')\r\n if len(fields)!=4:\r\n return False\r\n\r\n return True\r\n except:\r\n return False\r\n\r\ndef filterVOUT(line):\r\n fields = line.split(',')\r\n if fields[3] == \"{1HB5XMLmzFVj8ALj6mfBsbifRoD4miY36v}\":\r\n return True\r\n return False\r\n\r\nsc = pyspark.SparkContext()\r\n\r\nvout = sc.textFile(\"/data/bitcoin/vout.csv\")\r\nvoutFiltered = vout.filter(cleanVOUT).filter(filterVOUT).map(lambda x: x.split(\",\"))\r\nvoutJoined = voutFiltered.map(lambda fields: (fields[0],(fields[1], fields[2], fields[3])))\r\n\r\nvin = sc.textFile(\"/data/bitcoin/vin.csv\")\r\nvinFiltered = vin.filter(cleanVIN).map(lambda y: y.split(\",\"))\r\nvinJoined = vinFiltered.map(lambda fields: (fields[0],(fields[1], fields[2])))\r\n\r\nfirstJoin = voutJoined.join(vinJoined)\r\n\r\n\r\nvoutFiltered1 = vout.filter(cleanVOUT).map(lambda x: x.split(\",\"))\r\nvoutJoined1 = voutFiltered1.map(lambda fields: ((fields[0], fields[2]), (fields[1], fields[3])))\r\n\r\nsecondJoin = firstJoin.map(lambda secondJoin: ((secondJoin[1][1][0], secondJoin[1][1][1]), (secondJoin[0], secondJoin[1][0][0], secondJoin[1][0][1], secondJoin[1][0][2])))\r\n\r\nfinalJoin = secondJoin.join(voutJoined1)\r\n\r\ndata = finalJoin.map(lambda sss: (sss[1][1][1],float(sss[1][1][0])))\r\n\r\nfinalData= data.reduceByKey(lambda a,b: a+b)\r\n\r\ntop10=finalData.takeOrdered(10, key= lambda c: -c[1])\r\n\r\nfor w in top10:\r\n print(w)\r\n","sub_path":"partB.py","file_name":"partB.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"208877839","text":"# -*- coding: utf-8 -*-\n# !python3\n\"\"\"\nPPT convert PDF\n\"\"\"\n\nimport os\n\nimport filetype\n\nimport win32com\n\nfrom win32com.client import Dispatch\n\ndef ppt2pdf(filename,dst_filename):\n \"\"\"A folder with the same name as the PPT file will be created in the \n same directory.This folder contains all PDF images generated by PPT \n files.Where * filename * is the path to the PPT 
file.* dst_filename * \n is the destination file format.\n \"\"\"\n ppt = win32com.client.Dispatch('PowerPoint.Application')\n # ppt.DisplayAlerts = False\n pptSel = ppt.Presentations.Open(filename, WithWindow = False)\n pptSel.SaveAs(dst_filename,32); # 32 for ppt to pdf\n ppt.Quit()\n\nppt_dir = os.getcwd() # Get the current working directory\n\nfor fn in (fns for fns in os.listdir(ppt_dir) \n if fns.endswith(('.ppt','.pptx'))):\n try:\n kind = filetype.guess(fn)\n if kind is None:\n print('Cannot guess file type ' + fn)\n elif kind.mime == 'application/zip': # File type must be PPT\n file_name = os.path.splitext(fn)[0]\n print('Converting ' + fn)\n ppt_file = os.path.join(ppt_dir, fn)\n img_file = os.path.join(ppt_dir, file_name + '.pdf')\n ppt2pdf(ppt_file,img_file)\n except:\n print('Getting file type error ' + fn)\n\nprint('pdf conversion completed')","sub_path":"convert_pdf.py","file_name":"convert_pdf.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"155520989","text":"import PySimpleGUI as sg\nfrom tema import tema\nimport interfazCargaLibros\n\nimport os \n\n\ndef interfazInicial():\n \n layout= [\n [sg.Button('Agregar Libro',font='Italic 20',size=(12,3),key='newBook'),\n sg.Button('Editar Libro',font='Italic 20',size=(12,3),key='editLibro'),\n sg.Button('Borrar Libro',font='Italic 20',size=(12,3),key='delLibro')]\n ]\n salir = [[sg.Button('Salir',font='Italic 20',size=(12,3),key='salir')]]\n return layout + salir\n\ndef principal():\n alto = 500\n ancho = 900\n tema() \n ventana = sg.Window ('Librería Don Cipriano',interfazInicial(), size = (ancho,alto),element_justification='center')\n ventana.Finalize()\n\n while True:\n evento, value = ventana.read()\n if (evento == None or evento == 'salir') :\n break\n if (evento == 'newBook'):\n interfazCargaLibros.inicioConsignas()\n \n ventana.Close()\nprincipal()\n","sub_path":"interfazPrincipal.py","file_name":"interfazPrincipal.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"628598654","text":"\"\"\"Define support for Eufy Security cameras/doorbells.\"\"\"\nimport asyncio\nimport logging\n\nfrom eufy_security.errors import EufySecurityError\nfrom haffmpeg.camera import CameraMjpeg\nfrom haffmpeg.tools import ImageFrame, IMAGE_JPEG\n\nfrom homeassistant.components.camera import SUPPORT_ON_OFF, SUPPORT_STREAM, Camera\nfrom homeassistant.components.ffmpeg import DATA_FFMPEG\nfrom homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream\n\nfrom .const import DOMAIN, DATA_API, DATA_COORDINATOR\nfrom .device import DeviceEntity\n\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_FFMPEG_ARGUMENTS = \"-pred 1\"\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n ffmpeg = hass.data[DATA_FFMPEG]\n api = hass.data[DOMAIN][entry.entry_id][DATA_API]\n coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]\n async_add_entities(\n EufySecurityCam(ffmpeg, camera, coordinator)\n for camera in api.cameras.values()\n )\n\n\nclass EufySecurityCam(DeviceEntity, Camera):\n \"\"\"Define a Eufy Security camera/doorbell.\"\"\"\n\n def __init__(self, ffmpeg, device, coordinator):\n \"\"\"Initialize.\"\"\"\n super().__init__(device, coordinator)\n Camera.__init__(self)\n\n self._async_unsub_dispatcher_connect = None\n self._ffmpeg = ffmpeg\n self._ffmpeg_arguments = DEFAULT_FFMPEG_ARGUMENTS\n self._ffmpeg_image_frame = 
ImageFrame(ffmpeg.binary)\n self._ffmpeg_stream = CameraMjpeg(ffmpeg.binary)\n self._last_image = None\n self._last_image_url = None\n self._stream_url = None\n\n @property\n def supported_features(self):\n \"\"\"Return supported features.\"\"\"\n return SUPPORT_ON_OFF | SUPPORT_STREAM\n\n @property\n def motion_detection_enabled(self):\n \"\"\"Return the camera motion detection status.\"\"\"\n return self._device.params['CAMERA_PIR']\n\n async def async_camera_image(self):\n \"\"\"Return a still image response from the camera.\"\"\"\n if self._last_image_url != self._device.last_camera_image_url:\n self._last_image = await asyncio.shield(\n self._ffmpeg_image_frame.get_image(\n self._device.last_camera_image_url,\n output_format=IMAGE_JPEG,\n extra_cmd=self._ffmpeg_arguments,\n )\n )\n self._last_image_url = self._device.last_camera_image_url\n\n return self._last_image\n\n async def async_disable_motion_detection(self):\n \"\"\"Disable doorbell's motion detection\"\"\"\n await self._device.async_update_param('DETECT_SWITCH', False)\n\n async def async_enable_motion_detection(self):\n \"\"\"Enable doorbell's motion detection\"\"\"\n await self._device.async_update_param('DETECT_SWITCH', True)\n\n async def async_turn_off(self):\n \"\"\"Turn off the RTSP stream.\"\"\"\n try:\n await self._device.async_stop_stream()\n _LOGGER.info(\"Stream stopped for %s\", self._device.name)\n except EufySecurityError as err:\n _LOGGER.error(\"Unable to stop stream (%s): %s\", self._device.name, err)\n\n self._stream_url = None\n\n async def async_turn_on(self):\n \"\"\"Turn on the RTSP stream.\"\"\"\n try:\n self._stream_url = await self._device.async_start_stream()\n _LOGGER.info(\"Stream started (%s): %s\", self._device.name, self._stream_url)\n except EufySecurityError as err:\n _LOGGER.error(\"Unable to start stream (%s): %s\", self._device.name, err)\n\n async def handle_async_mjpeg_stream(self, request):\n \"\"\"Generate an HTTP MJPEG stream from the camera.\"\"\"\n await self.async_turn_on()\n if not self._stream_url:\n return await self.async_camera_image()\n\n await self._ffmpeg_stream.open_camera(\n self._stream_url, extra_cmd=self._ffmpeg_arguments\n )\n\n try:\n stream_reader = await self._ffmpeg_stream.get_reader()\n return await async_aiohttp_proxy_stream(\n self.hass,\n request,\n stream_reader,\n self._ffmpeg.ffmpeg_stream_content_type,\n )\n finally:\n await self._ffmpeg_stream.close()\n\n async def stream_source(self):\n self._stream_url = await self._device.async_start_stream()\n return self._stream_url\n","sub_path":"custom_components/eufy_security/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"230579830","text":"import datetime\nimport dateutil.parser\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport json\nimport time\n\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom textblob import TextBlob\n\nfrom json import dumps, loads, JSONEncoder, JSONDecoder\nimport pickle\n\nclass PythonObjectEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):\n return JSONEncoder.default(self, obj)\n return {'_python_object': pickle.dumps(obj)}\n\ndef as_python_object(dct):\n if '_python_object' in dct:\n return pickle.loads(str(dct['_python_object']))\n return dct\n\n# Twitter API Information\nconsumer_key = ''\nconsumer_secret = 
''\naccess_token = ''\naccess_secret = ''\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\napi = tweepy.API(auth)\n\nurl = 'https://coinmarketcap.com/all/views/all/'\n\ni = 0\nBTCTweets = {}\nticks = ['BTC','ETH','LTC','EOS','ADA']\nsleep_time = 60*5 # 5 minutes\nfil_mc = '../data/btcpricesentiment_mc_aws1.json'\nfil_txt = '../data/btcpricesentiment_txt_aws1.json'\n\nwhile i < 100000:\n btcpricesentiment_mc = {}\n btcpricesentiment_txt = {}\n txt_btc = []\n txt_eth = []\n txt_blk = []\n\n agg_btc = []\n agg_eth = []\n agg_blk = []\n tstmp = str(datetime.datetime.now()).replace('-','').replace(' ','').split(':')[0] + str(datetime.datetime.now()).split(':')[1]\n try:\n # Twitter sentiment anlysis\n bitcoin_tweets = api.search('bitcoin')\n ethereum_tweets = api.search('ethereum')\n blockchain_tweets = api.search('blockchain')\n\n for tweet in bitcoin_tweets:\n analysis = TextBlob(tweet.text)\n sentiment = analysis.sentiment.polarity\n if sentiment != 0:\n agg_btc.append(sentiment)\n txt_btc.append([str(analysis),str(sentiment)])\n\n for tweet in ethereum_tweets:\n analysis = TextBlob(tweet.text)\n sentiment = analysis.sentiment.polarity\n if sentiment != 0:\n agg_eth.append(sentiment)\n txt_eth.append([str(analysis),str(sentiment)])\n\n for tweet in blockchain_tweets:\n analysis = TextBlob(tweet.text)\n sentiment = analysis.sentiment.polarity\n if sentiment != 0:\n agg_blk.append(sentiment)\n txt_blk.append([str(analysis),str(sentiment)])\n\n # Coinmarketcap marketcap snapshot\n response=requests.get(url)\n page=response.text\n soup=BeautifulSoup(page,\"lxml\")\n tables=soup.find_all(\"table\")\n rows=[row for row in tables[0].find_all('tr')]\n\n df = pd.read_html(tables[0].prettify())[0]\n df = df[['Symbol','Market Cap']]\n df = df.dropna() # filter out question marks; change question marks to None\n hour_data = {}\n tweet_data = {}\n rows = len(df)\n for row in range(rows):\n if df['Symbol'][row] in ticks:\n symbol = df['Symbol'][row]\n mkt_cap = df['Market Cap'][row] # .replace('$','').replace(',','')\n hour_data[symbol] = mkt_cap\n hour_data['bitcoin_S'] = [sum(agg_btc),len(agg_btc)]\n hour_data['ethereum_S'] = [sum(agg_eth),len(agg_eth)]\n hour_data['blockchain_S'] = [sum(agg_blk),len(agg_blk)]\n mktcap=soup.find_all(\"span\", class_=\"market-cap\")\n mktcap = re.split(' ',str(mktcap))[5]\n hour_data['Crypto Market Cap'] = mktcap\n\n btcpricesentiment_mc[tstmp] = hour_data\n\n tweet_data['BTC'] = txt_btc\n tweet_data['ETH'] = txt_eth\n tweet_data['Blk'] = txt_blk\n btcpricesentiment_txt[tstmp] = tweet_data\n i += 1\n\n with open(fil_txt, 'a') as fp:\n json.dump(btcpricesentiment_txt, fp)\n with open(fil_mc, 'a') as fp:\n json.dump(btcpricesentiment_mc, fp)\n\n print('{} and {} have been updated at {}.'.format(fil_mc,fil_txt, tstmp))\n except Exception as e:\n print('{} or {} exception has occurred at {}.'.format(fil_mc,fil_txt, tstmp))\n print(e)\n pass\n\n time.sleep(sleep_time)\n","sub_path":"scripts/btcpricesentiment6.py","file_name":"btcpricesentiment6.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"497335155","text":"from pyhive import hive \nimport pymysql\nconn = hive.Connection(host='localhost', port=10000, username='root', database='test') \ncursor = conn.cursor() \ncursor.execute('select count(*),avg(gprs_error),stddev(gprs_error) from ct_data') \nresults = cursor.fetchall()\n(count, average,std)=results[0]\ndb = 
pymysql.connect(\"localhost\", \"root\", \"123456\", \"test\", charset='utf8' )\ncursor = db.cursor()\nsql = \"insert into ct_data_out(user_count ,gprs_error_average) values ('%f', '%f')\" % (count, average)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\n \ndb.close()\nprint(\"Done\")","sub_path":"1.项目提交/test_pyHive.py","file_name":"test_pyHive.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"109224756","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\nimport os.path\nfrom setuptools import setup\n\n\ndef get_version():\n g = {}\n with open(os.path.join(\"uproot\", \"version.py\")) as f:\n exec(f.read(), g)\n return g[\"__version__\"]\n\n\nextras = {\n \"test\": open(\"requirements-test.txt\").read().strip().split(\"\\n\"),\n \"dev\": open(\"requirements-dev.txt\").read().strip().split(\"\\n\"),\n}\nextras[\"all\"] = sum(extras.values(), [])\n\nsetup(\n version = get_version(),\n extras_require = extras,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"223789815","text":"# Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University. \n# All Rights Reserved.\n\nimport tensorflow as tf\nfrom .rec_operator import RecOperator\nfrom ...models import *\nfrom ...abstract.mtg import MTG\nfrom ...abstract.mmd import MMD\n\nclass IteractionOnlyRec(RecOperator):\n\t'''\n\treference to: https://github.com/xiangwang1223/neural_graph_collaborative_filtering\n\t'''\n\n\tdef __init__(self, platform: str, executor: str, model: str):\n\t\tself.platform = platform\n\t\tself.executor = executor\n\t\tself.model = model\n\t\tself.saver = tf.train.Saver()\n\t\tconfig = tf.ConfigProto()\n\t\tself.sess = tf.Session(config=config)\n\n\tdef train(self, dataset: MMD):\n\t\targs = {\n\t\t\t'lr': 0.0005, \n\t\t\t'embed_size': 64, \n\t\t\t'batch_size': 1024, \n\t\t\t'layer_size': [64,64,64], \n\t\t\t'regs': [1e-5], \n\t\t\t'epoch': 200, \n\t\t\t'node_dropout': [0.1], \n\t\t\t'mess_dropout': [0.1,0.1,0.1], \n\t\t\t'ranks': [20, 40, 60, 80, 100],\n\t\t\t'model_dir': './'\n\t\t}\n\t\texecutor = OpenKSModel.get_module(self.platform, self.executor)\n\t\tself.model_obj = executor(dataset=dataset, model=OpenKSModel.get_module(self.platform, self.model), args=args)\n\t\tself.model_obj.run()\n\n\tdef rec_user_embed(self, user_ids, item_ids, model_path):\n\t\tself.saver.restore(self.sess, model_path)\n\t\trate_batch = self.sess.run(self.model_obj, feed_dict={model.users: user_ids, model.pos_items: item_ids})\n\t\treturn rate_batch\n\t\t\n\n\t\t","sub_path":"openks/apps/rec/model_rec.py","file_name":"model_rec.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"457182921","text":"# -*- coding:utf-8 -*-\r\n# author:yufeixu\r\n# datetime:2020/4/17 17:08\r\n# software: PyCharm\r\n\r\n\"\"\"\r\n 编写一个方法,计算从 0 到 n (含 n) 中数字 2 出现的次数。\r\n\r\n 示例:\r\n 输入: 25\r\n 输出: 9\r\n 解释: (2, 12, 20, 21, 22, 23, 24, 25)(注意 22 应该算作两次)\r\n n <= 10^9\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n def numberOf2sInRange(self, n: int) -> int:\r\n \"\"\"\r\n 结果对,但效率较低\r\n :param n:\r\n :return:\r\n \"\"\"\r\n if n < 2:\r\n return 0\r\n pre, current = 0, 0\r\n for i in range(2, n + 1):\r\n pre, current = current, pre + self.count(i)\r\n return pre + 
current\r\n\r\n def count(self, n) -> int:\r\n return str(n).count('2')\r\n\r\nclass Solution:\r\n def numberOf2sInRange(self, n: int) -> int:\r\n res = 0\r\n s = str(n)\r\n for i in range(len(s))[::-1]:\r\n c = s[i]\r\n left = 0 if i == 0 else int(s[0:i])\r\n if c > '2':\r\n left += 1\r\n res += left * (10**(len(s) - i - 1))\r\n if c == '2':\r\n right = int(s[i + 1:]) + 1 if i + 1 < len(s) else 1\r\n res += right\r\n return res\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n n = 222\r\n print(Solution().numberOf2sInRange(n))\r\n","sub_path":"src/leetcode/dp/no1706_numberOf2sInRange.py","file_name":"no1706_numberOf2sInRange.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215414075","text":"\n\nclass Wrapper:\n\n def __init__(self, filename):\n self.filename = filename\n\n def __call__(self):\n self.read_file()\n total_area = 0\n ribbon_length = 0\n for i in range(len(self.data)):\n temp = self.data[i][0].split('x')\n total_area += self.calculate_surface(int(temp[0]), int(temp[1]), int(temp[2]))\n ribbon_length += self.calculate_ribbon(int(temp[0]), int(temp[1]), int(temp[2]))\n print (total_area)\n print (ribbon_length)\n\n def calculate_ribbon(self, l, w, h):\n length = 2*min(l + w, l + h, w + h) + l*w*h\n return length\n\n def calculate_surface(self, l, w, h):\n local_area = 2*l*w + 2*w*h + 2*h*l\n local_area += self.calculate_slack(l, w, h)\n return local_area\n\n def calculate_slack(self, l, w, h):\n area_1 = l * w\n area_2 = l * h\n area_3 = w * h\n if area_1 <= area_2 and area_1 <= area_3:\n return area_1\n else:\n if area_2 <= area_3:\n return area_2\n else:\n return area_3\n\n\n def read_file(self):\n self.data = []\n with open(self.filename, 'r') as f:\n for line in f:\n self.data.append(line.split())\n\nif __name__ == '__main__':\n wrapper = Wrapper(\"puzzle_1.dat\")\n wrapper()\n","sub_path":"day_2/solve_1.py","file_name":"solve_1.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91032204","text":"import pygame, sys\nimport numpy as np\n\npygame.init()\n\nWIDTH = 600\nHEIGHT = 600\nLINE_WIDTH = 15\nBOARD_ROWS = 3\nBOARD_COLS = 3\n\nRED = (255, 0, 0)\nBG_COLOR = (28, 170, 156)\nLINE_COLOR = (23, 145, 135)\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('TIC TAC TOE')\nscreen.fill(BG_COLOR)\n\n# board\nboard = np.zeros((BOARD_ROWS, BOARD_COLS))\n\n\n# print(board)\n\n\n# pygame.draw.line(screen, RED, (10, 10), (300, 300), 10)\n\n\ndef draw_lines():\n # 1horizontal line\n pygame.draw.line(screen, LINE_COLOR, (0, 200), (600, 200), LINE_WIDTH)\n # 2 horizontal line\n\n\npygame.draw.line(screen, LINE_COLOR, (0, 400), (600, 400), LINE_WIDTH)\n\n# 1 vertical line\npygame.draw.line(screen, LINE_COLOR, (200, 0), (200, 600), LINE_WIDTH)\n# 2 vertical line\npygame.draw.line(screen, LINE_COLOR, (400, 0), (400, 600), LINE_WIDTH)\n\n\ndef mark_square(row, col, player):\n board[row][col] = player\n\n\ndef available_square(row, col):\n return board[row][col] == 0\n\n\ndef is_board_full():\n for row in range(BOARD_ROWS):\n for col in range(BOARD_COLS):\n if board[row][col] == 0:\n return False\n return True\n\n\nprint(is_board_full())\n\n# marking all squares\n\nfor row in range(BOARD_ROWS):\n for col in range(BOARD_COLS):\n mark_square(row, col, 1)\n# board is full - true\nprint(is_board_full())\n\ndraw_lines()\n\nwhile True:\n for event in pygame.event.get():\n if 
event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouseX = event.pos[0]\n mouseY = event.pos[1]\n\n clicked_row = int(mouseY // 200)\n clicked_col = int(mouseX // 200)\n\n print(clicked_row)\n print(clicked_col)\n\n if available_square(clicked_row, clicked_col):\n pygame.display.update()\n","sub_path":"tictactoe.py/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"295549140","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\nfrom unitbench import Benchmark\n\nfrom redis import Redis\n\nclient = Redis(host='localhost', port=6379)\n\nclass RedisBenchmark(Benchmark):\n\n def setup(self):\n self.cur_time = int(time.time())\n\n def warmup(self):\n return 0\n\n def input(self):\n for i in range(10):\n yield i\n\n def bench_zad(self, input):\n i=1\n while i < 150000:\n key = \"feedlist:{0}\".format(i)\n i = i+1\n client.zadd(key, 1232444, self.cur_time)\n\n\nif __name__ == \"__main__\":\n RedisBenchmark().run()\n","sub_path":"bench/test_redis.py","file_name":"test_redis.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"443334611","text":"import arcpy\nimport os\n\n\nclass SingleFeatureKmlTool(object):\n\n def __init__(self):\n\n self.label = \"Single feature KML\"\n self.description = \"Exports features into individual KML files\"\n self.canRunInBackground = False\n\n return\n\n def getParameterInfo(self):\n\n param0 = arcpy.Parameter(\n displayName=\"Features to Export\",\n name=\"in_features\",\n datatype=\"GPFeatureLayer\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param1 = arcpy.Parameter(\n displayName=\"Features ID Field\",\n name=\"in_features_id\",\n datatype=\"Field\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param1.parameterDependencies = [\"in_features\"] # should be constant\n\n param2 = arcpy.Parameter(\n displayName=\"Output Workspace\",\n name=\"in_outws\",\n datatype=\"DEWorkspace\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param2.defaultEnvironmentName = \"workspace\"\n\n return [param0, param1, param2]\n\n def isLicensed(self):\n\n return True\n\n def updateParameters(self, parameters):\n\n return\n\n def updateMessages(self, parameters):\n\n return\n\n def execute(self, parameters, messages):\n\n features = parameters[0].valueAsText\n feature_id_field = parameters[1].valueAsText\n out_ws = parameters[2].valueAsText\n\n feat_search_cursor = arcpy.da.SearchCursor(features, [feature_id_field])\n\n for feat_row in feat_search_cursor:\n\n feat_id = feat_row[0].strip().replace(\" \", \"-\")\n\n tmp_lyr = feat_id\n\n if arcpy.Exists(tmp_lyr):\n arcpy.Delete_management(tmp_lyr)\n\n where = \"{} = '{}'\".format(arcpy.AddFieldDelimiters(features, feature_id_field), feat_id)\n\n try:\n arcpy.MakeFeatureLayer_management(features, tmp_lyr, where)\n messages.addMessage(\"Temp layer for feature ID = {} created\".format(feat_id))\n except Exception as e:\n messages.addWarningMessage(\"Could not create temp layer for feature ID = {}: {}\".format(feat_id, e))\n continue\n\n# LayerToKML_conversion(layer, out_kmz_file, {layer_output_scale}, {is_composite}, {boundary_box_extent}, {image_size}, {dpi_of_client}, {ignore_zvalue})\n out_kmz = os.path.join(out_ws, \"{}_{}_{}.kmz\".format(features, feature_id_field, feat_id))\n arcpy.LayerToKML_conversion(tmp_lyr, out_kmz)\n 
arcpy.Delete_management(tmp_lyr)\n\n messages.addMessage(\"KML '{}' created\".format(out_kmz))\n\n return\n","sub_path":"single_feature_kml.py","file_name":"single_feature_kml.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"430015777","text":"__author__ = 'xiejianqiao'\n\nimport socket\nfrom Daemon import Daemon\nfrom threading import Thread\n\nclass MultithreadSocketListener(Daemon):\n def __init__(self,pidfile,hostname,hostport,threadnum):\n self.host=hostname\n self.port=hostport\n self.threadnum=int(threadnum)\n Daemon.__init__(self,pidfile)\n\n def createListener(self):\n self.listener = socket.socket()\n self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.listener.bind((self.host,self.port))\n self.listener.listen(5)\n\n def serverdaemon1(self):\n self.createListener()\n for i in range(self.threadnum):\n Thread(target=self.server,args=(self.listener,)).start()\n\n def server(self):\n \"\"\"\n you should override this method.\n \"\"\"\n\n","sub_path":"hpc_system_server/util/MultithreadSocketListener.py","file_name":"MultithreadSocketListener.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"198987451","text":"from django.conf.urls import url,include\nfrom rest_framework.routers import DefaultRouter\n\nfrom . import views\n\nrouter = DefaultRouter()\nrouter.register(r'idtypes', views.IdTypeViewSet)\nrouter.register(r'subjects', views.SubjectViewSet)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^subjectbyidcard/$', views.SubjectByIDCardView.as_view()),\n url(r'^subjectbyregdate/$', views.SubjectByRegistrationDateView.as_view()),\n url(r'^subjectbybirthdate/$', views.SubjectByBirthdateView.as_view()),\n url(r'^subjectbycountry/$', views.SubjectByCountryView.as_view()),\n url(r'^subjectbygender/$', views.SubjectByGenderView.as_view()),\n url(r'^subjectbyphone/$', views.SubjectByPhone.as_view()),\n url(r'^subjectbyemail/$', views.SubjectByEmail.as_view()),\n url(r'^subjectbyidcardtype/$', views.SubjectByIdCardTypeView.as_view()),\n url(r'^subjectbyfullname/$', views.SubjectByFullName.as_view()),\n]\n","sub_path":"gdprapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"450263339","text":"from sqlalchemy import Column, Integer, DateTime, VARCHAR\n\nfrom ihb.database.orm_base import ORMBase\n\n\nclass Users(ORMBase):\n __tablename__ = 'users'\n\n id = Column(Integer, primary_key=True)\n user_name = Column(VARCHAR, primary_key=True)\n email = Column(VARCHAR, nullable=False)\n created_at = Column(DateTime)\n","sub_path":"ihb-core/ihb/database/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45809616","text":"\"\"\"\nVarious tools for working with Workflows and Operations\n\"\"\"\n\nimport glob\nimport copy\nfrom collections import Iterator\nfrom collections import OrderedDict\n\nfrom . 
import Operation as opmod \n\nclass FileSystemIterator(Iterator):\n\n def __init__(self,dirpath,regex,include_existing_files=True):\n self.dirpath = dirpath\n self.rx = regex\n self.paths_done = []\n if not include_existing_files:\n self.paths_done = glob.glob(self.dirpath+'/'+self.rx)\n super(FileSystemIterator,self).__init__()\n\n def next(self):\n batch_list = glob.glob(self.dirpath+'/'+self.rx)\n for path in batch_list:\n if not path in self.paths_done:\n self.paths_done.append(path)\n return [path]\n return [None]\n\nclass ExecutionError(Exception):\n def __init__(self,msg):\n super(ExecutionError,self).__init__(self,msg)\n\ndef get_uri_from_dict(uri,d):\n keys = uri.split('.')\n itm = d\n for k in keys:\n if not isinstance(itm,dict):\n msg = 'something in {} is not a dict'.format(uri)\n raise KeyError(msg)\n if not k in itm.keys():\n msg = 'did not find uri {} in dict'.format(uri)\n raise KeyError(msg)\n else:\n itm = itm[k]\n return itm\n\ndef dict_contains_uri(uri,d):\n keys = uri.split('.')\n itm = d\n for k in keys:\n if not k in itm.keys():\n return False\n else:\n itm = itm[k]\n return True\n\ndef locate_input(il,wf=None,wf_manager=None,plugin_manager=None):\n \"\"\"\n Return the data pointed to by a given InputLocator object.\n A WfManager and/or a PluginManager can be provided \n as optional arguments,\n in which case they are used to fetch data.\n \"\"\"\n if il.tp == opmod.no_input or il.val is None:\n return None\n elif il.tp == opmod.workflow_item:\n if isinstance(il.val,list):\n return [wf.get_data_from_uri(v) for v in il.val]\n else:\n return wf.get_data_from_uri(il.val)\n elif il.tp == opmod.entire_workflow:\n return wf_manager.workflows[il.val]\n elif il.tp == opmod.plugin_item:\n if isinstance(il.val,list):\n return [plugin_manager.get_data_from_uri(v) for v in il.val]\n else:\n return plugin_manager.get_data_from_uri(il.val)\n elif il.tp == opmod.auto_type:\n return il.val\n else:\n msg = '[{}] failed to parse InputLocator (type: {}, val: {})'.format(\n __name__,il.tp,il.val)\n raise ValueError(msg)\n #elif il.tp == opmod.integer_type:\n # if isinstance(il.val,list):\n # return [int(v) for v in il.val]\n # else:\n # return int(il.val)\n #elif il.tp == opmod.float_type:\n # if isinstance(il.val,list):\n # return [float(v) for v in il.val]\n # else:\n # return float(il.val)\n #elif il.tp == opmod.bool_type:\n # if isinstance(il.val,list):\n # return [bool(eval(str(v))) for v in il.val]\n # else:\n # return bool(eval(str(il.val)))\n #elif (il.tp == opmod.filesystem_path\n # or il.tp == opmod.workflow_path\n # or il.tp == opmod.string_type):\n # if isinstance(il.val,list):\n # return [str(v) for v in il.val]\n # else:\n # return str(il.val)\n\ndef print_stack(stk):\n stktxt = ''\n opt_newline = '\\n'\n for i,lst in zip(range(len(stk)),stk):\n if i == len(stk)-1:\n opt_newline = ''\n if len(lst) > 1:\n if isinstance(lst[1],list):\n substk = lst[1]\n stktxt += ('[\\'{}\\':\\n{}\\n]'+opt_newline).format(lst[0],print_stack(lst[1]))\n else:\n stktxt += ('{}'+opt_newline).format(lst)\n else:\n stktxt += ('{}'+opt_newline).format(lst)\n return stktxt\n\n# TODO: the following\ndef check_wf(wf):\n \"\"\"\n Check the dependencies of the workflow.\n Ensure that all loaded operations have inputs that make sense.\n Return a status code and message for each of the Operations.\n \"\"\"\n 
pass\n\n","sub_path":"paws/core/operations/optools.py","file_name":"optools.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"269932200","text":"import numpy as np\n# get_ipython().magic(u'matplotlib inline')\nimport matplotlib.pyplot as plt\n\n\ndef Our_random(seed,num):\n sequ=[0]*num\n prime=8121\n offset=28411\n mody=134456\n sequ[0]=(seed*prime+offset)%mody\n \n for i in range(1,(num)):\n sequ[i]=(sequ[i-1]*prime+offset)%mody\n j=[float(x)/mody for x in sequ]\n return j \n \n\n\n# In[96]:\n\nnumbers=np.array(Our_random(49,1000000))\n\nn, bins, patches = plt.hist(numbers, 100, normed=1, facecolor='green', alpha=0.75)\nplt.title(r'distribution of $10^6$ random numbers')\nplt.ylabel('normalized frequency')\nplt.xlabel('random number from 0 to 1')\nplt.show()\n","sub_path":"Code/Random.Gen-Goodness.of.test2.py","file_name":"Random.Gen-Goodness.of.test2.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"528991428","text":"import sys\nimport turicreate as tc\n\ndef execute(algo):\n data = tc.SFrame('dataset.csv')\n train_data, test_data = data.random_split(0.8)\n\n if algo == 1:\n model = tc.boosted_trees_classifier.create(train_data, target='Result', max_iterations=2, max_depth = 3)\n elif algo == 2:\n model = tc.decision_tree_classifier.create(train_data, target='Result')\n elif algo == 3:\n model = tc.nearest_neighbor_classifier.create(train_data, target='Result')\n else:\n model = tc.logistic_classifier.create(train_data, target='Result')\n\n predictions = model.classify(test_data)\n return model.evaluate(test_data)\n\ntry:\n print(\"[1] Boosted decision tree\\n\"\n \"[2] Decision tree\\n\"\n \"[3] Nearest neighbour\\n\"\n \"[4] Logistic regression\")\n\n num = int(input(\"Select ML Algorithm: \"))\n\n if not num or num < 1 or num > 4:\n raise ValueError()\n\n results = execute(num)\n print(\"\\n\\n\\n>>> Accuracy\\t: %s\" % results['accuracy'])\n print(\">>> Precision\\t: %s\" % results['precision'])\n print(\">>> Recall\\t: %s\" % results['recall'])\n print(\">>> F1 Score\\t: %s\" % results['f1_score'])\n\nexcept ValueError:\n print(\"\\nError: Please select a number between 1 to 3.\")\n sys.exit(1)\n\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"546840245","text":"from .exceptions import *\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = []\n\n\ndef _get_random_word(list_of_words):\n if not list_of_words:\n raise InvalidListOfWordsException(Exception)\n return random.choice(list_of_words)\n\n\ndef _mask_word(word):\n if not word:\n raise InvalidWordException()\n return '*' * len(word)\n\n\ndef _uncover_word(answer_word, masked_word, character): \n if not answer_word:\n raise InvalidWordException\n if not masked_word:\n raise InvalidWordException\n if len(character) > 1:\n raise InvalidGuessedLetterException\n if len(answer_word) != len(masked_word):\n raise InvalidWordException\n char1 = character.lower()\n answer_word1 = answer_word.lower()\n count1 = answer_word1.count(char1)\n answer_list = list(answer_word1)\n masked_list = list(masked_word)\n for i in range(len(answer_list)):\n if answer_list[i] == char1:\n masked_list[i] = char1\n masked_answer = \"\".join(masked_list)\n return masked_answer.lower()\n\ndef 
guess_letter(game, letter):\n if game['remaining_misses'] == 0 or '*' not in game['masked_word']:\n raise GameFinishedException\n masked_answer =_uncover_word(game['answer_word'], game['masked_word'], letter.lower())\n if masked_answer == game['masked_word']:\n game['remaining_misses']-=1\n if game['remaining_misses'] == 0:\n raise GameLostException\n game['previous_guesses'].append(letter.lower())\n game['masked_word'] = masked_answer\n if masked_answer == game['answer_word']:\n raise GameWonException\n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"241807835","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2012, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport datetime\nfrom uuid import uuid4\n\nfrom enaml.qt.qt import QtCore\nfrom enaml.qt.qt.QtGui import QApplication\nfrom enaml.qt.qt_datetime_edit import QtDatetimeEdit\nfrom enaml.qt.qt_local_pipe import QtLocalPipe\n\n# Workarounds for an incompatibility between PySide and PyQt\ntry: # pragma: no cover\n qdatetime_to_python = QtCore.QDateTime.toPython\nexcept AttributeError: # pragma: no cover\n qdatetime_to_python = QtCore.QDateTime.toPyDateTime\n\nclass TestQtDatetimeEdit(object):\n \"\"\" Unit tests for the QtDatetimeEdit\n\n \"\"\"\n def __init__(self):\n \"\"\" Create an application instance so that widgets can be created\n\n \"\"\"\n if not QApplication.instance():\n self.app = QApplication([])\n \n def setUp(self):\n \"\"\" Set up the widget for testing\n\n \"\"\"\n self.datetime_edit = QtDatetimeEdit(None, uuid4().hex,\n QtLocalPipe(uuid4))\n self.datetime_edit.create()\n\n def test_set_datetime(self):\n \"\"\" Test the QtDatetimeEdit's set_datetime command\n\n \"\"\"\n date_time = datetime.datetime(2012,6,22,0,0,0,0)\n self.datetime_edit.recv_message({'action':'set-datetime',\n 'datetime':str(date_time)})\n widget_date_time = qdatetime_to_python(self.datetime_edit.widget.dateTime())\n assert widget_date_time == date_time\n\n def test_set_min_datetime(self):\n \"\"\" Test the QtDatetimeEdit's set_min_datetime command\n\n \"\"\"\n min_date_time = datetime.datetime(1752,9,14, 0, 0, 0, 0)\n self.datetime_edit.recv_message({'action':'set-minimum',\n 'minimum':str(min_date_time)})\n widget_min_date_time = qdatetime_to_python(\n self.datetime_edit.widget.minimumDateTime())\n assert widget_min_date_time == min_date_time\n\n def test_set_max_datetime(self):\n \"\"\" Test the QtDatetimeEdit's set_max_datetime command\n\n \"\"\"\n max_date_time = datetime.datetime(7999, 12, 31, 23, 59, 59, 999000)\n self.datetime_edit.recv_message({'action':'set-maximum',\n 'maximum':str(max_date_time)})\n widget_max_date_time = qdatetime_to_python(\n self.datetime_edit.widget.maximumDateTime())\n assert widget_max_date_time == max_date_time\n\n def test_set_datetime_format(self):\n \"\"\" Test the QtDatetimeEdit's set_datetime_format command\n\n \"\"\"\n date_time_format = 'd M y - hh:mm:ss'\n 
self.datetime_edit.recv_message({'action':'set-datetime_format',\n 'datetime_format':date_time_format})\n widget_format = self.datetime_edit.widget.displayFormat()\n assert widget_format == date_time_format\n","sub_path":"enaml/tests/qt/test_qt_datetime_edit.py","file_name":"test_qt_datetime_edit.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"433449345","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate, logout \nfrom django.views import View\n\nfrom app.forms import UserSignupForm, UserSignupProfileform, NewsForm\nfrom app.models import Profile, News\n\n\nclass Index(View):\n def get(sef, request, *args, **kwargs):\n news = News.objects.all().order_by('timestamp').reverse().first()\n path_to_file = news.news_file.path\n file = open(path_to_file, 'r')\n news_text = file.readlines()\n file.close()\n args = {}\n args['news']=news\n args['title']=news_text[0]\n args['index_body']=news_text[1]\n args['body']=news_text[2]\n return render(request, 'index.html', {'args':args})\n\nclass SignUp(View):\n template_name = 'registration/signup.html'\n\n def get(self, request, *args, **kwargs):\n user_form = UserSignupForm()\n profile_form = UserSignupProfileform()\n forms = {}\n forms['user_form'] = user_form\n forms['profile_form'] = profile_form\n return render(request, self.template_name, forms)\n\n\n def post(self, request, *args, **kwargs):\n user_form = UserSignupForm(request.POST)\n profile_form = UserSignupProfileform(request.POST, request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user = User.objects.create_user(\n username=user_form.cleaned_data['username'],\n first_name=user_form.cleaned_data['first_name'],\n last_name=user_form.cleaned_data['last_name'],\n email=user_form.cleaned_data['email'],\n password=user_form.cleaned_data['password']\n )\n profile = Profile.objects.create(\n user=user,\n about_me=profile_form.cleaned_data['about_me'],\n img_file=profile_form.cleaned_data['img_file']\n )\n user.save()\n profile.save()\n return redirect('login')\n return redirect('signup')\n\n\nclass PostNews(View):\n template_name = 'posts/postnews.html'\n\n def get(self, request, *args, **kwargs):\n news_form = NewsForm()\n return render(request, self.template_name, { 'news_form':news_form })\n\n def post(self, request, *args, **kwargs):\n news_form = NewsForm(request.POST, request.FILES)\n author_id = User.objects.filter(id=request.user.id).first()\n if news_form.is_valid():\n news = News.objects.create(\n title=news_form.cleaned_data['title'],\n index_body=news_form.cleaned_data['index_body'],\n body=news_form.cleaned_data['body'],\n img_file=news_form.cleaned_data['img_file'],\n news_file=news_form.cleaned_data['news_file'],\n author_id=author_id\n )\n news.save()\n return redirect('index')\n print(news_form.errors)\n input()\n return render(request, self.template_name, { 'news_form':news_form })\n\nclass ShowProfile(View):\n\n def get(self, request, *args, **kwargs):\n profile = Profile.objects.filter(user=request.user).first()\n return render(request, 'registration/profile.html', { 'profile':profile })\n\n","sub_path":"spacenow/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215038752","text":"'''Everything related to perception of the 
world'''\n\n# ######################################################################\n# Imports\n# ######################################################################\n\n# Core ROS imports come first.\nimport roslib\nroslib.load_manifest('pr2_pbd_interaction')\nimport rospy\n\n# System builtins\nimport threading\nimport time\n\n# 3rd party\nfrom numpy import array\nfrom numpy.linalg import norm\n\n# ROS builtins\nimport actionlib\nfrom actionlib_msgs.msg import GoalStatus\nfrom geometry_msgs.msg import Quaternion, Vector3, Point, Pose, PoseStamped\nfrom std_msgs.msg import ColorRGBA, Header\nimport tf\nfrom tf import TransformListener, TransformBroadcaster\n\n# ROS 3rd party\nfrom interactive_markers.interactive_marker_server import (\n InteractiveMarkerServer)\nfrom interactive_markers.menu_handler import MenuHandler\nfrom manipulation_msgs.msg import GraspableObjectList\nfrom object_manipulation_msgs.srv import FindClusterBoundingBox\nfrom pr2_interactive_object_detection.msg import (\n UserCommandAction, UserCommandGoal)\nfrom visualization_msgs.msg import (\n Marker, InteractiveMarker, InteractiveMarkerControl,\n InteractiveMarkerFeedback)\n\n# Local\nfrom pr2_pbd_interaction.msg import Object, ArmState\nfrom pr2_social_gaze.msg import GazeGoal\nfrom response import Response\n\n\n# ######################################################################\n# Module level constants\n# ######################################################################\n\n# Two objects must be closer than this to be considered 'the same'.\nOBJ_SIMILAR_DIST_THRESHHOLD = 0.075\n\n# When adding objects, if they are closer than this they'll replace one\n# another.\nOBJ_ADD_DIST_THRESHHOLD = 0.02\n\n# How close to 'nearest' object something must be to be counted as\n# 'near' it.\nOBJ_NEAREST_DIST_THRESHHOLD = 0.4\n\n# Object distances below this will be clamped to zero.\nOBJ_DIST_ZERO_CLAMP = 0.0001\n\nACTION_OBJ_DETECTION = 'object_detection_user_command'\nTOPIC_OBJ_RECOGNITION = 'interactive_object_recognition_result'\nTOPIC_TABLE_SEG = 'tabletop_segmentation_markers'\nTOPIC_IM_SERVER = 'world_objects'\nSERVICE_BB = 'find_cluster_bounding_box'\n\n# Scales\nSCALE_TEXT = Vector3(0.0, 0.0, 0.03)\nSURFACE_HEIGHT = 0.01 # 0.01 == 1cm (I think)\nOFFSET_OBJ_TEXT_Z = 0.06 # How high objects' labels are above them.\n# Object dimensions. I don't fully understand this, as it seems like\n# each object's dimensions should be extracted from the point cloud.\n# But apparently this works and is a default or something?\nDIMENSIONS_OBJ = Vector3(0.2, 0.2, 0.2)\n\n# Colors\nCOLOR_OBJ = ColorRGBA(0.2, 0.8, 0.0, 0.6)\nCOLOR_SURFACE = ColorRGBA(0.8, 0.0, 0.4, 0.4)\nCOLOR_TEXT = ColorRGBA(0.0, 0.0, 0.0, 0.5)\n\n# Frames\nBASE_LINK = 'base_link'\n\n# Time\nMARKER_DURATION = rospy.Duration(2)\n# How long to pause when waiting for external code, like gaze actions or\n# object segmentation, to finish before checking again.\nPAUSE_SECONDS = rospy.Duration(0.1)\n# How long we're willing to wait for object recognition.\nRECOGNITION_TIMEOUT_SECONDS = rospy.Duration(5.0)\n\n\n# ######################################################################\n# Classes\n# ######################################################################\n\nclass WorldObject:\n '''Class for representing objects'''\n\n def __init__(self, pose, index, dimensions, is_recognized):\n '''\n Args:\n pose (Pose): Position of bounding box\n index (int): For naming object in world (e.g. 
\"thing 0\")\n dimensions (Vector3): Size of bounding box\n is_recognized (bool): Result of object recognition.\n '''\n self.index = index\n self.assigned_name = None\n self.is_recognized = is_recognized\n self.object = Object(\n Object.TABLE_TOP, self.get_name(), pose, dimensions)\n self.menu_handler = MenuHandler()\n self.int_marker = None\n self.is_removed = False\n self.menu_handler.insert('Remove from scene', callback=self.remove)\n\n def get_name(self):\n '''Return this object's name.\n\n Returns:\n str\n '''\n if self.assigned_name is None:\n if self.is_recognized:\n return 'object ' + str(self.index)\n else:\n return 'thing ' + str(self.index)\n else:\n return self.assigned_name\n\n def remove(self, __):\n '''Function for removing object from the world.\n\n Args:\n __ (???): Unused\n '''\n rospy.loginfo('Will remove object: ' + self.get_name())\n self.is_removed = True\n\n # TODO(mbforbes): Re-implement object recognition or remove\n # this dead code.\n\n # def assign_name(self, name):\n # '''Function for assigning a different name to this object.\n\n # Args:\n # name (str): The new name.\n # '''\n # self.assigned_name = name\n # self.object.name = name\n\n # def decrease_index(self):\n # '''Function to decrese object index.'''\n # self.index -= 1\n\n\nclass World:\n '''Handles object recognition, localization, and coordinate space\n transformations.'''\n\n tf_listener = None\n\n # Type: [WorldObject]\n objects = []\n\n def __init__(self):\n # Public attributes\n if World.tf_listener is None:\n World.tf_listener = TransformListener()\n self.surface = None\n\n # Private attributes\n self._lock = threading.Lock()\n self._tf_broadcaster = TransformBroadcaster()\n self._im_server = InteractiveMarkerServer(TOPIC_IM_SERVER)\n rospy.wait_for_service(SERVICE_BB)\n self._bb_service = rospy.ServiceProxy(\n SERVICE_BB, FindClusterBoundingBox)\n self._object_action_client = actionlib.SimpleActionClient(\n ACTION_OBJ_DETECTION, UserCommandAction)\n self._object_action_client.wait_for_server()\n rospy.loginfo(\n 'Interactive object detection action server has responded.')\n\n # Setup other ROS machinery\n rospy.Subscriber(\n TOPIC_OBJ_RECOGNITION, GraspableObjectList,\n self.receive_object_info)\n rospy.Subscriber(TOPIC_TABLE_SEG, Marker, self.receive_table_marker)\n\n # Init\n self.clear_all_objects()\n\n # ##################################################################\n # Static methods: Public (API)\n # ##################################################################\n\n @staticmethod\n def get_pose_from_transform(transform):\n '''Returns pose for transformation matrix.\n\n Args:\n transform (Matrix3x3): (I think this is the correct type.\n See ActionStepMarker as a reference for how to use.)\n\n Returns:\n Pose\n '''\n pos = transform[:3, 3].copy()\n rot = tf.transformations.quaternion_from_matrix(transform)\n return Pose(\n Point(pos[0], pos[1], pos[2]),\n Quaternion(rot[0], rot[1], rot[2], rot[3])\n )\n\n @staticmethod\n def get_matrix_from_pose(pose):\n '''Returns the transformation matrix for given pose.\n\n Args:\n pose (Pose)\n\n Returns:\n Matrix3x3: (I think this is the correct type. 
See\n ActionStepMarker as a reference for how to use.)\n '''\n pp, po = pose.position, pose.orientation\n rotation = [po.x, po.y, po.z, po.w]\n transformation = tf.transformations.quaternion_matrix(rotation)\n position = [pp.x, pp.y, pp.z]\n transformation[:3, 3] = position\n return transformation\n\n @staticmethod\n def get_absolute_pose(arm_state):\n '''Returns absolute pose of an end effector state (trasnforming\n if relative).\n\n Args:\n arm_state (ArmState)\n\n Returns:\n Pose\n '''\n if arm_state.refFrame == ArmState.OBJECT:\n arm_state_copy = ArmState(\n arm_state.refFrame, Pose(\n arm_state.ee_pose.position,\n arm_state.ee_pose.orientation),\n arm_state.joint_pose[:],\n arm_state.refFrameObject)\n World.convert_ref_frame(arm_state_copy, ArmState.ROBOT_BASE)\n return arm_state_copy.ee_pose\n else:\n return arm_state.ee_pose\n\n @staticmethod\n def get_most_similar_obj(ref_object, ref_frame_list):\n '''Finds the most similar object in the world.\n\n Args:\n ref_object (?)\n ref_frame_list ([Object]): List of objects (as defined by\n Object.msg).\n\n Returns:\n Object|None: As in one of Object.msg, or None if no object\n was found close enough.\n '''\n best_dist = 10000 # Not a constant; an absurdly high number.\n chosen_obj = None\n for ref_frame in ref_frame_list:\n dist = World.object_dissimilarity(ref_frame, ref_object)\n if dist < best_dist:\n best_dist = dist\n chosen_obj = ref_frame\n if chosen_obj is None:\n rospy.loginfo('Did not find a similar object.')\n else:\n rospy.loginfo('Object dissimilarity is --- ' + str(best_dist))\n if best_dist > OBJ_SIMILAR_DIST_THRESHHOLD:\n rospy.loginfo('Found some objects, but not similar enough.')\n chosen_obj = None\n else:\n rospy.loginfo(\n 'Most similar to new object: ' + str(chosen_obj.name))\n\n # Regardless, return the \"closest object,\" which may be None.\n return chosen_obj\n\n @staticmethod\n def get_frame_list():\n '''Function that returns the list of reference frames (Objects).\n\n Returns:\n [Object]: List of Object (as defined by Object.msg), the\n current reference frames.\n '''\n return [w_obj.object for w_obj in World.objects]\n\n @staticmethod\n def has_objects():\n '''Returns whetehr there are any objects (reference frames).\n\n Returns:\n bool\n '''\n return len(World.objects) > 0\n\n @staticmethod\n def object_dissimilarity(obj1, obj2):\n '''Returns distance between two objects.\n\n Returns:\n float\n '''\n d1 = obj1.dimensions\n d2 = obj2.dimensions\n return norm(array([d1.x, d1.y, d1.z]) - array([d2.x, d2.y, d2.z]))\n\n @staticmethod\n def get_ref_from_name(ref_name):\n '''Returns the reference frame type from the reference frame\n name specified by ref_name.\n\n Args:\n ref_name (str): Name of a referene frame.\n\n Returns:\n int: One of ArmState.*, the number code of the reference\n frame specified by ref_name.\n '''\n if ref_name == 'base_link':\n return ArmState.ROBOT_BASE\n else:\n return ArmState.OBJECT\n\n @staticmethod\n def convert_ref_frame(arm_frame, ref_frame, ref_frame_obj=Object()):\n '''Transforms an arm frame to a new ref. 
frame.\n\n Args:\n arm_frame (ArmState)\n ref_frame (int): One of ArmState.*\n ref_frame_obj (Object): As in Object.msg\n\n Returns:\n ArmState: arm_frame (passed in), but modified.\n '''\n if ref_frame == ArmState.ROBOT_BASE:\n if arm_frame.refFrame == ArmState.ROBOT_BASE:\n # Transform from robot base to itself (nothing to do).\n rospy.logdebug(\n 'No reference frame transformations needed (both ' +\n 'absolute).')\n elif arm_frame.refFrame == ArmState.OBJECT:\n # Transform from object to robot base.\n abs_ee_pose = World.transform(\n arm_frame.ee_pose,\n arm_frame.refFrameObject.name,\n 'base_link'\n )\n arm_frame.ee_pose = abs_ee_pose\n arm_frame.refFrame = ArmState.ROBOT_BASE\n arm_frame.refFrameObject = Object()\n else:\n rospy.logerr(\n 'Unhandled reference frame conversion: ' +\n str(arm_frame.refFrame) + ' to ' + str(ref_frame))\n elif ref_frame == ArmState.OBJECT:\n if arm_frame.refFrame == ArmState.ROBOT_BASE:\n # Transform from robot base to object.\n rel_ee_pose = World.transform(\n arm_frame.ee_pose, 'base_link', ref_frame_obj.name)\n arm_frame.ee_pose = rel_ee_pose\n arm_frame.refFrame = ArmState.OBJECT\n arm_frame.refFrameObject = ref_frame_obj\n elif arm_frame.refFrame == ArmState.OBJECT:\n # Transform between the same object (nothign to do).\n if arm_frame.refFrameObject.name == ref_frame_obj.name:\n rospy.logdebug(\n 'No reference frame transformations needed (same ' +\n 'object).')\n else:\n # Transform between two different objects.\n rel_ee_pose = World.transform(\n arm_frame.ee_pose,\n arm_frame.refFrameObject.name,\n ref_frame_obj.name\n )\n arm_frame.ee_pose = rel_ee_pose\n arm_frame.refFrame = ArmState.OBJECT\n arm_frame.refFrameObject = ref_frame_obj\n else:\n rospy.logerr(\n 'Unhandled reference frame conversion: ' +\n str(arm_frame.refFrame) + ' to ' + str(ref_frame))\n return arm_frame\n\n @staticmethod\n def has_object(object_name):\n '''Returns whether the world contains an Object with object_name.\n\n Args:\n object_name (str)\n\n Returns:\n bool\n '''\n return object_name in [wobj.object.name for wobj in World.objects]\n\n @staticmethod\n def is_frame_valid(object_name):\n '''Returns whether the frame (object) name is valid for\n transforms.\n\n Args:\n object_name (str)\n\n Returns:\n bool\n '''\n return object_name == 'base_link' or World.has_object(object_name)\n\n @staticmethod\n def transform(pose, from_frame, to_frame):\n '''Transforms a pose between two reference frames. 
If there is a\n TF exception or object does not exist, it will return the pose\n back without any transforms.\n\n Args:\n pose (Pose)\n from_frame (str)\n to_frame (str)\n\n Returns:\n Pose\n '''\n if World.is_frame_valid(from_frame) and World.is_frame_valid(to_frame):\n pose_stamped = PoseStamped()\n try:\n common_time = World.tf_listener.getLatestCommonTime(\n from_frame, to_frame)\n pose_stamped.header.stamp = common_time\n pose_stamped.header.frame_id = from_frame\n pose_stamped.pose = pose\n rel_ee_pose = World.tf_listener.transformPose(\n to_frame, pose_stamped)\n return rel_ee_pose.pose\n except tf.Exception:\n rospy.logerr('TF exception during transform.')\n return pose\n except rospy.ServiceException:\n rospy.logerr('ServiceException during transform.')\n return pose\n else:\n rospy.logdebug(\n 'One of the frame objects might not exist: ' + from_frame +\n ' or ' + to_frame)\n return pose\n\n @staticmethod\n def pose_distance(pose1, pose2, is_on_table=True):\n '''Returns distance between two world poses.\n\n Args:\n pose1 (Pose)\n pose2 (Pose)\n is_on_table (bool, optional): Whether the objects are on the\n table (if so, disregards z-values in computations).\n\n Returns:\n float\n '''\n if pose1 == [] or pose2 == []:\n return 0.0\n else:\n p1p = pose1.position\n p2p = pose2.position\n if is_on_table:\n arr1 = array([p1p.x, p1p.y])\n arr2 = array([p2p.x, p2p.y])\n else:\n arr1 = array([p1p.x, p1p.y, p1p.z])\n arr2 = array([p2p.x, p2p.y, p2p.z])\n dist = norm(arr1 - arr2)\n if dist < OBJ_DIST_ZERO_CLAMP:\n dist = 0\n return dist\n\n @staticmethod\n def log_pose(log_fn, pose):\n '''For printing a pose to rosout. We don't do it on one line\n becuase that messes up the indentation with the rest of the log.\n\n Args:\n log_fn (function(str)): A logging function that takes a\n string as an argument. 
For example, rospy.loginfo.\n pose (Pose): The pose to log\n '''\n p, o = pose.position, pose.orientation\n log_fn(' - position: (%f, %f, %f)' % (p.x, p.y, p.z))\n log_fn(' - orientation: (%f, %f, %f, %f)' % (o.x, o.y, o.z, o.w))\n\n # ##################################################################\n # Static methods: Internal (\"private\")\n # ##################################################################\n\n @staticmethod\n def _get_mesh_marker(marker, mesh):\n '''Generates and returns a marker from a mesh.\n\n Args:\n marker (Marker)\n mesh (Mesh)\n\n Returns:\n Marker\n '''\n marker.type = Marker.TRIANGLE_LIST\n index = 0\n marker.scale = Vector3(1.0, 1.0, 1.0)\n while index + 2 < len(mesh.triangles):\n if (mesh.triangles[index] < len(mesh.vertices)\n and mesh.triangles[index + 1] < len(mesh.vertices)\n and mesh.triangles[index + 2] < len(mesh.vertices)):\n marker.points.append(mesh.vertices[mesh.triangles[index]])\n marker.points.append(mesh.vertices[mesh.triangles[index + 1]])\n marker.points.append(mesh.vertices[mesh.triangles[index + 2]])\n index += 3\n else:\n rospy.logerr('Mesh contains invalid triangle!')\n break\n return marker\n\n @staticmethod\n def _get_surface_marker(pose, dimensions):\n '''Returns a surface marker with provided pose and dimensions.\n\n Args:\n pose (Pose)\n dimensions (Vector3)\n\n Returns:\n InteractiveMarker\n '''\n int_marker = InteractiveMarker()\n int_marker.name = 'surface'\n int_marker.header.frame_id = BASE_LINK\n int_marker.pose = pose\n int_marker.scale = 1\n button_control = InteractiveMarkerControl()\n button_control.interaction_mode = InteractiveMarkerControl.BUTTON\n button_control.always_visible = True\n object_marker = Marker(\n type=Marker.CUBE,\n id=2000,\n lifetime=MARKER_DURATION,\n scale=dimensions,\n header=Header(frame_id=BASE_LINK),\n color=COLOR_SURFACE,\n pose=pose\n )\n button_control.markers.append(object_marker)\n text_pos = Point()\n position = pose.position\n dimensions = dimensions\n text_pos.x = position.x + dimensions.x / 2 - 0.06\n text_pos.y = position.y - dimensions.y / 2 + 0.06\n text_pos.z = position.z + dimensions.z / 2 + 0.06\n text_marker = Marker(\n type=Marker.TEXT_VIEW_FACING,\n id=2001,\n scale=SCALE_TEXT, text=int_marker.name,\n color=COLOR_TEXT,\n header=Header(frame_id=BASE_LINK),\n pose=Pose(text_pos, Quaternion(0, 0, 0, 1))\n )\n button_control.markers.append(text_marker)\n int_marker.controls.append(button_control)\n return int_marker\n\n # ##################################################################\n # Instance methods: Public (API)\n # ##################################################################\n\n def receive_table_marker(self, marker):\n '''Callback function for markers to determine table'''\n if marker.type == Marker.LINE_STRIP:\n if len(marker.points) == 6:\n rospy.loginfo('Received a TABLE marker.')\n xmin = marker.points[0].x\n ymin = marker.points[0].y\n xmax = marker.points[2].x\n ymax = marker.points[2].y\n depth = xmax - xmin\n width = ymax - ymin\n\n pose = Pose(marker.pose.position, marker.pose.orientation)\n pose.position.x = pose.position.x + xmin + depth / 2\n pose.position.y = pose.position.y + ymin + width / 2\n dimensions = Vector3(depth, width, SURFACE_HEIGHT)\n self.surface = World._get_surface_marker(pose, dimensions)\n self._im_server.insert(\n self.surface, self.marker_feedback_cb)\n self._im_server.applyChanges()\n\n def receive_object_info(self, object_list):\n '''Callback function to receive object info'''\n self._lock.acquire()\n rospy.loginfo('Received 
recognized object list.')\n if len(object_list.graspable_objects) > 0:\n for i in range(len(object_list.graspable_objects)):\n models = object_list.graspable_objects[i].potential_models\n if len(models) > 0:\n object_pose = None\n best_confidence = 0.0\n for j in range(len(models)):\n if best_confidence < models[j].confidence:\n object_pose = models[j].pose.pose\n best_confidence = models[j].confidence\n if object_pose is not None:\n rospy.logwarn(\n 'Adding the recognized object with most ' +\n 'confident model.')\n self._add_new_object(\n object_pose,\n DIMENSIONS_OBJ,\n True,\n object_list.meshes[i]\n )\n else:\n rospy.logwarn(\n '... this is not a recognition result, it is ' +\n 'probably just segmentation.')\n cluster = object_list.graspable_objects[i].cluster\n bbox = self._bb_service(cluster)\n cluster_pose = bbox.pose.pose\n if cluster_pose is not None:\n rospy.loginfo('Adding unrecognized object with pose:')\n World.log_pose(rospy.loginfo, cluster_pose)\n rospy.loginfo(\n '...in ref frame ' +\n str(bbox.pose.header.frame_id))\n self._add_new_object(\n cluster_pose, bbox.box_dims, False)\n else:\n rospy.logwarn('... but the list was empty.')\n self._lock.release()\n\n def update_object_pose(self):\n ''' Function to externally update an object pose.'''\n # Look down at the table.\n rospy.loginfo('Head attempting to look at table.')\n Response.perform_gaze_action(GazeGoal.LOOK_DOWN)\n while (Response.gaze_client.get_state() == GoalStatus.PENDING or\n Response.gaze_client.get_state() == GoalStatus.ACTIVE):\n rospy.sleep(PAUSE_SECONDS)\n if Response.gaze_client.get_state() != GoalStatus.SUCCEEDED:\n rospy.logerr('Could not look down to take table snapshot')\n return False\n rospy.loginfo('Head is now (successfully) stairing at table.')\n\n # Reset object recognition.\n rospy.loginfo('About to attempt to reset object recognition.')\n goal = UserCommandGoal(UserCommandGoal.RESET, False)\n self._object_action_client.send_goal(goal)\n while (self._object_action_client.get_state() == GoalStatus.ACTIVE or\n self._object_action_client.get_state() == GoalStatus.PENDING):\n rospy.sleep(PAUSE_SECONDS)\n rospy.loginfo('Object recognition has been reset.')\n rospy.loginfo('STATUS: ' +\n self._object_action_client.get_goal_status_text())\n self._reset_objects() # Also do this internally.\n if self._object_action_client.get_state() != GoalStatus.SUCCEEDED:\n rospy.logerr('Could not reset recognition.')\n return False\n\n # Do segmentation\n rospy.loginfo('About to attempt table segmentation.')\n goal = UserCommandGoal(UserCommandGoal.SEGMENT, False)\n self._object_action_client.send_goal(goal)\n while (self._object_action_client.get_state() == GoalStatus.ACTIVE or\n self._object_action_client.get_state() == GoalStatus.PENDING):\n rospy.sleep(PAUSE_SECONDS)\n rospy.loginfo('Table segmentation is complete.')\n rospy.loginfo(\n 'STATUS: ' + self._object_action_client.get_goal_status_text())\n if self._object_action_client.get_state() != GoalStatus.SUCCEEDED:\n rospy.logwarn('Could not segment.')\n return False\n\n # Do recognition\n rospy.loginfo('About to attempt object recognition.')\n goal = UserCommandGoal(UserCommandGoal.RECOGNIZE, False)\n self._object_action_client.send_goal(goal)\n while (self._object_action_client.get_state() == GoalStatus.ACTIVE or\n self._object_action_client.get_state() == GoalStatus.PENDING):\n rospy.sleep(PAUSE_SECONDS)\n rospy.loginfo('Objects on the table have been recognized.')\n rospy.loginfo(\n 'STATUS: ' + self._object_action_client.get_goal_status_text())\n\n # 
Record the result\n if self._object_action_client.get_state() == GoalStatus.SUCCEEDED:\n wait_time = rospy.Duration(0.0)\n while (not World.has_objects() and\n wait_time < RECOGNITION_TIMEOUT_SECONDS):\n rospy.sleep(PAUSE_SECONDS)\n wait_time += PAUSE_SECONDS\n\n if not World.has_objects():\n rospy.logerr('Timeout waiting for a recognition result.')\n return False\n else:\n rospy.loginfo('Got the object list.')\n return True\n else:\n rospy.logerr('Could not recognize.')\n return False\n\n def clear_all_objects(self):\n '''Removes all objects from the world.'''\n goal = UserCommandGoal(UserCommandGoal.RESET, False)\n self._object_action_client.send_goal(goal)\n while (self._object_action_client.get_state() == GoalStatus.ACTIVE or\n self._object_action_client.get_state() == GoalStatus.PENDING):\n rospy.sleep(PAUSE_SECONDS)\n rospy.loginfo('Object recognition has been reset.')\n rospy.loginfo('STATUS: ' +\n self._object_action_client.get_goal_status_text())\n if self._object_action_client.get_state() == GoalStatus.SUCCEEDED:\n rospy.loginfo('Successfully reset object localization pipeline.')\n self._reset_objects()\n self._remove_surface()\n\n def get_nearest_object(self, arm_pose):\n '''Returns the nearest object, if one exists.\n\n Args:\n arm_pose (Pose): End-effector pose.\n\n Returns:\n Object|None: As in Object.msg, the nearest object (if it\n is close enough), or None if there were none close\n enough.\n '''\n # First, find which object is the closest.\n distances = []\n for wobj in World.objects:\n dist = World.pose_distance(wobj.object.pose, arm_pose)\n distances.append(dist)\n\n # Then, see if the closest is actually below our threshhold for\n # a 'closest object.'\n if len(distances) > 0:\n if min(distances) < OBJ_NEAREST_DIST_THRESHHOLD:\n chosen = distances.index(min(distances))\n return World.objects[chosen].object\n\n # We didn't have any objects or none were close enough.\n return None\n\n def marker_feedback_cb(self, feedback):\n '''Callback for when feedback from a marker is received.\n\n Args:\n feedback (InteractiveMarkerFeedback)\n '''\n if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:\n rospy.loginfo('Clicked on object ' + str(feedback.marker_name))\n rospy.loginfo('Number of objects ' + str(len(World.objects)))\n else:\n # This happens a ton, and doesn't need to be logged like\n # normal events (e.g. 
clicking on most marker controls\n # fires here).\n rospy.logdebug('Unknown event: ' + str(feedback.event_type))\n\n def update(self):\n '''Update function called in a loop.\n\n Returns:\n bool: Whether any tracked objects were removed, AKA \"is\n world changed.\"\n '''\n # Visualize the detected object\n is_world_changed = False\n self._lock.acquire()\n if World.has_objects():\n to_remove = None\n for i in range(len(World.objects)):\n self._publish_tf_pose(\n World.objects[i].object.pose,\n World.objects[i].get_name(),\n BASE_LINK\n )\n if World.objects[i].is_removed:\n to_remove = i\n if to_remove is not None:\n self._remove_object(to_remove)\n is_world_changed = True\n\n self._lock.release()\n return is_world_changed\n\n # ##################################################################\n # Instance methods: Internal (\"private\")\n # ##################################################################\n\n def _reset_objects(self):\n '''Removes all objects.'''\n self._lock.acquire()\n for wobj in World.objects:\n self._im_server.erase(wobj.int_marker.name)\n self._im_server.applyChanges()\n if self.surface is not None:\n self._remove_surface()\n self._im_server.clear()\n self._im_server.applyChanges()\n World.objects = []\n self._lock.release()\n\n def _add_new_object(self, pose, dimensions, is_recognized, mesh=None):\n '''Maybe add a new object with the specified properties to our\n object list.\n\n It might not be added if too similar of an object already\n exists (and has been added).\n\n Args:\n pose (Pose)\n dimensions (Vector3)\n is_recognized (bool)\n mesh (Mesh, optional): A mesh, if it exists. Default is\n None.\n\n Returns:\n bool: Whether the object was actually added.\n '''\n to_remove = None\n if is_recognized:\n # TODO(mbforbes): Re-implement object recognition or remove\n # this dead code.\n return False\n # # Check if there is already an object\n # for i in range(len(World.objects)):\n # distance = World.pose_distance(\n # World.objects[i].object.pose, pose)\n # if distance < OBJ_ADD_DIST_THRESHHOLD:\n # if World.objects[i].is_recognized:\n # rospy.loginfo(\n # 'Previously recognized object at the same ' +\n # 'location, will not add this object.')\n # return False\n # else:\n # rospy.loginfo(\n # 'Previously unrecognized object at the same ' +\n # 'location, will replace it with the recognized '+\n # 'object.')\n # to_remove = i\n # break\n\n # # Remove any duplicate objects.\n # if to_remove is not None:\n # self._remove_object(to_remove)\n\n # # Actually add the object.\n # self._add_new_object_internal(\n # pose, dimensions, is_recognized, mesh)\n # return True\n else:\n # Whether whether we already have an object at ~ the same\n # location (and if so, don't add).\n for wobj in World.objects:\n if (World.pose_distance(wobj.object.pose, pose)\n < OBJ_ADD_DIST_THRESHHOLD):\n rospy.loginfo(\n 'Previously detected object at the same location, ' +\n 'will not add this object.')\n return False\n\n # Actually add the object.\n self._add_new_object_internal(\n pose, dimensions, is_recognized, mesh)\n return True\n\n def _add_new_object_internal(self, pose, dimensions, is_recognized, mesh):\n '''Does the 'internal' adding of an object with the passed\n properties. 
Call _add_new_object to do all pre-requisite checks\n first (it then calls this function).\n\n Args:\n pose (Pose)\n dimensions (Vector3)\n is_recognized (bool)\n mesh (Mesh|None): A mesh, if it exists (can be None).\n '''\n n_objects = len(World.objects)\n World.objects.append(WorldObject(\n pose, n_objects, dimensions, is_recognized))\n int_marker = self._get_object_marker(len(World.objects) - 1)\n World.objects[-1].int_marker = int_marker\n self._im_server.insert(int_marker, self.marker_feedback_cb)\n self._im_server.applyChanges()\n World.objects[-1].menu_handler.apply(\n self._im_server, int_marker.name)\n self._im_server.applyChanges()\n\n def _remove_object(self, to_remove):\n '''Remove an object by index.\n\n Args:\n to_remove (int): Index of the object to remove in\n World.objects.\n '''\n obj = World.objects.pop(to_remove)\n rospy.loginfo('Removing object ' + obj.int_marker.name)\n self._im_server.erase(obj.int_marker.name)\n self._im_server.applyChanges()\n # TODO(mbforbes): Re-implement object recognition or remove\n # this dead code.\n # if (obj.is_recognized):\n # for i in range(len(World.objects)):\n # if ((World.objects[i].is_recognized)\n # and World.objects[i].index > obj.index):\n # World.objects[i].decrease_index()\n # self.n_recognized -= 1\n # else:\n # for i in range(len(World.objects)):\n # if ((not World.objects[i].is_recognized) and\n # World.objects[i].index > obj.index):\n # World.objects[i].decrease_index()\n # self.n_unrecognized -= 1\n\n def _remove_surface(self):\n '''Function to request removing surface (from IM).'''\n rospy.loginfo('Removing surface')\n self._im_server.erase('surface')\n self._im_server.applyChanges()\n self.surface = None\n\n def _get_object_marker(self, index, mesh=None):\n '''Generate and return a marker for world objects.\n\n Args:\n index (int): ID for the new marker.\n mesh (Mesh, optional): Mesh to use for the marker. Only\n utilized if not None. 
Defaults to None.\n\n Returns:\n InteractiveMarker\n '''\n int_marker = InteractiveMarker()\n int_marker.name = World.objects[index].get_name()\n int_marker.header.frame_id = 'base_link'\n int_marker.pose = World.objects[index].object.pose\n int_marker.scale = 1\n\n button_control = InteractiveMarkerControl()\n button_control.interaction_mode = InteractiveMarkerControl.BUTTON\n button_control.always_visible = True\n\n object_marker = Marker(\n type=Marker.CUBE,\n id=index,\n lifetime=MARKER_DURATION,\n scale=World.objects[index].object.dimensions,\n header=Header(frame_id=BASE_LINK),\n color=COLOR_OBJ,\n pose=World.objects[index].object.pose\n )\n\n if mesh is not None:\n object_marker = World._get_mesh_marker(object_marker, mesh)\n button_control.markers.append(object_marker)\n\n text_pos = Point()\n text_pos.x = World.objects[index].object.pose.position.x\n text_pos.y = World.objects[index].object.pose.position.y\n text_pos.z = (\n World.objects[index].object.pose.position.z +\n World.objects[index].object.dimensions.z / 2 + OFFSET_OBJ_TEXT_Z)\n button_control.markers.append(\n Marker(\n type=Marker.TEXT_VIEW_FACING,\n id=index,\n scale=SCALE_TEXT,\n text=int_marker.name,\n color=COLOR_TEXT,\n header=Header(frame_id=BASE_LINK),\n pose=Pose(text_pos, Quaternion(0, 0, 0, 1))\n )\n )\n int_marker.controls.append(button_control)\n return int_marker\n\n def _publish_tf_pose(self, pose, name, parent):\n ''' Publishes a TF for object named name with pose pose and\n parent reference frame parent.\n\n Args:\n pose (Pose): The object's pose.\n name (str): The object's name.\n parent (str): The parent reference frame.\n '''\n if pose is not None:\n pp = pose.position\n po = pose.orientation\n pos = (pp.x, pp.y, pp.z)\n rot = (po.x, po.y, po.z, po.w)\n # TODO(mbforbes): Is it necessary to change the position\n # and orientation into tuples to send to TF?\n self._tf_broadcaster.sendTransform(\n pos, rot, rospy.Time.now(), name, parent)\n","sub_path":"pr2_pbd_interaction/src/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":38042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"141480302","text":"import numpy as np\n\n# The right ascending red diagonal has the squared numbers\nindex = np.array(range(1,1002,2))\nra = index**2\n\n# The left ascending is obtained by subtrating from ra the (index - 1)\n# and so on, for the other diagonals in a conter clockwise fashion\nla = np.zeros((501,)).astype(int)\nld = np.zeros((501,)).astype(int)\nrd = np.zeros((501,)).astype(int)\n\nfor i in range(0,len(la)):\n la[i] = ra[i]-(index[i]-1)\n ld[i] = la[i]-(index[i]-1)\n rd[i] = ld[i]-(index[i]-1)\n\n# the -3 is to compensate for the extra 1s\nprint('Answer: ' + str(np.sum(ra)+np.sum(la)+np.sum(ld)+np.sum(rd) - 3))","sub_path":"python/028/028.py","file_name":"028.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"525960397","text":"\ndef mergesort(lst):\n if len(lst) <= 1:\n return lst\n if len(lst) == 2:\n if lst[0] <= lst[1]:\n return lst\n else:\n return lst[::-1]\n\n return merge(mergesort(lst[:len(lst)/2]), mergesort(lst[len(lst)/2:]))\n\ndef merge(lst1, lst2):\n result = []\n pt1 = pt2 = 0\n length1 = len(lst1)\n length2 = len(lst2)\n\n while pt1 outro.tempo\r\n\r\n'''\r\nSOLUCAO da QUESTAO 3.B: funcao vencedorDaCorrida\r\n'''\r\n\r\ndef vencedorDaCorrida(corredores):\r\n vencedor = corredores[0]\r\n \r\n for i in range(1, 
len(corredores)):\r\n if vencedor.tempo > corredores[i].tempo:\r\n vencedor = corredores[i]\r\n \r\n return vencedor\r\n\r\n\r\n'''\r\nPRINCIPAL: retire os # para testar suas respostas\r\n'''\r\n\r\nc1 = Corredor(222,'Buba',2,30,15)\r\nc2 = Corredor(999,'Nana',1,35,20)\r\nc3 = Corredor(666,'Lulu',1,20,40)\r\nc4 = Corredor(777,'Vivi',2,10,12)\r\nlcorredores = [c1,c2,c3,c4]\r\nprint(vencedorDaCorrida(lcorredores))","sub_path":"classes/class-08/ex-03/corredor.py","file_name":"corredor.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"287420247","text":"import numpy\n\nfrom tensorflow.data import Dataset\n\n\ndef input_fn_train(\n\tfeatures,\n\tlabels,\n\tbatch_size = 1,\n\tepochs = 1,\n\tshuffle = True\n\t):\n\t\"\"\" input function for tensorflow.estimator.LinearClassifier.train()\n\n\tArgs:\n\t\tfeatures: a 2-dimensional numpy.array()\n\t\tlabels: a numpy.array() vector\n\t\tbatch_size: is batch_size\n\t\tepochs: number of epochs to perform\n\t\tshuffle: shuffles the data before return if True\n\n\tReturn:\n\t\t(dict(feature), labels)\n\t\"\"\"\n\tdata = dict()\n\n\tfor index in range(len(features[0])):\n\t\tdata.update(\n\t\t\t{index:numpy.array([i[index] for i in features])}\n\t\t)\n\n\tdataset = Dataset.from_tensor_slices((data, labels))\n\n\tif shuffle:\n\t\tdataset = dataset.shuffle(1000).repeat(epochs).batch(batch_size)\n\n\treturn dataset\n","sub_path":"tensorflow_estimator/input_function.py","file_name":"input_function.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"173915395","text":"import argparse\nimport sentencepiece as spm\nimport tqdm\nimport os\nimport gzip\nimport ujson\nfrom dataset.augmented_javascript.utils.jsonl_dataset import JSONLinesDataset, normalize_docstring\nfrom dataset.augmented_javascript.utils.util import normalize_program\n# from dataset.codesearchnet.utils.codebert_utils import vocab2dict\n\nfrom dataset.augmented_javascript import (\n DATASET_DIR, RAW_DATA_DIR,\n)\n\n\ndef make_corpus(input, output):\n dataset = JSONLinesDataset(input, {\"function\": \"function\", \"docstring\": \"docstring\"})\n print(\"Number of functions:\", len(dataset))\n print(\"Example original:\", dataset[0][\"function\"])\n print(\"Example normalized:\", normalize_program(dataset[0][\"function\"]))\n print(\"Example normalized docstring:\", normalize_docstring(dataset[0][\"docstring\"]))\n\n with open(output, \"w\", encoding='utf8') as f:\n for ex in tqdm.tqdm(dataset, \"Writing corpus to txt\"):\n # Write docstring\n if ex[\"docstring\"]:\n print(normalize_docstring(ex[\"docstring\"]), file=f)\n # Write normalized function\n function = ex[\"function\"]\n line = normalize_program(function)\n print(line, file=f)\n\n print(\"Wrote corpus to:\", output)\n\n\ndef spm_train(\n input: str, model_prefix: str, vocab_size: int, character_coverage=0.9995, model_type='unigram'\n): # , input_sentence_size: int, shuffle_input_sentence: str):\n # command = f\"--input={input} --model_prefix={model_prefix} --vocab_size={vocab_size} --character_coverage={character_coverage} --model_type={model_type} --input_sentence_size={input_sentence_size} --shuffle_input_sentence={shuffle_input_sentence}\"\n command = f\"--input={input} --model_prefix={model_prefix} --vocab_size={vocab_size} \" \\\n f\"--character_coverage={character_coverage} --model_type={model_type} --pad_id=0 --bos_id=1 --eos_id=2 
--unk_id=3\" \\\n f\" --unk_piece=[UNK] --pad_piece=[PAD] --user_defined_symbols=[CLS],[SEP],[MASK],[EOL],[URL] --hard_vocab_limit=false\"\n print(command)\n spm.SentencePieceTrainer.Train(command)\n\n\nif __name__ == \"__main__\":\n # fire.Fire({\"make_corpus\": make_corpus, \"spm_train\": spm_train})\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--format\", type=str, default='piece', help='id(num)/piece(str)')\n parser.add_argument(\"--vocab-size\", type=int, default=8000, help='token dictionary size')\n parser.add_argument(\"--src-dir\", type=str, default=RAW_DATA_DIR, help='source data')\n parser.add_argument(\"--tgt-dir\", type=str, default=os.path.join(DATASET_DIR, 'codebert/code_roberta/data-mmap'),\n help='save dir for sentencepiece bpe models or save files')\n # parser.add_argument(\"--tgt-dir\", type=str, default=os.path.join(DATASET_DIR, 'contracode/data-raw/'),\n # help='save dir for sentencepiece bpe models or save files')\n parser.add_argument(\"--model-type\", type=str, default='unigram', help='source data')\n parser.add_argument(\"--model-prefix\", type=str, default='csnjs_8k_9995p_unigram_url', help='source data')\n\n # parser.add_argument(\"--bpe-dir\", type=str, default='wordpiece_bpe', help='wordpiece_bpe modal save direction')\n parser.add_argument(\"--keep-empty\", type=bool, default=True, help=\"keep empty lines\")\n parser.add_argument(\"--overwrite\", type=bool, default=False, help=\"build BPE model for files\")\n # parser.add_argument(\"--insert\", type=bool, help='insert CLS/S_SEP')\n parser.add_argument(\"--workers\", type=int, default=100, help='multi-processors number')\n args = parser.parse_args()\n\n os.makedirs(args.src_dir, exist_ok=True)\n os.makedirs(args.tgt_dir, exist_ok=True)\n\n input = os.path.join(args.src_dir, 'javascript_dedupe_definitions_nonoverlap_v2_train.jsonl')\n output = os.path.join(args.tgt_dir, 'javascript_dedupe_definitions_nonoverlap_v2_train.json')\n # 1. make corpus\n make_corpus(input, output)\n # exit()\n # 2. 
spm_train\n model_prefix = os.path.join(args.tgt_dir, args.model_prefix)\n spm_train(output, model_prefix=model_prefix, vocab_size=args.vocab_size, model_type=args.model_type)\n # vocab2dict(vocab_file='{}.vocab'.format(model_prefix))\n","sub_path":"dataset/augmented_javascript/run_sentencepiece.py","file_name":"run_sentencepiece.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"508110104","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport seaborn as sns\nfrom sklearn.svm import SVC #Support Vector Classifier\nfrom sklearn.datasets.samples_generator import make_blobs\n\nsns.set()\n\n#generate random data X and y using sklearn make_blobs and plot it.\nX,y = make_blobs(n_samples=50,centers=2,cluster_std=0.60,random_state=0)\n#plt.scatter(X[:, 0], X[:, 1], c=y,s=50,cmap=\"autumn\")\n#plt.show()\n'''\n# creating line space\nxfit = np.linspace(-1,3.5)\nplt.scatter(X[:, 0], X[:, 1], c=y,s=50,cmap=\"autumn\")\n\n# plot a line between the different sets of data\n#Maximizing the Margin\nfor m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:\n yfit = m * xfit + b\n plt.plot(xfit, yfit, '-k')\n plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)\nplt.xlim(-1, 3.5)\nplt.show()\n'''\n\n#Fitting a support vector machine\nmodel = SVC(kernel='linear',C=1E10)\nmodel.fit(X, y)\n\n#plot SVM decision boundaries\ndef plot_svc_decision_function(model, ax=None, plot_support=True):\n \"\"\"Plot the decision function for a 2D SVC\"\"\"\n if ax is None:\n ax = plt.gca()\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n\n #create grid to evaluate model\n x = np.linspace(xlim[0], xlim[1], 30)\n y = np.linspace(ylim[0], ylim[1], 30)\n Y, X = np.meshgrid(y, x)\n xy = np.vstack([X.ravel(),Y.ravel()]).T\n P = model.decision_function(xy).reshape(X.shape)\n\n #plot decision boundary and margins\n ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5,\n linestyles=['--', '-', '--'])\n\n #plot support vectors\n if plot_support:\n ax.scatter(model.support_vectors_[:,0],\n model.support_vectors_[:,1],\n s=300,linewidth=1,facecolors=\"none\")\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')\n#plot_svc_decision_function(model)\nplt.show()\n#This is the dividing line that maximizes the margin between\n#the two sets of points. Notice that a few of the training points just\n#touch the margin, these points are the pivotal(关键的)elements of this fit,\n#and are known as the support vectors\n'''\n#plot the model learned from the first 60 points and\n#first 120 points of this dataset\ndef plot_svm(N=10, ax=None):\n X, y = make_blobs(n_samples=200, centers=2,\n random_state=0, cluster_std=0.60)\n X = X[:N]\n y = y[:N]\n model = SVC(kernel='linear', C=1E10)\n model.fit(X, y)\n\n ax = ax or plt.gca()\n ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')\n ax.set_xlim(-1, 4)\n ax.set_ylim(-1, 6)\n plot_svc_decision_function(model, ax)\n\nfig, ax = plt.subplots(1, 2, figsize=(16, 6))\nfig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)\nfor axi, N in zip(ax, [60, 120]):\n plot_svm(N, axi)\n axi.set_title('N = {0}'.format(N))\nplt.show()\n#In the left panel, we see the model and the support vectors for 60 training\n#points. 
In the right panel, we have doubled the number of training points,\n#but the model has not changed: the three support vectors from the left panel\n#are still the support vectors from the right panel. This insensitivity to the\n#exact behavior of distant points is one of the strengths of the SVM model.\n'''\n","sub_path":"svmFromScratch.py","file_name":"svmFromScratch.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"226242584","text":"import tkinter as tk\nimport cv2\nimport numpy as np\nimport scipy\nfrom scipy.misc.pilutil import imread\nimport pickle\nimport random\nimport os\nimport matplotlib.pyplot as plt\n\ndef get_database():\n\treturn (var_masukan_db.get())\n\ndef get_user():\n\treturn (var_masukan_user.get())\n\n# Feature extractor\ndef extract_features(image_path, vector_size=32):\n image = imread(image_path, mode=\"RGB\")\n try:\n # Using KAZE, cause SIFT, ORB and other was moved to additional module\n # which is adding addtional pain during install\n alg = cv2.KAZE_create()\n # Dinding image keypoints\n kps = alg.detect(image)\n # Getting first 32 of them. \n # Number of keypoints is varies depend on image size and color pallet\n # Sorting them based on keypoint response value(bigger is better)\n kps = sorted(kps, key=lambda x: -x.response)[:vector_size]\n # computing descriptors vector\n kps, dsc = alg.compute(image, kps)\n # Flatten all of them in one big vector - our feature vector\n dsc = dsc.flatten()\n # Making descriptor of same size\n # Descriptor vector size is 64\n needed_size = (vector_size * 64)\n if dsc.size < needed_size:\n # if we have less the 32 descriptors then just adding zeros at the\n # end of our feature vector\n dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])\n except cv2.error as e:\n print( 'Error: ', e)\n return None\n\n return dsc\n\ndef dotMultiplication (vector1, vector2):\n\t#res = [ [0 for i in range (len(vector1[0]))] for i in range (len(vector2))]\n\tres = 0\n\tfor i in range (len(vector1)):\n\t\tres += float(vector1[i]) * float(vector2[i])\n\treturn res\n\ndef subs (vector1, vector2):\n\tres = [0 for i in range (len(vector1))]\n\tfor i in range (len(vector1)):\n\t\tres[i] = float(vector1[i]) - float(vector2[i])\n\treturn res\n\ndef squareEachElmt (vector):\n\tres = [0 for i in range (len(vector))]\n\tfor i in range (len(vector)):\n\t\tres[i] = vector[i]**2\n\treturn res\n\ndef sumsEachElmt (vector):\n\tres = 0\n\tfor i in range (len(vector)):\n\t\tres += float(vector[i])\n\treturn res\n\ndef distEuclidean (vector1, vector2):\n\tsubs_ = subs(vector1, vector2)\n\tsquare_ = squareEachElmt(subs_)\n\tsums_ = sumsEachElmt(square_)\n\treturn (sums_**0.5)\n\ndef lenOfVector (vector):\n\tsquare_ = squareEachElmt(vector)\n\tsums_ = sumsEachElmt(square_)\n\treturn (sums_**0.5)\n\ndef cosine (vector1, vector2):\n\tdotProd = dotMultiplication(vector1, vector2)\n\tlenOfV1 = lenOfVector(vector1)\n\tlenOfV2 = lenOfVector(vector2)\n\treturn dotProd/(lenOfV1*lenOfV2)\n\ndef newVec (vector, row, col):\n\tres = [[0 for i in range (len(vector[0])+col)] for i in range (len(vector)+row)]\n\tfor i in range (len(vector)):\n\t\tfor j in range (len(vector[0])):\n\t\t\tres[i][j] = vector[i][j]\n\treturn res\n\ndef npToList (np):\n\tres = [[0 for i in range (len(np[0]))] for i in range (len(np))]\n\tfor i in range (len(np)):\n\t\tfor j in range (len(np[0])):\n\t\t\tres[i][j] = np[i][j]\n\treturn res\n\ndef show_img (path):\n\timg = imread(path, 
mode=\"RGB\")\n\tplt.title('Foto Input Pengguna')\n\tplt.imshow(img)\n\tplt.show()\n\ndef show_img_euclidean(path, count, value):\n img = imread(path, mode=\"RGB\")\n judul = \"Ranking \" + str(count)\n plt.title(judul)\n x = \"Nilai Jarak Euclidean \" + str(value)\n plt.xlabel(x)\n plt.imshow(img)\n plt.show()\n\ndef show_img_cosine(path, count, value):\n img = imread(path, mode=\"RGB\")\n judul = \"Ranking \" + str(count)\n plt.title(judul)\n x = \"Nilai Cosine \" + str(value)\n plt.xlabel(x)\n plt.imshow(img)\n plt.show()\n \n#images_path = 'D:\\\\QA\\\\kuliah\\\\sems 3\\\\algeo\\\\python\\\\face-recognition\\\\images'\ndef runCosine():\n\timages_path = get_database()\n\timg_db = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]\n\timg_input = get_user()\n\tinput_vector = extract_features(img_input)\n\tdb_vectors = [0 for i in range (len(img_db))]\n\tcos_res = [0 for i in range (len(img_db))]\n\tfor i in range (len(img_db)):\n\t\tdb_vectors[i] = extract_features(img_db[i])\n\t\tcos_res[i] = cosine(input_vector, db_vectors[i])\n\tfor i in range(len(cos_res)):\n\t\tfor j in range(i + 1, len(cos_res)):\n\t\t\tif cos_res[i] < cos_res[j]:\n\t\t\t\tcos_res[i], cos_res[j] = cos_res[j], cos_res[i]\n\t\t\t\timg_db[i], img_db[j] = img_db[j],img_db[i]\n\tshow_img(img_input)\n\tcount = 0\n\ti = 0\n\tfor path in img_db:\n\t\tcount += 1\n\t\tshow_img_cosine(path, count, cos_res[i])\n\t\ti += 1\n\ndef runEuclidean():\n\timages_path = get_database()\n\timg_db = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]\n\timg_input = get_user()\n\tinput_vector = extract_features(img_input)\n\tdb_vectors = [0 for i in range (len(img_db))]\n\tdist_res = [0 for i in range (len(img_db))]\n\tfor i in range (len(img_db)):\n\t\tdb_vectors[i] = extract_features(img_db[i])\n\t\tdist_res[i] = distEuclidean(input_vector, db_vectors[i])\n\tfor i in range(len(dist_res)):\n\t\tfor j in range(i + 1, len(dist_res)):\n\t\t\tif dist_res[i] > dist_res[j]:\n\t\t\t\tdist_res[i], dist_res[j] = dist_res[j], dist_res[i]\n\t\t\t\timg_db[i], img_db[j] = img_db[j],img_db[i]\n\tshow_img(img_input)\n\tcount = 0\n\ti = 0\n\tfor path in img_db:\n\t\tcount += 1\n\t\tshow_img_euclidean(path, count, dist_res[i])\n\t\ti += 1\n\nroot = tk.Tk()\nvar_masukan_db = tk.StringVar()\nvar_masukan_user = tk.StringVar()\ncanvas1 = tk.Canvas(root, width = 400, height = 350, relief = 'raised')\ncanvas1.pack()\n\nlab1 = tk.Label(root, text='Deteksi Kemiripan Gambar')\nlab1.config(font=('System', 15, 'bold'))\ncanvas1.create_window(200, 25, window=lab1)\n\nlab2 = tk.Label(root, text=\"Masukkan direktori basis data gambar\")\nlab2.config(font=('System', 10))\ncanvas1.create_window(200, 70, window=lab2)\n\nmasukan_db = tk.Entry(root, textvariable = var_masukan_db)\ncanvas1.create_window(200, 110, window = masukan_db)\n\nlab3 = tk.Label(root, text=\"Masukkan direktori basis data gambar yang akan Anda cek\")\nlab3.config(font=('System', 10))\ncanvas1.create_window(200, 140, window=lab3)\n\nmasukan_user = tk.Entry(root, textvariable = var_masukan_user)\ncanvas1.create_window(200, 180, window = masukan_user)\n\nlab4 = tk.Label(root, text=\"Pilih Salah Satu Metode Kemiripan\")\nlab4.config(font=('System', 10))\ncanvas1.create_window(200, 220, window = lab4)\n\ncosine_button = tk.Button(text=\"Cosine\", command=runCosine, fg='black', font=('System', 9, 'bold'))\ncanvas1.create_window(200, 260, window = cosine_button)\n\neuclidean_button = tk.Button(text=\"Euclidean Distance\", command=runEuclidean, fg='black', 
font=('System', 9, 'bold'))\ncanvas1.create_window(200, 300, window = euclidean_button)\n\nroot.mainloop()","sub_path":"ALGEO/TUBES2/img-similarity.py","file_name":"img-similarity.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"473236452","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nfrom pprint import pprint\nfrom pygama import DataGroup\n\ndef main():\n \"\"\"\n \"\"\"\n\n\n # analyze_lpgta()\n # analyze_cage()\n # analyze_hades()\n # analyze_ornl()\n analyze_surf()\n\n\ndef analyze_lpgta():\n\n dg = DataGroup('LPGTA.json')\n\n dg.lh5_dir_setup()\n dg.scan_daq_dir()\n\n # -- experiment-specific choices --\n\n # run 1 & 2 files don't match template\n dg.fileDB.query('run > 2', inplace=True)\n\n dg.fileDB.sort_values(['run','YYYYmmdd','hhmmss'], inplace=True)\n dg.fileDB.reset_index(drop=True, inplace=True)\n\n def get_cmap(row):\n row['cmap'] = dg.runDB[f\"{row['run']:0>4d}\"][\"cmap\"]\n return row\n\n dg.fileDB = dg.fileDB.apply(get_cmap, axis=1)\n\n dg.fileDB['runtype'] = dg.fileDB['rtp']\n\n dg.get_lh5_cols()\n\n dg.save_df('./LPGTA_fileDB.h5')\n\n print(dg.fileDB)\n\n\ndef analyze_cage():\n\n dg = DataGroup('CAGE.json')\n dg.lh5_dir_setup()\n\n dg.scan_daq_dir()\n\n # -- experiment-specific choices --\n dg.fileDB.sort_values(['cycle'], inplace=True)\n dg.fileDB.reset_index(drop=True, inplace=True)\n\n def get_cyc_info(row):\n \"\"\"\n map cycle numbers to physics runs, and identify detector\n \"\"\"\n cyc = row['cycle']\n for run, cycles in dg.runDB.items():\n tmp = cycles[0].split(',')\n for rng in tmp:\n if '-' in rng:\n clo, chi = [int(x) for x in rng.split('-')]\n if clo <= cyc <= chi:\n row['run'] = run\n break\n else:\n clo = int(rng)\n if cyc == clo:\n row['run'] = run\n break\n # label the detector ('runtype' matches 'run_types' in config file)\n if cyc < 126:\n row['runtype'] = 'oppi'\n else:\n row['runtype'] = 'icpc'\n return row\n\n dg.fileDB = dg.fileDB.apply(get_cyc_info, axis=1)\n\n dg.get_lh5_cols()\n\n for col in ['run']:\n dg.fileDB[col] = pd.to_numeric(dg.fileDB[col])\n\n print(dg.fileDB)\n\n dg.save_df('CAGE_fileDB.h5')\n\n\ndef analyze_hades():\n \"\"\"\n \"\"\"\n dg = DataGroup('HADES.json')\n\n dg.lh5_dir_setup()\n # dg.lh5_dir_setup(create=True)\n\n dg.scan_daq_dir()\n\n # -- experiment-specific stuff --\n dg.fileDB['runtype'] = dg.fileDB['detSN']\n\n # add a sortable timestamp column\n def get_ts(row):\n ts = f\"{row['YYmmdd']} {row['hhmmss']}\"\n row['date'] = pd.to_datetime(ts, format='%y%m%d %H%M%S')\n return row\n dg.fileDB = dg.fileDB.apply(get_ts, axis=1)\n dg.fileDB.sort_values('date', inplace=True)\n\n dg.get_lh5_cols()\n print(dg.fileDB['raw_file'].values)\n\n dg.save_df('HADES_fileDB.h5')\n\n\ndef analyze_ornl():\n\n dg = DataGroup('ORNL.json')\n # dg.lh5_dir_setup()\n dg.scan_daq_dir()\n\n # expt-specific organization\n dg.fileDB.sort_values(['cycle'], inplace=True)\n dg.fileDB.reset_index(drop=True, inplace=True)\n\n dg.save_keys()\n dg.load_keys()\n print(dg.fileDB)\n\n\n\ndef analyze_surf():\n \"\"\"\n \"\"\"\n dg = DataGroup('SURFCHAR.json')\n\n dg.lh5_dir_setup()\n # dg.lh5_dir_setup(create=True)\n\n dg.scan_daq_dir()\n\n # -- experiment-specific choices --\n dg.fileDB.sort_values(['cycle'], inplace=True)\n dg.fileDB.reset_index(drop=True, inplace=True)\n\n # TODO: adapt \"get_cyc_info\" function from analyze_cage to\n # fill in serial numbers for each detector\n dg.fileDB['runtype'] = \"P9999A\"\n\n 
dg.get_lh5_cols()\n\n # print(dg.fileDB.query('cycle < 10'))\n\n dg.save_df('SURFCHAR_fileDB.h5')\n\n\nif __name__=='__main__':\n main()\n","sub_path":"attic/experiments/datagroup/test_datagroup.py","file_name":"test_datagroup.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"76631271","text":"from django.contrib.sites.models import Site\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import activate, get_language\n\nfrom . import settings as subs_settings\nfrom .models import QueuedEmail, SendStatus\n\n\ndef to_queue(content, **kwargs):\n lang = content.get(\"lang\", None) or kwargs.get(\"lang\", None)\n orglang = get_language()\n if lang:\n activate(lang)\n else:\n activate(orglang)\n site = Site.objects.get_current()\n msg = render_to_string(\n \"subscriptions/msg.html\",\n {\n \"title\": content[\"title\"],\n \"content\": content[\"body\"],\n \"site_name\": site.name,\n \"site_domain\": site.domain,\n },\n )\n qm = QueuedEmail(\n subject=\"%s %s\" % (subs_settings.NEWS_SUBJECT_PREFIX, content[\"title\"]),\n body=msg,\n lang=lang,\n )\n qm.save()\n activate(orglang)\n\n\ndef send_queued_mail(qm, maxmails):\n sts = SendStatus.objects.filter(queued_email=qm)[:maxmails]\n qm.send_to_all(sts)\n sst2 = SendStatus.objects.filter(queued_email=qm)\n if not sst2 and subs_settings.DELETE_QUEUED_MAILS:\n qm.delete()\n return len(sts)\n\n\ndef send_max(max_send=subs_settings.MAX_PER_TIME):\n qms = QueuedEmail.objects.all()\n num_sended = 0\n for qm in qms:\n maxmails = max_send - num_sended\n if maxmails < 1:\n break\n num_sended += send_queued_mail(qm, maxmails)\n\n\n# influenced by:\n# http://stackoverflow.com/questions/7583801/send-mass-emails-with-emailmultialternatives\n","sub_path":"pagetools/subscriptions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"558730376","text":"#!venv/bin/python\n# -*- coding: utf-8 -*-\n\nimport random\nimport redis\n\nconn = redis.Redis()\nchannels = ['demo']\n# Redis subscribe\nsub = conn.pubsub()\n# sub redis channels.\nsub.subscribe(channels)\nfor msg in sub.listen():\n if msg['type'] == 'message':\n channel = msg['channel'].decode('utf-8')\n data = msg['data'].decode('utf-8')\n\n print('Subscribe: %s\\'s data: %s' % (channel, data))\n \n # 数据格式化后,再publish\n conn.publish('format_demo', ','.join(data.split()))","sub_path":"subpub.py","file_name":"subpub.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"196934079","text":"from functools import wraps\n\nimport requests\nfrom flask import request\n\nfrom fhir_api import settings\nfrom fhir_api.errors import AuthenticationError\n\n\ndef validate_token(token, scope=None):\n payload = {\"token\": token}\n if scope is not None:\n payload[\"scope\"] = scope\n\n response = requests.post(settings.TOKEN_INTROSPECTION_URL, data=payload)\n\n validation = response.json()\n\n return validation[\"active\"]\n\n\ndef auth_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n auth_header = request.headers.get(\"Authorization\", \"\")\n prefix = \"Bearer \"\n if not auth_header.startswith(prefix):\n raise AuthenticationError(\"Authorization header malformed or unexisting.\")\n\n token = auth_header[len(prefix) :]\n\n if not validate_token(token):\n raise 
AuthenticationError(\"Failed to verify token.\")\n\n return f(*args, **kwargs)\n\n return f if settings.AUTH_DISABLED else decorated_function\n","sub_path":"fhir-api/fhir_api/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"197878905","text":"import numpy as np\nimport pygame as pg\nimport math\n\n# Color constants\nBLACK = (0, 0, 0)\n\n# Plane display parameters\nline_width = 1\naxis_width = 3\nvec_width = 4\n\npoint_color = (200, 0, 0)\npoint_radius = 5\nfont_size = 20\nfont_offset = 30\n\n# Real line display parameters\ntick_mark_height = 10\nzero_mark_height = 20\n\n# Misc.\n# Point and label display parameters\nsnap_threshold = 6\n\n\n# TODO Known bugs: 1. clicking anywhere selects one number when zoomed in far\n# TODO 2. zooming in far causes problems in general (shaky screen, etc.)\n# TODO 3. quitting the application occurs incorrectly when a pygame window is open\n# TODO Nice-to-add features: Allow for unlimited range zooming and arithmetic\n# This class defines a complex number plane which supports selection of points and animated arithmetic\nclass ComplexPlane:\n\n def __init__(self, screen, spacing, half_range, phase=0, offset=0+0j, real_mode=False):\n self.screen = screen\n self.spacing = spacing\n self.half_range = half_range\n self.phase = phase\n self.pixel_spacing = 0\n self.update_pixel_spacing()\n self.offset = offset.real - offset.imag * 1j\n self.pixel_offset = (0, 0)\n self.update_pixel_offset()\n self.added_coords = []\n self.newly_added_coords = None\n self.renders = {}\n self.rects = {}\n self.displayed_coords = []\n self.padding = 0\n self.set_padding()\n self.real_mode = real_mode\n\n # Public\n\n # Draws a complex plane on the screen\n def display(self):\n self.add_grid()\n self.add_bold_axes()\n self.plot_all()\n self.display_coords()\n\n # Plots a point on the complex plane\n def plot_point(self, coords):\n pg.draw.circle(self.screen, point_color, self.convert_to_pixel_coords(coords), point_radius)\n\n # Allows the spacing of the plane to be altered\n def set_spacing(self, spacing):\n self.spacing = spacing\n self.update_pixel_spacing()\n\n # Allows the complex plane to be scaled\n def set_half_range(self, half_range):\n self.half_range = half_range\n self.update_pixel_spacing()\n self.update_pixel_offset()\n\n # Allows the plane to shift left, right, up, and down\n def set_offset(self, offset):\n self.offset = offset.real if self.real_mode else offset\n self.update_pixel_offset()\n\n # Allows the plane to be rotated\n def set_phase(self, phase):\n self.phase = phase\n\n # Draws a line from 'point_1' to 'point_2', where these are interpreted as points on the complex plane\n def draw_line(self, point_1, point_2, color):\n pixel_point_1 = self.convert_to_pixel_coords(point_1)\n pixel_point_2 = self.convert_to_pixel_coords(point_2)\n pg.draw.line(self.screen, color, pixel_point_1, pixel_point_2, vec_width)\n\n # Converts from pixel coordinates to coordinates on the complex plane\n def convert_to_plane_coords(self, pixel_coords):\n pixel_coords = self.rotate(pixel_coords, -self.phase)\n plane_x = ((pixel_coords[0] - self.pixel_offset[0]) / self.screen.get_width() - 0.5) * 2 * self.half_range\n plane_y = ((pixel_coords[1] - self.pixel_offset[1]) / self.screen.get_height() - 0.5) * 2 * self.half_range\n if self.real_mode:\n plane_y = 0\n return round(plane_x, 5) - round(plane_y, 5) * 1j\n\n # Converts from complex plane 
coordinates to pixel coordinates\n def convert_to_pixel_coords(self, plane_coords):\n pixel_x = (plane_coords.real / (2 * self.half_range) + 0.5) * self.screen.get_width() + self.pixel_offset[0]\n pixel_y = (-plane_coords.imag / (2 * self.half_range) + 0.5) * self.screen.get_height() + self.pixel_offset[1]\n pixel = self.rotate((pixel_x, pixel_y), self.phase)\n return int(round(pixel[0])), int(round(pixel[1]))\n\n # Displays text boxes with given coordinates on the complex plane, making sure there is no overlap.\n def display_coords(self):\n new_displayed_coords = []\n self.update_rects()\n for coords in self.rects:\n rect = self.rects[coords]\n if self.intersects_displayed_coords(rect):\n continue\n elif coords not in self.displayed_coords and coords != self.newly_added_coords:\n if self.intersects_rect_coords(rect):\n continue\n self.screen.blit(self.renders[coords], (rect.left, rect.top))\n new_displayed_coords.append(coords)\n self.displayed_coords = new_displayed_coords\n self.newly_added_coords = None\n\n # Adds coords to be displayed\n def add_coords(self, coords):\n self.added_coords.append(coords)\n self.renders[coords] = self.get_display_surf(coords)\n self.newly_added_coords = coords\n\n # removes coords so they are no longer plotted and displayed\n def remove_coords(self, coords):\n if coords in self.added_coords:\n self.added_coords.remove(coords)\n if coords not in self.added_coords:\n del self.renders[coords]\n if coords in self.displayed_coords:\n self.displayed_coords.remove(coords)\n\n # If the pixel coordinates are close enough to a grid point, converts them to the values at the grid point\n def snap_to_grid(self, pixel_coords):\n pixel_coords = self.rotate(pixel_coords, -self.phase)\n center_x = self.screen.get_width() / 2 + (self.pixel_offset[0] % self.pixel_spacing)\n center_y = self.screen.get_height() / 2 + (self.pixel_offset[1] % self.pixel_spacing)\n closest_horiz_coord = center_x + round((pixel_coords[0] - center_x) / self.pixel_spacing) * self.pixel_spacing\n closest_vert_coord = center_y + round((pixel_coords[1] - center_y) / self.pixel_spacing) * self.pixel_spacing\n if abs(closest_horiz_coord - pixel_coords[0]) <= snap_threshold \\\n and abs(closest_vert_coord - pixel_coords[1]) <= snap_threshold:\n return self.convert_to_plane_coords(self.rotate((closest_horiz_coord, closest_vert_coord), self.phase))\n return self.convert_to_plane_coords(self.rotate(pixel_coords, self.phase))\n\n # Private\n\n # Plots all current points on the complex plane\n def plot_all(self):\n for pt in self.added_coords:\n if self.in_screen(self.convert_to_pixel_coords(pt)):\n self.plot_point(pt)\n\n # Checks if a given coordinate rect would intersect any displayed coords\n def intersects_displayed_coords(self, rect):\n for coords in self.displayed_coords:\n if coords in self.rects and self.rects[coords].colliderect(rect) and self.rects[coords] != rect:\n return True\n return False\n\n # Checks if a given coordinate rect would intersect any other added coords\n def intersects_rect_coords(self, rect):\n for coords in self.rects:\n if self.rects[coords].colliderect(rect) and self.rects[coords] != rect:\n return True\n return False\n\n # Returns a surface for a numerical coordinate display.\n def get_display_surf(self, coords):\n font = pg.font.Font(None, font_size)\n imag_part = \"\" if self.real_mode else \" + \" + str(coords.imag) + \"i\"\n render = font.render(str(coords.real) + imag_part, True, BLACK)\n return render\n\n # Returns a rect for a numerical coordinate display\n def 
get_display_rect(self, coords):\n pos = (self.convert_to_pixel_coords(coords)[0], self.convert_to_pixel_coords(coords)[1] - font_offset)\n render = self.renders[coords]\n return render.get_rect(center=(pos[0] + render.get_width() / 2, pos[1] + render.get_height() / 2))\n\n # Generates all currrent coordinate display rects which will not cause an overflow\n def update_rects(self):\n self.rects.clear()\n for coords in self.added_coords:\n try:\n rect = self.get_display_rect(coords)\n if self.rect_in_screen(rect):\n self.rects[coords] = rect\n except TypeError:\n continue\n\n # Updates the value of 'pixel_spacing' to conform to changes in 'half_range' or 'spacing'\n def update_pixel_spacing(self):\n self.pixel_spacing = self.screen.get_width() / (2 * self.half_range) * self.spacing\n\n # Updates the value of 'pixel_offset' to conform to changes in 'half_range' or 'offset'\n def update_pixel_offset(self):\n x_offset = self.screen.get_width() / (2 * self.half_range) * self.offset.real\n y_offset = self.screen.get_height() / (2 * self.half_range) * self.offset.imag\n self.pixel_offset = x_offset, y_offset\n\n # Adds the main grid to the screen, or the main line with tick marks in real mode\n def add_grid(self):\n self.add_imag_lines()\n if not self.real_mode:\n self.add_real_lines()\n\n # Adds imaginary lines to the screen\n def add_imag_lines(self):\n for pair in self.imag_line_coords():\n pg.draw.line(self.screen, BLACK, pair[0], pair[1], line_width)\n\n # Adds real lines to the screen\n def add_real_lines(self):\n for pair in self.real_line_coords():\n pg.draw.line(self.screen, BLACK, pair[0], pair[1], line_width)\n\n # Adds bold real and imaginary axes to the screen\n def add_bold_axes(self):\n self.add_real_axis()\n self.add_imag_axis()\n\n # Adds a bold imaginary axis to the screen\n def add_imag_axis(self):\n imag_width = self.screen.get_width() / 2 + self.pixel_offset[0]\n top = (imag_width, self.zero_ys()[0] if self.real_mode else -self.padding)\n bottom = (imag_width, self.zero_ys()[1] if self.real_mode else self.screen.get_height() + self.padding)\n try:\n pg.draw.line(self.screen, BLACK, self.rotate(top, self.phase), self.rotate(bottom, self.phase), axis_width)\n except TypeError:\n return\n\n # Adds a bold real axis to the screen\n def add_real_axis(self):\n real_height = self.screen.get_height() / 2 + self.pixel_offset[1]\n left = (-self.padding, real_height)\n right = (self.screen.get_width() + self.padding, real_height)\n try:\n pg.draw.line(self.screen, BLACK, self.rotate(left, self.phase), self.rotate(right, self.phase), axis_width)\n except TypeError:\n return\n\n # Gets coords for imaginary lines which will be added to the screen\n def imag_line_coords(self):\n x_coords = self.x_coords()\n tops_y = [self.tick_ys()[0] if self.real_mode else -self.padding] * len(x_coords)\n tops = list(zip(x_coords, tops_y))\n bottoms_y = [self.tick_ys()[1] if self.real_mode else self.screen.get_height() + self.padding] * len(x_coords)\n bottoms = list(zip(x_coords, bottoms_y))\n return [(self.rotate(pair[0], self.phase), self.rotate(pair[1], self.phase)) for pair in zip(tops, bottoms)]\n\n # Gets coords for real lines which will be added to the screen\n def real_line_coords(self):\n y_coords = self.y_coords()\n lefts_x = [-self.padding] * len(y_coords)\n lefts = list(zip(lefts_x, y_coords))\n rights_x = [self.screen.get_width() + self.padding] * len(y_coords)\n rights = list(zip(rights_x, y_coords))\n return [(self.rotate(pair[0], self.phase), self.rotate(pair[1], self.phase)) for pair in 
zip(lefts, rights)]\n\n # Creates the x coords for vertical lines\n def x_coords(self):\n offset_center = self.screen.get_width() / 2 + (self.pixel_offset[0] % self.pixel_spacing)\n neg = np.arange(offset_center, -self.padding, -self.pixel_spacing)\n pos = np.arange(offset_center + self.pixel_spacing, self.screen.get_width() + self.padding, self.pixel_spacing)\n return np.append(neg, pos)\n\n # Creates the y coords for horizontal lines\n def y_coords(self):\n offset_center = self.screen.get_height() / 2 + (self.pixel_offset[1] % self.pixel_spacing)\n pos = np.arange(offset_center, -self.padding, -self.pixel_spacing)\n neg = np.arange(offset_center + self.pixel_spacing, self.screen.get_height() + self.padding, self.pixel_spacing)\n return np.append(pos, neg)\n\n # Performs a rotation of a point about the center of the screen\n def rotate(self, pt, angle):\n pt_minus_center = pt[0] - self.screen.get_width() / 2, pt[1] - self.screen.get_height() / 2\n rot_matrix = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])\n result = np.matmul(rot_matrix, np.array(pt_minus_center))\n return result[0] + self.screen.get_width() / 2, result[1] + self.screen.get_height() / 2\n\n # Sets the amount of padding to apply in case of rotation\n def set_padding(self):\n if self.screen.get_height() < self.screen.get_width():\n to_subtract = self.screen.get_height()\n else:\n to_subtract = self.screen.get_width()\n self.padding = (np.sqrt(self.screen.get_width() ** 2 + self.screen.get_height() ** 2) - to_subtract) / 2\n\n # Determines whether a rect for given coords is in the screen\n def rect_in_screen(self, rect):\n return self.in_screen(rect.topleft) or self.in_screen(rect.topright) or self.in_screen(rect.bottomleft) or \\\n self.in_screen(rect.bottomright)\n\n # Determines whether a given coordinate is within the display or not\n def in_screen(self, pt):\n return 0 <= pt[0] <= self.screen.get_width() and 0 <= pt[1] <= self.screen.get_height()\n\n # Returns the upper and lower y-coordinates of the tick marks in real mode\n def tick_ys(self):\n return self.screen.get_height() / 2 - tick_mark_height, self.screen.get_height() / 2 + tick_mark_height\n \n # Returns the upper and lower y-coordinates of the zero mark in real mode\n def zero_ys(self):\n return self.screen.get_height() / 2 - zero_mark_height, self.screen.get_height() / 2 + zero_mark_height\n","sub_path":"ComplexPlane.py","file_name":"ComplexPlane.py","file_ext":"py","file_size_in_byte":13904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"205429595","text":"from tensor2tensor.layers import common_audio\nimport tensorflow as tf\nimport librosa\nimport numpy as np\nfrom glob import glob\nimport string\n\ndef data_preparation(libri_path):\n \"\"\"Prepare texts and its corresponding audio file path\n \n Args:\n path: Path to texts and audio files.\n \n Returns:\n texts: List of sentences.\n audio_path: Audio paths of its corresponding sentences.\n \"\"\"\n\n folders = glob(libri_path+\"/**/**\")\n texts = []\n audio_path = []\n for path in folders:\n text_path = glob(path+\"/*txt\")[0]\n f = open(text_path)\n for line in f.readlines():\n line_ = line.split(\" \")\n audio_path.append(path+\"/\"+line_[0]+\".flac\")\n texts.append(line[len(line_[0])+1:-1])\n return texts, audio_path\n\ndef process_audio(audio_path, \n sess, \n prepro_batch=128, \n sample_rate=22050,\n frame_step=10, \n frame_length=25,\n feat_dim = 40,\n feat_type='fbank'):\n \"\"\"GPU accerated audio features 
extracting in tensorflow\n\n Args:\n audio_path: List of path of audio files.\n sess: Tf session to execute the graph for feature extraction.\n prepro_batch: Batch size for preprocessing audio features.\n frame_step: Step size in ms.\n feat_dim: Feature dimension.\n feat_type: Types of features you want to apply.\n\n Returns:\n feats: List of features with variable length L, \n each element is in the shape of (L, feat_dim), N is\n the number of samples.\n featlen: List of feature length.\n \"\"\" \n\n # build extacting graph\n input_audio = tf.placeholder(dtype=tf.float32, shape=[None, None])\n if feat_type == 'fbank':\n mel_fbanks = common_audio.compute_mel_filterbank_features(\n input_audio, sample_rate=sample_rate, frame_step=frame_step, frame_length=frame_length, num_mel_bins=feat_dim, apply_mask=True)\n mel_fbanks = tf.reduce_sum(mel_fbanks, -1)\n\n def extract_feat(audio_batch, len_batch, fs):\n max_len = max(len_batch)\n audio_padded = np.zeros([prepro_batch, max_len], dtype=np.float32)\n for i in range(len(audio_batch)):\n audio_padded[i][:len(audio_batch[i])] = audio_batch[i]\n feat = sess.run(mel_fbanks, feed_dict={input_audio: audio_padded})\n # compute the feature length:\n feat_len = np.array(len_batch) // int(fs * frame_step / 1e3) + 1\n feat_len = feat_len.astype(np.int32)\n return feat, feat_len\n \n audio_batch = []\n len_batch = []\n feats = []\n featlen = []\n\n # start extracting audio feature in a batch manner:\n for p in audio_path[:13]:\n audio, fs = librosa.load(p)\n audio_batch.append(audio)\n len_batch.append(len(audio))\n if len(audio_batch) == prepro_batch:\n feat, feat_len = extract_feat(audio_batch, len_batch, fs)\n # remove paddings of audios batch:\n for index, l in enumerate(feat_len):\n feats.append(feat[index][:l])\n featlen = np.concatenate([featlen, feat_len])\n audio_batch = []\n len_batch = []\n print(\"Processed samples: {}/{}\".format(len(feats), len(audio_path)))\n\n if len(audio_batch) % prepro_batch != 0:\n feat, feat_len = extract_feat(audio_batch, len_batch, fs)\n # remove paddings:\n for index, l in enumerate(feat_len):\n feats.append(feat[index][:l])\n featlen = np.concatenate([featlen, feat_len])\n print(\"Processed samples: {}/{}\".format(len(feats), len(audio_path)))\n\n return np.array(feats), featlen.astype(np.int32)\n\ndef process_texts(special_chars, texts):\n \"\"\"\n Returns:\n chars: List of index sequences.\n charlen: List of length of sequences.\n \"\"\"\n\n charlen = []\n chars = []\n char2id, id2char = lookup_dicts(special_chars)\n for sentence in texts[:13]:\n sentence = sentence.translate(str.maketrans('', '', string.punctuation))\n char_converted = [char2id[char] if char != ' ' else char2id[''] for char in list(sentence)]\n chars.append([char2id['']] + char_converted + [char2id['']])\n charlen.append(len(chars[-1]))\n\n return np.array(chars), np.array(charlen).astype(np.int32), char2id, id2char\n\ndef lookup_dicts(special_chars):\n \"\"\"\n Args:\n special_chars: special charactors, , , , \n Returns:\n char2id: dict, from character to index.\n id2char: dict, from index to character.\n \"\"\"\n\n alphas = list(string.ascii_uppercase[:26])\n chars = special_chars + alphas\n char2id = {}\n id2char = {}\n for i, c in enumerate(chars):\n char2id[c] = i\n id2char[i] = c\n return char2id, id2char\n\ndef batch_gen(feats, chars, featlen, charlen, batch_size, feat_dim, max_charlen, shuffle=True):\n \"\"\"\n Returns:\n iter: Batch iterator.\n batch_num: Number of batches. 
\n \"\"\"\n\n # Check if the number of sample points matches.\n assert len(feats) == len(chars)\n assert len(feats) == len(featlen)\n assert len(chars) == len(charlen)\n num_batches = len(feats) // batch_size + int(len(feats) % batch_size != 0)\n\n def generator():\n buff_feats = []\n buff_chars = []\n\n if shuffle:\n rand_idx = np.random.permutation(len(feats))\n feats_, featlen_ = feats[rand_idx], featlen[rand_idx]\n chars_, charlen_ = chars[rand_idx], charlen[rand_idx]\n else:\n feats_, featlen_ = feats, featlen\n chars_, charlen_ = chars, charlen\n \n for i, x in enumerate(zip(feats_, chars_)):\n if i % batch_size == 0 and buff_feats and buff_chars:\n yield (np.stack(buff_feats, 0), len_batch1), (np.stack(buff_chars, 0), len_batch2)\n buff_feats = []\n buff_chars = []\n if i % batch_size == 0:\n len_batch1 = featlen_[i:i+batch_size]\n len_batch2 = charlen_[i:i+batch_size]\n max_len1 = max(len_batch1)\n max_len2 = max_charlen\n # Padding\n x_feat, x_char = x\n padded_feat = np.zeros([max_len1 - x_feat.shape[0], x_feat.shape[1]], dtype=np.float32)\n padded_char = np.zeros(max_len2 - len(x_char), dtype=np.int32)\n feat_padded = np.concatenate([x_feat, padded_feat], 0)\n char_padded = np.concatenate([x_char, padded_char], 0)\n buff_feats.append(feat_padded)\n buff_chars.append(char_padded)\n\n if buff_feats and buff_chars:\n yield (np.stack(buff_feats, 0), len_batch1), (np.stack(buff_chars, 0), len_batch2)\n\n shapes = (([None, None, feat_dim], [None]), ([None, None], [None]))\n types = ((tf.float32, tf.int32), (tf.int32, tf.int32))\n dataset = tf.data.Dataset.from_generator(generator,\n output_types=types, \n output_shapes=shapes)\n dataset.batch(batch_size).prefetch(1)\n dataset = dataset.shuffle(batch_size*64)\n dataset = dataset.repeat() \n iter = dataset.make_initializable_iterator()\n return iter, num_batches\n\nif __name__ == \"__main__\":\n sess = tf.Session()\n libri_path = './data/LibriSpeech/dev-clean'\n texts, audio_path = data_preparation(libri_path)\n special_chars = ['', '', '', '']\n chars, charlen = process_texts(special_chars, texts)\n X, X_len = process_audio(audio_path, sess, prepro_batch=64)\n iter_, num_batches = batch_gen(X, chars, X_len, charlen, batch_size=32)\n x = iter_.get_next()\n sess.run(tf.global_variables_initializer())\n sess.run(iter_.initializer)\n print(num_batches)\n for _ in range(5):\n a, b = sess.run(x)\n print(a[0].shape, a[1].shape, b[0].shape, b[1].shape)\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"135461377","text":"import mysql.connector\nfrom difflib import get_close_matches\n\ndef translate(word):\n word = word.lower()\n query = cursor.execute(\"SELECT Definition FROM Dictionary WHERE Expression = '%s'\" % word)\n results = cursor.fetchall()\n if results:\n return results\n else:\n query = cursor.execute(\"SELECT Expression FROM Dictionary\")\n words = cursor.fetchall()\n words = [w[0] for w in words]\n closest = get_close_matches(word, words)[0]\n confirm_word = input(f\"Sorry could not find {word}, did you mean {closest}? 
(y or n): \")\n confirm_word = confirm_word.lower()\n if confirm_word == \"y\":\n query = cursor.execute(\"SELECT Definition FROM Dictionary WHERE Expression = '%s'\" % closest)\n results = cursor.fetchall()\n return results\n else:\n return \"Sorry word not found\"\n\ncon = mysql.connector.connect(\nuser = \"ardit700_student\",\npassword = \"ardit700_student\",\nhost = \"108.167.140.122\",\ndatabase = \"ardit700_pm1database\"\n)\n\ncursor = con.cursor()\n\nword = input(\"Enter the word: \")\n\noutput = translate(word)\n\nif type(output) == list:\n for w in output:\n print(w[0])\nelse:\n print(output)\n","sub_path":"App_2_Volcano_Pop_Web_Map/section_14_python_mysql/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"357959472","text":"#!/bin/python3\n\nimport os\nimport sys\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n def __repr__(self):\n return \"[{0}, l:{1}, r:{2}]\".format(self.val, self.left.val if self.left else \"-\", self.right.val if self.right else \"-\")\n\n\ndef build_tree(root, indexes):\n arr = [root]\n while arr:\n node = arr.pop(0)\n left, right = indexes.pop(0)\n node.left = Node(left) if left != -1 else None\n node.right = Node(right) if right != -1 else None\n if node.left: arr.append(node.left)\n if node.right: arr.append(node.right)\n\n\ndef build_levels(node, levels, level):\n if not node:\n return\n if len(levels) <= level:\n levels.append([])\n levels[level].append(node)\n build_levels(node.left, levels, level + 1)\n build_levels(node.right, levels, level + 1)\n\n\ndef inorder(node, res):\n if not node:\n return\n inorder(node.left, res)\n res.append(node.val)\n inorder(node.right, res)\n\n\ndef swapNodes(indexes, queries):\n root, results, levels = Node(1), [], []\n\n build_tree(root, indexes)\n build_levels(root, levels, 0)\n\n # for i, level in enumerate(levels):\n # print(\"LEVEL {}\".format(i))\n # print(level)\n\n for k in queries:\n for kk in range(0, len(levels), k):\n for node in levels[kk - 1]:\n node.left, node.right = node.right, node.left\n res = []\n inorder(root, res)\n results.append(res)\n\n return results\n\n\nif __name__ == '__main__':\n indexes = [\n [2, 3],\n [4, 5],\n [6, -1],\n [-1, 7],\n [8, 9],\n [10, 11],\n [12, 13],\n [-1, 14],\n [-1, -1],\n [15, -1],\n [16, 17],\n [-1, -1],\n [-1, -1],\n [-1, -1],\n [-1, -1],\n [-1, -1],\n [-1, -1]\n ]\n\n queries = [2, 3]\n result = swapNodes(indexes, queries)\n for row in result:\n print(\" \".join([str(x) for x in row]))\n","sub_path":"hackerrank/swap_nodes_algo/my_attempt.py","file_name":"my_attempt.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87623513","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.template import RequestContext\nfrom cms.api import add_plugin\n\n# from cms.utils.i18n import force_language\n# from hvad.test_utils.context_managers import LanguageOverride\n# from aldryn_faq.models import Category, Question, get_slug_in_language\n\nfrom . 
import AldrynFaqTest, CMSRequestBasedTest\n\n\nclass TestQuestionListPlugin(AldrynFaqTest, CMSRequestBasedTest):\n\n def test_plugin(self):\n page1 = self.get_or_create_page(\"Page One\")\n ph = page1.placeholders.get(slot='content')\n plugin = add_plugin(ph, 'QuestionListPlugin', language='en')\n\n request = self.get_page_request(\n page1, self.user, None, lang_code='en', edit=True)\n context = RequestContext(request, {})\n rendered = plugin.render_plugin(context, ph)\n self.assertTrue(rendered.find(self.question1.title) > -1)\n\n\nclass TestCategoryListPlugin(AldrynFaqTest, CMSRequestBasedTest):\n\n def test_plugin(self):\n page1 = self.get_or_create_page(\"Page One\")\n ph = page1.placeholders.get(slot='content')\n plugin = add_plugin(ph, 'CategoryListPlugin', language='en')\n\n request = self.get_page_request(\n page1, self.user, None, lang_code='en', edit=True)\n context = RequestContext(request, {})\n try:\n rendered = plugin.render_plugin(context, ph)\n self.assertTrue(rendered.find(self.category1.name) > -1)\n except Exception as e:\n self.fail(e)\n","sub_path":"aldryn_faq/tests/test_plugins.py","file_name":"test_plugins.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"376549241","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nfrom hyperparameters import Hyperparameters as hp\nimport tensorflow as tf\nfrom load_marco import load_marco\nfrom BiDAF import BiDAF\nfrom classification_vector import classification\nfrom BiLSTM_cudnn import BiLSTM\nfrom extract_valid_para import extract_valid\nfrom load_dict import load_dict\nimport numpy as np\n\nvocab = load_dict(hp.word, hp.embedding_size)\nmarco_train = load_marco(\n vocab=vocab,\n path=hp.marco_train_path,\n max_seq_length=hp.max_seq_length,\n max_para=hp.max_para\n)\n\n'''\nmarco_dev = load_marco(\n vocab=vocab,\n path=hp.marco_dev_path,\n max_seq_length=hp.max_seq_length,\n max_para=hp.max_para\n)\n'''\n\nwith tf.device('/gpu:1'):\n with tf.variable_scope('embedding'):\n embedding_weight = tf.Variable(tf.constant(0.0, shape=[hp.vocab_size, hp.embedding_size]),\n trainable=False,\n name='embedding_weight')\n embedding_placeholder = tf.placeholder(tf.float32, [hp.vocab_size, hp.embedding_size])\n embedding_init = embedding_weight.assign(embedding_placeholder)\n keep_prob = tf.placeholder(tf.float32)\n\n context_input_ids = tf.placeholder(dtype=tf.int32, shape=[None, hp.max_seq_length])\n qas_input_ids = tf.placeholder(dtype=tf.int32, shape=[None, hp.max_seq_length])\n label = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n context_embedding = tf.nn.embedding_lookup(embedding_weight, context_input_ids)\n qas_embedding = tf.nn.embedding_lookup(embedding_weight, qas_input_ids)\n\n with tf.variable_scope('context_lstm', reuse=tf.AUTO_REUSE):\n context_lstm = BiLSTM(\n inputs=context_embedding,\n hidden_units=hp.embedding_size,\n dropout=hp.keep_prob,\n name='context_lstm'\n ).result\n\n with tf.variable_scope('qas_lstm', reuse=tf.AUTO_REUSE):\n qas_lstm = BiLSTM(\n inputs=qas_embedding,\n hidden_units=hp.embedding_size,\n dropout=hp.keep_prob,\n name='qas_lstm'\n ).result\n\n with tf.variable_scope('bidaf', reuse=tf.AUTO_REUSE):\n fuse_vector = BiDAF(\n refc=context_lstm,\n refq=qas_lstm,\n cLength=hp.max_seq_length,\n qLength=hp.max_seq_length,\n hidden_units=hp.embedding_size,\n name='bidaf'\n ).fuse_vector\n\n with tf.variable_scope('features', reuse=tf.AUTO_REUSE):\n features = BiLSTM(\n inputs=fuse_vector,\n 
hidden_units=8 * hp.embedding_size,\n dropout=hp.keep_prob,\n name='features'\n ).result\n\n with tf.variable_scope('classification', reuse=tf.AUTO_REUSE):\n classification_vector = classification(\n inputs=features,\n embedding_size=16 * hp.embedding_size,\n max_seq_length=hp.max_seq_length,\n bert_embedding_size=hp.embedding_size,\n keep_prob=keep_prob,\n name='classification'\n ).class_vector\n\n with tf.name_scope('class_loss'):\n class_loss = tf.reduce_sum(\n tf.reduce_sum(\n tf.nn.weighted_cross_entropy_with_logits(\n targets=label,\n logits=classification_vector,\n pos_weight=hp.pos_weight),\n axis=0),\n axis=0,\n name='class_loss'\n )\n class_loss_summary = tf.summary.scalar(\"class_loss_summary\", class_loss)\n\n with tf.name_scope('class_acc'):\n prediction = tf.cast(tf.greater(tf.nn.sigmoid(classification_vector), 0.5), tf.float32)\n class_acc = tf.reduce_sum(\n tf.divide(\n tf.subtract(\n 10.,\n tf.reduce_sum(\n tf.mod(\n tf.add(\n label,\n prediction),\n 2),\n axis=0)),\n 10),\n axis=0,\n name='class_acc'\n )\n class_acc_summary = tf.summary.scalar('class_acc_summary', class_acc)\n\n class_merged = tf.summary.merge([class_loss_summary, class_acc_summary])\n\n ''' # get the valid paragraph, useless for the classify\n valid_para = extract_valid(\n fuse_vector=fuse_vector,\n classification_vector=tf.nn.sigmoid(classification_vector),\n max_seq=hp.max_seq_length,\n embd_size=4 * hp.bert_embedding_size,\n pos_para=hp.pos_para\n ).valid_para\n '''\n\n train_op = tf.train.GradientDescentOptimizer(learning_rate=hp.learning_rate).minimize(class_loss)\n\n init = tf.global_variables_initializer()\n\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n\n\n with tf.Session(config=config) as sess:\n sess.run(init)\n #saver = tf.train.Saver()\n #saver.restore(sess, tf.train.latest_checkpoint('./bidaf_classify/model/'))\n writer = tf.summary.FileWriter('bidaf_classify/log', sess.graph)\n saver = tf.train.Saver(max_to_keep=hp.max_to_keep)\n sess.run(embedding_init, feed_dict={embedding_placeholder: vocab.embd})\n counter = 0\n for epoch in range(hp.epoch):\n for i in range(marco_train.total):\n context_id = marco_train.passage_index[i]\n qas_id = np.tile(marco_train.query_index[i][np.newaxis,:], [hp.max_para, 1])\n labels = marco_train.label[i]\n dict = {\n context_input_ids: context_id,\n qas_input_ids: qas_id,\n label: labels,\n keep_prob: hp.keep_prob\n }\n sess.run(train_op, feed_dict=dict)\n if i % hp.loss_acc_iter == 0:\n if sess.run(class_loss, feed_dict=dict) != 0:\n writer.add_summary(sess.run(class_merged, feed_dict=dict), counter)\n counter += 1\n saver.save(sess, 'bidaf_classify/model/my_model', global_step=epoch)\n","sub_path":"no_bert_classification/bidaf_classify_cudnn.py","file_name":"bidaf_classify_cudnn.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"581498647","text":"# This is not part of the actual server\n\nimport os\ntry:\n os.remove('../data/messages.db')\nexcept:\n pass\n\nimport sqlite3, random\nfrom time import time\n\ndb = sqlite3.connect('../data/messages.db')\nc = db.cursor()\n\nfor file_id in range(16):\n c.execute('''CREATE TABLE d{}\n (content text, \n timestamp int, \n sender text)'''.format(file_id))\n users = [random.randint(0, 10), random.randint(0, 10)]\n for msg_num in range(20):\n msg_record = ('Some, text', int(time() * 100),\n 'user' + str(random.choice(users)))\n c.execute('''INSERT INTO d{} VALUES (?, ?, 
?)'''.format(file_id), msg_record)\n \ndb.commit()\ndb.close()\n","sub_path":"server/utilities/generate_messages.py","file_name":"generate_messages.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"614766533","text":"# stdlib\nimport logging\n\n# third party lib\nfrom flask import request\n\n# internal lib\nfrom lib import tasks\nfrom lib import networking\nfrom lib.settings import Commands, Options, Thresholds\nfrom server.lib import task_init, task_qc, task_pca, task_ass\nfrom lib.client_registry import Registry\n\n# TODO helper for parameter validation and default specification\n\n\ndef list_tasks():\n tsks = tasks.task_list\n return networking.create_response(200, tsks)\n\n\ndef start_task(task_name):\n logging.info(f'Got command to start {task_name}, starting...')\n\n args = {}\n for key, val in request.json.items():\n args[key.upper()] = val\n\n if task_name == Commands.INIT:\n task_init.start_init_task()\n\n elif task_name.startswith(Commands.QC):\n args[Options.HWE] = args.get(\"QC_HWE\", Thresholds.QC_hwe)\n args[Options.MAF] = args.get(\"QC_MAF\", Thresholds.QC_maf)\n logging.info(f\"Specified Filters :{args}\")\n task_qc.start_client_qc_task(args)\n task_qc.start_local_qc_task(args)\n\n elif task_name.startswith(Commands.PCA):\n args[\"PCA_PCS\"] = int(args.get(\"PCA_PCS\", Thresholds.PCA_pcs))\n task_pca.Position_reporter.get_instance(args)\n if not task_pca.ready_to_decompose():\n if not task_pca.filtered():\n args[Options.MAF] = args.get(\"PCA_MAF\", Thresholds.PCA_maf)\n if \"PCA_LD_WINDOW\" not in args: # default parameters\n args[\"PCA_LD_WINDOW\"] = Thresholds.PCA_ld_window\n if \"PCA_LD_THRESHOLD\" not in args: # default parameters\n args[\"PCA_LD_THRESHOLD\"] = Thresholds.PCA_ld_threshold\n args[Options.LD] = [args[\"PCA_LD_WINDOW\"], args[\"PCA_LD_THRESHOLD\"]]\n logging.info(f\"Specified pruning filters :{args}\")\n task_pca.start_pca_filters(args)\n else:\n logging.info(\"Reporting Filtered Sites\")\n task_pca.Position_reporter.get_instance().report_pos()\n else:\n logging.info(\"starting eigen decomposition\")\n task_pca.eigenDecompose(n_components=args[\"PCA_PCS\"])\n\n elif task_name == Commands.ASSO:\n args[\"ASSO_PCS\"] = args.get(\"ASSO_PCS\", Thresholds.ASSO_pcs)\n logging.info(\"Starting Associations\")\n task_ass.LogisticAdmm.get_instance(args, active=2)\n\n return networking.create_response(200, f'Started task {task_name}')\n\n\ndef start_subtask(task_name, subtask_name, client_name):\n if task_name == Commands.INIT:\n if subtask_name == 'POS':\n task_init.store_positions(request.data, client_name)\n elif subtask_name == 'COUNT':\n task_init.store_counts(request.data, client_name)\n\n elif task_name.startswith(Commands.QC):\n if subtask_name == \"FIN\":\n if task_qc.filter_finished(client_name, Commands.QC):\n logging.info(\"Done with QC.\")\n\n elif task_name.startswith(Commands.PCA):\n if subtask_name == \"FIN\":\n if task_qc.filter_finished(client_name, Commands.PCA):\n logging.info(\"Done with PCA filters. 
Initiating pruning\")\n reset_states(\"PRUNE\")\n ld_agg = task_pca.CovarianceAggregator.get_instance(len(Registry.get_instance().list_clients()), 50)\n # send message to start LD pruning\n ld_agg.send_request({})\n elif subtask_name == \"LD\":\n ld_agg = task_pca.CovarianceAggregator.get_instance(len(Registry.get_instance().list_clients()), 50)\n ld_agg.update(request.data)\n elif subtask_name == \"PCAPOS\":\n task_pca.Position_reporter.get_instance().report_pos()\n elif subtask_name == \"COV\":\n task_pca.store_covariance(client_name, request.data)\n\n elif task_name.startswith(Commands.ASSO):\n ass_agg = task_ass.LogisticAdmm.get_instance({}, active=2)\n if subtask_name == \"adjust\":\n ass_agg.update_stats(request.data)\n elif subtask_name == \"estimate\":\n ass_agg.update(request.data)\n elif subtask_name == \"pval\":\n ass_agg.update_pval(request.data)\n elif subtask_name == \"hessians\":\n model, have_all_info = ass_agg.newton_stats_update(request.data)\n if have_all_info:\n ass_agg.newton_iter(model)\n elif subtask_name == \"valback\":\n ass_agg.collect_likelihoods(request.data)\n\n return networking.create_response(200)\n\n\ndef reset_states(state):\n instance = Registry.get_instance()\n for client in instance.list_clients():\n instance.set_client_state(client[\"name\"], state)\n","sub_path":"src/server/routes/controllers/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91574237","text":"from .node import Node\nfrom typing import Any\n\n\nclass LinkedList(object):\n '''\n Singly linked list creation.\n O(1) time.\n '''\n def __init__(self, iterable=[]):\n '''\n Initialized the list with a set of values, contained in an iterable.\n O(n) time.\n '''\n self._head = None\n self._length = 0\n if len(iterable) >= 1:\n for e in iterable:\n self.append(e)\n\n def __str__(self):\n return f'Head: {self._head} | Length: {self._length}'\n\n def __repr__(self):\n return f''\n\n def __len__(self):\n \"\"\"\n Returns the length of the linked list.\n O(n) time\n \"\"\"\n curr = self._head\n count = 0\n while curr:\n count += 1\n curr = curr._next\n\n return self._length\n\n # def __iter__(self):\n # pass\n\n # def __next__(self):\n # pass\n\n def prepend(self, data: Any) -> Node:\n '''\n Add node to singly-linked list creation at head.\n O(1) time.\n '''\n if data is None:\n print('data absent')\n return False\n else:\n self._head = Node(data, self._head)\n self._length += 1\n return True\n\n def append(self, data: Any) -> Node:\n '''\n Add node to singly-linked list creation at tail.\n O(n) time.\n '''\n if data is None:\n print('data absent')\n return False\n\n else:\n new_node = Node(data)\n\n if self._head is None:\n self._head = new_node\n self._length += 1\n # print('appended ' + str(new_node) + ' as head')\n return new_node\n\n else:\n curr = self._head\n while curr._next:\n curr = curr._next\n\n curr._next = new_node\n self._length += 1\n return new_node\n\n def includes(self, data: Any) -> bool:\n '''\n Search the linked list for for a given value.\n O(n) time.\n '''\n if self._head is None:\n print('No head.')\n return False\n\n else:\n curr = self._head\n pos = 0\n while curr is not None:\n if curr._data == data:\n print('Contains ' + str(data) + ' at pos: ' + str(pos))\n return (True)\n curr = curr._next\n pos += 1\n\n print('Doesn\\'t contain ' + str(data))\n return False\n\n def read_off(self):\n '''\n Reads off the values in order.\n O(n) time\n '''\n 
vals = []\n\n if self._head is not None:\n curr = self._head\n while curr:\n vals.append(curr._data)\n curr = curr._next\n print(vals)\n return(vals)\n\n def insert_before(self, i_data, s_data):\n '''\n Add a new node with the given i_data immediately\n before the first s_data node\n O(n) time.\n '''\n if self._head is not None:\n\n curr = self._head\n n = Node(i_data)\n\n if self._head._data == s_data:\n n._next, self._head = curr, n\n self._length += 1\n else:\n prev = None\n while curr:\n if curr._data == s_data:\n n._next, prev._next = curr, n\n self._length += 1\n prev, curr = curr, curr._next\n\n def insert_after(self, i_data, s_data):\n '''\n Add a new node with the given i_data immediately\n after the first s_data node\n O(n) time.\n '''\n if self._head is not None:\n\n n = Node(i_data)\n curr = self._head\n\n while curr:\n if curr._data == s_data:\n n._next = curr._next\n curr._next = n\n self._length += 1\n curr = curr._next\n\n def kth_from_end(self, k):\n '''\n Moves down the linked list to find\n a node that is k pisitions from the tail of the list.\n tortise and hare solution.\n O(n) time.\n '''\n\n if k < 0:\n print('Cheeky Bugger: K out of range.')\n return IndexError\n\n t = self._head\n h = self._head\n\n for i in range(k):\n if h is not None:\n h = h._next\n else:\n raise IndexError\n\n while h._next is not None:\n t = t._next\n h = h._next\n\n return t._data\n","sub_path":"challenges/ll_merge/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"243293994","text":"#ASSIGNMENT9\n\n#Q1\na=3\nif a<4:\n try:\n a=a/(a-3)\n except:\n print('Division cannot be done by zero')\nprint(a)\n\n#Q2\nl=[1,2,3]\ntry:\n print(l[3])\nexcept:\n print('List index out of range.')\n\n#Q3\nprint(\"An exception\")\n\n#Q4\nprint(\"OUTPUT\\n\"\n\"\"\"-5.0\na/b result is 0\"\"\")\n\n#Q5\n\n#1\ntry:\n import maths\nexcept ImportError:\n print(\"It is import error because no such module is present\")\n \n#2\ntry:\n a=int(input(\"ENTER NO.\"))\nexcept ValueError:\n print(\"ENTER NO.ONLY\")\n\n#3\ntry:\n l=[1,2,3]\n print(l[3])\nexcept IndexError:\n print(\"Invalid index\")\n","sub_path":"assignment9.py","file_name":"assignment9.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"638589687","text":"\"\"\"\nExample program to demonstrate how to send a multi-channel time series to LSL.\nI used this example in order to simulate EEG stream\n\"\"\"\n\nimport time\nimport numpy as np\nfrom pylsl import StreamInfo, StreamOutlet\nimport random as rnd\n\nchannels = 16\nsample_rate = 120\n\ndef main():\n\n # Simulation of OpenBCI streaming using 16 channels and 120 Hz as sample rate\n info = StreamInfo('OpenBCI', 'EEG', channels, sample_rate, 'float32', 'myuid34234')\n outlet = StreamOutlet(info)\n\n input('Start recording via Lab Recorder and press enter...')\n print('Streaming EEG data...')\n\n while True:\n\n # Rand some EEG sample\n eeg_sample = [rnd.random() for i in range(channels)]\n\n # Now send it and wait for a bit\n outlet.push_sample(eeg_sample)\n time.sleep(1 / sample_rate)\n\n\nif __name__ == '__main__':\n\n main()\n\n","sub_path":"recycle bin/openbci lsl/simulate_EEG_stream.py","file_name":"simulate_EEG_stream.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"32167551","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Braňo Žarnovičan'\nSITENAME = u'Braňo Žarnovičan\\'s blog'\nSITEURL = ''\n\nPATH = 'content'\nFILENAME_METADATA = '(?P\\d{4}-\\d{2}-\\d{2})-(?P.*)'\nARTICLE_URL = '{date:%Y}/{date:%m}/{date:%d}/{slug}/'\nARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'\n\nTIMEZONE = 'Europe/Prague'\n\nDEFAULT_LANG = u'en'\n\nMD_EXTENSIONS = ['codehilite(noclasses=True,guess_lang=False)', 'extra', 'admonition', 'toc']\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = ()\n\n# Social widget\nSOCIAL = (\n ('zarnovican@github', 'http://github.com/zarnovican'),\n ('zarnovican@linkedin', 'https://cz.linkedin.com/in/brano-zarnovican-79a5397'),\n)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nPLUGIN_PATHS = ['pelican-plugins']\nPLUGINS = ['tag_cloud']\nTAG_CLOUD_SORTING = 'alphabetically'\nTAG_CLOUD_STEPS = 2\n\nTHEME = 'pelican-bootstrap3'\nDISPLAY_CATEGORIES_ON_MENU = False\nDISPLAY_TAGS_ON_SIDEBAR = True\nBOOTSTRAP_THEME = 'flatly'\nCUSTOM_CSS = 'static/custom.css'\nSTATIC_PATHS = ['static']\n\nEXTRA_PATH_METADATA = {\n 'static/favicon.ico': {'path': 'favicon.ico'},\n}\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"610567945","text":"from __future__ import annotations\nimport random\nimport logging\nfrom collections import OrderedDict\nfrom collections.abc import Iterable, Container\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Optional\n\nfrom Fill import ShuffleError\nfrom Search import Search\nfrom Region import Region, TimeOfDay\nfrom Rules import set_entrances_based_rules\nfrom State import State\nfrom Item import ItemFactory\nfrom Hints import HintArea, HintAreaNotFound\nfrom HintList import misc_item_hint_table\n\nif TYPE_CHECKING:\n from Entrance import Entrance\n from Location import Location\n from Item import Item\n from World import World\n\n\ndef set_all_entrances_data(world: World) -> None:\n for type, forward_entry, *return_entry in entrance_shuffle_table:\n forward_entrance = world.get_entrance(forward_entry[0])\n forward_entrance.data = forward_entry[1]\n forward_entrance.type = type\n forward_entrance.primary = True\n if type == 'Grotto':\n forward_entrance.data['index'] = 0x1000 + forward_entrance.data['grotto_id']\n if world.settings.decouple_entrances and type not in ('ChildBoss', 'AdultBoss'):\n forward_entrance.decoupled = True\n if return_entry:\n return_entry = return_entry[0]\n return_entrance = world.get_entrance(return_entry[0])\n return_entrance.data = return_entry[1]\n return_entrance.type = type\n forward_entrance.bind_two_way(return_entrance)\n if type == 'Grotto':\n return_entrance.data['index'] = 0x2000 + return_entrance.data['grotto_id']\n if world.settings.decouple_entrances and type not in ('ChildBoss', 'AdultBoss'):\n return_entrance.decoupled = True\n\n\ndef assume_entrance_pool(entrance_pool: list[Entrance]) -> list[Entrance]:\n assumed_pool = []\n for entrance in entrance_pool:\n assumed_forward = entrance.assume_reachable()\n if entrance.reverse is not None and not entrance.decoupled:\n assumed_return = 
entrance.reverse.assume_reachable()\n world = entrance.world\n if not (len(world.settings.mix_entrance_pools) > 1 and (world.settings.shuffle_overworld_entrances or world.shuffle_special_interior_entrances)):\n if (entrance.type in ('Dungeon', 'Grotto', 'Grave') and entrance.reverse.name != 'Spirit Temple Lobby -> Desert Colossus From Spirit Lobby') or \\\n (entrance.type == 'Interior' and world.shuffle_special_interior_entrances):\n # In most cases, Dungeon, Grotto/Grave and Simple Interior exits shouldn't be assumed able to give access to their parent region\n assumed_return.set_rule(lambda state, **kwargs: False)\n assumed_forward.bind_two_way(assumed_return)\n assumed_pool.append(assumed_forward)\n return assumed_pool\n\n\ndef build_one_way_targets(world: World, types_to_include: Iterable[str], exclude: Container[str] = (), target_region_names: Container[str] = ()) -> list[Entrance]:\n one_way_entrances: list[Entrance] = []\n for pool_type in types_to_include:\n one_way_entrances += world.get_shufflable_entrances(type=pool_type)\n valid_one_way_entrances = list(filter(lambda entrance: entrance.name not in exclude, one_way_entrances))\n if target_region_names:\n return [entrance.get_new_target() for entrance in valid_one_way_entrances\n if entrance.connected_region.name in target_region_names]\n return [entrance.get_new_target() for entrance in valid_one_way_entrances]\n\n\n# Abbreviations\n# DMC Death Mountain Crater\n# DMT Death Mountain Trail\n# GC Goron City\n# GF Gerudo Fortress\n# GS Gold Skulltula\n# GV Gerudo Valley\n# HC Hyrule Castle\n# HF Hyrule Field\n# KF Kokiri Forest\n# LH Lake Hylia\n# LLR Lon Lon Ranch\n# LW Lost Woods\n# OGC Outside Ganon's Castle\n# SFM Sacred Forest Meadow\n# ToT Temple of Time\n# ZD Zora's Domain\n# ZF Zora's Fountain\n# ZR Zora's River\n\nentrance_shuffle_table = [\n ('Dungeon', ('KF Outside Deku Tree -> Deku Tree Lobby', { 'index': 0x0000 }),\n ('Deku Tree Lobby -> KF Outside Deku Tree', { 'index': 0x0209 })),\n ('Dungeon', ('Death Mountain -> Dodongos Cavern Beginning', { 'index': 0x0004 }),\n ('Dodongos Cavern Beginning -> Death Mountain', { 'index': 0x0242 })),\n ('Dungeon', ('Zoras Fountain -> Jabu Jabus Belly Beginning', { 'index': 0x0028 }),\n ('Jabu Jabus Belly Beginning -> Zoras Fountain', { 'index': 0x0221 })),\n ('Dungeon', ('SFM Forest Temple Entrance Ledge -> Forest Temple Lobby', { 'index': 0x0169 }),\n ('Forest Temple Lobby -> SFM Forest Temple Entrance Ledge', { 'index': 0x0215 })),\n ('Dungeon', ('DMC Fire Temple Entrance -> Fire Temple Lower', { 'index': 0x0165 }),\n ('Fire Temple Lower -> DMC Fire Temple Entrance', { 'index': 0x024A })),\n ('Dungeon', ('Lake Hylia -> Water Temple Lobby', { 'index': 0x0010 }),\n ('Water Temple Lobby -> Lake Hylia', { 'index': 0x021D })),\n ('Dungeon', ('Graveyard Warp Pad Region -> Shadow Temple Entryway', { 'index': 0x0037 }),\n ('Shadow Temple Entryway -> Graveyard Warp Pad Region', { 'index': 0x0205 })),\n ('Dungeon', ('Desert Colossus -> Spirit Temple Lobby', { 'index': 0x0082 }),\n ('Spirit Temple Lobby -> Desert Colossus From Spirit Lobby', { 'index': 0x01E1 })),\n ('Dungeon', ('Kakariko Village -> Bottom of the Well', { 'index': 0x0098 }),\n ('Bottom of the Well -> Kakariko Village', { 'index': 0x02A6 })),\n ('Dungeon', ('ZF Ice Ledge -> Ice Cavern Beginning', { 'index': 0x0088 }),\n ('Ice Cavern Beginning -> ZF Ice Ledge', { 'index': 0x03D4 })),\n ('Dungeon', ('Gerudo Fortress -> Gerudo Training Ground Lobby', { 'index': 0x0008 }),\n ('Gerudo Training Ground Lobby -> Gerudo Fortress', { 
'index': 0x03A8 })),\n\n ('DungeonSpecial', ('Ganons Castle Ledge -> Ganons Castle Lobby', { 'index': 0x0467 }),\n ('Ganons Castle Lobby -> Castle Grounds From Ganons Castle', { 'index': 0x023D })),\n\n ('ChildBoss', ('Deku Tree Before Boss -> Queen Gohma Boss Room', { 'index': 0x040f, 'savewarp_addresses': [ 0xB06292, 0xBC6162, 0xBC60AE ] }),\n ('Queen Gohma Boss Room -> Deku Tree Before Boss', { 'index': 0x0252 })),\n ('ChildBoss', ('Dodongos Cavern Before Boss -> King Dodongo Boss Room', { 'index': 0x040b, 'savewarp_addresses': [ 0xB062B6, 0xBC616E ] }),\n ('King Dodongo Boss Room -> Dodongos Cavern Mouth', { 'index': 0x00c5 })),\n ('ChildBoss', ('Jabu Jabus Belly Before Boss -> Barinade Boss Room', { 'index': 0x0301, 'savewarp_addresses': [ 0xB062C2, 0xBC60C2 ] }),\n ('Barinade Boss Room -> Jabu Jabus Belly Before Boss', { 'index': 0x0407 })),\n ('AdultBoss', ('Forest Temple Before Boss -> Phantom Ganon Boss Room', { 'index': 0x000c, 'savewarp_addresses': [ 0xB062CE, 0xBC6182 ] }),\n ('Phantom Ganon Boss Room -> Forest Temple Before Boss', { 'index': 0x024E })),\n ('AdultBoss', ('Fire Temple Before Boss -> Volvagia Boss Room', { 'index': 0x0305, 'savewarp_addresses': [ 0xB062DA, 0xBC60CE ] }),\n ('Volvagia Boss Room -> Fire Temple Before Boss', { 'index': 0x0175 })),\n ('AdultBoss', ('Water Temple Before Boss -> Morpha Boss Room', { 'index': 0x0417, 'savewarp_addresses': [ 0xB062E6, 0xBC6196 ] }),\n ('Morpha Boss Room -> Water Temple Before Boss', { 'index': 0x0423 })),\n ('AdultBoss', ('Shadow Temple Before Boss -> Bongo Bongo Boss Room', { 'index': 0x0413, 'savewarp_addresses': [ 0xB062FE, 0xBC61AA ] }),\n ('Bongo Bongo Boss Room -> Shadow Temple Before Boss', { 'index': 0x02B2 })),\n ('AdultBoss', ('Spirit Temple Before Boss -> Twinrova Boss Room', { 'index': 0x008D, 'savewarp_addresses': [ 0xB062F2, 0xBC6122 ] }),\n ('Twinrova Boss Room -> Spirit Temple Before Boss', { 'index': 0x02F5 })),\n\n ('Interior', ('Kokiri Forest -> KF Midos House', { 'index': 0x0433 }),\n ('KF Midos House -> Kokiri Forest', { 'index': 0x0443 })),\n ('Interior', ('Kokiri Forest -> KF Sarias House', { 'index': 0x0437 }),\n ('KF Sarias House -> Kokiri Forest', { 'index': 0x0447 })),\n ('Interior', ('Kokiri Forest -> KF House of Twins', { 'index': 0x009C }),\n ('KF House of Twins -> Kokiri Forest', { 'index': 0x033C })),\n ('Interior', ('Kokiri Forest -> KF Know It All House', { 'index': 0x00C9 }),\n ('KF Know It All House -> Kokiri Forest', { 'index': 0x026A })),\n ('Interior', ('Kokiri Forest -> KF Kokiri Shop', { 'index': 0x00C1 }),\n ('KF Kokiri Shop -> Kokiri Forest', { 'index': 0x0266 })),\n ('Interior', ('Lake Hylia -> LH Lab', { 'index': 0x0043 }),\n ('LH Lab -> Lake Hylia', { 'index': 0x03CC })),\n ('Interior', ('LH Fishing Island -> LH Fishing Hole', { 'index': 0x045F }),\n ('LH Fishing Hole -> LH Fishing Island', { 'index': 0x0309 })),\n ('Interior', ('GV Fortress Side -> GV Carpenter Tent', { 'index': 0x03A0 }),\n ('GV Carpenter Tent -> GV Fortress Side', { 'index': 0x03D0 })),\n ('Interior', ('Market Entrance -> Market Guard House', { 'index': 0x007E }),\n ('Market Guard House -> Market Entrance', { 'index': 0x026E })),\n ('Interior', ('Market -> Market Mask Shop', { 'index': 0x0530 }),\n ('Market Mask Shop -> Market', { 'index': 0x01D1, 'addresses': [0xC6DA5E] })),\n ('Interior', ('Market -> Market Bombchu Bowling', { 'index': 0x0507 }),\n ('Market Bombchu Bowling -> Market', { 'index': 0x03BC })),\n ('Interior', ('Market -> Market Potion Shop', { 'index': 0x0388 }),\n ('Market Potion Shop -> 
Market', { 'index': 0x02A2 })),\n ('Interior', ('Market -> Market Treasure Chest Game', { 'index': 0x0063 }),\n ('Market Treasure Chest Game -> Market', { 'index': 0x01D5 })),\n ('Interior', ('Market Back Alley -> Market Bombchu Shop', { 'index': 0x0528 }),\n ('Market Bombchu Shop -> Market Back Alley', { 'index': 0x03C0 })),\n ('Interior', ('Market Back Alley -> Market Man in Green House', { 'index': 0x043B }),\n ('Market Man in Green House -> Market Back Alley', { 'index': 0x0067 })),\n ('Interior', ('Kakariko Village -> Kak Carpenter Boss House', { 'index': 0x02FD }),\n ('Kak Carpenter Boss House -> Kakariko Village', { 'index': 0x0349 })),\n ('Interior', ('Kakariko Village -> Kak House of Skulltula', { 'index': 0x0550 }),\n ('Kak House of Skulltula -> Kakariko Village', { 'index': 0x04EE })),\n ('Interior', ('Kakariko Village -> Kak Impas House', { 'index': 0x039C }),\n ('Kak Impas House -> Kakariko Village', { 'index': 0x0345 })),\n ('Interior', ('Kak Impas Ledge -> Kak Impas House Back', { 'index': 0x05C8 }),\n ('Kak Impas House Back -> Kak Impas Ledge', { 'index': 0x05DC })),\n ('Interior', ('Kak Backyard -> Kak Odd Medicine Building', { 'index': 0x0072 }),\n ('Kak Odd Medicine Building -> Kak Backyard', { 'index': 0x034D })),\n ('Interior', ('Graveyard -> Graveyard Dampes House', { 'index': 0x030D }),\n ('Graveyard Dampes House -> Graveyard', { 'index': 0x0355 })),\n ('Interior', ('Goron City -> GC Shop', { 'index': 0x037C }),\n ('GC Shop -> Goron City', { 'index': 0x03FC })),\n ('Interior', ('Zoras Domain -> ZD Shop', { 'index': 0x0380 }),\n ('ZD Shop -> Zoras Domain', { 'index': 0x03C4 })),\n ('Interior', ('Lon Lon Ranch -> LLR Talons House', { 'index': 0x004F }),\n ('LLR Talons House -> Lon Lon Ranch', { 'index': 0x0378 })),\n ('Interior', ('Lon Lon Ranch -> LLR Stables', { 'index': 0x02F9 }),\n ('LLR Stables -> Lon Lon Ranch', { 'index': 0x042F })),\n ('Interior', ('Lon Lon Ranch -> LLR Tower', { 'index': 0x05D0 }),\n ('LLR Tower -> Lon Lon Ranch', { 'index': 0x05D4 })),\n ('Interior', ('Market -> Market Bazaar', { 'index': 0x052C }),\n ('Market Bazaar -> Market', { 'index': 0x03B8, 'addresses': [0xBEFD74] })),\n ('Interior', ('Market -> Market Shooting Gallery', { 'index': 0x016D }),\n ('Market Shooting Gallery -> Market', { 'index': 0x01CD, 'addresses': [0xBEFD7C] })),\n ('Interior', ('Kakariko Village -> Kak Bazaar', { 'index': 0x00B7 }),\n ('Kak Bazaar -> Kakariko Village', { 'index': 0x0201, 'addresses': [0xBEFD72] })),\n ('Interior', ('Kakariko Village -> Kak Shooting Gallery', { 'index': 0x003B }),\n ('Kak Shooting Gallery -> Kakariko Village', { 'index': 0x0463, 'addresses': [0xBEFD7A] })),\n ('Interior', ('Desert Colossus -> Colossus Great Fairy Fountain', { 'index': 0x0588 }),\n ('Colossus Great Fairy Fountain -> Desert Colossus', { 'index': 0x057C, 'addresses': [0xBEFD82] })),\n ('Interior', ('Hyrule Castle Grounds -> HC Great Fairy Fountain', { 'index': 0x0578 }),\n ('HC Great Fairy Fountain -> Castle Grounds', { 'index': 0x0340, 'addresses': [0xBEFD80] })),\n ('Interior', ('Ganons Castle Grounds -> OGC Great Fairy Fountain', { 'index': 0x04C2 }),\n ('OGC Great Fairy Fountain -> Castle Grounds', { 'index': 0x0340, 'addresses': [0xBEFD6C] })),\n ('Interior', ('DMC Lower Nearby -> DMC Great Fairy Fountain', { 'index': 0x04BE }),\n ('DMC Great Fairy Fountain -> DMC Lower Local', { 'index': 0x0482, 'addresses': [0xBEFD6A] })),\n ('Interior', ('Death Mountain Summit -> DMT Great Fairy Fountain', { 'index': 0x0315 }),\n ('DMT Great Fairy Fountain -> Death Mountain 
Summit', { 'index': 0x045B, 'addresses': [0xBEFD68] })),\n ('Interior', ('Zoras Fountain -> ZF Great Fairy Fountain', { 'index': 0x0371 }),\n ('ZF Great Fairy Fountain -> Zoras Fountain', { 'index': 0x0394, 'addresses': [0xBEFD7E] })),\n\n ('SpecialInterior', ('Kokiri Forest -> KF Links House', { 'index': 0x0272 }),\n ('KF Links House -> Kokiri Forest', { 'index': 0x0211 })),\n ('SpecialInterior', ('ToT Entrance -> Temple of Time', { 'index': 0x0053 }),\n ('Temple of Time -> ToT Entrance', { 'index': 0x0472 })),\n ('SpecialInterior', ('Kakariko Village -> Kak Windmill', { 'index': 0x0453 }),\n ('Kak Windmill -> Kakariko Village', { 'index': 0x0351 })),\n ('SpecialInterior', ('Kakariko Village -> Kak Potion Shop Front', { 'index': 0x0384 }),\n ('Kak Potion Shop Front -> Kakariko Village', { 'index': 0x044B })),\n ('SpecialInterior', ('Kak Backyard -> Kak Potion Shop Back', { 'index': 0x03EC }),\n ('Kak Potion Shop Back -> Kak Backyard', { 'index': 0x04FF })),\n\n ('Hideout', ('Gerudo Fortress -> Hideout 1 Torch Jail', { 'index': 0x0486 }),\n ('Hideout 1 Torch Jail -> Gerudo Fortress', { 'index': 0x0231 })),\n ('Hideout', ('GF Entrances Behind Crates -> Hideout 1 Torch Jail', { 'index': 0x048A }),\n ('Hideout 1 Torch Jail -> GF Entrances Behind Crates', { 'index': 0x0235 })),\n ('Hideout', ('GF Entrances Behind Crates -> Hideout Kitchen Hallway', { 'index': 0x048E }),\n ('Hideout Kitchen Hallway -> GF Entrances Behind Crates', { 'index': 0x0239 })),\n ('Hideout', ('Gerudo Fortress -> Hideout Kitchen Hallway', { 'index': 0x0492 }),\n ('Hideout Kitchen Hallway -> Gerudo Fortress', { 'index': 0x02AA })),\n ('Hideout', ('Gerudo Fortress -> Hideout 4 Torches Jail', { 'index': 0x0496 }),\n ('Hideout 4 Torches Jail -> Gerudo Fortress', { 'index': 0x02BA })),\n ('Hideout', ('GF Roof Entrance Cluster -> Hideout 4 Torches Jail', { 'index': 0x049A }),\n ('Hideout 4 Torches Jail -> GF Roof Entrance Cluster', { 'index': 0x02BE })),\n ('Hideout', ('Gerudo Fortress -> Hideout 2 Torches Jail', { 'index': 0x049E }),\n ('Hideout 2 Torches Jail -> Gerudo Fortress', { 'index': 0x02C2 })),\n ('Hideout', ('GF Roof Entrance Cluster -> Hideout 2 Torches Jail', { 'index': 0x04A2 }),\n ('Hideout 2 Torches Jail -> GF Roof Entrance Cluster', { 'index': 0x02C6 })),\n ('Hideout', ('GF Roof Entrance Cluster -> Hideout Kitchen Front', { 'index': 0x04A6 }),\n ('Hideout Kitchen Front -> GF Roof Entrance Cluster', { 'index': 0x02D2 })),\n ('Hideout', ('GF Kitchen Roof Access -> Hideout Kitchen Rear', { 'index': 0x04AA }),\n ('Hideout Kitchen Rear -> GF Kitchen Roof Access', { 'index': 0x02D6 })),\n ('Hideout', ('GF Break Room Entrance -> Hideout Break Room', { 'index': 0x04AE }),\n ('Hideout Break Room -> GF Break Room Entrance', { 'index': 0x02DA })),\n ('Hideout', ('GF Balcony -> Hideout Hall to Balcony', { 'index': 0x04B2 }),\n ('Hideout Hall to Balcony -> GF Balcony', { 'index': 0x02DE })),\n ('Hideout', ('GF 3 Torches Jail Exterior -> Hideout 3 Torches Jail', { 'index': 0x0570 }),\n ('Hideout 3 Torches Jail -> GF 3 Torches Jail Exterior', { 'index': 0x03A4 })),\n\n ('Grotto', ('Desert Colossus -> Colossus Grotto', { 'grotto_id': 0x00, 'entrance': 0x05BC, 'content': 0xFD, 'scene': 0x5C }),\n ('Colossus Grotto -> Desert Colossus', { 'grotto_id': 0x00, 'entrance': 0x0123, 'room': 0x00, 'angle': 0xA71C, 'pos': (0x427A0800, 0xC2000000, 0xC4A20666), 'savewarp_fallback': 0x01F1 })),\n ('Grotto', ('Lake Hylia -> LH Grotto', { 'grotto_id': 0x01, 'entrance': 0x05A4, 'content': 0xEF, 'scene': 0x57 }),\n ('LH Grotto -> Lake Hylia', 
{ 'grotto_id': 0x01, 'entrance': 0x0102, 'room': 0x00, 'angle': 0x0000, 'pos': (0xC53DF56A, 0xC4812000, 0x45BE05F2), 'savewarp_fallback': 0x0604 })),\n ('Grotto', ('Zora River -> ZR Storms Grotto', { 'grotto_id': 0x02, 'entrance': 0x05BC, 'content': 0xEB, 'scene': 0x54 }),\n ('ZR Storms Grotto -> Zora River', { 'grotto_id': 0x02, 'entrance': 0x00EA, 'room': 0x00, 'angle': 0x0000, 'pos': (0xC4CBC1B4, 0x42C80000, 0xC3041ABE), 'savewarp_fallback': 0x0199 })),\n ('Grotto', ('Zora River -> ZR Fairy Grotto', { 'grotto_id': 0x03, 'entrance': 0x036D, 'content': 0xE6, 'scene': 0x54 }),\n ('ZR Fairy Grotto -> Zora River', { 'grotto_id': 0x03, 'entrance': 0x00EA, 'room': 0x00, 'angle': 0xE000, 'pos': (0x4427A070, 0x440E8000, 0xC3B4ED3B), 'savewarp_fallback': 0x0199 })),\n ('Grotto', ('Zora River -> ZR Open Grotto', { 'grotto_id': 0x04, 'entrance': 0x003F, 'content': 0x29, 'scene': 0x54 }),\n ('ZR Open Grotto -> Zora River', { 'grotto_id': 0x04, 'entrance': 0x00EA, 'room': 0x00, 'angle': 0x8000, 'pos': (0x43B52520, 0x440E8000, 0x4309A14F), 'savewarp_fallback': 0x0199 })),\n ('Grotto', ('DMC Lower Nearby -> DMC Hammer Grotto', { 'grotto_id': 0x05, 'entrance': 0x05A4, 'content': 0xF9, 'scene': 0x61 }),\n ('DMC Hammer Grotto -> DMC Lower Local', { 'grotto_id': 0x05, 'entrance': 0x0246, 'room': 0x01, 'angle': 0x31C7, 'pos': (0xC4D290C0, 0x44348000, 0xC3ED5557), 'savewarp_fallback': 0x0246 })),\n ('Grotto', ('DMC Upper Nearby -> DMC Upper Grotto', { 'grotto_id': 0x06, 'entrance': 0x003F, 'content': 0x7A, 'scene': 0x61 }),\n ('DMC Upper Grotto -> DMC Upper Local', { 'grotto_id': 0x06, 'entrance': 0x0147, 'room': 0x01, 'angle': 0x238E, 'pos': (0x420F3401, 0x449E2000, 0x44DCD549), 'savewarp_fallback': 0x0147 })),\n ('Grotto', ('GC Grotto Platform -> GC Grotto', { 'grotto_id': 0x07, 'entrance': 0x05A4, 'content': 0xFB, 'scene': 0x62 }),\n ('GC Grotto -> GC Grotto Platform', { 'grotto_id': 0x07, 'entrance': 0x014D, 'room': 0x03, 'angle': 0x0000, 'pos': (0x448A1754, 0x44110000, 0xC493CCFD), 'savewarp_fallback': 0x014D })), #TODO (out-of-logic access to Goron City)\n ('Grotto', ('Death Mountain -> DMT Storms Grotto', { 'grotto_id': 0x08, 'entrance': 0x003F, 'content': 0x57, 'scene': 0x60 }),\n ('DMT Storms Grotto -> Death Mountain', { 'grotto_id': 0x08, 'entrance': 0x01B9, 'room': 0x00, 'angle': 0x8000, 'pos': (0xC3C1CAC1, 0x44AD4000, 0xC497A1BA), 'savewarp_fallback': 0x01B9 })),\n ('Grotto', ('Death Mountain Summit -> DMT Cow Grotto', { 'grotto_id': 0x09, 'entrance': 0x05FC, 'content': 0xF8, 'scene': 0x60 }),\n ('DMT Cow Grotto -> Death Mountain Summit', { 'grotto_id': 0x09, 'entrance': 0x01B9, 'room': 0x00, 'angle': 0x8000, 'pos': (0xC42CC164, 0x44F34000, 0xC38CFC0C), 'savewarp_fallback': 0x045B })),\n ('Grotto', ('Kak Backyard -> Kak Open Grotto', { 'grotto_id': 0x0A, 'entrance': 0x003F, 'content': 0x28, 'scene': 0x52 }),\n ('Kak Open Grotto -> Kak Backyard', { 'grotto_id': 0x0A, 'entrance': 0x00DB, 'room': 0x00, 'angle': 0x0000, 'pos': (0x4455CF3B, 0x42A00000, 0xC37D1871), 'savewarp_fallback': 0x04FF })),\n ('Grotto', ('Kakariko Village -> Kak Redead Grotto', { 'grotto_id': 0x0B, 'entrance': 0x05A0, 'content': 0xE7, 'scene': 0x52 }),\n ('Kak Redead Grotto -> Kakariko Village', { 'grotto_id': 0x0B, 'entrance': 0x00DB, 'room': 0x00, 'angle': 0x0000, 'pos': (0xC3C8EFCE, 0x00000000, 0x43C96551), 'savewarp_fallback': 0x0349 })),\n ('Grotto', ('Hyrule Castle Grounds -> HC Storms Grotto', { 'grotto_id': 0x0C, 'entrance': 0x05B8, 'content': 0xF6, 'scene': 0x5F }),\n ('HC Storms Grotto -> Castle Grounds', { 
'grotto_id': 0x0C, 'entrance': 0x0138, 'room': 0x00, 'angle': 0x9555, 'pos': (0x447C4104, 0x44C46000, 0x4455E211), 'savewarp_fallback': 0x0340 })),\n ('Grotto', ('Hyrule Field -> HF Tektite Grotto', { 'grotto_id': 0x0D, 'entrance': 0x05C0, 'content': 0xE1, 'scene': 0x51 }),\n ('HF Tektite Grotto -> Hyrule Field', { 'grotto_id': 0x0D, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0x1555, 'pos': (0xC59AACA0, 0xC3960000, 0x45315966), 'savewarp_fallback': 0x01F9 })),\n ('Grotto', ('Hyrule Field -> HF Near Kak Grotto', { 'grotto_id': 0x0E, 'entrance': 0x0598, 'content': 0xE5, 'scene': 0x51 }),\n ('HF Near Kak Grotto -> Hyrule Field', { 'grotto_id': 0x0E, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0xC000, 'pos': (0x4500299B, 0x41A00000, 0xC32065BD), 'savewarp_fallback': 0x017D })),\n ('Grotto', ('Hyrule Field -> HF Fairy Grotto', { 'grotto_id': 0x0F, 'entrance': 0x036D, 'content': 0xFF, 'scene': 0x51 }),\n ('HF Fairy Grotto -> Hyrule Field', { 'grotto_id': 0x0F, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0x0000, 'pos': (0xC58B2544, 0xC3960000, 0xC3D5186B), 'savewarp_fallback': 0x027E })),\n ('Grotto', ('Hyrule Field -> HF Near Market Grotto', { 'grotto_id': 0x10, 'entrance': 0x003F, 'content': 0x00, 'scene': 0x51 }),\n ('HF Near Market Grotto -> Hyrule Field', { 'grotto_id': 0x10, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0xE000, 'pos': (0xC4B2B1F3, 0x00000000, 0x444C719D), 'savewarp_fallback': 0x027E })),\n ('Grotto', ('Hyrule Field -> HF Cow Grotto', { 'grotto_id': 0x11, 'entrance': 0x05A8, 'content': 0xE4, 'scene': 0x51 }),\n ('HF Cow Grotto -> Hyrule Field', { 'grotto_id': 0x11, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0x0000, 'pos': (0xC5F61086, 0xC3960000, 0x45D84A7E), 'savewarp_fallback': 0x018D })),\n ('Grotto', ('Hyrule Field -> HF Inside Fence Grotto', { 'grotto_id': 0x12, 'entrance': 0x059C, 'content': 0xE6, 'scene': 0x51 }),\n ('HF Inside Fence Grotto -> Hyrule Field', { 'grotto_id': 0x12, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0xEAAB, 'pos': (0xC59BE902, 0xC42F0000, 0x4657F479), 'savewarp_fallback': 0x0189 })),\n ('Grotto', ('Hyrule Field -> HF Open Grotto', { 'grotto_id': 0x13, 'entrance': 0x003F, 'content': 0x03, 'scene': 0x51 }),\n ('HF Open Grotto -> Hyrule Field', { 'grotto_id': 0x13, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0x8000, 'pos': (0xC57B69B1, 0xC42F0000, 0x46588DF2), 'savewarp_fallback': 0x0189 })),\n ('Grotto', ('Hyrule Field -> HF Southeast Grotto', { 'grotto_id': 0x14, 'entrance': 0x003F, 'content': 0x22, 'scene': 0x51 }),\n ('HF Southeast Grotto -> Hyrule Field', { 'grotto_id': 0x14, 'entrance': 0x01F9, 'room': 0x00, 'angle': 0x9555, 'pos': (0xC384A807, 0xC3FA0000, 0x4640DCC8), 'savewarp_fallback': 0x0189 })),\n ('Grotto', ('Lon Lon Ranch -> LLR Grotto', { 'grotto_id': 0x15, 'entrance': 0x05A4, 'content': 0xFC, 'scene': 0x63 }),\n ('LLR Grotto -> Lon Lon Ranch', { 'grotto_id': 0x15, 'entrance': 0x0157, 'room': 0x00, 'angle': 0xAAAB, 'pos': (0x44E0FD92, 0x00000000, 0x44BB9A4C), 'savewarp_fallback': 0x05D4 })),\n ('Grotto', ('SFM Entryway -> SFM Wolfos Grotto', { 'grotto_id': 0x16, 'entrance': 0x05B4, 'content': 0xED, 'scene': 0x56 }),\n ('SFM Wolfos Grotto -> SFM Entryway', { 'grotto_id': 0x16, 'entrance': 0x00FC, 'room': 0x00, 'angle': 0x8000, 'pos': (0xC33DDC64, 0x00000000, 0x44ED42CE), 'savewarp_fallback': 0x00FC })),\n ('Grotto', ('Sacred Forest Meadow -> SFM Storms Grotto', { 'grotto_id': 0x17, 'entrance': 0x05BC, 'content': 0xEE, 'scene': 0x56 }),\n ('SFM Storms Grotto -> Sacred Forest Meadow', { 'grotto_id': 0x17, 'entrance': 0x00FC, 'room': 0x00, 
'angle': 0xAAAB, 'pos': (0x439D6D22, 0x43F00000, 0xC50FC63A), 'savewarp_fallback': 0x0600 })),\n ('Grotto', ('Sacred Forest Meadow -> SFM Fairy Grotto', { 'grotto_id': 0x18, 'entrance': 0x036D, 'content': 0xFF, 'scene': 0x56 }),\n ('SFM Fairy Grotto -> Sacred Forest Meadow', { 'grotto_id': 0x18, 'entrance': 0x00FC, 'room': 0x00, 'angle': 0x0000, 'pos': (0x425C22D1, 0x00000000, 0x434E9835), 'savewarp_fallback': 0x0600 })),\n ('Grotto', ('LW Beyond Mido -> LW Scrubs Grotto', { 'grotto_id': 0x19, 'entrance': 0x05B0, 'content': 0xF5, 'scene': 0x5B }),\n ('LW Scrubs Grotto -> LW Beyond Mido', { 'grotto_id': 0x19, 'entrance': 0x01A9, 'room': 0x08, 'angle': 0x2000, 'pos': (0x44293FA2, 0x00000000, 0xC51DE32B), 'savewarp_fallback': 0x01A9 })),\n ('Grotto', ('Lost Woods -> LW Near Shortcuts Grotto', { 'grotto_id': 0x1A, 'entrance': 0x003F, 'content': 0x14, 'scene': 0x5B }),\n ('LW Near Shortcuts Grotto -> Lost Woods', { 'grotto_id': 0x1A, 'entrance': 0x011E, 'room': 0x02, 'angle': 0xE000, 'pos': (0x4464B055, 0x00000000, 0xC464DB7D), 'savewarp_fallback': 0x04D6 })),\n ('Grotto', ('Kokiri Forest -> KF Storms Grotto', { 'grotto_id': 0x1B, 'entrance': 0x003F, 'content': 0x2C, 'scene': 0x55 }),\n ('KF Storms Grotto -> Kokiri Forest', { 'grotto_id': 0x1B, 'entrance': 0x0286, 'room': 0x00, 'angle': 0x4000, 'pos': (0xC3FD8856, 0x43BE0000, 0xC4988DA8), 'savewarp_fallback': 0x0286 })),\n ('Grotto', ('Zoras Domain -> ZD Storms Grotto', { 'grotto_id': 0x1C, 'entrance': 0x036D, 'content': 0xFF, 'scene': 0x58 }),\n ('ZD Storms Grotto -> Zoras Domain', { 'grotto_id': 0x1C, 'entrance': 0x0108, 'room': 0x01, 'angle': 0xD555, 'pos': (0xC455EB8D, 0x41600000, 0xC3ED3602), 'savewarp_fallback': 0x0108 })),\n ('Grotto', ('GF Entrances Behind Crates -> GF Storms Grotto', { 'grotto_id': 0x1D, 'entrance': 0x036D, 'content': 0xFF, 'scene': 0x5D }),\n ('GF Storms Grotto -> GF Entrances Behind Crates', { 'grotto_id': 0x1D, 'entrance': 0x0129, 'room': 0x00, 'angle': 0x4000, 'pos': (0x43BE42C0, 0x43A68000, 0xC4C317B1), 'savewarp_fallback': 0x0235 })),\n ('Grotto', ('GV Fortress Side -> GV Storms Grotto', { 'grotto_id': 0x1E, 'entrance': 0x05BC, 'content': 0xF0, 'scene': 0x5A }),\n ('GV Storms Grotto -> GV Fortress Side', { 'grotto_id': 0x1E, 'entrance': 0x022D, 'room': 0x00, 'angle': 0x9555, 'pos': (0xC4A5CAD2, 0x41700000, 0xC475FF9B), 'savewarp_fallback': 0x022D })),\n ('Grotto', ('GV Grotto Ledge -> GV Octorok Grotto', { 'grotto_id': 0x1F, 'entrance': 0x05AC, 'content': 0xF2, 'scene': 0x5A }),\n ('GV Octorok Grotto -> GV Grotto Ledge', { 'grotto_id': 0x1F, 'entrance': 0x0117, 'room': 0x00, 'angle': 0x8000, 'pos': (0x4391C1A4, 0xC40AC000, 0x44B8CC9B), 'savewarp_fallback': 0x0117 })), #TODO (out-of-logic access to Gerudo Valley)\n ('Grotto', ('LW Beyond Mido -> Deku Theater', { 'grotto_id': 0x20, 'entrance': 0x05C4, 'content': 0xF3, 'scene': 0x5B }),\n ('Deku Theater -> LW Beyond Mido', { 'grotto_id': 0x20, 'entrance': 0x01A9, 'room': 0x06, 'angle': 0x4000, 'pos': (0x42AA8FDA, 0xC1A00000, 0xC4C82D49), 'savewarp_fallback': 0x01A9 })),\n\n ('Grave', ('Graveyard -> Graveyard Shield Grave', { 'index': 0x004B }),\n ('Graveyard Shield Grave -> Graveyard', { 'index': 0x035D })),\n ('Grave', ('Graveyard -> Graveyard Heart Piece Grave', { 'index': 0x031C }),\n ('Graveyard Heart Piece Grave -> Graveyard', { 'index': 0x0361 })),\n ('Grave', ('Graveyard -> Graveyard Royal Familys Tomb', { 'index': 0x002D }),\n ('Graveyard Royal Familys Tomb -> Graveyard', { 'index': 0x050B })),\n ('Grave', ('Graveyard -> Graveyard Dampes Grave', { 'index': 
0x044F }),\n ('Graveyard Dampes Grave -> Graveyard', { 'index': 0x0359 })),\n\n ('Overworld', ('Kokiri Forest -> LW Bridge From Forest', { 'index': 0x05E0 }),\n ('LW Bridge -> Kokiri Forest', { 'index': 0x020D })),\n ('Overworld', ('Kokiri Forest -> Lost Woods', { 'index': 0x011E }),\n ('LW Forest Exit -> Kokiri Forest', { 'index': 0x0286 })),\n ('Overworld', ('Lost Woods -> GC Woods Warp', { 'index': 0x04E2 }),\n ('GC Woods Warp -> Lost Woods', { 'index': 0x04D6 })),\n ('Overworld', ('Lost Woods -> Zora River', { 'index': 0x01DD }),\n ('Zora River -> LW Underwater Entrance', { 'index': 0x04DA })),\n ('Overworld', ('LW Beyond Mido -> SFM Entryway', { 'index': 0x00FC }),\n ('SFM Entryway -> LW Beyond Mido', { 'index': 0x01A9 })),\n ('Overworld', ('LW Bridge -> Hyrule Field', { 'index': 0x0185 }),\n ('Hyrule Field -> LW Bridge', { 'index': 0x04DE })),\n ('Overworld', ('Hyrule Field -> Lake Hylia', { 'index': 0x0102 }),\n ('Lake Hylia -> Hyrule Field', { 'index': 0x0189 })),\n ('Overworld', ('Hyrule Field -> Gerudo Valley', { 'index': 0x0117 }),\n ('Gerudo Valley -> Hyrule Field', { 'index': 0x018D })),\n ('Overworld', ('Hyrule Field -> Market Entrance', { 'index': 0x0276 }),\n ('Market Entrance -> Hyrule Field', { 'index': 0x01FD })),\n ('Overworld', ('Hyrule Field -> Kakariko Village', { 'index': 0x00DB }),\n ('Kakariko Village -> Hyrule Field', { 'index': 0x017D })),\n ('Overworld', ('Hyrule Field -> ZR Front', { 'index': 0x00EA }),\n ('ZR Front -> Hyrule Field', { 'index': 0x0181 })),\n ('Overworld', ('Hyrule Field -> Lon Lon Ranch', { 'index': 0x0157 }),\n ('Lon Lon Ranch -> Hyrule Field', { 'index': 0x01F9 })),\n ('Overworld', ('Lake Hylia -> Zoras Domain', { 'index': 0x0328 }),\n ('Zoras Domain -> Lake Hylia', { 'index': 0x0560 })),\n ('Overworld', ('GV Fortress Side -> Gerudo Fortress', { 'index': 0x0129 }),\n ('Gerudo Fortress -> GV Fortress Side', { 'index': 0x022D })),\n ('Overworld', ('GF Outside Gate -> Wasteland Near Fortress', { 'index': 0x0130 }),\n ('Wasteland Near Fortress -> GF Outside Gate', { 'index': 0x03AC })),\n ('Overworld', ('Wasteland Near Colossus -> Desert Colossus', { 'index': 0x0123 }),\n ('Desert Colossus -> Wasteland Near Colossus', { 'index': 0x0365 })),\n ('Overworld', ('Market Entrance -> Market', { 'index': 0x00B1 }),\n ('Market -> Market Entrance', { 'index': 0x0033 })),\n ('Overworld', ('Market -> Castle Grounds', { 'index': 0x0138 }),\n ('Castle Grounds -> Market', { 'index': 0x025A })),\n ('Overworld', ('Market -> ToT Entrance', { 'index': 0x0171 }),\n ('ToT Entrance -> Market', { 'index': 0x025E })),\n ('Overworld', ('Kakariko Village -> Graveyard', { 'index': 0x00E4 }),\n ('Graveyard -> Kakariko Village', { 'index': 0x0195 })),\n ('Overworld', ('Kak Behind Gate -> Death Mountain', { 'index': 0x013D }),\n ('Death Mountain -> Kak Behind Gate', { 'index': 0x0191 })),\n ('Overworld', ('Death Mountain -> Goron City', { 'index': 0x014D }),\n ('Goron City -> Death Mountain', { 'index': 0x01B9 })),\n ('Overworld', ('GC Darunias Chamber -> DMC Lower Local', { 'index': 0x0246 }),\n ('DMC Lower Nearby -> GC Darunias Chamber', { 'index': 0x01C1 })),\n ('Overworld', ('Death Mountain Summit -> DMC Upper Local', { 'index': 0x0147 }),\n ('DMC Upper Nearby -> Death Mountain Summit', { 'index': 0x01BD })),\n ('Overworld', ('ZR Behind Waterfall -> Zoras Domain', { 'index': 0x0108 }),\n ('Zoras Domain -> ZR Behind Waterfall', { 'index': 0x019D })),\n ('Overworld', ('ZD Behind King Zora -> Zoras Fountain', { 'index': 0x0225 }),\n ('Zoras Fountain -> ZD Behind King Zora', 
{ 'index': 0x01A1 })),\n\n ('OverworldOneWay', ('GV Lower Stream -> Lake Hylia', { 'index': 0x0219 })),\n\n ('OwlDrop', ('LH Owl Flight -> Hyrule Field', { 'index': 0x027E, 'addresses': [0xAC9F26] })),\n ('OwlDrop', ('DMT Owl Flight -> Kak Impas Rooftop', { 'index': 0x0554, 'addresses': [0xAC9EF2] })),\n\n ('Spawn', ('Child Spawn -> KF Links House', { 'index': 0x00BB, 'addresses': [0xB06342] })),\n ('Spawn', ('Adult Spawn -> Temple of Time', { 'index': 0x05F4, 'addresses': [0xB06332] })),\n\n ('WarpSong', ('Minuet of Forest Warp -> Sacred Forest Meadow', { 'index': 0x0600, 'addresses': [0xBF023C] })),\n ('WarpSong', ('Bolero of Fire Warp -> DMC Central Local', { 'index': 0x04F6, 'addresses': [0xBF023E] })),\n ('WarpSong', ('Serenade of Water Warp -> Lake Hylia', { 'index': 0x0604, 'addresses': [0xBF0240] })),\n ('WarpSong', ('Requiem of Spirit Warp -> Desert Colossus', { 'index': 0x01F1, 'addresses': [0xBF0242] })),\n ('WarpSong', ('Nocturne of Shadow Warp -> Graveyard Warp Pad Region', { 'index': 0x0568, 'addresses': [0xBF0244] })),\n ('WarpSong', ('Prelude of Light Warp -> Temple of Time', { 'index': 0x05F4, 'addresses': [0xBF0246] })),\n\n ('BlueWarp', ('Queen Gohma Boss Room -> KF Outside Deku Tree', { 'index': 0x0457, 'addresses': [0xAC93A2, 0xCA3142] })),\n ('BlueWarp', ('King Dodongo Boss Room -> Death Mountain', { 'index': 0x047A, 'addresses': [0xAC9336, 0xCA30CA] })),\n ('BlueWarp', ('Barinade Boss Room -> Zoras Fountain', { 'index': 0x010E, 'addresses': [0xAC936A, 0xCA31B2] })),\n ('BlueWarp', ('Phantom Ganon Boss Room -> Sacred Forest Meadow', { 'index': 0x0608, 'addresses': [0xAC9F96, 0xCA3D66, 0xCA3D5A], 'child_index': 0x0600 })),\n ('BlueWarp', ('Volvagia Boss Room -> DMC Central Local', { 'index': 0x0564, 'addresses': [0xACA516, 0xCA3DF2, 0xCA3DE6], 'child_index': 0x04F6 })),\n ('BlueWarp', ('Morpha Boss Room -> Lake Hylia', { 'index': 0x060C, 'addresses': [0xAC995A, 0xCA3E82, 0xCA3E76], 'child_index': 0x0604 })),\n ('BlueWarp', ('Bongo Bongo Boss Room -> Graveyard Warp Pad Region', { 'index': 0x0580, 'addresses': [0xACA496, 0xCA3FA2, 0xCA3F96], 'child_index': 0x0568 })),\n ('BlueWarp', ('Twinrova Boss Room -> Desert Colossus', { 'index': 0x0610, 'addresses': [0xACA402, 0xCA3F12, 0xCA3F06], 'child_index': 0x01F1 })),\n\n ('Extra', ('ZD Eyeball Frog Timeout -> Zoras Domain', { 'index': 0x0153 })),\n ('Extra', ('ZR Top of Waterfall -> Zora River', { 'index': 0x0199 })),\n]\n\n\n# Basically, the entrances in the list above that go to:\n# - DMC Central Local (child access for the bean and skull)\n# - Desert Colossus (child access to colossus and spirit)\n# - Graveyard Warp Pad Region (access to shadow, plus the gossip stone)\n# We will always need to pick one from each list to receive a one-way entrance\n# if shuffling warp songs (depending on other settings).\n# Table maps: short key -> ([target regions], [allowed types])\npriority_entrance_table = {\n 'Bolero': (['DMC Central Local'], ['OwlDrop', 'WarpSong', 'OverworldOneWay']),\n 'Nocturne': (['Graveyard Warp Pad Region'], ['OwlDrop', 'Spawn', 'WarpSong', 'OverworldOneWay']),\n 'Requiem': (['Desert Colossus', 'Desert Colossus From Spirit Lobby'], ['OwlDrop', 'Spawn', 'WarpSong', 'OverworldOneWay']),\n}\n\n\nclass EntranceShuffleError(ShuffleError):\n pass\n\n\n# Set entrances of all worlds, first initializing them to their default regions, then potentially shuffling part of them\ndef set_entrances(worlds: list[World], savewarps_to_connect: list[tuple[Entrance, str]]) -> None:\n for world in worlds:\n 
world.initialize_entrances()\n\n for savewarp, replaces in savewarps_to_connect:\n savewarp.replaces = savewarp.world.get_entrance(replaces)\n savewarp.connect(savewarp.replaces.connected_region)\n\n for world in worlds:\n if world.settings.logic_rules != 'glitched':\n # Set entrance data for all entrances, even those we aren't shuffling\n set_all_entrances_data(world)\n\n if worlds[0].entrance_shuffle:\n shuffle_random_entrances(worlds)\n\n set_entrances_based_rules(worlds)\n\n\n# Shuffles entrances that need to be shuffled in all worlds\ndef shuffle_random_entrances(worlds: list[World]) -> None:\n # Store all locations reachable before shuffling to differentiate which locations were already unreachable from those we made unreachable\n complete_itempool = [item for world in worlds for item in world.get_itempool_with_dungeon_items()]\n max_search = Search.max_explore([world.state for world in worlds], complete_itempool)\n\n non_drop_locations = [location for world in worlds for location in world.get_locations() if location.type not in ('Drop', 'Event')]\n max_search.visit_locations(non_drop_locations)\n locations_to_ensure_reachable = list(filter(max_search.visited, non_drop_locations))\n placed_one_way_entrances = None\n\n # Shuffle all entrances within their own worlds\n for world in worlds:\n # Determine entrance pools based on settings, to be shuffled in the order we set them by\n one_way_entrance_pools = OrderedDict()\n entrance_pools = OrderedDict()\n one_way_priorities = {}\n\n if worlds[0].settings.shuffle_gerudo_valley_river_exit:\n one_way_entrance_pools['OverworldOneWay'] = world.get_shufflable_entrances(type='OverworldOneWay')\n\n if worlds[0].settings.owl_drops:\n one_way_entrance_pools['OwlDrop'] = world.get_shufflable_entrances(type='OwlDrop')\n\n if worlds[0].settings.spawn_positions:\n one_way_entrance_pools['Spawn'] = world.get_shufflable_entrances(type='Spawn')\n if 'child' not in worlds[0].settings.spawn_positions:\n one_way_entrance_pools['Spawn'].remove(world.get_entrance('Child Spawn -> KF Links House'))\n elif 'adult' not in worlds[0].settings.spawn_positions:\n one_way_entrance_pools['Spawn'].remove(world.get_entrance('Adult Spawn -> Temple of Time'))\n\n if worlds[0].settings.warp_songs:\n one_way_entrance_pools['WarpSong'] = world.get_shufflable_entrances(type='WarpSong')\n if worlds[0].settings.reachable_locations != 'beatable' and worlds[0].settings.logic_rules == 'glitchless':\n # In glitchless, there aren't any other ways to access these areas\n wincons = [worlds[0].settings.bridge, worlds[0].settings.shuffle_ganon_bosskey]\n if worlds[0].settings.shuffle_ganon_bosskey == 'on_lacs':\n wincons.append(worlds[0].settings.lacs_condition)\n if (\n worlds[0].settings.reachable_locations == 'all'\n or ('tokens' in wincons and worlds[0].settings.tokensanity in ('off', 'dungeons'))\n ):\n one_way_priorities['Bolero'] = priority_entrance_table['Bolero']\n if (\n worlds[0].settings.reachable_locations == 'all'\n or 'dungeons' in wincons\n or ('stones' in wincons and 'medallions' in wincons)\n or ('tokens' in wincons and worlds[0].settings.tokensanity in ('off', 'overworld'))\n ):\n one_way_priorities['Nocturne'] = priority_entrance_table['Nocturne']\n if (\n not worlds[0].shuffle_dungeon_entrances\n and not worlds[0].settings.shuffle_overworld_entrances\n and not worlds[0].shuffle_special_interior_entrances\n and (\n worlds[0].settings.reachable_locations == 'all'\n or 'dungeons' in wincons\n or ('stones' in wincons and 'medallions' in wincons)\n or ('tokens' in wincons 
and worlds[0].settings.tokensanity != 'all')\n )\n ):\n one_way_priorities['Requiem'] = priority_entrance_table['Requiem']\n\n if worlds[0].settings.shuffle_bosses == 'full':\n entrance_pools['Boss'] = world.get_shufflable_entrances(type='ChildBoss', only_primary=True)\n entrance_pools['Boss'] += world.get_shufflable_entrances(type='AdultBoss', only_primary=True)\n if worlds[0].settings.open_forest == 'closed':\n # Deku is forced vanilla below, so Queen Gohma must be vanilla to ensure she is reachable.\n # This is already enforced by the fill algorithm in most cases, but this covers the odd settings combination where it isn't.\n entrance_pools['Boss'].remove(world.get_entrance('Deku Tree Before Boss -> Queen Gohma Boss Room'))\n elif worlds[0].settings.shuffle_bosses == 'limited':\n entrance_pools['ChildBoss'] = world.get_shufflable_entrances(type='ChildBoss', only_primary=True)\n entrance_pools['AdultBoss'] = world.get_shufflable_entrances(type='AdultBoss', only_primary=True)\n if worlds[0].settings.open_forest == 'closed':\n # Deku is forced vanilla below, so Queen Gohma must be vanilla to ensure she is reachable.\n # This is already enforced by the fill algorithm in most cases, but this covers the odd settings combination where it isn't.\n entrance_pools['ChildBoss'].remove(world.get_entrance('Deku Tree Before Boss -> Queen Gohma Boss Room'))\n\n if worlds[0].shuffle_dungeon_entrances:\n entrance_pools['Dungeon'] = world.get_shufflable_entrances(type='Dungeon', only_primary=True)\n # The fill algorithm will already make sure gohma is reachable, however it can end up putting\n # a forest escape via the hands of spirit on Deku leading to Deku on spirit in logic. This is\n # not really a closed forest anymore, so specifically remove Deku Tree from closed forest.\n if worlds[0].settings.open_forest == 'closed':\n entrance_pools['Dungeon'].remove(world.get_entrance('KF Outside Deku Tree -> Deku Tree Lobby'))\n if worlds[0].shuffle_special_dungeon_entrances:\n entrance_pools['Dungeon'] += world.get_shufflable_entrances(type='DungeonSpecial', only_primary=True)\n if worlds[0].settings.decouple_entrances:\n entrance_pools['DungeonReverse'] = [entrance.reverse for entrance in entrance_pools['Dungeon']]\n\n if worlds[0].shuffle_interior_entrances:\n entrance_pools['Interior'] = world.get_shufflable_entrances(type='Interior', only_primary=True)\n if worlds[0].shuffle_special_interior_entrances:\n entrance_pools['Interior'] += world.get_shufflable_entrances(type='SpecialInterior', only_primary=True)\n if worlds[0].settings.shuffle_hideout_entrances:\n entrance_pools['Interior'] += world.get_shufflable_entrances(type='Hideout', only_primary=True)\n if worlds[0].settings.decouple_entrances:\n entrance_pools['InteriorReverse'] = [entrance.reverse for entrance in entrance_pools['Interior']]\n\n if worlds[0].settings.shuffle_grotto_entrances:\n entrance_pools['GrottoGrave'] = world.get_shufflable_entrances(type='Grotto', only_primary=True)\n entrance_pools['GrottoGrave'] += world.get_shufflable_entrances(type='Grave', only_primary=True)\n if worlds[0].settings.decouple_entrances:\n entrance_pools['GrottoGraveReverse'] = [entrance.reverse for entrance in entrance_pools['GrottoGrave']]\n\n if worlds[0].settings.shuffle_overworld_entrances:\n exclude_overworld_reverse = ('Overworld' in worlds[0].settings.mix_entrance_pools) and not worlds[0].settings.decouple_entrances\n entrance_pools['Overworld'] = world.get_shufflable_entrances(type='Overworld', only_primary=exclude_overworld_reverse)\n\n # Set shuffled 
entrances as such\n for entrance in list(chain.from_iterable(one_way_entrance_pools.values())) + list(chain.from_iterable(entrance_pools.values())):\n entrance.shuffled = True\n if entrance.reverse:\n entrance.reverse.shuffled = True\n\n # Combine all entrance pools into one when mixing entrance pools\n mixed_entrance_pools = []\n for pool in worlds[0].settings.mix_entrance_pools:\n mixed_entrance_pools.append(pool)\n if pool != 'Overworld' and worlds[0].settings.decouple_entrances:\n mixed_entrance_pools.append(pool + 'Reverse')\n\n if len(mixed_entrance_pools) > 1:\n entrance_pools['Mixed'] = []\n for pool in mixed_entrance_pools:\n entrance_pools['Mixed'] += entrance_pools.pop(pool, [])\n\n # Build target entrance pools and set the assumption for entrances being reachable\n one_way_target_entrance_pools = {}\n for pool_type, entrance_pool in one_way_entrance_pools.items():\n # One way entrances are extra entrances that will be connected to entrance positions from a selection of entrance pools\n if pool_type == 'OverworldOneWay':\n valid_target_types = ('WarpSong', 'BlueWarp', 'OwlDrop', 'OverworldOneWay', 'Overworld', 'Extra')\n one_way_target_entrance_pools[pool_type] = build_one_way_targets(world, valid_target_types, exclude=['Prelude of Light Warp -> Temple of Time'])\n elif pool_type == 'OwlDrop':\n valid_target_types = ('WarpSong', 'BlueWarp', 'OwlDrop', 'OverworldOneWay', 'Overworld', 'Extra')\n one_way_target_entrance_pools[pool_type] = build_one_way_targets(world, valid_target_types, exclude=['Prelude of Light Warp -> Temple of Time'])\n for target in one_way_target_entrance_pools[pool_type]:\n target.set_rule(lambda state, age=None, **kwargs: age == 'child')\n elif pool_type == 'Spawn':\n valid_target_types = ('Spawn', 'WarpSong', 'BlueWarp', 'OwlDrop', 'OverworldOneWay', 'Overworld', 'Interior', 'SpecialInterior', 'Extra')\n # Restrict spawn entrances from linking to regions with no or extremely specific glitchless itemless escapes.\n one_way_target_entrance_pools[pool_type] = build_one_way_targets(world, valid_target_types, exclude=['Volvagia Boss Room -> DMC Central Local', 'Bolero of Fire Warp -> DMC Central Local', 'Queen Gohma Boss Room -> KF Outside Deku Tree'])\n elif pool_type == 'WarpSong':\n valid_target_types = ('Spawn', 'WarpSong', 'BlueWarp', 'OwlDrop', 'OverworldOneWay', 'Overworld', 'Interior', 'SpecialInterior', 'Extra')\n one_way_target_entrance_pools[pool_type] = build_one_way_targets(world, valid_target_types)\n # Ensure that when trying to place the last entrance of a one way pool, we don't assume the rest of the targets are reachable\n for target in one_way_target_entrance_pools[pool_type]:\n target.add_rule((lambda entrances=entrance_pool: (lambda state, **kwargs: any(\n entrance.connected_region is None for entrance in entrances)))())\n # Disconnect all one way entrances at this point (they need to be connected during all of the above process)\n for entrance in chain.from_iterable(one_way_entrance_pools.values()):\n entrance.disconnect()\n\n target_entrance_pools = {}\n for pool_type, entrance_pool in entrance_pools.items():\n target_entrance_pools[pool_type] = assume_entrance_pool(entrance_pool)\n\n # Set entrances defined in the distribution\n world.distribution.set_shuffled_entrances(worlds, {**one_way_entrance_pools, **entrance_pools}, {**one_way_target_entrance_pools, **target_entrance_pools}, locations_to_ensure_reachable, complete_itempool)\n\n # Check placed one way entrances and trim.\n # The placed entrances are already pointing at their new 
regions.\n placed_entrances = [entrance for entrance in chain.from_iterable(one_way_entrance_pools.values())\n if entrance.replaces is not None]\n replaced_entrances = [entrance.replaces for entrance in placed_entrances]\n # Remove replaced entrances so we don't place two in one target.\n for remaining_target in chain.from_iterable(one_way_target_entrance_pools.values()):\n if remaining_target.replaces and remaining_target.replaces in replaced_entrances:\n delete_target_entrance(remaining_target)\n # Remove priority targets if any placed entrances point at their region(s).\n for key, (regions, _) in priority_entrance_table.items():\n if key in one_way_priorities:\n for entrance in placed_entrances:\n if entrance.connected_region and entrance.connected_region.name in regions:\n del one_way_priorities[key]\n break\n\n # Place priority entrances\n placed_one_way_entrances = shuffle_one_way_priority_entrances(worlds, world, one_way_priorities, one_way_entrance_pools, one_way_target_entrance_pools, locations_to_ensure_reachable, complete_itempool, retry_count=2)\n\n # Delete all targets that we just placed from one way target pools so multiple one way entrances don't use the same target\n replaced_entrances = [entrance.replaces for entrance in chain.from_iterable(one_way_entrance_pools.values())]\n for remaining_target in chain.from_iterable(one_way_target_entrance_pools.values()):\n if remaining_target.replaces in replaced_entrances:\n delete_target_entrance(remaining_target)\n\n # Shuffle all entrances among the pools to shuffle\n for pool_type, entrance_pool in one_way_entrance_pools.items():\n placed_one_way_entrances += shuffle_entrance_pool(world, worlds, entrance_pool, one_way_target_entrance_pools[pool_type], locations_to_ensure_reachable, check_all=True, placed_one_way_entrances=placed_one_way_entrances)\n # Delete all targets that we just placed from other one way target pools so multiple one way entrances don't use the same target\n replaced_entrances = [entrance.replaces for entrance in entrance_pool]\n for remaining_target in chain.from_iterable(one_way_target_entrance_pools.values()):\n if remaining_target.replaces in replaced_entrances:\n delete_target_entrance(remaining_target)\n # Delete all unused extra targets after placing a one way pool, since the unused targets won't ever be replaced\n for unused_target in one_way_target_entrance_pools[pool_type]:\n delete_target_entrance(unused_target)\n\n for pool_type, entrance_pool in entrance_pools.items():\n shuffle_entrance_pool(world, worlds, entrance_pool, target_entrance_pools[pool_type], locations_to_ensure_reachable, placed_one_way_entrances=placed_one_way_entrances)\n\n # Determine blue warp targets\n # if a boss room is inside a boss door, make the blue warp go outside the dungeon's entrance\n boss_exits = {\n 'Queen Gohma Boss Room -> Deku Tree Before Boss': world.get_entrance('Deku Tree Lobby -> KF Outside Deku Tree'),\n 'King Dodongo Boss Room -> Dodongos Cavern Mouth': world.get_entrance('Dodongos Cavern Beginning -> Death Mountain'),\n 'Barinade Boss Room -> Jabu Jabus Belly Before Boss': world.get_entrance('Jabu Jabus Belly Beginning -> Zoras Fountain'),\n 'Phantom Ganon Boss Room -> Forest Temple Before Boss': world.get_entrance('Forest Temple Lobby -> SFM Forest Temple Entrance Ledge'),\n 'Volvagia Boss Room -> Fire Temple Before Boss': world.get_entrance('Fire Temple Lower -> DMC Fire Temple Entrance'),\n 'Morpha Boss Room -> Water Temple Before Boss': world.get_entrance('Water Temple Lobby -> Lake Hylia'),\n 'Bongo 
Bongo Boss Room -> Shadow Temple Before Boss': world.get_entrance('Shadow Temple Entryway -> Graveyard Warp Pad Region'),\n 'Twinrova Boss Room -> Spirit Temple Before Boss': world.get_entrance('Spirit Temple Lobby -> Desert Colossus From Spirit Lobby'),\n }\n # if a boss room is inside a dungeon entrance (or inside a dungeon which is inside a dungeon entrance), make the blue warp go to that dungeon's blue warp target\n dungeon_exits = {\n 'Deku Tree Lobby -> KF Outside Deku Tree': world.get_entrance('Queen Gohma Boss Room -> KF Outside Deku Tree'),\n 'Dodongos Cavern Beginning -> Death Mountain': world.get_entrance('King Dodongo Boss Room -> Death Mountain'),\n 'Jabu Jabus Belly Beginning -> Zoras Fountain': world.get_entrance('Barinade Boss Room -> Zoras Fountain'),\n 'Forest Temple Lobby -> SFM Forest Temple Entrance Ledge': world.get_entrance('Phantom Ganon Boss Room -> Sacred Forest Meadow'),\n 'Fire Temple Lower -> DMC Fire Temple Entrance': world.get_entrance('Volvagia Boss Room -> DMC Central Local'),\n 'Water Temple Lobby -> Lake Hylia': world.get_entrance('Morpha Boss Room -> Lake Hylia'),\n 'Shadow Temple Entryway -> Graveyard Warp Pad Region': world.get_entrance('Bongo Bongo Boss Room -> Graveyard Warp Pad Region'),\n 'Spirit Temple Lobby -> Desert Colossus From Spirit Lobby': world.get_entrance('Twinrova Boss Room -> Desert Colossus'),\n }\n\n for (blue_warp, boss_door_exit) in (\n (world.get_entrance('Queen Gohma Boss Room -> KF Outside Deku Tree'), world.get_entrance('Queen Gohma Boss Room -> Deku Tree Before Boss')),\n (world.get_entrance('King Dodongo Boss Room -> Death Mountain'), world.get_entrance('King Dodongo Boss Room -> Dodongos Cavern Mouth')),\n (world.get_entrance('Barinade Boss Room -> Zoras Fountain'), world.get_entrance('Barinade Boss Room -> Jabu Jabus Belly Before Boss')),\n (world.get_entrance('Phantom Ganon Boss Room -> Sacred Forest Meadow'), world.get_entrance('Phantom Ganon Boss Room -> Forest Temple Before Boss')),\n (world.get_entrance('Volvagia Boss Room -> DMC Central Local'), world.get_entrance('Volvagia Boss Room -> Fire Temple Before Boss')),\n (world.get_entrance('Morpha Boss Room -> Lake Hylia'), world.get_entrance('Morpha Boss Room -> Water Temple Before Boss')),\n (world.get_entrance('Bongo Bongo Boss Room -> Graveyard Warp Pad Region'), world.get_entrance('Bongo Bongo Boss Room -> Shadow Temple Before Boss')),\n (world.get_entrance('Twinrova Boss Room -> Desert Colossus'), world.get_entrance('Twinrova Boss Room -> Spirit Temple Before Boss')),\n ):\n target = boss_door_exit.replaces or boss_door_exit\n if True: #TODO not world.settings.decouple_entrances\n while target.name in boss_exits:\n target = boss_exits[target.name].replaces or boss_exits[target.name]\n if target.name in dungeon_exits:\n target = dungeon_exits[target.name]\n blue_warp.connect(world.get_region(target.name.split(' -> ')[1]))\n blue_warp.replaces = target\n\n\n # Multiple checks after shuffling entrances to make sure everything went fine\n max_search = Search.max_explore([world.state for world in worlds], complete_itempool)\n\n # Check that all shuffled entrances are properly connected to a region\n for world in worlds:\n for entrance in world.get_shuffled_entrances():\n if entrance.connected_region is None:\n logging.getLogger('').error('%s was shuffled but still isn\\'t connected to any region [World %d]', entrance, world.id)\n if entrance.replaces is None:\n logging.getLogger('').error('%s was shuffled but still doesn\\'t replace any entrance [World %d]', entrance, 
world.id)\n if len(world.get_region('Root Exits').exits) > 8:\n for exit in world.get_region('Root Exits').exits:\n logging.getLogger('').error('Root Exit: %s, Connected Region: %s', exit, exit.connected_region)\n raise RuntimeError('Something went wrong, Root has too many entrances left after shuffling entrances [World %d]' % world.id)\n\n # Check for game beatability in all worlds\n if not max_search.can_beat_game(False):\n raise EntranceShuffleError('Cannot beat game!')\n\n # Validate the worlds one last time to ensure all special conditions are still valid\n for world in worlds:\n try:\n validate_world(world, worlds, None, locations_to_ensure_reachable, complete_itempool, placed_one_way_entrances=placed_one_way_entrances)\n except EntranceShuffleError as error:\n raise EntranceShuffleError('Worlds are not valid after shuffling entrances, Reason: %s' % error)\n\n\ndef shuffle_one_way_priority_entrances(worlds: list[World], world: World, one_way_priorities: dict[str, tuple[list[str], list[str]]],\n one_way_entrance_pools: dict[str, list[Entrance]], one_way_target_entrance_pools: dict[str, list[Entrance]],\n locations_to_ensure_reachable: Iterable[Location], complete_itempool: list[Item],\n retry_count: int = 2) -> list[tuple[Entrance, Entrance]]:\n while retry_count:\n retry_count -= 1\n rollbacks = []\n\n try:\n for key, (regions, types) in one_way_priorities.items():\n place_one_way_priority_entrance(worlds, world, key, regions, types, rollbacks, locations_to_ensure_reachable, complete_itempool, one_way_entrance_pools, one_way_target_entrance_pools)\n\n # If all entrances could be connected without issues, log connections and continue\n for entrance, target in rollbacks:\n confirm_replacement(entrance, target)\n return rollbacks\n\n except EntranceShuffleError as error:\n for entrance, target in rollbacks:\n restore_connections(entrance, target)\n logging.getLogger('').info('Failed to place all priority one-way entrances for world %d. Will retry %d more times', world.id, retry_count)\n logging.getLogger('').info('\\t%s' % error)\n\n if world.settings.custom_seed:\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Ensure the \\\"Seed\\\" field is empty and retry a few times.' % world.id)\n if world.settings.distribution_file:\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Some entrances in the Plandomizer File may have to be changed to create a valid seed. Reach out to Support on Discord for help.' % world.id)\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Retry a few times or reach out to Support on Discord for help.' % world.id)\n\n\n# Shuffle all entrances within a provided pool\ndef shuffle_entrance_pool(world: World, worlds: list[World], entrance_pool: list[Entrance], target_entrances: list[Entrance],\n locations_to_ensure_reachable: Iterable[Location], check_all: bool = False, retry_count: int = 20,\n placed_one_way_entrances: Optional[list[tuple[Entrance, Entrance]]] = None) -> list[tuple[Entrance, Entrance]]:\n if placed_one_way_entrances is None:\n placed_one_way_entrances = []\n # Split entrances between those that have requirements (restrictive) and those that do not (soft). 
These are primarily age or time of day requirements.\n restrictive_entrances, soft_entrances = split_entrances_by_requirements(worlds, entrance_pool, target_entrances)\n\n while retry_count:\n retry_count -= 1\n rollbacks = []\n\n try:\n # Shuffle restrictive entrances first while more regions are available in order to heavily reduce the chances of the placement failing.\n shuffle_entrances(worlds, restrictive_entrances, target_entrances, rollbacks, locations_to_ensure_reachable, placed_one_way_entrances=placed_one_way_entrances)\n\n # Shuffle the rest of the entrances, we don't have to check for beatability/reachability of locations when placing those, unless specified otherwise\n if check_all:\n shuffle_entrances(worlds, soft_entrances, target_entrances, rollbacks, locations_to_ensure_reachable, placed_one_way_entrances=placed_one_way_entrances)\n else:\n shuffle_entrances(worlds, soft_entrances, target_entrances, rollbacks, placed_one_way_entrances=placed_one_way_entrances)\n\n # Fully validate the resulting world to ensure everything is still fine after shuffling this pool\n complete_itempool = [item for world in worlds for item in world.get_itempool_with_dungeon_items()]\n validate_world(world, worlds, None, locations_to_ensure_reachable, complete_itempool, placed_one_way_entrances=placed_one_way_entrances)\n\n # If all entrances could be connected without issues, log connections and continue\n for entrance, target in rollbacks:\n confirm_replacement(entrance, target)\n return rollbacks\n\n except EntranceShuffleError as error:\n for entrance, target in rollbacks:\n restore_connections(entrance, target)\n logging.getLogger('').info('Failed to place all entrances in a pool for world %d. Will retry %d more times', entrance_pool[0].world.id, retry_count)\n logging.getLogger('').info('\\t%s' % error)\n\n if world.settings.custom_seed:\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Ensure the \\\"Seed\\\" field is empty and retry a few times.' % world.id)\n if world.settings.distribution_file:\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Some entrances in the Plandomizer File may have to be changed to create a valid seed. Reach out to Support on Discord for help.' % world.id)\n raise EntranceShuffleError('Entrance placement attempt count exceeded for world %d. Retry a few times or reach out to Support on Discord for help.' 
% world.id)\n\n\n# Split entrances based on their requirements to figure out how each entrance should be handled when shuffling them\ndef split_entrances_by_requirements(worlds: list[World], entrances_to_split: list[Entrance], assumed_entrances: list[Entrance]) -> tuple[list[Entrance], list[Entrance]]:\n # First, disconnect all root assumed entrances and save which regions they were originally connected to, so we can reconnect them later\n original_connected_regions = {}\n entrances_to_disconnect = set(assumed_entrances).union(entrance.reverse for entrance in assumed_entrances if entrance.reverse)\n for entrance in entrances_to_disconnect:\n if entrance.connected_region:\n original_connected_regions[entrance] = entrance.disconnect()\n\n # Generate the states with all assumed entrances disconnected\n # This ensures no assumed entrances corresponding to those we are shuffling are required in order for an entrance to be reachable as some age/tod\n complete_itempool = [item for world in worlds for item in world.get_itempool_with_dungeon_items()]\n max_search = Search.max_explore([world.state for world in worlds], complete_itempool)\n\n restrictive_entrances = []\n soft_entrances = []\n\n for entrance in entrances_to_split:\n # Here, we find entrances that may be unreachable under certain conditions\n if not max_search.spot_access(entrance, age='both', tod=TimeOfDay.ALL):\n restrictive_entrances.append(entrance)\n continue\n # If an entrance is reachable as both ages and all times of day with all the other entrances disconnected,\n # then it can always be made accessible in all situations by the Fill algorithm, no matter which combination of entrances we end up with.\n # Thus, those entrances aren't bound to any specific requirements and are very versatile during placement.\n soft_entrances.append(entrance)\n\n # Reconnect all disconnected entrances afterwards\n for entrance in entrances_to_disconnect:\n if entrance in original_connected_regions:\n entrance.connect(original_connected_regions[entrance])\n\n return restrictive_entrances, soft_entrances\n\n\ndef replace_entrance(worlds: list[World], entrance: Entrance, target: Entrance, rollbacks: list[tuple[Entrance, Entrance]],\n locations_to_ensure_reachable: Iterable[Location], itempool: list[Item], placed_one_way_entrances: Optional[list[tuple[Entrance, Entrance]]] = None) -> bool:\n if placed_one_way_entrances is None:\n placed_one_way_entrances = []\n try:\n check_entrances_compatibility(entrance, target, rollbacks, placed_one_way_entrances)\n change_connections(entrance, target)\n validate_world(entrance.world, worlds, entrance, locations_to_ensure_reachable, itempool, placed_one_way_entrances=placed_one_way_entrances)\n rollbacks.append((entrance, target))\n return True\n except EntranceShuffleError as error:\n # If the entrance can't be placed there, log a debug message and change the connections back to what they were previously\n logging.getLogger('').debug('Failed to connect %s To %s (Reason: %s) [World %d]',\n entrance, entrance.connected_region or target.connected_region, error, entrance.world.id)\n if entrance.connected_region:\n restore_connections(entrance, target)\n return False\n\n\n# Connect one random entrance from entrance pools to one random target in the respective target pool.\n# Entrance chosen will have one of the allowed types.\n# Target chosen will lead to one of the allowed regions.\ndef place_one_way_priority_entrance(worlds: list[World], world: World, priority_name: str, allowed_regions: Container[str], 
allowed_types: Iterable[str],\n rollbacks: list[tuple[Entrance, Entrance]], locations_to_ensure_reachable: Iterable[Location], complete_itempool: list[Item],\n one_way_entrance_pools: dict[str, list[Entrance]], one_way_target_entrance_pools: dict[str, list[Entrance]]) -> None:\n # Combine the entrances for allowed types in one list.\n # Shuffle this list.\n # Pick the first one not already set, not adult spawn, that has a valid target entrance.\n # Assemble then clear entrances from the pool and target pools as appropriate.\n avail_pool = list(chain.from_iterable(one_way_entrance_pools[t] for t in allowed_types if t in one_way_entrance_pools))\n random.shuffle(avail_pool)\n for entrance in avail_pool:\n if entrance.replaces:\n continue\n # Only allow Adult Spawn as sole Nocturne access if hints != mask.\n # Otherwise, child access is required here (adult access assumed or guaranteed later).\n if entrance.parent_region.name == 'Adult Spawn':\n if priority_name != 'Nocturne' or entrance.world.settings.hints == \"mask\":\n continue\n # If not shuffling dungeons, Nocturne requires adult access.\n if not entrance.world.shuffle_dungeon_entrances and priority_name == 'Nocturne':\n if entrance.type not in ('OverworldOneWay', 'WarpSong') and entrance.parent_region.name != 'Adult Spawn':\n continue\n for target in one_way_target_entrance_pools[entrance.type]:\n if target.connected_region and target.connected_region.name in allowed_regions:\n if replace_entrance(worlds, entrance, target, rollbacks, locations_to_ensure_reachable, complete_itempool):\n logging.getLogger('').debug(f'Priority placement for {priority_name}: placing {entrance} as {target}')\n return\n raise EntranceShuffleError(f'Unable to place priority one-way entrance for {priority_name} [World {world.id}].')\n\n\n# Shuffle entrances by placing them instead of entrances in the provided target entrances list\n# While shuffling entrances, the algorithm will ensure worlds are still valid based on multiple criterias\ndef shuffle_entrances(worlds: list[World], entrances: list[Entrance], target_entrances: list[Entrance], rollbacks: list[tuple[Entrance, Entrance]],\n locations_to_ensure_reachable: Iterable[Location] = (), placed_one_way_entrances: Optional[list[tuple[Entrance, Entrance]]] = None) -> None:\n if placed_one_way_entrances is None:\n placed_one_way_entrances = []\n # Retrieve all items in the itempool, all worlds included\n complete_itempool = [item for world in worlds for item in world.get_itempool_with_dungeon_items()]\n\n random.shuffle(entrances)\n\n # Place all entrances in the pool, validating worlds during every placement\n for entrance in entrances:\n if entrance.connected_region is not None:\n continue\n random.shuffle(target_entrances)\n\n for target in target_entrances:\n if target.connected_region is None:\n continue\n\n if replace_entrance(worlds, entrance, target, rollbacks, locations_to_ensure_reachable, complete_itempool, placed_one_way_entrances=placed_one_way_entrances):\n break\n\n if entrance.connected_region is None:\n raise EntranceShuffleError('No more valid entrances to replace with %s in world %d' % (entrance, entrance.world.id))\n\n\n# Check and validate that an entrance is compatible to replace a specific target\ndef check_entrances_compatibility(entrance: Entrance, target: Entrance, rollbacks: list[tuple[Entrance, Entrance]] = (),\n placed_one_way_entrances: Optional[list[tuple[Entrance, Entrance]]] = None) -> None:\n if placed_one_way_entrances is None:\n placed_one_way_entrances = []\n # An entrance 
shouldn't be connected to its own scene, so we fail in that situation\n if entrance.parent_region.get_scene() and entrance.parent_region.get_scene() == target.connected_region.get_scene():\n raise EntranceShuffleError('Self scene connections are forbidden')\n\n # One way entrances shouldn't lead to the same hint area as other already chosen one way entrances\n if entrance.type in ('OverworldOneWay', 'OwlDrop', 'Spawn', 'WarpSong'):\n try:\n hint_area = HintArea.at(target.connected_region)\n except HintAreaNotFound:\n pass # not connected to a hint area yet, will be checked when shuffling two-way entrances\n else:\n # Check all already placed entrances of the same type (including priority entrances placed separately)\n for rollback in (*rollbacks, *placed_one_way_entrances):\n try:\n placed_entrance = rollback[0]\n if entrance.type == placed_entrance.type and HintArea.at(placed_entrance.connected_region) == hint_area:\n raise EntranceShuffleError(f'Another {entrance.type} entrance already leads to {hint_area}')\n except HintAreaNotFound:\n pass\n\n\n# Validate the provided worlds' structures, raising an error if it's not valid based on our criterias\ndef validate_world(world: World, worlds: list[World], entrance_placed: Optional[Entrance], locations_to_ensure_reachable: Iterable[Location],\n itempool: list[Item], placed_one_way_entrances: Optional[list[tuple[Entrance, Entrance]]] = None) -> None:\n if placed_one_way_entrances is None:\n placed_one_way_entrances = []\n\n if not world.settings.decouple_entrances:\n # Unless entrances are decoupled, we don't want the player to end up through certain entrances as the wrong age\n # This means we need to hard check that none of the relevant entrances are ever reachable as that age\n # This is mostly relevant when mixing entrance pools or shuffling special interiors (such as windmill or kak potion shop)\n # Warp Songs and Overworld Spawns can also end up inside certain indoors so those need to be handled as well\n # Allowing child to enter Spirit from the boss would severely complicate key logic\n CHILD_FORBIDDEN = ['OGC Great Fairy Fountain -> Castle Grounds', 'GV Carpenter Tent -> GV Fortress Side', 'Ganons Castle Lobby -> Castle Grounds From Ganons Castle', 'Bongo Bongo Boss Room -> Shadow Temple Before Boss', 'Twinrova Boss Room -> Spirit Temple Before Boss']\n ADULT_FORBIDDEN = ['HC Great Fairy Fountain -> Castle Grounds', 'HC Storms Grotto -> Castle Grounds', 'Bongo Bongo Boss Room -> Shadow Temple Before Boss', 'Twinrova Boss Room -> Spirit Temple Before Boss']\n if world.dungeon_mq['Forest Temple'] and 'Forest Temple' in world.settings.dungeon_shortcuts:\n CHILD_FORBIDDEN.append('Phantom Ganon Boss Room -> Forest Temple Before Boss')\n ADULT_FORBIDDEN.append('Phantom Ganon Boss Room -> Forest Temple Before Boss')\n\n for entrance in world.get_shufflable_entrances():\n if entrance.shuffled:\n if entrance.replaces:\n if entrance.replaces.name in CHILD_FORBIDDEN and not entrance_unreachable_as(entrance, 'child', already_checked=[entrance.replaces.reverse]):\n raise EntranceShuffleError('%s is replaced by an entrance with a potential child access' % entrance.replaces.name)\n elif entrance.replaces.name in ADULT_FORBIDDEN and not entrance_unreachable_as(entrance, 'adult', already_checked=[entrance.replaces.reverse]):\n raise EntranceShuffleError('%s is replaced by an entrance with a potential adult access' % entrance.replaces.name)\n else:\n if entrance.name in CHILD_FORBIDDEN and not entrance_unreachable_as(entrance, 'child', 
already_checked=[entrance.reverse]):\n raise EntranceShuffleError('%s is potentially accessible as child' % entrance.name)\n elif entrance.name in ADULT_FORBIDDEN and not entrance_unreachable_as(entrance, 'adult', already_checked=[entrance.reverse]):\n raise EntranceShuffleError('%s is potentially accessible as adult' % entrance.name)\n\n if locations_to_ensure_reachable:\n max_search = Search.max_explore([w.state for w in worlds], itempool)\n if world.check_beatable_only:\n if worlds[0].settings.reachable_locations == 'goals':\n # If this entrance is required for a goal, it must be placed somewhere reachable.\n # We also need to check to make sure the game is beatable, since custom goals might not imply that.\n predicate = lambda state: state.won() and state.has_all_item_goals()\n else:\n # If the game is not beatable without this entrance, it must be placed somewhere reachable.\n predicate = State.won\n perform_access_check = not max_search.can_beat_game(scan_for_items=False, predicate=predicate)\n else:\n # All entrances must be placed somewhere reachable.\n perform_access_check = True\n if perform_access_check:\n max_search.visit_locations(locations_to_ensure_reachable)\n for location in locations_to_ensure_reachable:\n if not max_search.visited(location):\n raise EntranceShuffleError('%s is unreachable' % location.name)\n\n if (\n world.shuffle_interior_entrances and (\n (world.dungeon_rewards_hinted and world.mixed_pools_bosses) or #TODO also enable if boss reward shuffle is on\n any(hint_type in world.settings.misc_hints for hint_type in misc_item_hint_table) or world.settings.hints != 'none'\n ) and (entrance_placed is None or entrance_placed.type in ['Interior', 'SpecialInterior'])\n ):\n # When cows are shuffled, ensure both Impa's House entrances are in the same hint area because the cow is reachable from both sides\n if world.settings.shuffle_cows:\n impas_front_entrance = get_entrance_replacing(world.get_region('Kak Impas House'), 'Kakariko Village -> Kak Impas House')\n impas_back_entrance = get_entrance_replacing(world.get_region('Kak Impas House Back'), 'Kak Impas Ledge -> Kak Impas House Back')\n if impas_front_entrance is not None and impas_back_entrance is not None and not same_hint_area(impas_front_entrance, impas_back_entrance):\n raise EntranceShuffleError('Kak Impas House entrances are not in the same hint area')\n\n if (world.shuffle_special_interior_entrances or world.settings.shuffle_overworld_entrances or world.settings.spawn_positions) and \\\n (entrance_placed == None or entrance_placed.type in ('SpecialInterior', 'Hideout', 'Overworld', 'OverworldOneWay', 'Spawn', 'WarpSong', 'OwlDrop')):\n # At least one valid starting region with all basic refills should be reachable without using any items at the beginning of the seed\n # Note this creates new empty states rather than reuse the worlds' states (which already have starting items)\n no_items_search = Search([State(w) for w in worlds])\n\n valid_starting_regions = ['Kokiri Forest', 'Kakariko Village']\n if not any(region for region in valid_starting_regions if no_items_search.can_reach(world.get_region(region))):\n raise EntranceShuffleError('Invalid starting area')\n\n # Check that a region where time passes is always reachable as both ages without having collected any items\n time_travel_search = Search.with_items([w.state for w in worlds], [ItemFactory('Time Travel', world=w) for w in worlds])\n\n if not (any(region for region in time_travel_search.reachable_regions('child') if region.time_passes and 
region.world == world) and\n any(region for region in time_travel_search.reachable_regions('adult') if region.time_passes and region.world == world)):\n raise EntranceShuffleError('Time passing is not guaranteed as both ages')\n\n # The player should be able to get back to ToT after going through time, without having collected any items\n # This is important to ensure that the player never loses access to the pedestal after going through time\n if world.settings.starting_age == 'child' and not time_travel_search.can_reach(world.get_region('Temple of Time'), age='adult'):\n raise EntranceShuffleError('Path to Temple of Time as adult is not guaranteed')\n elif world.settings.starting_age == 'adult' and not time_travel_search.can_reach(world.get_region('Temple of Time'), age='child'):\n raise EntranceShuffleError('Path to Temple of Time as child is not guaranteed')\n\n if (world.shuffle_interior_entrances or world.settings.shuffle_overworld_entrances) and \\\n (entrance_placed == None or entrance_placed.type in ('Interior', 'SpecialInterior', 'Hideout', 'Overworld', 'OverworldOneWay', 'Spawn', 'WarpSong', 'OwlDrop')):\n # The Big Poe Shop should always be accessible as adult without the need to use any bottles\n # This is important to ensure that players can never lock their only bottles by filling them with Big Poes they can't sell\n # We can use starting items in this check as long as there are no exits requiring the use of a bottle without refills\n time_travel_search = Search.with_items([w.state for w in worlds], [ItemFactory('Time Travel', world=w) for w in worlds])\n\n if not time_travel_search.can_reach(world.get_region('Market Guard House'), age='adult'):\n raise EntranceShuffleError('Big Poe Shop access is not guaranteed as adult')\n\n # placing a two-way entrance can connect a one-way entrance to a hint area,\n # so the restriction also needs to be checked here\n for idx1 in range(len(placed_one_way_entrances)):\n try:\n entrance1 = placed_one_way_entrances[idx1][0]\n hint_area1 = HintArea.at(entrance1.connected_region)\n except HintAreaNotFound:\n pass\n else:\n for idx2 in range(idx1):\n try:\n entrance2 = placed_one_way_entrances[idx2][0]\n if entrance1.type == entrance2.type and hint_area1 == HintArea.at(entrance2.connected_region):\n raise EntranceShuffleError(f'Multiple {entrance1.type} entrances lead to {hint_area1}')\n except HintAreaNotFound:\n pass\n\n\n# Returns whether or not we can affirm the entrance can never be accessed as the given age\ndef entrance_unreachable_as(entrance: Entrance, age: str, already_checked: Optional[list[Entrance]] = None) -> bool:\n if already_checked is None:\n already_checked = []\n\n already_checked.append(entrance)\n\n # The following cases determine when we say an entrance is not safe to affirm unreachable as the given age\n if entrance.type in ('WarpSong', 'OverworldOneWay', 'Overworld'):\n # Note that we consider all overworld entrances as potentially accessible as both ages, to be completely safe\n return False\n elif entrance.type == 'OwlDrop':\n return age == 'adult'\n elif entrance.name == 'Child Spawn -> KF Links House':\n return age == 'adult'\n elif entrance.name == 'Adult Spawn -> Temple of Time':\n return age == 'child'\n\n # Other entrances such as Interior, Dungeon or Grotto are fine unless they have a parent which is one of the above cases\n # Recursively check parent entrances to verify that they are also not reachable as the wrong age\n for parent_entrance in entrance.parent_region.entrances:\n if parent_entrance in 
already_checked:\n continue\n\n # Farores Wind Warp is a special case which should be ignored for the purpose of this check\n if parent_entrance.parent_region.name == 'Farores Wind Warp':\n continue\n\n unreachable = entrance_unreachable_as(parent_entrance, age, already_checked)\n if not unreachable:\n return False\n\n return True\n\n\n# Returns whether two entrances are in the same hint area\ndef same_hint_area(first: Entrance, second: Entrance) -> bool:\n try:\n return HintArea.at(first) == HintArea.at(second)\n except HintAreaNotFound:\n return False\n\n\n# Shorthand function to find an entrance with the requested name leading to a specific region\ndef get_entrance_replacing(region: Region, entrance_name: str) -> Optional[Entrance]:\n original_entrance = region.world.get_entrance(entrance_name)\n\n if not original_entrance.shuffled:\n return original_entrance\n\n try:\n return next(filter(lambda entrance: entrance.replaces and entrance.replaces.name == entrance_name and \\\n entrance.parent_region and entrance.parent_region.name != 'Root Exits' and \\\n entrance.type not in ('OverworldOneWay', 'OwlDrop', 'Spawn', 'WarpSong', 'BlueWarp'), region.entrances))\n except StopIteration:\n return None\n\n\n# Change connections between an entrance and a target assumed entrance, in order to test the connections afterwards if necessary\ndef change_connections(entrance: Entrance, target_entrance: Entrance) -> None:\n entrance.connect(target_entrance.disconnect())\n entrance.replaces = target_entrance.replaces\n if entrance.reverse and not entrance.decoupled:\n target_entrance.replaces.reverse.connect(entrance.reverse.assumed.disconnect())\n target_entrance.replaces.reverse.replaces = entrance.reverse\n\n\n# Restore connections between an entrance and a target assumed entrance\ndef restore_connections(entrance: Entrance, target_entrance: Entrance) -> None:\n target_entrance.connect(entrance.disconnect())\n entrance.replaces = None\n if entrance.reverse and not entrance.decoupled:\n entrance.reverse.assumed.connect(target_entrance.replaces.reverse.disconnect())\n target_entrance.replaces.reverse.replaces = None\n\n\n# Confirm the replacement of a target entrance by a new entrance, logging the new connections and completely deleting the target entrances\ndef confirm_replacement(entrance: Entrance, target_entrance: Entrance) -> None:\n delete_target_entrance(target_entrance)\n logging.getLogger('').debug('Connected %s To %s [World %d]', entrance, entrance.connected_region, entrance.world.id)\n if entrance.reverse and not entrance.decoupled:\n replaced_reverse = target_entrance.replaces.reverse\n delete_target_entrance(entrance.reverse.assumed)\n logging.getLogger('').debug('Connected %s To %s [World %d]', replaced_reverse, replaced_reverse.connected_region, replaced_reverse.world.id)\n\n\n# Delete an assumed target entrance, by disconnecting it if needed and removing it from its parent region\ndef delete_target_entrance(target_entrance: Entrance) -> None:\n if target_entrance.connected_region is not None:\n target_entrance.disconnect()\n if target_entrance.parent_region is not None:\n target_entrance.parent_region.exits.remove(target_entrance)\n target_entrance.parent_region = None\n","sub_path":"EntranceShuffle.py","file_name":"EntranceShuffle.py","file_ext":"py","file_size_in_byte":95930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"637484737","text":"\n# -*- coding: UTF-8 -*-\nimport os\n\n\nfrom . 
import DATAYES\n\n__author__ = 'frank'\n\n\nfrom . import api_base\ntry:\n from StringIO import StringIO\nexcept:\n from io import StringIO\nimport pandas as pd\nimport sys\nimport logging\nimport traceback\n\nimport requests\n\nusername = os.environ.get('DatayesPrincipalName', 'invalid')\n\ndef MktOptionTicksHistOneDayGet(optionId, date, field = \"\", startSecOffset = \"\", endSecOffset = \"\", pandas = \"1\"):\n\n fieldStr = ''\n if hasattr(field,'__iter__'):\n for temp in field:\n fieldStr += temp + \",\"\n fieldStr = fieldStr[:-1]\n else:\n fieldStr += field\n\n URL = '/api/market/getOptionTicksHistOneDay.csv?field=%s&optionId=%s&date=%s&startSecOffset=%s&endSecOffset=%s' % (fieldStr, optionId, date, startSecOffset, endSecOffset)\n\n httpClient = api_base.__getConn__()\n csvString = api_base.__getCSV__(URL, httpClient)\n if pandas != \"1\":\n return csvString\n if csvString is None or len(csvString) == 0 or csvString[0] == '-':\n raise Exception(u'%s for request: %s' % (csvString, URL))\n try:\n\n myIO = StringIO(csvString)\n pdFrame = pd.read_csv(myIO, dtype = {'optionId':'str'}, )\n return pdFrame\n except Exception as e:\n raise e\n finally:\n myIO.close()\n\ndef EcoDataProGet(indicID, beginDate = \"\", endDate = \"\", field = \"\"):\n \"\"\"\n 通联新宏观行业。输入指标ID,获取相关数据。\n\n :param indicID: 指标代码,可以通过特色数据页面获取。,可以是列表\n :param beginDate: 根据数据截止日期范围查询的开始日期,输入格式“YYYYMMDD”,可空\n :param endDate: 根据数据截止日期范围查询的结束日期,输入格式“YYYYMMDD”,可空\n :param field: 所需字段,可以是列表,可空;候选列表:indicID - 指标ID,publishDate - 数据源发布时间,periodDate - 数据期,dataValue - 数据值,updateTime - 更新时间\n :return: :raise e: API查询的结果,是CSV或者被转成pandas data frame;若查询API失败,返回空data frame; 若解析失败,则抛出异常\n \"\"\"\n if not api_base.is_pro_user() and not api_base.is_enterprise_user():\n raise Exception('Not uqer pro user!')\n api_name_info = DATAYES.EcoInfoProGet(indicID=indicID, field='indicID,dataApiName').drop_duplicates()\n df_list = []\n for idx, row in api_name_info.iterrows():\n thisIndicID, api_name = (row['indicID'], row['dataApiName'])\n f = getattr(DATAYES, api_name[3:] + 'Get')\n df = f(str(thisIndicID), beginDate, endDate, field)\n df_list.append(df)\n if df_list:\n return pd.concat(df_list)\n return pd.DataFrame()\n\nif os.getenv('env', 'dev') != 'dev':\n cache_server = \"tcp://db_cache_server:1111\"\nelse:\n if os.environ.get('enterprise', '0') == '1':\n cache_server = \"tcp://10.24.51.170:1111\" # stg\n else:\n cache_server = \"tcp://10.22.132.36:1111\" # stg\n\n\nbatch_size = 5\n\ndef _get_service():\n import zerorpc\n client = zerorpc.Client(heartbeat=None, timeout=300)\n client.connect(cache_server)\n return client\n\ndef _load_factor(secID, fields, beginDate, endDate, username):\n try:\n client = _get_service()\n return 1, client.get_user_factors(secID, fields, beginDate, endDate, username)\n except Exception as e:\n error_msg = str(e)\n if \"[Name Check]\" in error_msg:\n raise Exception(error_msg[error_msg.find('[Name Check]')+12:])\n if '[Restriction]' in error_msg:\n raise Exception(error_msg[error_msg.find('[Restriction]')+13:])\n else:\n logging.error(traceback.format_exc().replace('\\n', ''))\n raise Exception(\"The data is not available at this moment.\")\n finally:\n client.close()\n\ndef FactorsGet(secID, beginDate, endDate, field):\n \"\"\"\n 获取因子数据,包括优矿因子、用户自定义因子和公共池因子\n :param secID: 股票池列表\n :param beginDate: 开始时间\n :param endDate: 结束时间\n :param field: 因子名列表,比如['PB', 'public.factor1', 'private.factor2'],不带前缀表示优矿因子,带private前缀表示用户自定义因子,带public前缀表示公共池因子\n :return: 一个pandas 
Panel,比如panel['PB']表示PB因子数据的DataFrame,panel['public.factor1']表示public.factor1因子数据的DataFrame,panel['private.factor2']表示private.factor2因子数据的DataFrame\n \"\"\"\n from gevent.pool import Pool\n if not api_base.is_pro_user() and not api_base.is_enterprise_user():\n print ('非专业版用户,无法使用该API')\n return\n try:\n if type(field) != list:\n field = field.split(',')\n except:\n raise Exception('Invalid field format')\n client = _get_service()\n try:\n factors = client.translate_user_factors(field, username, beginDate)\n except Exception as e:\n error_msg = str(e)\n if \"[Name Check]\" in error_msg:\n print (\"ERROR: \" + error_msg[error_msg.find('[Name Check]')+12:])\n return\n if '[Restriction]' in error_msg:\n print (\"ERROR: \" + error_msg[error_msg.find('[Restriction]')+13:])\n return\n else:\n logging.error(traceback.format_exc().replace('\\n', ''))\n raise Exception(\"The data is not available at this moment.\")\n finally:\n client.close()\n result_factors = []\n from gevent.pool import Pool\n pool = Pool(batch_size)\n workers = []\n for key in factors:\n for f_id in factors[key]:\n factor = factors[key][f_id]\n result_factors.append(factor)\n\n group_factors = api_base.splist(result_factors, 3)\n for group in group_factors:\n worker = pool.spawn(_load_factor, secID, group, beginDate, endDate, username)\n workers.append(worker)\n\n result = {}\n if len(workers) > 0:\n pool.join()\n for worker in workers:\n val = worker.value\n if val[0] < 0:\n raise Exception(val[1])\n ast_data = val[1]\n for key in ast_data:\n result[key] = pd.DataFrame(ast_data[key])\n return pd.Panel(result)\n","sub_path":"JD_FactorFactory01/venv/Lib/site-packages/uqer/DataAPI/OTHER.py","file_name":"OTHER.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"467065816","text":"\"\"\"Network class and main algorithm for training the network.\"\"\"\nfrom test_model import *\nimport random\n\n# Add parameters for plotting. 
\nmpl.rcParams['figure.titleweight'] = \"bold\"\nmpl.rcParams['font.size'] = 14\nmpl.rcParams['axes.titlesize'] = 13\nmpl.rcParams['axes.titleweight'] = \"bold\"\n\nplt.style.use('seaborn')\n\nclass Parameters:\n \"\"\"Class for the parameters in the Neural Network.\"\"\"\n def __init__(self,K,d,I):\n self.W_k = np.random.randn(K,d,d)\n self.b_k = np.random.randn(K,d,1)\n self.b_k_I = np.zeros((K,d,I))\n\n for i in range(K):\n self.b_k_I[i,:,:] = self.b_k[i,:,:]\n\n self.w = np.random.randn(d) \n self.my = np.random.rand()\n self.K = K\n \n # For Adam descent.\n self.beta_1 = 0.9; self.beta_2 = 0.999; self.alpha = 0.01; self.epsilon = 10E-8\n self.v = [0]; self.m = [0]\n \n def update_parameters(self,gradient,method,tau,j):\n \"\"\"Update the parameters via Vanilla Gradient Method or Adam Descent.\"\"\"\n if(method == \"vanilla\"):\n self.W_k = self.W_k - tau*gradient[0]\n self.b_k = self.b_k - tau*gradient[1]\n for i in range(self.K):\n self.b_k_I[i,:,:] = self.b_k[i,:,:]\n self.w = self.w - tau*gradient[2]\n self.my = self.my - tau*gradient[3]\n \n elif(method == \"adams\"):\n g = gradient \n self.m.append( self.beta_1*self.m[j-1] + (1-self.beta_1)*g)\n self.v.append(self.beta_2*self.v[j-1]+(1-self.beta_2)*(g*g))\n m_hat = self.m[j]/(1-self.beta_1**j)\n v_hat = self.v[j]/(1-self.beta_2**j)\n subtract = self.alpha*m_hat/(self.root(self.v[j])+self.epsilon)\n \n self.W_k = self.W_k - subtract[0]\n self.b_k = self.b_k - subtract[1]\n for i in range(self.K):\n self.b_k_I[i,:,:] = self.b_k[i,:,:]\n self.w = self.w - subtract[2]\n self.my = self.my - subtract[3] \n \n else:\n print(\"No method found\") \n \n def root(self, v_j):\n \"\"\"Calculate the root of v_j componentwise for Adam descent.\"\"\"\n for i in range(4):\n v_j[i] = np.sqrt(v_j[i])\n return v_j\n\nclass Network:\n \"\"\"Class for the Neural Network (ResNet).\"\"\"\n def __init__(self,K,d,I,h,Z_0,c, hypothesis = 1):\n self.theta = Parameters(K,d,I)\n self.Z_list = np.zeros((K+1,d,I))\n self.Z_list[0,:,:] = Z_0\n self.c = c\n self.h = h\n self.K = K\n self.I = I\n self.d = d\n self.Y = None\n self.hypothesis = hypothesis\n\n self.J_last = None # For use in the testing (instead of returning J from algorithm, save in NN)\n \n def J(self): \n \"\"\"Objective function.\"\"\"\n return 0.5 * la.norm(self.Y - self.c)**2\n \n def sigma(self, x):\n \"\"\"Sigmoid activation function.\"\"\"\n return np.tanh(x)\n\n def sigma_der(self, x):\n \"\"\"Derivative of the sigmoid activation function.\"\"\"\n val = 1-np.tanh(x)**2\n return val\n\n def eta(self, x):\n \"\"\"Hypothesis function. 
Appears in the final layer of the neural network.\"\"\"\n if self.hypothesis == 1:\n val = x\n elif self.hypothesis == 2:\n val = 0.5*(1+ np.tanh(x/2))\n return val\n\n def eta_der(self, x):\n \"\"\"The derivative of the hypothesis function.\"\"\"\n if self.hypothesis == 1:\n val = np.ones(x.shape)\n elif self.hypothesis == 2:\n val = 0.25*self.sigma_der(x/2) \n return val\n\n def forward_function(self): \n \"\"\"Calculate Y.\"\"\"\n for i in range(self.K):\n self.Z_list[i+1,:,:] = self.Z_list[i,:,:] + \\\n self.h*self.sigma(self.theta.W_k[i,:,:] @ \\\n self.Z_list[i,:,:] + self.theta.b_k_I[i,:,:])\n \n ZT_K = np.transpose(self.Z_list[-1,:,:]) \n one_vec = np.ones(self.I)\n Y = self.eta(ZT_K @ self.theta.w + self.theta.my*one_vec)\n self.Y = Y\n\n def back_propagation(self):\n \"\"\"Calculate and return the gradient of the objective function.\"\"\"\n ZT_K = np.transpose(self.Z_list[-1,:,:])\n one_vec = np.ones(self.I)\n J_der_my = self.eta_der(ZT_K @ self.theta.w + self.theta.my*one_vec).T @ (self.Y-self.c)\n J_der_w = self.Z_list[-1,:,:] @ ((self.Y-self.c) * \\\n self.eta_der(ZT_K @ self.theta.w + self.theta.my*one_vec))\n \n P_K = np.outer(self.theta.w,(self.Y-self.c)*self.eta_der(ZT_K @ \\\n self.theta.w + self.theta.my*one_vec)) \n \n \n P_list = np.zeros((self.K,self.d,self.I)) \n P_list[-1,:,:] = P_K \n for i in range(self.K-1,0,-1):\n P_list[i-1,:,:] = P_list[i,:,:] + self.h*np.transpose(self.theta.W_k[i-1,:,:]) @ \\\n (self.sigma_der(self.theta.W_k[i-1,:,:] @ self.Z_list[i-1,:,:] + \\\n self.theta.b_k_I[i-1,:,:]) * P_list[i,:,:])\n\n J_der_Wk = np.zeros((self.K,self.d,self.d))\n J_der_b = np.zeros((self.K,self.d,1))\n one_vec = np.ones((self.I,1)) \n \n for i in range(self.K):\n val = P_list[i,:,:] * self.sigma_der(self.theta.W_k[i,:,:] @ \\\n self.Z_list[i,:,:] + self.theta.b_k_I[i,:,:])\n J_der_Wk[i,:,:] = self.h*(val @ np.transpose(self.Z_list[i,:,:]))\n J_der_b[i,:,:] = self.h*(val @ one_vec)\n \n gradient = np.array((J_der_Wk,J_der_b,J_der_w,J_der_my))\n \n return gradient\n \n def embed_input_and_sol(self, inp, sol):\n \"\"\"Embed the input into d-dimensional space. 
This function is used for testing.\"\"\"\n self.I = inp.shape[1]\n d0 = inp.shape[0]\n if d0 < self.d:\n while d0 < d:\n zero_row = np.zeros(self.I)\n inp = inp.vstack((inp,zero_row))\n d0 += 1\n \n\n self.Z_list = np.zeros((self.K+1,self.d,self.I))\n self.Z_list[0,:,:] = inp\n self.c = sol\n\n\n self.theta.b_k_I = np.zeros((self.K,self.d,self.I))\n for i in range(self.K):\n self.theta.b_k_I[i,:,:] = self.theta.b_k[i,:,:]\n \n def embed_input(self, inp):\n \"\"\" Embed the input in d-dimenstional space when no solution is provided.\"\"\" \n if inp.ndim == 1: # The input is a point.\n self.I = 1\n d0 = inp.shape[0]\n inp = inp.reshape(d0, 1)\n else:\n d0 = inp.shape[0]\n self.I = inp.shape[1]\n\n self.Z_list = np.zeros((self.K+1,self.d,self.I))\n self.Z_list[0,:d0,:] = inp\n \n # Changing b_K_I in case self.I has changed.\n self.theta.b_k_I = np.zeros((self.K,self.d,self.I))\n for i in range(self.K):\n self.theta.b_k_I[i,:,:] = self.theta.b_k[i,:,:]\n\n def calculate_output(self, inp):\n \"\"\"Calculate and return the network's output from a given input.\"\"\"\n self.embed_input(inp)\n self.forward_function()\n return self.Y\n\n def Hamiltonian_gradient(self):\n \"\"\"Calculate the gradient of F, according to the theoretical derivation.\n \n In general it is used for the entire Hamiltonian F, but since we are \n working with separable Hamiltonians, it can be used on both T and V.\n \"\"\"\n one_vec = np.ones((self.I,1)) \n\n \n w = self.theta.w.reshape((self.d,1)) # Need different dimensions for w. \n A = w @ self.eta_der(w.T@self.Z_list[self.K,:,:] + self.theta.my*one_vec.T) \n \n\n for k in range(self.K, 0, -1):\n A += self.theta.W_k[k-1,:,:].T @ (self.h*self.sigma_der(\\\n self.theta.W_k[k-1,:,:]@self.Z_list[k-1,:,:] + self.theta.b_k_I[k-1,:,:])*A)\n \n return A\n\ndef algorithm(I, d, d0, K, h, iterations, tau, function, domain, plot = False, savename = \"\"):\n \"\"\"Main training algorithm.\"\"\"\n \n inp = generate_input(function,domain,d0,I,d)\n output = get_solution(function,inp,d,I,d0)\n\n Z_0 = inp\n c_0 = output\n NN = Network(K,d,I,h,Z_0,c_0)\n \n # For plotting J. \n J_list = np.zeros(iterations)\n it = np.zeros(iterations)\n\n for j in range(1,iterations+1):\n \n NN.forward_function()\n gradient = NN.back_propagation()\n NN.theta.update_parameters(gradient,\"adams\",tau,j)\n\n J_list[j-1] = NN.J()\n it[j-1] = j\n \n if plot:\n fig, ax = plt.subplots()\n ax.plot(it,J_list)\n plt.yscale(\"log\")\n fig.suptitle(\"Objective Function J as a Function of Iterations.\", fontweight = \"bold\")\n ax.set_ylabel(\"J\")\n ax.set_xlabel(\"Iteration\")\n plt.text(0.5, 0.5, \"Value of J at iteration \"+str(iterations)+\": \"+str(round(J_list[-1], 6)), \n horizontalalignment=\"center\", verticalalignment=\"center\", \n transform=ax.transAxes, fontsize = 16)\n if savename != \"\": \n plt.savefig(savename+\".pdf\", bbox_inches='tight')\n plt.show()\n \n \n NN.J_last = J_list[-1] # Save last value of J_list in NN, to check which converges best in tests. \n \n return NN\n\ndef algorithm_sgd(I,d, d0, K, h, iterations, tau, chunk, function, domain, plot = False, savename = \"\"):\n \"\"\"Main training algorithm with Stochastic Gradient Descent.\"\"\"\n\n inp = generate_input(function,domain,d0,I,d)\n output = get_solution(function,inp,d,I,d0)\n\n index_list = [i for i in range(I)]\n\n Z_0, c_0, index_list = get_random_sample(inp,output,index_list,chunk,d)\n NN = Network(K,d,chunk,h,Z_0,c_0)\n\n # For plotting J. 
\n J_list = np.zeros(iterations)\n it = np.zeros(iterations)\n\n counter = 0\n for j in range(1,iterations+1):\n\n NN.forward_function()\n gradient = NN.back_propagation()\n NN.theta.update_parameters(gradient,\"adams\",tau,j)\n\n # For plotting.\n J_list[j-1] = NN.J()\n it[j-1] = j\n\n if counter < I/chunk - 1:\n Z, c, index_list = get_random_sample(inp,output,index_list,chunk,d)\n NN.Z_list[0,:,:] = Z\n NN.c = c\n counter += 1\n else:\n # All data has been sifted through.\n counter = 0\n index_list = [i for i in range(I)]\n\n\n if plot:\n fig, ax = plt.subplots()\n ax.plot(it,J_list)\n plt.yscale(\"log\")\n fig.suptitle(\"Objective Function J as a Function of Iterations.\", fontweight = \"bold\")\n ax.set_ylabel(\"J\")\n ax.set_xlabel(\"Iteration\")\n plt.text(0.5, 0.5, \"Value of J at iteration \"+str(iterations)+\": \"+str(round(J_list[-1], 6)), \n horizontalalignment=\"center\", verticalalignment=\"center\", \n transform=ax.transAxes, fontsize = 16)\n if savename != \"\": \n plt.savefig(savename + \".pdf\", bbox_inches='tight')\n plt.yscale(\"log\")\n plt.show()\n return NN \n\ndef algorithm_scaling(I,d, d0, K,h,iterations, tau, chunk, method, function,domain,scaling, alpha, beta, hypothesis = 1, plot = False, savename = \"\"):\n \"\"\"Main training algorithm with SGD and option to scale.\"\"\"\n\n inp = generate_input(function,domain,d0,I,d)\n output = get_solution(function,inp,d,I,d0)\n\n a1 = b1 = a2 = b2 = None\n if scaling:\n inp, a1, b1 = scale_data(alpha,beta,inp)\n output, a2, b2 = scale_data(alpha,beta,output)\n\n index_list = [i for i in range(I)]\n\n Z_0, c_0, index_list = get_random_sample(inp,output,index_list,chunk,d)\n NN = Network(K,d,chunk,h,Z_0,c_0)\n\n # For plotting J. \n J_list = np.zeros(iterations)\n it = np.zeros(iterations)\n\n counter = 0\n for j in range(1,iterations+1):\n\n\n NN.forward_function()\n gradient = NN.back_propagation()\n NN.theta.update_parameters(gradient,method,tau,j)\n\n # For plotting.\n J_list[j-1] = NN.J()\n it[j-1] = j\n\n if counter < I/chunk - 1:\n Z, c, index_list = get_random_sample(inp,output,index_list,chunk,d)\n NN.Z_list[0,:,:] = Z\n NN.c = c\n counter += 1\n else:\n # All data has been sifted through.\n counter = 0\n index_list = [i for i in range(I)]\n\n\n if plot:\n fig, ax = plt.subplots()\n ax.plot(it,J_list)\n fig.suptitle(\"Objective Function J as a Function of Iterations.\", fontweight = \"bold\")\n ax.set_ylabel(\"J\")\n ax.set_xlabel(\"Iteration\")\n plt.text(0.5, 0.5, \"Value of J at iteration \"+str(iterations)+\": \"+str(round(J_list[-1], 4)), \n horizontalalignment=\"center\", verticalalignment=\"center\", \n transform=ax.transAxes, fontsize = 16)\n if savename != \"\": \n plt.savefig(savename + \".pdf\", bbox_inches='tight')\n plt.show()\n \n NN.J_last = J_list[-1] # Save last value of J_list in NN, to check which converges best in tests. 
\n return NN, a1, b1, a2, b2, J_list, it\n","sub_path":"Project2/network_algo.py","file_name":"network_algo.py","file_ext":"py","file_size_in_byte":13036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"261174723","text":"from main import *\nfrom subscoring_functions import *\nfrom fit_refactor import *\n\nimport random\n\n\ndef get_state_names(include_abbr = False):\n states = []\n abbrs = []\n with open(\"states.csv\") as f:\n f.readline() # get rid of header\n for entry in f:\n state, abbr = entry.split(\",\")\n state = state.replace(\"\\\"\", \"\")\n states.append(state)\n if include_abbr:\n abbr = abbr.replace(\"\\\"\", \"\")\n abbr = abbr.replace(\"\\n\", \"\")\n abbrs.append(abbr)\n if include_abbr:\n return states, abbrs\n else:\n return states\n\n\ndef test_abbr_prune():\n states = get_state_names()\n states = [Abbreviatable(state) for state in states]\n while all(map(lambda x: len(x) > 0, states)):\n for state in states:\n rand_ptr = random.randint(0, len(state)-1)\n state.prune(rand_ptr)\n\n abbr = Abbreviatable(\"Apple Sauce\")\n abbr.prune(0)\n assert str(abbr) == \"pple Sauce\"\n abbr.prune(-1)\n assert str(abbr) == \"pple Sauc\"\n abbr.prune(4)\n assert str(abbr) == \"ppleSauc\"\n\n print(\"Success!\")\n\n\ndef test_subscoring_functions():\n states = get_state_names()\n states = [Abbreviatable(state.lower()) for state in states]\n freq = load_freq()\n for state in states:\n for i, letter in enumerate(state):\n self_info(letter, freq)\n starts_word(i, state)\n contains_repetitive(i, state)\n avoid_vowels(i, state)\n prefer_consecutive(i, state)\n\n\n assert self_info(\"z\", freq) > self_info(\"e\", freq)\n\n # test contains_repetitive\n msp = Abbreviatable(\"mississippi\")\n assert contains_repetitive(0, msp) == 1\n assert contains_repetitive(2, msp, weight = 10) == 40\n assert contains_repetitive(-2, msp, weight = 10) == 20\n\n test_starts_word()\n\n\ndef test_score():\n states = [Abbreviatable(state.lower()) for state in get_state_names()]\n freq = load_freq()\n for state in states:\n total_score = sum([score(i, state, freq) for i in range(len(state))])\n print(str(round(total_score)).rjust(5) + \" \" + str(state).ljust(20))\n \n\ndef test_starts_word():\n # Test starts word function.\n sentence = \"the quick brown fox jumped over the lazy dog\"\n sentence = Abbreviatable(sentence)\n starters = [0, 4, 10, 16, 20, 27, 32, 36, 41]\n for i in range(len(sentence)):\n result = starts_word(i, sentence, weight = 42)\n if i in starters:\n assert result == 42\n else:\n assert result == 1\n\n\ndef test_entropy():\n freq = load_freq()\n for letter_num in range(ord(\"a\"), ord(\"z\") + 1):\n letter = chr(letter_num)\n print(\"\\t\", letter, \":\", round(self_info(freq, letter), 4))\n\n\ndef test_squeeze():\n states = get_state_names()\n for state in states:\n print(squeeze(Abbreviatable(state.lower()), 2), state)\n\n\ndef test_batch_squeeze():\n states = get_state_names()\n states = [s.lower() for s in states]\n batch_results = batch_squeeze(states, 2)\n for abbr in batch_results:\n print(abbr, abbr.original)\n\n\ndef test_fit():\n states, abbr = get_state_names( include_abbr = True )\n states = [s.lower() for s in states]\n abbr = [a.lower() for a in abbr]\n wts = fit(states, abbr)\n for k in wts.keys():\n print()\n print(str(k).ljust(15), str(wts[k]))\n return 
wts\n","sub_path":"unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"492459414","text":"import time\nimport requests\n\nfrom .base import celery_app\nfrom .util import report_done, report_ready\nfrom artifice.scraper.utils import headify\nfrom artifice.scraper.supervisor import Supervisor\nfrom artifice.scraper.parsers import NPRParser\n\n\nURL_FOR_QUEUE = celery_app._preconf.get('URL_FOR_QUEUE')\nURL_FOR_CONTENT = celery_app._preconf.get('URL_FOR_CONTENT')\n\n\ndef auth_header():\n key = 'AUTH_TOKEN'\n token = celery_app._preconf.get(key)\n return {headify(key): token}\n\n\ndef unit_is_under_test():\n key = 'testing'\n is_testing = celery_app._preconf.get(key)\n return is_testing\n\n\n@celery_app.task(name='tasks.holding_tank')\ndef holding_tank(url, **kwargs):\n '''\n Entrypoint for the Celery queue. Automatically\n executes functions based on application config.\n '''\n if unit_is_under_test():\n return\n return sorting_hat(url, **kwargs)\n\n\n@celery_app.task(name='tasks.sorting_hat')\ndef sorting_hat(url, **kwargs):\n '''\n Responsible for checking whether service is enabled,\n and whether to scrape the URL or return unfinished.\n '''\n status = Supervisor.status()\n if not status['enabled']:\n return archive_url(report_ready(url), **kwargs)\n time.sleep(status['polite'])\n return fetch_url(url, **kwargs)\n\n\n@celery_app.task(name='tasks.fetch_url')\ndef fetch_url(url, **kwargs):\n '''\n Scrapes the URL and passes along the response data\n for content extraction.\n '''\n response = requests.get(url)\n return extract_content(response, **kwargs)\n\n\n@celery_app.task(name='tasks.extract_content')\ndef extract_content(response, **kwargs):\n '''\n Determines which parser should be used based on the URL,\n and extracts the content accordingly.\n '''\n npr = NPRParser(response)\n content = npr.extract_content()\n return archive_content(content, **kwargs)\n\n\n@celery_app.task(name='tasks.archive_content')\ndef archive_content(content, **kwargs):\n '''\n Stores the extracted content to the database via\n API endpoint.\n '''\n response = requests.post(URL_FOR_CONTENT, headers=auth_header(), json=content)\n fb = feed_back(content)\n url = content.get('origin')\n return archive_url(report_done(url), status_code=response.status_code, feedback=fb, **kwargs)\n\n\n@celery_app.task(name='tasks.archive_url')\ndef archive_url(json_data, **kwargs):\n '''\n Returns the URL to the database via API endpoint,\n status can be either `READY` or `DONE`.\n '''\n response = requests.put(URL_FOR_QUEUE, headers=auth_header(), json=json_data)\n return response.status_code, {**kwargs}\n\n\n@celery_app.task(name='tasks.feed_back')\ndef feed_back(content):\n '''\n Automatically add the extracted links from the page\n to the API queue endpoint, perpetuating the process.\n '''\n json_data = dict(url=content.get('url'))\n response = requests.post(URL_FOR_QUEUE, headers=auth_header(), json=json_data)\n return response.status_code\n\n\n# # # notifications & monitoring\n@celery_app.task(name='tasks.sms_notify')\ndef sms_notify(body):\n import os\n from twilio.rest import Client\n\n sid = os.environ['TWILIO_SID']\n token = os.environ['TWILIO_TOKEN']\n client = Client(sid, token)\n\n from_ = os.environ['TWILIO_FROM_NUM']\n to_ = os.environ['TWILIO_TO_NUM']\n client.messages.create(to=to_, from_=from_, 
body=body)\n","sub_path":"src/artifice/scraper/tasks/callable_tasks.py","file_name":"callable_tasks.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"515196312","text":"import json\n\nfrom aiohttp import web\n\nfrom main import process_articles\n\nARTICLE_URLS_LIMIT = 10\n\n\nasync def index(request):\n\n urls = [url.strip() for url in request.query.get('urls', '').split(',') if url]\n if len(urls) > ARTICLE_URLS_LIMIT:\n raise web.HTTPBadRequest(\n body=json.dumps({\"error\": f\"too many urls in request, should be {ARTICLE_URLS_LIMIT} or less\"})\n )\n\n results = await process_articles(urls)\n\n response = []\n for article_info in results:\n response.append({\n \"status\": article_info['status'].value,\n \"url\": article_info['url'],\n \"score\": article_info['rate'],\n \"words_count\": article_info['words_count']\n })\n\n return web.json_response(data=response)\n\n\napp = web.Application()\napp.add_routes([web.get('/', index)])\nweb.run_app(app)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"166091513","text":"#! /usr/bin/env python\nfrom __future__ import print_function\n# jdbc stuff\nimport jaydebeapi\nimport jpype\nfrom jpype import *\n\n# aerospike stuff\ntry:\n import aerospike\nexcept:\n print(\"Please install aerospike python client.\")\n sys.exit(1)\n\nfrom aerospike import exception as ex\n\nimport sys\nimport pprint\nimport csv\n\nclass main:\n \n # oracle variables\n driverName = \"oracle.jdbc.OracleDriver\"\n url= \"jdbc:oracle:thin:@rhes564:1521:mydb12\"\n _username = \"scratchpad\"\n _password = \"oracle\"\n _dbschema = \"SCRATCHPAD\"\n _dbtable = \"DUMMY\"\n csv_file_name = \"/home/hduser/dba/bin/python/DUMMY.csv\"\n \n # aerospike variables\n dbHost = \"rhes75\"\n dbPort = 3000\n dbConnection = \"mich\"\n namespace = \"test\"\n dbPassword = \"aerospike\"\n dbSet = \"oracletoaerospike2\"\n dbKey = \"ID\"\n rec = {}\n\n def read_oracle_table(self):\n # Check Oracle is accessible\n try:\n connection = jaydebeapi.connect(self.driverName, self.url, [self._username, self._password])\n except jaydebeapi.Error as e:\n print(\"Error: {0} [{1}]\".format(e.msg, e.code))\n sys.exit(1)\n else:\n # Check if table exists\n metadata = connection.jconn.getMetaData()\n rs = metadata.getTables(None, self._dbschema, self._dbtable, None)\n if (rs.next()):\n print(\"\\nTable \" + self._dbschema+\".\"+ self._dbtable + \" exists\\n\")\n cursor = connection.cursor()\n sql=\"\"\"\n SELECT ID, CLUSTERED, SCATTERED, RANDOMISED, RANDOM_STRING, SMALL_VC, PADDING FROM scratchpad.dummy where ROWNUM <= 10\n \"\"\"\n cursor.execute(sql)\n # get column descriptions\n columns = [i[0] for i in cursor.description]\n rows = cursor.fetchall()\n # write oracle data to the csv file\n csv_file = open(self.csv_file_name, mode='w')\n writer = csv.writer(csv_file, delimiter=',', lineterminator=\"\\n\", quoting=csv.QUOTE_NONNUMERIC)\n # write coumn headers to csv file\n writer.writerow(columns)\n for row in rows:\n writer.writerow(row) ## write rows to csv file\n \n print(\"writing to csv file complete\")\n cursor.close()\n connection.close()\n csv_file.close()\n sys.exit(0)\n else:\n print(\"Table \" + self._dbschema+\".\"+ self._dbtable + \" does not exist, quitting!\")\n connection.close()\n sys.exit(1)\n\n def read_aerospike_set(self):\n # Check aerospike is 
accessible\n try:\n config = { \n 'hosts': [(self.dbHost, self.dbPort)],\n 'policy': {'aerospike.POLICY_KEY_SEND': 'true', 'read': {'total_timeout': 1000}}\n }\n client = aerospike.client(config).connect(self.dbConnection, self.dbPassword)\n except ex.ClientError:\n print(\"Error: {0} [{1}]\".format(e.msg, e.code))\n sys.exit(1)\n else:\n print(\"Connection successful\")\n keys = []\n for k in xrange(1,10):\n key = (self.namespace, self.dbSet, str(k))\n keys.append(key)\n\n records = client.get_many(keys)\n pprint.PrettyPrinter(depth=4).pprint (records)\n print(\"\\nget everyting for one record with pk = '9'\")\n (key, meta, bins)= client.get((self.namespace, self.dbSet, '9'))\n print (key)\n print (meta)\n print (bins)\n client.close()\n sys.exit(0)\n\n def write_aerospike_set(self):\n # Check aerospike is accessible\n try:\n config = {\n 'hosts': [(self.dbHost, self.dbPort)],\n 'policy': {'aerospike.POLICY_KEY_SEND': 'true', 'read': {'total_timeout': 1000}}\n }\n client = aerospike.client(config).connect(self.dbConnection, self.dbPassword)\n except ex.ClientError:\n print(\"Error: {0} [{1}]\".format(e.msg, e.code))\n sys.exit(1)\n else:\n print(\"Connection to aerospike successful\")\n rec = {}\n # read from csv file\n csv_file = open(self.csv_file_name, mode='r')\n reader = csv.reader(csv_file, delimiter=',')\n rownum = 0\n for row in reader:\n if rownum == 0:\n header = row\n else:\n column = 0\n for col in row:\n # print (rownum,header[colnum],col)\n rec[header[column]] = col\n column += 1\n rownum += 1\n #print(rownum, rec)\n if rec:\n client.put((self.namespace, self.dbSet, str(rownum)), rec)\n rec = {}\n print(\"writing to aerospike set complete\")\n csv_file.close()\n client.close()\n sys.exit(0)\n\n\n\na = main()\noption = sys.argv[1]\nif option == \"1\":\n a.read_oracle_table()\nelif option == \"2\":\n a.write_aerospike_set()\nelif option == \"3\":\n a.read_aerospike_set()\nelse:\n print(\"incorrect option, valid options are: 1, 2 and 3\")\n sys.exit(1)\nsys.exit(0)\n","sub_path":"read_oracle_table.py","file_name":"read_oracle_table.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"586516508","text":"# -*- encoding: utf-8 *-*\n\nimport datetime\n\n# customize traveling specs\n# mind that ltur offers bahn tickets only for the next 7 days starting from tomorrow\nfrom_city = 'Berlin Hbf'\nto_city = 'München Hbf'\n\n# default to tomorrow\non_date = ( datetime.date.today() + datetime.timedelta( days=1 )).strftime( '%d.%m.%Y' )\n#on_date = '21.01.2013'\n\nat_time = '09:12'\nmax_price = 40.0\n\n# ltur's Bahn webpage\nurl = 'http://www.ltur.com/de/bahn.html?omnin=DB-DE'\n\n# set the mode of notification: pushover or email\n# MODE = 'pushover'\nMODE = 'email'\n\n# keywords for webscraping\nTRIGGER = '\\xe2\\x82\\xac'\t# the euro sign\nIMPOSTOR = 'Sparangebote'\nDELIMITERS = 'label>| \\xe2\\x82\\xac'\n\n\n# Pushover config\nAPP_TOKEN = 'EpMD3BrlmxioeKvGujVccccPqHeUxd'\nUSER_TOKEN = ''\n\nEMAIL = 'you@example.org'\nFROM_EMAIL = 'lturdaemon@example.org'\nSMTP_SERVER = 'smtp.example.org'\nSMTP_USER = 'lturdaemon@example.org' # optional.\nSMTP_PASS = 'somesecretpassword' # optional.\n\n\nPUSHOVER_URL = \"api.pushover.net\"\nPUSHOVER_PATH = \"/1/messages.json\"\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"275494878","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Wed Nov 28 19:24:54 2018\n\n@author: andy\n\"\"\"\n\nimport numpy as np\nfrom derivative import partial_derivative\nimport pandas as pd\nimport os \nfrom scipy import interpolate\nfrom scipy.stats import norm\nfrom scipy import optimize\n\nfrom scipy.optimize import minimize\n\ndef BS(sig, F, K, P, T):\n\td1 = (np.log(F/K)+(0.5*sig**2)*T)/sig/np.sqrt(T)\n\td2 = d1 - sig*np.sqrt(T)\n\tNd1 = norm.cdf(d1)\n\tNd2 = norm.cdf(d2)\n\tcall = P*(F*Nd1 - K * Nd2)\n\treturn call\n\n\ndef BS_vol(F, K, C, P, T):\n\tsol = optimize.root(lambda x: BS(x, F, K, P, T)-C, 0.50, method='hybr')\t\n\treturn sol.x\ndef raw_SVI(a,b,rho,m,sigma,k):\n\tres = a+b*(rho*(k-m)+np.sqrt((k-m)**2+sigma**2))\n\tif res <0:\n\t\treturn 1e-8\n\treturn res\n\t\n\ndef impvol_Error(param, k, impvol, FT, PT, T):\n\tN = len(k)\n\tfitted = np.zeros(N)\n\ta,b,rho,m,sigma = param\n\tfor i in range(N):\n\t\tfitted_tot_var = raw_SVI(a,b,rho,m,sigma,k[i])\n\t\tfitted_var = fitted_tot_var / T\n\t\tfitted_vol = np.sqrt(fitted_var)\n\t\tfitted[i] = fitted_vol\n\treturn np.nanmean((impvol-fitted)**2)\t\nclass Stock_calibrate_LV:\n\t\"\"\"\n\tLocal Vol model calibration, assuming deterministic short rates and constant dividend\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t1. Read available european call data and parametrize using raw SVI.\n\t\t2. Read relevant stock data (S0, q)\n\t\t3. Compute local vol using Dupire Formula. \n\t\t4. self.simga is a function that takes in (S_t,t,r_t)\n\t\t\"\"\"\n\t\t\n\t\tdf_stock_stats = pd.read_csv(os.path.join(\"data\", \"Stock.csv\"), index_col=0, header=None)\n\t\tspot_str = df_stock_stats.loc['Price'].squeeze()\n\t\tself.S0 = float(spot_str)\n\t\tself.q = float(df_stock_stats.loc['div'].squeeze().strip(\"%\"))/100\n\t\tdf_s = pd.read_csv(os.path.join(\"data\",\"StockCall_shortterm.csv\"),index_col=0)\n\t\tdf_m = pd.read_csv(os.path.join(\"data\",\"StockCall_midterm.csv\"),index_col=0)\n\t\tdf_l = pd.read_csv(os.path.join(\"data\",\"StockCall_longterm.csv\"),index_col=0)\n\t\tdf_all = pd.concat([df_s, df_m, df_l], axis=1)\n\n\t\tdf_all = df_all.loc[df_all.index >=2500]\n\t\tdf_all = df_all.loc[df_all.index<=3500 ]\n\t\tdf_all.columns = df_all.columns.astype(int)\n\t\n\t\tdf_Yield = pd.read_csv(os.path.join(\"data\",\"GERYield.csv\"),\n\t\t\t\t\theader = None, delimiter = \"\\t\", index_col=0)\n\t\tT = np.array(df_Yield.index)\n\t\tyields = np.array(df_Yield.loc[:,1])\n\t\tself.yields_interp = interpolate.interp1d(T.squeeze(), yields, 'cubic', fill_value='extrapolate')\n\t\tfor _T in df_all.columns:\n\t\t\tT = _T/365.0\n\t\t\tPT = self.EUR_bond_price(T)\n\t\t\tFT = self.stock_forward_price(T)\n\t\t\tfor K in df_all.index:\n\t\t\t\tif np.isnan(df_all.loc[K,_T]):\n\t\t\t\t\tcontinue\n\t\t\t\tdf_all.loc[K,_T] = BS_vol(FT, K, df_all.loc[K,_T], PT, T)\n\t\tself.df_impliedvols = df_all\n\t\tself.get_RAW_SVI()\n\t\tself.sigma=self.local_vol\n\tdef EUR_bond_price(self,T):\n\t\tT_yield = self.yields_interp(T)\n\t\tprice = (1+T_yield/100)**(-T)\n\t\treturn price\n\tdef stock_forward_price(self,T):\n\t\tPT = self.EUR_bond_price(T)\n\t\treturn self.S0/PT*np.exp(-self.q*T)\n\t\n\n\t\n\tdef get_RAW_SVI(self):\n\t\tSVI = {}\n\t\t\n\t\tfor _T in self.df_impliedvols.columns:\n\t\t\tT = _T/365.0\n\t\t\ttmp_dict = {}\n\t\t\tPT = self.EUR_bond_price(T)\n\t\t\tFT = self.stock_forward_price(T)\n\t\t\tdf_impvol = self.df_impliedvols.loc[:,_T].dropna()\n\t\t\tK = np.array(df_impvol.index)\n\t\t\timplied_vol = np.array(df_impvol)\n\t\t\tk = np.log(K/FT)\n\t\t\traw_SVI_initial = [0.001, 0.2, 0.03, 
-0.2, 0.1]\n\t\t\tfun = lambda x: impvol_Error(x, k, implied_vol, FT,PT,T)\n\t\t\tbnds = ((None, None), (0, None), (-1,1), (None,None),(0,None))\n\t\t\tcons = ({'type': 'ineq', 'fun': lambda x: x[0]+x[1]*x[4]*np.sqrt(1-x[2]**2)})\n\t\t\tres = minimize(fun, raw_SVI_initial, method='SLSQP', bounds=bnds, constraints=cons)\n\t\t\ttmp_dict['a'] = res.x[0]\n\t\t\ttmp_dict['b'] = res.x[1]\n\t\t\ttmp_dict['rho'] = res.x[2]\n\t\t\ttmp_dict['m'] = res.x[3]\n\t\t\ttmp_dict['sigma'] = res.x[4]\n\t\t\tSVI[_T] = tmp_dict\n\t\tself.SVI_df = pd.DataFrame(SVI)\n\t\t\n\tdef SVI_imp_vol(self,K,T):\n\t\tFT = self.stock_forward_price(T)\n\t\tk = np.log(K/FT)\n\t\tT_arr_indays = np.array(self.SVI_df.columns)\n\t\tT_indays = T*365.0\n\t\t_i = np.searchsorted(T_arr_indays,T_indays)\n\t\tif _i == 0 or _i == len(T_arr_indays):\n\t\t\tif _i == 0:\n\t\t\t\tparam = self.SVI_df.loc[:,T_arr_indays[0]]\n\n\t\t\telse:\n\t\t\t\tparam = self.SVI_df.loc[:,T_arr_indays[-1]]\n\n\t\t\ta,b,rho,m,sigma = list(param)\n\t\t\timp_tot_var = raw_SVI(a,b,rho,m,sigma,k)\n\t\t\timp_vol = np.sqrt(imp_tot_var/T)\n\t\t\treturn imp_vol\n\t\telse:\n\t\t\tT1 = T_arr_indays[_i-1]\n\t\t\tT2 = T_arr_indays[_i]\n\t\t\tparam1 = self.SVI_df.loc[:,T1]\n\t\t\ta1 = param1['a']\n\t\t\tb1 = param1['b']\n\t\t\tm1 = param1['m']\n\t\t\trho1 = param1['rho']\n\t\t\tsigma1 = param1['sigma']\n\t\t\tparam2 = self.SVI_df.loc[:,T2]\n\t\t\ta2 = param2['a']\n\t\t\tb2 = param2['b']\n\t\t\tm2 = param2['m']\n\t\t\trho2 = param2['rho']\n\t\t\tsigma2 = param2['sigma']\n\t\t\tT1 = T1/365.0\n\t\t\tT2 = T2/365.0\n\n\t\t\timp_tot_var1 = raw_SVI(a1,b1,rho1,m1,sigma1,k)\n\t\t\timp_tot_var2 = raw_SVI(a2,b2,rho2,m2,sigma2,k)\n\t\t\timp_tot_var = (imp_tot_var2 - imp_tot_var1)/(T2-T1)*(T-T1)+imp_tot_var1\n\t\t\timp_vol = np.sqrt(imp_tot_var/T)\n\t\t\treturn imp_vol\n\tdef local_vol(self, K, T, r):\n\t\t\"\"\"\n\t\tCompute local vol given K, t, and r_t using Dupire formula \n\t\tassuming nonconstant but deterministic short rate\t\t\n\t\t\"\"\"\n\t\tFT = self.stock_forward_price(T)\n\t\timp_vol = self.SVI_imp_vol(K,T)\n\t\td1 = (np.log(FT/K)+0.5*imp_vol*imp_vol)/(imp_vol*np.sqrt(T))\n\t\td2 = d1 - imp_vol*np.sqrt(T)\n\n\t\tdsigmadT = partial_derivative(self.SVI_imp_vol, 1 ,[K, T], dx=1e-3) \n\t\tdsigmadK = partial_derivative(self.SVI_imp_vol, 0, [K, T], dx=50) \n\t\td2sigmadK2 = partial_derivative(self.SVI_imp_vol, 0, [K, T],2, dx=50) \n\t\tnumerator = 0.5*imp_vol/T + dsigmadT + K*(r-self.q)* dsigmadK\n\t\tcoef = 0.5*K*K\n\t\tfirst = 1.0/(imp_vol*K*K*T)\n\t\tsecond = 2*dsigmadK*d1/(imp_vol*K*np.sqrt(T))\n\t\tthird = d2sigmadK2\n\t\tfourth = dsigmadK**2 * (d1*d2)/imp_vol\n\t\tdenumerator = coef*(first+second+third+fourth)\n\t\tsig2 = numerator / denumerator\n\t\tif sig2 >1e-8 and sig2 < 0.7**2:\n\t\t\treturn np.sqrt(sig2)\n\t\telif sig2 <= 1e-8:\n\t\t\treturn 0.01\n\t\telse:\n\t\t\treturn 0.7\n\n\n#==============================================================================\n# import math\n# from mpl_toolkits.mplot3d import Axes3D\n# if __name__ == \"__main__\":\n# \tsc = Stock_calibrate_LV()\n# \t#df = sc.df_all\n# \t#print(sc.local_vol(4000,0.6))\n# \t\n# \tNK= 20\n# \tNT =10\n# \tX = np.linspace(2500,4000,NK)\n# \tY = np.linspace(0.1,2,NT)\n# \tXX,YY = np.meshgrid(X,Y)\n# \tZ = np.zeros((NK,NT))\n# \tfor j in range(0,NT):\n# \t\tfor i in range(0,NK):\n# \t\t\tZ[i,j] = sc.local_vol(X[i],Y[j], -0.00)\n# \n# \n# \tfig = plt.figure()\n# \tax = plt.axes(projection='3d')\n# \tax.contour3D(XX, YY, Z.T, 50, cmap='binary')\n# \tplt.show()\n# \t\n# \tprint(Z.mean())\n# \n# 
\n#==============================================================================\n","sub_path":"Stock_LV_calibrate.py","file_name":"Stock_LV_calibrate.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"248807656","text":"\"\"\"Mocked unit test\"\"\"\n# pylint: disable=C0301\n# pylint: disable=C0103\n# pylint: disable=W0613\n# pylint: disable=W0107\nimport unittest\nfrom unittest.mock import patch\nfrom os.path import dirname, join\nimport sys\n\nsys.path.insert(1, join(dirname(__file__), '../'))\nimport json\nimport app\n\n\nclass MockedTest(unittest.TestCase):\n \"\"\" Mocked unit test cases \"\"\"\n\n def setUp(self):\n \"\"\"feedback db Mocked unit test\"\"\"\n self.success_test_params = [\n {\n \"name\": \"Hammad\",\n \"feedback\": \"test\"\n }\n ]\n\n self.failure_test_params = [\n {\n \"name\": 123,\n \"feedback\": 123\n }\n ]\n\n class MockSession:\n \"\"\"mock session\"\"\"\n def add(self, value):\n \"\"\"add\"\"\"\n pass\n\n def commit(self):\n \"\"\"commit\"\"\"\n pass\n\n def remove(self):\n \"\"\"remove\"\"\"\n pass\n\n def query(self, param):\n \"\"\"query\"\"\"\n return self\n\n def filter(self, param):\n \"\"\"filter\"\"\"\n return self\n\n def first(self):\n \"\"\"first\"\"\"\n return None\n\n @patch(\"app.api_call_for_news\")\n def test_app_mock(self, mock_api_call):\n \"\"\"News api Mocked unit test\"\"\"\n f = open(\"tests/NEWSDATA.json\", \"r\")\n mock_api_call.return_value.status_code = 200\n mock_api_call.return_value = f.read()\n\n r_json = json.loads(app.api_call_for_news(\"news\"))\n\n self.assertEqual(r_json[\"status\"], 'okkk')\n self.assertEqual(r_json[\"articles\"][0][\"author\"], 'Lisa Lerer')\n self.assertEqual(r_json[\"articles\"][0][\"source\"][\"id\"], 'None')\n self.assertEqual(r_json[\"articles\"][0][\"source\"][\"name\"], 'New York Times')\n f.close()\n\n @patch(\"app.send_message\")\n def test_dictionary_failure(self, mocked_get):\n \"\"\"dictionary api Mocked unit test\"\"\"\n with patch('app.requests.get') as mocked_get:\n mocked_get.return_value.json.return_value = [\"added\", \"daddy\", \"add\", \"adds\", \"dad\", \"dads\", \"dead\",\n \"deed\", \"did\", \"dido\",\n \"died\", \"dodo\", \"dud\", \"dude\", \"duds\", \"dyad\",\n \"dyed\", \"eddy\", \"odd\", \"odds\"]\n\n response = app.messageDict('dddd')\n result = \"Sorry, we can't find the definition of the term you are looking for.\"\n self.assertEqual(response, result)\n\n @patch(\"app.send_message\")\n def test_dictionary_success(self, mocked_get):\n \"\"\"dictionary api Mocked unit test\"\"\"\n with patch('app.requests.get') as mocked_get:\n mocked_get.return_value.json.return_value = [{\"shortdef\": [\"A piece of paper indicating a person\\u0027s \"\n \"preferences in an election\",\n \"the right to formally express one\\u0027s \"\n \"position \"\n \"or will in an election\"]}]\n response = app.messageDict('ballot')\n result = \"A piece of paper indicating a person\\u0027s \" \\\n \"preferences in an election, \" \\\n \"the right to formally express one\\u0027s \" \\\n \"position \" \\\n \"or will in an election\"\n self.assertEqual(response, result)\n\n def test_socket_dictionary(self):\n \"\"\"mocked test to check dictionary api\"\"\"\n flask_test_client = app.app.test_client()\n with unittest.mock.patch('app.messageDict') as mocked_messageDict:\n mocked_messageDict.return_value = \"A piece of paper indicating a person's preferences in an election, \" \\\n \"the right to formally express one's 
position or will in an election\"\n socketio_test_client = app.socketio.test_client(app.app,\n flask_test_client=flask_test_client)\n socketio_test_client.emit('send message', 'ballot')\n result = socketio_test_client.get_received()\n message = result[0]['args'][0]['messageReceived']\n self.assertEqual(message, \"A piece of paper indicating a person's preferences in an election, \"\n \"the right to formally express one's position or will in an election\")\n\n def test_word_of_day(self):\n \"\"\"mocked test to check word of the day\"\"\"\n flask_test_client = app.app.test_client()\n with unittest.mock.patch('app.messageDict') as mocked_messageDict:\n mocked_messageDict.return_value = \"None\"\n socketio_test_client = app.socketio.test_client(app.app,\n flask_test_client=flask_test_client)\n socketio_test_client.emit('word of the day')\n result = socketio_test_client.get_received()\n message = result[0]['args'][0]['messageReceived']\n self.assertNotEqual(message, \"A piece of paper indicating a person's preferences in an election, \"\n \"the right to formally express one's position or will in an election\")\n\n @patch('app.flask')\n def test_map_feature(self, mocked_flask):\n \"\"\"mocked test for map method\"\"\"\n mocked_flask.request.sid = 'abcdef'\n\n def mocked_open(file, mode):\n \"\"\"file open for fake map json data\"\"\"\n return open(\"tests/fake_map.json\", 'r')\n\n def mocked_emit(event, data, room):\n \"\"\"socket mocked test for map data\"\"\"\n self.assertEqual(event, \"sendState\")\n self.assertEqual(room, \"abcdef\")\n self.assertEqual(data, {\n 'sendState': 'Fake State',\n 'sendPop': 'FK',\n 'sendVotes': '10',\n 'sendSenators': '5',\n 'sendHouse': '5',\n 'sendWeb': 'gov.org'\n })\n\n with unittest.mock.patch('app.open', mocked_open):\n with unittest.mock.patch('app.socketio.emit', mocked_emit):\n app.map_state(objState={'state': 'Fake State'})\n\n @patch(\"app.db.session\", MockSession())\n @patch(\"app.user_sids\", create=True)\n def test_new_user_connection(self, mock_sids):\n \"\"\"user connection Mocked unit test\"\"\"\n flask_test_client = app.app.test_client()\n socketio_test_client = app.socketio.test_client(app.app,\n flask_test_client=flask_test_client)\n socketio_test_client.emit('connect user', {\n \"name\": \"Jay Amin\",\n \"email\": \"juiamin1000@gmail.com\",\n \"imageUrl\": \"profile.jpg\"\n })\n result = socketio_test_client.get_received()\n user = result[0]['args'][0]['user']\n self.assertEqual(user, 'Jay Amin')\n\n def test_on_new_message_success(self):\n \"\"\" testing success of new feedback \"\"\"\n mock_session = self.MockSession()\n for test in self.success_test_params:\n with unittest.mock.patch('app.db.session', mock_session):\n app.on_new_feedback(test)\n\n def test_on_new_message_failure(self):\n \"\"\" testing failure of new feedback \"\"\"\n for test in self.failure_test_params:\n mock_session = self.MockSession()\n with unittest.mock.patch('app.db.session', mock_session):\n app.on_new_feedback(test)\n\n def test_home(self):\n \"\"\"homepage api Mocked unit test\"\"\"\n tester = app.app.test_client(self)\n response = tester.get('/', content_type='html')\n self.assertEqual(response.status_code, 200)\n\n @patch('app.flask')\n def test_quiz_generation(self, mocked_flask):\n \"\"\"Quiz Mocked unit test\"\"\"\n mocked_flask.request.sid = 'abcdef'\n\n class MockSession:\n \"\"\"mock session\"\"\"\n class MockQuery:\n \"\"\"mock query\"\"\"\n def all(self):\n \"\"\"all\"\"\"\n class MockRecord:\n \"\"\"mock record\"\"\"\n def __init__(self, text, 
group_name, multiplier):\n self.text = text\n self.group_name = group_name\n self.multiplier = multiplier\n\n return [MockRecord('Test question for unit test', 'unittest group', 99)]\n\n def query(self, param):\n \"\"\"mocked query\"\"\"\n return self.MockQuery()\n\n def mocked_emit(event, data, room):\n \"\"\"Quiz Mocked unit test\"\"\"\n self.assertEqual(event, \"quiz generated\")\n self.assertEqual(room, \"abcdef\")\n self.assertTrue(isinstance(data, list))\n self.assertEqual(len(data), 1)\n self.assertTrue(isinstance(data[0], dict))\n self.assertEqual(data[0]['text'], \"Test question for unit test\")\n self.assertEqual(data[0]['multiplier'], 99)\n\n with unittest.mock.patch('app.db.session', MockSession()):\n with unittest.mock.patch('app.socketio.emit', mocked_emit):\n app.request_quiz()\n\n def test_quiz_load(self):\n \"\"\" testing for quiz load method\"\"\"\n class MockSession:\n \"\"\"mock session\"\"\"\n def __init__(self, unittest_class):\n \"\"\"initial method to assign variables\"\"\"\n self.questions = []\n self.unittest_class = unittest_class\n\n class MockQuery:\n \"\"\" mocking query \"\"\"\n def delete(self):\n \"\"\"deleting method\"\"\"\n pass\n\n def query(self, param):\n \"\"\"query\"\"\"\n return self.MockQuery()\n\n def commit(self):\n \"\"\"commit\"\"\"\n pass\n\n def add(self, question_record):\n \"\"\"make sure question texts are all unique, since text is used as primary key\"\"\"\n self.unittest_class.assertNotIn(question_record.text, self.questions)\n self.questions.append(question_record.text)\n\n with unittest.mock.patch('app.db.session', MockSession(self)):\n app.load_quiz_questions()\n \n @patch('app.flask')\n def test_quiz_save(self, mocked_flask):\n \"\"\" Test for save_quiz function \"\"\"\n mocked_flask.request.sid = 'abcdef'\n \n expected_msg = \"\"\n def mocked_emit(event, data, room):\n \"\"\" Mocked socketIO emit to test that correct message is being emitted \"\"\"\n self.assertEqual(data['message'], expected_msg)\n self.assertEqual(room, 'abcdef')\n \n with unittest.mock.patch('app.db.session', self.MockSession()):\n with unittest.mock.patch('app.socketio.emit', mocked_emit):\n with unittest.mock.patch('app.user_sids', {}, create=True):\n expected_msg = 'user not logged in'\n app.save_quiz(50)\n \n with unittest.mock.patch('app.user_sids', {'abcdef': 'test@test.com'}, create=True):\n expected_msg = 'success'\n app.save_quiz(-90)\n \n @patch('app.flask')\n def test_prev_quiz_result(self, mocked_flask):\n \"\"\" Test for get_prev_quiz_result function \"\"\"\n mocked_flask.request.sid = 'abcdef'\n \n expected_msg = \"\"\n def mocked_emit(event, data, room):\n \"\"\" Mocked socketIO emit to test that correct message is being emitted \"\"\"\n self.assertEqual(data['message'], expected_msg)\n self.assertEqual(room, 'abcdef')\n \n with unittest.mock.patch('app.db.session', self.MockSession()):\n with unittest.mock.patch('app.socketio.emit', mocked_emit):\n with unittest.mock.patch('app.user_sids', {}, create=True):\n expected_msg = 'user not logged in'\n app.get_prev_quiz_result()\n \n with unittest.mock.patch('app.user_sids', {'abcdef': 'test@test.com'}, create=True):\n expected_msg = 'no record found'\n app.get_prev_quiz_result()\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/mocked_unit_tests.py","file_name":"mocked_unit_tests.py","file_ext":"py","file_size_in_byte":12557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"18500690","text":"import time\nimport pandas as 
pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\ncity_dictionary = {'c': 'chicago',\n 'n': 'new york city',\n 'w': 'washington'}\nmonth_dictionary = {'1': 'january',\n '2': 'february',\n '3': 'march',\n '4': 'april',\n '5': 'may',\n '6': 'june'}\nday_dictionary = {'1': 'monday',\n '2': 'tuesday',\n '3': 'wednesday',\n '4': 'thursday',\n '5': 'friday',\n '6': 'saturday',\n '7': 'sunday'}\n\n\n\ndef get_month():\n \"\"\"\n Requests user to select which month by which to filter the data.\n\n Returns:\n (str) month - name of the month to filter by\n \"\"\"\n # get user input for month (january, february, ... , june)\n while True:\n print('Which month\\'s data would you like to analyze?')\n\n month = input('Enter the number corresponding to the desired month: \\n(1) January, (2) February, (3) March, (4) April, (5) May, (6) June: ').lower()\n\n if month in month_dictionary:\n month = month_dictionary[month]\n break\n else:\n print('\\nYour response was not one of the options. Let\\'s try again!\\n')\n\n print('-'*40)\n return(month)\n\n\ndef get_day():\n \"\"\"\n Requests user to select which day of the week by which to filter the data.\n\n Returns:\n (str) day - name of the day of the week to filter by\n \"\"\"\n # get user input for day of week (monday, tuesday, ... sunday)\n while True:\n print('Which day of the week would you like to use for your data analysis?')\n\n day = input('Enter the number corresponding to the desired day: \\n(1) Monday, (2) Tuesday, (3) Wednesday, (4) Thursday, (5) Friday, (6) Saturday, (7) Sunday: ').lower()\n\n if day in day_dictionary:\n day = day_dictionary[day]\n break\n else:\n print('\\nYour response was not one of the options. Let\\'s try again!\\n')\n\n print('-'*40)\n return(day)\n\n\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n\n # print introductory information and graphic\n print('\\nHello! Let\\'s explore some US bikeshare data!\\n')\n print('The data is provided by the bikeshare system provider Motivate for three large cities: Chicago, New York City, and Washington DC.')\n print('The data sets consist of randomly selected data for the first six months (January through June) of 2017.')\n bike_string = \"\"\"\n\n /-/ ___\n ____|=====|=____\n / \\ / \\\\\n \\____/ \\____/\n\n \"\"\"\n print(bike_string)\n\n # get user input for city (chicago, new york city, washington).\n while True:\n print('Which city\\'s bikeshare data would you like to analyze?')\n\n city = input('Enter \\'c\\' for Chicago, \\'n\\' for New York City, or \\'w\\' for Washington DC: ').lower()\n\n if city in city_dictionary:\n city = city_dictionary[city]\n break\n else:\n print('\\nYour response was not one of the options. 
Let\\'s try again!\\n')\n\n print('-'*40)\n\n # get user input for time filter -- month, day, both, or none\n while True:\n print('You can filter the {} data by month (\\'m\\'), by day of week (\\'d\\'), by both month and day of week (\\'b\\'), \\nor you can choose not to use any time filter, thereby selecting all months and days of the week (\\'a\\').'.format(city.title()))\n time_filter = input('Please make your selection: ').lower()\n\n if time_filter == 'a':\n month = 'all'\n day = 'all'\n break\n elif time_filter == 'm':\n month = get_month()\n day = 'all'\n break\n elif time_filter == 'd':\n month = 'all'\n day = get_day()\n break\n elif time_filter == 'b':\n month = get_month()\n day = get_day()\n break\n else:\n print('\\nYour response was not one of the options. Let\\'s try again!\\n')\n\n return city, month, day\n\n\n\ndef check_input(city, month, day):\n \"\"\"\n Checks with user that the selection was as intended.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n (bool) True if selection was as user intended, False if otherwise\n \"\"\"\n\n while True:\n print('\\nYour selection was...\\nCity: {}\\nMonth: {}\\nDay of Week: {}\\n'.format(city.title(), month.title(), day.title()))\n response = input('Is this correct? Enter the letter \\'y\\' for Yes or \\'n\\' for No: ').lower()\n\n if response == 'y' or response == 'n':\n break\n else:\n print('\\nYour response was not one of the options. Let\\'s try again!\\n')\n\n\n print('-'*40)\n\n return response == 'y'\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day.title()]\n\n return df\n\n\n\ndef time_stats(df, month, day):\n \"\"\"\n Displays statistics on the most frequent times of travel.\n\n Arguments:\n df - Pandas DataFrame containing the selected city data, possibly filter by month and day of week\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month under the condition that all months were chosen\n if month == 'all':\n 
popular_month = df['month'].mode()[0]\n print('The most popular month is: ', month_dictionary[str(popular_month)].title())\n\n # display the most common day of week under the condition that all days of the week were chosen\n if day == 'all':\n popular_day_of_week = df['day_of_week'].mode()[0]\n print('The most popular day of the week is: ', popular_day_of_week)\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print('The most popular hour to start is: {}:00 (24-hr clock)'.format(popular_hour))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Most Popular Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most popular start station is: ', popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular end station is: ', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n df['combo_start_end'] = df['Start Station'] + ',' + df['End Station']\n popular_combo_start, popular_combo_end = df['combo_start_end'].mode()[0].split(',')\n print('The most popular combination of start and end stations consists in...')\n print('Start: {} End: {}'.format(popular_combo_start, popular_combo_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_minutes = total_travel_time // 60\n remaining_tot_seconds = total_travel_time % 60\n print('The total bikeshare travel time was: {} seconds ({} minutes, {} seconds)'.format(total_travel_time, total_minutes, remaining_tot_seconds))\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n mean_minutes = mean_travel_time // 60\n remaining_mean_seconds = mean_travel_time % 60\n print('The mean bikeshare travel time was: {} seconds ({} minutes, {} seconds)'.format(int(mean_travel_time), int(mean_minutes), int(remaining_mean_seconds)))\n\n # display percentage of trips under an hour\n total_num_trips = df['Trip Duration'].count()\n trips_under_hour = df.loc[df['Trip Duration'] < 3600, 'Trip Duration'].count()\n percentage_under_hour = trips_under_hour * 100 / total_num_trips\n print('The percentage of trips under one hour was: {}'.format(percentage_under_hour.round()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df, city):\n \"\"\"\n Displays statistics on bikeshare users.\n\n Args:\n df - Pandas dataframe\n (str) city - name of the city to analyze\n \"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('User Type Counts')\n df_user_types = df['User Type'].value_counts()\n print(df_user_types)\n\n # Display statistics about gender and birth year only for Chicago and New York, not Washington\n if city != 'washington':\n # Display counts of gender\n print('\\nGender Counts')\n df_gender = df['Gender'].value_counts()\n 
print(df_gender)\n\n # Display earliest, most recent, and most common year of birth\n earliest_birth_year = df['Birth Year'].min()\n print('\\nThe earliest year of birth of users: ', int(earliest_birth_year))\n\n most_recent_birth_year = df['Birth Year'].max()\n print('The most recent year of birth of users: ', int(most_recent_birth_year))\n\n most_common_birth_year = df['Birth Year'].mode()[0]\n print('The most common year of birth of users: ', int(most_common_birth_year))\n\n else:\n print('\\nData and statistics concerning gender and birth year are not available for Washington DC.')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n\n # Get info for filter from user and check that info with the user\n ready = False\n while not ready:\n city, month, day = get_filters()\n ready = check_input(city, month, day)\n\n # Create DataFrame\n df = load_data(city, month, day)\n\n # Print stats \n print('*'*50)\n print('* Statistics for...\\n* City: {}\\n* Month: {}\\n* Day of Week: {}'.format(city.title(), month.title(), day.title()))\n print('*'*50)\n\n time_stats(df, month, day)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df, city)\n\n # Prompt user if they would like to see rows of raw data from the dataframe, 5 at a time.\n while True:\n print('\\nWould you like to see 5 lines of raw data from the dataframe your selection created?')\n raw_data = input('Enter the letter \\'y\\' for Yes or \\'n\\' for No: ').lower()\n i = 0\n if raw_data == 'y':\n print(df.iloc[i:i+5, :])\n print('-'*40)\n\n while True:\n raw_data = input('\\nWould you like to see more raw data? Enter \\'y\\' for Yes or \\'n\\' for No: ').lower()\n if raw_data == 'y':\n i += 5\n print('\\n', df.iloc[i:i+5, :])\n print('-'*40)\n else:\n break\n break\n else:\n break\n\n # Ask user if they would like to restart\n restart = input('\\nWould you like to start over for a fresh analysis? 
Enter the letter \\'y\\' for Yes or \\'n\\' for No: ')\n if restart.lower() != 'y':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":13579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"81032589","text":"n = map(int, input())\nar =list(map(int, input().split()))\nmaxlen = 0\ncurrlen = 0\nstack = []\nl = []\nfor ele in ar:\n if ele > 0:\n stack.append(ele)\n else:\n if len(stack) > 0 and stack[-1] == -ele:\n currlen += 2\n stack.pop()\n if currlen >= maxlen:\n maxlen = currlen\n if len(stack) == 0:\n l.append(currlen)\n currlen = 0\n else:\n currlen = 0\n stack = []\n l.append(-1)\ncurrlen = 0\nfor ele in l:\n if ele == -1:\n currlen = 0\n else:\n currlen += ele\n if currlen >= maxlen:\n maxlen = currlen\nprint(maxlen)\n\n\nn=int(input())\nar=list(map(int,input().split()))\nc=[0]\nfor i in range(len(ar)-1):\n for j in range(i+1,len(ar)+1):\n ele=ar[i:j]\n if len(ele)%2 == 0:\n a=len(ele)\n for k in range(a//2):\n b=-(ele[-1])\n if ele[0] == b:\n ele=ele[1:-1]\n else:\n break\n if len(ele)==0:\n c.append(a)\n else:\n continue\nprint(max(c))","sub_path":"Hacker/Data Structures/Stacks/hack_little_monk_and_balanced_parenthesis.py","file_name":"hack_little_monk_and_balanced_parenthesis.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"391533615","text":"from timeExecution import time_execution\n\ndef second_algorithm(n):\n c = [True] * n\n c[0] = False\n c[1] = False\n i = 2\n while i <= n:\n j = 2 * i\n while j < n:\n c[j] = False\n j = j + i\n i = i + 1\n\n return([i for i in range(n) if c[i]])\n\n\ntime_execution(second_algorithm)","sub_path":"secondAlgorithm.py","file_name":"secondAlgorithm.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617359316","text":"import collections\nimport time\nfrom collections import namedtuple\nfrom datetime import datetime\n\nfrom discord import Object, HTTPException, MessageType, AllowedMentions\n\nfrom Util import Translator, Emoji, Archive\nfrom database import DBUtils\nfrom database.DatabaseConnector import LoggedMessage\n\nMessage = namedtuple(\"Message\", \"messageid author content channel server attachments type pinned\")\n\ndef is_cache_enabled(bot):\n return bot.redis_pool is not None\n\n\nattachment = namedtuple(\"attachment\", \"id name\")\n\nasync def get_message_data(bot, message_id):\n message = None\n if is_cache_enabled(bot) and not Object(message_id).created_at <= datetime.utcfromtimestamp(time.time() - 5 * 60):\n parts = await bot.redis_pool.hgetall(f\"messages:{message_id}\")\n if len(parts) is 6:\n message = Message(message_id, int(parts[\"author\"]), parts[\"content\"], int(parts[\"channel\"]), int(parts[\"server\"]), [attachment(a.split(\"/\")[0], a.split(\"/\")[1]) for a in parts[\"attachments\"].split(\"|\")] if len(parts[\"attachments\"]) > 0 else [], type=int(parts[\"type\"]) if \"type\" in parts else None, pinned=parts[\"pinned\"] == '1')\n if message is None:\n message = LoggedMessage.get_or_none(LoggedMessage.messageid == message_id)\n return message\n\nasync def insert_message(bot, message, redis=True):\n message_type = message.type\n if message_type == MessageType.default:\n message_type = None\n else:\n if not isinstance(message_type, int):\n message_type = message_type.value\n if 
redis and is_cache_enabled(bot):\n pipe = bot.redis_pool.pipeline()\n pipe.hmset_dict(f\"messages:{message.id}\", author=message.author.id, content=message.content,\n channel=message.channel.id, server=message.guild.id, pinned=1 if message.pinned else 0, attachments='|'.join((f\"{str(a.id)}/{str(a.filename)}\" for a in message.attachments)))\n if message_type is not None:\n pipe.hmset_dict(f\"messages:{message.id}\", type=message_type)\n pipe.expire(f\"messages:{message.id}\", 5*60+2)\n await pipe.execute()\n DBUtils.insert_message(message)\n\nasync def update_message(bot, message_id, content, pinned):\n if is_cache_enabled(bot) and not Object(message_id).created_at <= datetime.utcfromtimestamp(time.time() - 5 * 60):\n pipe = bot.redis_pool.pipeline()\n pipe.hmset_dict(f\"messages:{message_id}\", content=content)\n pipe.hmset_dict(f\"messages:{message_id}\", pinned=(1 if pinned else 0))\n await pipe.execute()\n LoggedMessage.update(content=content, pinned=pinned).where(LoggedMessage.messageid == message_id).execute()\n\ndef assemble(destination, emoji, m, translate=True, **kwargs):\n translated = Translator.translate(m, destination, **kwargs) if translate else m\n return f\"{Emoji.get_chat_emoji(emoji)} {translated}\"\n\nasync def archive_purge(bot, id_list, guild_id):\n message_list = dict()\n for mid in id_list:\n message = await get_message_data(bot, mid)\n if message is not None:\n message_list[mid] = message\n if len(message_list) > 0:\n await Archive.archive_purge(bot, guild_id,\n collections.OrderedDict(sorted(message_list.items())))\n\n\nasync def send_to(destination, emoji, message, delete_after=None, translate=True, embed=None, **kwargs):\n translated = Translator.translate(message, destination.guild, **kwargs) if translate else message\n return await destination.send(f\"{Emoji.get_chat_emoji(emoji)} {translated}\", delete_after=delete_after, embed=embed, allowed_mentions=AllowedMentions(everyone=False, users=True, roles=False))\n\nasync def try_edit(message, emoji: str, string_name: str, embed=None, **kwargs):\n translated = Translator.translate(string_name, message.channel, **kwargs)\n try:\n return await message.edit(content=f'{Emoji.get_chat_emoji(emoji)} {translated}', embed=embed)\n except HTTPException:\n return await send_to(message.channel, emoji, string_name, embed=embed, **kwargs)\n\n\ndef day_difference(a, b, location):\n diff = a - b\n return Translator.translate('days_ago', location, days=diff.days, date=a)\n\ndef construct_jumplink(guild_id, channel_id, message_id):\n return f\"https://discordapp.com/channels/{guild_id}/{channel_id}/{message_id}\"\n","sub_path":"GearBot/Util/MessageUtils.py","file_name":"MessageUtils.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"37114516","text":"import gdal\nimport osr\nimport ephem \nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.ndimage.interpolation import geometric_transform \nimport os\nimport subprocess as sbp\nimport logging \nimport unittest\nimport warnings \n\n__all__ = [\"ElevationTile\",\"dual_plots\",\n \"yosemite_plot\",\"get_terrain_angle\",\"get_terrain_angles\",\n \"plot_terrain_angles\",\"polar_map\",\"remap_srs\",\n \"show_terrain_angle\",\"transform_points\"]\n\n\n\nclass ElevationTile(object):\n \"\"\"\n Class to encapsulate the features of a digital elevation \n model. 
Constructor takes a GeoTIFF filename or a previously\n read-in GeoTIFF object\n \n Elevation values are stored as a numpy array in data member 'elev'\n \"\"\"\n \n def __init__(self,geoTiffObj):\n self.layer = geoTiffObj\n\n gt = self.layer.GetGeoTransform()\n\n self.origin = (gt[0],gt[3])\n self.dx = gt[1]\n self.dy = gt[5]\n\n self.srid = self.layer.GetProjectionRef()\n self.mddc = self.layer.GetMetadata_Dict()\n\n self.band = self.layer.GetRasterBand(1)\n\n self.stats = self.band.GetMetadata_Dict()\n self.nan = self.band.GetNoDataValue()\n\n arr = self.band.ReadAsArray()\n self.elev = np.ma.masked_array( arr, arr==self.nan)\n\n ny,nx = arr.shape\n\n self.x = [gt[0]+n*gt[1] for n in range(nx)]\n self.y = [gt[3]+n*gt[5] for n in range(ny)]\n\n @classmethod\n def from_filename(cls, filename):\n \"Initialize from file\"\n data = gdal.Open(filename)\n return cls(data)\n\n def imagesc(self,zlims=None,clrmap='hot'):\n \"\"\"\n function that plots the elevation data with x,y values\n corresponding to the geotiff data. Returns the axis\n which then can be used to over-plot locations, lines, ...\n \"\"\"\n fig = plt.figure(); axx = fig.add_subplot(1,1,1)\n ext = [self.x[0],self.x[-1],self.y[-1],self.y[0]]\n if zlims is None:\n axx.imshow(self.elev,interpolation='None',extent=ext,cmap=clrmap)\n else:\n axx.imshow(self.elev,clim=zlims,interpolation='None',\n extent=ext,cmap=clrmap)\n\n return axx\n\n def fnd_ij(self,pointlist):\n \"\"\"\n Return (i,j) indices for a list of (x,y) values\n \"\"\"\n IJ = [find_ij(self.x, self.y, p) for p in pointlist] \n return IJ\n\n def remap_srs(self,espg):\n \"\"\"\n Return a new ElevationTile object based on ESPG number\n \"\"\"\n dstImg = remap_srs(self.layer,espg)\n dstElev = ElevationTile(dstImg)\n return dstElev \n\ndef remap_srs(srcImg, epsg=26911):\n \"\"\"\n srcImg : image read from a GeoTIFF\n epsg: EPSG number of the desired output image\n \n Returns a remapped image object\n \"\"\"\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(epsg)\n epsg_wkt = dst_srs.ExportToWkt()\n\n resample_method = gdal.GRA_Bilinear\n error_threshold = 0.125\n\n dstImg = gdal.AutoCreateWarpedVRT( srcImg, None, \n epsg_wkt, resample_method, \n error_threshold )\n\n return dstImg\n\n\ndef get_terrain_angle(geotiff, point):\n \"\"\"\n Returns terrain angle array\n \"\"\"\n c = ElevationTile(geotiff)\n\n X0, Y0 = np.meshgrid(c.x, c.y)\n\n x0 = point[0]\n y0 = point[1]\n X = X0-x0\n Y = Y0-y0\n distarr = np.sqrt( np.multiply(X,X) + np.multiply(Y,Y))\n\n m = np.min(distarr)\n\n ii = np.ndarray.argmin(distarr)\n i0,j0 = np.unravel_index(ii, c.elev.shape)\n\n z0 = c.elev[i0][j0]\n\n el = np.arctan2(c.elev-z0,distarr)*180/np.pi\n\n return el\n\n\ndef show_terrain_angle(geotiff, points):\n \"\"\"\n Displays terrain angle\n The source geotiff must be in (x,y) meter units \n and z(x,y) in meters \n \"\"\"\n c = ElevationTile(geotiff)\n\n X0, Y0 = np.meshgrid(c.x, c.y)\n\n for p in points:\n x0 = p[0]\n y0 = p[1]\n X = X0-x0\n Y = Y0-y0\n distarr = np.sqrt( np.multiply(X,X) + np.multiply(Y,Y))\n\n m = np.min(distarr)\n\n ii = np.ndarray.argmin(distarr)\n i0,j0 = np.unravel_index(ii, c.elev.shape)\n\n z0 = c.elev[i0][j0]\n\n el = np.arctan2(c.elev-z0,distarr)*180/np.pi\n\n mxe = np.max(el)\n\n ext = [c.x[0],c.x[-1],c.y[-1],c.y[0] ]\n\n fig = plt.figure(); axx = fig.add_subplot(1,1,1)\n im = axx.imshow(el,interpolation='None',extent=ext,clim=(0,mxe),cmap='hot')\n axx.plot(x0,y0,'go') \n cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\n fig.colorbar(im, cax=cax)\n plt.show()\n\n\ndef 
transform_points(points,s_srs=4326,t_srs=26911):\n \"\"\"\n points is a list of 2-tuples\n s_srs is source spatial reference system\n t_srs is target spatial reference system\n \"\"\"\n if points is None:\n return\n\n points_list = [\"%s %s\\n\"%(p) for p in points ]\n\n points_str = (\" \").join(points_list)\n\n # TODO: what's the Python gdal equivalent?\n cmd = \"echo -e '%s' | gdaltransform -s_srs EPSG:%s -t_srs EPSG:%s -output_xy\" % (points_str,s_srs,t_srs) \n\n x = sbp.Popen(cmd, shell=True, stdout=sbp.PIPE).stdout.read()\n\n yy = x.strip().split(b\"\\n\")\n\n zz = [y.split(b\" \") for y in yy]\n\n out_points = [(float(z[0]),float(z[1])) for z in zz]\n\n return out_points\n\ndef find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx\n\n\ndef find_ij(x,y,p):\n \"\"\"\n Utility to find nearest indices for 2D arrays\n i,j are the row and column indices\n \"\"\"\n x0,y0 = p\n\n i = find_nearest(np.asarray(y),y0)\n j = find_nearest(np.asarray(x),x0)\n\n return i,j\n\n\ndef polar_map(src,nangles=720,p0=None,radius=None):\n \"\"\"\n w = polar_map(src,nangles,p0,radius)\n \n Function to remap an image on a cartesian grid to a polar map\n where angles run along columns and radial distance runs along rows\n \n nangles: number of angles \n p0: grid point (x0,y0) in pixel units to use as the origin\n (default is center of image)\n radius: pixel radius (defualt is min(src.shape)/2)\n \n Returned image has shape (radius,nangles)\n \"\"\"\n h,w = src.shape\n\n if p0 is None:\n y0 = float(h)/2 - 0.5; x0=float(w)/2 - 0.5\n else:\n x0,y0 = p0\n\n if radius is None:\n nr = np.min(src.shape)/2\n else:\n nr = np.floor(radius)\n\n nc = nangles\n\n \"\"\"\n This mapping assumes tha scr is in image coordinates\n so that rows increase in the downward direction\n and that north is up \n \"\"\"\n def mapping(rc):\n r,c = rc\n t = c*2*np.pi/nc\n\n x = x0 + np.sin(t)*r\n y = y0 - np.cos(t)*r\n\n return y,x\n \n return geometric_transform(src,mapping,(nr,nc),order=3,mode='nearest')\n\n\ndef get_terrain_angles(src,p0,z0,dr,nangles=720,radius=None):\n \"\"\"\n az,alt = get_terrain_angles(src,p0,z0,dr,nangles,p0,radius)\n \n Function to calculate terrain azimuth and elevation \n \n p0: grid point (x0,y0) in pixel units to use as the origin\n z0: elevation z(x0,y0) \n dr: pixel units [ dr and z0 can be any units but MUST agree]\n nangles: number of angles \n radius: pixel radius (defualt is min(src.shape)/2)\n \"\"\"\n\n w = polar_map(src,nangles,p0,radius)\n w = w - z0\n\n az = np.linspace(0,2*np.pi,nangles) \n r = np.linspace(0,(radius-1)*dr,radius)\n\n zm = np.amax(w,0)\n maxi = np.argmax(w,0)\n\n rm = [r[i] for i in maxi]\n\n alt = [np.arctan2(zm[i],rm[i]) for i in range(nangles)]\n\n return az,alt\n\ndef plot_terrain_angles(az,alt):\n\n altd = [t*180/np.pi for t in alt]\n\n altmax = (np.ceil(np.max(altd)/10))*10\n\n ax = plt.subplot(111, polar=True, axisbg='Azure');ax.set_rmax(altmax);ax.grid(True)\n \n ax.plot(-az+np.pi/2,altd,'o--')\n\n ax.set_xticklabels(['E', '', 'N', '', 'W', '', 'S', ''])\n\n return ax\n\ndef yosemite_plot(lon,lat):\n\n epsg = 26911\n c = ElevationTile.from_filename('20160817142431_1759362796.tif' )\n d = c.remap_srs(epsg)\n bldr_xy = transform_points([(lon,lat)]); IJ = d.fnd_ij(bldr_xy)\n\n p0 = IJ[0][1],IJ[0][0];z0 = d.elev[IJ[0]];dr = d.x[1] - d.x[0]\n\n TP = get_terrain_angles(d.elev,p0,z0,dr,360,900)\n\n ax = plot_terrain_angles(*TP)\n plt.show()\n\n\ndef dual_plots( c,lon,lat,date,utc_shift):\n \"\"\"\n dual_plots(c,lon,lat,date,utc_shift)\n c: an 
ElevationTile object\n lon,lat: longitude, latitude\n date: \"yyyy/mm/dd\"\n utc_shift: integer offset in hours for local noon\n \"\"\"\n if utc_shift < 0:\n utc_shift = 24 + utc_shift\n\n site_xy = transform_points([(lon,lat)]) \n IJ = c.fnd_ij(site_xy)\n\n # p0 is (x,y) and IJ is a list of (row,col)\n p0 = IJ[0][1],IJ[0][0] # In this case IJ is a list of one 2-tuple\n \n z0 = c.elev[IJ[0]] # Get the local elevation \n dr = c.x[1] - c.x[0] # It's assumed dy==dx\n\n # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n w = polar_map(c.elev,1440,p0,900)\n\n fig0 = plt.figure() \n axx0 = fig0.add_subplot(1,2,1)\n axx1 = fig0.add_subplot(1,2,2)\n axx0.imshow(w,clim=[1000,3000],interpolation='None')\n axx1.imshow(c.elev,clim=[1000,3000],interpolation='None')\n # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n # d\\theta is half a degree => 720 angles\n TP = get_terrain_angles(c.elev,p0,z0,dr,1440,900)\n az = TP[0]\n alt = [t*180/np.pi for t in TP[1]]\n\n obs = ephem.Observer()\n\n obs.lon, obs.lat = str(lon), str(lat)\n obs.date = date\n # I believe pyephem only uses elevation for air pressure calculations\n # so this will have no effect\n obs.elevation = z0\n\n sun = ephem.Sun()\n minutesInDay = 24*60;\n ealt = np.zeros(minutesInDay)\n eaz = np.zeros(minutesInDay)\n \n for i in range(minutesInDay):\n sun.compute(obs)\n ealt[i]=sun.alt*180/np.pi; eaz[i]= sun.az \n obs.date += ephem.minute\n\n obs.date = date \n ealtH = np.zeros(24)\n eazH = np.zeros(24)\n\n for i in range(24):\n sun.compute(obs)\n ealtH[i]=sun.alt*180/np.pi; eazH[i]= sun.az \n obs.date += ephem.hour\n\n fig = plt.figure() \n ax = plt.subplot(111, polar=True, axisbg='Azure');ax.set_rmax(90);ax.grid(True)\n ax.set_theta_direction(-1) # clockwise \n ax.set_theta_zero_location('N') # zero theta points north (default is east) \n ax.plot(eaz,ealt,'r-')\n \n ax.plot(eazH,ealtH,'ro')\n\n ax.annotate('Noon', xy=( eazH[utc_shift], ealtH[utc_shift]), \n xycoords='data',xytext=(-50, 30), \n textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\") )\n\n alt = [t*180/np.pi for t in TP[1]]\n \n ax.plot(az,alt,'bo--')\n\n ax.set_xticklabels(['N', '', 'E', '', 'S', '', 'W', ''])\n\n fig3 = plt.figure(); ax3 = fig3.add_subplot(1,1,1)\n ax3.plot(az,alt,'bo--')\n\n return ax\n\nclass TestTerrShad(unittest.TestCase):\n\n def setUp(self):\n self.filename = \"20160817142431_1759362796.tif\"\n self.epsg = 26911\n self.yos_lon = -119.642498\n self.yos_lat = 37.722592\n self.n_angles = 360\n self.pxl_rad = 900\n\n def test_constructor(self):\n c = ElevationTile.from_filename(self.filename)\n self.assertFalse(c is None)\n\n def test_remap_srs(self):\n c = ElevationTile.from_filename(self.filename)\n d = c.remap_srs(self.epsg) \n self.assertFalse(d is None)\n\n def test_polar_map(self):\n c = ElevationTile.from_filename(self.filename)\n d = c.remap_srs(self.epsg) \n plr_map = polar_map(d.elev,self.n_angles)\n self.assertFalse(plr_map is None)\n\n def test_get_terrain_angles(self):\n c = ElevationTile.from_filename(self.filename)\n d = c.remap_srs(self.epsg) \n site_xy = transform_points([(self.yos_lon,self.yos_lat)]) \n IJ = d.fnd_ij(site_xy)\n \n p0 = IJ[0][1],IJ[0][0]\n \n z0 = d.elev[IJ[0]] # Get the local elevation \n dr = d.x[1] - d.x[0] # It's assumed dy==dx\n terr_angles = get_terrain_angles(d.elev,p0,z0,dr,\n self.n_angles,\n self.pxl_rad)\n self.assertFalse(terr_angles is None)\n\n def tearDown(self):\n del self.filename \n del self.epsg \n del self.yos_lon \n del self.yos_lat \n\n\nif __name__ == \"__main__\":\n\n 
logging.basicConfig(filename='TerrainShadow.log',\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M', level=logging.INFO)\n logging.info('Started')\n\n testTerrShad = unittest.TestLoader().loadTestsFromTestCase(TestTerrShad)\n suite = unittest.TestSuite([testTerrShad])\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n logging.info('Stopped')\n","sub_path":"TerrainShadow.py","file_name":"TerrainShadow.py","file_ext":"py","file_size_in_byte":12449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"316224293","text":"from synapyse.base.learning.training_set import TrainingSet\nfrom synapyse.impl.activation_functions.sigmoid import Sigmoid\nfrom synapyse.impl.input_functions.weighted_sum import WeightedSum\nfrom synapyse.impl.learning.momentum_back_propagation \\\n import MomentumBackPropagation\nfrom synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron\n\n__author__ = 'Douglas Eric Fonseca Rodrigues'\n\n# Define o sim da aplicação\nsim = 0.5\n\n# Importa os dados para treinamento\ndados_treinamento = TrainingSet(input_count=4, output_count=3) \\\n .import_from_file('iris_training.data')\n\n# Importa os dados para teste\ndados_teste = TrainingSet(input_count=4, output_count=3) \\\n .import_from_file('iris_testing.data')\n\n# Testa diversas taxas de aprendizagem\nfor numero_neuros_camada_oculta in [1, 2, 3, 4, 5, 6, 7]:\n\n # Cria a rede neural artificial\n rede_neural = MultiLayerPerceptron() \\\n .create_layer(neuron_count=4,\n input_function=WeightedSum()) \\\n .create_layer(neuron_count=numero_neuros_camada_oculta,\n input_function=WeightedSum(),\n activation_function=Sigmoid()) \\\n .create_layer(neuron_count=3,\n input_function=WeightedSum(),\n activation_function=Sigmoid()) \\\n .randomize_weights()\n\n # Cria o algoritmo de aprendizado\n aprendizado = MomentumBackPropagation(neural_network=rede_neural,\n learning_rate=0.7,\n max_error=0.05,\n momentum=0,\n max_iterations=30)\n\n print('Rede com número de neurônios na camada oculta =',\n numero_neuros_camada_oculta)\n\n # Inicia o aprendizado\n aprendizado.learn(dados_treinamento)\n\n # Testa o aprendizado\n classificacoes_corretas = 0\n classificacoes_incorretas = 0\n classificacoes_descartadas = 0\n\n possiveis_classificacoes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n for dado in dados_teste:\n output = rede_neural.set_input(dado.input_pattern) \\\n .compute() \\\n .output\n\n # Ajusta output para verificação do resultado\n for i in range(len(output)):\n if (1 - sim) < output[i] < (1 + sim):\n output[i] = 1\n elif (0 - sim) < output[i] < (0 + sim):\n output[i] = 0\n else:\n output[i] = None\n\n # Verifica a classificação\n if output == dado.ideal_output:\n classificacoes_corretas += 1\n elif output in possiveis_classificacoes:\n classificacoes_incorretas += 1\n else:\n classificacoes_descartadas += 1\n\n print('Número de iterações:', aprendizado.actual_iteration)\n print('Classificações corretas:', classificacoes_corretas, (100 / len(dados_teste)) * classificacoes_corretas)\n print('Classificações incorretas:', classificacoes_incorretas, (100 / len(dados_teste)) * classificacoes_incorretas)\n print('Classificações descartadas:', classificacoes_descartadas,\n (100 / len(dados_teste)) * classificacoes_descartadas)","sub_path":"tcc/mlp1.py","file_name":"mlp1.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"30795569","text":"import 
gl\n\nOPENS = []\nCLOSES = []\n\ndef get_direction():\n global CLOSES\n direction = []\n for i in range(len(CLOSES) - 1):\n start = CLOSES[i]\n end = CLOSES[i + 1]\n x = end['x'] - start['x']\n y = end['y'] - start['y']\n if x == -10 and y == -10:\n direction.append(0)\n elif x == 0 and y == -10:\n direction.append(1)\n elif x == 10 and y == -10:\n direction.append(2)\n elif x == -10 and y == 0:\n direction.append(3)\n elif x == 10 and y == 0:\n direction.append(4)\n elif x == -10 and y == 10:\n direction.append(5)\n elif x == 0 and y == 10:\n direction.append(6)\n elif x == 10 and y == 10:\n direction.append(7)\n return direction\n\ndef min_f(x):\n return x['f']\n\ndef cmp_opens(x, y):\n global OPENS\n for op in OPENS:\n if op['x'] == x and op['y'] == y:\n return True\n return False\n\ndef cmp_closes(x, y):\n global CLOSES\n for cl in CLOSES:\n if cl['x'] == x and cl['y'] == y:\n return True\n return False\n\ndef get_pos(x, y, end_x, end_y):\n cur_list = []\n for j in range(-10, 11, 10):\n for i in range(-10, 11, 10):\n if not i and not j:\n pass\n else:\n cur_pos = {}\n cur_pos['x'] = x + i\n cur_pos['y'] = y + j\n if i == -10 and j == -10:\n cur_pos['g'] = 14\n elif i == 0 and j == -10:\n cur_pos['g'] = 10\n elif i == 10 and j == -10:\n cur_pos['g'] = 14\n elif i == -10 and j == 0:\n cur_pos['g'] = 10\n elif i == 10 and j == 0:\n cur_pos['g'] = 10\n elif i == -10 and j == 10:\n cur_pos['g'] = 14\n elif i == 0 and j == 10:\n cur_pos['g'] = 10\n elif i == 10 and j == 10:\n cur_pos['g'] = 14\n cur_pos['h'] = abs(end_x - cur_pos['x']) + abs(end_y - cur_pos['y'])\n cur_pos['f'] = cur_pos['g'] + cur_pos['h']\n if 0 <= cur_pos['x'] <= 800 and 0 <= cur_pos['y'] <= 600:\n cur_list.append(cur_pos)\n return cur_list\n\ndef create_pos(x, y):\n pos = {}\n pos['x'] = x\n pos['y'] = y\n pos['g'] = 0\n pos['h'] = 0\n pos['f'] = 0\n return pos\n\ndef move(start_x, start_y, end_x, end_y):\n global OPENS, CLOSES\n for i in range(len(OPENS)):\n OPENS.pop()\n for i in range(len(CLOSES)):\n CLOSES.pop()\n start_x -= start_x % 10\n start_y -= start_y % 10\n end_x -= end_x % 10\n end_y -= end_y % 10\n pos = create_pos(start_x, start_y)\n OPENS.append(pos)\n while OPENS:\n p = OPENS.pop()\n CLOSES.append(p)\n if p['x'] == end_x and p['y'] == end_y:\n return get_direction()\n\n cur_list = get_pos(p['x'], p['y'], end_x, end_y)\n for cur in cur_list:\n if cmp_closes(cur['x'], cur['y']):\n cur_list.remove(cur)\n\n for cur in cur_list:\n if not OPENS or cmp_opens(cur['x'], cur['y']) == False:\n OPENS.append(cur)\n if cmp_opens(cur['x'], cur['y']):\n for op in OPENS:\n if op['x'] == cur['x'] and op['y'] == cur['y']:\n if op['g'] > cur['g']:\n op['g'] = cur['g']\n op['f'] = op['g'] + op['h']\n\n OPENS.sort(reverse = True, key = min_f)\n","sub_path":"src/pygame/core/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"574030715","text":"# An elegant weapon, for a more civilized age\nfrom __future__ import division\n\nfrom collections import defaultdict\nimport numpy as np\n\nclass AddKTrigramLM(object):\n \"\"\"Trigram LM with add-k smoothing.\"\"\"\n order_n = 3\n\n def __eq__(self, other):\n \"\"\"Do not modify.\"\"\"\n state_vars = ['k', 'counts', 'context_totals', 'words', 'V']\n return all([getattr(self, v) == getattr(other, v) for v in state_vars])\n\n def __init__(self, tokens):\n \"\"\"Build our smoothed trigram model.\n This should be very similar to SimpleTrigramLM.__init__ from the 
demo\n notebook, with the exception that we _don't_ want to actually normalize\n the probabilities at training time. Instead, we'll compute the corpus\n counts C_abc = C(w_2, w_1, w) and C_ab = C(w_2, w_1), after which we can\n compute the probabilities on the fly for any value of k. (We'll do this\n in the next_word_proba() function.)\n The starter code will fill in:\n self.counts\n self.wordset\n Your code should populate:\n self.context_totals (total count C_ab for context ab)\n Args:\n tokens: (list or np.array) of training tokens\n Returns:\n None\n \"\"\"\n self.k = 0.0\n # Raw trigram counts over the corpus.\n # c(w | w_1 w_2) = self.counts[(w_2,w_1)][w]\n # Be sure to use tuples (w_2,w_1) as keys, *not* lists [w_2,w_1]\n self.counts = defaultdict(lambda: defaultdict(lambda: 0.0))\n\n # Map of (w_1, w_2) -> int\n # Entries are c( w_2, w_1 ) = sum_w c(w_2, w_1, w)\n self.context_totals = dict()\n\n # Track unique words seen, for normalization\n # Use wordset.add(word) to add words\n wordset = set()\n\n # Iterate through the word stream once\n # Compute trigram counts as in SimpleTrigramLM\n w_1, w_2 = None, None\n for word in tokens:\n wordset.add(word)\n if w_1 is not None and w_2 is not None:\n self.counts[(w_2,w_1)][word] += 1\n # Update context\n w_2 = w_1\n w_1 = word\n\n #### YOUR CODE HERE ####\n # Compute context counts\n for context in self.counts.keys():\n self.context_totals[context] = np.sum(self.counts.get(context).values())\n #### END(YOUR CODE) ####\n # Total vocabulary size, for normalization\n self.words = list(wordset)\n self.V = len(self.words)\n\n def set_live_params(self, k=0.0, **params):\n self.k = k\n\n def next_word_proba(self, word, seq):\n \"\"\"Next word probability for smoothed n-gram.\n Your code should implement the corresponding equation from the\n notebook, using self.counts and self.context_totals as defined in\n __init__(), above.\n Args:\n word: (string) w in P(w | w_1 w_2 )\n seq: (list of string) [w_1, w_2, w_3, ...]\n Returns:\n (float) P_k(w | w_1 w_2), according to the model\n \"\"\"\n context = tuple(seq[-2:]) # (w_2, w_1)\n k = self.k\n #### YOUR CODE HERE ####\n # Hint: self.counts.get(...) and self.context_totals.get(...) may be\n # useful here. See note in defaultdict.md about how this works.\n return (self.counts.get(context, defaultdict(lambda: 0.0)).get(word, 0) + k) / float(self.context_totals.get(context, 0) + k * self.V)\n #return a /b \n #### END(YOUR CODE) ####\n\n\n\nclass KNTrigramLM(object):\n \"\"\"Trigram LM with Kneser-Ney smoothing.\"\"\"\n order_n = 3\n\n def __eq__(self, other):\n \"\"\"Do not modify.\"\"\"\n state_vars = ['delta', 'counts', 'type_contexts',\n 'context_totals', 'context_nnz', 'type_fertility',\n 'z_tf', 'words']\n return all([getattr(self, v) == getattr(other, v) for v in state_vars])\n\n def __init__(self, tokens):\n \"\"\"Build our smoothed trigram model.\n This should be similar to the AddKTrigramLM.__init__ function, above,\n but will compute a number of additional quantities that we need for the\n more sophisticated KN model.\n See the documentation in the notebook for the KN backoff model\n definition and equations, and be sure to read the in-line comments\n carefully to understand what each data structure represents.\n Note the usual identification of variables:\n w : c : current word\n w_1 : w_{i-1} : b : previous word\n w_2 : w_{i-2} : a : previous-previous word\n There are two blocks of code to fill here. 
In the first one, you should\n fill in the inner loop to compute:\n self.counts (unigram, bigram, and trigram)\n self.type_contexts (set of preceding words for each word (type))\n In the second one, you should compute:\n self.context_totals (as in AddKTrigramLM)\n self.context_nnz (number of nonzero elements for each context)\n self.type_fertility (number of unique preceding words for each word\n (type))\n The starter code will fill in:\n self.z_tf\n self.words\n Args:\n tokens: (list or np.array) of training tokens\n Returns:\n None\n \"\"\"\n self.delta = 0.75\n # Raw counts over the corpus.\n # Keys are context (N-1)-grams, values are dicts of word -> count.\n # You can access C(w | w_{i-1}, ...) as:\n # unigram: self.counts[()][w]\n # bigram: self.counts[(w_1,)][w]\n # trigram: self.counts[(w_2,w_1)][w]\n self.counts = defaultdict(lambda: defaultdict(lambda: 0))\n # As in AddKTrigramLM, but also store the unigram and bigram counts\n # self.context_totals[()] = (total word count)\n # self.context_totals[(w_1,)] = c(w_1)\n # self.context_totals[(w_2, w_1)] = c(w_2, w_1)\n self.context_totals = dict()\n # Also store in self.context_nnz the number of nonzero entries for each\n # context; as long as \\delta < 1 this is equal to nnz(context) as\n # defined in the notebook.\n self.context_nnz = dict()\n\n # Context types: store the set of preceding words for each word\n # map word -> {preceding_types}\n self.type_contexts = defaultdict(lambda: set())\n # Type fertility is the size of the set above\n # map word -> |preceding_types|\n self.type_fertility = dict()\n # z_tf is the sum of type fertilities\n self.z_tf = 0.0\n\n\n # Iterate through the word stream once\n # Compute unigram, bigram, trigram counts and type fertilities\n w_1, w_2 = None, None\n for word in tokens:\n #### YOUR CODE HERE ####\n \n self.counts[()][word] += 1\n if w_1 is not None:\n self.counts[(w_1,)][word] += 1\n self.type_contexts[word].add(w_1)\n if w_2 is not None:\n self.counts[(w_2,w_1)][word] += 1 \n \n #### END(YOUR CODE) ####\n # Update context\n w_2 = w_1\n w_1 = word\n\n ##\n # We'll compute type fertilities and normalization constants now,\n # but not actually store the normalized probabilities. That way, we can compute\n # them (efficiently) on the fly.\n\n #### YOUR CODE HERE ####\n # Count the total for each context.\n for context in self.counts.keys():\n self.context_totals[context] = np.sum(self.counts.get(context).values())\n\n \n #for word in self.counts.get(context):\n #print str(context) + \" -> \" + word\n # if context not in self.context_totals.keys():\n # self.context_totals[context] = 0\n # self.context_totals[context] += self.counts.get(context).get(word)\n \n # Count the number of nonzero entries for each context.\n self.context_nnz[context] = np.count_nonzero(self.counts.get(context).values()) \n # Compute type fertilities, and the sum z_tf.\n for word in self.type_contexts:\n self.type_fertility[word] = len(self.type_contexts[word])\n\n self.z_tf = float(sum(self.type_fertility.values()))\n #### END(YOUR CODE) ####\n\n # Total vocabulary size, for normalization\n self.words = self.counts[()].keys()\n self.V = len(self.words)\n\n def set_live_params(self, delta = 0.75, **params):\n self.delta = delta\n\n def kn_interp(self, word, context, delta, pw):\n \"\"\"Compute KN estimate P_kn(w | context) given a backoff probability\n Your code should implement the absolute discounting equation from the\n notebook, using the counts computed in __init__(). 
Note that you don't\n need to deal with type fertilities here; this is handled in the\n next_word_proba() function in the starter code, below.\n Be sure you correctly handle the case where c(context) = 0, so as to not\n divide by zero later on. You should just return the backoff probability\n directly, since we have no information to decide otherwise.\n Args:\n word: (string) w in P(w | context )\n context: (tuple of string)\n delta: (float) discounting term\n pw: (float) backoff P_kn(w | less_context), precomputed\n Returns:\n (float) P_kn(w | context)\n \"\"\"\n pass\n #### YOUR CODE HERE ####\n # Hint: self.counts.get(...) and self.context_totals.get(...) may be\n # useful here. See note in defaultdict.md about how this works.\n cabc = self.counts.get(context, defaultdict(lambda: 0.0)).get(word, 0)\n cab = self.context_totals.get(context, 0)\n if cabc == 0:\n return delta*pw\n return max(0.0, (cabc - delta)) / float(cab) + delta*pw\n #### END(YOUR CODE) ####\n\n\n def next_word_proba(self, word, seq):\n \"\"\"Compute next word probability with KN backoff smoothing.\n Args:\n word: (string) w in P(w | w_1 w_2 )\n seq: (list of string) [w_1, w_2, w_3, ...]\n delta: (float) discounting term\n Returns:\n (float) P_kn(w | w_1 w_2)\n \"\"\"\n delta = delta = self.delta\n # KN unigram, then recursively compute bigram, trigram\n pw1 = self.type_fertility[word] / self.z_tf\n pw2 = self.kn_interp(word, tuple(seq[-1:]), delta, pw1)\n pw3 = self.kn_interp(word, tuple(seq[-2:]), delta, pw2)\n return pw3","sub_path":"code/GetOldTweets-python/ngram_lm.py","file_name":"ngram_lm.py","file_ext":"py","file_size_in_byte":10568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"584282781","text":"#倒叙进行\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nimport time\nimport pymysql\nimport random\n\ndef s(a,b):\n rand=random.randint(a,b)\n time.sleep(rand)\n\ndef swith_tab():\n\n print(\"开始切换标签\")\n driver.find_element_by_id(\"showhidediv1\").click()\n\n\ndef find_same_record(name,price):\n #输入型号、价格,用于验证数据准确性;输出状态,1为找到,2为没有找到,结果为页面跳转事件\n table = driver.find_element_by_id('part_list')\n trs = table.find_elements_by_tag_name('tr')\n nums = len(trs)\n print(\"共%s条相似的记录,正在查找完全一样的数据\" % nums)\n for tr in trs:\n tds = tr.find_elements_by_tag_name('td')\n prt_id = tds[0].text\n # prt_name=tds[1].get_attribute('title')\n prt_name = tds[1].find_element_by_tag_name('a').text\n prt_price = tds[2].text\n print(prt_id, prt_name, prt_price)\n if name==prt_name and price==prt_price:\n #当两个数据完全一样,那么触发点击事件\n print(\"找到一样的数据了,正在加载页面\")\n print(\"waiting for the page to reload\")\n tds[1].find_element_by_tag_name('a').click()\n s(4,6)\n list_state = 1\n return list_state\n print(\"maybe there have no same data for you\")\n list_state=2\n return list_state\n\ndef get_prt_cnt():\n #输入无,输出一个tuple,prt_name和price\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"yidun\", charset=\"utf8\")\n cursor = db.cursor()\n sql = \"select count(prt_name) as cnt from yidun_prt_lists where status=10 \"\n\n try:\n execute = cursor.execute(sql)\n cnt = cursor.fetchmany(execute)\n cnt=str(cnt).replace(\"((\", \"\").replace(\",),)\", \"\")\n return cnt\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n db.close()\n cursor.close()\n\ndef update_prt_status(status,prt_no,prt_name):\n #输入无,输出一个tuple,prt_name和price\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"yidun\", 
charset=\"utf8\")\n cursor = db.cursor()\n sql = \"update yidun_prt_lists set status='%d' where status=10 and prt_no='%s' and prt_name='%s'\" %(status,prt_no,prt_name)\n\n try:\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n cursor.close()\n db.close()\n\n\ndef get_prt_info():\n #输入无,输出一个tuple,prt_name和price\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"yidun\", charset=\"utf8\")\n cursor = db.cursor()\n sql = \"select prt_name,prt_price,prt_no from yidun_prt_lists where status=10 limit 1\"\n\n try:\n execute = cursor.execute(sql)\n data = cursor.fetchmany(execute)\n return data\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n db.close()\n cursor.close()\n\ndef get_basic_data(prt_name):\n # 输入无,输出无,结果直接insert到数据表\n '''\n cnt=0\n while 1==1:\n name = driver.find_element_by_id('result_prodname').text\n if name!=prt_name:\n print(\"waiting...\")\n time.sleep(1)\n cnt+=1\n if cnt>15:\n break\n '''\n name = driver.find_element_by_id('result_prodname').text\n try:\n if name != prt_name:\n detail_state = 2\n print(\"为什么会因为名称不同跳出?\")\n return detail_state\n detail_state = 1\n print(\"成功二次验证\")\n return detail_state\n except:\n print(\"异常跳出\")\n detail_state = 3\n return detail_state\n\ndef get_attrs_data(prt_no,prt_name):\n #输入无,输出无,结果直接insert到数据表\n Part_MainProp=driver.find_element_by_id('Part_MainProp')\n div_lists=Part_MainProp.find_elements_by_class_name('prop_part')\n print(\"共%s条属性\"%str(len(div_lists)))\n attr_no=0\n for div_list in div_lists:\n attr_no+=1\n attr_name=div_list.find_element_by_xpath(\"./div[@class='prop_title']/label\").text\n try:\n attr_value = div_list.find_element_by_xpath(\"./div[@class='prop_list']/span[@ischk='1']\").text\n except:\n attr_value=\"\"\n print(attr_no,attr_name,attr_value)\n insert_attrs(prt_no,prt_name,attr_no,attr_name,attr_value)\n\ndef insert_attrs(prt_no,prt_name,attr_no,attr_name,attr_value):\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"yidun\", charset=\"utf8\")\n cursor = db.cursor()\n sql = \"insert into prt_attribute(prt_no,prt_name,attr_no,attr_name,attr_value) values('%d','%s','%d','%s','%s')\" %(prt_no,prt_name,attr_no,attr_name,attr_value)\n\n try:\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n cursor.close()\n db.close()\n\n\ndef get_imgs(prt_no,prt_name):\n img_src=driver.find_element_by_id('elementimg').get_attribute('src')\n img_name=img_src[img_src.rindex(\"/\")+1:100]\n print(img_name,img_src)\n insert_img_data(prt_no,prt_name,img_name,\"\",\"small_img\",img_src,1)\n\ndef insert_img_data(prt_no,prt_name,img_name,img_path,type,original_url,img_num):\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"yidun\", charset=\"utf8\")\n cursor = db.cursor()\n sql = \"insert into prt_img_info \" \\\n \" (prt_no,prt_name,img_name,img_path,type,original_url,img_num)\" \\\n \" values('%d','%s','%s','%s','%s','%s','%d')\" \\\n % (prt_no,prt_name, img_name, img_path, type, original_url, img_num)\n try:\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n cursor.close()\n db.close()\n\n\n\n\nif __name__==\"__main__\":\n #从数据表获取数据\n cnt=get_prt_cnt()\n print(\"共%s条数据\"%cnt)\n driver = webdriver.Firefox()\n driver.set_page_load_timeout(50)\n 
#driver.set_script_timeout(10)\n #driver.maximize_window()\n url = \"http://www.dq123.com/price/index.php?kw=prt_name\"\n count = 0\n while True:\n count += 1\n print(\"第%s次尝试\" % str(count))\n try:\n driver.get(url)\n break\n except:\n pass\n\n for i in range(int(cnt)):\n data=get_prt_info()\n prt_name=data[0][0]\n price=data[0][1]\n prt_no=data[0][2]\n print(prt_no,prt_name,price)\n while 1 == 1:\n # 验证页面的加载情况\n print(\"waiting...\")\n try:\n div_table = driver.find_element_by_id('part_list').find_elements_by_tag_name('tr')\n if len(div_table) > 0:\n break\n time.sleep(1)\n except:\n time.sleep(1)\n s(2, 4)\n #加入输入、点击事件\n\n try:\n driver.find_element_by_id('searchinputmodel').clear()\n except:\n print(\"重新切换标签\")\n swith_tab()\n s(4,5)\n driver.find_element_by_id('searchinputmodel').clear()\n driver.find_element_by_id('searchinputmodel').send_keys(prt_name)\n driver.find_element_by_id(\"search_text\").click()\n while 1==1:\n #验证页面的加载情况\n print(\"waiting...\")\n try:\n div_table=driver.find_element_by_id('part_list').find_elements_by_tag_name('tr')\n if len(div_table)>0:\n break\n time.sleep(1)\n except:\n time.sleep(1)\n s(1,3)\n\n #获取列表中当的结果记录,到click(),返回状态\n try:\n list_state=find_same_record(prt_name,price)\n except:\n list_state=3\n if list_state==1:\n #进入到详情页后,获取属性模块,详情页基础模块\n detail_state=get_basic_data(prt_name)\n if detail_state==1:\n get_attrs_data(prt_no,prt_name)\n get_imgs(prt_no,prt_name)\n status=11\n\n else:\n print(\"error in detail_state\")\n status=12\n\n else:\n print(\"error in list_state\")\n status=13\n update_prt_status(status, prt_no, prt_name)\n s(1,2)\n try:\n swith_tab()\n except:\n pass\n\n print(\"-----------------------------------------\")","sub_path":"untitled/Selenium/dq123/dq123数据获取-20170831/DQ123-根据关键词获取详情页V1.1.py","file_name":"DQ123-根据关键词获取详情页V1.1.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"85898999","text":"from PIL import Image\nimport sys\nimport os\nimport random\nimport math\n\n# Generators to convert to flat list and back into 3-element tuples\n\ndef flatten_image(data):\n for i in xrange(len(data)):\n yield data[i][0]\n yield data[i][1]\n yield data[i][2]\n \ndef repack_image(data):\n for i in xrange(len(data) // 3):\n yield (data[i * 3 + 0], data[i * 3 + 1], data[i * 3 + 2])\n\n# Left-over broken repacker which creates a cool effect\n \ndef repack_image_broke(data):\n for i in xrange(len(data) // 3):\n yield (data[i + 0], data[i + 1], data[i + 2])\n \n# rotate colour channels\n\ndef rotate_channels(data, size):\n rot = random.randint(1, 2)\n for i in xrange(size[0] * size[1]):\n yield data[i * 3 + ((rot + 0) % 3)]\n yield data[i * 3 + ((rot + 1) % 3)]\n yield data[i * 3 + ((rot + 2) % 3)]\n \n# add a random byte here and there\ndef oh_hai(data, size):\n j = 0\n for i in xrange(size[0] * size[1] * 3):\n yield data[j]\n j = j + 1\n \n # maintains colour balance. 
Swap in comment out equations to fuck with it\n tweak = random.random()\n if j >= len(data):\n j = j - 3\n #j = j - 1\n elif tweak < 0.0002:\n j = j + random.randint(-5, 5) * 3\n #j = j + random.randint(-15, 15)\n \ndef echo(data, size):\n echo_distance = random.randint(2, size[0] // 10)\n echo_size = random.randint(1, echo_distance - 1)\n #print(echo_distance)\n #print(echo_size)\n j = 0\n for i in xrange(size[0] * size[1] * 3):\n if j > echo_distance:\n yield data[i - echo_distance]\n if j > echo_distance + echo_size:\n j = -1\n else:\n yield data[i]\n j = j + 1\n \ndef colourise(data, size):\n # get channel scales in a range 0.5 -> 1.5\n r = random.random() * 1.0 + 0.5;\n g = random.random() * 1.0 + 0.5;\n b = random.random() * 1.0 + 0.5;\n #print('{0} {1} {2}'.format(r, g, b))\n for i in xrange(size[0] * size[1]):\n yield min(0xff, int(data[i * 3 + 0] * r))\n yield min(0xff, int(data[i * 3 + 1] * g))\n yield min(0xff, int(data[i * 3 + 2] * b))\n \ndef lolwut(data, size):\n lairiness = random.random() * math.pi\n threshold = random.random()\n for i in xrange(size[0] * size[1] * 3):\n f = float(data[i]) / 0xff\n if f > threshold:\n s = math.sin(f * lairiness) * 0xff\n yield max(0, min(0xff, int(s)))\n else:\n yield data[i]\n \ndef blit(data, size):\n count = random.randint(1, 20)\n new_data = list(data)\n for i in xrange(count):\n block_size = random.randint(10, size[0] * size[1] // 8)\n src_offset = random.randint(0, len(data) - block_size)\n dst_offset = random.randint(0, len(new_data) - block_size)\n for j in xrange(block_size):\n new_data[dst_offset + j] = data[src_offset + j]\n return new_data\n \ndef create_glitch(im):\n\n print('Performing bitwise glitches')\n\n data = list(im.getdata())\n data = list(flatten_image(data))\n\n # tweaks\n\n data = blit(data, im.size)\n data = list(oh_hai(data, im.size))\n data = list(echo(data, im.size))\n data = list(rotate_channels(data, im.size))\n data = list(colourise(data, im.size))\n data = list(lolwut(data, im.size))\n\n # end tweaks\n\n im.putdata(list(repack_image(data)))\n \nif __name__ == '__main__':\n filename = sys.argv[1]\n im = Image.open(filename)\n create_glitch(im)\n im.save(filename[:-4] + '.out.' + filename[-3:])\n","sub_path":"glitch2.py","file_name":"glitch2.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"474719923","text":"import tweepy\n\nfrom . 
import Tweeter\nimport error\n\nclass Twitter(Tweeter):\n def __init__(self, cons_key, cons_secret):\n auth = tweepy.AppAuthHandler(cons_key, cons_secret)\n self._api = tweepy.API(auth)\n\n def get_tweet_text(self, tweet_id):\n try:\n status = self._api.get_status(tweet_id, tweet_mode='extended')\n return status.full_text\n except tweepy.error.TweepError as e:\n if e.api_code == 144:\n raise error.TweetNotFoundError\n","sub_path":"tweets/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"492660897","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n # def rightSideView(self, root: TreeNode) -> List[int]:\n def rightSideView(self, root: TreeNode):\n ans = []\n self.dfs(root, 0, ans)\n return [x[0] for x in ans]\n\n def dfs(self, node, lvl, ans):\n if node:\n if len(ans) < lvl + 1:\n ans.append([])\n\n ans[lvl].append(node.val)\n self.dfs(node.right, lvl + 1, ans)\n self.dfs(node.left, lvl + 1, ans)\n\n\n\"\"\"\n# DFS recursively\ndef rightSideView(self, root):\n res = []\n self.dfs(root, 0, res)\n return [x[0] for x in res]\n \ndef dfs(self, root, level, res):\n if root:\n if len(res) < level+1:\n res.append([])\n res[level].append(root.val)\n self.dfs(root.right, level+1, res)\n self.dfs(root.left, level+1, res)\n\n# DFS + stack\ndef rightSideView2(self, root):\n res, stack = [], [(root, 0)]\n while stack:\n curr, level = stack.pop()\n if curr:\n if len(res) < level+1:\n res.append([])\n res[level].append(curr.val)\n stack.append((curr.right, level+1))\n stack.append((curr.left, level+1))\n return [x[-1] for x in res]\n \n# BFS + queue\ndef rightSideView(self, root):\n res, queue = [], [(root, 0)]\n while queue:\n curr, level = queue.pop(0)\n if curr:\n if len(res) < level+1:\n res.append([])\n res[level].append(curr.val)\n queue.append((curr.left, level+1))\n queue.append((curr.right, level+1))\n return [x[-1] for x in res]\n\"\"\"","sub_path":"LeetCode/0199_BinaryTreeRightSideView/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"303035786","text":"import pickle\nimport os\nimport sys\nsys.path.append('/home/maciek/pyCharmProjects/mc-doi')\nfrom model.results import Results\nimport pandas as pd\nfrom data.data import Data\nfrom model.parameters import ContagionCorrelation, Adjacency\nfrom model.multi_contagion_models import MultiContagionDynamicLinearThresholdModel as MCDOI\nimport numpy as np\n\nsets_to_estimate_file = list(sys.argv)[1]\nwith open(sets_to_estimate_file, 'r', encoding='utf-8') as sets_to_estimate:\n sets_to_estimate = sets_to_estimate.readlines()\nsets_to_estimate = [x.strip() for x in sets_to_estimate]\n\nfrom joblib import Parallel, delayed\nimport time\ndef text_progessbar(seq, total=None):\n step = 1\n tick = time.time()\n while True:\n time_diff = time.time()-tick\n avg_speed = time_diff/step\n total_str = 'of %n' % total if total else ''\n print('step', step, '%.2f' % time_diff, 'avg: %.2f iter/sec' % avg_speed, total_str)\n step += 1\n yield next(seq)\nall_bar_funcs = {\n 'txt': lambda args: lambda x: text_progessbar(x, **args),\n 'None': lambda args: iter,\n}\ndef ParallelExecutor(use_bar='tqdm', **joblib_args):\n def aprun(bar=use_bar, **tq_args):\n def 
tmp(op_iter):\n if str(bar) in all_bar_funcs.keys():\n bar_func = all_bar_funcs[str(bar)](tq_args)\n else:\n raise ValueError(\"Value %s not supported as bar type\"%bar)\n return Parallel(**joblib_args)(bar_func(op_iter))\n return tmp\n return aprun\n\naprun = ParallelExecutor(n_jobs=6)\n\ndirectory = '/nfs/maciej/mcdoi/louvain/'\n\nwith open(directory+'estimated_t+predict', 'r', encoding='utf-8') as file:\n estimated = file.readlines()\nestimated = [x.strip() for x in estimated]\n\nbatch_sizes = [604800]# [43200, 86400, 604800] # (1h), 12h, 24h, 7d\nbatch_sizes.reverse()\n\n# with open(directory + 'sets_to_omit', 'r', encoding='utf-8') as sets_to_omit:\n# sets_to_omit = sets_to_omit.readlines()\n# sets_to_omit = set([x.strip() for x in sets_to_omit])\n#\n# with open(directory + 'not_estimated', 'r', encoding='utf-8') as not_estimated:\n# not_estimated = not_estimated.readlines()\n# not_estimated = set([x.strip() for x in not_estimated])\n\n\ndef save_results(result: Results, dir, num_predictions):\n for iter in range(num_predictions):\n matrix = result.get_result(iter).matrix\n file_name = dir + '/result_' + str(iter) + '.pickle'\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, 'wb') as file:\n pickle.dump(matrix, file)\n\n\ndef estimate_t_and_predict(path_dataset_history, batch_type, batch_sizes, num_predictions, estimated):\n flag = False\n for batch_size in batch_sizes:\n if path_dataset_history+ '/' + batch_type + '/size_' + str(batch_size) not in estimated:\n flag = True\n if flag:\n edges = pd.read_csv(os.path.dirname(path_dataset_history) + '/edges', header = None)\n event_log = pd.read_csv(path_dataset_history + '/event_log', header=None)\n with open(path_dataset_history + '/contagion.pickle', 'rb') as file:\n cc = pickle.load(file)\n with open(path_dataset_history + '/adjacency.pickle', 'rb') as file:\n a = pickle.load(file)\n for batch_size in batch_sizes:\n if path_dataset_history+ '/' + batch_type + '/size_' + str(batch_size) not in estimated:\n if os.path.isfile(path_dataset_history + '/data_obj.pickle'):\n with open(path_dataset_history + '/data_obj.pickle', 'rb') as f:\n d = pickle.load(f)\n else:\n d = Data()\n d.load_data_data_frame(event_log, edges)\n with open(path_dataset_history + '/data_obj.pickle', 'wb') as f:\n pickle.dump(d, f)\n m = MCDOI()\n m.assign_contagions_correlation_matrix(cc)\n m.assign_adjacency_matrix(a)\n m.fit_only_thresholds_states(d, batch_type = batch_type, batch_size = batch_size)\n file_name = path_dataset_history + '/' + batch_type + '/size_' + str(batch_size) + '/threshold.pickle'\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, 'wb') as threshold_file:\n pickle.dump(m.thresholds.matrix, threshold_file)\n result = m.predict(num_predictions)\n save_results(result, path_dataset_history + '/' + batch_type + '/size_' + str(batch_size), num_predictions)\n with open(directory+'estimated_t+predict', 'a+', encoding='utf-8') as handle:\n handle.write(path_dataset_history + '/' + batch_type + '/size_' + str(batch_size) + '\\n')\n with open(directory + 'predicted_7days', 'a+', encoding='utf-8') as handle:\n handle.write(path_dataset_history + '/' + batch_type + '/size_' + str(batch_size) + '\\n')\n\n\n# def make_dataset_history_paths():\n# paths = []\n# for dat in next(os.walk(directory))[1]:\n# for history_length in np.arange(1, 31, 1):\n# paths.append(directory+dat+'/history_'+str(history_length))\n# return paths\n\nif __name__ == '__main__':\n 
aprun(bar='txt')(delayed(estimate_t_and_predict)(dat, 'time', batch_sizes, 7, estimated) for dat in sets_to_estimate)\n\n\n\n\n\n\n\n\n","sub_path":"experiments/parameters_exploration/t-louvain.py","file_name":"t-louvain.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"644436946","text":"import unittest\nimport requests\nimport json\nimport asyncio\nimport asyncpg\nfrom exceptions.user_exceptions import UserLoginException, UserRegisterException\n\nwith open('./env.json', 'r') as f:\n config = json.loads(f.read())\n\n\ndef create_user_and_login(url, username, password):\n \"\"\"\n Create a new user, do the login and return the generated token\n :param url:\n :param username:\n :param password:\n :return:\n \"\"\"\n # Register a new user\n res = requests.post(f'{url}/user/register',\n data={\"username\": username, \"password\": password}\n )\n\n if res.status_code != 200:\n raise UserRegisterException(\"Error creating new user\")\n\n # Login the previously created user\n res = requests.post(f'{url}/user/login',\n data={\"username\": username, \"password\": password}\n )\n if res.status_code != 200 or \"token\" not in res.json():\n raise UserLoginException(\"Error doing login of the created user\")\n\n token = res.json()[\"token\"]\n return {\"token\": token}\n\n\nasync def delete_all_related_to_user(username):\n \"\"\"\n Delete all groups and information of that user\n :param username:\n :return:\n \"\"\"\n conn = await asyncpg.connect(database=config[\"database\"][\"database\"],\n user=config[\"database\"][\"username\"],\n password=config[\"database\"][\"password\"])\n # Get user id\n user_search = await conn.fetch('SELECT id FROM users WHERE username=$1', username)\n if len(user_search) == 0:\n # The user doesn't exists, so we don't need to delete anything\n return\n\n user_id = int(user_search[0][\"id\"])\n\n # Delete groups\n await conn.execute('DELETE FROM groups WHERE created_by=$1', user_id)\n\n # Delete user\n await conn.execute('DELETE FROM users WHERE username=$1', username)\n\n\nclass TestUserGroupBehaviour(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestUserGroupBehaviour, self).__init__(*args, **kwargs)\n self.users_to_create = [\n {\n \"username\": \"test user 1\",\n \"password\": \"test user 1\",\n \"token\": \"\",\n \"groups\": [],\n \"invites\": []\n },\n {\n \"username\": \"test user 2\",\n \"password\": \"test user 2\",\n \"token\": \"\",\n \"groups\": [],\n \"invites\": []\n }\n ]\n\n def test_groups_behaviour(self):\n failed_any_login = False\n raised_exception = None\n try:\n for user in self.users_to_create:\n # Create and login\n try:\n user_details = create_user_and_login(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}',\n user[\"username\"],\n user[\"password\"]\n )\n user[\"token\"] = user_details[\"token\"]\n except UserLoginException as ex:\n # Delete the user\n print(\"Error doing user login\")\n raised_exception = ex\n failed_any_login = True\n except UserRegisterException as ex:\n # Delete the user\n print(\"Error doing user register\")\n raised_exception = ex\n failed_any_login = True\n\n if not failed_any_login:\n # Do the rest of the test\n\n # Create groups\n for user in self.users_to_create:\n # Without headers\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n data={\"name\": \"Test group 1\"}\n )\n self.assertEqual(resp.status_code, 401)\n 
self.assertEqual(resp.json()[\"error\"], \"Unauthorized\")\n\n # Without name but valid token\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n headers={\"dango-key\": user[\"token\"]}\n )\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json()[\"error\"], \"Invalid parameters\")\n\n # Valid group creation\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n data={\"name\": f\"group {user['username']}\"},\n headers={\"dango-key\": user[\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n user[\"groups\"].append(resp.json()[\"id\"])\n\n # Extra group creation\n for group_number in range(3):\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n data={\"name\": f\"group {user['username']} {group_number}\"},\n headers={\"dango-key\": user[\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n user[\"groups\"].append(resp.json()[\"id\"])\n\n # Create invites\n for group in user[\"groups\"]:\n for _ in range(5):\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/invitation/{group}',\n headers={\"dango-key\": user[\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n user[\"invites\"].append(resp.json()[\"code\"])\n\n # Try to create it again with the same name\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n data={\"name\": f\"group {user['username']}\"},\n headers={\"dango-key\": user[\"token\"]}\n )\n self.assertEqual(resp.status_code, 400)\n\n # Try to join to a group\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/join',\n data={\"code\": self.users_to_create[0][\"invites\"][0]},\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n\n self.assertEqual(resp.status_code, 200)\n group_to_test = int(resp.json()[\"group\"][0])\n\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/join',\n data={\"code\": self.users_to_create[0][\"invites\"][6]},\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n second_group_to_test = int(resp.json()[\"group\"][0])\n\n # Try to join to the same group\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/join',\n data={\"code\": self.users_to_create[0][\"invites\"][0]},\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 400)\n\n # Get group participants\n resp = requests.get(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/participants?group-id={group_to_test}',\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n\n # Get user groups\n resp = requests.get(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group',\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n\n # Try to leave the joined group\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/exit/{group_to_test}',\n headers={\"dango-key\": self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n\n # Try to leave the group again\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/exit/{group_to_test}',\n headers={\"dango-key\": 
self.users_to_create[1][\"token\"]}\n )\n self.assertEqual(resp.status_code, 401)\n\n # Try to leave the group as owner\n resp = requests.post(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/exit/{group_to_test}',\n headers={\"dango-key\": self.users_to_create[0][\"token\"]}\n )\n self.assertEqual(resp.status_code, 400)\n\n # Try to delete the group\n resp = requests.delete(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/{group_to_test}',\n headers={\"dango-key\": self.users_to_create[0][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n\n # Try to delete group with someone inside\n resp = requests.delete(\n f'{config[\"server\"][\"url\"]}:{config[\"server\"][\"port\"]}/group/{second_group_to_test}',\n headers={\"dango-key\": self.users_to_create[0][\"token\"]}\n )\n self.assertEqual(resp.status_code, 200)\n\n finally:\n # Clean the users\n for user in self.users_to_create:\n asyncio.get_event_loop().run_until_complete(delete_all_related_to_user(user[\"username\"]))\n\n if raised_exception is not None:\n raise raised_exception\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/user_groups_test.py","file_name":"user_groups_test.py","file_ext":"py","file_size_in_byte":10320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"584622013","text":"import crypt\n\ndef main():\n print(\"What do you want to do?\")\n print(\" 1)Encrypt a message\")\n print(\" 2)Decrypt a message\")\n if int(input()) == 1:\n print(\"Please enter your message and press enter:\")\n message = crypt.encrypt(input())\n else:\n print(\"Please enter your morse code and press enter:\")\n message = crypt.decrypt(input())\n print(message)\n print(\"Want to try again?(y/n)\")\n choice = input().strip()\n if choice == 'y' or choice == 'Y':\n main()\n elif choice == 'n' or choice == 'N':\n print(\"Thank you!\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/morse_code.py","file_name":"morse_code.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"53435847","text":"import geoip2.database #Library for working with MMDB files\nimport sys\nimport os\nimport urllib.request #Library used for downloading database if it doesn't exist\nimport tarfile #Library to extract tarballs \nimport shutil\nimport glob\nimport hashlib\n\ndef installDatabase():\n extracted = tarfile.open('archive.tar.gz',\"r:gz\") #Extract database file\n extracted.extractall()\n extracted.close()\n current = os.getcwd() #Store working directory\n db_file = glob.glob(r'*/*.mmdb') #Get database filename and location\n shutil.copy(db_file[0], current) #Copy database to working directory\n del_dir,del_file = db_file[0].split(\"\\\\\") #Get temporary directory for removal\n os.rename(del_file, \"db.mmdb\") #Rename and install database\n print (\"Cleaning up...\")\n shutil.rmtree(del_dir) #Clean up temporary files\n os.remove(\"archive.tar.gz\")\n return\n\ndef checkDatabase():\n if not os.path.exists(\"db.mmdb\"): #Check if database file exists\n if os.path.exists(\"archive.tar.gz\"):\n checksum = hashlib.md5(open(\"archive.tar.gz\", 'rb').read()).hexdigest()\n if checksum == \"b0d93822d6937bcbaa549e1ab90b235a\":\n print (\"Partially installed database found...finishing setup...\")\n installDatabase()\n return\n else:\n print (\"Corrupted database found...re-installing\")\n print (\"Running setup scripts...\")\n print (\"Downloading 
database...\")\n urllib.request.urlretrieve('http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz','archive.tar.gz')\n print (\"Installing database...\")\n installDatabase()\n return\n\ndef findTarget(ip):\n checkDatabase()\n reader = geoip2.database.Reader('db.mmdb') #Create reader object containing database\n response = reader.city(ip) #Get location data from DB (stored in reader object) and store into variable\n print (\"LOCATION DATA PULLED FOR \" + ip)\n print (\"COUNTRY: \" + response.country.name)\n print (\"STATE: \" + response.subdivisions.most_specific.name)\n print (\"CITY: \" + response.city.name)\n print (\"ZIP: \" + response.postal.code)\n print (\"COORDINATES: \" + str(response.location.latitude) + \",\" + str(response.location.longitude))\n reader.close()\n return\n\ndef main(argv):\n usage = \"USAGE: triangle.py [FLAGS] \\n-ip [IP_TO_LOCATE] Locate IP Address\\n\"\n\n if not argv: #Display usage if arguments are undefined\n print (usage)\n sys.exit()\n if argv[0] == \"-ip\":\n findTarget(argv[1]) #Launch primary function for analyzing location data\n \n sys.exit()\n\nmain(sys.argv[1:]) #Read arguments","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"315866961","text":"\"\"\"\n此题让我想起了以前的快慢指针,用了差不多的算法,既然要删除倒数第n个节点,那么就弄两个指针,之间隔着n-2个节点即可,\n先让指路指针走n-1步,然后处理指针指向头节点,再同步向后直到指路指针指向尾节点,达到n的时间复杂度.\n只需注意处理好一些异常情况即可\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n head = self\n res = \"\"\n while head.next:\n res += \"%s -> \" % head.val\n head = head.next\n res += str(head.val)\n return res\n\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n guide = head\n for i in range(n - 1):\n guide = guide.next\n if not guide.next:\n return head.next\n guide = guide.next\n target_prev = head\n while guide.next:\n guide = guide.next\n target_prev = target_prev.next\n target_prev.next = target_prev.next.next\n return head\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n print(sol.removeNthFromEnd(head, 5))\n","sub_path":"1-100/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"5719500","text":"from setuptools import setup\nimport os\n\n\n__version__ = '0.0.3'\n\n\ndef read(readme):\n return open(os.path.join(os.path.dirname(__file__), readme)).read()\n\n\nsetup(\n name='pyazo',\n version=__version__,\n description = 'Gyazo API client',\n author='mtwtkman',\n url='https://github.com/mtwtkman/pyazo',\n install_requires=['requests']\n)\n","sub_path":"pypi_install_script/pyazo-0.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"188911135","text":"from gpiozero import Button\nfrom signal import pause\n\ndef say_hello():\n print(\"Hello !! 
Button is pressed \")\n\n\nbutton = Button(2) # connect +Ve terminal of button to GPIO2 and -Ve of button to GND of raspberri \n\nbutton.when_pressed = say_hello ## be careful not use say_hello()\n\npause()\n","sub_path":"GPIOlib/button_press_function.py","file_name":"button_press_function.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"305106070","text":"from unittesting import DeferrableTestCase\n\nimport sublime\n\n\nclass ApplyDocumentEditTests(DeferrableTestCase):\n def setUp(self):\n self.view = sublime.active_window().new_file()\n\n def test_remove_line_and_then_insert_at_that_line_at_end(self):\n original = (\n 'a\\n'\n 'b\\n'\n 'c'\n )\n file_changes = [\n ((2, 0), (3, 0), ''), # out-of-bounds end position, but this is fine\n ((3, 0), (3, 0), 'c\\n') # out-of-bounds start and end, this line doesn't exist\n ]\n expected = (\n 'a\\n'\n 'b\\n'\n 'c\\n'\n )\n # Old behavior:\n # 1) first we end up with ('a\\n', 'b\\n', 'cc\\n')\n # 2) then we end up with ('a\\n', 'b\\n', '')\n # New behavior:\n # 1) line index 3 is \"created\" ('a\\n', 'b\\n', 'c\\n', c\\n'))\n # 2) deletes line index 2.\n self.run_test(original, expected, file_changes)\n\n def test_apply(self):\n original = (\n '\\n'\n '\\n'\n '\\n'\n '\\n'\n )\n file_changes = [\n ((0, 28), (1, 0), ''), # delete first \\n\n ((1, 0), (1, 15), ''), # delete second line (but not the \\n)\n ((2, 10), (2, 10), '\\n '), # insert after