diff --git "a/4715.jsonl" "b/4715.jsonl" new file mode 100644--- /dev/null +++ "b/4715.jsonl" @@ -0,0 +1,639 @@ +{"seq_id":"533082145","text":"import numpy as np\n\nclass HiddenMarkov:\n def __init__(self, Q, V, A, B, PI):\n self.Q = Q # 状态数组或者状态映射也可以,N\n self.V = np.array(V) # 观测集数组,包含数据集中所有可能的观测项,T\n self.A = np.array(A) # 状态转移概率分布矩阵,N*N\n self.B = np.array(B) # 观测概率分布矩阵,N*T\n self.PI = np.array(PI) # 初始状态概率分布数组,N\n \n def forward(self, O, logs=False): # 使用前向算法,O为观察序列,logs用于控制是否输出计算过程\n N = len(self.Q) #可能存在的状态数量\n M = len(O) # 观测序列的大小\n self.alphas = np.zeros((N, M)) # 前向概率:alphas[i][j]表示t时刻部分观测序列为o1,o2,o3...,ot且状态为qi的概率\n T = M # 有几个时刻,有几个观测序列,就有几个时刻\n for t in range(T): # 遍历每一时刻,算出alpha值\n indexOfO = np.where(self.V == O[t])[0][0] # 找出序列对应的索引\n for i in range(N):\n if t == 0: # 计算初值\n self.alphas[i][t] = self.PI[i] * self.B[i][indexOfO] # P176(10.15)\n if logs:\n print('alpha1(%d)=p%db%db(o1)=%f' % (i, i, i, self.alphas[i][t]))\n else:\n self.alphas[i][t] = np.dot(\n [alpha[t - 1] for alpha in self.alphas],\n [a[i] for a in self.A]) * self.B[i][indexOfO] # 对应P176(10.16)\n if logs:\n print('alpha%d(%d)=sigma [alpha%d(i)ai%d]b%d(o%d)=%f' %\n (t, i, t - 1, i, i, t, self.alphas[i][t]))\n # print(alphas)\n P = np.sum([alpha[M - 1] for alpha in self.alphas]) # P176(10.17)\n if logs:\n print(\"P(O|lambda)=\", end=\"\")\n for i in range(N):\n print(\"%.3f+\" % self.alphas[i][M - 1], end=\"\")\n print(\"0=%.6f\" % P)\n # alpha11 = pi[0][0] * B[0][0] #代表a1(1)\n # alpha12 = pi[0][1] * B[1][0] #代表a1(2)\n # alpha13 = pi[0][2] * B[2][0] #代表a1(3)\n\n def backward(self, O, logs=False): # 后向算法,O为观察序列,logs用于控制是否输出计算过程\n N = len(self.Q) # 可能存在的状态数量\n M = len(O) # 观测序列的大小\n self.betas = np.ones((N, M)) # 后向概率:时刻t状态为qi的条件下,从t+1到T的部分观测序列为ot+1,ot+2,...,oT的概率\n if logs:\n for i in range(N):\n print('beta%d(%d)=1' % (M, i))\n for t in range(M - 2, -1, -1):\n indexOfO = np.where(self.V == O[t + 1])[0][0] # 找出序列对应的索引\n for i in range(N):\n self.betas[i][t] = np.dot(\n np.multiply(self.A[i], [b[indexOfO] for b in self.B]),\n [beta[t + 1] for beta in self.betas])\n realT = t + 1\n realI = i + 1\n if logs:\n print(\n 'beta%d(%d)=sigma [a%djbj(o%d)beta%d(j)]=(' %\n (realT, realI, realI, realT + 1, realT + 1),\n end='')\n for j in range(N):\n print(\n \"%.3f*%.3f*%.3f+\" % (self.A[i][j], self.B[j][indexOfO],\n self.betas[j][t + 1]),\n end='')\n print(\"0)=%.6f\" % self.betas[i][t])\n # print(betas)\n if logs:\n indexOfO = np.where(self.V == O[0])[0][0]\n P = np.dot(\n np.multiply(self.PI, [b[indexOfO] for b in self.B]),\n [beta[0] for beta in self.betas])\n print(\"P(O|lambda)=\", end=\"\")\n for i in range(N):\n print(\n \"%.3f*%.3f*%.3f+\" % (self.PI[i], self.B[i][indexOfO], self.betas[i][0]),\n end=\"\")\n print(\"0=%.6f\" % P)\n\n def viterbi(self, O, logs=False): # viterbi算法进行状态decode,O为观测序列,logs用于控制是否输出计算过程\n N = len(self.Q) #可能存在的状态数量\n M = len(O) # 观测序列的大小\n self.deltas = np.zeros((N, M)) # deltas[i][t]表示t时刻状态为qi的所有状态序列中的最大概率\n self.psis = np.zeros((N, M)) # psis[i][t]使t时刻状态为qi最大化的t-1时刻的状态\n I = np.zeros(M, dtype=np.int32)\n for t in range(M):\n realT = t + 1\n indexOfO = np.where(self.V == O[t])[0][0] # 找出序列对应的索引\n for i in range(N):\n realI = i + 1\n if t == 0:\n self.deltas[i][t] = self.PI[i] * self.B[i][indexOfO]\n self.psis[i][t] = 0\n if logs:\n print('delta1(%d)=pi%d * b%d(o1)=%.3f * %.3f=%.6f' %\n (realI, realI, realI, self.PI[i], self.B[i][indexOfO],\n self.deltas[i][t]))\n print('psis1(%d)=0' % (realI))\n else:\n self.deltas[i][t] = np.max(\n np.multiply([delta[t - 1] for delta in self.deltas],\n 
[a[i] for a in self.A])) * self.B[i][indexOfO]\n if logs:\n print(\n 'delta%d(%d)=max[delta%d(j)aj%d]b%d(o%d)=%.3f*%.3f=%.6f'\n % (realT, realI, realT - 1, realI, realI, realT,\n np.max(\n np.multiply([delta[t - 1] for delta in self.deltas],\n [a[i] for a in self.A])), self.B[i][indexOfO],\n self.deltas[i][t]))\n self.psis[i][t] = np.argmax(\n np.multiply(\n [delta[t - 1] for delta in self.deltas],\n [a[i]\n for a in self.A])) + 1 #由于其返回的是索引,因此应+1才能和正常的下标值相符合。\n if logs:\n print('psis%d(%d)=argmax[delta%d(j)aj%d]=%d' %\n (realT, realI, realT - 1, realI, self.psis[i][t]))\n if logs:\n print(self.deltas)\n print(self.psis)\n I[M - 1] = np.argmax([delta[M - 1] for delta in self.deltas\n ]) + 1 #由于其返回的是索引,因此应+1才能和正常的下标值相符合。\n print('i%d=argmax[deltaT(i)]=%d' % (M, I[M - 1]))\n for t in range(M - 2, -1, -1):\n I[t] = self.psis[int(I[t + 1]) - 1][t + 1]\n print('i%d=psis%d(i%d)=%d' % (t + 1, t + 2, t + 2, I[t]))\n print(\"状态序列I:\", I)\n return I\n \n def train(self, O, criterion=0.05, logs=False): \n '''\n Baum-Welch无监督参数学习,EM算法进行训练,训练之前必须已计算前向forward和后向backward概率\n O为观察序列\n criterion为前后两次训练参数相差允许的最小值,用于控制迭代次数\n logs为真则会打印forward和backward详细的计算过程\n '''\n N = len(self.Q)\n M = len(O)\n xi = np.zeros((M - 1, N, N))\n gamma = np.zeros((M, N))\n done = False\n O_index = np.zeros(M, dtype=np.int32)\n for t in range(M):\n O_index[t] = np.where(self.V == O[t])[0][0]\n while not done:\n # 计算更新参数后的前向概率alphas和后向概率betas\n self.forward(O,logs=logs)\n self.backward(O,logs=logs)\n # EM算法的E step\n # 计算xi\n for t in range(M - 1):\n indexofO = O_index[t + 1]\n xi_divisor = np.dot([alpha[t] for alpha in self.alphas], \n [np.dot(np.multiply(self.A[i], [b[indexofO] for b in self.B]), \n [beta[t + 1] for beta in self.betas]) for i in range(N)])\n xi_dividend = np.array([self.alphas[i][t] * np.multiply(np.multiply(self.A[i], [b[indexofO] for b in self.B]), \n [beta[t + 1] for beta in self.betas]) for i in range(N)])\n xi[t] = xi_dividend / xi_divisor\n # 计算gamma\n for t in range(M):\n gamma[t] = np.multiply([alpha[t] for alpha in self.alphas], [beta[t] for beta in self.betas]) / np.dot(\n [alpha[t] for alpha in self.alphas], [beta[t] for beta in self.betas])\n # EM算法的M step\n # 更新状态转移概率分布矩阵A\n new_A = np.zeros(self.A.shape)\n for i in range(N):\n new_A_divisor = np.sum([g[i] for g in gamma]) - gamma[M - 1][i]\n for j in range(N):\n new_A[i][j] = np.sum([xit[i, j] for xit in xi]) / new_A_divisor\n # 更新观测概率分布矩阵B\n new_B = np.zeros(self.B.shape)\n for j in range(new_B.shape[0]):\n new_B_divisor = np.sum([g[j] for g in gamma])\n for k in range(new_B.shape[1]):\n new_B[j][k] = np.sum([gamma[t][j] for t in range(M) if O_index[t] == k]) / new_B_divisor\n # 更新初始状态概率分布数组PI\n new_PI = np.zeros(self.PI.shape)\n new_PI = gamma[0]\n # 对比前后两次更新幅度\n if (np.max(np.abs(new_A - self.A)) < criterion \n and np.max(np.abs(new_B - self.B)) < criterion \n and np.max(np.abs(new_PI - self.PI)) < criterion):\n done = True\n self.A[:,:], self.B[:,:], self.PI[:] = new_A, new_B, new_PI\n\nif __name__ == '__main__':\n Q = [1, 2, 3]\n V = ['红', '白']\n A = [[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]]\n B = [[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]]\n O = ['红', '白', '红', '红', '白', '红', '白', '白']\n PI = [0.2, 0.3, 0.5]\n HMM = HiddenMarkov(Q, V, A, B, PI)\n print('forward:')\n HMM.forward(O, logs=True)\n print('\\nbackward:')\n HMM.backward(O, logs=True)\n print('\\nviterbi状态预测before train:')\n HMM.viterbi(O)\n print('\\nBaum-Welch train:')\n HMM.train(O) \n print('\\nviterbi状态预测after train:')\n HMM.viterbi(O)","sub_path":"第10章 
隐马尔可夫模型/HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598715133","text":"\"\"\"Cross-site request forgery proection.\n\nCourtesy of Warehouse project:\n\nhttps://github.com/pypa/warehouse/blob/master/warehouse/csrf.py\n\"\"\"\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport hmac\nimport urllib.parse\n\nfrom pyramid.httpexceptions import HTTPForbidden, HTTPMethodNotAllowed\nfrom pyramid.session import check_csrf_token\nfrom websauna.system.http import Request\n\nfrom websauna.system.http.header import add_vary\n\n\nREASON_NO_ORIGIN = \"Origin checking failed - no Origin or Referer.\"\nREASON_BAD_ORIGIN = \"Origin checking failed - {} does not match {}.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\n\nclass InvalidCSRF(HTTPForbidden):\n pass\n\n\ndef csrf_exempt(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request._process_csrf = False\n return view(context, request)\n return wrapped\n\n\ndef csrf_protect(view_or_scope):\n scope = None\n if isinstance(view_or_scope, str):\n scope = view_or_scope\n\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request._process_csrf = True\n request._csrf_scope = scope\n return view(context, request)\n return wrapped\n\n if scope is None:\n return inner(view_or_scope)\n else:\n return inner\n\n\ndef _check_csrf(request: Request):\n \"\"\"The default CSRF protection mechanism.\n\n For all state changing HTTP requests (POST) force a CSRF check unless view is whitelisted with :py:func:`websauna.system.core.csrf.csrf_exempt` decorator.\n\n :raises: InvalidCSRF\n \"\"\"\n # Assume that anything not defined as 'safe' by RFC2616 needs protection\n if request.method not in {\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"}:\n # Determine if this request has set itself so that it should be\n # protected against CSRF. 
If it has not and it's gotten one of these\n # methods, then we want to raise an error stating that this resource\n # does not support this method.\n\n # TODO: Make this configurable - at the moment we don't require explicit @csrf_protect on every view\n # if not getattr(request, \"_process_csrf\", None):\n # raise HTTPMethodNotAllowed\n\n if request.scheme == \"https\":\n # Determine the origin of this request\n origin = request.headers.get(\"Origin\")\n if origin is None:\n origin = request.headers.get(\"Referer\")\n\n # Fail if we were not able to locate an origin at all\n if not origin:\n raise InvalidCSRF(REASON_NO_ORIGIN)\n\n # Parse the origin and host for comparison\n originp = urllib.parse.urlparse(origin)\n hostp = urllib.parse.urlparse(request.host_url)\n\n # Actually check our Origin against our Current\n # Host URL.\n if ((originp.scheme, originp.hostname, originp.port) !=\n (hostp.scheme, hostp.hostname, hostp.port)):\n reason_origin = origin\n if origin != \"null\":\n reason_origin = urllib.parse.urlunparse(\n originp[:2] + (\"\", \"\", \"\", \"\"),\n )\n\n reason = REASON_BAD_ORIGIN.format(\n reason_origin, request.host_url,\n )\n\n raise InvalidCSRF(reason)\n\n check_csrf_token(request)\n\n\ndef csrf_mapper_factory(mapper):\n class CSRFMapper(mapper):\n\n def __call__(self, view):\n view = super().__call__(view)\n\n @functools.wraps(view)\n def wrapped(context, request):\n # Assign our view to an innerview function so that we can\n # modify it inside of the wrapped function.\n innerview = view\n\n # Check if we're processing CSRF for this request at all or\n # if it has been exempted from CSRF.\n if not getattr(request, \"_process_csrf\", True):\n return innerview(context, request)\n\n # If we're processing CSRF for this request, then we want to\n # set a Vary: Cookie header on every response to ensure that\n # we don't cache the result of a CSRF check or a form with a\n # CSRF token in it.\n if getattr(request, \"_process_csrf\", False):\n innerview = add_vary(\"Cookie\")(innerview)\n\n # Actually check our CSRF\n _check_csrf(request)\n\n return innerview(context, request)\n\n return wrapped\n return CSRFMapper\n\n\n\n","sub_path":"websauna/system/core/csrf.py","file_name":"csrf.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"599626733","text":"class ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n# 头插法 需多复习,没看太明白\n\n'''\n147. 
对链表进行插入排序\n插入排序 时间复杂度O(N^2) \n需要知道插入位置的前一个节点所以要判断next指针\n'''\nclass Solution(object):\n def insertionSortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n # 正常插入排序 O(n*n)\n # 维护 dummy 亚节点,用于在head之前插入节点\n # 维护lastsorted 节点,和当前节点比较,判断是否需从前往后判断\n if not head: return head\n dummy = ListNode(0, head)\n curr = head.next\n last_sorted = head\n while curr:\n tmp = curr.next\n if curr.val >= last_sorted.val:\n last_sorted = curr\n # last_sorted = last_sorted.next\n elif curr.val < last_sorted.val:\n prev = dummy\n while prev.next and prev.next.val <= curr.val:\n prev = prev.next\n last_sorted.next = curr.next\n curr.next = prev.next\n prev.next = curr\n\n curr = tmp #last_sorted.next\n return dummy.next\n\n # 头插法\n # 3 1 4 5 2 3\n dummy = ListNode(0)\n curr = head\n while curr:\n prev = dummy\n temp = curr.next\n while prev.next and prev.next.val < curr.val:\n prev = prev.next\n curr.next = prev.next\n prev.next = curr\n curr = temp\n return dummy.next\n\nSolution().insertionSortList()","sub_path":"3-chain/10-insert_sort_list.py","file_name":"10-insert_sort_list.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"610698749","text":"import random\n\n\nclass MaxHeap:\n\n def __init__(self):\n self._data = []\n self._count = len(self._data)\n\n def size(self):\n return self._count\n\n def isEmpty(self):\n return self._count == 0\n\n def add(self, item):\n self._data.append(item)\n self._count += 1\n self._shifUp(self._count - 1)\n\n def pop(self):\n if self._count > 0:\n ret = self._data[0]\n self._data[0] = self._data[self._count - 1]\n self._count -= 1\n self._shifDown(0)\n return ret\n\n def _shifUp(self, index):\n parent = (index - 1) >> 1\n while index > 0 and self._data[index] < self._data[index]:\n self._data[parent], self._data[index] = self._data[index], self._data[parent]\n index = parent\n parent = (index - 1) >> 1\n\n def _shifDown(self, index):\n j = (index << 1) + 1\n while j < self._count:\n if j + 1 < self._count and self._data[j+1] > self._data[j]:\n j += 1\n if self._data[index] >= self._data[j]:\n break\n self._data[index], self._data[j] = self._data[j], self._data[index]\n index = j\n j = (index << 1) + 1\n\ndef testIntValue():\n for i in range(10):\n iLen = random.randint(1, 300)\n # allData = random.sample(range(iLen *100), iLen)\n allData = [10, 5, 6, 2, 10, 1, 8]\n print('\\nlen = ', iLen)\n\n oMaxHeap = MaxHeap()\n print('_data:\\t', allData)\n arrDataSorted = sorted(allData, reverse=True)\n print('dataSorted:', arrDataSorted)\n for i in allData:\n oMaxHeap.add(i)\n heapData = []\n for i in range(iLen):\n iExpected = arrDataSorted[i]\n iActual = oMaxHeap.pop()\n heapData.append(iActual)\n print('{}, expected:{}, actual:{}'.format(iExpected==iActual, iExpected, iActual))\n assert iExpected == iActual\n print('dataSorted: ', arrDataSorted)\n print('heapData: ', heapData)\n\n\n\nif __name__ == '__main__':\n testIntValue()\n # m = MaxHeap()\n # m.add(1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"SortingAlgorithm/HeapSort.py","file_name":"HeapSort.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"589652292","text":"from foryou.common.base import *\r\n\r\n# 福佑账务管理\r\n# 获取单据列表\r\ndef finance_getList(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/document/getList')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = 
requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 单据详情\r\ndef finance_getDetailByCode(code2, cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/document/getDetailByCode')\r\n data = {\r\n 'code': code2,\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 生成凭证\r\ndef finance_generateVouchers(id1, cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/document/getDetailByCode')\r\n data = {\r\n 'ids': [id1]\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 导出运单明细\r\ndef finance_exportOrderDetail(id1, code2, docTypeNamwe3, cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/document/exportOrderDetail')\r\n data = {\r\n 'id': id1,\r\n 'code': code2,\r\n 'docTypeNamwe': docTypeNamwe3\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 新增收款账户\r\ndef finance_addFyReceiptAccount(cookies):\r\n url = mk_url('caiwu', 'api/finance/goldPlate/addFyReceiptAccount.do')\r\n data = {\r\n 'receiptChannel': '0',\r\n 'accountType': '0',\r\n 'accountName': '账户名',\r\n 'accountNo': '123456789',\r\n 'bankName': '中国建设银行',\r\n 'receiptCompany': '广州福佑卡车电子商务有限公司',\r\n 'status': '0'\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 获取收款账户列表\r\ndef finance_getFyReceiptAccounts(cookies):\r\n url = mk_url('caiwu', 'api/finance/goldPlate/getFyReceiptAccounts.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n# 福佑账务管理-公共配置\r\n# 获取通用配置项?\r\ndef finance_getConfEnum(cookies):\r\n url = mk_url('caiwu', 'api/common/getConfEnum')\r\n data = {\r\n 'enumType': 2\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 创建账套\r\ndef finance_fykcAccountCreate(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/common/fykcAccountCreate.do')\r\n data = {\r\n 'code': '123',\r\n 'name': '小沈测试子公司',\r\n 'voiceStatus': '0',\r\n 'createTime': '1553484382',\r\n 'enable': '1',\r\n 'comType': '1'\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 创建科目\r\n\r\n# def finance_subjectCreate(cookies):\r\n# url = mk_url('caiwu', 'fykc-goldplate-service/api/common/subjectCreate.do')\r\n# data = {\r\n# 'data':'{\"code\":\"123\",\"name\":\"测试新建科目\",\"subjType\":\"2\",'\r\n# '\"cstPrjs\":[{\"code\":\"HSWD08_SYS\",\"name\":\"组织机构\",\"field\":\"4\",\"fieldName\":\"账套\",\"cstConfId\":\"59\"}]}'\r\n# }\r\n# r = requests.post(url, data, cookies = cookies)\r\n# return r.json()\r\n\r\n\r\n\r\n# 获取科目列表\r\ndef finance_subjectList(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/common/subjectList.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 单据类型列表\r\ndef finance_docTypeList(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/goldplate/common/docTypeList')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 创建凭证规则\r\n\r\n# def finance_voucherCreate(cookies):\r\n# url = mk_url('caiwu', 'fykc-goldplate-service/api/common/voucherCreate')\r\n# data = {\r\n# 'data':'{\"code\":\"BRPL0039\",\"name\":\"测试\",\"docTypeCode\":\"03\",\"docTypeIdList\":[],'\r\n# '\"schPeriod\":0,\"confList\":[],\"ruleItems\":[{\"summary\":\"%s年%s月/客户其他应收款单/收入确认\",'\r\n# 
'\"subjCode\":\"1002\",\"subjName\":\"银行存款\",\"subjId\":\"16\",\"subjDriection\":\"0\",\"rateValue\":\"0.1\"}]}'\r\n# }\r\n# r = requests.post(url, data, cookies = cookies)\r\n# return r.json()\r\n\r\n\r\n\r\n# 获取规则列表\r\ndef finance_getRuleList(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/common/getRuleList.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 获取内部联运统计表-列表\r\ndef finance_getUnionTransOrders(cookies):\r\n url = mk_url('caiwu', 'api/finance/goldPlate/getUnionTransOrders.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n\r\n#导出内部联运统计表\r\n# def finance_exportUnionOrders(cookies):\r\n# url = mk_url('caiwu', 'api/finance/goldPlate/exportUnionOrders.do')\r\n# data = {\r\n# '':'',\r\n# '':''\r\n# }\r\n# r = requests.post(url, data, cookies = cookies)\r\n# return r.json()\r\n\r\n\r\n# 获取凭证列表\r\ndef finance_getVoucherList(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/voucher/getVoucherList.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 30,\r\n 'docTypeCode':'01',\r\n 'docCode':'',\r\n 'createTimeStart':'',\r\n 'createTimeEnd':'',\r\n 'thirdVoucherNo':'',\r\n 'accountName':'',\r\n 'summary':'',\r\n 'status':'',\r\n 'orderSn':''\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 传输凭证\r\ndef finance_sendVouchers(fyVoucherNo, cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/voucher/sendVouchers.do')\r\n data = {\r\n 'fyVoucherNo': [fyVoucherNo]\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 查询项摘要联想输入法\r\ndef finance_getSummaryMatch(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/voucher/getSummaryMatch.do')\r\n data = {\r\n 'input': '2019'\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n\r\n#导出凭证接口 ???\r\n# def finance_exportVouchers(cookies):\r\n# url = mk_url('caiwu', 'fykc-goldplate-service/api/voucher/exportVouchers')\r\n# data = {\r\n# 'docTypeCode':'?'\r\n# }\r\n# r = requests.post(url, data, cookies = cookies)\r\n# return r.json()\r\n\r\n\r\n\r\n# 统收统付报表-列表\r\ndef finance_getUnifiedPayOrders(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/internal/getUnifiedPayOrders.do')\r\n data = {\r\n 'pageNo': 1,\r\n 'pageSize': 10,\r\n 'type': 0\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n# 生成统收统付凭证\r\ndef finance_genUnifiedVoucher(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/voucher/genUnifiedVoucher.do')\r\n data = {\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n\r\n\r\n# 导出统收统付记录\r\ndef finance_exportUnifiedOrders(cookies):\r\n url = mk_url('caiwu', 'fykc-goldplate-service/api/internal/exportUnifiedOrders')\r\n data = {\r\n 'type': 0\r\n }\r\n r = requests.post(url, data, cookies=cookies)\r\n return r.json()\r\n","sub_path":"foryou/api/api_finance_fyaccount.py","file_name":"api_finance_fyaccount.py","file_ext":"py","file_size_in_byte":7442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"331214772","text":"# Artifical Neural Networks\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 
-1].values\n# Encoding categorical data \n# Encoding the Independent Variable\n\n# v0.20\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n#labelencoder_X1 = LabelEncoder()\n#X[:, 1] = labelencoder_X1.fit_transform(X[:, 1])\n#labelencoder_X2 = LabelEncoder()\n#X[:, 2] = labelencoder_X2.fit_transform(X[:, 2])\n#onehotencoder = OneHotEncoder(categorical_features = [1])\n#X = onehotencoder.fit_transform(X).toarray()\n#X = X[:, 1:]\n \n# v0.22\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nct = ColumnTransformer([(\"Country\", OneHotEncoder(), [1, 2])], remainder = 'passthrough')\nX = ct.fit_transform(X)\nX = pd.DataFrame(X)\nX = X.iloc[:, 1:].values\nX = X[:, [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# making ANN\n\n# Importing the keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\n# Initializing the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer with dropout\n# relu--> rectifier activation function https://www.google.com/url?sa=i&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwi7h9OC7NDmAhUl4XMBHRc_A5AQjRx6BAgBEAQ&url=%2Furl%3Fsa%3Di%26source%3Dimages%26cd%3D%26ved%3D2ahUKEwiy_4eB7NDmAhX083MBHUt9C6cQjRx6BAgBEAQ%26url%3Dhttps%253A%252F%252Fmedium.com%252F%2540kanchansarkar%252Frelu-not-a-differentiable-function-why-used-in-gradient-based-optimization-7fef3a4cecec%26psig%3DAOvVaw1OS0b2vQnWTxM5CFcbulZQ%26ust%3D1577364819708851&psig=AOvVaw1OS0b2vQnWTxM5CFcbulZQ&ust=1577364819708851\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\nclassifier.add(Dropout(rate = 0.1))\n\n# Adding the second hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n# incase you wanna apply dropout to multiple layers, simply copy paste at all layers\nclassifier.add(Dropout(rate = 0.1))\n\n\n# Adding the output layer\n# softmax is a sigmoid function applied to a dependant variable that has more than 2 categories. for 2 categories use activation = 'sigmoid'\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Compiling the ANN\n# loss = 'categorical_crossentropy' if more than 2 outcomes.\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Making predictions and evaluating model \nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 100)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n# Predicting single observation\n\"\"\"Geography: France\nCredit Score: 600\nGender: Male\nAge: 40 years old\nTenure: 3 years\nBalance: $60000\nNumber of Products: 2\nDoes this customer have a credit card ? 
Yes\nIs this customer an Active Member: Yes\nEstimated Salary: $50000 \"\"\"\nnew_prediction = classifier.predict(sc.transform(np.array([[0, 0, 1, 600, 40, 3, 60000, 2, 1, 1, 50000]])))\nnew_prediction = (new_prediction > 0.5)\n\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n\n# Evaluating the ANN (implementing k fold cross validation on model)\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_classifier():\n classifier = Sequential()\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = 1)\nmean = accuracies.mean()\nvariance = accuracies.std()\n\n# Improving the Ann\n# Dropout regularization to reduce overfitting if needed\n\n\n# Tuning the ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_classifier(optimizer):\n classifier = Sequential()\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\n classifier.add(Dense(units = 4, kernel_initializer = 'uniform', activation = 'relu'))\n classifier.add(Dense(units = 4, kernel_initializer = 'uniform', activation = 'relu'))\n classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = build_classifier)\nparameters = {'batch_size' : [25, 32, 64],\n 'epochs' : [100, 500],\n 'optimizer' : ['adam', 'rmsprop']}\ngrid_search = GridSearchCV(estimator = classifier, \n param_grid = parameters,\n scoring = 'accuracy',\n cv = 10)\ngrid_search = grid_search.fit(X_train, y_train)\nbest_accuracy = grid_search.best_score_\nbest_parameters = grid_search.best_params_\n","sub_path":"ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"15819258","text":"# import numpy as np\n#\n# a = np.array([[1, 2], [3, 4]])\n#\n# print(a)\n# print(a.shape)\n# print(np.repeat(a[:, :, np.newaxis], 3, axis=2))\n# print(np.repeat(a[np.newaxis, :, :], 3, axis=0))\n# print(np.repeat(a[np.newaxis, :, :], 3, axis=0).shape)\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nimport config\nfrom resnet import resnet_18, resnet_18_fast3d, resnet_18_fast3d_split\n\n\ndef get_model(model_provider):\n model = model_provider()\n model.build(input_shape=(None, config.num_of_frames, config.image_height, config.image_width, config.channels))\n model.summary()\n return model\n\n\ndef test_batch_generator(x, batch_size=config.BATCH_SIZE):\n indices = np.arange(len(x))\n 
batch = []\n for i in indices:\n batch.append(i)\n if len(batch) == batch_size:\n x_batch = x[batch]\n x_batch = x_batch / 255.0\n x_batch = x_batch.astype(np.float32)\n yield x_batch\n\n\ndef split_into_batches(x, batch_size=config.BATCH_SIZE):\n indices = np.arange(len(x))\n i = 0\n while i < len(x):\n b = x[indices[i:min(i + batch_size, len(x))]]\n b = b / 255.0\n b = b.astype(np.float32)\n yield b\n i += batch_size\n\n\ndef get_validation_losses_and_acc_for_epochs(model, x, y, weight_path):\n epochs = []\n top1_accs = []\n top5_accs = []\n losses = []\n print(weight_path)\n for w_file_name in os.listdir('./' + weight_path):\n epoch_num = int(w_file_name.split('_')[-1].split('-')[0])\n model.load_weights(weight_path + '/' + w_file_name)\n y_preds = []\n for batch in tqdm(split_into_batches(x)):\n y_pred = model.predict(batch)\n y_preds.append(y_pred)\n y_preds = np.vstack(y_preds)\n\n top1_acc = np.mean(tf.keras.metrics.categorical_accuracy(y, y_preds))\n top5_acc = np.mean(tf.keras.metrics.top_k_categorical_accuracy(y, y_preds))\n loss = np.mean(tf.keras.losses.categorical_crossentropy(y, y_preds))\n epochs.append(epoch_num)\n top1_accs.append(top1_acc)\n top5_accs.append(top5_acc)\n losses.append(loss)\n\n print((epochs, top1_accs, top5_accs, losses))\n return epochs, top1_accs, top5_accs, losses\n\n\nif __name__ == '__main__':\n gpus = tf.config.experimental.list_physical_devices('GPU')\n\n x_test = np.load('dataset/x_test2.npy')\n\n y_test = np.load('dataset/y_test2.npy')\n\n # model.load_weights('weights/basic_2/resnet18_3d_20-0.9738.h5')\n # y_preds = []\n # for batch in tqdm(split_into_batches(x_test)):\n # y_pred = model.predict(batch)\n # y_preds.append(y_pred)\n # y_preds = np.vstack(y_preds)\n #\n # top1_acc = np.mean(tf.keras.metrics.categorical_accuracy(y_test, y_preds))\n # top5_acc = np.mean(tf.keras.metrics.top_k_categorical_accuracy(y_test, y_preds))\n #\n # loss = np.mean(tf.keras.losses.categorical_crossentropy(y_test, y_preds))\n #\n # print(top1_acc)\n # print(top5_acc)\n\n # get_validation_losses_and_acc_for_epochs(model, x_test, y_test, 'weights/basic_2')\n\n model = get_model(resnet_18_fast3d)\n get_validation_losses_and_acc_for_epochs(model, x_test, y_test, 'weights/fast_1')\n\n model = get_model(resnet_18_fast3d_split)\n get_validation_losses_and_acc_for_epochs(model, x_test, y_test, 'weights/fast_split_1')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"151042817","text":"import numpy as np\nfrom collections import namedtuple\nfrom scipy.sparse import csr_matrix, find\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.spatial import cKDTree\n\nGraph = namedtuple('Graph', ['X', 'Ri', 'Ro', 'y','simmatched'])\n\nSparseGraph = namedtuple('SparseGraph',\n ['X', 'Ri_rows', 'Ri_cols', 'Ro_rows', 'Ro_cols', 'y', 'simmatched'])\n\ndef make_sparse_graph(X, Ri, Ro, y,simmatched=None):\n Ri_rows, Ri_cols = Ri.nonzero()\n Ro_rows, Ro_cols = Ro.nonzero()\n return SparseGraph(X, Ri_rows, Ri_cols, Ro_rows, Ro_cols, y, simmatched)\n\ndef save_graph(graph, filename):\n \"\"\"Write a single graph to an NPZ file archive\"\"\"\n np.savez(filename, **graph._asdict())\n #np.savez(filename, X=graph.X, Ri=graph.Ri, Ro=graph.Ro, y=graph.y)\n\ndef save_graphs(graphs, filenames):\n for graph, filename in zip(graphs, filenames):\n save_graph(graph, filename)\n\ndef load_graph(filename, graph_type=SparseGraph):\n 
\"\"\"Reade a single graph NPZ\"\"\"\n with np.load(filename) as f:\n return graph_type(**dict(f.items()))\n\ndef load_graphs(filenames, graph_type=SparseGraph):\n return [load_graph(f, graph_type) for f in filenames]\n\ndef graph_from_sparse(sparse_graph, dtype=np.uint8):\n n_nodes = sparse_graph.X.shape[0]\n n_edges = sparse_graph.Ri_rows.shape[0]\n mat_shape = (n_nodes,n_edges)\n data = np.ones(n_edges)\n Ri = csr_matrix((data,(sparse_graph.Ri_rows,sparse_graph.Ri_cols)),mat_shape,dtype=dtype)\n Ro = csr_matrix((data,(sparse_graph.Ro_rows,sparse_graph.Ro_cols)),mat_shape,dtype=dtype)\n return Graph(sparse_graph.X, Ri, Ro, sparse_graph.y, sparse_graph.simmatched)\n\nfeature_names = ['x','y','layer','t','E']\nn_features = len(feature_names)\n\n#thanks Steve :-)\ndef draw_sample(X, Ri, Ro, y, \n cmap='bwr_r', \n skip_false_edges=True,\n alpha_labels=False, \n sim_list=None): \n # Select the i/o node features for each segment \n feats_o = X[find(Ro)[0]]\n feats_i = X[find(Ri)[0]] \n # Prepare the figure\n fig, (ax0,ax1) = plt.subplots(1, 2, figsize=(20,12))\n cmap = plt.get_cmap(cmap)\n \n if sim_list is None: \n # Draw the hits (layer, x, y)\n ax0.scatter(X[:,0], X[:,2], c='k')\n ax1.scatter(X[:,1], X[:,2], c='k')\n else: \n #ax0.scatter(X[:,0], X[:,2], c='k')\n #ax1.scatter(X[:,1], X[:,2], c='k')\n ax0.scatter(X[sim_list,0], X[sim_list,2], c='b')\n ax1.scatter(X[sim_list,1], X[sim_list,2], c='b')\n \n # Draw the segments\n for j in range(y.shape[0]):\n if not y[j] and skip_false_edges: continue\n if alpha_labels:\n seg_args = dict(c='k', alpha=float(y[j]))\n else:\n seg_args = dict(c=cmap(float(y[j])))\n ax0.plot([feats_o[j,0], feats_i[j,0]],\n [feats_o[j,2], feats_i[j,2]], '-', **seg_args)\n ax1.plot([feats_o[j,1], feats_i[j,1]],\n [feats_o[j,2], feats_i[j,2]], '-', **seg_args)\n # Adjust axes\n ax0.set_xlabel('$x$ [cm]')\n ax1.set_xlabel('$y$ [cm]')\n ax0.set_ylabel('$layer$ [arb]')\n ax1.set_ylabel('$layer$ [arb]')\n plt.tight_layout()\n\ndef draw_sample3d(X, Ri, Ro, y, \n cmap='bwr_r', \n skip_false_edges=True,\n alpha_labels=False, \n sim_list=None):\n # Select the i/o node features for each segment\n feats_o = X[find(Ri)[0]]\n feats_i = X[find(Ro)[0]]\n # Prepare the figure\n fig = plt.figure()\n ax2 = fig.add_subplot(111, projection='3d')\n cmap = plt.get_cmap(cmap)\n \n if sim_list is None: \n # Draw the hits (layer, x, y)\n ax2.scatter(X[:,0], X[:,1], X[:,2], c='k')\n else: \n ax2.scatter(X[sim_list,0], X[sim_list,1], X[sim_list,2], c='k')\n \n # Draw the segments\n for j in range(y.shape[0]):\n if not y[j] and skip_false_edges: continue\n if alpha_labels:\n seg_args = dict(c='k', alpha=float(y[j]))\n else:\n seg_args = dict(c=cmap(float(y[j]))) \n ax2.plot([feats_o[j,0], feats_i[j,0]],\n [feats_o[j,1], feats_i[j,1]],\n [feats_o[j,2], feats_i[j,2]],'-',**seg_args)\n # Adjust axes\n ax2.set_xlabel('$x$ [cm]')\n ax2.set_ylabel('$y$ [cm]')\n ax2.set_zlabel('$layer$ [arb]')\n \ndef make_graph_kdtree(coords,layers,sim_indices,r=2.5):\n #setup kd tree for fast processing\n the_tree = cKDTree(coords)\n \n #define the pre-processing (all layer-adjacent hits in ball R < r)\n #and build a sparse matrix representation, then blow it up \n #to the full R_in / R_out definiton\n pairs = the_tree.query_pairs(r=r,output_type='ndarray')\n first,second = pairs[:,0],pairs[:,1] \n #selected index pair list that we label as connected\n pairs_sel = pairs[( (np.abs(layers[(second,)]-layers[(first,)]) <= 1) )]\n data_sel = np.ones(pairs_sel.shape[0])\n \n #prepare the input and output matrices (already 
need to store sparse)\n r_shape = (coords.shape[0],pairs.shape[0])\n eye_edges = np.arange(pairs_sel.shape[0])\n \n R_i = csr_matrix((data_sel,(pairs_sel[:,1],eye_edges)),r_shape,dtype=np.uint8)\n R_o = csr_matrix((data_sel,(pairs_sel[:,0],eye_edges)),r_shape,dtype=np.uint8)\n \n #now make truth graph y (i.e. both hits are sim-matched) \n y = (np.isin(pairs_sel,sim_indices).astype(np.int8).sum(axis=-1) == 2)\n \n return R_i,R_o,y","sub_path":"notebooks/graph_generation/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"199667247","text":"from django.shortcuts import render\nimport json\nimport pandas as pd\nimport requests\n\n\ndef apihome(request):\n query1='''{\n allChreports {\n rNo\n reportTitle\n rDate\n }\n }'''\n\n url='http://127.0.0.1:8000/graphql/'\n r=requests.get(url, json={'query':query1})\n Jdata=r.json()\n df_data=Jdata['data']['allChreports']\n df=pd.DataFrame(df_data)\n\n rsAPI=[tuple(r) for r in df.to_numpy()]\n\n print(Jdata)\n\n return render(request, \"apihome.html\",{\n 'rsAPI':rsAPI,\n }) \ndef api_view(request):\n rno = request.GET['r_no']\n query1='''{\n CHReport(rNo:'''+rno+'''){\n reportTitle\n reportNote\n \trDate \n }\n DTReport(rNo:'''+rno+'''){\n sTitle\n sNote\n sNotePost\n sNoteSpecial\n }\n }'''\n url='http://127.0.0.1:8000/graphql/'\n r=requests.get(url, json={'query':query1})\n Jdata=r.json()\n rTitle=Jdata['data']['CHReport']['reportTitle']\n rDate=Jdata['data']['CHReport']['rDate']\n rNote=Jdata['data']['CHReport']['reportNote']\n df_data=Jdata['data']['DTReport']\n df=pd.DataFrame(df_data)\n dtAPI=[tuple(r) for r in df.to_numpy()]\n\n return render(request, \"apiview.html\",{\n 'report_title':rTitle,\n 'report_note':rNote,\n 'report_date':rDate,\n 'dtAPI':dtAPI,\n }) \n\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"103624045","text":"import pygame\nfrom Functions import load_image\n\n\nclass Potion(pygame.sprite.Sprite):\n\n def __init__(self, x, y, potion_sprites, all_sprites):\n super(Potion, self).__init__(potion_sprites, all_sprites)\n self.image = load_image('Tiles\\\\Items', 'health_potion.png')\n self.rect = self.image.get_rect().move(16 * x, 16 * y)\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self, player):\n if pygame.sprite.collide_mask(self, player):\n pygame.mixer.music.load('data\\\\drink.wav')\n pygame.mixer.music.play(1)\n player.health = player.health + 2\n self.kill()\n","sub_path":"Potion.py","file_name":"Potion.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29968983","text":"\nf = open(\"input_day5.txt\")\n\nall_ids = []\n\n\nfor line in f:\n start = 0 \n end = 127\n chars = list(line)\n size = 128\n for i in range(7):\n char = chars[i]\n size = size / 2\n if(char == 'F'):\n end = start + (size-1)\n elif(char == 'B'):\n start = start + size\n \n\n col_start = 0\n col_end = 7 \n size = 8 \n for i in range(3):\n char = chars[i+7]\n size = size / 2\n if(char == 'L'):\n col_end = col_start + (size-1)\n elif(char == 'R'):\n col_start = col_start + (size)\n\n seatID = (start * 8) + col_start\n all_ids.append(seatID)\n\nall_ids.sort()\n\nlastid = all_ids[0]\nfor id in all_ids:\n if (id - lastid) > 1:\n print(id)\n lastid = 
id","sub_path":"AOC_day5.py","file_name":"AOC_day5.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570586600","text":"\nfrom nintendo.nex import account, authentication, common, datastore, \\\n\tfriends, kerberos, matchmaking, notification, ranking, secure, \\\n\tservice, nattraversal\nfrom nintendo.settings import Settings\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass BackEndClient:\n\tdef __init__(self, access_key, version, settings=None):\n\t\tif settings:\n\t\t\tself.settings = settings.copy()\n\t\telse:\n\t\t\tself.settings = Settings()\n\t\tself.settings.set(\"server.access_key\", access_key)\n\t\tself.settings.set(\"server.version\", version)\n\t\t\n\t\tself.auth_client = service.RMCClient(self.settings)\n\t\tself.secure_client = service.RMCClient(self.settings)\n\t\t\n\t\tself.auth_proto = authentication.AuthenticationClient(self.auth_client)\n\t\tself.secure_proto = secure.SecureConnectionClient(self.secure_client)\n\t\t\n\t\tif self.settings.get(\"kerberos.key_derivation\") == 0:\n\t\t\tself.key_derivation = kerberos.KeyDerivationOld(65000, 1024)\n\t\telse:\n\t\t\tself.key_derivation = kerberos.KeyDerivationNew(1, 1)\n\t\t\t\n\t\tself.my_pid = None\n\t\tself.local_station = None\n\t\tself.public_station = None\n\t\t\n\tdef connect(self, host, port):\n\t\t# Connect to authentication server\n\t\tif not self.auth_client.connect(host, port, 1):\n\t\t\traise ConnectionError(\"Couldn't connect to authentication server\")\n\t\t\n\tdef close(self):\n\t\tself.auth_client.close()\n\t\tself.secure_client.close()\n\t\t\n\tdef login(self, username, password, auth_info=None, login_data=None):\n\t\t# Call login method on authentication protocol\n\t\tif auth_info:\n\t\t\tresponse = self.auth_proto.login_ex(username, auth_info)\n\t\telse:\n\t\t\tresponse = self.auth_proto.login(username)\n\t\t\t\n\t\t# Check for errors\n\t\tresponse.result.raise_if_error()\n\t\t\n\t\tself.my_pid = response.pid\n\t\t\n\t\tsecure_station = response.connection_data.main_station\n\n\t\t# Derive kerberos key from password\n\t\tkerberos_key = self.key_derivation.derive_key(\n\t\t\tpassword.encode(\"ascii\"), response.pid\n\t\t)\n\t\t\n\t\t# Decrypt ticket from login response\n\t\tticket = kerberos.ClientTicket()\n\t\tticket.decrypt(response.ticket, kerberos_key, self.settings)\n\t\t\n\t\tif ticket.target_pid != secure_station[\"PID\"]:\n\t\t\t# Request ticket for secure server\n\t\t\tresponse = self.auth_proto.request_ticket(\n\t\t\t\tself.my_pid, secure_station[\"PID\"]\n\t\t\t)\n\t\t\t\n\t\t\t# Check for errors and decrypt ticket\n\t\t\tresponse.result.raise_if_error()\n\t\t\tticket = kerberos.ClientTicket()\n\t\t\tticket.decrypt(response.ticket, kerberos_key, self.settings)\n\t\t\t\n\t\tticket.source_pid = self.my_pid\n\t\tticket.target_cid = secure_station[\"CID\"]\n\n\t\t# The secure server may reside at the same\n\t\t# address as the authentication server\n\t\thost = secure_station[\"address\"]\n\t\tport = secure_station[\"port\"]\n\t\tif host == \"0.0.0.1\":\n\t\t\thost, port = self.auth_client.remote_address()\n\n\t\t# Connect to secure server\n\t\tserver_sid = secure_station[\"sid\"]\n\t\tif not self.secure_client.connect(host, port, server_sid, ticket):\n\t\t\traise ConnectionError(\"Couldn't connect to secure server\")\n\t\t\n\t\t# Create a stationurl for our local client address\n\t\tclient_addr = self.secure_client.local_address()\n\t\tself.local_station = 
common.StationURL(\n\t\t\taddress=client_addr[0], port=client_addr[1],\n\t\t\tsid=self.secure_client.stream_id(),\n\t\t\tnatm=0, natf=0, upnp=0, pmp=0\n\t\t)\n\t\t\n\t\t# Register urls on secure server\n\t\tif login_data:\n\t\t\tresponse = self.secure_proto.register_ex([self.local_station], login_data)\n\t\telse:\n\t\t\tresponse = self.secure_proto.register([self.local_station])\n\n\t\t# Check for errors and update urls\n\t\tresponse.result.raise_if_error()\n\t\tself.public_station = response.public_station\n\t\tself.public_station[\"RVCID\"] = response.connection_id\n\t\tself.local_station[\"RVCID\"] = response.connection_id\n\t\t\n\tdef login_guest(self):\n\t\tself.login(\"guest\", \"MMQea3n!fsik\")\n\t\t\n\tdef get_pid(self):\n\t\treturn self.my_pid\n","sub_path":"nintendo/nex/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16522666","text":"prompt = \"\\nPlease choose a pizza topping: \"\nprompt += \"\\nEnter 'quit' when you are finished: \"\n\nwhile True:\n topping = input(prompt)\n \n if topping != 'quit':\n print(\" I'll add {} to your pizza.\".format(topping))\n else:\n break\n","sub_path":"Chap 7 User input and While Loops/7_4_Pizza_Toppings.py","file_name":"7_4_Pizza_Toppings.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"316618367","text":"from unittest import TestCase\nimport unittest\nimport numpy as np\nimport Ofpp\nfrom smithers.io.openfoam import OpenFoamHandler\n\n# openfoam_mesh_path = \"tests/test_datasets/openfoam_mesh\"\n# notime_openfoam_mesh_path = \"tests/test_datasets/notime_openfoam_mesh\"\n#\n# handler = OpenFoamHandler()\n# mesh = handler.read(openfoam_mesh_path)\n# truth_mesh = Ofpp.FoamMesh(openfoam_mesh_path)\n\n\nclass TestOpenFoamHandler(TestCase):\n def test_read(self):\n assert type(mesh) == dict\n\n assert \"points\" in mesh[\"0\"]\n assert \"faces\" in mesh[\"0\"]\n assert \"boundary\" in mesh[\"0\"]\n assert \"cells\" in mesh[\"0\"]\n\n def test_read_boundary_names(self):\n assert set(mesh[\"0\"][\"boundary\"].keys()) == set(\n [\n b\"inlet\",\n b\"outlet\",\n b\"bottom\",\n b\"top\",\n b\"obstacle\",\n b\"frontAndBack\",\n ]\n )\n\n def test_read_points(self):\n np.testing.assert_almost_equal(mesh[\"0\"][\"points\"], truth_mesh.points)\n\n def test_read_faces(self):\n np.testing.assert_almost_equal(mesh[\"0\"][\"faces\"], truth_mesh.faces)\n\n def test_read_cells(self):\n assert len(mesh[\"0\"][\"cells\"]) == len(truth_mesh.cell_faces)\n\n def test_read_cell_faces(self):\n a_key = list(mesh[\"0\"][\"cells\"].keys())[0]\n smithers_cell = mesh[\"0\"][\"cells\"][a_key]\n\n np.testing.assert_almost_equal(\n smithers_cell[\"faces\"], truth_mesh.cell_faces[a_key]\n )\n\n def test_read_cell_neighbors(self):\n a_key = list(mesh[\"0\"][\"cells\"].keys())[-1]\n smithers_cell = mesh[\"0\"][\"cells\"][a_key]\n np.testing.assert_almost_equal(\n smithers_cell[\"neighbours\"], truth_mesh.cell_neighbour[a_key]\n )\n\n def test_read_cell_points(self):\n a_key = list(mesh[\"0\"][\"cells\"].keys())[-1]\n smithers_cell = mesh[\"0\"][\"cells\"][a_key]\n\n faces_idxes = truth_mesh.cell_faces[a_key]\n faces_points = np.concatenate(\n [truth_mesh.faces[face_idx] for face_idx in faces_idxes]\n )\n faces_points = np.unique(faces_points)\n\n np.testing.assert_almost_equal(smithers_cell[\"points\"], faces_points)\n\n def test_boundary(self):\n 
ofpp_obstacle = truth_mesh.boundary[b\"obstacle\"]\n smithers_obstacle = mesh[\"0\"][\"boundary\"][b\"obstacle\"]\n\n ofpp_obstacle_faces = truth_mesh.faces[\n ofpp_obstacle.start : ofpp_obstacle.start + ofpp_obstacle.num\n ]\n\n np.testing.assert_almost_equal(\n mesh[\"0\"][\"faces\"][smithers_obstacle[\"faces\"][\"faces_indexes\"]],\n ofpp_obstacle_faces,\n )\n\n points_indexes = np.concatenate(\n [face_points_idx for face_points_idx in ofpp_obstacle_faces]\n )\n points_indexes = np.unique(points_indexes)\n all_points = truth_mesh.points[points_indexes]\n\n np.testing.assert_almost_equal(\n mesh[\"0\"][\"points\"][smithers_obstacle[\"points\"]], all_points\n )\n\n assert smithers_obstacle[\"points\"].ndim == 1\n assert (\n isinstance(smithers_obstacle[\"faces\"][\"faces_indexes\"], list)\n or smithers_obstacle[\"faces\"][\"faces_indexes\"].ndim == 1\n )\n\n def test_read_fields_time_instants_all(self):\n all_numeric_mesh = handler.read(\n openfoam_mesh_path, time_instants=\"all_numeric\"\n )\n assert set(all_numeric_mesh.keys()) == set([\"0\", \"1088\", \"4196\"])\n\n def test_read_fields_time_instants_first(self):\n assert set(mesh.keys()) == set([\"0\"])\n\n def test_read_fields_time_instants_list(self):\n handler = OpenFoamHandler()\n time_list_mesh = handler.read(\n openfoam_mesh_path, time_instants=[\"1088\"]\n )\n assert set(time_list_mesh.keys()) == set([\"1088\"])\n\n def test_read_fields_all(self):\n for tdc in mesh.values():\n assert set(tdc[\"fields\"].keys()) == set([\"U\", \"p\"])\n\n def test_read_fields_list(self):\n fields_list_mesh = handler.read(openfoam_mesh_path, field_names=[\"p\"])\n for tdc in fields_list_mesh.values():\n assert set(tdc[\"fields\"].keys()) == set([\"p\"])\n\n def test_no_time_instants(self):\n # assert that this doesn't raise anything\n handler.read(notime_openfoam_mesh_path)\n\n def test_area(self):\n np.testing.assert_almost_equal(\n mesh[\"0\"][\"boundary\"][b\"obstacle\"][\"faces\"][\"area\"][100],\n 0.039269502373542965,\n decimal=7,\n )\n\n def test_normal(self):\n pts = np.array(\n [\n [0.00670972, 0.0209654, 0.0999091],\n [0.00753623, 0.0213154, 0.0995543],\n [0.00765566, 0.0214058, 0.100427],\n [0.007578, 0.0208206, 0.100599],\n ]\n )\n vecs = pts[1 : len(pts)] - pts[0]\n\n nrm = OpenFoamHandler._normal(pts)\n dots = np.dot(vecs, nrm)\n\n np.testing.assert_allclose(dots[dots != 0], 0, atol=1e-4)\n","sub_path":"tests/test_openfoamhandler.py","file_name":"test_openfoamhandler.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"556008234","text":"import os\nimport pdb\nimport torch\nimport torchvision\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom pytorch_i3d import InceptionI3d\nfrom pytorch_sife import SIFE\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import Compose, ToTensor, Resize\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom data_loader_jpeg import *\n\n# from sklearn.manifold import TSNE\n# from MulticoreTSNE import MulticoreTSNE as TSNE\nfrom matplotlib import pyplot as plt\nfrom collections import OrderedDict\n\nimport nonechucks as nc\n\n# ----------------- Modify these --------------------\n\nNUM_ACTIONS = 157\nNUM_FEATURES = 1024\nBATCH_SIZE = 16\nFEATURES_SAVE_PATH = '/vision/u/samkwong/pytorch-i3d/charades_experiments/i3d_features'\n\n\"\"\" baseline i3d params \"\"\"\nIS_BASELINE = True # use 
baseline i3d\nDATA_PARALLEL = True # model trained using nn.DataParallel\n\ndef extract_data(model, test_loader):\n # Move model to CPU/GPU\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n print('Using device:', device)\n model = model.to(device=device) # move model parameters to CPU/GPU\n\n # Extract features and ground truth labels\n print('Starting feature extraction with batch size = {}'.format(BATCH_SIZE))\n inputs_features = np.empty((0, NUM_FEATURES)) # to hold all inputs' feature arrays\n\n i = 0\n for data in test_loader:\n print(\"Extracting features from batch {}\".format(i))\n i += 1\n inputs = data[0]\n inputs = inputs.to(device=device, dtype=torch.float32) \n with torch.no_grad():\n features = model.extract_features(inputs)\n\n print('Features shape =', features.shape)\n features = features.squeeze()\n \n features = features.cpu().detach().numpy()\n print('Inputs Features shape =', inputs_features.shape)\n print('Features shape =', features.shape)\n inputs_features = np.append(inputs_features, features, axis=0)\n\n print('inputs_features shape = {}'.format(inputs_features.shape))\n return inputs_features\n\ndef get_test_loader(model):\n print('Getting test_loader')\n # Transforms\n SPATIAL_TRANSFORM = Compose([\n Resize((224, 224)),\n ToTensor()\n ])\n \n # Load dataset\n vf = VideoFolder(root=\"/vision/group/Charades_RGB/Charades_v1_rgb\",\n csv_file_input=\"/vision/group/Charades/annotations/Charades_v1_train.csv\",\n csv_file_actions_labels=\"/vision/u/samkwong/pytorch-i3d/charades_experiments/data/annotations/Charades_v1_actions.csv\",\n csv_file_scene_labels=\"/vision/u/samkwong/pytorch-i3d/charades_experiments/data/annotations/Charades_v1_scenes.csv\",\n clip_size=128,\n nclips=1,\n step_size=1,\n is_val=True, # True means don't randomly offset clips (i.e. 
don't augment dataset)\n transform=SPATIAL_TRANSFORM,\n loader=default_loader)\n\n #vf = nc.SafeDataset(vf) # skip over any noisy samples\n\n print('Size of training set = {}'.format(len(vf)))\n test_loader = DataLoader(vf, \n batch_size=BATCH_SIZE,\n shuffle=False, \n num_workers=2,\n pin_memory=True)\n\n print('Aqcuired test_loader!')\n return test_loader\n\n# ------------------------------------------------------------\n\nif __name__ == '__main__':\n\n i3d = InceptionI3d(NUM_ACTIONS, in_channels=3)\n model = i3d\n # model.replace_logits(NUM_ACTIONS)\n\n test_loader = get_test_loader(model)\n inputs_features = extract_data(model, test_loader)\n print('Saving features')\n np.save(FEATURES_SAVE_PATH, inputs_features)\n \n","sub_path":"charades_experiments/old_files/extract_features_i3d.py","file_name":"extract_features_i3d.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185378531","text":"from lr_finder import LRFinder\nimport math\n\ndef get_LR(model,trainloader, optimizer, criterion, device,testloader=None):\n\n # print(\"########## Tweaked version from fastai ###########\")\n # lr_find = LRFinder(model, optimizer, criterion, device=\"cuda\")\n # lr_find.range_test(trainloader, end_lr=100, num_iter=100)\n # best_lr=lr_find.plot() # to inspect the loss-learning rate graph\n # lr_find.reset()\n # return best_lr\n\n # print(\"########## Tweaked version from fastai ###########\")\n # lr_find = LRFinder(model, optimizer, criterion, device=\"cuda\")\n # lr_find.range_test(trainloader, end_lr=1, num_iter=100)\n # lr_find.plot() # to inspect the loss-learning rate graph\n # lr_find.reset()\n # for index in range(len(lr_find.history['loss'])):\n # item = lr_find.history['loss'][index]\n # if item == lr_find.best_loss:\n # min_val_index = index\n # print(f\"{min_val_index}\")\n #\n # lr_find.plot(show_lr=lr_find.history['lr'][75])\n # lr_find.plot(show_lr=lr_find.history['lr'][min_val_index])\n #\n # val_index = 75\n # mid_val_index = math.floor((val_index + min_val_index)/2)\n # show_lr=[{'data': lr_find.history['lr'][val_index], 'linestyle': 'dashed'}, {'data': lr_find.history['lr'][mid_val_index], 'linestyle': 'solid'}, {'data': lr_find.history['lr'][min_val_index], 'linestyle': 'dashed'}]\n # # lr_find.plot_best_lr(skip_start=10, skip_end=5, log_lr=True, show_lr=show_lr, ax=None)\n #\n # best_lr = lr_find.history['lr'][mid_val_index]\n # print(f\"LR to be used: {best_lr}\")\n #\n # return best_lr\n\n print(\"########## Leslie Smith's approach ###########\")\n lr_find = LRFinder(model, optimizer, criterion, device=\"cuda\")\n lr_find.range_test(trainloader,val_loader=testloader, end_lr=1, num_iter=100, step_mode=\"linear\")\n best_lr=lr_find.plot(log_lr=False)\n lr_find.reset()\n return best_lr\n","sub_path":"API/find_lr_max.py","file_name":"find_lr_max.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"368603272","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/trepan/processor/command/break.py\n# Compiled at: 2018-05-31 17:34:51\nimport os\nfrom trepan.processor.command import base_cmd as Mbase_cmd\nfrom trepan.processor import cmdbreak as Mcmdbreak\nfrom trepan.processor import complete as Mcomplete\n\nclass 
BreakCommand(Mbase_cmd.DebuggerCommand):\n r\"\"\"**break** [*location*] [if *condition*]]\n\nSets a breakpoint, i.e. stopping point just before the\nexecution of the instruction specified by *location*.\n\nWithout arguments or an empty *location*, the breakpoint is set at the\ncurrent stopped location.\n\nSee `help syntax location` for detailed information on a location.\n\nIf the word `if` is given after *location*, subsequent arguments given\nWithout arguments or an empty *location*, the breakpoint is set\nthe current stopped location.\n\nNormally we only allow stopping at lines that we think are\nstoppable. If the command has a `!` suffix, force the breakpoint anyway.\n\nExamples:\n---------\n\n break # Break where we are current stopped at\n break if i < j # Break at current line if i < j\n break 10 # Break on line 10 of the file we are\n # currently stopped at\n break! 10 # Break where we are current stopped at, even if\n # we don't think line 10 is stoppable\n break os.path.join() # Break in function os.path.join\n break x[i].fn() # break in function specified by x[i].fn\n break x[i].fn() if x # break in function specified by x[i].fn\n # if x is set\n break os.path:45 # Break on line 45 file holding module os.path\n break myfile.py:2 # Break on line 2 of myfile.py\n break myfile.py:2 if i < j # Same as above but only if i < j\n break \"foo's.py\":1\" # One way to specify path with a quote\n break 'c:\\foo.bat':1 # One way to specify a Windows file name,\n break '/My Docs/foo.py':1 # One way to specify path with blanks in it\n\nSee also:\n---------\n\n`info break`, `tbreak`, `condition` and `help syntax location`.\"\"\"\n aliases = ('b', 'break!', 'b!')\n category = 'breakpoints'\n min_args = 0\n max_args = None\n name = os.path.basename(__file__).split('.')[0]\n need_stack = True\n short_help = 'Set breakpoint at specified line or function'\n complete = Mcomplete.complete_break_linenumber\n\n def run(self, args):\n force = True if args[0][(-1)] == '!' 
else False\n func, filename, lineno, condition = Mcmdbreak.parse_break_cmd(self.proc, args)\n if not (func == None and filename == None):\n Mcmdbreak.set_break(self, func, filename, lineno, condition, False, args, force=force)\n return\n\n\nif __name__ == '__main__':\n\n def doit(cmd, a):\n cmd.current_command = ' '.join(a)\n print(Mcmdbreak.parse_break_cmd(cmd.proc, a))\n\n\n import sys\n from trepan import debugger as Mdebugger\n d = Mdebugger.Trepan()\n command = BreakCommand(d.core.processor)\n command.proc.frame = sys._getframe()\n command.proc.setup()\n doit(command, [' '])\n doit(command, ['10'])\n doit(command, [__file__ + ':10'])\n\n def foo():\n return 'bar'\n\n\n doit(command, ['foo'])\n doit(command, ['os.path'])\n doit(command, ['os.path', '5+1'])\n doit(command, ['os.path.join'])\n doit(command, ['if', 'True'])\n doit(command, ['foo', 'if', 'True'])\n doit(command, ['os.path:10', 'if', 'True'])\n command.run(['break'])\n command.run(['break', 'command.run'])\n command.run(['break', '10'])\n command.run(['break', __file__ + ':10'])\n command.run(['break', 'foo'])","sub_path":"pycfiles/trepan3k-0.8.11-py3.2/break.cpython-32.py","file_name":"break.cpython-32.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"600443004","text":"from tkinter import *\nfrom PIL import Image, ImageTk\nfrom random import randint\n\n# main window\nroot = Tk()\nroot.title(\"Rock Scissor Paper\")\nroot.configure(background=\"aqua\")\n\n# picture\nrock_img = ImageTk.PhotoImage(Image.open(\"./assets/rock-user.png\"))\npaper_img = ImageTk.PhotoImage(Image.open(\"./assets/paper-user.png\"))\nscissor_img = ImageTk.PhotoImage(Image.open(\"./assets/scissors-user.png\"))\nrock_img_comp = ImageTk.PhotoImage(Image.open(\"./assets/rock.png\"))\npaper_img_comp = ImageTk.PhotoImage(Image.open(\"./assets/paper.png\"))\nscissor_img_comp = ImageTk.PhotoImage(Image.open(\"./assets/scissors.png\"))\n\n# insert picture\nuser_label = Label(root, image=scissor_img, bg=\"aqua\")\ncomp_label = Label(root, image=scissor_img_comp, bg=\"aqua\")\ncomp_label.grid(row=1, column=0)\nuser_label.grid(row=1, column=4)\n\n# scores\nplayerScore = Label(root, text=0, font=(\"Arial\", 15), bg=\"aqua\", fg=\"green\")\ncomputerScore = Label(root, text=0, font=(\"Arial\", 15), bg=\"aqua\", fg=\"green\")\ncomputerScore.grid(row=1, column=1)\nplayerScore.grid(row=1, column=3)\n\n# indicators\nuser_indicator = Label(root, font=(\"Arial\", 15), text=\"USER\", bg=\"aqua\", fg=\"brown\")\ncomp_indicator = Label(root, font=(\"Arial\", 15), text=\"COMPUTER\",\n bg=\"aqua\", fg=\"brown\")\nuser_indicator.grid(row=0, column=3)\ncomp_indicator.grid(row=0, column=1)\n\n# messages\nmsg = Label(root, font=(\"Arial\", 15), bg=\"aqua\", fg=\"green\")\nmsg.grid(row=3, column=2)\n\n# update message\ndef updateMessage(message):\n msg['text'] = message\n\n# update user score\ndef updatePlayerScore():\n score = int(playerScore['text'])\n score += 1\n playerScore['text'] = str(score)\n\n# update computer score\ndef updateComputerScore():\n score = int(computerScore['text'])\n score += 1\n computerScore['text'] = str(score)\n\n# check winner \ndef checkWin(player,computer):\n if(player == computer):\n updateMessage(\"It's a tie\")\n elif(player == \"rock\"):\n if(computer == \"scissor\"):\n updateMessage(\"You Won\")\n updatePlayerScore()\n else:\n updateMessage(\"You Lose\")\n updateComputerScore()\n elif(player == \"paper\"):\n if(computer == \"rock\"):\n 
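# paper covers rock, so the player wins this branch\n            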
updateMessage(\"You Won\")\n updatePlayerScore()\n else:\n updateMessage(\"You Lose\")\n updateComputerScore()\n elif(player == \"scissor\"):\n if(computer == \"paper\"):\n updateMessage(\"You Won\")\n updatePlayerScore()\n else:\n updateMessage(\"You Lose\")\n updateComputerScore()\n else:\n pass\n\n# update choices\n\nchoices = [\"rock\", \"paper\", \"scissor\"]\n\n\ndef updateChoice(x):\n\n # for computer\n compChoice = choices[randint(0, 2)]\n if compChoice == \"rock\":\n comp_label.configure(image=rock_img_comp)\n elif compChoice == \"paper\":\n comp_label.configure(image=paper_img_comp)\n else:\n comp_label.configure(image=scissor_img_comp)\n\n\n# for user\n if x == \"rock\":\n user_label.configure(image=rock_img)\n elif x == \"paper\":\n user_label.configure(image=paper_img)\n else:\n user_label.configure(image=scissor_img)\n\n checkWin(x, compChoice)\n\n\n# buttons\nrock = Button(root, width=20, height=2, text=\"ROCK\",\n bg=\"#FF3E4D\", fg=\"black\", font=(\"Arial\", 15), command=lambda: updateChoice(\"rock\")).grid(row=2, column=1)\npaper = Button(root, width=20, height=2, text=\"PAPER\",\n bg=\"#FAD02E\", fg=\"black\", font=(\"Arial\", 15), command=lambda: updateChoice(\"paper\")).grid(row=2, column=2)\nscissor = Button(root, width=20, height=2, text=\"SCISSOR\",\n bg=\"#0ABDE3\", fg=\"black\", font=(\"Arial\", 15), command=lambda: updateChoice(\"scissor\")).grid(row=2, column=3)\n\nroot.mainloop()\n","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128644845","text":"from pygame.sprite import Sprite\nfrom pygame import Surface\nfrom pygame import event, USEREVENT\n\n\nclass Energy(Sprite):\n def __init__(self, position, size, color, id_en, energy):\n Sprite.__init__(self)\n self.image = Surface(size)\n self.image.fill(color)\n self.rect = self.image.get_rect()\n self.rect.x, self.rect.y = position\n self.is_energy = True\n self.energy_take = USEREVENT + (2 + id_en)\n self.energy = energy\n","sub_path":"Model/Energy.py","file_name":"Energy.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"386827599","text":"'''\nPurpose - create tabular data table with PPA projects as rows and metrics as columns.\n\tMetrics are in excel sheet Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Spreadsheet\\RawDataTableLayout.xlsx\n \n\t11/20/2017:\n\t\tMade this just a module. If user wants region-level data they can \n\t\tbring in the output of this script in and write it to the community-types CSV.\n\t\n\t11/17/2017:\n\t\tfor transit, use dissolved file with counts of vehicle stops (pre-made\n\t\tthrough dissolve in arcmap rather than in this script). 
Idea is that it\n\t\tis a smaller file (4500 rows instead of 126000) and takes fewer resources.\n\t\t\n\t\timport functions from stand-alone function script\n\t\n\t11/16/2017:\n\t\tadds tag to each project indicating community type\n\t\t\n\t11/14/2017:\n\t\tattempt to replace searchcursor with da.searchcursor because supposedly\n\t\tit's much faster.\n\n\t\t\n\t11/9/2017:\n\t\tAdd regional averages; possibly averages grouped by placetype\n\t\n\t10/17/2017:\n\tFor job accessibility, will say zero jobs accessible if the project has no\n\tTAZ intesecting it (i.e., it's not in SACOG region).\n\t\n\tOlder updates:\n\tSee notes in Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Python\\Old scripts\\getMasterTable_multibuff_latest_10192017good.py\n\nto run in command line:\npython Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Python\\getMasterTable_wregiontotal_latest.py\n\nto run in shell:\nexecfile(r'Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Python\\getMasterTable_wregiontotal_latest.py')\n'''\nimport arcpy\nfrom arcpy import env\nimport datetime\nimport time\nimport sys\nimport os\nimport csv\n\narcpy.env.overwriteOutput = True\n\nscript_dir = r'Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Python\\Master Table Scripts'\nos.chdir(script_dir)\nimport ppa_functions1 as ppaf\n\ndateSuffix = str(datetime.date.today().strftime('%m%d%Y'))\n\n#===============USER-DEFINED PARAMETERS=========================\n# Specify directories\nworkSpace = r'Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\PPA_layers.gdb'\narcpy.env.workspace = workSpace\naccessibilityTxtDir = r'Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Accessbility Metric'\noutTextDir = r'Q:\\ProjectLevelPerformanceAssessment\\DataLayers_Proof_of_Concept\\Spreadsheet\\Region and CType Averages'\n\n\n#project file inputs\nprojectsList = \"mtpprojx_sampWCtyp11172017\" # SHP/GDB file of all projects\nprojectIdCol = \"F2016_Proje\" #column with the unique project ids\n#data layer inputs\ncenterLine = \"Region_Centerline2017fwytag2\"\nAllParcel = \"parcel_all\"\nJobCenter = \"JobCenters\"\nBikewaysCurrent = \"AllBikeways2015\"\nBikewaysFuture = \"ProposedBikeways2015\"\nTransitData = \"TrnStopLocnWTrpCnt11172017\"\nTIMS = \"Collisions2011to2016fwytag\"\nTAZ = \"TAZ07\"\ncommTypes = \"community_type\"\ncommTypeIdCol = \"type\"\n\n# input choices\nYear = input(\"Enter year for regional data: \") #year to use for ILUT and model network data\n\n# specify list of projects if you are testing and don't want to summarize all projects.\nspecProjList = ['PLA25548',\n\t\t\t\t'CAL20432',\n\t\t\t\t'SAC24704'] \n\nbufferDist1 = 1320 #distance in feet for transpo infrastructure buffers\nbufferDist2 = 2640 #distance in feet for land-use buffers\nbufferDistJC = 21120 # job center buffer distance in feet; 4 mile = 21120ft\n\nstart_time = time.time()\n\n#==================DATA IMPORT AND PREPARATION========================\n\nYear = str(Year)\nILUT = \"parcel_\" + Year\ninterSect = \"intersections_\" + Year\nmodelNet = \"SM15_modelnet_\" + Year\naccessTxt = accessibilityTxtDir + \"\\\\access_\" + Year + \".csv\"\noutputFile = outTextDir+\"\\\\RegionSum\" + Year + \"_\" + dateSuffix + \".csv\"\n\t\n#all-parcel file project stats\nyr2char = Year[-2:]\nvarListAllParcel = [\"acres\",\"acres_AG_\" + yr2char,\"du_BO\",\"emp_BO\",\n\t\t\t\t\t\"netACdu_\" + yr2char,\"netACemp_\" + yr2char]\n\t\t\t\t\t\n#ILUT\nvarListILUT = 
[\"DU_TOT\",\"POP_TOT\",\"POP_EJ\",\"HH_TOT_P\",\"EMPEDU\",\"EMPFOOD\",\"EMPGOV\",\"EMPOFC\",\n\t\t\t\t\"EMPOTH\",\"EMPRET\",\"EMPSVC\",\"EMPMED\",\"EMPIND\",\"EMPTOT_S\",\"STUD_K12\",\"STUD_UNI\",\n\t\t\t\t\"ENR_K12\",\"ENR_UNI\",\n\t\t\t\t\"VT_TOT\",\"VMT_TOT\",\"CVMT_TOT\",\"PT_TOT_RES\",\"SOV_TOT_RES\",\"HOV_TOT_RES\",\n\t\t\t\t\"TRN_TOT_RES\",\"BK_TOT_RES\",\"WK_TOT_RES\",\"VT_TOT_cv\",\"CV3_VT\",\"VMT_TOT_cv\",\n\t\t\t\t\"CV3_VMT\",\"CVMT_TOT_cv\",\"CV3_CVMT\"]\n\n\n#intersections\nvarListIntersecn = [\"INTXN_1WAY\",\"INTXN_3WAY\",\"INTXN_4WAY\"]\n\n# Job Center polygons\n\nvarListjobCenter = ['JOB_CTR']\n\n#street centerline length\nvarListCLineDist = ['BUF_CTRLINE_MI']\n\n# Bike Facilities\nvarListBikeNtwk = ['BIKC1_MIBUFF','BIKC2_MIBUFF','BIKC3_MIBUFF','BIKC4_MIBUFF']\n\n# Google Transit\nvarListTransit = ['TRAN_LOCNS','TRAN_EVENTS']\n\t#service density/vehicle stops (use pointSum function)\n\t#unique stop locations (look to UOP NPMRDS script for inspiration)\n\n# collisons\nvarListCollisionSums = ['PEDKILL','PEDINJ','BICKILL','BICINJ'] \nvarListCollisionsCounts = ['FWY_COLLN',\n\t\t\t\t\t\t\t'NON_FWY_COLLN', \n\t\t\t\t\t\t\t'FWY_FATAL_COLLN',\n\t\t\t\t\t\t\t'NONFWY_FATAL_COLLN'] #count of all collisions\n\n\n# Model network (buffer)\nvarListModelBuffer = ['FwyVMT_buff','FwyCVMT_buff','FwyLnMi_buff','FwyRteMi_buff',\n\t\t\t\t\t\t'StVMT_buff','StCVMT_buff','StLnMi_buff','StRteMi_buff']\n\n#TAZ-based accessibilityTxtDir\nvarListAccessibility = ['JOBS30D_PAVG','JOBS45T_PAVG','WKR30D_JBAVG','WKR45T_JBAVG']\n\n#=================PREPARE OUTPUT TABLE================================\n#header row elements are concatenated lists converted to strings with comma-separated words\nHeader_ProjInfo = 'projectID,PROJ_TYPE,commun_type,PROJ_LEN_MI'\nHeader_buffArea= 'buffarea_qm,buffarea_hm'\nHeader_allParcel = ','.join(varListAllParcel) #makes list into comma-sep'd string\nHeader_ILUT = ','.join(varListILUT)\nHeader_Intersecn = ','.join(varListIntersecn)\nHeader_jobCenter = ','.join(varListjobCenter)\nHeader_CLineDist = ','.join(varListCLineDist)\nHeader_bikeCLDist = ','.join(varListBikeNtwk)\nHeader_Transit = ','.join(varListTransit)\nHeader_TIMS = ','.join(varListCollisionsCounts) + ',' + ','.join(varListCollisionSums)\nHeader_modelBuff = ','.join(varListModelBuffer)\nHeader_access = ','.join(varListAccessibility)\n\nheader_items = [Header_ProjInfo,Header_buffArea,Header_allParcel,\n Header_ILUT,Header_Intersecn,Header_jobCenter,\n Header_CLineDist,Header_bikeCLDist,Header_Transit,\n Header_TIMS,Header_modelBuff,Header_access]\n\n\nheader_line = ','.join(header_items) + '\\n'\n\n#=======================make feature layers=========================\narcpy.MakeFeatureLayer_management(AllParcel,\"inAllParcel_lyr\")\narcpy.MakeFeatureLayer_management(ILUT,\"inILUT_lyr\")\narcpy.MakeFeatureLayer_management(projectsList,\"inprojectsList_lyr\")\narcpy.MakeFeatureLayer_management(interSect,\"inInterSect_lyr\")\narcpy.MakeFeatureLayer_management(JobCenter,\"inJobCenter_lyr\")\narcpy.MakeFeatureLayer_management(BikewaysCurrent,\"currBikeways_lyr\")\narcpy.MakeFeatureLayer_management(BikewaysFuture,\"futBikeways_lyr\")\narcpy.MakeFeatureLayer_management(TransitData,\"inGTFS_lyr\")\narcpy.MakeFeatureLayer_management(TIMS,\"inTIMS_lyr\")\narcpy.MakeFeatureLayer_management(modelNet,\"inModelNetwork_lyr\")\narcpy.MakeFeatureLayer_management(TAZ,\"inTAZ_lyr\")\narcpy.MakeFeatureLayer_management(centerLine,\"inCenterline_lyr\")\n\n#make future bikeways layer combining present and proposed bikeways\nfuture_bikeways 
= \"in_memory\\currAndFutBikewys\"\narcpy.Merge_management([\"currBikeways_lyr\",\"futBikeways_lyr\"],\n\t\t\t\t\t\tfuture_bikeways)\nfuture_bikeways_fl = \"future_bikeways_lyr\"\narcpy.MakeFeatureLayer_management(future_bikeways,future_bikeways_fl)\n\n\n\n#================CALCULATE REGION-LEVEL METRICS====================\n\n#project ID and type are just \"all\" when referring to entire region----------\nprojstat_regn = \"REGION,All,All\"\n\n#area of region within 0.25mi and 0.5mi of centerline, in acres--------------\n\nregn_area_qmi = str(999) #temporary dummy\nregn_area_hmi = str(999) #temporary dummy\n\nprint(\"calculating region-level centerline buffer areas (0.5mi and 0.25mi)...\")\nregn_area_qmi = str(ppaf.bufferArea(\"inCenterline_lyr\", \"tmpbuff_region\", bufferDist1))\nregn_area_hmi = str(ppaf.bufferArea(\"inCenterline_lyr\", \"tmpbuff_region\", bufferDist2))\n\n\n#regional centerline total length---------------------------\nregion_cline_tot = 0\nprint(\"summing lengths of centerline segments...\")\nwith arcpy.da.SearchCursor(\"inCenterline_lyr\",\"Shape_Length\") as cline_cursor:\n\tfor segment in cline_cursor:\n\t\tseg_length = segment[0]\n\t\tregion_cline_tot += seg_length\n\t\nregion_cline_tot = str(region_cline_tot/5280) #convert from feet to miles\n\n#-----------------------------------------------------------------\n\n#calculate all-parcel stats for all parcels within 1/2 mile of regional centerline\nprint(\"getting general stats for parcels within half mile of centerline...\")\nregion_pcl = ppaf.pointSum(\"inAllParcel_lyr\",varListAllParcel,bufferDist2, \n\t\t\t\"inCenterline_lyr\")\n\nprint(\"getting ILUT stats for parcels within half mile of centerline...\")\nregion_ilut = ppaf.pointSum(\"inILUT_lyr\",varListILUT,bufferDist2, \n\t\t\t\"inCenterline_lyr\")\n\n\n#get stats on intersections-------------------------------------------\n\nprint(\"summarizing intersection types, job centers...\")\narcpy.SelectLayerByLocation_management(\"inInterSect_lyr\", \n\t\t\t\t\t\t\t\t\t\t\t\"WITHIN_A_DISTANCE\",\n\t\t\t\t\t\t\t\t\t\t\t\"inCenterline_lyr\", \n\t\t\t\t\t\t\t\t\t\t\tbufferDist1)\n\ncnt_type1=0\ncnt_type3=0\ncnt_type4=0\n\nwith arcpy.da.SearchCursor(\"inInterSect_lyr\",\"LINKS\") as aCursor:\n\tfor aCur in aCursor:\n\t\tlinkType = aCur[0]\n\t\tif linkType == 1:\n\t\t\tcnt_type1 += 1\n\t\telif linkType == 3:\n\t\t\tcnt_type3 += 1\n\t\telif linkType == 4:\n\t\t\tcnt_type4 += 1\n\nregion_intersxncnt = str(cnt_type1) + ',' + str(cnt_type3) + ',' \\\n\t\t\t\t\t+ str(cnt_type4)\n\t\t\t\t\t\n#total job centers in region. 
consider making an average of some sort---------\njob_ctrs_region = str(arcpy.GetCount_management(\"inJobCenter_lyr\"))\n\n#non-freeway centerline miles in region-------------------------------\nnon_fwy_clinetot = 0\nnofwy_sql = \"fwy_yn = 0\"\n\nprint(\"summing lengths of non-freeway centerline segments...\")\nwith arcpy.da.SearchCursor(\"inCenterline_lyr\",\"SHAPE@LENGTH\",nofwy_sql) \\\nas cline_cursor_nonfwy:\n\tfor segment in cline_cursor_nonfwy:\n\t\tseg_length = segment[0]\n\t\tif seg_length is None:\n\t\t\tseg_length = 0\n\t\tnon_fwy_clinetot += seg_length\n\t\t\nnon_fwy_clinetot = str(non_fwy_clinetot/5280)\n\n#bicycle facility miles in region----------------------------------------------\n\n#activate this code if you want to only include bike segments whose centroid\n#is within 0.25mi of the centerline file\ntmpbuff_reg = \"in_memory/tmpbuffproj\"\narcpy.Buffer_analysis(\"inCenterline_lyr\", \n\t\t\t\t\t\t\ttmpbuff_reg, \n\t\t\t\t\t\t\tbufferDist1, \n\t\t\t\t\t\t\t\"FULL\", \n\t\t\t\t\t\t\t\"ROUND\", \n\t\t\t\t\t\t\t\"ALL\")\n\nif int(Year) == 2036: #should change this so it's not hard-coded as 2036/2012!\n\tbike_lyr = future_bikeways_fl\nelse:\n\tbike_lyr = \"currBikeways_lyr\"\n\t\narcpy.SelectLayerByLocation_management(bike_lyr, \n\t\t\t\t\t\t\t\t\t\t\t\"HAVE_THEIR_CENTER_IN\",tmpbuff_reg)\n\n#arcpy.SelectLayerByLocation_management(\"currBikeways_lyr\", \n#\t\t\t\t\t\t\t\t\"HAVE_THEIR_CENTER_IN\",tmpbuff_reg)\n\nclass1r = 0\nclass2r = 0\nclass3r = 0\nclass4r = 0\n\nwhere_clause = \"BIKE_CLASS IN (1,2,3,4)\"\nwith arcpy.da.SearchCursor(bike_lyr,[\"SHAPE@LENGTH\",\"BIKE_CLASS\"],\n\t\t\t\t\t\t\twhere_clause) as bikeCursor:\n\tfor aCur in bikeCursor:\n\t\tshapeLength = aCur[0]\n\t\tif shapeLength is None:\n\t\t\tshapeLength = 0\n\t\tif aCur[1] == 1:\n\t\t\tclass1r += shapeLength\n\t\tif aCur[1] == 2:\n\t\t\tclass2r += shapeLength\n\t\tif aCur[1] == 3:\n\t\t\tclass3r += shapeLength\n\t\tif aCur[1] == 4:\n\t\t\tclass4r += shapeLength\n\nstringBikClsLenReg = ''\ncnt = 1\nfor i in [class1r,class2r,class3r,class4r]:\n\tif cnt == 1:\n\t\tstringBikClsLenReg = str(i/5280) #convert feet to miles\n\telse:\n\t\tstringBikClsLenReg = stringBikClsLenReg + ',' + str(i/5280)\n\tcnt += 1\n\n#regional transit stop data (unique stops and svc density-----------------------\n\n#transit stops within 0.25mi of a centerline file feature\nregl_transit_data = ppaf.transitEvents(\"inCenterline_lyr\",bufferDist1)\n\n#regional collision data-----------------------------------------------------\nprint(\"aggregating regional collision data...\")\n#different from normal collision selection; not spatial selection, so that it\n#captures all collisions, including those that happened in SACOG region but are\n#mapped outside of it due to bad geocoding.\n\nfwy_colln = 0\nnfwy_colln = 0\nfwyfatal_colln = 0\nnfwyfatal_colln = 0\n\nwith arcpy.da.SearchCursor(\"inTIMS_lyr\",[\"KILLED\",\"fwy_yn\"]) as aCursor:\n\tfor aCur in aCursor:\n\t\tfatal_ind = aCur[0] #\"KILLED\" column\n\t\tif aCur[1] == 1: #\"fwy_yn\" column\n\t\t\tif fatal_ind > 0: #if fatal, add to both \"total\" and \"fatal\" count\n\t\t\t\tfwyfatal_colln += 1 #otherwise, count just in \"total\" count\n\t\t\t\tfwy_colln += 1\n\t\t\telse:\n\t\t\t\tfwy_colln += 1 \n\t\telse:\n\t\t\tif fatal_ind > 0:\n\t\t\t\tnfwyfatal_colln += 1\n\t\t\t\tnfwy_colln += 1\n\t\t\telse:\n\t\t\t\tnfwy_colln += 1\n\nregionTIMSCounts = str(fwy_colln) + \",\" \\\n\t\t\t\t+ str(nfwy_colln) + \",\" \\\n\t\t\t\t + str(fwyfatal_colln) + \",\" \\\n\t\t\t\t + str(nfwyfatal_colln)\n\n 
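# sanity check of the counting loop above, on hypothetical rows (not drawn\n# from the real TIMS layer): (KILLED=0, fwy_yn=1), (KILLED=2, fwy_yn=1),\n# (KILLED=0, fwy_yn=0) would yield fwy_colln=2, fwyfatal_colln=1,\n# nfwy_colln=1, nfwyfatal_colln=0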
\nregionTIMSSums = ppaf.pointSum(\"inTIMS_lyr\",varListCollisionSums,50, \n\t\t\t\t\t\t\"inprojectsList_lyr\", \n\t\t\t\t\t\ttrim_location = False, bufType=\"WITHIN_A_DISTANCE\")\n\t\t\t\t\t\t\nregion_collisions = regionTIMSCounts + ',' + regionTIMSSums\n\ndel aCursor\n#regional model network data (vmt, lane miles, route miles, etc.)---------------\nSQLFwy = 'CAPCLASS IN (1,6,26,36,56,8,9)'\nSQLNotFwy = 'CAPCLASS NOT IN (1,6,26,36,56,8,9,7,62,63,99)'\n\nnetwork_metrx = [\"DAYVMT\",\"DAYCVMT\",\"LANEMI\",\"DISTANCE\"]\n\nstringFwyModregn = ppaf.modelBuffCalc(\"inModelNetwork_lyr\",network_metrx,SQLFwy)\nstringStreetModregn = ppaf.modelBuffCalc(\"inModelNetwork_lyr\",network_metrx,SQLNotFwy)\n\nregnlevel_model = stringFwyModregn + ',' + stringStreetModregn\n\n\n#regional accessibility---------------------------------------------------\nprint(\"getting regional accessibility stats...\")\n\ntazDict = {}\nwith open(accessTxt,'r') as f:\n\tinDict = csv.DictReader(f)\n\n\t#create dict of all TAZs along with their accessibility metrics\n\tfor i in inDict:\n\t\ttazDict[int(i['TAZ'])] = [float(i['JOBS30D']), #0\n\t\t\t\t\t\t\t\tfloat(i['JOBS45T']), #1\n\t\t\t\t\t\t\t\tfloat(i['POP']), #2\n\t\t\t\t\t\t\t\tfloat(i['WORKERS30D']), #3\n\t\t\t\t\t\t\t\tfloat(i['WORKERS45T']), #4\n\t\t\t\t\t\t\t\tfloat(i['JOBS'])] #5\n\t\n\t\nsumJobsD = 0 #simple sum of jobs within driving n mins driving\nsumJobsT = 0 ##simple sum of jobs within driving n mins via transit\nsumWorkersD = 0 #simple sum of workers within driving dist\nsumWorkersT = 0 #simple sum of workers within transit dist\nsumProductJobsD = 0 #for each taz, multiply jobacc*pop, then sum those products\nsumProductJobsT = 0 #same as sumProductJobsD, but for driving\nsumProductWkrsD = 0 #for each taz, multiple workers accessed * jobs\nsumProductWkrsT = 0\nsumTAZjobs = 0\nsumTAZPops = 0 #sum of pop in all TAZs intersecting project\nTAZcnt = 0 #count of TAZs the project intersects\n\n#create iterator for TAZs that intersect the project segments\nwith arcpy.da.SearchCursor(\"inTAZ_lyr\",\"TAZ07\") as accCursor:\n\tfor row in accCursor: #get total pop of all tazs intersecting project, plus \n\t#sumproducts for weighted avg.\n\t\ttaz = row[0]\n\t\tsumJobsD += tazDict[taz][0]\n\t\tsumJobsT += tazDict[taz][1]\n\t\tsumWorkersD += tazDict[taz][3]\n\t\tsumWorkersT += tazDict[taz][4]\n\t\tsumProductJobsD += tazDict[taz][0]*tazDict[taz][2]\n\t\tsumProductJobsT += tazDict[taz][1]*tazDict[taz][2]\n\t\tsumProductWkrsD += tazDict[taz][3]*tazDict[taz][5] #driving workers weightd x jobs\n\t\tsumProductWkrsT += tazDict[taz][4]*tazDict[taz][5] #transit wkrs weighted x jobs\n\t\tsumTAZjobs += tazDict[taz][5]\n\t\tsumTAZPops += tazDict[taz][2]\n\t\tTAZcnt += 1\n\t\n#jobs weighted by resident (POP)\nregnJobAccD = str(sumProductJobsD/sumTAZPops)\nregnJobAccT = str(sumProductJobsT/sumTAZPops)\n\n#accessibility to workers, weighted by jobs.\nregnJobWkrAccD = str(sumProductWkrsD/sumTAZjobs) \nregnJobWkrAccT = str(sumProductWkrsT/sumTAZjobs)\n\n\t\nregnAccMeasures = regnJobAccD + ',' + regnJobAccT + ',' \\\n\t\t\t\t\t+ regnJobWkrAccD + ',' + regnJobWkrAccT\n\n#----------------consolidate to write to script-----------------------\nprint(\"writing regional values to CSV...\")\nregion_variables = [projstat_regn, region_cline_tot, \n\t\t\t\t\tregn_area_qmi, regn_area_hmi, region_pcl, region_ilut,\n\t\t\t\t\tregion_intersxncnt, job_ctrs_region, \n\t\t\t\t\tnon_fwy_clinetot, stringBikClsLenReg, regl_transit_data, \n\t\t\t\t\tregion_collisions, regnlevel_model, 
regnAccMeasures]\n\t\t\t\t\t\nregion_values = ','.join(region_variables) + '\\n'\n\nwith open(outputFile,'w') as f:\n f.write(header_line)\n f.write(region_values)\n\n\nend_time = time.time()\nelapsed = str(round((end_time - start_time)/60,1))\nprint(\"Time elapsed: \" + elapsed + \" mins\")\n\n","sub_path":"PPAv1.0/RegionCommType_Calcs/getMasterTable_region_bikwyfix.py","file_name":"getMasterTable_region_bikwyfix.py","file_ext":"py","file_size_in_byte":16361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"472912128","text":"import logging\n\nfrom PyQt5 import QtWidgets, QtGui\n\nfrom ...core.mixins import ToolWindow\nfrom ....core.devices import Device\nfrom ....core.instrument.instrument import Instrument\nfrom ....core.instrument.privileges import PRIV_CONNECTDEVICES\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass DeviceConnections(QtWidgets.QWidget, ToolWindow):\n required_privilege = PRIV_CONNECTDEVICES\n\n def __init__(self, *args, **kwargs):\n credo = kwargs.pop('credo')\n QtWidgets.QWidget.__init__(self, *args, **kwargs)\n self.setupToolWindow(credo)\n self._connectbuttons = {}\n self._disconnectbuttons = {}\n self.setupUi(self)\n\n @classmethod\n def testRequirements(cls, credo: Instrument):\n return super().testRequirements(credo) and credo.online\n\n def setupUi(self, Form):\n assert isinstance(self.credo, Instrument)\n self.layout = QtWidgets.QGridLayout(self)\n for i, d in enumerate(sorted(self.credo.devices)):\n dev = self.credo.devices[d]\n assert isinstance(dev, Device)\n self.layout.addWidget(QtWidgets.QLabel(dev.name, self), i, 0)\n self._connectbuttons[d] = QtWidgets.QPushButton('Connect', self)\n self.layout.addWidget(self._connectbuttons[d], i, 1)\n self._connectbuttons[d].clicked.connect(self.onButtonClicked)\n self._disconnectbuttons[d] = QtWidgets.QPushButton('Disconnect', self)\n self.layout.addWidget(self._disconnectbuttons[d], i, 2)\n self._disconnectbuttons[d].clicked.connect(self.onButtonClicked)\n self.setWindowTitle('Connect/disconnect devices')\n self.setWindowIcon(QtGui.QIcon.fromTheme('network-idle'))\n\n def onButtonClicked(self):\n assert isinstance(self.credo, Instrument)\n for devname in self._disconnectbuttons:\n if self._disconnectbuttons[devname] is self.sender():\n dev = self.credo.get_device(devname)\n assert isinstance(dev, Device)\n try:\n dev.disconnect_device()\n except Exception as exc:\n logger.error('Error while disconnecting from device {}: {}'.format(devname, exc))\n for devname in self._connectbuttons:\n if self._connectbuttons[devname] is self.sender():\n dev = self.credo.get_device(devname)\n assert isinstance(dev, Device)\n try:\n dev.reconnect_device()\n except Exception as exc:\n logger.error('Error while reconnecting to device {}: {}'.format(devname, exc))\n","sub_path":"cct/qtgui/devices/connections/connections.py","file_name":"connections.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"301495721","text":"from django import template\nfrom apps.report.models import Repeat as result\nfrom apps.report.models import Crash as crash\nregister = template.Library()\n\n@register.assignment_tag\ndef result_filter(testinfoid, testresult):\n if result.objects.all().filter(testinfoid=testinfoid).exists():\n qs = result.objects.all().filter(testinfoid=testinfoid, testresult=testresult).count()\n else:\n qs = 0\n return qs\n\n@register.assignment_tag\ndef 
crash_filter(testinfoid, testresult):\n    if crash.objects.filter(his_id=testinfoid).exists():\n        qs = crash.objects.filter(his_id=testinfoid).count()\n        crashs = str(qs)\n    else:\n        crashs = 0\n    return crashs","sub_path":"apps/report/templatetags/report_extras.py","file_name":"report_extras.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"533118975","text":"# -*-coding: utf-8-*-\nimport logging\nimport sys\nimport traceback\nfrom itertools import cycle\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport decimal\n\nclass LocalMysqlTest(object):\n    # initialization\n    def __init__(self,schema='test'):\n        self._engin = create_engine('mysql+pymysql://root:root@localhost:3306/'+schema+'?charset=utf8mb4')\n\n    # connection pool status\n    def getPoolStatus(self):\n        return self._engin.pool.status()\n\n    # get a connection\n    def getConnection(self):\n        conn = self._engin.connect()\n        # logging.info(self._engin.pool.status())\n        # print(\"=======\")\n        return conn\n\n    # release a connection\n    def closeConnection(self, conn):\n        if conn:\n            conn.close()\n        # print(self._engin.pool.status())\n        # print(\"********\")\n\n    # execute SQL\n    def executeSqlByEngine(self, sql='SELECT * FROM DUAL'):\n        return self._engin.execute(sql)\n\n    # execute SQL\n    def executeSqlByConn(self, sql='SELECT * FROM DUAL', conn=None):\n        conn = conn or self.getConnection()\n        with conn as connection:\n            return connection.execute(sql)\n\n    # batch-execute update SQL statements\n    def executeSqlManyByConn(self, sql='',data=[], conn=None):\n        if len(data) >0 :\n            conn = conn or self.getConnection()\n            with conn as connection:\n                return connection.execute(sql,data)\n\n    # load data into a DataFrame (custom index); not recommended\n    def load_DataFrame_Conn(self, sql='SELECT * FROM DUAL', conn=None):\n        conn = conn or self.getConnection()\n        with conn as connection:\n            dataList = list(connection.execute(sql))\n            dataFrame = pd.DataFrame(dataList, index=[(n + 1) for n in range(len(dataList))])\n            return dataFrame\n\n    # load data into a DataFrame; not recommended\n    def get_DataFrame_Conn(self, sql='SELECT * FROM DUAL', conn=None):\n        conn = conn or self.getConnection()\n        with conn as connection:\n            dataList = list(connection.execute(sql))\n            dataFrame = pd.DataFrame(dataList)\n            return dataFrame\n\n    # load data into a DataFrame\n    def get_DataFrame_PD(self, sql='SELECT * FROM DUAL', conn=None):\n        conn = conn or self.getConnection()\n        with conn as connection:\n            dataFrame = pd.read_sql(sql, connection)\n            return dataFrame\n\n    # save a DataFrame to the database\n    def save_DataFrame_PD(self, pd, table, conn=None):\n        conn = conn or self.getConnection()\n        with conn as connection:\n            pd.to_sql(table, connection, if_exists='append', index=False)\n\n\nif __name__ == '__main__':\n\n    logging.basicConfig(level=logging.DEBUG,\n                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                        datefmt='%Y-%m-%d %H:%M:%S',\n                        stream=sys.stdout,\n                        filemode='a+')\n\n    try:\n        qunaMysql = LocalMysqlTest()\n\n\n    except:\n        ex = traceback.format_exc()\n        logging.error(ex)\n    finally:\n        print(qunaMysql._engin.pool.status())\n        # print(qunaMysql._engin.pool.checkedin())\n        # print(qunaMysql._engin.pool.checkedout())\n        pass\n","sub_path":"model/mysql/LocalMysqlTest.py","file_name":"LocalMysqlTest.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"615789405","text":"# Authors: Soledad Galli \n# License: BSD 3 clause\n\nfrom typing import List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom feature_engine.encoding.base_encoder import BaseCategoricalTransformer\nfrom 
feature_engine.validation import _return_tags\nfrom feature_engine.variable_manipulation import _check_input_parameter_variables\n\n\nclass WoEEncoder(BaseCategoricalTransformer):\n \"\"\"\n The WoEEncoder() replaces categories by the weight of evidence\n (WoE). The WoE was used primarily in the financial sector to create credit risk\n scorecards.\n\n The encoder will encode only categorical variables by default\n (type 'object' or 'categorical'). You can pass a list of variables to encode.\n Alternatively, the encoder will find and encode all categorical variables\n (type 'object' or 'categorical').\n\n With `ignore_format=True` you have the option to encode numerical variables as well.\n The procedure is identical, you can either enter the list of variables to encode, or\n the transformer will automatically select all variables.\n\n The encoder first maps the categories to the weight of evidence for each variable\n (fit). The encoder then transforms the categories into the mapped numbers\n (transform).\n\n This categorical encoding is exclusive for binary classification.\n\n **Note**\n\n The log(0) is not defined and the division by 0 is not defined. Thus, if any of the\n terms in the WoE equation are 0 for a given category, the encoder will return an\n error. If this happens, try grouping less frequent categories.\n\n More details in the :ref:`User Guide `.\n\n Parameters\n ----------\n variables: list, default=None\n The list of categorical variables that will be encoded. If None, the\n encoder will find and transform all variables of type object or categorical by\n default. You can also make the transformer accept numerical variables, see the\n next parameter.\n\n ignore_format: bool, default=False\n Whether the format in which the categorical variables are cast should be\n ignored. If False, the encoder will automatically select variables of type\n object or categorical, or check that the variables entered by the user are of\n type object or categorical. If True, the encoder will select all variables or\n accept all variables entered by the user, including those cast as numeric.\n\n Attributes\n ----------\n encoder_dict_:\n Dictionary with the WoE per variable.\n\n variables_:\n The group of variables that will be transformed.\n\n n_features_in_:\n The number of features in the train set used in fit.\n\n Methods\n -------\n fit:\n Learn the WoE per category, per variable.\n transform:\n Encode the categories to numbers.\n fit_transform:\n Fit to the data, then transform it.\n inverse_transform:\n Encode the numbers into the original categories.\n\n Notes\n -----\n For details on the calculation of the weight of evidence visit:\n https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html\n\n NAN are introduced when encoding categories that were not present in the training\n dataset. 
If this happens, try grouping infrequent categories using the\n    RareLabelEncoder().\n\n    There is a similar implementation in the open-source package\n    `Category encoders `_\n\n    See Also\n    --------\n    feature_engine.encoding.RareLabelEncoder\n    feature_engine.discretisation\n    category_encoders.woe.WOEEncoder\n    \"\"\"\n\n    def __init__(\n        self,\n        variables: Union[None, int, str, List[Union[str, int]]] = None,\n        ignore_format: bool = False,\n    ) -> None:\n\n        if not isinstance(ignore_format, bool):\n            raise ValueError(\"ignore_format takes only booleans True and False\")\n\n        self.variables = _check_input_parameter_variables(variables)\n        self.ignore_format = ignore_format\n\n    def fit(self, X: pd.DataFrame, y: pd.Series):\n        \"\"\"\n        Learn the WoE.\n\n        Parameters\n        ----------\n        X: pandas dataframe of shape = [n_samples, n_features]\n            The training input samples.\n            Can be the entire dataframe, not just the categorical variables.\n\n        y: pandas series.\n            Target, must be binary.\n        \"\"\"\n\n        X = self._check_fit_input_and_variables(X)\n\n        if not isinstance(y, pd.Series):\n            y = pd.Series(y)\n\n        # check that y is binary\n        if y.nunique() != 2:\n            raise ValueError(\n                \"This encoder is designed for binary classification. The target \"\n                \"used has more than 2 unique values.\"\n            )\n\n        temp = pd.concat([X, y], axis=1)\n        temp.columns = list(X.columns) + [\"target\"]\n\n        # if target does not have values 0 and 1, we need to remap, to be able to\n        # compute the averages.\n        if any(x for x in y.unique() if x not in [0, 1]):\n            temp[\"target\"] = np.where(temp[\"target\"] == y.unique()[0], 0, 1)\n\n        self.encoder_dict_ = {}\n\n        total_pos = temp[\"target\"].sum()\n        total_neg = len(temp) - total_pos\n        temp[\"non_target\"] = np.where(temp[\"target\"] == 1, 0, 1)\n\n        for var in self.variables_:\n            pos = temp.groupby([var])[\"target\"].sum() / total_pos\n            neg = temp.groupby([var])[\"non_target\"].sum() / total_neg\n\n            t = pd.concat([pos, neg], axis=1)\n            t[\"woe\"] = np.log(t[\"target\"] / t[\"non_target\"])\n\n            if (\n                not t.loc[t[\"target\"] == 0, :].empty\n                or not t.loc[t[\"non_target\"] == 0, :].empty\n            ):\n                raise ValueError(\n                    \"The proportion of one of the classes for a category in \"\n                    \"variable {} is zero, and log of zero is not defined\".format(var)\n                )\n\n            self.encoder_dict_[var] = t[\"woe\"].to_dict()\n\n        self._check_encoding_dictionary()\n\n        self.n_features_in_ = X.shape[1]\n\n        return self\n\n    # Ugly work around to import the docstring for Sphinx, otherwise not necessary\n    def transform(self, X: pd.DataFrame) -> pd.DataFrame:\n        X = super().transform(X)\n\n        return X\n\n    transform.__doc__ = BaseCategoricalTransformer.transform.__doc__\n\n    def inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame:\n        X = super().inverse_transform(X)\n\n        return X\n\n    inverse_transform.__doc__ = BaseCategoricalTransformer.inverse_transform.__doc__\n\n    def _more_tags(self):\n        tags_dict = _return_tags()\n        # in the current format, the tests are performed using continuous np.arrays\n        # this means that when we encode some of the values, the denominator is 0\n        # and thus the transformer raises an error, and the test fails.\n        # For this reason, most sklearn transformers will fail. 
And it has nothing to\n # do with the class not being compatible, it is just that the inputs passed\n # are not suitable\n tags_dict[\"_skip_test\"] = True\n return tags_dict\n","sub_path":"feature_engine/encoding/woe.py","file_name":"woe.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"156030841","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['api.py'],\n pathex=['/Users/liuqian/Documents/GitHub/GenG4/pygeng4'],\n binaries=[],\n datas=[],\n hiddenimports = [ #for gevent 1.3.1\n 'gevent.__greenlet_primitives',\n 'gevent.__hub_local',\n 'gevent.__hub_primitives',\n 'gevent.__ident',\n 'gevent.__imap',\n 'gevent.__semaphore',\n 'gevent.__tracer',\n 'gevent.__waiter',\n 'gevent._event',\n 'gevent._greenlet',\n 'gevent._local',\n 'gevent._queue',\n\n # info steal from gevent.monkey patch_all() args\n 'gevent.os',\n 'gevent.time',\n 'gevent.thread',\n # 'gevent.sys',\n 'gevent.socket',\n 'gevent.select',\n 'gevent.ssl',\n # 'gevent.httplib',\n 'gevent.subprocess',\n 'gevent.builtins',\n 'gevent.signal',\n\n 'gevent.libev',\n 'gevent.libev.corecext',\n 'gevent.libev.corecffi',\n 'gevent.libev.watcher',\n\n 'gevent.libuv',\n 'gevent.libuv._corecffi',\n 'gevent.libuv.loop',\n 'gevent.libuv.watcher',\n ],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='api',\n debug=False,\n strip=False,\n upx=True,\n console=True )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name='api')\n","sub_path":"pygeng4/api.spec","file_name":"api.spec","file_ext":"spec","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118589785","text":"# -*- encoding: utf-8 -*-\nfrom lazyblacksmith.extension.celery_app import celery_app\nfrom lazyblacksmith.extension.esipy import esiclient\nfrom lazyblacksmith.extension.esipy import esisecurity\nfrom lazyblacksmith.extension.esipy.operations import get_characters_skills\nfrom lazyblacksmith.models import User\nfrom lazyblacksmith.models import Item\nfrom lazyblacksmith.models import Skill\nfrom lazyblacksmith.models import TaskStatus\nfrom lazyblacksmith.models import db\nfrom lazyblacksmith.utils.time import utcnow\n\nfrom datetime import datetime\nfrom email.utils import parsedate\n\nimport json\nimport pytz\n\n\n@celery_app.task(name=\"character_skill_update\")\ndef update_character_skills(character_id):\n skill_number = 0\n\n character = User.query.get(character_id)\n if character is None:\n return\n\n esisecurity.update_token(character.get_sso_data())\n\n character_skills = esiclient.request(\n get_characters_skills(character_id=character_id),\n )\n\n if character_skills.status == 200:\n for skill_object in character_skills.data.skills:\n item = Item.query.get(skill_object.skill_id)\n if item is None:\n continue\n\n char_skill = character.skills.filter(\n Skill.skill_id == item.id\n ).one_or_none()\n \n if char_skill:\n char_skill.level = skill_object.current_skill_level\n else: \n skill = Skill(\n character=character,\n skill=item,\n level=skill_object.current_skill_level,\n )\n db.session.merge(skill)\n skill_number += 1\n\n db.session.commit()\n\n task_status = TaskStatus(\n name=TaskStatus.TASK_CHARACTER_SKILLS % 
character_id,\n expire=datetime(\n *parsedate(character_skills.header['Expires'][0])[:6]\n ).replace(tzinfo=pytz.utc),\n last_run=utcnow(),\n results=json.dumps({\n 'character_id': character_id,\n 'inserted': skill_number\n })\n )\n db.session.merge(task_status)\n db.session.commit()\n","sub_path":"lazyblacksmith/tasks/character_skills.py","file_name":"character_skills.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"504493991","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# Import results\ndf1 = pd.read_csv('results/srl_base')\ndf2 = pd.read_csv('results/ne_base')\ndf3 = pd.read_csv('results/sst_propAll_base')\n\ndf4 = pd.read_csv('results/srl_uni')\ndf5 = pd.read_csv('results/ne_uni')\ndf6 = pd.read_csv('results/sst_propAll_uni')\n\ndf7 = pd.read_csv('results/srl_pro')\ndf8 = pd.read_csv('results/ne_pro')\ndf9 = pd.read_csv('results/sst_propAll_pro')\n\n# Dev\nplt.style.use('presentation')\n\nstyles = ['k--', 'b', 'r']\nlegends = ['baseline', 'joint-propAll-Uni', 'joint-propAll-Pro']\n\n# Plot SRL\nsets = ['dev', 'wsj', 'brown']\nfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))\nfor df, style in zip([df1, df4, df7], styles):\n for i,_name in enumerate(sets):\n df[_name].plot(x=df.name, ax=axes[i], style=style, title=_name)\n axes[i].set_xticks(list(range(len(df.name))))\n axes[i].set_xticklabels(df.name, rotation='vertical')\n\n\n# Plot SRL\nsets = ['dev', 'test']\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6,3))\nfor df, style in zip([df3, df6, df9], styles):\n for i,_name in enumerate(sets):\n df[_name].plot(x=df.name, ax=axes[i], style=style, title=_name)\n axes[i].set_xticks(list(range(len(df.name))))\n axes[i].set_xticklabels(df.name, rotation='vertical')\n\n\n# Plot SRL\nsets = ['dev', 'test']\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6,3))\nfor df, style in zip([df2, df5, df8], styles):\n for i,_name in enumerate(sets):\n df[_name].plot(x=df.name, ax=axes[i], style=style, title=_name)\n axes[i].set_xticks(list(range(len(df.name))))\n axes[i].set_xticklabels(df.name, rotation='vertical')\n\n#plt.legend(loc='best')\n#plt.tight_layout()\nplt.show()\n\n","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"355729033","text":"#\n#\n#\n\nfrom norlinter.LintException import LintException\nfrom norlinter.LintResult import LintResult\n\nclass Rule (object):\n def getLines(self, source):\n return source.split(\"\\n\")\n\n def getCharsFromFirstChar(self, line):\n for pos, char in enumerate(line):\n if char != \" \":\n return pos, line[pos:]\n return None, None\n\n def getResult(self, lines, exceptions):\n return LintResult(\"\\n\".join(lines), exceptions)\n\n def execute(self):\n raise Exception(\"Rule::execute() must be implemented\")\n\nclass SingleLineRule (Rule):\n def execute(self, source):\n lines = []\n exceptions = []\n for num, line in enumerate(self.getLines(source)):\n # Ignore everything after the ';'. This parser only supports one line\n # expressions. 
The exceptions are when there is a 'for' loop or when\n        # an expression is nestled in a comment :p\n        comment_idx = line.find(\"//\")\n        idx = line.find(\";\")\n        if not line.strip().startswith(\"for\") and idx != -1 and (comment_idx == -1 or idx < comment_idx):\n            # Strip all excess ';' and have only one ';' at the end of the expression.\n            if comment_idx > -1:\n                line = line[:comment_idx].rstrip(\" ;\") + \"; \" + line[comment_idx:]\n            else:\n                line = line.rstrip(\" ;\") + \";\"\n            idx = line.find(\";\") # Position may have changed.\n            start = \"{};\".format(line[:idx].rstrip())\n            end = line[idx+1:].strip()\n            end = end and \" \" + end or \"\"\n            formatted = self.format(start) + end\n        else:\n            parts = line.split(\"//\")\n            formatted = None\n            # Ignore single-line comments.\n            if len(parts) > 1:\n                formatted = self.format(parts[0])\n                parts.pop(0)\n                comment = \"//\" + \"//\".join(parts)\n                # If there is no code, then the comment was on its own line.\n                # Append as-is w/ no space. Otherwise, put a single space\n                # between end of logic and comment.\n                if len(formatted.strip()) == 0:\n                    formatted = formatted + comment\n                else:\n                    formatted = formatted.rstrip() + \" \" + comment\n            else:\n                formatted = self.format(line)\n        lines.append(formatted)\n        if line != formatted:\n            exceptions.append(LintException(num, line, self.getErrorDescription(), formatted))\n        return self.getResult(lines, exceptions)\n\n    def format(self):\n        raise Exception(\"SingleLineRule::format() must be implemented\")\n\n    def getErrorDescription(self):\n        raise Exception(\"SingleLineRule::getErrorDescription() must be implemented\")\n","sub_path":"norlinter/source/norlinter/rule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462567687","text":"import numpy\nfrom keras.layers import Dense\nfrom keras.models import Sequential\n\n# load pima indians dataset\ndataset = numpy.loadtxt(\n    \"/home/mario/PycharmProjects/deep_learning_A-Z/frist_neural_network_with_keras/data/pima-indians-diabetes.csv\",\n    delimiter=\",\")\n# split into input (X) and output (y) variables\nX = dataset[:, 0:8]\ny = dataset[:, 8]\n\nmodel = Sequential()\nmodel.add(Dense(12, input_dim=8, init='uniform', activation='relu'))\nmodel.add(Dense(8, init='uniform', activation='relu'))\nmodel.add(Dense(1, init='uniform', activation='sigmoid'))\n\n# Compile model\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\nmodel.fit(X, y, batch_size=10, epochs=150)\n\nscores = model.evaluate(X, y)","sub_path":"frist_neural_network_with_keras/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11236069","text":"from PyQt5.QtCore import QRunnable, pyqtSlot, pyqtSignal, QObject\n\n\nclass Update(QRunnable):\n    def __init__(self, gui):\n        super(Update, self).__init__()\n        self.gui = gui\n\n    @pyqtSlot()\n    def run(self):\n        RandomTrackposition = [int(self.gui.CameraOptions_track1lower.text()),\n                               int(self.gui.CameraOptions_track1upper.text()),\n                               int(self.gui.CameraOptions_track2lower.text()),\n                               int(self.gui.CameraOptions_track2upper.text())]\n\n        messageSetRandomTrack = self.gui.cam.SetRandomTracks(2, RandomTrackposition)\n        if messageSetRandomTrack is not None:\n            self.gui.post.eventlog(self.gui, messageSetRandomTrack)\n            return\n\n\nclass OpenImage(QRunnable):\n    def __init__(self, gui):\n        super(OpenImage, self).__init__()\n        
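# keep a handle to the GUI; 'acquiring' is the flag polled by run() below,\n        # so acquisition stops once the camera window is no longer visible\n        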
self.gui = gui\n self.acquiring = False\n\n @pyqtSlot()\n def run(self):\n messageSetAcquisitionMode = self.gui.cam.SetAcquisitionMode(1)\n if messageSetAcquisitionMode is not None:\n self.gui.post.eventlog(self.gui, messageSetAcquisitionMode)\n return\n\n nessageSetReadMode = self.gui.cam.SetReadMode(4)\n if nessageSetReadMode is not None:\n self.gui.post.eventlog(self.gui, nessageSetReadMode)\n return\n\n self.acquiring = self.gui.wincamera.isVisible()\n while self.acquiring:\n self.acquiring = self.gui.wincamera.isVisible()\n messageStartAcquisition = self.gui.cam.StartAcquisition()\n messageGetAcquiredData = self.gui.cam.GetAcquiredData()\n\n","sub_path":"scancars/threads/CameraOptions.py","file_name":"CameraOptions.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"642276628","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QVBoxLayout, QAction, qApp,\n QHBoxLayout, QMessageBox, QWidget, QPushButton)\n\nfrom cagi.mpl_canvas import MplCanvas\nfrom cagi.paths_list import PathsListView\nfrom cagi.axis import XAxis, YAxis\nfrom cagi.misc_menu import MiscMenu\n\n\nclass AppForm(QMainWindow):\n\n \"\"\"Dedicated interface to plot LEDs information\"\"\"\n\n def __init__(self, parent=None):\n\n QMainWindow.__init__(self, parent)\n self.setWindowTitle('Curves Analysis Graphic Interface')\n self.create_main_frame()\n\n def create_main_frame(self):\n \"\"\"Creates interface, initialize widgets and organize screen\"\"\"\n\n self._main_frame = QWidget()\n\n self._canvas = MplCanvas(self._main_frame, width=6, height=5, dpi=100)\n self._paths_list = PathsListView(self._main_frame)\n self._yaxis = YAxis(self._main_frame)\n self._xaxis = XAxis(self._main_frame)\n self._misc = MiscMenu(self._main_frame)\n\n # Plot button\n self._plot_btn = QPushButton('Plot', self)\n self._plot_btn.resize(self._plot_btn.sizeHint())\n self._plot_btn.setStyleSheet(\"background-color: cyan\")\n self._plot_btn.clicked.connect(self.plot_data)\n\n self.organize_screen()\n self.create_menus()\n\n def organize_screen(self):\n \"\"\"Define layout of the screen\"\"\"\n\n box1 = QVBoxLayout()\n box1.addWidget(self._canvas)\n box1.addWidget(self._xaxis)\n\n box2 = QVBoxLayout()\n box2.addWidget(self._yaxis)\n box2.addWidget(self._misc)\n box2.addWidget(self._plot_btn)\n\n box3 = QHBoxLayout()\n box3.addLayout(box2)\n box3.addLayout(box1)\n\n box4 = QVBoxLayout()\n box4.addLayout(box3)\n box4.addWidget(self._paths_list)\n\n self._main_frame.setLayout(box4)\n self._main_frame.setFocus()\n self.setCentralWidget(self._main_frame)\n self.setGeometry(100, 100, 800, 750)\n\n def create_menus(self):\n \"\"\"Creates menus on top of the screen\"\"\"\n\n # 'File' menu\n self._file_menu = self.menuBar().addMenu('&File')\n\n exit_action = QAction('&Exit', self)\n exit_action.setShortcut(Qt.CTRL + Qt.Key_E)\n exit_action.triggered.connect(qApp.quit)\n\n about_action = QAction('&About', self)\n about_action.setShortcut(Qt.CTRL + Qt.Key_A)\n about_action.triggered.connect(self.about_message)\n\n refresh_paths_action = QAction('&Refresh', self)\n refresh_paths_action.setShortcut(Qt.Key_F5)\n refresh_paths_action.triggered.connect(self._paths_list.fetch_paths)\n\n self._file_menu.addAction(refresh_paths_action)\n self._file_menu.addAction(about_action)\n self._file_menu.addAction(exit_action)\n\n 
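# the same actions are registered on the central widget as well, so they\n        # stay reachable through the widget itself and not only via the menu bar\n        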
self._main_frame.addAction(refresh_paths_action)\n        self._main_frame.addAction(about_action)\n        self._main_frame.addAction(exit_action)\n\n        # 'Plot' menu\n        self._plot_menu = self.menuBar().addMenu('&Plot')\n\n        plot_action = QAction('&Plot', self)\n        plot_action.setShortcut(Qt.CTRL + Qt.Key_P)\n        plot_action.triggered.connect(self.plot_data)\n\n        linear_fit_action = QAction('&Linear Fit', self)\n        linear_fit_action.setShortcut(Qt.CTRL + Qt.Key_L)\n        linear_fit_action.triggered.connect(self._canvas.linear_regression)\n\n        parabolic_fit_action = QAction('&Parabolic Fit', self)\n        parabolic_fit_action.setShortcut(Qt.CTRL + Qt.Key_Q)\n        parabolic_fit_action.triggered.connect(\n            self._canvas.parabolic_regression)\n\n        hyperbolic_fit_action = QAction('&Hyperbolic Fit', self)\n        hyperbolic_fit_action.setShortcut(Qt.CTRL + Qt.Key_H)\n        hyperbolic_fit_action.triggered.connect(\n            self._canvas.hyperbolic_regression)\n\n        remove_range_action = QAction('&Remove Points', self)\n        remove_range_action.setShortcut(Qt.CTRL + Qt.Key_R)\n        remove_range_action.triggered.connect(self._canvas.remove_points)\n\n        compare_data_fit_action = QAction('&Compare Fit', self)\n        compare_data_fit_action.setShortcut(Qt.CTRL + Qt.Key_D)\n        compare_data_fit_action.triggered.connect(self._canvas.plot_diff_fit)\n\n        add_stddev_segments_action = QAction('&Std. dev. Segments', self)\n        add_stddev_segments_action.setShortcut(Qt.CTRL + Qt.Key_S)\n        add_stddev_segments_action.triggered.connect(\n            self._canvas.plot_stddev_fit)\n\n        self._plot_menu.addAction(plot_action)\n        self._plot_menu.addAction(remove_range_action)\n        self._plot_menu.addAction(linear_fit_action)\n        self._plot_menu.addAction(parabolic_fit_action)\n        self._plot_menu.addAction(hyperbolic_fit_action)\n        self._plot_menu.addAction(compare_data_fit_action)\n        self._plot_menu.addAction(add_stddev_segments_action)\n\n        self._main_frame.addAction(plot_action)\n        self._main_frame.addAction(remove_range_action)\n        self._main_frame.addAction(linear_fit_action)\n        self._main_frame.addAction(parabolic_fit_action)\n        self._main_frame.addAction(hyperbolic_fit_action)\n        self._main_frame.addAction(compare_data_fit_action)\n        self._main_frame.addAction(add_stddev_segments_action)\n\n    def about_message(self):\n        \"\"\"About message\"\"\"\n\n        QMessageBox.about(self._main_frame, 'About',\n                          '''\n            Data analysis application.\n\n            Choose the csv file which contains the data,\n            select what information X and Y axis represent,\n            plot the data.\n\n            If necessary, remove non-useful data points.\n            Fit the best line to interpret the data.\n            Calculate the standard deviation of values, and compare them to the predicted values.\n            ''')\n\n    def set_san_values(self, values):\n        \"\"\"Set step and n values in misc menu\"\"\"\n\n        self._misc.set_san_values(values)\n\n    def plot_data(self):\n        \"\"\"Plots data\"\"\"\n\n        if not self._yaxis.is_valid() or not self._xaxis.is_valid() or not self._misc.is_valid():\n            QMessageBox.question(\n                self, 'Error', 'Choose a valid plot', QMessageBox.Ok, QMessageBox.Ok)\n            return\n\n        self._canvas.plot_data(self._yaxis.get_ind(), self._xaxis.get_ind(),\n                               self._misc.get_values(), self._paths_list.get_path())\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    form = AppForm()\n    form.show()\n    app.exec_()\n","sub_path":"analyser.py","file_name":"analyser.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"396926137","text":"from helpers import *\n\nfrom mobject.tex_mobject import TexMobject\nfrom mobject 
import Mobject\nfrom mobject.image_mobject import ImageMobject\nfrom mobject.vectorized_mobject import *\n\nfrom animation.animation import Animation\nfrom animation.transform import *\nfrom animation.simple_animations import *\nfrom animation.playground import *\nfrom topics.geometry import *\nfrom topics.characters import *\nfrom topics.functions import *\nfrom topics.fractals import *\nfrom topics.number_line import *\nfrom topics.combinatorics import *\nfrom topics.numerals import *\nfrom topics.three_dimensions import *\nfrom scene import Scene\nfrom camera import Camera\nfrom mobject.svg_mobject import *\nfrom mobject.tex_mobject import *\n\nclass FractalCreation(Scene):\n CONFIG = {\n \"fractal_class\" : PentagonalFractal,\n \"max_order\" : 5,\n \"transform_kwargs\" : {\n \"path_arc\" : np.pi/6,\n \"submobject_mode\" : \"lagged_start\",\n \"run_time\" : 2,\n },\n \"fractal_kwargs\" : {},\n }\n def construct(self):\n fractal = self.fractal_class(order = 0, **self.fractal_kwargs)\n self.play(FadeIn(fractal))\n for order in range(1, self.max_order+1):\n new_fractal = self.fractal_class(\n order = order,\n **self.fractal_kwargs\n )\n fractal.align_data(new_fractal)\n self.play(Transform(\n fractal, new_fractal,\n **self.transform_kwargs\n ))\n self.dither()\n self.dither()\n self.fractal = fractal\n\nclass PentagonalFractalCreation(FractalCreation):\n pass\n\nclass DiamondFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : DiamondFractal,\n \"max_order\" : 6,\n \"fractal_kwargs\" : {\"height\" : 6}\n }\n\n\nclass PiCreatureFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : PiCreatureFractal,\n \"max_order\" : 6,\n \"fractal_kwargs\" : {\"height\" : 6},\n \"transform_kwargs\" : {\n \"submobject_mode\" : \"all_at_once\",\n \"run_time\" : 2,\n },\n }\n def construct(self):\n FractalCreation.construct(self)\n fractal = self.fractal\n smallest_pi = fractal[0][0]\n zoom_factor = 0.1/smallest_pi.get_height()\n fractal.generate_target()\n fractal.target.shift(-smallest_pi.get_corner(UP+LEFT))\n fractal.target.scale(zoom_factor)\n self.play(MoveToTarget(fractal, run_time = 10))\n self.dither()\n\nclass QuadraticKochFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : QuadraticKoch,\n \"max_order\" : 5,\n \"fractal_kwargs\" : {\"radius\" : 10},\n # \"transform_kwargs\" : {\n # \"submobject_mode\" : \"all_at_once\",\n # \"run_time\" : 2,\n # },\n }\n\nclass KochSnowFlakeFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : KochSnowFlake,\n \"max_order\" : 6,\n \"fractal_kwargs\" : {\n \"radius\" : 6,\n \"num_submobjects\" : 100,\n },\n \"transform_kwargs\" : {\n \"submobject_mode\" : \"lagged_start\",\n \"path_arc\" : np.pi/6,\n \"run_time\" : 2,\n },\n }\n\n\nclass WonkyHexagonFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : WonkyHexagonFractal,\n \"max_order\" : 5,\n \"fractal_kwargs\" : {\"height\" : 6},\n }\n\n\n\nclass CircularFractalCreation(FractalCreation):\n CONFIG = {\n \"fractal_class\" : CircularFractal,\n \"max_order\" : 5,\n \"fractal_kwargs\" : {\"height\" : 6},\n }\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"old_projects/fractal_charm.py","file_name":"fractal_charm.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405374273","text":"# File: debugCoinToss.py\n# Description: code provided in chapter 11 of Automate the Boring Things in Python to 
prompt user to guess heads or tails of\n# a coin flip, however, it contains bugs.\n# Bugs fixed: guess variable mistyped as \"guesss\"\n# toss variable is assigned a random number representing the coin flip, while the guess variable is assigned a\n# string by the user, and any comparisons between the 2 would fail.\n\n\n\nimport random\n\n\n\n# constant dictionary to contain the valid coin flip options\nkCoinSides = {0: \"heads\", 1: \"tails\"}\n\nguess = ''\n\n# loop that prompts the user to enter a side of a coin until they pick a valid option\nwhile guess.lower() != kCoinSides[0] and guess.lower() != kCoinSides[1]:\n print('Guess the coin toss! Enter heads or tails:')\n guess = input()\n\ntoss = kCoinSides[random.randint(0, 1)] # 0 is heads, 1 is tails\n\n# if the generated coin flip is the same as the user's guess, a winning message is displayed\nif toss == guess:\n print('You got it!')\n# else, the user is asked to guess again\nelse:\n print('Nope! Guess again!')\n guess = input()\n # if the user guessed the correct coin flip, a winning message is displayed\n if toss == guess:\n print('You got it!')\n # else, a losing message is displayed\n else:\n print('Nope. You are really bad at this game.')","sub_path":"Chapter11/debugCoinToss.py","file_name":"debugCoinToss.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"398019284","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Kenzhaoyihui\n# Date: 2016.11.28\n\nfrom selenium import webdriver\nimport time\n\n\ndef LJ():\n\tdr = webdriver.Firefox()\n\tid = dr.find_element_by_id\n\tclass_name = dr.find_element_by_class_name\n\tlj = dr.find_elements_by_class_name\n\txpath = dr.find_element_by_xpath\n\n\tdr.get(\"http://180.209.113.96\")\n\tid(\"txtUserName\").send_keys(\"13030915\")\n# time.sleep(0.5)\n\tid(\"txtPassword\").send_keys(\"13030915\")\n# time.sleep(0.5)\n\tid(\"cmdOK\").click()\n\ttime.sleep(2)\n\n\ticon = lj(\"x-tree-ec-icon\")\n\ticon_lj_list = list(icon)\n\tfor i in icon_lj_list[0:3]:\n\t\ti.click()\n# time.sleep(1)\n\ttime.sleep(1)\n\txpath(\".//*[@id='ext-gen74']/li[1]/div/a/span\").click()\n\ttime.sleep(1)\n# dr.switch_to_default_content()\n\tdr.switch_to_frame(\"dynamic_added_tabxnode1\")\n\txpath(\".//*[@id='ext-gen45']/div[1]/table/tbody/tr/td[7]/div/a/img\").click()\n# xpath(\".//*[@id='ext-gen45']/div[1]/table/tbody/tr/td[7]/div/a/img\").click()\n\ttime.sleep(2)\n\tdr.switch_to_default_content()\n\tdr.switch_to_frame(\"ext-gen98\")\n\ttime.sleep(1)\n\txpath(\".//*[@id='t101001001']\").click()\n\txpath(\".//*[@id='101001001']/div[4]\").click()\n\ttime.sleep(1)\n\tdr.switch_to_frame(\"ext-gen18\")\n\tid(\"ext-gen42\").click()\n\t#id(\"ext-gen41\").click()\n\nif __name__ == \"__main__\":\n\tLJ()\n","sub_path":"LJ_demo.py","file_name":"LJ_demo.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"204713391","text":"from sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nimport numpy as np\nimport warnings\n\n\ndef versiontuple(v):\n return tuple(map(int, (v.split(\".\"))))\n\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n # Setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 
'v')\n colors = ('red', 'blue', 'lightgreen', 'brown', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # Plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\n alpha=0.8, c=cmap(idx),\n marker=markers[idx], label=cl)\n\n # Highlight test samples\n if test_idx:\n # Plot all samples\n if not versiontuple(np.__version__) >= versiontuple('1.9.0'):\n X_test, y_test = X[list(test_idx), :], y[list(test_idx)]\n warnings.warn('Please update to NumPy 1.9.0 or newer')\n else:\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0], X_test[:, 1], c='black', alpha=0.3, linewidths=3,\n marker='o', s=55, label='test set')\n\n\nif __name__ == '__main__':\n\n # Loading the data\n iris = datasets.load_iris()\n X = iris.data[:, [2, 3]]\n y = iris.target\n\n # Splitting the data into training and test dataset\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=0)\n # Standardizing the data\n sc = StandardScaler()\n sc.fit(X_train)\n X_train_std = sc.transform(X_train)\n X_test_std = sc.transform(X_test)\n X_combined_std = np.vstack((X_train_std, X_test_std))\n y_combined = np.hstack((y_train, y_test))\n\n # In SVMs, our optimization objective is to maximize the margin. The margin\n # is defined as the distance between the separating hyperplane (decision\n # boundary) and the training samples that are closest to this hyperplane,\n # which are the so-called support vectors.\n\n # Maximum margin intuition\n # The rationale behind having decision boundaries with large margins is\n # that they tend to have a lower generalization error whereas models with\n # small margins are more prone to overfitting.\n\n # Dealing with the nonlinearly separable case using slack variables\n # The motivation for introducing the slack variable ξ was that the linear\n # constraints need to be relaxed for nonlinearly separable data to allow\n # convergence of the optimization in the presence of misclassifications\n # under the appropriate cost penalization.\n\n # Using the variable C, we can control the penalty for misclassification.\n # Large values of C correspond to large error penalties whereas we are less\n # strict about misclassification errors if we choose smaller values for C.\n # We can then use the parameter C to control the width of the margin and\n # therefore tune the bias-variance trade-off. This concept is related to\n # regularization, where decreasing the value of C increases the bias and\n # lowers the variance of the model.\n\n svm = SVC(kernel='linear', C=1.0, random_state=0)\n svm.fit(X_train_std, y_train)\n\n plot_decision_regions(X_combined_std, y_combined,\n classifier=svm, test_idx=range(105, 150))\n plt.xlabel('petal length [standardized]')\n plt.ylabel('petal width [standardized]')\n plt.legend(loc='upper left')\n plt.tight_layout()\n plt.show()\n\n # Logistic regression versus SVM\n # In practical classification tasks, linear logistic regression and linear\n # SVMs often yield very similar results. 
Logistic regression tries to\n # maximize the conditional likelihoods of the training data, which makes it\n # more prone to outliers than SVMs. The SVMs mostly care about the points\n # that are closest to the decision boundary (support vectors). On the other\n # hand, logistic regression has the advantage that it is a simpler model\n # that can be implemented more easily. Furthermore, logistic regression\n # models can be easily updated, which is attractive when working with\n # streaming data.\n\n # Sometimes our datasets are too large to fit into computer memory. Thus,\n # scikit-learn also offers alternative implementations via the SGDClassifier\n # class, which also supports online learning via the partial_fit method.\n # The concept behind the SGDClassifier class is similar to the stochastic\n # gradient algorithm.\n\n from sklearn.linear_model import SGDClassifier\n\n # We could initialize the stochastic gradient descent version of the\n # perceptron, logistic regression, and support vector machine with default\n # parameters as follows:\n ppn = SGDClassifier(loss='perceptron')\n lr = SGDClassifier(loss='log')\n svm = SGDClassifier(loss='hinge')\n","sub_path":"machine_learning/support_vector_machines/simple_svm_example.py","file_name":"simple_svm_example.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576958193","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport requests\nfrom lxml.etree import HTML\nimport re\n\n\ndef parse_detail(response):\n html = HTML(response.text)\n div_list = html.xpath('//div[@class=\"trade-list-contain j-list-content\"]//div[@class=\"demand\"]')\n for item in div_list:\n title = item.xpath('string(.//p[@class=\"d-title\"]/span/@title)').strip()\n price = item.xpath('string(.//b[@class=\"d-base-price\"]/text())').replace('¥','').strip()\n des = item.xpath('string(.//p[@class=\"d-des\"]/@title)').strip()\n url = 'https:' + item.xpath('string(./a/@href)').strip()\n\n save_res = title+'||'+price+'||'+des+'||'+url\n save_res = save_res.replace(',',',').replace('\\n','').replace('||',',')+'\\n'\n print(save_res)\n\n with open('结果.csv', 'a', encoding='gbk', errors='ignore') as f:\n f.write(save_res)\n\ndef start(catid):\n start_url = 'https://task.zbj.com/'+catid\n print('正在爬取:'+start_url)\n response = requests.get(start_url)\n # print(response.text)\n html = HTML(response.text)\n\n #获取总页数\n totalPage = html.xpath('string(//span[@class=\"zbj-paging-text\"]/text())')\n totalPage = re.search('共.*?,(\\d+)页',totalPage)\n if totalPage:\n totalPage = int(totalPage.group(1))\n print('总页数:'+str(totalPage))\n else:\n totalPage = 1\n print('没有获取到总页数')\n\n\n #处理第一页内容\n print('当前页:1')\n parse_detail(response)\n\n # 处理后续页数\n if totalPage !=1:\n for i in range(2,totalPage+1):\n print('当前页:'+str(i))\n each_url = 'https://task.zbj.com/{catid}/page{pageToken}.html'.format(catid=catid,pageToken=i)\n response = requests.get(each_url)\n parse_detail(response)\n\n\n\nif __name__ == '__main__':\n with open('结果.csv','w',encoding='gbk',errors='ignore') as f:\n f.write('标题,预算,项目描述,url\\n')\n while True:\n catid = input('请输入要爬取的分类:')\n start(catid)","sub_path":"other/zbj/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"290100102","text":"import uuid\nfrom time import time\nimport logging\n\n\nclass Facade(object):\n def __init__(self, 
authorization, type_store, instance_store, send_method):\n self.authorization = authorization\n self._types = type_store\n self._instances = instance_store\n self.send = send_method\n\n def get_active_types(self):\n return self._types.get_constrained(constrain={'value.available': True})\n\n def get_types(self):\n return self.get_all_types()\n\n def get_all_types(self):\n return self._types.get_all()\n\n def create_instance(self, instance_type, uid, environment=None):\n logging.debug('Creating instance of %s', instance_type)\n # should we check if the type is active?\n if instance_type not in self.get_types():\n return None\n\n instance = self._prepare_instance(status='creating',\n instance_type=instance_type,\n environment=environment)\n # return value ignored?\n self.authorization.make_owner(uid, instance['id'])\n self.send(instance_type, 'create_instance', instance)\n self.send('info', 'instance_info', {'instance': instance})\n return instance\n\n def delete_instance(self, instance_id, uid):\n if not self._instances.get(instance_id) or not \\\n self.authorization.is_user_instance(instance_id=instance_id,\n user_id=uid):\n logging.debug('Unable to remove instance %s', instance_id)\n return False\n instance = self._instances.get(instance_id)\n # this may not work properly as we don't use broadcast\n self.send(instance['type'], 'delete_instance', instance)\n return self.authorization.revoke_ownership(user_id=uid,\n instance_id=instance_id)\n\n def get_instances_of_type(self, instance_type_name, uid):\n if instance_type_name not in self.get_all_types():\n return {}\n return {iid: desc\n for iid, desc in self.get_all_instances(uid).iteritems()\n if desc['type'] == instance_type_name}\n\n def get_instance(self, instance_id, uid):\n if not self.authorization.is_user_instance(instance_id, uid):\n return None\n\n return self._instances.get(instance_id)\n\n def get_all_instances(self, uid):\n user_instances = self.authorization.get_user_instances(uid)\n return {instanceId: self._instances.get(instanceId) for instanceId in\n user_instances if self._instances.get(instanceId)}\n\n @staticmethod\n def _prepare_instance(status, instance_type, environment=None):\n instance = dict()\n instance['id'] = generate_id()\n instance['status'] = status\n instance['type'] = instance_type\n instance['ts'] = instance['created'] = time()\n if environment:\n instance['environment'] = environment\n\n return instance\n\n\ndef generate_id():\n return uuid.uuid1().hex\n\n\ndef is_available(type_description):\n return 'status' not in type_description or \\\n ('status' in type_description\n and type_description['status'] == 'active')\n","sub_path":"facade/facade.py","file_name":"facade.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260942574","text":"from abc import ABC, abstractmethod\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Any, Dict\n\n\nclass FlyModule(nn.Module, ABC):\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.config = args[0]\n\n @abstractmethod\n def forward(self, *args, **kwargs) -> Dict[str, torch.Tensor]:\n pass\n\n\nclass CNNNet(FlyModule):\n def __init__(self, config):\n super().__init__(config.model)\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, 
batch: Dict[str, Any]) -> Dict[str, Any]:\n x = batch[\"input\"]\n target = batch[\"target\"]\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n loss = F.nll_loss(output, target)\n\n results = {\n \"loss\": loss,\n \"output\": output\n }\n\n return results\n","sub_path":"examples/TestTorchfly/MNIST/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"97409084","text":"import time\nimport numpy as np\nimport numpy.linalg as lin\nimport ray\nimport matrixDiv as md\n'''\nThis code tests a serial versus a parallelized implementation\nof determinant calculation over submatrices in a bigger matrix.\n'matriz' defines a 'big' matrix.\nThe serial version simply goes through every 3x3 region and applied\nlin.det(), while the parallel version uses the Ray library to\ngive different workers for regions of the matrix.\nThe auxiliary library matrixDiv can divide the bigger matrix into\n2, 4 and 8 submatrices to help divide the jobs for Ray.\nCode must be adapted to the number of cores/cpus available in the system\n'''\n\nray.init()\n\nsize_x = 200\nsize_y = 200\nmatriz = np.random.randint(5, size=(size_x,size_y))\n\n\nprint(\"=======================\")\nt1 = time.time()\nmrows = matriz.shape[0]\nmcols = matriz.shape[1]\nresults_s = np.zeros([1,(mrows-2)*(mcols-2)],dtype=np.float64)\nfor i in range(0,mrows-2):\n\tfor j in range(0,mcols-2):\n\t\tsubm = matriz[np.ix_(range(i,i+3),range(j,j+3))]\n\t\tresults_s[0][(mcols-2)*i+j]= lin.det(subm)\nprint(\"Calculated dets: \",len(results_s[0]))\nt2 = time.time()\nprint(\"Serial: \",t2-t1)\nprint(\"=======================\")\n\n@ray.remote\ndef det_3(mat):\n\tmatrows = mat.shape[0]\n\tmatcols = mat.shape[1]\n\tresults = np.zeros([1,(matrows-2)*(matcols-2)],dtype=np.float64)\n\tfor i in range(0,matrows-2):\n\t\tfor j in range(0,matcols-2):\n\t\t\tsubm = mat[np.ix_(range(i,i+3),range(j,j+3))]\n\t\t\tresults[0][(matcols-2)*i+j]= lin.det(subm)\n\treturn results\n\n\nprint(\"=======================\")\nt1 = time.time()\nmats = md.div8(matriz)\nray_ids = []\n\n\nfor m in mats:\n\tray_ids.append(det_3.remote(m))\n\nresults_r = np.concatenate(ray.get(ray_ids),axis=1)\n\nprint(\"Calculated dets: \",len(results_r[0]))\n\nt2 = time.time()\nprint(\"Parallel: \",t2-t1)\nprint(\"=======================\")\n\n\"\"\"\nimport code\ncode.interact(local=locals())\n\"\"\"","sub_path":"sw-sim/minitests/testSerialParallel.py","file_name":"testSerialParallel.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"59411589","text":"import pygame\nimport sys\nfrom commands import *\nfrom bebop import Bebop\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nprint(\"Connecting to drone...\")\ndrone = Bebop()\ndrone.trim()\nprint(\"Connected.\")\n\n\ndrone.takeoff()\ndrone.wait(.2)\ndrone.update(cmd=movePCMDCmd(True, 0, 40, 0, 0))\ndrone.wait(2.3)\ndrone.update(cmd=movePCMDCmd(True, 0, 0, 0, 0))\ndrone.wait(.2)\ndrone.land()","sub_path":"drone/drone_takeoff_and_land.py","file_name":"drone_takeoff_and_land.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"341442089","text":"# -*- 
coding: UTF-8 -*-\nimport datetime\n\nimport xlrd\nimport pandas as pd\nimport os\nimport time\n\nDATA_DIR = 'data'\nONE_HOUR_SECONDS = 60 * 60\n\n\n# Get the list of stock codes\ndef get_stocks(config=None):\n if config:\n data = xlrd.open_workbook(config)\n table = data.sheets()[0]\n rows_count = table.nrows\n codes = table.col_values(0)[1:rows_count-1]\n names = table.col_values(1)[1:rows_count-1]\n return list(zip(codes, names))\n else:\n data_files = os.listdir(DATA_DIR)\n stocks = []\n for file in data_files:\n code_name = file.split(\".\")[0]\n code = code_name.split(\"-\")[0]\n name = code_name.split(\"-\")[1]\n appender = (code, name)\n stocks.append(appender)\n return stocks\n\n\n# Read the local data file\ndef read_data(stock, name):\n file_name = stock + '-' + name + '.h5'\n try:\n return pd.read_hdf(DATA_DIR + \"/\" + file_name)\n except FileNotFoundError:\n return\n\n\n# Whether the data needs updating\ndef need_update_data():\n try:\n filename = \"data/000001-平安银行.h5\"\n last_modified = os.stat(filename).st_mtime\n now = time.time()\n time_diff = now - last_modified\n return time_diff > ONE_HOUR_SECONDS\n except FileNotFoundError:\n return True\n\n\n# Whether today is a weekday\ndef is_weekday():\n return datetime.datetime.today().weekday() < 5\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"618165792","text":"# Suppose a sorted array is rotated at some pivot unknown to you beforehand.\n\n# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2 ).\n\n# You are given a target value to search. If found in the array, return its index, otherwise return -1.\n\n# You may assume no duplicate exists in the array.\n\n# Input : [4 5 6 7 0 1 2] and target = 4\n# Output : 0\n\n# NOTE : Think about the case when there are duplicates. Does your current solution work? How does the time complexity change?\n\n\n\nclass Solution:\n # @param A : tuple of integers\n # @param B : integer\n # @return an integer\n def search(self, A, B):\n left = 0\n right = len(A)-1\n while left < right:\n mid = (left+right)//2\n if A[left] < A[right] or A[mid] < A[left]:\n right = mid\n else:\n left = mid +1\n \n low, high = 0, len(A)-1\n while low <= high:\n ind = (low+high)//2\n indx = (ind+left)%len(A)\n if A[indx] == B:\n return indx\n elif A[indx] > B:\n high = ind-1\n else:\n low = ind + 1\n return -1\n ","sub_path":"Google/Search in Rotated Sorted Array.py","file_name":"Search in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"54956912","text":"# Traversal is a process to visit all the nodes of a tree and may print their values too. Because all nodes are connected via edges(links) we always start from the root(head) node. 
So, we cannot randomly access a node in a tree.\n# There are three ways which we use to traverse a tree −\n# Inorder Traversal:\n# 1.Traverse the left subtree.\n# 2.Visit the root.\n# 3.Traverse the right subtree.\n# Preorder Traversal:\n# 1.Visit the root.\n# 2.Traverse the left subtree.\n# 3.Traverse the right subtree.\n# Postorder Traversal:\n# 1.Traverse the left subtree.\n# 2.Traverse the right subtree.\n# 3.Visit the root.\n\nclass Node:\n def __init__(self,key):\n \"\"\"Initialization of Node\"\"\"\n self.left=None\n self.right=None\n self.val=key\n\ndef preOrder(root):\n \"\"\"Pre order Traversal\"\"\"\n if(root):\n print(root.val,end=' ')\n preOrder(root.left)\n preOrder(root.right)\n\ndef postOrder(root):\n \"\"\"Post order Traversal\"\"\"\n if (root):\n postOrder(root.left)\n postOrder(root.right)\n print(root.val,end=' ')\n\ndef inOrder(root):\n \"\"\"In order Traversal\"\"\"\n if (root):\n inOrder(root.left)\n print(root.val,end=' ')\n inOrder(root.right)\n\nroot=Node(5)\nroot.left=Node(3)\nroot.right=Node(7)\nroot.left.left=Node(2)\nroot.left.right=Node(4)\n\nprint(\"PreOrder Traversal of the given tree: \")\npreOrder(root)\n\nprint(\"\\nInOrder Traversal of the given tree: \")\ninOrder(root)\n\nprint(\"\\nPostOrder Traversal of the given tree: \")\npostOrder(root)\n\n# Testcase 1:\n# Output:\n# PreOrder Traversal of the given tree:\n# 5 3 2 4 7\n# InOrder Traversal of the given tree:\n# 2 3 4 5 7\n# PostOrder Traversal of the given tree:\n# 2 4 3 7 5\n# Testcase 2:\n# Output:\n# PreOrder Traversal of the given tree:\n# 1 2 4 5 3\n# InOrder Traversal of the given tree:\n# 4 2 5 1 3\n# PostOrder Traversal of the given tree:\n# 4 5 2 3 1\n# Time Complexity=O(n)\n# Space Complexity=If we don’t consider size of stack for function calls then O(1) otherwise O(n).","sub_path":"Code/Python/BST Traversal.py","file_name":"BST Traversal.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"154653300","text":"\nimport time #時刻を扱うライブラリ\nimport numpy as np #NumPyライブラリ\nimport scipy.linalg #SciPyの線形計算ライブラリ\nimport matplotlib.pyplot as plt #データ可視化ライブラリ\n\nclass node_cond:\n def __init__(self,dirichlet,neumann):\n self.dirichlet,self.neumann=dirichlet,neumann\nclass node2:\n def __init__(self,data):\n self.pos = [float(data[0]),float(data[1])]\n self.cond = [node_cond(data[2],data[3])]\nclass node_list:\n def __init__(self,data,nnode):\n self.nd = [node2(data[i].split()) for i in range(nnode)]\nclass element:\n sample = [-0.577350,0.577350]\n alpha = 1.0\n def __init__(self,data):\n self.mnode = 9\n self.B = self.deriv(self.sample)\n self.node_no = [int(data[i]) for i in range(self.mnode)]\n self.node_pos = [NODE.nd[self.node_no[i]].pos for i in range(self.mnode)]\n self.mat_loc_A =[[0 for _ in range(self.mnode)] for _ in range(self.mnode)]\n self.vec_loc_b = [0 for _ in range(self.mnode)]\n self.mat_loc_A = self.sgsm(self.sample,self.node_pos,self.alpha,self.B)\n self.vec_loc_b = self.make_b(self.sample,self.B,self.node_pos)*2\n def deriv(self,sample):\n n0 = self.isopara(sample[0]+0.5,sample[0])\n n1 = self.isopara(sample[0]-0.5,sample[0])\n n2 = self.isopara(sample[0],sample[0]+0.5)\n n3 = self.isopara(sample[0],sample[0]-0.5)\n n4 = self.isopara(sample[0]+0.5,sample[1])\n n5 = self.isopara(sample[0]-0.5,sample[1])\n n6 = self.isopara(sample[0],sample[1]+0.5)\n n7 = self.isopara(sample[0],sample[1]-0.5)\n n8 = self.isopara(sample[1]+0.5,sample[0])\n n9 = self.isopara(sample[1]-0.5,sample[0])\n 
n10 = self.isopara(sample[1],sample[0]+0.5)\n n11 = self.isopara(sample[1],sample[0]-0.5)\n n12 = self.isopara(sample[1]+0.5,sample[1])\n n13 = self.isopara(sample[1]-0.5,sample[1])\n n14 = self.isopara(sample[1],sample[1]+0.5)\n n15 = self.isopara(sample[1],sample[1]-0.5)\n n = np.array([n0-n1,n4-n5,n8-n9,n12-n13,n2-n3,n6-n7,n10-n11,n14-n15])\n return n\n def shapef(self,sample):\n n0 = self.isopara(sample[0],sample[0])\n n1 = self.isopara(sample[0],sample[1])\n n2 = self.isopara(sample[1],sample[0])\n n3 = self.isopara(sample[1],sample[1])\n N = np.array([n0,n1,n2,n3])\n return N\n def isopara(self,gzai,eta):\n n0= 0.25*(1-gzai)*(1-eta)*gzai*eta\n n1 = -0.5*(1+gzai)*(1-gzai)*(1-eta)*eta\n n2 = -0.25*(1+gzai)*(1-eta)*gzai*eta\n n3 = 0.5*(1+gzai)*(1+eta)*(1-eta)*gzai\n n4 = 0.25*(1+gzai)*(1+eta)*gzai*eta\n n5 = 0.5*(1+gzai)*(1-gzai)*(1+eta)*eta\n n6 = -0.25*(1-gzai)*(1+eta)*gzai*eta\n n7 = -0.5*(1-gzai)*(1+eta)*(1-eta)*gzai\n n8 = (1+gzai)*(1-gzai)*(1-eta)*(1+eta)\n N = np.array([n0,n1,n2,n3,n4,n5,n6,n7,n8])\n return N\n def sgsm(self,sample,x,alpha,B):\n #N = shapef(sample)\n B = np.reshape(B,(2,36))\n j1 = self.yacob(x,B[:,:9])\n j2 = self.yacob(x,B[:,9:18])\n j3 = self.yacob(x,B[:,18:27])\n j4 = self.yacob(x,B[:,27:36])\n j1d = np.linalg.det(self.yacob(x,B[:,:9]))\n j2d = np.linalg.det(self.yacob(x,B[:,9:18]))\n j3d = np.linalg.det(self.yacob(x,B[:,18:27]))\n j4d = np.linalg.det(self.yacob(x,B[:,27:36]))\n detj1 = np.linalg.inv(j1)\n detj2 = np.linalg.inv(j2)\n detj3 = np.linalg.inv(j3)\n detj4 = np.linalg.inv(j4)\n Ninv1 = np.dot(detj1,B[:,:9])\n Ninv2 = np.dot(detj2,B[:,9:18]) \n Ninv3 = np.dot(detj3,B[:,18:27]) \n Ninv4 = np.dot(detj4,B[:,27:36]) \n N1 = np.dot(Ninv1.T,Ninv1)\n N2 = np.dot(Ninv2.T,Ninv2)\n N3 = np.dot(Ninv3.T,Ninv3)\n N4 = np.dot(Ninv4.T,Ninv4)\n A = N1*j1d+N2*j2d+N3*j3d+N4*j4d\n return A\n def yacob(self,x,N):\n x = np.array(x)\n J =np.array([np.dot(N[0],x[:,0]),np.dot(N[1],x[:,0]),np.dot(N[0],x[:,1]),np.dot(N[1],x[:,1])])\n J = np.reshape(J,(2,2))\n return J\n def make_b(self,sample,B,x):\n B = np.reshape(B,(2,36))\n x = np.array(x)\n j1 = np.linalg.det(self.yacob(x,B[:,:9]))\n j2 = np.linalg.det(self.yacob(x,B[:,9:18]))\n j3 = np.linalg.det(self.yacob(x,B[:,18:27]))\n j4 = np.linalg.det(self.yacob(x,B[:,27:36]))\n n0 = self.isopara(sample[0],sample[0])\n n1 = self.isopara(sample[0],sample[1])\n n2 = self.isopara(sample[1],sample[0])\n n3 = self.isopara(sample[1],sample[1])\n return j1*n0+j2*n1+j3*n2+j4*n3\nclass element_list:\n def __init__(self,data,nelem):\n self.ed =[element(data[i].split()) for i in range(nelem)]\ndef make_global(nelem,mnode,nnode):\n mat_A_glo = [[0 for _ in range(nnode)] for _ in range(nnode)]\n vec_b_glo = [0]*nnode\n for m in range(nelem):\n for i in range(mnode):\n for j in range(mnode):\n mat_A_glo[ELEM.ed[m].node_no[i]][ELEM.ed[m].node_no[j]] += ELEM.ed[m].mat_loc_A[i,j]\n vec_b_glo[ELEM.ed[m].node_no[i]] += ELEM.ed[m].vec_loc_b[i]\n return mat_A_glo, vec_b_glo\ndef boundary (A,b,nnode):\n A = np.array(A)\n b = np.array(b, dtype=np.float)\n for npoint in range(nnode):\n dirichlet = NODE.nd[npoint].cond[0].dirichlet\n neumann = NODE.nd[npoint].cond[0].neumann\n #neumann = NODE.nd[npoint].cond[1]\n if (dirichlet!=\"inf\"): #Dirichlet=無限大の時は処理しない\n b[:] -= float(dirichlet)*A[npoint][:]\n b[npoint] = float(dirichlet)\n A[npoint,:] = 0\n A[:,npoint] = 0\n A[npoint][npoint] = 1\n if (neumann!=\"inf\"):\n b[npoint] +=float(neumann)\n return A,b\niu = open(\"data4.dat\")\ndata = iu.readlines()\niu.close()\nnnode = 441\nnelem = 100\nmnode = 
9\nNODE = node_list(data[:nnode],nnode)\nELEM = element_list(data[nnode:],nelem)\nA,b = make_global(nelem,mnode,nnode)\nA,b = boundary(A,b,nnode) #左端はディリクレ境界\nunknown_vec_u = scipy.linalg.solve(A,b)\nprint(unknown_vec_u)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"413224306","text":"from calculations.python.paths import *\nfrom calculations.python.util import *\nimport Levenshtein\n\nwith open(data_path(REFERENCE_GENOME), 'r') as file:\n reference = ''.join(file.read().split('\\n')[1:])\n\nwith open(data_path(REFERENCE_GENES), 'r') as file:\n genes = [g.split() for g in file.read().split('\\n')]\n genes = [(g[0], translate(transcribe(reference[int(g[1]):int(g[2])]))) for g in genes]\n\nwith open(data_path(\"netmhcpan_bare_peptides.txt\"), 'r') as input, open(data_path(\"netmhcpan_peptides.txt\"), 'w') as output:\n for entry in input:\n entry = entry.strip()\n m = min(((gene_name, gene_data, begin) for gene_name, gene_data in genes for begin in range(len(gene_data) - len(entry))), key=lambda g: Levenshtein.distance(g[1][g[2]:g[2]+len(entry)], entry))\n print(f\"{entry}\\t{m[0]}\\t{m[2]}\", file=output)\n","sub_path":"calculations/python/netmhcpan_find_peptides.py","file_name":"netmhcpan_find_peptides.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406900298","text":"from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer\nfrom pycocoevalcap.bleu.bleu import Bleu\nfrom pycocoevalcap.meteor.meteor import Meteor\nfrom pycocoevalcap.rouge.rouge import Rouge\nfrom pycocoevalcap.cider.cider import Cider\nimport json, sys, os\n\n# git https://github.com/salaniz/pycocoevalcap\n\nclass COCOEvalCap:\n def __init__(self,images,gts,res):\n self.evalImgs = []\n self.eval = {}\n self.imgToEval = {}\n self.params = {'image_id': images}\n self.gts = gts\n self.res = res\n\n def evaluate(self):\n imgIds = self.params['image_id']\n gts = self.gts\n res = self.res\n\n # =================================================\n # Set up scorers\n # =================================================\n print('tokenization...')\n tokenizer = PTBTokenizer()\n print(gts)\n gts = tokenizer.tokenize(gts)\n print(\"\\n==============\\n\")\n print(gts)\n print(\"\\n==============\\n\")\n print(res)\n res = tokenizer.tokenize(res)\n print(res)\n \n \n\n # =================================================\n # Set up scorers\n # =================================================\n print('setting up scorers...')\n scorers = [\n (Bleu(4), [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\"]),\n (Meteor(),\"METEOR\"),\n (Rouge(), \"ROUGE_L\"),\n (Cider(), \"CIDEr\")\n ]\n\n # =================================================\n # Compute scores\n # =================================================\n eval = {}\n for scorer, method in scorers:\n print('computing %s score...'%(scorer.method()))\n score, scores = scorer.compute_score(gts, res)\n if type(method) == list:\n for sc, scs, m in zip(score, scores, method):\n self.setEval(sc, m)\n self.setImgToEvalImgs(scs, imgIds, m)\n print(\"%s: %0.3f\"%(m, sc))\n else:\n self.setEval(score, method)\n self.setImgToEvalImgs(scores, imgIds, method)\n print(\"%s: %0.3f\"%(method, score))\n self.setEvalImgs()\n\n def setEval(self, score, method):\n self.eval[method] = score\n\n def setImgToEvalImgs(self, scores, imgIds, method):\n for imgId, score in 
zip(imgIds, scores):\n if not imgId in self.imgToEval:\n self.imgToEval[imgId] = {}\n self.imgToEval[imgId][\"image_id\"] = imgId\n self.imgToEval[imgId][method] = score\n\n def setEvalImgs(self):\n self.evalImgs = [eval for imgId, eval in self.imgToEval.items()]\n\ndef calculate_metrics(rng,datasetGTS,datasetRES):\n imgIds = rng\n gts = {}\n res = {}\n\n imgToAnnsGTS = {ann['image_id']: [] for ann in datasetGTS['annotations']}\n for ann in datasetGTS['annotations']:\n imgToAnnsGTS[ann['image_id']] += [ann]\n\n imgToAnnsRES = {ann['image_id']: [] for ann in datasetRES['annotations']}\n for ann in datasetRES['annotations']:\n imgToAnnsRES[ann['image_id']] += [ann]\n\n for imgId in imgIds:\n gts[imgId] = imgToAnnsGTS[imgId]\n res[imgId] = imgToAnnsRES[imgId]\n\n evalObj = COCOEvalCap(imgIds,gts,res)\n evalObj.evaluate()\n return evalObj.eval, evalObj.imgToEval\n\ndef loadRES(filename):\n annotations = []\n vids = []\n vid_index = {}\n i = 0\n for line in open(filename, 'r'):\n line = line.strip()\n if line == '':\n continue\n toks = line.split(' ')\n vid = toks[0]\n if vid not in vid_index:\n vid_index[vid] = len(vids)\n vids.append(vid)\n sent = []\n for i, tok in enumerate(toks):\n if i == 0:\n continue\n if tok == '':\n break\n sent += [tok]\n annotations += [{'image_id': vid_index[vid], 'caption': ' '.join(sent)}]\n i += 1\n if i > 10:\n break\n return {'annotations': annotations}, vids, vid_index\n\ndef loadGTS(video_ids, vid_index):\n # parse data \n dir_path = os.path.dirname(os.path.realpath(__file__))\n files = [\n dir_path + '/videodatainfo_2017.json',\n dir_path + '/test_videodatainfo_2017.json'\n ]\n annotations = []\n for filename in files:\n info = json.loads(open(filename, 'r').read())\n for i in range(len(info['sentences'])):\n video_id = info['sentences'][i]['video_id']\n if video_id not in video_ids:\n \tcontinue\n caption = info['sentences'][i]['caption']\n # remove illegal char\n for c in caption:\n if ord(c) >= 128:\n caption = caption.replace(c, '')\n # caption = caption.replace(u'\\xe9', \"e\")\n annotations += [{'image_id': vid_index[video_id], 'caption': caption}]\n return {'annotations': annotations}, False\n\ndef evalRes(filename):\n res, video_ids, vid_index = loadRES(filename)\n gts, _ = loadGTS(video_ids, vid_index)\n rng = range(len(video_ids))\n\n evals, imgToEval = calculate_metrics(rng, gts, res)\n\n # print(evals)\n video_id_scores = {}\n\n for kid in imgToEval.keys():\n item = imgToEval[kid]\n for k in item.keys():\n item[k] = str(item[k])\n item['idx'] = kid\n video_id_scores[video_ids[kid]] = item\n\n # print(video_id_scores)\n json.dump(evals, open(filename + '.eval_all', 'w'))\n json.dump(video_id_scores, open(filename + '.eval', 'w'))\n\nif __name__ == '__main__':\n fname = '/4t/peiya/model_1011_resnet2048_none_none_none_empty_pad_dropout05/epoch_results/val.-1.epoch02.out'\n evalRes(fname)\n","sub_path":"eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"397938916","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/restresource/__init__.py\n# Compiled at: 2007-09-24 16:48:57\n\"\"\"\nrestresource\n\ncherrypy controller mixin to make it easy to build REST applications.\n\nhandles nested resources and method-based dispatching.\n\nhere's a rough sample of what a 
controller would look like using this:\n\ncherrypy.root = MainController()\ncherrypy.root.user = UserController()\n\nclass PostController(RESTResource):\n def read(self,post):\n return post.as_html()\n read.expose_resource = True\n\n def delete(self,post):\n post.destroySelf()\n return \"ok\"\n delete.expose_resource = True\n\n def update(self,post,title=\"\",body=\"\"):\n post.title = title\n post.body = body\n return \"ok\"\n update.expose_resource = True\n\n def create(self, post, title=\"\", body=\"\"):\n post.title = title\n post.body = body\n return \"ok\"\n create.expose_resource = True\n\n def REST_instantiate(self, slug, **kwargs):\n try:\n user = self.parents[0]\n return Post.select(Post.q.slug == slug, Post.q.userID == user.id)[0]\n except:\n return None\n\n def REST_create(self, slug, **kwargs):\n user = self.parents[0]\n return Post(slug=slug,user=user)\n\nclass UserController(RESTResource):\n REST_children = {'posts' : PostController()}\n\n def read(self,user):\n return user.as_html()\n read.expose_resource = True\n\n def delete(self,user):\n user.destroySelf()\n return \"ok\"\n delete.expose_resource = True\n\n def update(self,user,fullname=\"\",email=\"\"):\n user.fullname = fullname\n user.email = email\n return \"ok\"\n update.expose_resource = True\n\n def create(self, user, fullname=\"\", email=\"\"):\n user.fullname = fullname\n user.email = email\n return \"ok\"\n create.expose_resource = True\n\n def get_extra_action(self,user):\n # do something else\n pass\n get_extra_action.expose_resource = True\n\n def REST_instantiate(self, username, **kwargs):\n try:\n return User.byUsername(username)\n except:\n return None\n\n def REST_create(self, username, **kwargs):\n return User(username=username)\n\nthen, the site would have urls like:\n\n /user/bob\n /user/bob/posts/my-first-post\n /user/bob/posts/my-second-post\n /user/bob/extra_action\n\nwhich represent REST resources. calling 'GET /user/bob' would call the read() method on UserController\nfor the user bob. 'PUT /user/joe' would create a new user with username 'joe'. 'DELETE /user/joe'\nwould delete that user. 'GET /user/bob/posts/my-first-post' would call read() on the Post Controller\nwith the post with the slug 'my-first-post' that is owned by bob.\n\nThere are actually two URL scheme options. 
Note that the first corresponds to the scheme above, as well.\nScheme One (default) --this follows 'WSGI Collection' semicolon rules\n REST_ids_are_root = True\n /user/bob;extra_action OR /user/bob/extra_action\n\nScheme Two:\n If you put the following in your RESTResource class:\n REST_ids_are_root = False\n /user;bob/extra_action OR /user/;bob/extra_action\n\n This has the advantage of making both the collection and the members 'context'\n resources, meaning at /user;bob/ you can send a relative-url for 'extra_action'\n and it will go to the expected location.\n\"\"\"\nimport cherrypy\n\ndef strip_empty(path):\n return [ e for e in path if e != '' ]\n\n\nclass RESTResource:\n REST_defaults = {'DELETE': 'delete', 'GET': 'read', \n 'POST': 'update', \n 'PUT': 'create'}\n REST_map = {}\n REST_children = {}\n parents = []\n REST_content_types = {}\n REST_default_content_type = ''\n REST_ids_are_root = True\n\n def CT_dispatch(self, d):\n method = cherrypy.request.method\n if method != 'GET':\n return d\n if cherrypy.request.headerMap.has_key('Accept'):\n accept = cherrypy.request.headerMap['Accept']\n if self.REST_content_types.has_key(accept):\n m = self.REST_content_types[accept]\n if hasattr(self, m):\n cherrypy.response.headerMap['Content-Type'] = accept\n return getattr(self, m)(d)\n if self.REST_default_content_type != '':\n if self.REST_content_types.has_key(self.REST_default_content_type):\n m = self.REST_content_types[self.REST_default_content_type]\n if hasattr(self, m):\n cherrypy.response.headerMap['Content-Type'] = self.REST_default_content_type\n return getattr(self, m)(d)\n return d\n\n def REST_childOverride(self, child_obj, *resources):\n \"\"\"If this is overridden in a subclass, you can:\n 1. return a non-false value which will override child responses\n 2. decorate the child further (e.g. 
a la obj.parents)\n This should be useful if, for example, you want security\n restrictions to be inherited\n \"\"\"\n return False\n\n def REST_collection_dispatch(self, func_params, **params):\n collection_method = cherrypy.request.method.lower()\n if func_params:\n param_method = ('_').join((collection_method,\n func_params[0]))\n if hasattr(self, param_method):\n collection_method = param_method\n func_params.pop(0)\n m = getattr(self, collection_method, self.list)\n if getattr(m, 'exposed', False):\n return self.CT_dispatch(m(**params))\n else:\n raise cherrypy.NotFound\n\n def REST_dispatch(self, resource, func_params, **params):\n method = cherrypy.request.method\n param_method = None\n if func_params:\n param_method = method.lower() + '_' + func_params[0]\n if param_method and hasattr(self, param_method):\n m = getattr(self, param_method)\n func_params.pop(0)\n elif self.REST_map.has_key(method):\n m = getattr(self, self.REST_map[method])\n elif self.REST_defaults.has_key(method):\n m = getattr(self, self.REST_defaults[method], getattr(self, 'index', None))\n if m and getattr(m, 'expose_resource', False):\n return m(resource, **params)\n raise cherrypy.NotFound\n return\n\n def parse_resource_token(self, token):\n resource_params = token.split(';')\n resource_name = resource_params.pop(0)\n return (resource_name, resource_params)\n\n @cherrypy.expose\n def default(self, *vpath, **params):\n \"\"\"This method will get called by default by CherryPy when it can't\n map an object path directly (a.b.c for request /a/b/c) which if we have\n RESTful urls (interspersed with id's) will be most of the time.\n\n Before this would only be inherited by sub-Root controllers, but to handle\n situations like /a;1/ or /a;add_form it needs to be sub-classed by the\n Root Controller now.\n\n So default() now simply handles one token between /'s and other\n methods dispatch handling\n\n * pass resource to sub-object (update obj.parents first)\n * call local method\n * getresource(id)\n * continue down vpath\n \"\"\"\n if vpath:\n vpath = list(vpath)\n vpath = strip_empty(vpath)\n if self in cherrypy.tree.mount_points.values():\n (rname, rparams) = self.parse_resource_token(vpath.pop(0))\n return self.map_vpath([], rname, rparams, vpath, params)\n return self.collection_dispatcher(None, [], vpath, params)\n\n def collection_dispatcher(self, myname, resource_params, vpath, params):\n resources = []\n if resource_params:\n if not self.REST_ids_are_root:\n resources.append(self.getresource(resource_params, params))\n if vpath:\n (rname, rparams) = self.parse_resource_token(vpath.pop(0))\n if self.REST_ids_are_root:\n if rname:\n resources.append(self.getresource((rname,), params))\n rname = None\n if vpath:\n (rname, rparams) = self.parse_resource_token(vpath.pop(0))\n elif not self.REST_ids_are_root and not rname and rparams:\n resources.append(self.getresource(rparams, params))\n if vpath:\n (rname, rparams) = self.parse_resource_token(vpath.pop(0))\n if rname:\n return self.map_vpath(resources, rname, rparams, vpath, params)\n elif self.REST_ids_are_root:\n resource_params.extend(rparams)\n if not self.REST_ids_are_root:\n resource_params = []\n if not resource_params and cherrypy.request.method == 'GET' and not cherrypy.request.path.endswith('/'):\n atoms = cherrypy.request.browser_url.split('?', 1)\n newUrl = atoms.pop(0) + '/'\n if atoms:\n newUrl += '?' 
+ atoms[0]\n raise cherrypy.HTTPRedirect(newUrl)\n if resources:\n return self.REST_dispatch(resources[0], resource_params, **params)\n else:\n return self.REST_collection_dispatch(resource_params, **params)\n return\n\n def getresource(self, resource_params, params):\n \"\"\"not doing anything with resource_params\n this could in theory be sent along to REST_* functions\n it is named without an '_' to avoid clobber from a '/col/;resource' hook\n \"\"\"\n resource = self.REST_instantiate(resource_params[0], **params)\n if resource is None:\n if cherrypy.request.method in ('PUT', 'POST'):\n resource = self.REST_create(resource_params[0], **params)\n else:\n raise cherrypy.NotFound\n return resource\n\n def map_vpath(self, resources, a, rparams, vpath, params):\n obj = None\n if self.REST_children.has_key(a):\n obj = self.REST_children[a]\n elif isinstance(getattr(self, a, None), RESTResource):\n obj = getattr(self, a)\n if obj and hasattr(obj, 'collection_dispatcher'):\n obj.parents = [ p for p in self.parents ]\n obj.parents.extend(resources)\n return self.REST_childOverride(obj, *resources) or obj.collection_dispatcher(a, rparams, vpath, params)\n rparams.insert(0, a)\n if resources:\n return self.REST_dispatch(resources[0], rparams, **params)\n else:\n return self.REST_collection_dispatch(rparams, **params)\n return\n\n def REST_instantiate(self, id, *params, **kwargs):\n \"\"\" instantiate a REST resource based on the id\n\n this method MUST be overridden in your class. it will be passed\n the id (from the url fragment) and should return a model object\n corresponding to the resource.\n\n if the object doesn't exist, it should return None rather than throwing\n an error. if this method returns None and it is a PUT request,\n REST_create() will be called so you can actually create the resource.\n \"\"\"\n raise cherrypy.NotFound\n\n def REST_create(self, id, *params, **kwargs):\n \"\"\" create a REST resource with the specified id\n\n this method should be overridden in your class.\n this method will be called when a PUT request is made for a resource\n that doesn't already exist. you should create the resource in this method\n and return it.\n \"\"\"\n raise cherrypy.NotFound","sub_path":"pycfiles/restresource-0.4-py2.5/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"569132875","text":"import csv\nimport re\n\n# For simplicity, we assume that the program runs where the files are located.\nSOURCES = [\n 'combined_data_1.txt',\n 'combined_data_2.txt',\n 'combined_data_3.txt',\n 'combined_data_4.txt'\n]\n\n# The all-important pattern indicating the current movie---here we are using it\n# just to provide some output guidance.\nMOVIE_LINE_PATTERN = '^(\\d+):$'\nMOVIE_LINE = re.compile(MOVIE_LINE_PATTERN)\ncurrent_movie_id = None\n\nDESTINATION = 'viewers.csv'\npost_processed_file = open(DESTINATION, 'w')\n\n# Read the files line by line and write out just the viewer IDs.\n#\n# We compile a list of IDs already seen and filter for uniques here---a choice\n# that assumes that we have enough memory to hold all possible IDs. A pre-count\n# was done to verify that the number of IDs would indeed fit in memory, so we\n# can proceed with this. 
In the general case, we might not have that luxury.\nviewer_ids = {}\nfor ratings_file in SOURCES:\n # Provide some visible output so that the user can see where we are.\n print(f'Processing file {ratings_file}...')\n\n with open(ratings_file, 'r+') as f:\n reader = csv.reader(f)\n for row in reader:\n movie_match = MOVIE_LINE.match(row[0])\n if movie_match:\n # Set the new movie ID.\n current_movie_id = movie_match.group(1)\n\n # Provide some visible output.\n print(f'- Movie ID: {current_movie_id}')\n else:\n # Write out the viewer ID if we haven’t seen it before.\n viewer_id = row[0]\n if viewer_ids.get(viewer_id) is None:\n viewer_ids[viewer_id] = True\n post_processed_file.write(f'{viewer_id}\\n')\n\npost_processed_file.close()\n","sub_path":"netflix-prize-graph-example/preprocess_viewers.py","file_name":"preprocess_viewers.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"319573017","text":"import numpy as np\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import accuracy_score, log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\nfrom hyperband import settings\n\n\ndef load_data():\n \"\"\"The goal of this function is to return a dataset\n for a classification problem.\n\n :returns:\n Returns a dictionary with the training data.\n \"\"\"\n\n data = {}\n\n data[\"X\"], data[\"y\"] = make_classification(\n n_samples=settings.DS_N_SAMPLES,\n n_features=settings.DS_N_FEATURES,\n n_classes=settings.DS_N_CLASS,\n n_informative=4,\n n_redundant=1,\n n_repeated=2,\n random_state=1,\n )\n return data\n\n\ndef handle_integers(params):\n \"\"\"The goal of this function is to avoid future problems\n with the data types of the parameters.\n\n :param params:\n Configuration to evaluate.\n\n :returns:\n Returns the configuration with the corresponding treatment applied.\n \"\"\"\n new_params = {}\n for k, v in params.items():\n if type(v) == float and int(v) == v:\n new_params[k] = int(v)\n else:\n new_params[k] = v\n return new_params\n\n\ndef train_and_eval_sklearn_classifier(clf, data):\n \"\"\"The goal of this function is to evaluate a given configuration.\n\n :param clf:\n Model to train.\n\n :param data:\n Dataset to train the model on.\n\n :returns:\n Returns a dictionary with the corresponding evaluation scores.\n \"\"\"\n X = data[\"X\"]\n y = data[\"y\"]\n\n x_train, x_test, y_train, y_test = train_test_split(X, y)\n clf.fit(x_train, y_train)\n\n try:\n p = clf.predict_proba(x_train)[:, 1] # sklearn convention\n except IndexError:\n p = clf.predict_proba(x_train)\n\n ll = log_loss(y_train, p)\n auc = roc_auc_score(y_train, p)\n acc = accuracy_score(y_train, np.round(p))\n\n print(\n \"\\n# training | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}\".format(\n ll, auc, acc\n )\n )\n\n try:\n p = clf.predict_proba(x_test)[:, 1] # sklearn convention\n except IndexError:\n p = clf.predict_proba(x_test)\n\n ll = log_loss(y_test, p)\n auc = roc_auc_score(y_test, p)\n acc = accuracy_score(y_test, np.round(p))\n\n print(\n \"# testing | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}\".format(\n ll, auc, acc\n )\n )\n\n return {\"loss\": ll, \"log_loss\": ll, \"auc\": auc}\n","sub_path":"hyperband/commons/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"494293828","text":"comparisons = 0\n\n\ndef startqs():\n a = []\n #l = open('test', 'r')\n l = open('qs.txt', 'r')\n for line in l:\n a.append(int(line))\n l.close()\n # choose quicksort method 1, 2, 3\n quicksort3(a)\n print(\"FINAL COMP IS\", comparisons)\n\n\ndef quicksort1(a):\n global comparisons\n p = a[0]\n print(\"Now sorting:\", a)\n print(\"p=\", p)\n i = 1\n comparisons += len(a) - 1\n for j in range(1, len(a)):\n if a[j] < p:\n (a[j], a[i]) = (a[i], a[j])\n i += 1\n (a[0], a[i - 1]) = (a[i - 1], a[0])\n print(\"Sorted:\", a)\n if i > 2:\n lpart = a[:i - 1]\n quicksort1(lpart)\n if i < len(a) - 1:\n rpart = a[i:]\n quicksort1(rpart)\n\n\ndef quicksort2(a):\n global comparisons\n print(\"Now sorting:\", a)\n print(\"p=\", a[len(a)-1])\n (a[0], a[len(a) - 1]) = (a[len(a) - 1], a[0])\n p = a[0]\n i = 1\n comparisons += len(a) - 1\n for j in range(1, len(a)):\n if a[j] < p:\n (a[j], a[i]) = (a[i], a[j])\n i += 1\n (a[0], a[i - 1]) = (a[i - 1], a[0])\n #print(\"Sorted:\", a)\n if i > 2:\n lpart = a[:i - 1]\n quicksort2(lpart)\n if i < len(a) - 1:\n rpart = a[i:]\n quicksort2(rpart)\n\n\ndef quicksort3(a):\n global comparisons\n if len(a) % 2 == 0:\n mpos = int(len(a) / 2 - 1)\n else:\n mpos = int((len(a) - 1) / 2)\n mscore = 0\n fscore = 0\n lscore = 0\n if a[0] > a[mpos]:\n fscore += 1\n else:\n mscore += 1\n if a[len(a)-1] > a[mpos]:\n lscore += 1\n else:\n mscore += 1\n if a[len(a)-1] > a[0]:\n lscore += 1\n else:\n fscore += 1\n if mscore == 1:\n ppos = mpos\n elif fscore == 1 :\n ppos = 0\n else:\n ppos = len(a) - 1\n print(\"Now sorting:\", a)\n print(\"p=\", a[ppos])\n (a[0], a[ppos]) = (a[ppos], a[0])\n p = a[0]\n i = 1\n comparisons += len(a) - 1\n for j in range(1, len(a)):\n if a[j] < p:\n (a[j], a[i]) = (a[i], a[j])\n i += 1\n (a[0], a[i - 1]) = (a[i - 1], a[0])\n print('i=',i)\n if i > 2:\n lpart = a[:i - 1]\n quicksort3(lpart)\n if i < len(a) - 1:\n rpart = a[i:]\n quicksort3(rpart)\n\n\nstartqs()\n","sub_path":"Quicksort/Quicksort.py","file_name":"Quicksort.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198221448","text":"# Copyright © 2018 This material is the property of the individuals that contributed to it: Dennis Vidovic, Andrew Rae, Thomas Kidd and Yusef Zia.\n# It may NOT be copied or otherwise used, in part or in its entirety, without permission, for any purpose, other than to execute it on a computing\n# platform as a complete project, without modifications.\n\nfrom spritesList import *\n\nmovementDicts = {\n 'LightInfantry': {\n TerrainTypes.waterTile: None,\n TerrainTypes.grassTile: 1.,\n TerrainTypes.plainTile: 1.,\n TerrainTypes.mountainTile: 2.,\n TerrainTypes.forestTile: 1.,\n TerrainTypes.cityTile: 1.,\n },\n 'Battleship': {\n TerrainTypes.waterTile: 1.,\n TerrainTypes.grassTile: None,\n TerrainTypes.plainTile: None,\n TerrainTypes.mountainTile: None,\n TerrainTypes.forestTile: None,\n TerrainTypes.cityTile: None,\n }\n}","sub_path":"movementDicts.py","file_name":"movementDicts.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244889175","text":"from django.shortcuts import render\nfrom .models import Detalle_Venta,Direccion_Envio_Venta,Venta,Carrito_Compras\nfrom inventario.models import Productos,Tallas,Img_Producto\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.db.models import 
Sum\nimport decimal\nimport datetime\nfrom django.urls import reverse\nfrom .forms import Busqueda_Venta_Form,Venta_Form\nfrom django.http.response import HttpResponseRedirect\nfrom seguridad.models import Direccion_Envio_Cliente_Temporal,Clientes_Logueados\ndef busca_ventas(request):\n\tif not request.user.is_authenticated:\t\n\t\treturn HttpResponseRedirect(reverse('seguridad:login'))\n\n\tif request.method==\"POST\":\n\t\tfecha_i=request.POST.get(\"fecha_inicial\")\n\t\tfecha_f=request.POST.get(\"fecha_final\")\n\t\t\n\t\tif request.POST.get(\"id_estatus_venta\"):\t\t\n\t\t\tid_estatus_venta=int(request.POST.get(\"id_estatus_venta\"))\n\t\telse:\t\t\t\n\t\t\tid_estatus_venta=0\n\t\t\t\n\t\tif fecha_i==\"\" and fecha_f==\"\" and id_estatus_venta==0:\n\t\t\tventas=Venta.objects.all()\n\t\tif fecha_i!=\"\" and fecha_f!=\"\":\n\t\t\tif id_estatus_venta>0:\n\t\t\t\tventas=Venta.objects.filter(fecha__range=(fecha_i,fecha_f),id_estatus_venta=id_estatus_venta)\t\t\n\t\t\telse:\n\t\t\t\tventas=Venta.objects.filter(fecha__range=(fecha_i,fecha_f))\t\t\n\t\tif id_estatus_venta>0:\n\t\t\tif fecha_i!=\"\" and fecha_f!=\"\":\n\t\t\t\tventas=Venta.objects.filter(fecha__range=(fecha_i,fecha_f),id_estatus_venta=id_estatus_venta)\t\t\n\t\t\telse:\n\t\t\t\tventas=Venta.objects.filter(id_estatus_venta=id_estatus_venta)\n\t\tform=Busqueda_Venta_Form(request.POST)\n\telse:\n\t\tform=Busqueda_Venta_Form()\n\t\tventas=Venta.objects.all()\n\treturn render(request,'ventas/busca_ventas.html',locals())\n\ndef detalle_venta_form(request,id_venta):\t\n\tif not request.user.is_authenticated:\t\n\t\treturn HttpResponseRedirect(reverse('seguridad:login'))\n\t\t\n\tventa=Venta.objects.get(id=id_venta)\n\tif request.method==\"POST\":\n\t\tform=Venta_Form(request.POST,instance=venta)\t\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(reverse('ventas:busca_ventas'))\n\telse:\n\t\tdet_venta=Detalle_Venta.objects.filter(id_venta=venta)\n\t\tdireccion_enviio=Direccion_Envio_Venta.objects.get(id_venta=venta)\n\t\tform=Venta_Form(instance=venta)\t\n\treturn render(request,'ventas/ventas.html',locals())\n\t\n#esta api, regresa los productos que estan en el carrito de compras de la session que recibe como parametro\n#parametros\n#\tSession\n@api_view(['GET','POST','DELETE'])\ndef api_consulta_carrito_compras(request):\n\tif request.method==\"GET\":\n\t\tcarrito=[]\n\t\tsession=request.GET.get(\"session\")\t\t\n\t\t#obtenemos los productos que estan en el carrito de compras.\n\t\tc_c=Carrito_Compras.objects.filter(session=session)\n\n\t\tif c_c.exists():\n\t\t\t\n\t\t\tfor cc in c_c:\n\t\t\t\ttry:\n\t\t\t\t\tip=Img_Producto.objects.get(id_producto=cc.id_producto,orden=1)\n\t\t\t\t\tnom_img=ip.nom_img\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"el producto no tiene imagen con valor 1 en el campo orden\")\n\t\t\t\t\tnom_img=\"\"\t\t\t\t\n\t\t\t\tif cc.id_producto.descuento!=0:\n\t\t\t\t\t#obtenemos el precio sin iva\n\t\t\t\t\tsub_total=decimal.Decimal(cc.id_producto.precio)/decimal.Decimal(1.16)\n\t\t\t\t\t#obtenemos el subtotal con descuento\n\t\t\t\t\tsub_total_con_desc=decimal.Decimal(sub_total)-(decimal.Decimal(sub_total)*(decimal.Decimal(cc.id_producto.descuento/100.00)))\n\t\t\t\t\t#una vez que tenemos el precio con descuento, le agregamos el 
iva\n\t\t\t\t\tsub_total_con_iva=decimal.Decimal(sub_total_con_desc)*decimal.Decimal(1.16)\n\t\t\t\t\tprecio_desc=sub_total_con_iva\n\t\t\t\telse:\n\t\t\t\t\tprecio_desc=cc.id_producto.precio\n\t\t\t\tcarrito.append({'nombre':cc.id_producto.nombre,'id':cc.id,'id_producto':cc.id_producto.id,'precio':precio_desc,'nombre':cc.id_producto.nombre,'nom_img':nom_img,'cantidad':cc.cantidad,'talla':cc.talla.talla})\t\t\t\t\t\n\t\treturn Response(carrito)\n\tif request.method==\"POST\":\n\t\terror=[]\n\t\ttry:\t\t\t\n\t\t\t#parametros\n\t\t\tid_producto=request.POST.get(\"id_producto\")\n\t\t\tsession=request.POST.get(\"session\")\n\t\t\tcantidad=int(request.POST.get(\"cantidad\"))\n\t\t\tid_talla=request.POST.get(\"id_talla\")\n\t\t\t\n\t\t\t#parametros\n\t\t\ttry:\n\t\t\t\tproducto=Productos.objects.get(id=id_producto)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\t\t\t\t\n\t\t\t\terror.append({'estatus':0,'msj':'El producto no existe.'})\n\t\t\t\treturn Response(error)\n\t\t\t\n\t\t\ttry:\n\t\t\t\ttalla=Tallas.objects.get(id=id_talla)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\terror.append({'estatus':0,'msj':'Erro al encontrar la talla solicitada.'})\t\t\t\t\n\t\t\t\treturn Response(error)\n\t\t\ttry:\n\t\t\t\t#en caso de que exista ya un registro en el carrito que cumpla con la session, producto y talla\n\t\t\t\t#ya no crea nuevo registro, solo incrementa su existencia.\n\t\t\t\tcc=Carrito_Compras.objects.get(session=session,id_producto=producto,talla=talla)\n\t\t\t\tcc.cantidad=int(cc.cantidad)+int(cantidad)\n\t\t\t\tcc.save()\n\t\t\texcept Exception as e:\n\t\t\t\t#en caso de que no existe el registro en el carrito que cumpla con la session, producto y talla\n\t\t\t\t#se crea uno nuevo.\n\t\t\t\tprint(e)\n\t\t\t\tCarrito_Compras.objects.create(session=session,id_producto=producto,cantidad=cantidad,talla=talla)\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\terror.append({'estatus':1,'msj':'El producto se agrego correctamente.'})\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\terror.append({'estatus':0,'msj':'Error al agregar el producto, intente nuevamente.'})\n\t\treturn Response(error)\n\n#al parecer django no soporta el metodo delete, por lo tanto eliminar un producto del carrito\n# se ara atravez de una url diferente por el metodo post\n@api_view(['POST'])\t\t\ndef api_elimina_carrito_compras(request):\n\tif request.method==\"POST\":\n\t\terror=[]\n\t\ttry:\t\t\t\n\t\t\tprint(request.POST.get(\"id\"))\n\t\t\t#parametros\n\t\t\tid_carrito=request.POST.get(\"id\")\t\t\t\n\t\t\t#parametros\n\t\t\tCarrito_Compras.objects.get(id=id_carrito).delete()\n\t\t\terror.append({\"estatus\":1,\"msj\":\"\"})\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\terror.append({\"estatus\":0,\"msj\":\"Error al eliminar el producto del carrito, intente nuevamente.\"})\n\t\treturn Response(error)\n\n\n\n\t\t\n#guardamos la venta, almacenando la direccion de envio \n#parametros:\n#\tsession:\n#\t\t\tSolo recibe este parametro ya que el resto de la informacion esta ya almacenada ligada a esta session.\n@api_view(['POST'])\t\t\ndef api_crea_venta(request):\n\tif request.method==\"POST\":\n\t\tfolio_venta=[]\n\t\tsession=request.POST.get(\"session\")\n\t\tc_l=Clientes_Logueados.objects.get(session=session)\n\t\tcliente=c_l.cliente\n\t\t#obtenemos la inormacion guardada en la session\n\t\tc_c=Carrito_Compras.objects.filter(session=session)\n\t\tif c_c.exists():\n\t\t\ttry:\n\t\t\t\td_e=Direccion_Envio_Cliente_Temporal.objects.get(session=session)\t\t\n\t\t\texcept:\n\t\t\t\t#sillega a la except es porque no tiene 
captured for this session\n\t\t\t\tfolio_venta.append({\"estatus\":0,\"msj\":\"No se ha agregado la direccion de envio.\"})\t\t\t\n\t\t\t\treturn Response(folio_venta)\t\t\t\n\t\t\t#compute the sale totals\n\t\t\ttotal=0.00\n\t\t\tdescuento=0.00\n\t\t\tiva=0.00\n\t\t\tfor cc in c_c:\t\t\t\t\t\n\t\t\t\t#compute the sale price (applying the discount, if any)\t\t\t\t\t\n\t\t\t\tif cc.id_producto.descuento!=0:\n\t\t\t\t\t#get the price before VAT\n\t\t\t\t\tsub_total=decimal.Decimal(cc.id_producto.precio)/decimal.Decimal(1.16)\n\t\t\t\t\t#get this item's discount scaled by its quantity, then accumulate it\n\t\t\t\t\t#(the previous version multiplied the running discount total by the quantity, compounding earlier items)\n\t\t\t\t\tdesc_item=(decimal.Decimal(sub_total)*(decimal.Decimal(cc.id_producto.descuento/100.00)))*decimal.Decimal(cc.cantidad)\n\t\t\t\t\tdescuento=decimal.Decimal(descuento)+desc_item\t\t\t\t\t\t\t\t\n\t\t\t\t\t#get the subtotal with the discount applied\n\t\t\t\t\tsub_total_con_desc=decimal.Decimal(sub_total)-(decimal.Decimal(sub_total)*(decimal.Decimal(cc.id_producto.descuento/100.00)))\n\t\t\t\t\t#once we have the discounted price, we add the VAT back and multiply by the quantity\n\t\t\t\t\tsub_total_con_iva=(decimal.Decimal(sub_total_con_desc)*decimal.Decimal(1.16))*decimal.Decimal(cc.cantidad)\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tsub_total_con_iva=decimal.Decimal(cc.id_producto.precio)*decimal.Decimal(cc.cantidad)\t\t\t\t\n\t\t\t\t#running total, VAT included\n\t\t\t\ttotal=decimal.Decimal(total)+decimal.Decimal(sub_total_con_iva)\n\t\t\t\t#the subtotal excludes VAT\n\t\t\t\tsub_total=decimal.Decimal(decimal.Decimal(total)/decimal.Decimal(1.16))+decimal.Decimal(descuento)\n\t\t\t\t#the VAT is the difference between the total and the subtotal\n\t\t\t\t#iva=decimal.Decimal(sub_total)*decimal.Decimal(0.16)\n\t\t\t#CREATE THE SALE\n\t\t\tiva=decimal.Decimal(decimal.Decimal(sub_total)-decimal.Decimal(descuento))*decimal.Decimal(0.16)\n\t\t\tv=Venta(total=total,sub_total=sub_total,iva=iva,descuento=descuento,cliente=cliente)\n\t\t\tv.save()\n\t\t\t\n\t\t\t#iterate over the cart products to create the sale detail records\n\t\t\tfor cc in c_c:\n\t\t\t\t#compute the sale price (applying the discount, if any)\t\n\t\t\t\tprecio_unitario=0.00\n\t\t\t\tdescuento=0.00\n\t\t\t\tiva=0.00\n\t\t\t\tprecio_total=0.00\n\t\t\t\tif cc.id_producto.descuento!=0:\n\t\t\t\t\tprecio_unitario=decimal.Decimal(cc.id_producto.precio)/decimal.Decimal(1.16)#price before VAT\n\t\t\t\t\tdescuento=decimal.Decimal(precio_unitario)*(decimal.Decimal(cc.id_producto.descuento)/decimal.Decimal(100))\n\t\t\t\t\tiva=(decimal.Decimal(precio_unitario)-decimal.Decimal(descuento))*decimal.Decimal(0.16)\n\t\t\t\t\tprecio_total=(decimal.Decimal(precio_unitario)-decimal.Decimal(descuento)+decimal.Decimal(iva))*decimal.Decimal(cc.cantidad)\n\t\t\t\telse:\n\t\t\t\t\tprecio_unitario=decimal.Decimal(cc.id_producto.precio)/decimal.Decimal(1.16)\n\t\t\t\t\tiva=decimal.Decimal(precio_unitario)*decimal.Decimal(0.16)\n\t\t\t\t\tprecio_total=decimal.Decimal(cc.id_producto.precio)*decimal.Decimal(cc.cantidad)\n\t\t\t\t#save the sale detail record\n\t\t\t\td=Detalle_Venta(id_venta=v,id_producto=cc.id_producto,cantidad=cc.cantidad,talla=cc.talla,precio_unitario=precio_unitario,descuento=descuento,iva=iva,precio_total=precio_total)\t\t\t\t\n\t\t\t\td.save()\n\t\t\t#attach the shipping address to the 
sale.\n\t\t\tdir_envio=Direccion_Envio_Venta(id_venta=v,nombre_recibe=d_e.nombre,apellido_p=d_e.apellido_p,apellido_m=d_e.apellido_m,calle=d_e.calle,numero_interior=d_e.numero_interior,numero_exterior=d_e.numero_exterior,cp=d_e.cp,municipio=d_e.municipio,estado=d_e.estado,pais=d_e.pais,telefono=d_e.telefono,correo_electronico=d_e.e_mail,referencia=d_e.referencia)\n\t\t\tdir_envio.save()\n\t\t\t#delete the client's session information\n\t\t\tc_c.delete()\n\t\t\td_e.delete()\n\t\t\tfolio_venta.append({\"estatus\":1,\"msj\":\"El folio de su transaccion es: \"+str(v.id)})\t\t\t\t\t\t\t\n\t\telse:\n\t\t\tfolio_venta.append({\"estatus\":0,\"msj\":\"No tiene productos agregados al carrito de compras.\"})\n\t\treturn Response(folio_venta)\n\n\n#get the number of products in the shopping cart.\n#parameters:\n#\tsession\n@api_view(['GET'])\ndef api_cont_productos_carrito(request):\t\t\n\tif request.method==\"GET\":\t\t\t\t\n\t\tcontador=[]\t\t\n\t\tsession=request.GET.get(\"session\")\t\t\t\t\n\t\t#get the products currently in the shopping cart.\n\t\tc_c=Carrito_Compras.objects.filter(session=session).aggregate(Sum('cantidad'))\t\t\n\t\tcontador.append(c_c)\t\t\n\t\t\n\t\treturn Response(contador)\n\t\t\n\n#get the list of sales for the logged-in client.\n@api_view(['GET'])\ndef api_consulta_ventas(request):\n\trespuesta=[]\n\ttry:\n\t\tsession=request.GET.get(\"session\")\n\t\tc_l=Clientes_Logueados.objects.get(session=session)\n\t\tcliente=c_l.cliente\n\t\tventas=Venta.objects.filter(cliente=cliente).order_by('-fecha')\n\t\tfor v in ventas:\t\t\t\n\t\t\trespuesta.append({\"estatus\":\"1\",\"msj\":\"\",\"id_venta\":v.id,\"descuento\":v.descuento,\"fecha\":v.fecha,\"sub_total\":v.sub_total,\"iva\":v.iva,\"total\":v.total,\"link_seg\":v.link_seguimiento})\n\texcept Exception as e:\n\t\tprint(e)\n\t\trespuesta.append({\"estatus\":\"0\",\"msj\":\"Error al consultar las ventas, intente refrescar la pagina.\"})\n\treturn Response(respuesta)\n\n@api_view(['GET'])\t\ndef api_consulta_detalle_venta(request):\n\trespuesta=[]\n\ttry:\n\t\tid_venta=request.GET.get(\"id_venta\")\n\t\tventa=Venta.objects.get(id=id_venta)\n\t\tprint(venta)\n\t\td_v=Detalle_Venta.objects.filter(id_venta=venta)\n\t\t\n\t\tfor v in d_v:\n\t\t\ttry:\n\t\t\t\tip=Img_Producto.objects.get(id_producto=v.id_producto,orden=1)\n\t\t\t\tnom_img=ip.nom_img\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tnom_img=\"\"\t\t\t\t\n\t\t\trespuesta.append({\"estatus\":\"1\",\"msj\":\"\",\"nom_img\":nom_img,'nombre':v.id_producto.nombre,\"cantidad\":v.cantidad,\"talla\":v.talla.talla,\"precio_unitario\":v.precio_unitario})\n\texcept Exception as e:\n\t\tprint(e)\n\t\trespuesta.append({\"estatus\":\"0\",\"msj\":\"Error al consultar el detalle de la venta.\"})\n\treturn Response(respuesta)","sub_path":"ventas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"181560923","text":"## Ch 4 Challenges\r\n## Counting program\r\n\r\nprint (\"Welcome to the counter program. You give me a number to start from,\\n\\\r\na number to end at, and an interval to count by. 
I will then count for you.\")\r\n\r\nminimum = int(input(\"What is the starting number?:\\t\"))\r\nmaximum = int(input(\"What is your ending number?:\\t\"))\r\ninterval = int(input(\"How much do you want me to skip by?:\\t\"))\r\n\r\nfor i in range(minimum, (maximum + 1), interval):\r\n    print (i)\r\n\r\nreply = input(\"Did I do as promised? Yes or no?:\\t\")\r\n\r\nif reply.lower() == \"no\":\r\n    print(\"Please talk to A7mad Basha and ask him to fix this code\")\r\nelse:\r\n    input(\"Press Enter to exit\")\r\n","sub_path":"Python 31 Programs/Ch 4 Challenges/Counting Program.py","file_name":"Counting Program.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"95304975","text":"import pandas as pd \nimport matplotlib.pyplot as plt \n\ndf= pd.DataFrame({\n    'name':['john', 'mary', 'peter','jeff', 'bill', 'lisa', 'jose'],\n    'age':[23, 78, 22, 19, 45, 33, 20],\n    'gender':['M', 'F', 'M', 'M', 'M', 'F', 'M'],\n    'state':['california', 'dc', 'california', 'dc', 'california', 'texas', 'texas'],\n    'num_children':[2,0,0,3,2,1,4],\n    'num_pets':[5,1,0,5,2,2,3]\n    })\n\ndf.groupby(['gender', 'state'])['age'].size().unstack().plot(kind='bar', stacked= True)\nplt.show()\n\n# plt.savefig('output.png')\n","sub_path":"scripts/matplotlib/55groupby_unstack2.py","file_name":"55groupby_unstack2.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586452707","text":"# This is the file you will need to edit in order to complete assignment 1\n# You may create additional functions, but all code must be contained within this file\n\n\n# Some starting imports are provided, these will be accessible by all functions.\n# You may need to import additional items\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport json\nfrom pathlib import Path\nimport re\nfrom numpy import arange\nimport seaborn as sns\nimport pathlib\nfrom nltk.stem.porter import *\nimport nltk\nfrom nltk.corpus import stopwords\nimport math\nfrom numpy import dot\nfrom numpy.linalg import norm \nfrom nltk.stem import WordNetLemmatizer \nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n# You should use these two variables to refer to the location of the JSON data file and the folder containing the news articles.\n# Under no circumstances should you hardcode a path to the folder on your computer (e.g. 
C:\\Chris\\Assignment\\data\\data.json) as this path will not exist on any machine but yours.\ndatafilepath = 'data/data.json'\narticlespath = 'data/football'\n\n# dependencies\nnltk.download('stopwords')\nnltk.download('punkt')\n\ndef task1():\n    fd = open(datafilepath)\n    dt = json.load(fd)\n    dt_lst = dt['teams_codes']\n    dt_sorted = sorted(dt_lst)\n    fd.close()\n    return dt_sorted\n    \ndef task2():\n    # read the data\n    fd = open(datafilepath)\n    dt = json.load(fd)\n    fd.close()\n    dt_club = dt['clubs']\n    #extract the data and store them in lists\n    lst1 = []\n    lst2 = []\n    for club in dt_club:\n        lst1.append(club['goals_scored'])\n        lst2.append(club['goals_conceded'])\n    # Organise the data into one dataframe\n    df = pd.DataFrame({'goals_scored_by_team': lst1, 'goals_scored_against_team': lst2})\n    df.index = dt['teams_codes']\n    df.index.name = 'teams_codes'\n    df_out = df.sort_index()\n    return df_out.to_csv('task2.csv')\n    \ndef task3():\n    # decide data structure and initialize\n    score_dic = {}\n    pat_score = r' (\\d{1,2})-(\\d{1,2})'\n    \n    # iteratively read the files and extract the data using regular expressions \n    folder = Path(articlespath).rglob('*[0-9].txt')\n    for article in folder:\n        art = open(article,\"r\")\n        content = art.read()\n        art.close()\n        match_scores = re.findall(pat_score, content)\n        if match_scores != []:\n            max_score = 0\n            for score in match_scores:\n                temp_score = int(score[0]) + int(score[1])\n                max_score = max(temp_score, max_score)\n            score_dic[article.name] = max_score \n        else:\n            score_dic[article.name] = 0 \n    \n    # sort and convert into a pandas series \n    ds_sorted = pd.Series(score_dic).sort_index() \n    ds_sorted.name = 'total_goals' \n    ds_sorted.index.name = 'filename'\n    return ds_sorted.to_csv('task3.csv')\n\ndef task4():\n    # read and sort the data\n    df = pd.read_csv('task3.csv') \n    df_sorted = df.sort_values(by = ['total_goals'])\n    dt = df_sorted['total_goals']\n    \n    # plot, emphasizing the outliers in the boxplot\n    boxplot_fig = plt.figure(num = 1, figsize = (10,6))\n    flier = dict(markerfacecolor='r', marker='o', markersize = 6)\n    plt.boxplot(dt, flierprops = flier)\n    plt.xlabel('articles')\n    plt.ylabel('total goals')\n    plt.title('maximum total goals in each article')\n    \n    return boxplot_fig.savefig('task4.png', bbox_inches='tight')\n    \ndef task5():\n    # extract the list of club names\n    fd = open(datafilepath)\n    dt = json.load(fd)\n    dt_lst = dt['participating_clubs']\n    lst_sorted = sorted(dt_lst)\n    fd.close()\n    \n    # initialization\n    dic = dict.fromkeys(lst_sorted, 0)\n    \n    # iteratively read the files and extract the data using regular expressions \n    folder = Path(articlespath).rglob('*[0-9].txt')\n    for article in folder:\n        art = open(article,\"r\")\n        content = art.read() \n        art.close()\n        for club_name in lst_sorted:\n            if re.search(club_name, content):\n                dic[club_name] += 1\n            else:\n                continue\n    \n    # plot the bar chart\n    bar_fig = plt.figure(num =2, figsize = (10,8))\n    clubs = list(dic.keys())\n    times = list(dic.values())\n    \n    plt.bar(arange(len(times)), times)\n    plt.xticks(arange(len(clubs)), clubs, rotation = 90,)\n    \n    plt.xlabel('club name') \n    plt.ylabel('number of mentions') \n\n    plt.title('Number of mentions for each club') \n    \n    ds = pd.Series(dic)\n    ds.name = 'number_of_mentions' \n    ds.index.name = 'club_name'\n    return ds.to_csv('task5.csv'), bar_fig.savefig('task5.png', bbox_inches='tight') \n\n    \ndef task6():\n    path = pathlib.Path('task5.csv')\n    if path.exists():\n        df_tsk5 = pd.read_csv('task5.csv')\n    else:\n        
task5()\n        df_tsk5 = pd.read_csv('task5.csv')\n\n    club_name = df_tsk5['club_name']\n    df = pd.DataFrame(data = 0, columns = club_name, index = club_name)\n    length = len(club_name)\n    folder = Path(articlespath).rglob('*[0-9].txt')\n    for article in folder:\n        art = open(article,\"r\")\n        content = art.read() \n        art.close()\n        for a in range(0, length):\n            for b in range(a, length):\n                if re.search(club_name[a], content) and re.search(club_name[b], content):\n                    df.iloc[b,a] += 1\n    for a in range(0, length):\n        for b in range(a, length): \n            clb1_m = df_tsk5['number_of_mentions'][a]\n            clb2_m = df_tsk5['number_of_mentions'][b]\n            if clb1_m == 0 and clb2_m == 0:\n                s = 1 \n            else:\n                s = df.iloc[b,a] * 2 / (clb1_m + clb2_m)\n            df.iloc[b,a] = s \n\n    mask = np.zeros_like(df, dtype = bool)\n    mask[np.triu_indices_from(mask)] = True\n    \n    plt.figure(num =3, figsize = (8,6))\n    sns.heatmap(df,cmap='Greens',mask=mask, xticklabels = True)\n    plt.title('Heatmap of similarity between clubs') \n    \n    return plt.savefig('task6.png', bbox_inches='tight')\n\n\ndef task7():\n\n    path2 = pathlib.Path('task2.csv')\n    if path2.exists():\n        df_tsk2 = pd.read_csv('task2.csv')\n    else:\n        task2()\n        df_tsk2 = pd.read_csv('task2.csv')\n\n    path1 = pathlib.Path('task5.csv')\n    if path1.exists():\n        df_tsk5 = pd.read_csv('task5.csv')\n    else:\n        task5()\n        df_tsk5 = pd.read_csv('task5.csv')\n\n    dic = {'goals_scored_by_team': df_tsk2.iloc[:,1], 'number_of_mentions': df_tsk5.iloc[:,1]}\n    df = pd.DataFrame(dic)\n    df = df.sort_values(by = 'goals_scored_by_team')\n\n    plt.figure(num = 4, figsize = (8,6))\n    plt.scatter(df.iloc[:,0], df.iloc[:,1], color='green')\n    plt.ylabel(\"number_of_mentions\")\n    plt.xlabel(\"goals_scored_by_team\")\n    plt.grid(True)\n    plt.title('scatterplot comparing the number of mentions and goals scored by each team') \n    \n    return plt.savefig('task7.png', bbox_inches='tight')\n    \ndef task8(filepath):\n    fl = open(filepath)\n    dt = fl.read()\n    fl.close()\n\n    pt1 = r'[^a-zA-Z\\n\\t\\s]'\n    pt2 = r'\\n|\\t|\\s\\s+'\n\n    dt = re.sub(pt1, ' ', dt)\n    dt = re.sub(pt2, ' ', dt)\n    dt = dt.lower()\n    dt = nltk.word_tokenize(dt)\n\n    stopWords = set(stopwords.words('english'))\n    dt = [wd for wd in dt if wd not in stopWords and len(wd) > 1 ]\n    return dt\n    \ndef task9():\n    dic = {}\n    nm = []\n    wd = []\n    out = []\n    folder = Path(articlespath).rglob('*[0-9].txt')\n    for article in folder:\n        nm.append(article.name)\n        lst = task8(article)\n        st = set(lst)\n        dic[article.name] = {}\n        for w in st:\n            fq = lst.count(w)\n            dic[article.name][w] = fq\n        wd = wd + lst\n    wd_set = set(wd)\n    x = len(nm)\n    y = len(wd_set)\n    array = np.zeros(shape = (x, y), dtype = int)\n    wd_lst = list(wd_set)\n    for a in range(0, x):\n        nm_ar = nm[a]\n        for b in range(0, y):\n            wd_ar = wd_lst[b]\n            try:\n                array[a][b] = dic[nm_ar][wd_ar]\n            except KeyError:\n                continue\n    transformer = TfidfTransformer()\n    tfidf = transformer.fit_transform(array)\n    arry_tfidf = tfidf.toarray()\n    for a in range(0, x):\n        nm_a = nm[a]\n        for b in range(a+1, x):\n            nm_b = nm[b]\n            cosim = dot(arry_tfidf[a], arry_tfidf[b]) / (norm(arry_tfidf[a]) * norm(arry_tfidf[b]))\n            out.append([nm_a, nm_b, cosim])\n    df = pd.DataFrame(data = out, columns=('article1', 'article2', 'similarity'))\n    df_sorted = df.sort_values(by = 'similarity', ascending=False)\n    df_top10 = df_sorted.head(10)\n    return df_top10.to_csv('task9.csv', index = None)\n","sub_path":"assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":8789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"64636837","text":"#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Copyright(C), 2019, Zhang Chengwei.\n#\n# Author: zhang\n# DATE: 2019/5/26 16:28\n# IDE_NAME: PyCharm\n# Filename: urls.py\n# Version: 1.0.0\n# Description: \n# History: \n\nfrom django.urls import path\nfrom courses.views import CourseView, CourseDetailView, CourseVideoView, CourseCommentView, AddCommentView\n\nurlpatterns = [\n path('list/', CourseView.as_view(), name='course_list'),\n path('/detail/', CourseDetailView.as_view(), name='course_detail'),\n path('/video/', CourseVideoView.as_view(), name='course_video'),\n path('/comment/', CourseCommentView.as_view(), name='course_comment'),\n path('add_comment/', AddCommentView.as_view(), name='add_comment'),\n]","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"437851159","text":"# main app\n\n# standard\nimport base64\nimport http\nimport io\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport threading\nimport time\n\n# open community\nimport boto3 # pip install boto3\nimport cv2 # pip install opencv-python\n#import dronekit # pip install dronekit\nimport dronekit_sitl # pip install dronekit_sitl\nfrom flask import Flask # \nimport numpy # \nfrom PIL import Image # pip install Pillow\nimport pymavlink # pip install pymavlink\n#import pyplot \n\n# unknown\nimport imutils # pip install imutils\n\n\ndef load_configuration():\n\tf = open(\"config.json\", \"r\")\n\t#config = f.read()\n\tconfig = json.load(f)\n\tf.close()\n\treturn config\n\ndef loadRekognition(config):\n\tclient = boto3.client(\n\t\t'rekognition',\n\t\tregion_name = config['region'],\n\t\taws_access_key_id = config['ACCESS_KEY'],\n\t\taws_secret_access_key = config['SECRET_KEY'],\n\t\t#aws_session_token=SESSION_TOKEN,\n\t\t)\n\treturn client\n\ndef prepImage(frame):\n\tmyimgTuple = cv2.imencode('.jpg', frame)\n\tprint(type(myimgTuple))\n\tmyimg = myimgTuple[1].tostring()\n\treturn myimg\n\ndef find_marker(image):\n\t# convert the image to grayscale, blur it, and detect edges\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tgraygb = cv2.GaussianBlur(gray, (5, 5), 0)\n\tedged = cv2.Canny(graygb, 35, 125)\n \n\t# find the contours in the edged image and keep the largest one;\n\t# we'll assume that this is our piece of paper in the image\n\tcnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\tc = max(cnts, key = cv2.contourArea)\n \n\t# compute the bounding box of the of the paper region and return it\n\treturn cv2.minAreaRect(c)\n\ndef distance_to_camera(knownWidth, focalLength, perWidth):\n\tmathz = (knownWidth * focalLength) / perWidth\n\treturn mathz\n\nKNOWN_DISTANCE = 35.0\nKNOWN_WIDTH = 11.0\n\ndef obtainDistance(frame):\n\tmarker = find_marker(frame)\n\tfocalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH\n\ndef processResponse(response, frame):\n\tfor x in response['Labels']:\n\t\txname = x['Name']\n\t\tlogging.debug(x)#print(x)\n\t\tif re.search('balloon', xname, re.IGNORECASE):\n\t\t\tobtainDistance(frame)\n\ndef main():\n\tprint('running Fulton app...')\n\tlogging.basicConfig(filename='debug.log',level=logging.DEBUG)\n\tconfig = load_configuration()\n\tcap = cv2.VideoCapture(config['camera_number'])\n\tframe_width = cap.get(3)\n\tframe_height = cap.get(4)\n\tapp_on = True\n\tpreviewTitle = 'preview'\n\n\tclient = 
loadRekognition(config)\n\n\tprint('w: ' + str(frame_width) + '\\nh: ' + str(frame_height))\n\n\tsitl = dronekit_sitl.start_default()\n\tconnection_string = sitl.connection_string()\n\n\t#vehicle = connect(connection_string, wait_key=True)\n\n\t# Get some vehicle attributes (state)\n\t#print(\"Get some vehicle attribute values:\")\n\t#print(\" GPS: %s\" % vehicle.gps_0)\n\t#print(\" Battery: %s\" % vehicle.battery)\n\t#print(\" Last Heartbeat: %s\" % vehicle.last_heartbeat)\n\t#print(\" Is Armable?: %s\" % vehicle.is_armable)\n\t#print(\" System status: %s\" % vehicle.system_status.state)\n\t#print(\" Mode: %s\" % vehicle.mode.name) # settable\n\n\twhile(app_on):\n\t\tret, frame = cap.read()\n\n\t\tif config['type'] == '3d':\n\t\t\tpass\n\t\telse:\n\t\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\t\tcv2.imshow(previewTitle, gray)\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\tlogging.debug(type(frame))\n\t\t\t\tmyimg = prepImage(frame)\n\t\t\t\tlogging.debug(type(myimg))\n\n\t\t\t\tresponse = client.detect_labels(Image={'Bytes': myimg})\n\t\t\t\tprocessResponse(response, frame)\n\t\t\t\tcv2.imwrite(\"images/frame.jpg\", frame)\n\t\t\t\tbreak\n\n\tcap.release()\n\tcv2.destroyAllWindows()\n\t#vehicle.close()\n\tsitl.stop()\n\n\tprint('')\n\tprint('closing Fulton app...')\n\n\nmain()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"66910025","text":"from reportlab.lib import colors\nfrom reportlab.lib.colors import black, white\nfrom reportlab.graphics.shapes import Polygon, String, Drawing, Group, Rect\nfrom reportlab.graphics.widgetbase import Widget\nfrom reportlab.lib.attrmap import *\nfrom reportlab.lib.validators import *\nfrom reportlab.lib.units import cm\nfrom reportlab.pdfbase.pdfmetrics import getFont\nfrom reportlab.graphics.widgets.grids import ShadedRect\n\nclass SlideBox(Widget):\n \"\"\"Returns a slidebox widget\"\"\"\n _attrMap = AttrMap(\n labelFontName = AttrMapValue(isString, desc=\"Name of font used for the labels\"),\n labelFontSize = AttrMapValue(isNumber, desc=\"Size of font used for the labels\"),\n labelStrokeColor = AttrMapValue(isColorOrNone, desc=\"Colour for for number outlines\"),\n labelFillColor = AttrMapValue(isColorOrNone, desc=\"Colour for number insides\"),\n startColor = AttrMapValue(isColor, desc='Color of first box'),\n endColor = AttrMapValue(isColor, desc='Color of last box'),\n numberOfBoxes = AttrMapValue(isInt, desc='How many boxes there are'),\n trianglePosition = AttrMapValue(isInt, desc='Which box is highlighted by the triangles'),\n triangleHeight = AttrMapValue(isNumber, desc=\"Height of indicator triangles\"),\n triangleWidth = AttrMapValue(isNumber, desc=\"Width of indicator triangles\"),\n triangleFillColor = AttrMapValue(isColor, desc=\"Colour of indicator triangles\"),\n triangleStrokeColor = AttrMapValue(isColorOrNone, desc=\"Colour of indicator triangle outline\"),\n triangleStrokeWidth = AttrMapValue(isNumber, desc=\"Colour of indicator triangle outline\"),\n boxHeight = AttrMapValue(isNumber, desc=\"Height of the boxes\"),\n boxWidth = AttrMapValue(isNumber, desc=\"Width of the boxes\"),\n boxSpacing = AttrMapValue(isNumber, desc=\"Space between the boxes\"),\n boxOutlineColor = AttrMapValue(isColorOrNone, desc=\"Colour used to outline the boxes (if any)\"),\n boxOutlineWidth = AttrMapValue(isNumberOrNone, desc=\"Width of the box outline (if any)\"),\n leftPadding = 
AttrMapValue(isNumber, desc='Padding on left of drawing'),\n rightPadding = AttrMapValue(isNumber, desc='Padding on right of drawing'),\n topPadding = AttrMapValue(isNumber, desc='Padding at top of drawing'),\n bottomPadding = AttrMapValue(isNumber, desc='Padding at bottom of drawing'),\n background = AttrMapValue(isColorOrNone, desc='Colour of the background to the drawing (if any)'),\n sourceLabelText = AttrMapValue(isNoneOrString, desc=\"Text used for the 'source' label (can be empty)\"),\n sourceLabelOffset = AttrMapValue(isNumber, desc='Padding at bottom of drawing'),\n sourceLabelFontName = AttrMapValue(isString, desc=\"Name of font used for the 'source' label\"),\n sourceLabelFontSize = AttrMapValue(isNumber, desc=\"Font size for the 'source' label\"),\n sourceLabelFillColor = AttrMapValue(isColorOrNone, desc=\"Colour ink for the 'source' label (bottom right)\"),\n )\n\n def __init__(self):\n self.labelFontName = \"Helvetica-Bold\"\n self.labelFontSize = 10\n self.labelStrokeColor = black\n self.labelFillColor = white\n self.startColor = colors.Color(232/255.0,224/255.0,119/255.0)\n self.endColor = colors.Color(25/255.0,77/255.0,135/255.0)\n self.numberOfBoxes = 7\n self.trianglePosition = 7\n self.triangleHeight = 0.12*cm\n self.triangleWidth = 0.38*cm\n self.triangleFillColor = white\n self.triangleStrokeColor = black\n self.triangleStrokeWidth = 0.58\n self.boxHeight = 0.55*cm\n self.boxWidth = 0.73*cm\n self.boxSpacing = 0.075*cm\n self.boxOutlineColor = black\n self.boxOutlineWidth = 0.58\n self.leftPadding=5\n self.rightPadding=5\n self.topPadding=5\n self.bottomPadding=5\n self.background=None\n self.sourceLabelText = \"Source: ReportLab\"\n self.sourceLabelOffset = 0.2*cm\n self.sourceLabelFontName = \"Helvetica-Oblique\"\n self.sourceLabelFontSize = 6\n self.sourceLabelFillColor = black\n\n def _getDrawingDimensions(self):\n tx=(self.numberOfBoxes*self.boxWidth)\n if self.numberOfBoxes>1: tx=tx+((self.numberOfBoxes-1)*self.boxSpacing)\n tx=tx+self.leftPadding+self.rightPadding\n ty=self.boxHeight+self.triangleHeight\n ty=ty+self.topPadding+self.bottomPadding+self.sourceLabelOffset+self.sourceLabelFontSize\n return (tx,ty)\n\n def _getColors(self):\n # for calculating intermediate colors...\n numShades = self.numberOfBoxes+1\n fillColorStart = self.startColor\n fillColorEnd = self.endColor\n colorsList =[]\n\n for i in range(0,numShades):\n colorsList.append(colors.linearlyInterpolatedColor(fillColorStart, fillColorEnd, 0, numShades-1, i))\n return colorsList\n\n def demo(self,drawing=None):\n if not drawing:\n tx,ty=self._getDrawingDimensions()\n drawing = Drawing(tx,ty)\n drawing.add(self.draw())\n return drawing\n\n def draw(self):\n g = Group()\n ys = self.bottomPadding+(self.triangleHeight/2)+self.sourceLabelOffset+self.sourceLabelFontSize\n if self.background:\n x,y = self._getDrawingDimensions()\n g.add(Rect(-self.leftPadding,-ys,x,y,\n strokeColor=None,\n strokeWidth=0,\n fillColor=self.background))\n\n ascent=getFont(self.labelFontName).face.ascent/1000.\n if ascent==0: ascent=0.718 # default (from helvetica)\n ascent=ascent*self.labelFontSize # normalize\n\n colorsList = self._getColors()\n\n # Draw the boxes - now uses ShadedRect from grids\n x=0\n for f in range (0,self.numberOfBoxes):\n sr=ShadedRect()\n sr.x=x\n sr.y=0\n sr.width=self.boxWidth\n sr.height=self.boxHeight\n sr.orientation = 'vertical'\n sr.numShades = 30\n sr.fillColorStart = colorsList[f]\n sr.fillColorEnd = colorsList[f+1]\n sr.strokeColor = None\n sr.strokeWidth = 0\n\n g.add(sr)\n\n 
g.add(Rect(x,0,self.boxWidth,self.boxHeight,\n strokeColor=self.boxOutlineColor,\n strokeWidth=self.boxOutlineWidth,\n fillColor=None))\n\n g.add(String(x+self.boxWidth/2.,(self.boxHeight-ascent)/2.,\n text = str(f+1),\n fillColor = self.labelFillColor,\n strokeColor=self.labelStrokeColor,\n textAnchor = 'middle',\n fontName = self.labelFontName,\n fontSize = self.labelFontSize))\n x=x+self.boxWidth+self.boxSpacing\n\n #do triangles\n xt = (self.trianglePosition*self.boxWidth)\n if self.trianglePosition>1:\n xt = xt+(self.trianglePosition-1)*self.boxSpacing\n xt = xt-(self.boxWidth/2)\n g.add(Polygon(\n strokeColor = self.triangleStrokeColor,\n strokeWidth = self.triangleStrokeWidth,\n fillColor = self.triangleFillColor,\n points=[xt,self.boxHeight-(self.triangleHeight/2),\n xt-(self.triangleWidth/2),self.boxHeight+(self.triangleHeight/2),\n xt+(self.triangleWidth/2),self.boxHeight+(self.triangleHeight/2),\n xt,self.boxHeight-(self.triangleHeight/2)]))\n g.add(Polygon(\n strokeColor = self.triangleStrokeColor,\n strokeWidth = self.triangleStrokeWidth,\n fillColor = self.triangleFillColor,\n points=[xt,0+(self.triangleHeight/2),\n xt-(self.triangleWidth/2),0-(self.triangleHeight/2),\n xt+(self.triangleWidth/2),0-(self.triangleHeight/2),\n xt,0+(self.triangleHeight/2)]))\n\n #source label\n if self.sourceLabelText != None:\n g.add(String(x-self.boxSpacing,0-(self.triangleHeight/2)-self.sourceLabelOffset-(self.sourceLabelFontSize),\n text = self.sourceLabelText,\n fillColor = self.sourceLabelFillColor,\n textAnchor = 'end',\n fontName = self.sourceLabelFontName,\n fontSize = self.sourceLabelFontSize))\n\n g.shift(self.leftPadding, ys)\n\n return g\n\n\nif __name__ == \"__main__\":\n d = SlideBox()\n d.demo().save(fnRoot=\"slidebox\")\n","sub_path":"src/reportlab/graphics/charts/slidebox.py","file_name":"slidebox.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"418195515","text":"import discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport datetime\r\n\r\nclass HelpCog(commands.Cog, name='Help'):\r\n '''Help formatter'''\r\n \r\n def __init__(self,bot):\r\n self.bot = bot\r\n\r\n @commands.command(hidden=True)\r\n async def help(self, ctx):\r\n embed = discord.Embed(title='**Commands**')\r\n cog_info = ''\r\n for command in sorted(self.bot.commands, key=lambda x: x.name):\r\n if not command.hidden:\r\n cog_info += f'**{command.name}** - {command.help}\\n\\n'\r\n embed.description = cog_info\r\n\r\n await ctx.send(embed=embed)\r\n \r\ndef setup(bot):\r\n bot.add_cog(HelpCog(bot))\r\n","sub_path":"src/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"57908999","text":"#! 
/usr/bin/env python\r\nimport simplejson\r\nimport app\r\nimport db\r\nimport recipes\r\nimport cStringIO\r\nimport sys\r\n\r\ndef call_remote(method, params, id):\r\n\r\n\td = dict(method=method, params=params, id=id)\r\n\tencoded = simplejson.dumps(d)\r\n\r\n\tenviron = {}\r\n\tenviron['PATH_INFO'] = '/rpc'\r\n\tenviron['REQUEST_METHOD'] = 'POST'\r\n\tenviron['CONTENT_LENGTH'] = len(encoded)\r\n\tenviron['wsgi.input'] = cStringIO.StringIO(encoded)\r\n\r\n\td = {}\r\n\tdef my_start_response(s, h, return_in=d):\r\n\t\td['status'] = s\r\n\t\td['headers'] = h\r\n\r\n\tsimpleApp = app.SimpleApp()\r\n\tresponse = simpleApp(environ, my_start_response)\r\n\r\n\tresults = simplejson.loads(response[0])\r\n\r\n\treturn results\r\n\r\ndef test_rpc_ConvertToMilliters():\r\n\r\n\tresults = call_remote(method='ConvertToMilliters', params=['1000 oz'], id=1)\r\n\tassert results['result'] == 29573.5, results['result']\r\n\r\ndef test_rpc_AddLiquorType():\r\n\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddLiquorType', params=['Jack Daniels', 'Old No. 7', 'whiskey'], id=1)\r\n\tassert db._check_bottle_type_exists('Jack Daniels', 'Old No. 7')\r\n\r\ndef test_rpc_AddToInventory():\r\n\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddLiquorType', params=['Jack Daniels', 'Old No. 7', 'whiskey'], id=1)\r\n\tcall_remote(method='AddToInventory', params=['Jack Daniels', 'Old No. 7', '1000 ml'], id=1)\r\n\r\n\tassert db.check_inventory('Jack Daniels', 'Old No. 7')\r\n\r\ndef test_rpc_AddRecipe():\r\n\t\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddRecipe', params=['rum and coke', 'rum,2 oz,coke,4 oz'], id=1)\r\n\tassert db.get_recipe('rum and coke')\r\n\r\ndef test_rpc_GetLiquorTypes():\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddLiquorType', params=['Jack Daniels', 'Old No. 7', 'whiskey'], id=1)\r\n\r\n\tresults = call_remote(method='GetLiquorTypes', params=[], id=1)\r\n\tassert results['result'] == [['Jack Daniels', 'Old No. 7', 'whiskey']]\r\n\r\ndef test_rpc_GetLiquorInventory():\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddLiquorType', params=['Jack Daniels', 'Old No. 7', 'whiskey'], id=1)\r\n\tcall_remote(method='AddToInventory', params=['Jack Daniels', 'Old No. 7', '1000 ml'], id=1)\r\n\tresults = call_remote(method='GetLiquorInventory', params=[], id=1)\r\n\r\n\tassert results['result'] == [['Jack Daniels', 'Old No. 
7']]\r\n\r\ndef test_rpc_GetRecipes():\r\n\tdb._reset_db()\r\n\tcall_remote(method='AddRecipe', params=['rum and coke', 'rum,2 oz,coke,4 oz'], id=1)\r\n\tresults = call_remote(method='GetRecipes', params=[], id=1)\r\n\r\n\tassert results['result'] == ['rum and coke']","sub_path":"drinkz/test_rpc.py","file_name":"test_rpc.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"432743028","text":"import re\n\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\ndef DV_maker(v):\n if v >= 2:\n return 11 - v\n return 0\n\n\nclass SignUpForm(UserCreationForm):\n\n def __init__(self, *args, **kwargs):\n super(SignUpForm, self).__init__(*args, **kwargs)\n self.fields['username'].label = 'CPF'\n self.fields['username'].help_text = None\n\n class Meta:\n model = User\n fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )\n\n def clean_username(self):\n cpf = self.cleaned_data['username']\n value = cpf\n if not value.isdigit():\n value = re.sub(\"[-\\.]\", \"\", value)\n orig_value = value[:]\n try:\n int(value)\n except ValueError:\n raise forms.ValidationError(\"CPF Inválido!\")\n if len(value) != 11:\n raise forms.ValidationError(\"CPF Inválido!\")\n orig_dv = value[-2:]\n\n new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))])\n new_1dv = DV_maker(new_1dv % 11)\n value = value[:-2] + str(new_1dv) + value[-1]\n new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))])\n new_2dv = DV_maker(new_2dv % 11)\n value = value[:-1] + str(new_2dv)\n if value[-2:] != orig_dv:\n raise forms.ValidationError(\"CPF Inválido!\")\n\n return cpf\n","sub_path":"event_site/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"391739394","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport tempfile\nimport logging\nimport json\nimport os\nimport gzip\n\n## Variables\nMITO = 'ftp://ftp.ncbi.nlm.nih.gov/refseq/release/mitochondrion/*.1.genomic.fna.gz'\nPLASTID = 'ftp://ftp.ncbi.nlm.nih.gov/refseq/release/plastid/*.1.genomic.fna.gz'\n\n\ndef parser():\n parser = argparse.ArgumentParser(description='Create organelle file')\n parser.add_argument('--out', help=\"location to write file containing organelles\")\n args = parser.parse_args()\n return args\n\ndef find_organelles():\n \"\"\" There are approximately twice as many organelle only genomes as complete\n eukaryotic genomes in refseq. 
We need to exclude these when building\n    the training set so we do not oversample them.\n    \"\"\"\n    logging.info(\"Downloading organelle sequences from Refseq\")\n    seqidlist = []\n    dtemp = tempfile.mkdtemp()\n    for item in [MITO, PLASTID]:\n        options = [\"wget\", item, \"-P\", dtemp,]\n        wgetout = subprocess.run(options, stderr=subprocess.PIPE)\n    for seqfile in os.listdir(dtemp):\n        with gzip.open(os.path.join(dtemp, seqfile), 'rt') as f:\n            for line in f:\n                if line.startswith(\">\"):\n                    ll = line.split(\" \")[0]\n                    seqid = ll[1:]\n                    seqidlist.append(seqid)\n    logging.info(\"{} organelle sequence accessions saved\".format(len(seqidlist)))\n    print(\"{} organelle sequence accessions saved\".format(len(seqidlist)))\n    #shutil.rmtree(dtemp)\n    return seqidlist\n\ndef main():\n    args = parser()\n    seqidlist = find_organelles()\n    with open(args.out, 'w') as f:\n        json.dump(seqidlist, f, ensure_ascii=False)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pipelines/create_organelles_file.py","file_name":"create_organelles_file.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"328965639","text":"# Exercise 4\n# This program opens the file romeo.txt and reads it line by line\n\n# List of unique words created\nuniqueWords = []\n\n# Open the file romeo.txt and read line by line\ntry:\n    readfile = open('romeo.txt')\n    for line in readfile:\n\n        # Split line into a list of words\n        words = line.split()\n        for word in words:\n\n            # Check if word is already in the list of unique words\n            if word in uniqueWords:\n                continue\n            uniqueWords.append(word)\n\n    # Prints the sorted list of unique words in alphabetical order\n    print(sorted(uniqueWords))\nexcept:\n    print(\"Unable to open file romeo.txt.\")\n    print(\"Please check and ensure the file is in the same directory\")\n","sub_path":"src/chapter8/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"68996049","text":"import boto\nfrom mypylibs.sqs_que import sqs_que\nfrom mypylibs.ecs import ecs_client\n\njob_list = [\n\t\t\t{\n\t\t\t\t\"que_name\":\"process_incoming\",\n\t\t\t\t\"que_region\":\"eu-west-1\",\n\t\t\t\t\"task_family\":\"sabstack\",\n\t\t\t\t\"task_name\":\"sabstack:1\",\n\t\t\t\t\"run_length\":600,\n\t\t\t\t\"max_concurrent\":2\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"que_name\":\"post_processing\",\n\t\t\t\t\"que_region\":\"eu-west-1\",\n\t\t\t\t\"task_family\":\"handstack\",\n\t\t\t\t\"task_name\":\"handstack:1\",\n\t\t\t\t\"run_length\":900,\n\t\t\t\t\"max_concurrent\":0\n\t\t\t}\n\t\t\t]\n\necs = ecs_client()\n\nwhile 1:\n\tfor job in job_list:\n\t\tque = sqs_que(job['que_name'], job['que_region'])\n\t\tque_length = que.getnumberofmessages()\n\n\t\tif ecs.number_of_running_tasks() != 0:\n\n\t\t\tresponse = ecs.list_tasks()\n\t\t\tfor task in response['taskArns']:\n\t\t\t\tcontinue\n\t\t\n\n\tbreak\n\n","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"211121407","text":"import syntax\r\nimport utils\r\nfrom logic import Trace\r\nfrom syntax import Expr\r\nfrom utils import Set\r\n\r\nimport itertools\r\nimport networkx # type: ignore\r\nfrom typing import List, Callable, Union, Dict, TypeVar, Tuple, Optional, cast\r\n\r\nT = TypeVar('T')\r\n\r\nclass RelationFact(object):\r\n    def __init__(self, rel: 
syntax.RelationDecl, els: List[str], polarity: bool):\r\n self._rel = rel\r\n self._els = els\r\n self._polarity = polarity\r\n\r\n def as_expr(self, els_trans: Callable[[str],str]) -> Expr:\r\n fact_free_vars = syntax.Apply(self._rel.name, [syntax.Id(None, els_trans(e)) for e in self._els])\r\n if not self._is_positive():\r\n fact_free_vars = syntax.Not(fact_free_vars)\r\n return fact_free_vars\r\n\r\n def involved_elms(self) -> List[str]:\r\n return self._els\r\n\r\n def _is_positive(self) -> bool:\r\n return self._polarity\r\n\r\n def __repr__(self) -> str:\r\n return \"RelationFact(rel=%s, els=%s, polarity=%s)\" % (self._rel, self._els, self._polarity)\r\n\r\n def __str__(self) -> str:\r\n return \"%s(%s) = %s\" % (self._rel.name, self._els, str(self._polarity))\r\n\r\nclass FunctionFact(object):\r\n def __init__(self, func: syntax.FunctionDecl, param_els: List[str], res_elm: str):\r\n self._func = func\r\n self._params_els = param_els\r\n self._res_elm = res_elm\r\n\r\n def as_expr(self, els_trans: Callable[[str],str]) -> Expr:\r\n e = syntax.AppExpr(None, self._func.name, [syntax.Id(None, els_trans(e)) for e in self._params_els])\r\n return syntax.Eq(e, syntax.Id(None, els_trans(self._res_elm)))\r\n\r\n def involved_elms(self) -> List[str]:\r\n return self._params_els + [self._res_elm]\r\n\r\n def __repr__(self) -> str:\r\n return \"FunctionFact(func=%s, param_els=%s, res_elm=%s)\" % (self._func, self._params_els, self._res_elm)\r\n\r\n def __str__(self) -> str:\r\n return \"%s(%s) = %s\" % (self._func.name, self._params_els, self._res_elm)\r\n\r\nclass InequalityFact(object):\r\n def __init__(self, lhs: str, rhs: str):\r\n self._lhs = lhs\r\n self._rhs = rhs\r\n\r\n def as_expr(self, els_trans: Callable[[str],str]) -> Expr:\r\n return syntax.Neq(syntax.Id(None, els_trans(self._lhs)),\r\n syntax.Id(None, els_trans(self._rhs)))\r\n\r\n def involved_elms(self) -> List[str]:\r\n return [self._lhs, self._rhs]\r\n\r\n def __repr__(self) -> str:\r\n return \"InequalityFact(lhs=%s, rhs=%s)\" % (self._lhs, self._rhs)\r\n\r\n def __str__(self) -> str:\r\n return \"%s ! 
%s\" % (self._lhs, self._rhs)\r\n\r\ndef dict_val_from_rel_name(name: str, m: Dict[syntax.RelationDecl,T]) -> T:\r\n for r,v in m.items():\r\n if r.name != name:\r\n continue\r\n return v\r\n raise KeyError\r\n\r\ndef first_relax_step_idx(trns: Trace) -> int:\r\n first_relax_idx = trns.transitions.index('decrease_domain')\r\n assert first_relax_idx != -1, trns.transitions\r\n assert first_relax_idx + 1 < len(trns.keys)\r\n return first_relax_idx\r\n\r\ndef active_rel(sort: syntax.SortDecl) -> syntax.RelationDecl:\r\n res = syntax.the_program.scope.get_relation('active_%s' % sort.name)\r\n assert res is not None\r\n return res\r\n\r\ndef active_rel_by_sort(prog: syntax.Program) -> Dict[syntax.SortDecl, syntax.RelationDecl]:\r\n return dict((sort, active_rel(sort)) for sort in prog.sorts())\r\n\r\ndef active_var(name: str, sort_name: str) -> syntax.Expr:\r\n return syntax.Apply('active_%s' % sort_name, [syntax.Id(None, name)])\r\n\r\ndef closing_qa_cycle(prog: syntax.Program, free_vars_sorts: List[syntax.SortDecl],\r\n existentially_quantified_sorts: List[syntax.SortDecl]) -> bool:\r\n qa_graph = prog.decls_quantifier_alternation_graph([])\r\n assert networkx.is_directed_acyclic_graph(qa_graph)\r\n\r\n for asort in free_vars_sorts:\r\n for esort in existentially_quantified_sorts:\r\n qa_graph.add_edge(asort.name, esort.name)\r\n\r\n return not networkx.is_directed_acyclic_graph(qa_graph)\r\n\r\ndef is_rel_blocking_relax(trns: Trace, idx: int,\r\n derived_rel: Tuple[List[Tuple[syntax.SortedVar, str]], Expr]) -> bool:\r\n # TODO: probably can obtain the sort from the sortedvar when not using scapy\r\n free_vars, derived_relation_formula = derived_rel\r\n free_vars_active_clause = syntax.And(*(active_var(v.name, sort_name) for (v, sort_name) in free_vars))\r\n\r\n diffing_formula = syntax.Exists([v for (v, _) in free_vars],\r\n syntax.And(syntax.Old(syntax.And(free_vars_active_clause,\r\n derived_relation_formula)),\r\n syntax.And(free_vars_active_clause,\r\n syntax.Not(derived_relation_formula))))\r\n\r\n with syntax.the_program.scope.two_state(twostate=True): # TODO: what is this doing? 
probably misusing\r\n diffing_formula.resolve(syntax.the_program.scope, syntax.BoolSort)\r\n\r\n res = trns.eval_double_vocab(diffing_formula, idx)\r\n assert isinstance(res, bool)\r\n return cast(bool, res)\r\n\r\ndef derived_rels_candidates_from_trace(trns: Trace, more_traces: List[Trace],\r\n max_conj_size: int, max_free_vars: int) -> List[Tuple[List[syntax.SortedVar],Expr]]:\r\n first_relax_idx = first_relax_step_idx(trns)\r\n pre_relax_state = trns.as_state(first_relax_idx)\r\n post_relax_state = trns.as_state(first_relax_idx + 1)\r\n assert pre_relax_state.univs == post_relax_state.univs\r\n\r\n\r\n # relaxed elements\r\n relaxed_elements = []\r\n for sort, univ in pre_relax_state.univs.items():\r\n active_rel_name = 'active_' + sort.name # TODO: de-duplicate\r\n pre_active_interp = dict_val_from_rel_name(active_rel_name, pre_relax_state.rel_interp)\r\n post_active_interp = dict_val_from_rel_name(active_rel_name, post_relax_state.rel_interp)\r\n pre_active_elements = [tup[0] for (tup, b) in pre_active_interp if b]\r\n post_active_elements = [tup[0] for (tup, b) in post_active_interp if b]\r\n assert set(post_active_elements).issubset(set(pre_active_elements))\r\n\r\n for relaxed_elem in utils.OrderedSet(pre_active_elements) - set(post_active_elements):\r\n relaxed_elements.append((sort, relaxed_elem))\r\n\r\n # pre-relaxation step facts concerning at least one relaxed element (other to be found by UPDR)\r\n relevant_facts: List[Union[RelationFact,FunctionFact,InequalityFact]] = []\r\n\r\n for rel, rintp in pre_relax_state.rel_interp.items():\r\n for rfact in rintp:\r\n (elms, polarity) = rfact\r\n relation_fact = RelationFact(rel, elms, polarity)\r\n if set(relation_fact.involved_elms()) & set(ename for (_, ename) in relaxed_elements):\r\n relevant_facts.append(relation_fact)\r\n\r\n for func, fintp in pre_relax_state.func_interp.items():\r\n for ffact in fintp:\r\n (els_params, els_res) = ffact\r\n function_fact = FunctionFact(func, els_params, els_res)\r\n if set(function_fact.involved_elms()) & set(ename for (_, ename) in relaxed_elements):\r\n relevant_facts.append(function_fact)\r\n\r\n for sort, elm in relaxed_elements: # other inequalities presumably handled by UPDR\r\n for other_elm in pre_relax_state.univs[sort]:\r\n if other_elm == elm:\r\n continue\r\n relevant_facts.append(InequalityFact(elm, other_elm))\r\n\r\n # facts blocking this specific relaxation step\r\n diff_conjunctions = []\r\n candidates_cache: Set[str] = set()\r\n for fact_lst in itertools.combinations(relevant_facts, max_conj_size):\r\n elements = utils.OrderedSet(itertools.chain.from_iterable(fact.involved_elms() for fact in fact_lst))\r\n relaxed_elements_relevant = [elm for (_, elm) in relaxed_elements if elm in elements]\r\n vars_from_elm = dict((elm, syntax.SortedVar(None, syntax.the_program.scope.fresh(\"v%d\" % i), None))\r\n for (i, elm) in enumerate(elements))\r\n parameter_elements = elements - set(relaxed_elements_relevant)\r\n if len(parameter_elements) > max_free_vars:\r\n continue\r\n\r\n conjuncts = [fact.as_expr(lambda elm: vars_from_elm[elm].name) for fact in fact_lst]\r\n\r\n # for elm, var in vars_from_elm.items():\r\n # TODO: make the two loops similar\r\n for elm in relaxed_elements_relevant:\r\n var = vars_from_elm[elm]\r\n sort = pre_relax_state.element_sort(elm)\r\n active_element_conj = syntax.Apply('active_%s' % sort.name, [syntax.Id(None, var.name)])\r\n conjuncts.append(active_element_conj)\r\n\r\n derived_relation_formula = syntax.Exists([vars_from_elm[elm]\r\n for (_, elm) in 
relaxed_elements\r\n if elm in vars_from_elm],\r\n syntax.And(*conjuncts))\r\n\r\n if str(derived_relation_formula) in candidates_cache:\r\n continue\r\n candidates_cache.add(str(derived_relation_formula))\r\n\r\n if closing_qa_cycle(syntax.the_program, [pre_relax_state.element_sort(elm) for elm in parameter_elements],\r\n [pre_relax_state.element_sort(elm) for elm in relaxed_elements_relevant]):\r\n # adding the derived relation would close a quantifier alternation cycle, discard the candidate\r\n continue\r\n\r\n # if trns.eval_double_vocab(diffing_formula, first_relax_idx):\r\n if is_rel_blocking_relax(trns, first_relax_idx,\r\n ([(vars_from_elm[elm], pre_relax_state.element_sort(elm).name) for elm in parameter_elements],\r\n derived_relation_formula)):\r\n # if all(trs.eval_double_vocab(diffing_formula, first_relax_step_idx(trs)) for trs in more_traces):\r\n diff_conjunctions.append(([vars_from_elm[elm] for elm in parameter_elements],\r\n derived_relation_formula))\r\n\r\n return diff_conjunctions\r\n\r\ndef relaxation_action_def(prog: syntax.Program,\r\n actives: Optional[Dict[syntax.SortDecl, syntax.RelationDecl]]=None,\r\n fresh: bool=True) \\\r\n -> syntax.DefinitionDecl:\r\n decrease_name = (prog.scope.fresh('decrease_domain') if fresh else 'decrease_domain')\r\n mods = []\r\n conjs: List[Expr] = []\r\n if actives is None:\r\n actives = active_rel_by_sort(prog)\r\n\r\n # a conjunct allowing each domain to decrease\r\n for sort in prog.sorts():\r\n name = prog.scope.fresh(sort.name[0].upper())\r\n ap = syntax.Apply(actives[sort].name, [syntax.Id(None, name)])\r\n expr = syntax.Forall([syntax.SortedVar(None, name, None)],\r\n syntax.Implies(ap, syntax.Old(ap)))\r\n conjs.append(expr)\r\n mods.append(syntax.ModifiesClause(None, actives[sort].name))\r\n\r\n # constants are active\r\n for const in prog.constants():\r\n conjs.append(syntax.Apply(actives[syntax.get_decl_from_sort(const.sort)].name,\r\n [syntax.Id(None, const.name)]))\r\n\r\n # functions map active to active\r\n for func in prog.functions():\r\n names: List[str] = []\r\n func_conjs = []\r\n for arg_sort in func.arity:\r\n arg_sort_decl = syntax.get_decl_from_sort(arg_sort)\r\n name = prog.scope.fresh(arg_sort_decl.name[0].upper(),\r\n also_avoid=names)\r\n names.append(name)\r\n func_conjs.append(syntax.Apply(actives[arg_sort_decl].name, [syntax.Id(None, name)]))\r\n ap_func = syntax.Old(syntax.Apply(func.name, [syntax.Id(None, name) for name in names]))\r\n active_func = syntax.Apply(actives[syntax.get_decl_from_sort(func.sort)].name, [ap_func])\r\n conjs.append(syntax.Forall([syntax.SortedVar(None, name, None) for name in names],\r\n syntax.Implies(syntax.And(*func_conjs), active_func)))\r\n\r\n # (relativized) axioms hold after relaxation\r\n for axiom in prog.axioms():\r\n if not syntax.is_universal(axiom.expr):\r\n conjs.append(syntax.relativize_quantifiers(actives, axiom.expr))\r\n\r\n # derived relations have the same interpretation on the active domain\r\n for rel in prog.derived_relations():\r\n names = []\r\n rel_conjs = []\r\n for arg_sort in rel.arity:\r\n arg_sort_decl = syntax.get_decl_from_sort(arg_sort)\r\n name = prog.scope.fresh(arg_sort_decl.name[0].upper(),\r\n also_avoid=names)\r\n names.append(name)\r\n rel_conjs.append(syntax.Apply(actives[arg_sort_decl].name, [syntax.Id(None, name)]))\r\n ap_rel = syntax.Apply(rel.name, [syntax.Id(None, name) for name in names])\r\n conjs.append(syntax.Forall([syntax.SortedVar(None, name, None) for name in names],\r\n syntax.Implies(syntax.And(*rel_conjs),\r\n 
syntax.Iff(ap_rel, syntax.Old(ap_rel)))))\r\n\r\n return syntax.DefinitionDecl(None, public=True, twostate=True, name=decrease_name,\r\n params=[], body=(mods, syntax.And(*conjs)))\r\n\r\n\r\ndef replace_relaxation_action(prog: syntax.Program, new_relax_action: syntax.DefinitionDecl) -> syntax.Program:\r\n old_relaxation_action = prog.scope.get('decrease_domain')\r\n decls = [decl for decl in prog.decls if decl != old_relaxation_action]\r\n decls.append(new_relax_action)\r\n return syntax.Program(decls)\r\n","sub_path":"src/relaxed_traces.py","file_name":"relaxed_traces.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"517837291","text":"#!/usr/bin/env python3\n\ndef selection_sort(arr):\n\t# For every element in array\n\tfor i in range(len(arr)-1, 0, -1):\n\t\tpositionOfMax = 0\n\n\t\tfor location in range(1, i+1):\n\t\t\t# Set maximum's location\n\t\t\tif arr[location] > arr[positionOfMax]:\n\t\t\t\tpositionOfMax = location\n\n\t\ttemp = arr[i]\n\t\tarr[i] = arr[positionOfMax]\n\t\tarr[positionOfMax] = temp\n\nprint('\\n### Selection Sort')\narr = [3, 5, 2, 7, 6, 8, 12, 40, 21]\nprint('List:\\t' + str(arr))\nselection_sort(arr)\nprint('Sorted:\\t' + str(arr))","sub_path":"Sorting and Searching/03_Selection_Sort.py","file_name":"03_Selection_Sort.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"258496702","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/mediagoblin/gmg_commands/addmedia.py\n# Compiled at: 2016-03-29 15:18:42\n# Size of source mod 2**32: 3862 bytes\nfrom __future__ import print_function\nimport os, six\nfrom mediagoblin.db.models import LocalUser\nfrom mediagoblin.gmg_commands import util as commands_util\nfrom mediagoblin.submit.lib import submit_media, get_upload_file_limits, FileUploadLimit, UserUploadLimit, UserPastUploadLimit\nfrom mediagoblin import mg_globals\n\ndef parser_setup(subparser):\n subparser.add_argument('username', help='Name of user this media entry belongs to')\n subparser.add_argument('filename', help='Local file on filesystem')\n subparser.add_argument('-d', '--description', help='Description for this media entry')\n subparser.add_argument('-t', '--title', help='Title for this media entry')\n subparser.add_argument('-l', '--license', help='License this media entry will be released under. Should be a URL.')\n subparser.add_argument('-T', '--tags', help='Comma separated list of tags for this media entry.')\n subparser.add_argument('-s', '--slug', help='Slug for this media entry. 
Will be autogenerated if unspecified.')\n subparser.add_argument('--celery', action='store_true', help=\"Don't process eagerly, pass off to celery\")\n\n\ndef addmedia(args):\n if not args.celery:\n os.environ['CELERY_ALWAYS_EAGER'] = 'true'\n app = commands_util.setup_app(args)\n user = app.db.LocalUser.query.filter(LocalUser.username == args.username.lower()).first()\n if user is None:\n print(\"Sorry, no user by username '%s'\" % args.username)\n return\n filename = os.path.split(args.filename)[(-1)]\n abs_filename = os.path.abspath(args.filename)\n if not os.path.exists(abs_filename):\n print(\"Can't find a file with filename '%s'\" % args.filename)\n return\n upload_limit, max_file_size = get_upload_file_limits(user)\n\n def maybe_unicodeify(some_string):\n if some_string is None:\n return\n if six.PY2:\n return six.text_type(some_string, 'utf-8')\n return some_string\n\n try:\n submit_media(mg_app=app, user=user, submitted_file=open(abs_filename, 'rb'), filename=filename, title=maybe_unicodeify(args.title), description=maybe_unicodeify(args.description), license=maybe_unicodeify(args.license), tags_string=maybe_unicodeify(args.tags) or '', upload_limit=upload_limit, max_file_size=max_file_size)\n except FileUploadLimit:\n print('This file is larger than the upload limits for this site.')\n except UserUploadLimit:\n print('This file will put this user past their upload limits.')\n except UserPastUploadLimit:\n print('This user is already past their upload limits.')","sub_path":"pycfiles/mediagoblin-0.9.0-py3.4/addmedia.cpython-34.py","file_name":"addmedia.cpython-34.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"310362981","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport click\nimport logging\nimport requests\nfrom six.moves.urllib.parse import urljoin\nlogger = logging.getLogger(__name__)\n\n\ndef capture(\n target_url,\n user_agent=\"archiveis (https://github.com/pastpages/archiveis)\",\n):\n \"\"\"\n Archives the provided URL using archive.is\n\n Returns the URL where the capture is stored.\n \"\"\"\n # Put together the URL that will save our request\n domain = \"http://archive.is\"\n save_url = urljoin(domain, \"/submit/\")\n\n # Configure the request headers\n headers = {\n 'User-Agent': user_agent,\n }\n\n # Request a unique identifier for our activity\n logger.debug(\"Requesting {}\".format(domain + \"/\"))\n response = requests.get(\n domain + \"/\",\n timeout=120,\n allow_redirects=True,\n headers=headers\n )\n\n # It will need to be parsed from the homepage response headers\n html = str(response.content)\n try:\n unique_id = html.split('name=\"submitid', 1)[1].split('value=\"', 1)[1].split('\"', 1)[0]\n logger.debug(\"Unique identifier: {}\".format(unique_id))\n except IndexError:\n logger.warn(\"Unable to extract unique identifier from archive.is. 
Submitting without it.\")\n        unique_id = None\n\n    # Send the capture request to archive.is with the unique id included\n    data = {\n        \"url\": target_url,\n        \"anyway\": 1,\n    }\n    if unique_id:\n        data.update({\"submitid\": unique_id})\n\n    logger.debug(\"Requesting {}\".format(save_url))\n    response = requests.post(\n        save_url,\n        timeout=120,\n        allow_redirects=True,\n        headers=headers,\n        data=data\n    )\n\n    # archive.is returns a link-format timemap in the Link header field,\n    # but if this is the first time archive.is has archived the URI-R,\n    # or for some other reason unknown at this time,\n    # this information will not be present, so we resort to searching the\n    # returned HTML page\n    memento_re = re.compile('\"(http(?:s)?://archive\\.is/(?:[0-9]{14}/(?:\\b)?)?'\n                            '[-a-zA-Z0-9@:%_+.~#?&/=]+)\"',\n                            re.IGNORECASE | re.MULTILINE)\n    mementos = memento_re.findall(response.text)\n    logger.debug(\"Memento: {}\".format(mementos[0]))\n\n    # the url to the memento is the first element in the list\n    return mementos[0]\n\n\n@click.command()\n@click.argument(\"url\")\n@click.option(\"-ua\", \"--user-agent\", help=\"User-Agent header for the web request\")\ndef cli(url, user_agent):\n    \"\"\"\n    Archives the provided URL using archive.is.\n    \"\"\"\n    kwargs = {}\n    if user_agent:\n        kwargs['user_agent'] = user_agent\n    archive_url = capture(url, **kwargs)\n    click.echo(archive_url)\n\n\nif __name__ == \"__main__\":\n    cli()\n","sub_path":"archiveis/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508006742","text":"# %%\nimport numpy as np\nimport pandas as pd\nimport xlrd\nimport os\n\nfilename = \"参数设定.xlsx\"\nfilePath = os.path.join(os.getcwd(), filename)\n# Levels 1 and 2 share the same game parameters\nGame_data = pd.read_excel(\n    filePath, sheet_name='1', usecols=['player_num', 'max_load', 'init_fund', 'ddl', 'base_income', 'water_kg', 'water_price', 'water_consume', 'food_kg', 'food_price', 'food_consume', 'weather'])\n\nplayer_num = Game_data.loc[0, 'player_num']\nmax_load = Game_data.loc[0, 'max_load']\ninit_fund = Game_data.loc[0, 'init_fund']\nddl = Game_data.loc[0, 'ddl']\nbase_income = Game_data.loc[0, 'base_income']\n\nwater_kg = Game_data.loc[0, 'water_kg']\nwater_price = Game_data.loc[0, 'water_price']\nwater_consume = []\nfor i in range(3):\n    water_consume.append(Game_data.loc[i, 'water_consume'])\nfood_kg = Game_data.loc[0, 'food_kg']\nfood_price = Game_data.loc[0, 'food_price']\nfood_consume = []\nfor i in range(3):\n    food_consume.append(Game_data.loc[i, 'food_consume'])\nweather = []\nfor i in range(int(ddl)):\n    weather.append(Game_data.loc[i, 'weather'])\n\n# %% Game map setup\n\n\ndef excel_to_matrix(path):\n    table = xlrd.open_workbook(path).sheets()[0]  # the first sheet is level 1, the second is level 2\n    row = table.nrows  # number of rows\n    col = table.ncols  # number of columns\n    datamatrix = np.zeros((row, col))  # create an nrows x ncols matrix initialized to all zeros\n    for x in range(col):\n        cols = np.matrix(table.col_values(x))  # convert the list to a matrix for matrix operations\n        datamatrix[:, x] = cols  # store the data into the matrix column by column\n    # data normalization (skipped)\n    game_map = datamatrix  # use the data matrix as game_map\n    for i in range(col):\n        for j in range(row):\n            game_map[j, i] = int(game_map[j, i])\n            game_map[i, j] = game_map[j, i]\n    return game_map\n\n\nmap_filename = \"一二关地图.xlsx\"\nmap_filePath = os.path.join(os.getcwd(), map_filename)\nGame_map = excel_to_matrix(map_filePath)\n'''Set the following parameters manually'''\nshop_site = [15]  # village locations\nmine_site = [12]  # mine locations\nfinal_site = 27  # destination location\n\n# %% Path selector\n# the first row and first column of the sheet are location indices\n\n\ndef path_choose(game_map, local):\n    path = 
[i+1 for i, x in enumerate(game_map[1:, local]) if x == 1]\n return path\n\n# %% 负重计算\n\n\ndef count_load(water, food):\n '''计算负重(水,食物)'''\n load_total = water*water_kg+food*food_kg\n return load_total\n\n# %% 游戏开始\n\n\ntoday = 1 # 当前日期\nlocal_site = 1 # 所在区域\nleft_fund = init_fund # 剩余资金 = 初始资金\nleft_water = 0 # 剩余水量\nleft_food = 0 # 剩余食物\n\n\ndef buy_something(plus):\n '''购置物资,plus为购置物资的价格倍数'''\n # 获取全局变量\n global left_fund\n global left_water\n global left_food\n water_price_now = water_price*plus\n food_price_now = food_price*plus\n load_now = count_load(left_water, left_food)\n load_left = max_load - load_now # 最大负重 - 当前负重 = 剩余空间\n print('--------------------------------')\n print('进入商店 当前水价:'+str(water_price_now) +\n '可购买('+str(left_fund/water_price_now)+')' +\n '可装载('+str(load_left/water_kg)+')' +\n ' 当前食物价:'+str(food_price_now) +\n '可购买('+str(left_fund/food_price_now)+')' +\n '可装载('+str(load_left/food_kg)+')')\n print('剩余资金为 :'+str(left_fund))\n print('剩余水量为 :'+str(left_water))\n print('剩余食物为 :'+str(left_food))\n input_str = input('输入你要购买的水和食物的数量(用空格隔开,不买则输入0):')\n if input_str == '0':\n print('-------跳过购买-------')\n else:\n if ' ' in input_str:\n a = [int(n) for n in input_str.split(' ')]\n left_food_temp = left_fund - a[0] * \\\n water_price_now-a[1]*food_price_now\n if left_food_temp < 0:\n print('资金不足,购买失败!')\n buy_something(plus)\n else:\n if count_load(a[0], a[1]) <= load_left:\n left_water += a[0]\n left_food += a[1]\n left_fund = left_food_temp\n print('-------购买成功-------')\n print('剩余资金为 :'+str(left_fund))\n print('剩余水量为 :'+str(left_water))\n print('剩余食物为 :'+str(left_food))\n buy_something(plus)\n else:\n print('背包空间不足,购买失败!')\n buy_something(plus)\n else:\n print('输入错误,重新购买')\n buy_something(plus)\n\n\ndef run_mine():\n '''进行挖矿'''\n global left_fund\n global local_site\n print('-------进行挖矿-------')\n print('当前资金为:'+str(left_fund)+'+'+str(base_income) +\n '='+str(left_fund+base_income))\n left_fund += base_income\n\n\ndef move_site():\n '''进行移动,返回值为资源消耗倍率,移动则消耗双倍'''\n global local_site\n site_to = int(input('输入你前往的地区(不移动则输入0):'))\n if site_to == 0:\n return 1\n else:\n if site_to not in path_choose(Game_map, local_site):\n print('无法到达该地区,请重新输入')\n return move_site()\n else:\n print('从'+str(local_site)+'到达'+str(site_to))\n local_site = site_to\n return 2\n\n\ndef do_something():\n '''执行当天任务'''\n show_now()\n if local_site in shop_site: # 如果在村庄可以进行购买再移动\n buy_something(2)\n spend_oneday(move_site())\n elif local_site in mine_site: # 如果在矿场可以进行挖矿或移动\n if_mine = int(input('当前在矿场,是否挖矿(1:是 0:否):'))\n if if_mine:\n run_mine()\n spend_oneday(3)\n else:\n spend_oneday(move_site())\n else:\n if weather[today-1] == 2: # 遇到沙暴无法移动\n print('第 '+str(today)+' 天有沙暴,无法移动')\n spend_oneday(1)\n else:\n spend_oneday(move_site())\n\n\ndef spend_oneday(plus):\n '''度过一天,plus为资源消耗的倍数'''\n global today\n global left_water\n global left_food\n a = 'null'\n if weather[today-1] == 0:\n a = '晴朗'\n elif weather[today-1] == 1:\n a = '高温'\n elif weather[today-1] == 2:\n a = '沙暴'\n print('--------------------------------')\n print('第 '+str(today)+' 天结束,天气是'+a)\n print('消耗的水量为 :'+str(water_consume[weather[today-1]]*plus))\n print('消耗的食物为 :'+str(food_consume[weather[today-1]]*plus))\n left_water = left_water - water_consume[weather[today-1]]*plus\n left_food = left_food - food_consume[weather[today-1]]*plus\n today += 1\n\n\ndef show_now():\n '''展示当前状态'''\n a = 'null'\n if weather[today-1] == 0:\n a = '晴朗'\n elif weather[today-1] == 1:\n a = '高温'\n elif weather[today-1] == 2:\n a = '沙暴'\n 
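# (Editor's note, a hedged sketch) The weather-code -> name mapping is
# duplicated as if/elif chains in both spend_oneday() and show_now():
# 0 = 晴朗 (sunny), 1 = 高温 (hot), 2 = 沙暴 (sandstorm). A single lookup
# helper could replace both chains; WEATHER_NAMES and weather_name are
# names introduced here for illustration only.
WEATHER_NAMES = {0: '晴朗', 1: '高温', 2: '沙暴'}

def weather_name(code):
    """Readable name for a weather code; 'null' mirrors the original default."""
    return WEATHER_NAMES.get(code, 'null')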
print('--------------------------------')\n print('当前是第 '+str(today)+' 天,天气是'+a)\n print('当前在区域 '+str(local_site))\n print('剩余资金为 :'+str(left_fund))\n print('剩余水量为 :'+str(left_water))\n print('剩余食物为 :'+str(left_food))\n print('可前往:'+str(path_choose(Game_map, local_site)))\n\n\ndef if_dead():\n '''判断是否死亡'''\n if left_food < 0:\n return True\n elif left_water < 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n local_site_table = []\n left_fund_table = []\n left_water_table = []\n left_food_table = []\n buy_something(1)\n for i in range(int(ddl)):\n do_something()\n local_site_table.append(local_site)\n left_fund_table.append(left_fund)\n left_water_table.append(left_water)\n left_food_table.append(left_food)\n if if_dead():\n break\n if local_site == final_site:\n break\n if local_site == final_site:\n print('走出沙漠,游戏结束')\n print('最终资产:'+str(left_fund+left_water *\n water_price*0.5+left_food*food_price*0.5))\n print(str(local_site_table))\n print(str(left_fund_table))\n print(str(left_water_table))\n print(str(left_food_table))\n else:\n print('没能走出沙漠,游戏终止')\n\n# %%\n","sub_path":"1_第一关仿真源码.py","file_name":"1_第一关仿真源码.py","file_ext":"py","file_size_in_byte":8638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"540175814","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport urlparse\nfrom scrapy.selector import Selector\nfrom scrapy.spider import Spider\nfrom target_parser.models import TargetCategory, Product\nfrom scrapy.http import Request\n\n\nclass ProductsSpider(Spider):\n name = 'get_products'\n allowed_domains = ['target.com.au']\n start_urls = []\n brand_urls = []\n brands = []\n brands_already_parsed = False\n link_prepend = u'http://www.target.com.au'\n\n def __init__(self, *args, **kwargs):\n super(ProductsSpider, self).__init__(*args, **kwargs)\n self.latest_not_parsed_category = (\n TargetCategory.objects.filter(target_id__isnull=False).\n order_by('last_parsed').\n first()\n )\n # self.latest_not_parsed_category = TargetCategory.objects.get(target_id='W258161')\n self.start_urls.append('http://www.target.com.au/c/%s?itemsPerPage=90' %\n self.latest_not_parsed_category.target_id)\n\n self.latest_not_parsed_category.last_parsed = datetime.datetime.now()\n self.latest_not_parsed_category.save()\n\n def check_for_pagination(self, sel):\n try:\n paginator_link = sel.xpath('//li[@class=\"next option\"]/a/@href').extract()[0]\n return self.link_prepend+paginator_link\n except IndexError:\n return False\n\n def check_for_brands_presence(self, sel):\n if not self.brands_already_parsed:\n self.brands_already_parsed = True\n #parsing brands just once\n try:\n brand_links = sel.xpath('//div[@data-code=\"Brand\"]//li[@class=\"option top\"]/a')\n for brand_link in brand_links:\n self.brands.append(brand_link.xpath('./text()').extract()[0])\n self.brand_urls.append(self.link_prepend+brand_link.xpath('./@href').extract()[0])\n except IndexError:\n self.log(\"There's no brands in this category\")\n\n def parse(self, response):\n # flag showing should we parse at the end\n parse = False\n brand = None\n sel = Selector(response)\n self.check_for_brands_presence(sel)\n # let's check if there's a brand parameter in url\n url = urlparse.urlparse(response.url)\n params = urlparse.parse_qs(url.query)\n query_param = params.get('q')\n if query_param:\n query_params = query_param[0].split(':')\n if 'brand' in query_params:\n brand = query_params[-1]\n if brand in self.brands:\n self.log(\"!!!!!!!!!!!!PARSING URL %s\" % 
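# (Editor's note, hedged) In the parse() method below, the spider removes
# items from self.brand_urls while iterating over that same list; in CPython
# this skips every other element. Draining a copy is the usual safe pattern:
#
#     for brand_url in list(self.brand_urls):
#         self.brand_urls.remove(brand_url)
#         yield Request(brand_url, callback=self.parse)
#
# or, equivalently, swap the whole list out in one step:
#
#     pending, self.brand_urls = self.brand_urls, []
#     for brand_url in pending:
#         yield Request(brand_url, callback=self.parse)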
response.url)\n parse = True\n if len(self.brand_urls):\n # this means tht we've already parsed brands, so no longer need that\n for brand_url in self.brand_urls:\n self.brand_urls.remove(brand_url)\n yield Request(brand_url, callback=self.parse)\n else:\n parse = True\n if parse:\n # if we should parse - we PARSE!!!!\n product_containers = sel.xpath('//li[@class=\"product\"]//div[@class=\"summary\"]/h3/a')\n for product_container in product_containers:\n product_name = product_container.xpath('./text()').extract()[0]\n product_target_id = product_container.xpath('./@data-product-code').extract()[0]\n product, is_new = Product.objects.get_or_create(\n name=product_name,\n target_id=product_target_id,\n defaults={\n 'brand': brand,\n 'parent': self.latest_not_parsed_category,\n }\n )\n if is_new:\n self.log('New product added!%s sku: %s.' % (product_name, product_target_id))\n else:\n self.log('Product %s with sku %s skipped as duplicate' % (product_name, product_target_id))\n next_page = self.check_for_pagination(sel)\n if next_page:\n yield Request(next_page, callback=self.parse)","sub_path":"target_parser/spiders/get_products.py","file_name":"get_products.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"206751687","text":"# __author__ = itsneo1990\nfrom PIL import Image\nfrom io import BytesIO\n\n\ndef image_resize(data, size):\n i_file = BytesIO(data)\n o_file = BytesIO()\n\n img = Image.open(i_file)\n img = img.convert(\"RGB\").resize(size, Image.ANTIALIAS)\n\n img.save(o_file, 'jpeg', quality=100)\n resized_data = o_file.getvalue()\n\n i_file.close()\n o_file.close()\n return resized_data\n","sub_path":"libs/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"298371343","text":"class Solution:\n \"\"\"\n @param nums: A list of integers\n @return: An integer denotes the middle number of the array\n \"\"\"\n\n def median(self, nums):\n # write your code here\n def partition(nums, start, end, size):\n mid = (start + end) / 2\n pivot = nums[mid]\n i, j = start - 1, end + 1\n\n k = start\n while k < j:\n if nums[k] < pivot:\n i += 1\n nums[i], nums[k] = nums[k], nums[i]\n elif nums[k] > pivot:\n j -= 1\n nums[j], nums[k] = nums[k], nums[j]\n k -= 1\n k += 1\n\n if (i - start + 1) >= size:\n return partition(nums, start, i, size)\n elif (j - start) >= size:\n return nums[j - 1]\n else:\n return partition(nums, j, end, size - (j - start))\n\n return partition(nums, 0, len(nums) - 1, (len(nums) + 1) / 2)\n","sub_path":"lintcode/80-median.py","file_name":"80-median.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"648634416","text":"# -*- coding: utf-8 -*-\n\nfrom nose.tools import * # PEP8 asserts\n\nfrom modularodm.exceptions import ValidationError\n\nfrom tests.base import OsfTestCase\nfrom website.addons.forward.tests.factories import ForwardSettingsFactory\n\n\nclass TestSettingsValidation(OsfTestCase):\n\n def setUp(self):\n super(TestSettingsValidation, self).setUp()\n self.settings = ForwardSettingsFactory()\n\n def test_validate_url_bad(self):\n self.settings.url = 'badurl'\n with assert_raises(ValidationError):\n self.settings.save()\n\n def test_validate_url_good(self):\n self.settings.url = 'http://frozen.pizza.reviews/'\n try:\n self.settings.save()\n except 
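# (Editor's note, hedged) The 80-median.py record above is Python 2 code:
# under Python 3 its three `/` divisions produce floats and break indexing,
# so (start + end) / 2 and (len(nums) + 1) / 2 would need `//`. For
# reference, a compact Python 3 equivalent of "middle number of the array":

def median(nums):
    """Lower median via sorting -- O(n log n), fine for small inputs."""
    return sorted(nums)[(len(nums) - 1) // 2]

assert median([4, 5, 1, 2, 3]) == 3
assert median([5, 5, 3, 0, 1, 2]) == 2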
ValidationError:\n assert 0\n\n def test_validate_redirect_bool_bad(self):\n self.settings.redirect_bool = 'notabool'\n with assert_raises(ValidationError):\n self.settings.save()\n\n def test_validate_redirect_bool_good(self):\n self.settings.redirect_bool = False\n try:\n self.settings.save()\n except ValidationError:\n assert 0\n\n def test_validate_redirect_secs_bad(self):\n self.settings.redirect_secs = -2\n with assert_raises(ValidationError):\n self.settings.save()\n\n def test_validate_redirect_secs_good(self):\n self.settings.redirect_secs = 20\n try:\n self.settings.save()\n except ValidationError:\n assert 0\n\n def test_label_sanitary(self):\n self.settings.label = 'safe'\n try:\n self.settings.save()\n except ValidationError:\n assert False\n\n def test_label_unsanitary(self):\n self.settings.label = 'un
safe'\n with assert_raises(ValidationError):\n self.settings.save()\n","sub_path":"website/addons/forward/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192386164","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\nx_array = np.linspace(0, 20, 10)\ny_array = np.sin(x_array)\n\nprint( \"x ========\")\nprint( x_array )\nprint( \"y ========\")\nprint( y_array )\n\nplt.plot(x_array, y_array)\nplt.show()","sub_path":"python_src/30_tensor_flow_ex/lec_210_python/chart_example.py","file_name":"chart_example.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"296607689","text":"from opengever.dossier.upgrades.to4502 import drop_old_refnumber_from_annotations\nfrom opengever.testing import FunctionalTestCase\nfrom zope.annotation.interfaces import IAnnotations\n\n\nclass TestDropTemplateFolderReferenceNumber(FunctionalTestCase):\n\n def setUp(self):\n super(TestDropTemplateFolderReferenceNumber, self).setUp()\n annotations = IAnnotations(self.portal)\n annotations['dossier_reference_mapping'] = 'Foo'\n annotations['reference_numbers'] = 'Bar'\n annotations['reference_prefix'] = 'Qux'\n\n def test_drop_refnumbers_from_annotations(self):\n drop_old_refnumber_from_annotations()\n\n annotations = IAnnotations(self.portal)\n self.assertNotIn('dossier_reference_mapping', annotations)\n self.assertNotIn('reference_numbers', annotations)\n self.assertNotIn('reference_prefix', annotations)\n","sub_path":"opengever/dossier/tests/test_upgrades.py","file_name":"test_upgrades.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"401598668","text":"#!/usr/bin/python3\nfrom block_io import BlockIo, BlockIoAPIError\n\n\ndef inicializarAPI(_secretPIN):\n\t#inicializando a API\n\t\n\tversion = 2 # API version\n\t\n\tbitcointTestnetApiKey = 'dac5-24ba-d04c-19f9'\n\tblock_io = BlockIo(bitcointTestnetApiKey, _secretPIN, version)\n\treturn block_io\n\ndef send_btc(_block_io, _amount, _fromWallet, _toWallet, _secretPIN):\n\ttry:\n\t\t#envia 1 btc\n\t\twithdraw = _block_io.withdraw_from_addresses(amounts=_amount, \n\t\t\tfrom_addresses=_fromWallet, to_addresses=_toWallet, pin=_secretPIN)\n\n\texcept BlockIoAPIError as error:\n\t\tprint(error)\n\n\treturn withdraw\n\ndef main():\n\tsecretPIN = '35kTalBneckAe'\n\tblock_io = inicializarAPI(secretPIN)\n\n\tamount = '1.00000000'\n\texampleWallet = '2N2MPfFRUmipSSdFaio23YiYw8eq6SV49qt'\n\ttestWallet = '2MtSQLgfWpREsDjpax9k71MjiN6GRECuv2P'\n\thiringWallet = 'mnYoahiweETgdXsfY92GCWA6HoRj9knQUw'\n\n\twithdraw = send_btc(block_io, amount, exampleWallet, hiringWallet, secretPIN)\n\n\tprint(\"Status da operacao: \", withdraw['status'])\n\tprint(\"Enviado: \", withdraw['data']['amount_sent'])\n\n\treturn\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"hiring.py","file_name":"hiring.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197169502","text":"from selenium import webdriver\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport os\r\n\r\n\r\nif 'chromedriver.exe' in os.listdir():\r\n x = os.path.join(os.getcwd(), 'chromedriver.exe')\r\n print(x)\r\n driver = 
webdriver.Chrome(x)\r\n\r\nelse:\r\n print('Warning : chrome binaries missing! ')\r\n\r\n\r\ndef google(search):\r\n driver.get(\"https://www.google.com/\")\r\n sleep(5)\r\n driver.find_element_by_xpath(r'//*[@id=\"tsf\"]/div[2]/div[1]/div[1]/div/div[2]/input').send_keys(search)\r\n sleep(2)\r\n driver.find_element_by_xpath(r'//*[@id=\"tsf\"]/div[2]/div[1]/div[1]/div/div[2]/input').send_keys(Keys.RETURN)\r\n\r\n\r\ndef duck(search):\r\n driver.get(\"https://duckduckgo.com/\")\r\n sleep(5)\r\n driver.find_element_by_xpath(r'//*[@id=\"search_form_input_homepage\"]').send_keys(search)\r\n sleep(2)\r\n driver.find_element_by_xpath(r'//*[@id=\"search_form_input_homepage\"]').send_keys(Keys.RETURN)\r\n\r\ndef wiki(search):\r\n driver.get(\"https://www.wikipedia.org/\")\r\n sleep(5)\r\n driver.find_element_by_xpath(r'//*[@id=\"searchInput\"]').send_keys(search)\r\n sleep(2)\r\n driver.find_element_by_xpath(r'//*[@id=\"searchInput\"]').send_keys(Keys.RETURN)\r\n\r\n\r\ndef bing(search):\r\n driver.get(\"https://www.bing.com/\")\r\n sleep(5)\r\n driver.find_element_by_xpath(r'//*[@id=\"sb_form_q\"]').send_keys(search)\r\n sleep(2)\r\n driver.find_element_by_xpath(r'//*[@id=\"sb_form_q\"]').send_keys(Keys.RETURN)\r\n\r\n\r\ns = input('what shouid i search? : ')\r\no = int(input('Where shouid i search ? \\n 1.Google \\t 2.DuckDuckGo \\t 3.Wiki \\t 4.Bing :'))\r\nif o == 1:\r\n google(s)\r\nelif o == 2:\r\n duck(s)\r\nelif o == 3:\r\n wiki(s)\r\nelif o == 4:\r\n bing(s)\r\nelse:\r\n google(s)","sub_path":"Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636245669","text":"import tkinter as tk\n\ndef clicked(e):\n print(\"x:{0} y:{1} text:{2}\".format(e.x, e.y, e. 
widget[\"text\"]))\n\nroot = tk.Tk()\nbutton0 = tk.Button(root, text=\"Hello\")\nbutton1 = tk.Button(root, text=\"World\")\nbutton0.pack()\nbutton1.pack()\nbutton0.bind(\"\", clicked)\nbutton1.bind(\"\", clicked)\nroot.mainloop()\n","sub_path":"python/GUI/tkinter_button1.py","file_name":"tkinter_button1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"510047240","text":"from setuptools import setup, Extension\nimport numpy\nimport os\nimport sys\n\nif 'USE_CYTHON' in os.environ and os.environ['USE_CYTHON'].lower() in ('1', 'true', 'yes', 'y'):\n USE_CYTHON = True\nelse:\n USE_CYTHON = False\n\nextensions = [\n Extension(\"chirplettransform\", [\"chirplettransform\" + ('.pyx' if USE_CYTHON else '.c')],\n libraries=[\"chirplet\"],\n library_dirs=[\".\"],\n include_dirs=[numpy.get_include()],\n **({'runtime_library_dirs': [\"$ORIGIN\"]} if sys.platform.startswith('linux') else {})\n )\n ]\n\nif USE_CYTHON:\n from Cython.Build import cythonize\n extensions = cythonize(extensions)\n\nsetup(\n name = 'Fast Discrete-Time Chirplet Transform',\n ext_modules = extensions,\n data_files = [\"libchirplet.so\"]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28631748","text":"from setuptools import setup, find_packages\nimport sys, os\n\nfrom paste.script import templates\nvars = [\n templates.var('version', 'Version (like 0.1)'),\n templates.var('description', 'One-line description of the package'),\n templates.var('long_description', 'Multi-line description (in reST)'),\n templates.var('keywords', 'Space-separated keywords/tags'),\n templates.var('author', 'Author name'),\n templates.var('author_email', 'Author email'),\n templates.var('url', 'URL of homepage'),\n templates.var('license_name', 'License name'),\n templates.var('zip_safe', 'True/False: if the package can be distributed as a .zip file',\n default=False),\n ]\n\nversion = '0.1'\n\nsetup(name='timgerapp',\n version=version,\n description=\"\",\n long_description=\"\"\"\\\n\"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='',\n author='@timger',\n author_email='yishenggudou@gmail.com',\n url='http://www.timger.info',\n license='timger',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n [paste.paster_create_template]\n timgerapp = timgerapp:FrameworkTemplate\n \"\"\",\n )\n","sub_path":"timger_python_packed/timgerapp/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"312945961","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
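# ---------------------------------------------------------------------------
# (Editor's note, hedged illustration -- not part of the original module)
# The helpers in this file lean on msrestazure.tools. For a full ARM
# resource ID, parse_resource_id() returns a dict with keys such as
# 'subscription', 'resource_group', 'namespace', 'type' and 'resource_name':
#
#     from msrestazure.tools import is_valid_resource_id, parse_resource_id
#     rid = ('/subscriptions/00000000-0000-0000-0000-000000000000'
#            '/resourceGroups/rg/providers/Microsoft.Storage'
#            '/storageAccounts/mystorage')
#     assert is_valid_resource_id(rid)
#     parts = parse_resource_id(rid)
#     # parts['resource_group'] == 'rg', parts['resource_name'] == 'mystorage'
#
# The subscription ID and names are made-up example values.
# ---------------------------------------------------------------------------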
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\ndef get_key_for_storage_account(cmd, storage_account): # pylint: disable=unused-argument\n from ._client_factory import cf_storage\n from msrestazure.tools import parse_resource_id, is_valid_resource_id\n from knack.util import CLIError\n\n storage_account_key = None\n if is_valid_resource_id(storage_account):\n parsed_storage_account = parse_resource_id(storage_account)\n resource_group_name = parsed_storage_account['resource_group']\n storage_account_name = parsed_storage_account['resource_name']\n\n storage_client = cf_storage(cmd.cli_ctx)\n keys = storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)\n storage_account_key = keys.keys[0].value # pylint: disable=no-member\n elif storage_account:\n raise CLIError('Failed to get access key for storage account: {}'.format(storage_account))\n return storage_account_key\n\n\ndef get_storage_account_endpoint(cmd, storage_account, is_wasb):\n from ._client_factory import cf_storage\n from msrestazure.tools import parse_resource_id, is_valid_resource_id\n host = None\n if is_valid_resource_id(storage_account):\n parsed_storage_account = parse_resource_id(storage_account)\n resource_group_name = parsed_storage_account['resource_group']\n storage_account_name = parsed_storage_account['resource_name']\n\n storage_client = cf_storage(cmd.cli_ctx)\n storage_account = storage_client.storage_accounts.get_properties(\n resource_group_name=resource_group_name,\n account_name=storage_account_name)\n\n def extract_endpoint(storage_account, is_wasb):\n if not storage_account:\n return None\n return storage_account.primary_endpoints.dfs if not is_wasb else storage_account.primary_endpoints.blob\n\n def extract_host(uri):\n import re\n return uri and re.search('//(.*)/', uri).groups()[0]\n\n host = extract_host(extract_endpoint(storage_account, is_wasb))\n return host\n\n\ndef build_identities_info(identities):\n from azure.mgmt.hdinsight.models import ClusterIdentity, ResourceIdentityType\n identity = None\n if identities:\n identity_type = ResourceIdentityType.user_assigned\n identity = ClusterIdentity(type=identity_type)\n identity.user_assigned_identities = {e: {} for e in identities}\n\n return identity\n\n\ndef build_virtual_network_profile(subnet):\n from msrestazure.tools import resource_id, parse_resource_id, is_valid_resource_id\n from azure.mgmt.hdinsight.models import VirtualNetworkProfile\n from knack.util import CLIError\n\n vnet_profile = None\n if is_valid_resource_id(subnet):\n parsed_subnet_id = parse_resource_id(subnet)\n subscription_name = parsed_subnet_id['subscription']\n resource_group_name = parsed_subnet_id['resource_group']\n vnet_namespace = parsed_subnet_id['namespace']\n vnet_type = parsed_subnet_id['type']\n vnet_name = parsed_subnet_id['name']\n vnet_id = resource_id(\n subscription=subscription_name,\n resource_group=resource_group_name,\n namespace=vnet_namespace,\n type=vnet_type,\n name=vnet_name)\n vnet_profile = VirtualNetworkProfile(id=vnet_id, subnet=subnet)\n elif subnet:\n raise CLIError('Invalid subnet: {}'.format(subnet))\n return vnet_profile\n\n\ndef parse_domain_name(domain):\n from msrestazure.tools import parse_resource_id, is_valid_resource_id\n domain_name = None\n if is_valid_resource_id(domain):\n parsed_domain_id = parse_resource_id(domain)\n domain_name = parsed_domain_id['resource_name']\n return domain_name\n\n\n# 
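# (Editor's note, hedged example with illustrative values) parse_domain_name()
# above extracts the trailing resource name from a full domain-service
# resource ID and returns None for anything that is not a valid ARM ID:
#
#     parse_domain_name('/subscriptions/00000000-0000-0000-0000-000000000000'
#                       '/resourceGroups/rg/providers/Microsoft.AAD'
#                       '/domainServices/contoso.com')
#     # -> 'contoso.com'
#     parse_domain_name('contoso.com')
#     # -> None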
Validate ESP cluster creation required parameters\ndef validate_esp_cluster_create_params(esp,\n cluster_name,\n resource_group_name,\n cluster_type,\n subnet,\n domain,\n cluster_admin_account,\n assign_identity,\n ldaps_urls,\n cluster_admin_password,\n cluster_users_group_dns):\n from knack.util import CLIError\n if esp:\n missing_params = []\n if not cluster_name:\n missing_params.append(\"--name/-n\")\n if not resource_group_name:\n missing_params.append(\"--resource-group/-g\")\n if not cluster_type:\n missing_params.append(\"--type/-t\")\n if not subnet:\n missing_params.append(\"--subnet\")\n if not domain:\n missing_params.append(\"--domain\")\n if not cluster_admin_account:\n missing_params.append(\"--cluster-admin-account\")\n if not cluster_users_group_dns:\n missing_params.append(\"--cluster-users-group-dns\")\n if not assign_identity:\n missing_params.append(\"--assign-identity\")\n\n if missing_params:\n raise CLIError('the following params are required '\n 'when --esp is specified: {}'.format(', '.join(missing_params)))\n else:\n esp_params = []\n if domain:\n esp_params.append(\"--domain\")\n if cluster_admin_account:\n esp_params.append(\"--cluster-admin_account\")\n if ldaps_urls:\n esp_params.append(\"--ldaps-urls\")\n if cluster_admin_password:\n esp_params.append(\"--cluster-admin-password\")\n if cluster_users_group_dns:\n esp_params.append(\"--cluster-users-group-dns\")\n\n if esp_params:\n raise CLIError('the following params are required only '\n 'when --esp is specified: {}'.format(', '.join(esp_params)))\n\n\ndef get_resource_id_by_name(cli_ctx, resource_type, resource_name):\n from ._client_factory import cf_resources\n from knack.util import CLIError\n\n client = cf_resources(cli_ctx)\n filter_str = \"resourceType eq '{}' and name eq '{}'\".format(resource_type, resource_name) if resource_type else None\n resources = list(client.resources.list(filter=filter_str))\n if not resources:\n raise CLIError('Fails to retrieve any resource by name {}'.format(resource_name))\n if len(resources) > 1:\n raise CLIError('Found more than one resources by name {}. 
'\n 'Please specify one of the following resource IDs explicitly:\\n{}'\n .format(resource_name, '\\n'.join([resource.id for resource in resources])))\n return resources[0].id\n","sub_path":"src/azure-cli/azure/cli/command_modules/hdinsight/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"234327633","text":"from collections import defaultdict\nfrom operator import itemgetter\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom src.preprocessing.preprocess import read_dependencies\nfrom src.utils import get_name, get_date\n\n\ndef jaccard_distance() -> None:\n \"\"\"\n Calculates the Jaccard distances for all the repos, save the pre-calculated distances\n as a NumPy file.\n :return: None.\n \"\"\"\n reqs = read_dependencies()\n matrix = np.zeros((len(reqs), len(reqs)))\n print(f\"The shape of the matrix is {matrix.shape}\")\n for index1, repo1 in tqdm(enumerate(reqs)):\n repo1reqs = set(reqs[repo1])\n for index2, repo2 in enumerate(reqs):\n repo2reqs = set(reqs[repo2])\n matrix[index1][index2] = len(repo1reqs.intersection(repo2reqs)) / len(\n repo1reqs.union(repo2reqs))\n np.save(f\"models/jaccard\", matrix)\n\n\ndef predict_closest_by_jaccard(names: List[str], amount: int, single_version: bool,\n filter_versions: bool) -> Dict[str, List[Tuple[str, str, float]]]:\n \"\"\"\n Given the list of names of projects, find the closest to them by Jaccard similarity.\n :param names: a list of full repo names that must be searched.\n :param amount: number of the closest repos to find for each query project.\n :param single_version: if True, will only consider the repos of the same version as query.\n :param filter_versions: if True, only the closest version of any repo will be in the output.\n :return: dictionary {repo: [(close_repo, version, similarity), ...]}.\n \"\"\"\n closest = defaultdict(list)\n # Load pre-calculated Jaccard distances\n data = np.load(f\"models/jaccard.npy\")\n\n # Load the names of repos\n repos_list = []\n with open(f\"models/repos_list.txt\") as fin:\n for line in fin:\n repos_list.append(line.rstrip())\n\n # Iterate over query projects\n for query_full_name in names:\n # Get a list of tuples (repo, Jaccard similarity to query repo)\n lst = [(x, y) for x, y in zip(repos_list, data[repos_list.index(query_full_name)])]\n lst = sorted(lst, key=itemgetter(1, 0), reverse=True) # Sort by Jaccard\n query_name = get_name(query_full_name)\n query_date = get_date(query_full_name)\n banned = {query_name}\n for candidate in lst:\n if candidate[1] == 1:\n continue # Skip exactly the same cases, they are of no interest\n candidate_name = get_name(candidate[0])\n candidate_date = get_date(candidate[0])\n if single_version and (query_date != candidate_date):\n continue # Skip the projects from another version if necessary\n if candidate_name in banned:\n continue # Skip banned\n closest[query_full_name].append((\n candidate_name,\n candidate_date,\n candidate[1]\n ))\n if filter_versions and (not single_version):\n banned.add(candidate_name) # If only one version per repo, skip further versions\n if len(closest[query_full_name]) >= amount:\n break # If enough candidates are gathered, stop the process\n return closest\n","sub_path":"src/predictions/jaccard.py","file_name":"jaccard.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
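# (Editor's note, hedged) Despite the name jaccard_distance(), the matrix the
# record above saves holds Jaccard *similarity*, |A & B| / |A | B| -- which is
# why predict_closest_by_jaccard() sorts candidates in descending order and
# skips values of exactly 1. A self-contained check of the same formula:

def jaccard_similarity(a, b):
    """Jaccard similarity of two collections of requirements."""
    a, b = set(a), set(b)
    return len(a & b) / len(a | b) if (a or b) else 0.0

assert jaccard_similarity(['numpy', 'scipy'], ['numpy', 'tqdm']) == 1 / 3
assert jaccard_similarity([], []) == 0.0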
+{"seq_id":"408288712","text":"#!/bin/python\nfrom __future__ import print_function\n\ndef ashtonString(str, chr_offset):\n \"\"\"\n \"set\" data structure in python has a large RAM footprint and returns a MemoryError in case of very long string. Due to this, the solution can only pass\n through the first 4 testcases. Any other solutions are welcome. :)\n \"\"\"\n arr = []\n end = 1\n while end != len(str):\n for bgn in range(len(str) - end + 1):\n arr.append(str[bgn:bgn + end])\n end += 1\n arr.sort()\n arr = ''.join(arr)\n print(arr)\n return arr[chr_offset - 1]\n\n\nif __name__ == '__main__':\n\n s = 'edvidhafc'\n\n k = 3\n\n res = ashtonString(s, k)\n\n print(res + '\\n')\n","sub_path":"python/ashton-n-string.py","file_name":"ashton-n-string.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"419933871","text":"\"\"\"Defining file_name\"\"\"\n\n\ndef file_open(file_name):\n with open(file_name) as a:\n game_list = []\n for line in a:\n game_list.append(line.split(\"\\t\"))\n return(game_list)\n\n\n\"\"\"How many games are in the file?\nExpected name of the function: count_games(file_name)\nExpected output of the function: a number\"\"\"\n\n\ndef count_games(file_name):\n game_list = file_open(file_name)\n return(len(game_list))\n\n\n\"\"\"Is there a game from a given year?\nExpected name of the function: decide(file_name, year)\nExpected output of the function: boolean value\"\"\"\n\n\ndef decide(file_name, year):\n game_list = file_open(file_name)\n for i in game_list:\n if str(year) == str(i[2]):\n return True\n return False\n\n\"\"\"Which was the latest game?\nExpected name of the function: get_latest(file_name)\nExpected output of the function: the title of the latest game as a string\"\"\"\n\n\ndef get_latest(file_name):\n game_list = file_open(file_name)\n highest_year = 0\n for i in game_list:\n if int(i[2]) > highest_year:\n highest_year = int(i[2])\n latest_game = i[0]\n return(latest_game)\n\n\"\"\"How many games do we have by genre?\nExpected name of the function: count_by_genre(file_name, genre)\nExpected output of the function: a number\"\"\"\n\n\ndef count_by_genre(file_name, genre):\n game_list = file_open(file_name)\n genre_list = [i[3] for i in game_list]\n return(genre_list.count(genre))\n\n\"\"\"What is the line number of the given game (by title)?\nExpected name of the function: get_line_number_by_title(file_name, title)\nExpected output of the function: a number (if there is no game found, then raises ValueError exception)\"\"\"\n\n\ndef get_line_number_by_title(file_name, title):\n game_list = file_open(file_name)\n for i, element in enumerate(game_list):\n if element[0] == title:\n return(i + 1)\n\n# # Report functions\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"619849363","text":"from __future__ import print_function\n\nimport gdb\n\n\nclass ChibiosPrefixCommand(gdb.Command):\n \"\"\"Prefix for ChibiOS related helper commands\"\"\"\n def __init__(self):\n super(ChibiosPrefixCommand, self).__init__(\"chibios\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE,\n True)\n\n# List of all information to print for threads\n# Format is: \nTHREAD_INFO = [(\"{:10}\", \"Address\", \"{thread.address:#10x}\"),\n (\"{:10}\", \"StkLimit\", \"{thread.stack_limit:#10x}\"),\n (\"{:10}\", \"Stack\", \"{thread.stack_start:#10x}\"),\n 
(\"{:>6}\", \"Free\", \"{thread.stack_unused:6}\"),\n (\"{:>6}\", \"Total\", \"{thread.stack_size:6}\"),\n (\"{:16}\", \"Name\", \"{thread.name:16}\"),\n (\"{:10}\", \"State\", \"{thread.state_str}\")]\n\n# Build the string for thread info header\nTHREAD_INFO_HEADER_STRING = \" \".join(each[0] for each in THREAD_INFO)\nTHREAD_INFO_HEADER_DATA = [each[1] for each in THREAD_INFO]\nTHREAD_INFO_HEADER = THREAD_INFO_HEADER_STRING.format(*THREAD_INFO_HEADER_DATA)\n\n# Build format string for thread info rows.\nTHREAD_INFO = \" \".join(each[2] for each in THREAD_INFO)\n\n\nclass ChibiosThread(object):\n \"\"\"Class to model ChibiOS/RT thread\"\"\"\n THREAD_STATE = [\"READY\", \"CURRENT\", \"SUSPENDED\", \"WTSEM\", \"WTMTX\",\n \"WTCOND\", \"SLEEPING\", \"WTEXIT\", \"WTOREVT\",\n \"WTANDEVT\", \"SNDMSGQ\", \"SNDMSG\", \"WTMSG\",\n \"WTQUEUE\", \"FINAL\"]\n\n def __init__(self, thread):\n \"\"\" Initialize a Thread object. Will throw exceptions if fields do not\n exist\n\n \"\"\"\n self._stklimit = 0\n self._r13 = 0\n self._address = 0\n self._stack_size = 0\n self._stack_unused = 0\n self._name = \"\"\n self._state = 0\n self._flags = 0\n self._prio = 0\n self._refs = 0\n self._time = 0\n\n # Extract all thread information\n # Get a gdb.Type which is a void pointer.\n void_p = gdb.lookup_type('void').pointer()\n\n # stklimit and r13 are different pointer types, so cast to get the\n # arithmetic correct\n self._r13 = thread['p_ctx']['r13'].cast(void_p)\n\n # p_stklimit is optional.\n if 'p_stklimit' in thread.type.keys():\n self._stklimit = thread['p_stklimit'].cast(void_p)\n\n # only try to dump the stack if we have reasonable confidence that it\n # exists\n if self._stklimit > 0:\n self._stack_size = self._r13 - self._stklimit\n # Try to dump the entire stack of the thread\n inf = gdb.selected_inferior()\n\n try:\n stack = inf.read_memory(self._stklimit, self._stack_size)\n\n # Find the first non-'U' (0x55) element in the stack space.\n for i, each in enumerate(stack):\n if (each != 'U'):\n self._stack_unused = i\n break\n else:\n # Everything is 'U', apparently.\n self._stack_unused = self._stack_size\n\n except gdb.MemoryError:\n self._stack_unused = 0\n\n else:\n self._stack_size = 0\n self._stack_unused = 0\n\n self._address = thread.address\n\n if len(thread['p_name'].string()) > 0:\n self._name = thread['p_name'].string()\n\n self._state = int(thread['p_state'])\n self._flags = thread['p_flags']\n self._prio = thread['p_prio']\n self._refs = thread['p_refs']\n\n # p_time is optional\n if 'p_time' in thread.type.keys():\n self._time = thread['p_time']\n\n @staticmethod\n def sanity_check():\n \"\"\"Check to see if ChibiOS/RT has been built with enough debug\n information to read thread information.\n \"\"\"\n thread_type = gdb.lookup_type('Thread')\n\n # Sanity checks on Thread\n # From http://stackoverflow.com/questions/1285911/python-how-do-i-check-that-multiple-keys-are-in-a-dict-in-one-go\n if not all(k in thread_type.keys() for k in (\"p_newer\", \"p_older\")):\n raise gdb.GdbError(\"ChibiOS/RT thread registry not enabled, cannot\"\n \" access thread information!\")\n\n if 'p_stklimit' not in thread_type.keys():\n print(\"No p_stklimit in Thread struct; enable\"\n \" CH_DBG_ENABLE_STACK_CHECK\")\n\n if 'p_time' not in thread_type.keys():\n print(\"No p_time in Thread struct; enable\"\n \" CH_DBG_THREADS_PROFILING\")\n\n @property\n def name(self):\n return self._name\n\n @property\n def stack_size(self):\n return long(self._stack_size)\n\n @property\n def stack_limit(self):\n return 
long(self._stklimit)\n\n @property\n def stack_start(self):\n return long(self._r13)\n\n @property\n def stack_unused(self):\n return long(self._stack_unused)\n\n @property\n def address(self):\n return long(self._address)\n\n @property\n def state(self):\n return self._state\n\n @property\n def state_str(self):\n return ChibiosThread.THREAD_STATE[self.state]\n\n @property\n def flags(self):\n return self._flags\n\n @property\n def prio(self):\n return self._prio\n\n @property\n def time(self):\n return self._time\n\n\ndef chibios_get_threads():\n \"\"\" Create a list of ChibiosThreads for all threads currently in\n the system\n\n \"\"\"\n # Make sure Thread has enough info to work with\n ChibiosThread.sanity_check()\n\n threads = []\n\n # Walk the thread registry\n rlist_p = gdb.parse_and_eval('&rlist')\n rlist_as_thread = rlist_p.cast(gdb.lookup_type('Thread').pointer())\n newer = rlist_as_thread.dereference()['p_newer']\n older = rlist_as_thread.dereference()['p_older']\n\n while (newer != rlist_as_thread):\n ch_thread = ChibiosThread(newer.dereference())\n threads.append(ch_thread)\n\n current = newer\n newer = newer.dereference()['p_newer']\n older = newer.dereference()['p_older']\n\n if (older != current):\n raise gdb.GdbError('Rlist pointer invalid--corrupt list?')\n\n return threads\n\n\nclass ChibiosThreadsCommand(gdb.Command):\n \"\"\"Print all the ChibiOS threads and their stack usage.\n\n This will not work if ChibiOS was not compiled with, at a minumum,\n CH_USE_REGISTRY. Additionally, CH_DBG_ENABLE_STACK_CHECK and\n CH_DBG_FILL_THREADS are necessary to compute the used/free stack\n for each thread.\n \"\"\"\n def __init__(self):\n super(ChibiosThreadsCommand, self).__init__(\"chibios threads\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n\n def invoke(self, args, from_tty):\n threads = chibios_get_threads()\n\n if threads is not None:\n print(THREAD_INFO_HEADER)\n for thread in threads:\n print(THREAD_INFO.format(thread=thread))\n\n\nclass ChibiosThreadCommand(gdb.Command):\n \"\"\"Print information about the currently selected thread\"\"\"\n def __init__(self):\n super(ChibiosThreadCommand, self).__init__(\"chibios thread\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n\n def invoke(self, args, from_tty):\n thread = gdb.selected_thread()\n if thread is not None:\n threads = chibios_get_threads()\n\n # inf.ptid is PID, LWID, TID; TID corresponds to the address in\n # memory of the Thread*.\n newer = thread.ptid[2]\n\n ch_thread = next((i for i in threads if i.address == newer), None)\n if ch_thread is not None:\n print(THREAD_INFO_HEADER)\n print(THREAD_INFO.format(thread=ch_thread))\n else:\n print(\"Invalid thread\")\n\n else:\n print(\"No threads found--run info threads first\")\n\n\nclass ChibiosTraceCommand(gdb.Command):\n \"\"\"Print the last entries in the trace buffer\"\"\"\n\n def __init__(self):\n super(ChibiosTraceCommand, self).__init__(\"chibios trace\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n\n def trace_line(self, index, time, state, prev_thread, curr_thread):\n \"\"\"Return a formatted string for a single trace\"\"\"\n trace_format = \"{:6} {:8d} {:#10x} {:16} {:10} {:#10x} {:16}\"\n if prev_thread is None:\n return trace_format.format(index,\n time,\n 0,\n \"\",\n ChibiosThread.THREAD_STATE[state],\n curr_thread.address,\n curr_thread.name)\n else:\n return trace_format.format(index,\n time,\n prev_thread.address,\n prev_thread.name,\n ChibiosThread.THREAD_STATE[state],\n curr_thread.address,\n curr_thread.name)\n\n def invoke(self, args, from_tty):\n 
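# (Editor's inserted commentary, hedged) This method accepts an optional
# count argument ("chibios trace 20"), clamps it to tb_size, rotates the
# circular dbg_trace_buffer at tb_ptr so the oldest event comes first, and
# resolves each event's Thread* against the registry walked by
# chibios_get_threads(). Note the -63 offsets below hard-code a 64-entry
# trace buffer; with a different trace-buffer size the printed event
# numbers would be wrong.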
argv = gdb.string_to_argv(args)\n if (len(argv) > 0):\n count = int(argv[0])\n else:\n count = 10\n\n threads = chibios_get_threads()\n\n try:\n dbg_trace_buffer = gdb.parse_and_eval(\"dbg_trace_buffer\")\n except gdb.error:\n raise gdb.GdbError(\"Debug Trace Buffer not found. Compile with\"\n \" CH_DBG_ENABLE_TRACE\")\n\n trace_buffer_size = int(dbg_trace_buffer['tb_size'])\n\n if (count > trace_buffer_size):\n count = trace_buffer_size\n\n trace_buffer = dbg_trace_buffer['tb_buffer']\n\n current_trace = dbg_trace_buffer['tb_ptr']\n\n trace_start = int(current_trace.dereference().address -\n trace_buffer.dereference().address)\n\n traces = []\n\n for i in xrange(trace_start, trace_buffer_size):\n traces.append(trace_buffer[i])\n\n for i in xrange(0, trace_start):\n traces.append(trace_buffer[i])\n\n print(\"{:>6} {:>8} {:10} {:16} {:10} {:10} {:16}\".format(\"Event\",\n \"Time\",\n \"Previous\",\n \"Name\",\n \"State\",\n \"Current\",\n \"Name\"))\n\n trace_lines = []\n\n # Print oldest trace separately since we don't have previous\n # information\n thread = next((i for i in threads if\n i.address == long(traces[0]['se_tp'])), None)\n trace_lines.append(self.trace_line(-63,\n int(traces[0]['se_time']),\n int(traces[0]['se_state']),\n None,\n thread))\n\n for j, event in enumerate(traces[1:], 1):\n curr_thread = next((i for i in threads if\n i.address == long(event['se_tp'])), None)\n prev_thread = next((i for i in threads if\n i.address == long(traces[j - 1]['se_tp'])), None)\n trace_lines.append(self.trace_line(-63 + j,\n int(event['se_time']),\n int(event['se_state']),\n prev_thread,\n curr_thread))\n\n for trace in trace_lines[-count:]:\n print(trace)\n\n\nclass ChibiosInfoCommand(gdb.Command):\n \"\"\"Print information about ChibiOS/RT\"\"\"\n def __init__(self):\n super(ChibiosInfoCommand, self).__init__(\"chibios info\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n\n def invoke(self, args, from_tty):\n try:\n ch_debug = gdb.parse_and_eval('ch_debug')\n except gdb.error:\n raise gdb.GdbError(\"Could not find ch_debug\")\n\n ch_version = int(ch_debug['ch_version'])\n ch_major = (ch_version >> 11) & 0x1f\n ch_minor = (ch_version >> 6) & 0x1f\n ch_patch = (ch_version & 0x1f)\n\n print(\"ChibiOS/RT version {}.{}.{}\".format(ch_major,\n ch_minor,\n ch_patch))\n\n\nclass ChibiosTimersCommand(gdb.Command):\n \"\"\"Print current timers. 
Partially unimplemented\"\"\"\n def __init__(self):\n super(ChibiosTimersCommand, self).__init__(\"chibios timers\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n\n def invoke(self, args, from_tty):\n vtlist_p = gdb.parse_and_eval('&vtlist')\n\n vtlist_as_timer = vtlist_p.cast(gdb.lookup_type(\"VirtualTimer\").pointer())\n\n vt_next = vtlist_as_timer.dereference()['vt_next']\n vt_prev = vtlist_as_timer.dereference()['vt_prev']\n\n print(\"{:6} {:10} {:10}\".format(\"Time\",\n \"Callback\",\n \"Param\"))\n\n while (vt_next != vtlist_as_timer):\n vt_time = int(vt_next.dereference()['vt_time'])\n vt_func = long(vt_next.dereference()['vt_func'])\n vt_par = long(vt_next.dereference()['vt_par'])\n print(\"{:6} {:#10x} {:#10x}\".format(vt_time,\n vt_func,\n vt_par))\n\n current = vt_next\n vt_next = vt_next.dereference()['vt_next']\n vt_prev = vt_next.dereference()['vt_prev']\n\n if (vt_prev != current):\n raise gdb.GdbError('Rlist pointer invalid--corrupt list?')\n\n\nChibiosPrefixCommand()\nChibiosThreadsCommand()\nChibiosThreadCommand()\nChibiosTraceCommand()\nChibiosInfoCommand()\nChibiosTimersCommand()\n","sub_path":"chibios.py","file_name":"chibios.py","file_ext":"py","file_size_in_byte":14752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"48392904","text":"from matplotlib import pyplot as plt\n\n\nif __name__ == \"__main__\":\n\n friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]\n minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]\n labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n\n plt.scatter( friends , minutes )\n for label , friendCount , minuteCount in zip( labels , friends , minutes ):\n plt.annotate( label , xy = ( friendCount , minuteCount ) , xytext = (5,-5) , textcoords = \"offset points\")\n plt.title( \"Daily Minutes vs. 
Number of Friends\" )\n plt.xlabel( \"# of friends\" )\n plt.ylabel( \"daily minutes spent on the site\" )\n\n # make the x-axis and y-axis equal width\n plt.axis( \"equal\" )\n \n plt.savefig( \"04-scatterplots\" )\n plt.show()\n","sub_path":"Chapter-03-Visualizing-Data/04-scatterplots.py","file_name":"04-scatterplots.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239457494","text":"#!/usr/bin/env python3\nfrom .base_layer import *\nfrom ..stream_handler import stream_maps\n\n\n# TODO: Convert operations to gpu kernel\n# TODO: Fold into Convolution.\n# TODO: Check and remove if previous layer has bias.\n\nclass BatchNormalization(Layer):\n\tdef __init__(\n\t\t\tself,\n\t\t\tmomentum=0.9,\n\t\t\tepsilon=1e-10,\n\t\t\tname=None\n\t\t\t):\n\t\tsaved_locals = locals()\t\t# save for do_init() function\n\t\tsuper().__init__(saved_locals)\n\n\tdef do_init(self, kwargs):\n\t\tinput_shape = self.get_inp_shape()\n\t\tself.shape = (None, *input_shape)\n\t\tself.batches = 1\n\t\tself.inp_shape = (self.batches, *input_shape)\n\t\tself.biases = cp.zeros(input_shape, dtype=self.dtype) # biases is beta\n\t\tself.weights = cp.ones(input_shape, dtype=self.dtype) # weights is gamma\n\t\tself.gamma = self.weights\n\t\tself.beta = self.biases\n\t\tself.kernels = self.weights\n\t\tself.w_m = cp.zeros_like(self.weights, dtype=self.dtype)\n\t\tself.w_v = cp.zeros_like(self.weights, dtype=self.dtype)\n\t\tself.b_m = cp.zeros_like(self.biases, dtype=self.dtype)\n\t\tself.b_v = cp.zeros_like(self.biases, dtype=self.dtype)\n\t\tself.epsilon = kwargs.get('epsilon')\n\t\tself.momentum = kwargs.get('momentum')\n\t\tself.moving_mean = None\n\t\tself.moving_var = None\n\t\tself.param = 4 * input_shape[-1]\n\t\tself.activation = echo\n\t\tself.backp_stream = stream_maps.get_next_stream()\n\t\tself.grad_event = stream_maps.default_stream.record()\n\n\t# self.update_moving = cp.ElementwiseKernel(\n\t# 'T inp, int32 row, int32 col, int32 out_row, int32 out_col,'\n\t# 'T coled',\n\t# '''\n\t# \tint in_y = ky * dy + out_y * sy - ph;\n\t# \tint in_x = kx * dx + out_x * sx - pw;\n\t# ''',\n\t# 'update_moving')\n\n\tdef forward(self, inp, training=True): # yeah, I know, too many repetitions\n\t\t# inp[batches,row,col,channels]\t\t\t## MAKE A KERNEL\n\t\tself.inp_shape = inp.shape\n\t\tif training:\n\t\t\tmean = inp.mean(axis=0) # (row,col,channels)\n\t\t\tself.xmu = inp - mean # (batches,row,col,channels)\n\t\t\tvar = (self.xmu ** 2).mean(axis=0) # (row,col,channels)\n\t\t\tself.grad_event = stream_maps.default_stream.record(self.grad_event)\n\t\t\tself.ivar = 1 / (var + self.epsilon) # (row,col,channels)\n\t\t\tself.istd = cp.sqrt(self.ivar) # (row,col,channels)\n\t\t\tself.xnorm = self.xmu * self.istd # (batches,row,col,channels)\n\t\t\twith self.backp_stream:\n\t\t\t\tself.backp_stream.wait_event(self.grad_event)\n\t\t\t\tif self.moving_mean is None:\n\t\t\t\t\tself.moving_mean = mean\n\t\t\t\t\tself.moving_var = var\n\t\t\t\telse:\n\t\t\t\t\tself.moving_mean = self.momentum * self.moving_mean + (1 - self.momentum) * mean\n\t\t\t\t\tself.moving_var = self.momentum * self.moving_var + (1 - self.momentum) * var\n\t\telse:\n\t\t\tif self.moving_mean is None:\n\t\t\t\tmean = inp.mean(axis=0) # (row,col,channels)\n\t\t\t\tself.xmu = inp - mean # (batches,row,col,channels)\n\t\t\t\tvar = (self.xmu ** 2).mean(axis=0) # (row,col,channels)\n\t\t\t\tself.ivar = 1 / (var + self.epsilon) # (row,col,channels)\n\t\t\t\tself.istd = 
cp.sqrt(self.ivar) # (row,col,channels)\n\t\t\t\tself.moving_mean = mean\n\t\t\t\tself.moving_var = var\n\t\t\t\tself.xnorm = self.xmu * self.istd # (batches,row,col,channels)\n\t\t\telse:\n\t\t\t\tself.xmu = inp - self.moving_mean # (batches,row,col,channels)\t## all this is just for proper shape while model.free()\n\t\t\t\tself.ivar = 1 / (self.moving_var + self.epsilon)\n\t\t\t\tself.istd = cp.sqrt(self.ivar) # (row,col,channels)\n\t\t\t\tself.xnorm = self.xmu * self.istd\n\t\t\t# self.xnorm=(inp-self.moving_mean)/cp.sqrt(self.moving_var+self.epsilon)\n\t\treturn self.xnorm * self.weights + self.biases\n\n\tdef backprop(self, grads, do_d_inp=True):\n\t\t# grads(batches,row,col,channels), xmu(batches,row,col,channels)=inp-mean\n\t\tbatches = self.inp_shape[0]\n\t\tif batches != self.batches:\n\t\t\tself.batches = batches\n\n\t\tself.d_c_b = grads.sum(axis=0) # (row,col,channels)\t\t# biases is beta\n\t\tself.grad_event = stream_maps.default_stream.record(self.grad_event)\n\n\t\twith self.backp_stream:\n\t\t\tself.backp_stream.wait_event(self.grad_event)\n\t\t\tself.d_c_w = (self.xnorm * grads).sum(axis=0) # (row,col,channels)\t\t# gamma is weights\n\n\t\t# d_inp=(1/self.batches)*self.istd*self.weights*(self.batches*grads-self.d_c_b-self.xmu*self.ivar*((grads*self.xmu).sum(axis=0)))\n\t\td_inp = self.istd * self.weights * (\n\t\t\t\tself.batches * grads - self.d_c_b - self.xmu * self.ivar * ((grads * self.xmu).sum(axis=0)))\n\t\treturn d_inp\n","sub_path":"nnet_gpu/layers/BatchNormalization.py","file_name":"BatchNormalization.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"480700706","text":"import csv\nimport sys\nfrom os import listdir\n\nlistOfFiles = []\n\nif len(sys.argv) < 2:\n print(\"Usage:\\npython3 exclude.py \")\n\nfullPath = sys.argv[1]\n# read tab-delimited file (from https://stackoverflow.com/questions/29759305/how-do-i-convert-a-tsv-to-csv)\nwith open(fullPath,'r') as fin:\n cr = csv.reader(fin, delimiter='`')\n filecontents = [line for line in cr]\n\n# write comma-delimited file (comma is the default delimiter) (from https://stackoverflow.com/questions/29759305/how-do-i-convert-a-tsv-to-csv)\nwith open(fullPath,'w') as fou:\n cw = csv.writer(fou)\n cw.writerows(filecontents)\n","sub_path":"unBacktick.py","file_name":"unBacktick.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"50265694","text":"import numpy as np, cv2\n\n# 회선 수행 함수\ndef filter(image, mask):\n\n rows, cols = image.shape[:2]\n dst = np.zeros((rows, cols), np.float32) # 회선 결과 저장 행렬\n xcenter, ycenter = mask.shape[1] // 2, mask.shape[0] // 2 # 마스크 중심 좌표\n\n for i in range(ycenter, rows - ycenter): # 입력 행렬 반복 순회\n for j in range(xcenter, cols - xcenter):\n y1, y2 = i - ycenter, i + ycenter + 1 # 관심영역 높이 범위\n x1, x2 = j - xcenter, j + xcenter + 1 # 관심영역 너비 범위\n\n roi = image[y1:y2, x1:x2].astype(\"float32\") # 관심영역 형변환\n tmp = cv2.multiply(roi, mask) # 회선 적용\n dst[i, j] = cv2.sumElems(tmp)[0] # 출력화소 저장\n\n return dst # 자료형 변환하여 반환\n\n\ndef differential(image, data1, data2):\n # 입력 인자로 마스크 행렬 초기화\n mask1 = np.array(data1, np.float32).reshape(3, 3)\n mask2 = np.array(data2, np.float32).reshape(3, 3)\n\n # 사용자 정의 회선 함수\n dst1 = filter(image, mask1)\n dst2 = filter(image, mask2)\n dst = cv2.magnitude(dst1, dst2); # 회선 결과 두 행렬의 크기 계산\n\n # dst1, dst2 = np.abs(dst1), np.abs(dst2) # 회선 결과 행렬 양수 변경\n dst = 
cv2.convertScaleAbs(dst)\n dst1 = cv2.convertScaleAbs(dst1)\n dst2 = cv2.convertScaleAbs(dst2)\n return dst, dst1, dst2\n\n\n# 침식 연산\ndef erode(img, mask):\n dst = np.zeros(img.shape, np.uint8)\n if mask is None: mask = np.ones((3, 3), np.uint8)\n\n mcnt = cv2.countNonZero(mask)\n xcenter, ycenter = int(mask.shape[1]/2), int(mask.shape[0]/2) # 마스크 중심 좌표\n for i in range(ycenter, img.shape[0] - ycenter): # 입력 행렬 반복 순회\n for j in range(xcenter, img.shape[1] - xcenter):\n # 마스크 영역\n y1, y2 = i - ycenter, i + ycenter + 1 # 마스크 높이 범위\n x1, x2 = j - xcenter, j + xcenter + 1 # 마스크 너비 범위\n roi = img[y1:y2, x1:x2] # 마스크 영역\n\n temp = cv2.bitwise_and(roi, mask)\n cnt = cv2.countNonZero(temp) # 일치한 화소수 계산\n dst[i, j] = 255 if (cnt == mcnt) else 0 # 출력 화소에 저장\n\n return dst\n\n\ndef dilate(img, mask):\n dst = np.zeros(img.shape, np.uint8)\n if mask is None:\n mask = np.ones((3, 3), np.uint8)\n\n xcenter, ycenter = mask.shape[1] // 2, mask.shape[0]//2 # 마스크 중심 좌표\n for i in range(ycenter, img.shape[0] - ycenter): # 입력 행렬 반복 순회\n for j in range(xcenter, img.shape[1] - xcenter):\n # 마스크 영역\n y1, y2 = i - ycenter, i + ycenter + 1 # 마스크 높이 범위\n x1, x2 = j - xcenter, j + xcenter + 1 # 마스크 너비 범위\n roi = img[y1:y2, x1:x2] # 마스크 영역\n\n # 행렬 처리 방식\n temp = cv2.bitwise_and(roi, mask)\n cnt = cv2.countNonZero(temp)\n dst[i, j] = 0 if (cnt == 0) else 255 # 출력 화소에 저장\n return dst","sub_path":"Common/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"483356091","text":"import os\nfrom core.services.DisplayDataService import DisplayDataService\nfrom pkg_resources import resource_string\n\n\nclass SimpleGraphVisualization(DisplayDataService):\n def __init__(self):\n self.links = []\n pass\n\n def name(self):\n return \"Simple graph visualization\"\n\n def identifier(self):\n return \"simple_graph_visualization\"\n\n def replace_special_characters(self, node):\n if len(str(node.node_text)) > 20: node.node_text = node.node_text[0:20] + \"...\"\n\n node.node_text = str(node.node_text).strip()\n node.node_id = node.node_id.replace('\\\\', '-')\n node.node_id = node.node_id.replace(\";\", \"\\;\")\n node.node_id = node.node_id.replace(',', '\\,')\n node.node_id = node.node_id.replace('\"', '\\\\\"')\n node.node_id = node.node_id.replace(\"'\", \"\\\\'\")\n node.node_text = str(node.node_text).strip()\n node.node_text = \" \".join(node.node_text.splitlines())\n\n node.node_text = str(node.node_text).replace('\"', '``')\n node.node_text = str(node.node_text).replace(\"'\", \"`\")\n\n def create_links(self, current_node, parent=None):\n self.replace_special_characters(current_node)\n\n if parent is None:\n self.links.append(\n \"{ source: '\" + current_node.node_id + \"|\" + current_node.node_text + \"|\" + str(current_node.selected) +\n \"', target:'\" + current_node.node_id + \"|\" + current_node.node_text + \"|\" + str(current_node.selected) + \"' },\")\n\n for node in current_node.neighbours:\n self.replace_special_characters(node)\n\n if \"{ source: '\" + current_node.node_id + \"|\" + current_node.node_text + \"|\" + str(current_node.selected) + \\\n \"', target:'\" + node.node_id + \"|\" + node.node_text + \"|\" + str(node.selected) + \"' },\" not in self.links:\n self.links.append(\n \"{ source: '\" + current_node.node_id + \"|\" + current_node.node_text + \"|\" + str(current_node.selected) +\n \"', target:'\" + node.node_id + \"|\" + node.node_text + \"|\" + str(node.selected) + \"' 
},\")\n self.create_links(node, current_node)\n return\n\n def get_graph_links(self, graph):\n self.links = []\n if not graph:\n return \"[]\"\n\n links = \"[\"\n for g in graph:\n self.create_links(g)\n for l in self.links:\n links += l\n links = links[:-1] + \"];\"\n return links\n\n def get_graph_script(self):\n print(os.getcwd())\n return resource_string(__name__, 'simple_graph.js')\n\n","sub_path":"simple_graph_visualization/visualizer/simple_graph_visualization.py","file_name":"simple_graph_visualization.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"618555952","text":"from datagenerator import DataGenerator\nimport os\n\n\nclass DataReader:\n\n def __init__(self, data_dir, scale_size, shuffle=False, input_channels=1, nb_classes=5):\n\n self.__n_classes = nb_classes\n self.__shuffle = shuffle\n self.__input_channels = input_channels\n self.__scale_size = scale_size\n self.__generator = None\n self.read_data(data_dir)\n\n def get_generator(self):\n return self.__generator #возвращает генерейшн\n\n def read_data(self, data_dir): #сначала посмотрели все файлы , 1 раз, больше метод Walk не ласт, переменная I становится больше\n #и после этого мы только добавляем файлы и метки, data_dir-каталог в котором мы работаем?\n patterns = [] #берем шаблоны\n labels = [] #берем метки\n\n i = -1\n for root, dirs, files in os.walk(data_dir): #Метод Python walk () генерирует имена файлов\n # в дереве каталогов путем обхода дерева сверху вниз или снизу вверх. ��аталог data_dir\n if i < 0:\n i = i + 1\n else:\n [patterns.append(root + '/' + file) for file in files] #Метод append () принимает один\n # элемент в качестве входного параметра и добавляет его в конец списка.- т е добавляем по входному файлу и его метке\n [labels.append(i) for file in files]\n i = i + 1\n\n self.__generator = DataGenerator(patterns, labels, self.__scale_size, self.__shuffle,\n self.__input_channels, self.__n_classes)\n","sub_path":"tensor_flow/datareader.py","file_name":"datareader.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462886148","text":"#find the Kth Largest element in an array\nimport heapq\ndef kthLargest(lst, k):\n heapq._heapify_max(lst)\n ans = 0\n for i in range(0 , k) :\n ans = heapq._heappop_max(lst)\n return ans\n\n\n\nn=int(input())\nlst=list(int(i) for i in input().strip().split(' '))\nk=int(input())\nans=kthLargest(lst, k)\nprint(ans)\n","sub_path":"DATASTRUCTURESANDALGORITHMS/Python/PRIORITY QUEUES/KthLargestElement.py","file_name":"KthLargestElement.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"333634765","text":"import json\nimport pickle\nfrom google.cloud import storage\nfrom scipy.spatial import distance\ndef hello_world(request):\n\n all_files = {}\n all_files_vector = {}\n final_dict = {}\n client_store = storage.Client()\n\n storage_bucket = client_store.get_bucket(\"model_and_vector\")\n blob = storage_bucket.blob(\"tfidfVectorizer.pkl\")\n blob.download_to_filename(\"/tmp/vectorizer.pkl\")\n blob = storage_bucket.blob(\"model.pkl\")\n blob.download_to_filename(\"/tmp/model.pkl\")\n\n fp = open(\"/tmp/model.pkl\",\"rb\")\n ml_model = pickle.load(fp)\n fp.close()\n\n fp = open(\"/tmp/vectorizer.pkl\",\"rb\")\n vectorizer = pickle.load(fp)\n fp.close()\n\n print(\"Model succesfully 
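# ---------------------------------------------------------------------------
# (Editor's notes on the two records above, hedged)
#
# 1. datareader.py -- its Russian comments say, roughly: get_generator()
#    "returns the generator"; in read_data(), "os.walk() generates the file
#    names in the data_dir tree top-down; the first iteration (i < 0) skips
#    the root directory itself, after which every file path and its label
#    (the per-directory index i) are appended via list.append()".
#
# 2. KthLargestElement.py -- heapq._heapify_max / _heappop_max are private
#    CPython helpers and also mutate the input list. An equivalent using
#    only the public heapq API:

import heapq

def kth_largest(lst, k):
    """k-th largest element of lst, via the public heapq API."""
    return heapq.nlargest(k, lst)[-1]

assert kth_largest([3, 2, 1, 5, 6, 4], 2) == 5
# ---------------------------------------------------------------------------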
imported\")\n print(\"Vectorizer successfully imported\")\n \n cluster_centroids = ml_model.cluster_centers_.argsort()[:,::-1]\n\n print(cluster_centroids)\n\n blobs = client_store.list_blobs(\"serverlessb00835071\")\n for each_blob in blobs:\n filename = \"/tmp/\"+each_blob.name\n each_blob.download_to_filename(filename)\n\n fp = open(filename,\"r\")\n all_files[each_blob.name] = fp.read()\n fp.close()\n \n print(len(all_files))\n\n for key, value in all_files.items():\n all_files_vector[key] = vectorizer.transform([all_files[key]]).toarray()[0]\n\n print(len(all_files_vector))\n print(\"Vector created for all the files\")\n\n total_clusters = 20\n\n for key, value in all_files_vector.items():\n num = 0\n difference = distance.euclidean(value,cluster_centroids[0])\n for individual in range(0,len(cluster_centroids)):\n res = distance.euclidean(value,cluster_centroids[individual])\n if(res <= difference):\n num = individual\n final_dict[key] = num\n \n print(final_dict)\n\n clusters = []\n final_clusters = []\n for key, value in final_dict.items():\n file_lis = []\n for i in final_dict.keys():\n if(final_dict[i]==value):\n file_lis.append(i)\n clusters.append(file_lis)\n\n for i in clusters:\n if(i not in final_clusters):\n final_clusters.append(i)\n \n print(final_clusters)\n return json.dumps(final_clusters)\n","sub_path":"serverless-services/components/MachineLearning/formClusters/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347736887","text":"#!/usr/bin/env python\n\n'''\nAssign categories to the pages in the CitationHunt database.\n\nUsage:\n assign_categories.py [--max-categories=] [--mysql-config=]\n\nOptions:\n --max-categories= Maximum number of categories to use [default: inf].\n --mysql-config= MySQL config file [default: ./ch.my.cnf].\n'''\n\nfrom __future__ import unicode_literals\n\nimport sys\nsys.path.append('../')\n\nimport config\nimport chdb as chdb_\nfrom utils import *\n\nimport docopt\n\nimport re\nimport collections\n\nlog = Logger()\n\nclass CategoryName(unicode):\n '''\n The canonical format for categories, which is the one we'll use\n in the CitationHunt database: no Category: prefix and spaces instead\n of underscores.\n '''\n def __new__(klass, ustr):\n assert isinstance(ustr, unicode)\n assert not ustr.startswith('Category:'), ustr\n assert '_' not in ustr, ustr\n return super(CategoryName, klass).__new__(klass, ustr)\n\n @staticmethod\n def from_wp_page(ustr):\n ustr = d(ustr)\n assert not ustr.startswith('Category:'), ustr\n assert ' ' not in ustr, ustr\n return CategoryName(ustr.replace('_', ' '))\n\n @staticmethod\n def from_wp_categorylinks(ustr):\n ustr = d(ustr)\n assert not ustr.startswith('Category:'), ustr\n return CategoryName(ustr.replace('_', ' '))\n\ndef category_ids_to_names(wpcursor, category_ids):\n category_names = set()\n for pageid in category_ids:\n wpcursor.execute('''SELECT page_title FROM page WHERE page_id = %s''',\n (pageid,))\n category_names.update(\n CategoryName.from_wp_page(row[0])\n for row in wpcursor)\n return category_names\n\ndef category_name_to_id(catname):\n return mkid(catname)\n\ndef load_unsourced_pageids(chdb):\n cursor = chdb.cursor()\n cursor.execute('''SELECT page_id FROM articles''')\n return set(r[0] for r in cursor)\n\ndef load_hidden_categories(wpcursor):\n cfg = config.get_localized_config()\n wpcursor.execute('''\n SELECT cl_from FROM categorylinks WHERE\n cl_to = %s''', 
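Two issues are worth flagging in the cluster-assignment loop above: cluster_centroids holds the output of cluster_centers_.argsort()[:, ::-1] (per-cluster rankings of feature indices), not centroid coordinates, so Euclidean distances against it are meaningless; and difference is never updated inside the inner loop, so num ends up as the last centroid whose distance beats the initial one, not the nearest. A corrected nearest-centroid sketch, assuming ml_model is a fitted scikit-learn KMeans (whose predict method does this in one call):

from scipy.spatial import distance

def nearest_centroid(vector, centroids):
    # Track the best distance seen so far against the raw cluster_centers_.
    best_idx, best_dist = 0, distance.euclidean(vector, centroids[0])
    for idx in range(1, len(centroids)):
        d = distance.euclidean(vector, centroids[idx])
        if d < best_dist:
            best_idx, best_dist = idx, d
    return best_idx

# With a fitted KMeans this collapses to:
#   final_dict[key] = int(ml_model.predict([vector])[0])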
(cfg.hidden_category,))\n hidden_page_ids = [row[0] for row in wpcursor]\n return category_ids_to_names(wpcursor, hidden_page_ids)\n\ndef load_categories_for_page(wpcursor, pageid):\n wpcursor.execute('''\n SELECT cl_to FROM categorylinks WHERE cl_from = %s''', (pageid,))\n return set(CategoryName.from_wp_categorylinks(row[0]) for row in wpcursor)\n\ndef category_is_usable(catname, hidden_categories):\n assert isinstance(catname, CategoryName)\n if catname in hidden_categories:\n return False\n cfg = config.get_localized_config()\n for regexp in cfg.category_name_regexps_blacklist:\n if re.search(regexp, catname):\n return False\n return True\n\ndef choose_categories(categories_to_ids, unsourced_pageids, max_categories):\n categories = set()\n category_sets = categories_to_ids.items()\n total = float(len(unsourced_pageids))\n\n desired_pages_per_category = 20\n category_costs = {\n catname: abs(len(pageids) - desired_pages_per_category) + 1.0\n for catname, pageids in category_sets\n }\n\n def key_fn(cs):\n catname, pageids = cs\n return len(pageids & unsourced_pageids) / category_costs[catname]\n\n while unsourced_pageids and len(categories) < max_categories:\n category_sets.sort(key = key_fn)\n catname, covered_pageids = category_sets.pop()\n categories.add((catname, frozenset(covered_pageids)))\n unsourced_pageids -= covered_pageids\n\n rem = len(unsourced_pageids)\n log.progress('%d uncategorized pages (%d %%)' % \\\n (rem, (rem / total) * 100))\n log.info('finished with %d categories' % len(categories))\n return categories\n\ndef build_snippets_links_for_category(cursor, category_id):\n cursor.execute('''\n SELECT snippets.id FROM snippets, articles_categories, articles\n WHERE snippets.article_id = articles_categories.article_id AND\n articles.page_id = articles_categories.article_id AND\n articles_categories.category_id = %s ORDER BY articles.title;''',\n (category_id,))\n snippets = [r[0] for r in cursor]\n\n prev = snippets[0]\n for s in snippets[1:] + [snippets[0]]:\n cursor.execute('''\n INSERT INTO snippets_links VALUES (%s, %s, %s)\n ''', (prev, s, category_id))\n prev = s\n\ndef update_citationhunt_db(chdb, categories):\n for n, (catname, pageids) in enumerate(categories):\n category_id = category_name_to_id(catname)\n def insert(cursor):\n cursor.execute('''\n INSERT IGNORE INTO categories VALUES(%s, %s)\n ''', (category_id, unicode(catname)))\n\n prev = ''\n for page_id in pageids:\n cursor.execute('''\n INSERT INTO articles_categories VALUES (%s, %s)\n ''', (page_id, category_id))\n build_snippets_links_for_category(cursor, category_id)\n chdb.execute_with_retry(insert)\n\n log.progress('saved %d categories' % (n + 1))\n log.info('all done.')\n\ndef reset_chdb_tables(cursor):\n log.info('resetting articles_categories table...')\n cursor.execute('DELETE FROM articles_categories')\n log.info('resetting categories table...')\n cursor.execute('DELETE FROM categories')\n log.info('resetting snippets_links table...')\n cursor.execute('DELETE FROM snippets_links')\n\ndef assign_categories(max_categories, mysql_default_cnf):\n chdb = chdb_.init_scratch_db()\n chdb.execute_with_retry(reset_chdb_tables)\n unsourced_pageids = load_unsourced_pageids(chdb)\n\n wpdb = chdb_.init_wp_replica_db()\n wpcursor = wpdb.cursor()\n assert wpcursor.execute('SELECT * FROM page LIMIT 1;') == 1\n assert wpcursor.execute('SELECT * FROM categorylinks LIMIT 1;') == 1\n\n hidden_categories = load_hidden_categories(wpcursor)\n log.info('loaded %d hidden categories (%s...)' % \\\n (len(hidden_categories), 
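choose_categories above is a cost-weighted greedy set cover: each round it takes the category with the best ratio of still-uncovered pages to cost, removes the pages it covered, and repeats until everything is covered or the category budget runs out. A toy run of the same idea (all data made up for illustration):

cats = {"a": {1, 2, 3}, "b": {3, 4}, "c": {5}}
cost = {"a": 1.0, "b": 1.0, "c": 2.0}
uncovered = {1, 2, 3, 4, 5}
chosen = []
while uncovered:
    best = max(cats, key=lambda c: len(cats[c] & uncovered) / cost[c])
    chosen.append(best)
    uncovered -= cats[best]
print(chosen)  # ['a', 'b', 'c']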
next(iter(hidden_categories))))\n\n categories_to_ids = collections.defaultdict(set)\n page_ids_with_no_categories = 0\n for n, pageid in enumerate(list(unsourced_pageids)):\n page_has_at_least_one_category = False\n for catname in load_categories_for_page(wpcursor, pageid):\n if category_is_usable(catname, hidden_categories):\n page_has_at_least_one_category = True\n categories_to_ids[catname].add(pageid)\n if not page_has_at_least_one_category:\n unsourced_pageids.remove(pageid)\n page_ids_with_no_categories += 1\n log.progress('loaded categories for %d pageids' % (n + 1))\n\n log.info('%d pages lack usable categories!' % page_ids_with_no_categories)\n log.info('found %d usable categories (%s, %s...)' % \\\n (len(categories_to_ids), categories_to_ids.keys()[0],\n categories_to_ids.keys()[1]))\n\n categories = choose_categories(categories_to_ids, unsourced_pageids,\n max_categories)\n\n update_citationhunt_db(chdb, categories)\n wpdb.close()\n chdb.close()\n return 0\n\nif __name__ == '__main__':\n args = docopt.docopt(__doc__)\n max_categories = float(args['--max-categories'])\n mysql_default_cnf = args['--mysql-config']\n ret = assign_categories(max_categories, mysql_default_cnf)\n sys.exit(ret)\n","sub_path":"scripts/assign_categories.py","file_name":"assign_categories.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10413236","text":"# -*- encoding: utf-8 -*-\nimport json\n\ndef load_json_file(file_path):\n \"\"\"\n :param str file_path:\n :rtype: list of dictionary\n \"\"\"\n json_file = \"jawiki-country.json\"\n json_data = []\n with open(json_file) as f:\n for row in f:\n json_data.append(json.loads(row))\n\n return json_data\n\n\ndef get_record(json_data, title):\n \"\"\"\n :param list of dictionary json_data:\n :param title:\n :rtype:str\n \"\"\"\n for record in json_data:\n if record[\"title\"] == title:\n return record[\"text\"]\n\n\nif __name__ == \"__main__\":\n data = load_json_file(\"jawiki-country.json\")\n print(get_record(data, \"イギリス\"))\n","sub_path":"kaneko/chapter3/knock20.py","file_name":"knock20.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"121463810","text":"# Unless explicitly stated otherwise all files in this repository are licensed\n# under the BSD 3-Clause License.\n\n# This product includes software developed at Datadog\n# (https://www.datadoghq.com/). 
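In the knock20 record above, load_json_file ignores its file_path argument and re-reads the hardcoded jawiki-country.json, so the parameter is dead. A corrected sketch of the same two helpers (behavior otherwise unchanged; イギリス is the article title being looked up):

import json

def load_json_file(file_path):
    """Read one JSON object per line and return the records as a list of dicts."""
    records = []
    with open(file_path) as f:
        for row in f:
            records.append(json.loads(row))
    return records

def get_record(json_data, title):
    """Return the "text" field of the first record whose "title" matches."""
    for record in json_data:
        if record["title"] == title:
            return record["text"]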
Copyright 2017-2018 Datadog, Inc.\n\n\n# Imports.\n\n\n# 1st-party.\nimport datetime\nimport logging\nimport os\nimport shutil\n\n\n# 3rd-party.\nfrom securesystemslib.interface import (\n generate_and_write_rsa_keypair,\n import_rsa_privatekey_from_file,\n import_rsa_publickey_from_file,\n)\n\n\n# Constants.\nLOG_LEVEL = logging.INFO\nSHARED_DIR = '/shared/'\nCONSISTENT_SNAPSHOT = True\n\nOLD_KEYSTORE_DIR = os.path.join(SHARED_DIR, 'old_keystore')\nOLD_REPOSITORY_DIR = os.path.join(SHARED_DIR, 'old_repository')\n\nNEW_REPOSITORY_DIR = os.path.join(SHARED_DIR, 'new_repository')\nNEW_KEYSTORE_DIR = os.path.join(SHARED_DIR, 'new_keystore')\nNEW_TARGETS_DIR = os.path.join(NEW_REPOSITORY_DIR, 'targets')\nNEW_SIMPLE_DIR = os.path.join(NEW_TARGETS_DIR, 'simple')\n\nREPOSITORY_DIR = os.path.join(SHARED_DIR, 'repository')\nMETADATA_DIR = os.path.join(REPOSITORY_DIR, 'metadata.staged')\nTARGETS_DIR = os.path.join(REPOSITORY_DIR, 'targets')\nSIMPLE_DIR = os.path.join(TARGETS_DIR, 'simple')\n\n\n# NOTE: We use 2048-bit RSA keys, because that is the \"lowest common\n# denominator\" at the time of writing (e.g., Nitrokey HSM, YubiHSM, and Azure\n# Key Vault support them). According to...\n# https://www.keylength.com/en/4/\n# ...NIST recommends that 2048-bit keys can be used from 2016 to 2030.\nDEFAULT_RSA_KEY_BITS = 2048\n\n\n# Utility classes.\n\n\nclass Threshold:\n\n\n def __init__(self, m, n):\n assert isinstance(m, int)\n assert isinstance(n, int)\n assert m > 0\n assert n > 0\n assert m <= n\n self.m = m\n self.n = n\n\n\nclass Keypath:\n\n\n def __init__(self, private, public):\n assert os.path.isfile(private)\n assert os.path.isfile(public)\n self.private = private\n self.public = public\n\n\nclass Key:\n\n\n def __init__(self, path, obj):\n self.path = path\n self.obj = obj\n\n\nclass Keypair:\n\n\n def __init__(self, private, public):\n assert isinstance(private, Key)\n assert isinstance(public, Key)\n self.private = private\n self.public = public\n\n\nclass Keyring:\n\n\n def __init__(self, threshold, keypairs):\n assert isinstance(threshold, Threshold)\n assert len(keypairs) == threshold.n\n for keypair in keypairs:\n assert isinstance(keypair, Keypair)\n self.threshold = threshold\n self.keypairs = keypairs\n\n\n# Utility functions.\n\n\ndef get_new_private_keypath(rolename, i):\n private_key_filename = '{}_rsa2048_key_{}'.format(rolename, i)\n return os.path.join(NEW_KEYSTORE_DIR, private_key_filename)\n\n\ndef get_public_keypath(private_keypath):\n # this is the tuf filename convention at the time of writing.\n return '{}.pub'.format(private_keypath)\n\n\ndef colorize_text(text, code):\n # color code + text + normal color\n # http://ozzmaker.com/add-colour-to-text-in-python/\n return code + text + '\\x1b[0m'\n\n\ndef bright_blue_text(text):\n return colorize_text(text, '\\033[1;34;40m')\n\n\ndef bright_red_text(text):\n return colorize_text(text, '\\033[1;31;40m')\n\n\ndef bright_rolename(rolename):\n return bright_blue_text(rolename.upper())\n\n\ndef bright_keynum(keynum):\n return bright_blue_text(str(keynum))\n\n\ndef flush(repository):\n # 'Write all metadata to \"repository/metadata.staged/\". 
The common case is\n # to crawl the filesystem for all the delegated roles in\n # \"metadata.staged/\".'\n repository.writeall(consistent_snapshot=CONSISTENT_SNAPSHOT)\n\n\ndef write_keypair(rolename, i, n):\n '''\n \n A Keypath.\n '''\n\n private_keypath = get_new_private_keypath(rolename, i)\n assert not os.path.isfile(private_keypath)\n public_keypath = get_public_keypath(private_keypath)\n assert not os.path.isfile(public_keypath)\n\n # \"Generate and write a key pair. The private key is saved encrypted. A\n # 'password' argument may be supplied, otherwise a prompt is presented.\"\n logging.info('Trying to create {} keypair {}/{}...'\\\n .format(bright_rolename(rolename),\n bright_keynum(i),\n bright_keynum(n)))\n generate_and_write_rsa_keypair(private_keypath, bits=DEFAULT_RSA_KEY_BITS)\n\n return Keypath(private_keypath, public_keypath)\n\n\ndef read_keypair(rolename, keypath, i, n):\n '''\n \n A Keypair.\n '''\n\n assert isinstance(keypath, Keypath)\n\n # Import the private key just created...\n logging.info('Trying to load {} {} key {}/{}...'\\\n .format(bright_rolename(rolename),\n bright_red_text('PRIVATE'),\n bright_keynum(i),\n bright_keynum(n)))\n\n private_keypath = keypath.private\n private_key_obj = import_rsa_privatekey_from_file(keypath.private,\n prompt=True)\n private_key = Key(private_keypath, private_key_obj)\n\n # and its corresponding public key.\n public_keypath = keypath.public\n public_key_obj = import_rsa_publickey_from_file(keypath.public)\n public_key = Key(public_keypath, public_key_obj)\n\n return Keypair(private_key, public_key)\n\n\ndef rename_keys_to_match_keyid(keypair, rolename, i, n):\n '''\n \n Rename public / private keys to match their keyid, so that it is easy\n to later find public keys on the repository, or private keys on disk.\n Also see https://github.com/theupdateframework/tuf/issues/573\n '''\n\n keyid = keypair.public.obj['keyid']\n\n # Rename the private key filename to match the keyid.\n new_private_keypath = os.path.join(NEW_KEYSTORE_DIR, keyid)\n # Move the key to the new filename.\n assert not os.path.isfile(new_private_keypath)\n shutil.move(keypair.private.path, new_private_keypath)\n # Update the path to the key.\n keypair.private.path = new_private_keypath\n\n # Rename the public key filename to match the keyid.\n new_public_keypath = get_public_keypath(new_private_keypath)\n # Move the key to the new filename.\n assert not os.path.isfile(new_public_keypath)\n shutil.move(keypair.public.path, new_public_keypath)\n # Update the path to the key.\n keypair.public.path = new_public_keypath\n\n logging.info('{} key {}/{} lives at {}...'\\\n .format(bright_rolename(rolename),\n bright_keynum(i),\n bright_keynum(n),\n bright_blue_text(keyid)))\n\n\ndef set_expiration_timestamp(role_object, timedelta_object):\n '''\n \n Set when this role metadata should expire, and thus be refreshed.\n '''\n\n assert isinstance(timedelta_object, datetime.timedelta)\n role_object.expiration = datetime.datetime.now() + timedelta_object\n\n\ndef write_and_read_new_keys(rolename, threshold):\n '''\n \n A Keyring with a treshold.n of Keypair-s.\n '''\n\n keypairs = []\n for i in range(1, threshold.n + 1):\n keypath = write_keypair(rolename, i, threshold.n)\n keypair = read_keypair(rolename, keypath, i, threshold.n)\n # Rename the private and public keys to match the keyid instead.\n # Why? 
So that we know how to find keys later on repository / disk.\n rename_keys_to_match_keyid(keypair, rolename, i, threshold.n)\n keypairs.append(keypair)\n return Keyring(threshold, tuple(keypairs))\n\n\ndef log(log_filename):\n # Log to file.\n logging.basicConfig(filename=log_filename, level=LOG_LEVEL, filemode='w',\n format='[%(asctime)s UTC] [%(levelname)s] '\\\n '[%(filename)s:%(funcName)s:%(lineno)s] '\\\n '%(message)s',\n datefmt='%m-%d %H:%M')\n\n # Also log to stdout.\n # https://docs.python.org/3/howto/logging-cookbook.html#logging-to-multiple-destinations\n console = logging.StreamHandler()\n console.setLevel(LOG_LEVEL)\n formatter = logging.Formatter('%(name)-21s: %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"315038567","text":"\"\"\" Data tasks\n\"\"\"\n\nimport logging\n\nfrom celery import shared_task\nfrom celery.result import AsyncResult\n\nfrom core_main_app.access_control.exceptions import AccessControlError\nfrom core_main_app.components.data import api as data_api\nfrom core_main_app.components.user import api as user_api\nfrom core_main_app.components.xsl_transformation import (\n api as xsl_transformation_api,\n)\nfrom core_main_app.system import api as system_api\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef async_migration_task(data_list, xslt_id, template_id, user_id, migrate):\n \"\"\"Async task which perform a migration / validation of the data list for the given target template id\n\n Args:\n data_list:\n xslt_id:\n template_id:\n user_id:\n migrate: (boolean) Perform the migration\n\n Return:\n {\"valid\": [\"id\"...], \"wrong\": [\"id\"...]}\n \"\"\"\n success = []\n errors = []\n current_progress = 0\n total_data = len(data_list)\n\n try:\n user = user_api.get_user_by_id(user_id)\n # check user status\n if not (user.is_staff or user.is_superuser):\n raise AccessControlError(\"Only admin user can migrate data.\")\n\n target_template = system_api.get_template_by_id(template_id)\n\n # get xsl transformation if selected\n if xslt_id is not None:\n xslt = xsl_transformation_api.get_by_id(str(xslt_id))\n\n for data_id in data_list:\n data = data_api.get_by_id(data_id, user=user)\n # modify the data temporarily with the new targeted template\n data.template = target_template\n\n if xslt_id is not None:\n # modify the xml content temporarily with the transformed data content\n data.xml_content = xsl_transformation_api.xsl_transform(\n data.xml_content, xslt.name\n )\n\n try:\n # save the new template for the data if the migration is True\n if migrate:\n system_api.upsert_data(data)\n else:\n # check if the data is valid\n data_api.check_xml_file_is_valid(data)\n\n success.append(str(data.id))\n except Exception:\n errors.append(str(data.id))\n finally:\n # increase the current progress and update the task state\n current_progress += 1\n async_migration_task.update_state(\n state=\"PROGRESS\",\n meta={\"current\": current_progress, \"total\": total_data},\n )\n except Exception as exception:\n async_migration_task.update_state(\n state=\"ABORT\",\n meta={\"current\": current_progress, \"total\": total_data},\n )\n raise Exception(f\"Something went wrong: {str(exception)}\")\n\n return {\"valid\": success, \"wrong\": errors}\n\n\n@shared_task\ndef async_template_migration_task(\n templates, 
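Taken together, the helpers above reduce a key ceremony for one role to a Threshold plus one call. A usage sketch (interactive: both the generate and import steps prompt for key passwords, and NEW_KEYSTORE_DIR must already exist):

# Any 2 of 3 root keys must sign.
threshold = Threshold(2, 3)
keyring = write_and_read_new_keys("root", threshold)

for pair in keyring.keypairs:
    # After rename_keys_to_match_keyid(), the filename on disk is the keyid.
    print(pair.public.obj["keyid"], "->", pair.private.path)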
xslt_id, target_template_id, user_id, migrate\n):\n \"\"\"Async task which perform a migration / validation of all the data which belong to the given template id list\n\n Args:\n templates:\n xslt_id:\n target_template_id:\n user_id\n migrate: (boolean) Perform the migration\n\n Return:\n {\"valid\": , \"wrong\": }\n \"\"\"\n # get the data list to check\n current_data_progress = 0\n current_template_progress = -1\n total_data = 0\n total_template = len(templates)\n success = []\n error = []\n try:\n if target_template_id and total_template > 0:\n # get the user\n user = user_api.get_user_by_id(user_id)\n # check user status\n if not (user.is_staff or user.is_superuser):\n raise AccessControlError(\"Only admin user can migrate data.\")\n # get the target template\n target_template = system_api.get_template_by_id(target_template_id)\n # get xsl transformation if selected\n if xslt_id is not None:\n xslt = xsl_transformation_api.get_by_id(str(xslt_id))\n\n for template_id in templates:\n\n # increase the number of processed template\n current_template_progress += 1\n # rest de number of data\n current_data_progress = 0\n\n # get a QuerySet of all the data with the given template\n data_list = system_api.get_all_by_template(template_id)\n\n total_data = data_list.count()\n\n for data in data_list.all():\n # modify the data temporarily with the new targeted template\n data.template = target_template\n\n if xslt_id is not None:\n # modify the xml content temporarily with the transformed data content\n data.xml_content = (\n xsl_transformation_api.xsl_transform(\n data.xml_content, xslt.name\n )\n )\n\n # check if the data is valid\n try:\n # save the new template for the data if the migration is True\n if migrate:\n system_api.upsert_data(data)\n else:\n data_api.check_xml_file_is_valid(data)\n\n success.append(str(data.id))\n except Exception:\n error.append(str(data.id))\n finally:\n # increase the current progress and update the task state\n current_data_progress += 1\n async_template_migration_task.update_state(\n state=\"PROGRESS\",\n meta={\n \"template_current\": current_template_progress,\n \"template_total\": total_template,\n \"data_current\": current_data_progress,\n \"data_total\": total_data,\n },\n )\n\n return {\"valid\": success, \"wrong\": error}\n\n else:\n async_template_migration_task.update_state(\n state=\"ABORT\",\n meta={\n \"template_current\": current_template_progress,\n \"template_total\": total_template,\n \"data_current\": current_data_progress,\n \"data_total\": total_data,\n },\n )\n raise Exception(\n \"Wrong template id.\"\n if not target_template_id\n else \"Please provide template id.\"\n )\n except Exception as exception:\n async_template_migration_task.update_state(\n state=\"ABORT\",\n meta={\n \"template_current\": current_template_progress,\n \"template_total\": total_data,\n \"data_current\": current_data_progress,\n \"data_total\": total_data,\n },\n )\n raise Exception(f\"Something went wrong: {str(exception)}\")\n\n\ndef get_task_progress(task_id):\n \"\"\"Get task status for the given task id\n\n Args:\n task_id:\n\n Return:\n {\n 'state': PENDING | PROGRESS | SUCCESS,\n 'details': result (for SUCCESS) | null (for PENDING) | { PROGRESS info }\n }\n \"\"\"\n result = AsyncResult(task_id)\n response_data = {\n \"state\": result.state,\n \"details\": result.info,\n }\n return response_data\n\n\ndef get_task_result(task_id):\n \"\"\"Get task result for the given task id\n\n Args:\n task_id:\n\n Return: {\n \"valid\": [\"data_id_1\", \"data_id_2\" 
...],\n \"wrong\": [\"data_id_3\", \"data_id_4\" ...]\n }\n \"\"\"\n result = AsyncResult(task_id).result\n return result\n\n\n@shared_task\ndef index_mongo_data(data_id):\n \"\"\"Index a data in MongoDB\"\"\"\n try:\n data = system_api.get_data_by_id(data_id)\n try:\n from core_main_app.components.mongo.models import MongoData\n\n mongo_data = MongoData.init_mongo_data(data)\n mongo_data.save()\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while indexing data : {str(exception)}\"\n )\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while indexing data : {str(exception)}\"\n )\n\n\n@shared_task\ndef update_mongo_data_user(data_ids, user_id):\n \"\"\"Update user id of all data in list\n\n Args:\n data_ids:\n user_id:\n\n Returns:\n\n \"\"\"\n try:\n from core_main_app.components.mongo.models import MongoData\n\n for data_id in data_ids:\n mongo_data = MongoData.objects.get(pk=data_id)\n mongo_data.user_id = user_id\n mongo_data.save()\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while updating data owner : {str(exception)}\"\n )\n\n\n@shared_task\ndef update_mongo_data_workspace(data_ids, workspace_id):\n \"\"\"Update workspace id of all data in list\n Args:\n data_ids:\n workspace_id:\n\n Returns:\n\n \"\"\"\n\n try:\n from core_main_app.components.mongo.models import MongoData\n\n for data_id in data_ids:\n mongo_data = MongoData.objects.get(pk=data_id)\n mongo_data._workspace_id = workspace_id\n mongo_data.save()\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while updating data workspace : {str(exception)}\"\n )\n\n\n@shared_task\ndef delete_mongo_data(data_id):\n \"\"\"Delete a data in MongoDB\"\"\"\n try:\n try:\n from core_main_app.components.mongo.models import MongoData\n\n mongo_data = MongoData.objects.get(pk=data_id)\n mongo_data.delete()\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while deleting data : {str(exception)}\"\n )\n except Exception as exception:\n logger.error(\n f\"ERROR : An error occurred while deleting data : {str(exception)}\"\n )\n","sub_path":"core_main_app/components/data/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":10384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"527697517","text":"import yaml\n\n\"\"\"\noption to give a executable a yaml file and parse all the information\nfrom rather rather than creating python commands\nmore readable and serves as a record\n\"\"\"\n\ndef open_yaml(path_to_yaml):\n \"\"\"return a dict representation of a yaml file\"\"\"\n with open(path_to_yaml, \"r\") as f:\n yaml_dict = yaml.load(f)\n return yaml_dict\n\n\ndef experiment(yaml_dict):\n \"\"\"\n get argument for Job.add_experiment method\n\n this is optional, so if not there then return none\n \"\"\"\n if \"experiment\" in yaml_dict:\n experiment_arg = yaml_dict[\"experiment\"]\n if isinstance(experiment_arg, list):\n experiment_arg = experiment_arg[0]\n return {\"exp_dir\" : experiment_arg}\n\n\ndef chunk(yaml_dict):\n \"\"\"\n get argument for Job.chunk method\n\n this is optional, so if not there then return none\n \"\"\"\n if \"chunk\" in yaml_dict:\n chunk_arg = yaml_dict[\"chunk\"]\n if isinstance(chunk_arg, list):\n chunk_arg = chunk_arg[0]\n return {\"job_size\" : int(chunk_arg)}\n\n\ndef add_plate(yaml_dict):\n \"\"\"\n get argument for Job.add_plate method\n\n this is optional, so if not there then return None\n 
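The PROGRESS/ABORT states the tasks above write via update_state are what a caller polls back out through get_task_progress. A polling sketch (the argument values are placeholders, and a running Celery worker is assumed):

import time

result = async_migration_task.delay(data_ids, None, template_id, user_id, False)
while not result.ready():
    info = get_task_progress(result.id)
    if info["state"] == "PROGRESS":
        print("%(current)s/%(total)s" % info["details"])
    time.sleep(1)
print(get_task_result(result.id))  # {"valid": [...], "wrong": [...]}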
\"\"\"\n if \"add plate\" in yaml_dict:\n add_plate_dicts = yaml_dict[\"add plate\"]\n # returns a list of dictionaries\n if isinstance(add_plate_dicts, list):\n for d in add_plate_dicts:\n if \"experiment\" in d.keys():\n # is the experiment labels\n experiment = str(d[\"experiment\"])\n if \"plates\" in d.keys():\n # is the plates, either a string or a list\n plate_args = d[\"plates\"]\n if isinstance(plate_args, str):\n plates = [d[\"plates\"]]\n if isinstance(plate_args, list):\n plates = d[\"plates\"]\n return {\"exp_dir\" : experiment, \"plates\" : plates}\n\n\ndef remove_plate(yaml_dict):\n \"\"\"\n get argument for Job.remove_plate method\n\n this is optional, so not there then return None\n \"\"\"\n if \"remove plate\" in yaml_dict:\n remove_arg = yaml_dict[\"remove plate\"]\n # can either be a string or a list in Job.remove plate\n return {\"plates\" : remove_arg}\n\n\ndef create_commands(yaml_dict):\n \"\"\"\n get arguments for Job.create_commands\n\n not optional, so error if no matching keys are found\n \"\"\"\n if \"pipeline\" in yaml_dict:\n pipeline_arg = yaml_dict[\"pipeline\"]\n if isinstance(pipeline_arg, list):\n pipeline_arg = pipeline_arg[0]\n if \"location\" in yaml_dict:\n location_arg = yaml_dict[\"location\"]\n if isinstance(location_arg, list):\n location_arg = location_arg[0]\n # TODO more options rather than exactly \"commands location\"\n if \"commands location\" in yaml_dict:\n commands_loc_arg = yaml_dict[\"commands location\"]\n if isinstance(commands_loc_arg, list):\n commands_loc_arg = commands_loc_arg[0]\n return {\"pipeline\" : pipeline_arg,\n \"location\" : location_arg,\n \"commands_location\" : commands_loc_arg}\n\n\ndef check_yaml_args(yaml_dict):\n \"\"\"\n check the validity of the yaml arguments\n\n raises a ValueError if any of the arguments in the yaml setup file are\n not recognised\n \"\"\"\n valid_args = [\"experiment\",\n \"chunk\",\n \"pipeline\",\n \"location\",\n \"commands location\",\n \"remove plate\",\n \"add plate\"]\n for argument in yaml_dict.keys():\n if argument not in valid_args:\n err_msg = \"'{}' is not a recognised argument\".format(argument)\n raise ValueError(err_msg)\n\n\n","sub_path":"cptools2/parse_yaml.py","file_name":"parse_yaml.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197378008","text":"import modules as mod # Importing classes with the modules (car components)\n\"\"\"Notes:\n\n- Read the README.md file to see the naming conventions for variables,\nmethods, classes and constats. So the code remains consistant.\n\n- The 'modules.py' file creates a canvas (graphical window) with specified\nwidth and height. If you run modules.py file alone window closes imediatly\nthis is why in the main file (this file) in the last line of the main method\nwe call 'mod.canvas.mainloop()', which is another method from the tkinter\npackege to keep the window from closing.\n\n- Graphics tools are imported in 'modules.py' file, when using graphics tools\nin main file (this file) we need to use 'mod.' 
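yaml.load(f) in open_yaml above uses the full loader, which has been deprecated since PyYAML 5.1 because tagged input can instantiate arbitrary Python objects; for a plain settings file, safe_load is the standard fix:

import yaml

def open_yaml(path_to_yaml):
    """Return a dict representation of a YAML file, refusing arbitrary tags."""
    with open(path_to_yaml, "r") as f:
        return yaml.safe_load(f)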
before the grapic tools\nexample: 'mod.[you choice of graphic tool here]'\n\n...\n\nCodes Current situation:\n\n- Creates 'airmod' (quadrotor), 'podmod' (capsule), 'grdmod' (wheels)\nobjects from 'modules.py' classes and calls move() method to randomly\nmove around the graphical window.\n\n...\n\n\"\"\"\n\ndef main():\n \"\"\"This is the main function were the whole program will run\"\"\"\n ##########################################################\n airmod = mod.AirMod() # Creating air module (quadrotor) from class in 'mod'\n podmod = mod.PodMod() # Creating pod module (capsule) from class in 'mod'\n grdmod = mod.GrdMod() # Creating ground module (wheels) from class in 'mod'\n\n while True:\n airmod.move() # Calling move method from 'AirMod' class in 'mod' file\n podmod.move() # Calling move method from 'PodMod' class in 'mod' file\n grdmod.move() # Calling move method from 'PodMod' class in 'mod' file\n\n mod.tk.update() # Updates graphical window to show\n ##########################################################\n mod.canvas.mainloop() # Keeps graphical window from closing instantly...\n\nif __name__ == '__main__':\n main() # calling main\n","sub_path":"Testing folder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"499133750","text":"from argparse import (\n ArgumentParser,\n Namespace,\n _SubParsersAction,\n)\nimport os\nfrom pathlib import (\n Path,\n)\nimport sys\nimport time\nfrom typing import (\n Any,\n Callable,\n cast,\n Dict,\n Sequence,\n Tuple,\n)\n\nfrom eth_typing import BLSPubkey\n\nfrom eth_utils import (\n humanize_seconds,\n)\nfrom ruamel.yaml import (\n YAML,\n)\nfrom ssz.tools import (\n to_formatted_dict,\n)\n\nfrom eth2._utils.hash import (\n hash_eth2,\n)\nfrom eth2.beacon.state_machines.forks.xiao_long_bao import (\n XiaoLongBaoStateMachine,\n)\nfrom eth2.beacon.tools.misc.ssz_vector import override_lengths\nfrom eth2.beacon.tools.builder.initializer import (\n create_mock_genesis,\n)\nfrom eth2.beacon.typing import (\n Second,\n Timestamp,\n)\nfrom eth2._utils.bls import bls\nfrom trinity._utils.shellart import (\n bold_green,\n)\nfrom trinity.config import (\n TrinityConfig,\n)\nfrom trinity.extensibility import (\n BaseMainProcessComponent,\n)\nfrom trinity.components.eth2.constants import (\n VALIDATOR_KEY_DIR,\n)\n\nfrom .constants import (\n GENESIS_FILE,\n KEYS_DIR,\n)\n\n\nclass Client:\n name: str\n client_dir: Path\n validator_keys_dir: Path\n\n def __init__(self, name: str, root_dir: Path) -> None:\n self.name = name\n self.client_dir = root_dir / name\n self.validator_keys_dir = self.client_dir / VALIDATOR_KEY_DIR\n\n\ndef get_genesis_time_from_constant(genesis_time: Timestamp) -> Callable[[], Timestamp]:\n def get_genesis_time() -> Timestamp:\n return genesis_time\n return get_genesis_time\n\n\ndef get_genesis_time_from_delay(genesis_delay: Second)-> Callable[[], Timestamp]:\n def get_genesis_time() -> Timestamp:\n return Timestamp(int(time.time()) + genesis_delay)\n return get_genesis_time\n\n\nclass NetworkGeneratorComponent(BaseMainProcessComponent):\n @property\n def name(self) -> str:\n return \"NetworkGenerator\"\n\n @classmethod\n def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:\n\n testnet_generator_parser = subparser.add_parser(\n 'testnet',\n help='Generate testnet files',\n )\n testnet_generator_parser.add_argument(\n \"--network-dir\",\n help=\"Directory to create all 
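The while True loop in main() drives rendering by calling mod.tk.update() by hand, which starves Tk's own event loop (window drags and close events get serviced late). The idiomatic Tkinter pattern is to schedule a tick with after() and let mainloop() run it; a sketch assuming the three module objects created in main() are in scope:

def tick():
    airmod.move()
    podmod.move()
    grdmod.move()
    mod.tk.after(16, tick)  # re-arm: roughly 60 frames per second

tick()
mod.canvas.mainloop()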
the files into\",\n type=Path,\n default=Path(\".\"),\n )\n testnet_generator_parser.add_argument(\n \"--num\",\n help=\"Number of validators to generate\",\n type=int,\n default=100,\n )\n\n genesis_time_group = testnet_generator_parser.add_mutually_exclusive_group(\n required=True,\n )\n genesis_time_group.add_argument(\n \"--genesis-delay\",\n help=\"Set seconds delay after the genesis state is created as genesis time\",\n type=int,\n )\n genesis_time_group.add_argument(\n \"--genesis-time\",\n help=\"Set a genesis time as Unix int, e.g. 1559292765\",\n type=int,\n )\n\n testnet_generator_parser.set_defaults(func=cls.run_generate_testnet_dir)\n\n @classmethod\n def run_generate_testnet_dir(cls, args: Namespace, trinity_config: TrinityConfig) -> None:\n logger = cls.get_logger()\n logger.info(\"Generating testnet\")\n network_dir = args.network_dir\n if len(os.listdir(network_dir)) > 0:\n logger.error(\"This directory is not empty, won't create network files here\")\n sys.exit(1)\n\n clients = cls.generate_trinity_root_dirs(network_dir)\n keymap = cls.generate_keys(args.num, network_dir, clients)\n\n get_genesis_time = (\n get_genesis_time_from_constant(args.genesis_time)\n if args.genesis_time is not None\n else get_genesis_time_from_delay(args.genesis_delay)\n )\n\n cls.generate_genesis_state(get_genesis_time, network_dir, keymap, clients)\n\n logger.info(bold_green(\"Network generation completed\"))\n\n @classmethod\n def generate_keys(cls,\n num: int,\n network_dir: Path,\n clients: Tuple[Client, ...]) -> Dict[Any, Any]:\n logger = cls.get_logger()\n logger.info(\"Creating %s validators' keys\", num)\n keys_dir = network_dir / KEYS_DIR\n keys_dir.mkdir()\n\n privkeys = tuple(int.from_bytes(\n hash_eth2(str(i).encode('utf-8'))[:4], 'big')\n for i in range(num)\n )\n keymap = {bls.privtopub(key): key for key in privkeys}\n\n num_of_clients = len(clients)\n for validator_index, key in enumerate(privkeys):\n file_name = f\"v{validator_index:07d}.privkey\"\n private_key_path = keys_dir / file_name\n with open(private_key_path, \"w\") as f:\n f.write(str(key))\n\n # Distribute keys to clients\n client = clients[validator_index % num_of_clients]\n with open(client.validator_keys_dir / file_name, \"w\") as f:\n f.write(str(key))\n\n return keymap\n\n @classmethod\n def generate_genesis_state(cls,\n get_genesis_time: Callable[[], Timestamp],\n network_dir: Path,\n keymap: Dict[BLSPubkey, int],\n clients: Tuple[Client, ...]) -> None:\n logger = cls.get_logger()\n state_machine_class = XiaoLongBaoStateMachine\n # NOTE: see https://github.com/ethereum/trinity/issues/786\n override_lengths(XiaoLongBaoStateMachine.config)\n\n # Since create_mock_genesis takes a long time, update the real genesis_time later\n dummy_time = Timestamp(int(time.time()))\n state, _ = create_mock_genesis(\n pubkeys=cast(\n Sequence[BLSPubkey],\n keymap.keys(),\n ),\n config=state_machine_class.config,\n keymap=keymap,\n genesis_block_class=state_machine_class.block_class,\n genesis_time=dummy_time,\n )\n genesis_time = get_genesis_time()\n logger.info(\n \"Genesis time will be %s from now\",\n humanize_seconds(genesis_time - int(time.time())),\n )\n state = state.copy(\n genesis_time=genesis_time,\n )\n # The output here can be trusted, so use unsafe mode for performance\n yaml = YAML(typ='unsafe')\n with open(network_dir / GENESIS_FILE, \"w\") as f:\n yaml.dump(to_formatted_dict(state), f)\n\n # Distribute genesis file to clients\n for client in clients:\n with open(client.client_dir / GENESIS_FILE, \"w\") as f:\n 
yaml.dump(to_formatted_dict(state), f)\n\n @classmethod\n def generate_trinity_root_dirs(cls, network_dir: Path) -> Tuple[Client, ...]:\n logger = cls.get_logger()\n logger.info(\"Generating root directories for clients\")\n clients = tuple(Client(name, network_dir) for name in (\"alice\", \"bob\"))\n for client in clients:\n client.client_dir.mkdir()\n client.validator_keys_dir.mkdir()\n return clients\n","sub_path":"trinity/components/eth2/network_generator/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"93497980","text":"#!/usr/bin/env python\n\nimport rospy, sys\nfrom std_msgs.msg import Int8, String, Float64, Bool\nfrom pacmod_msgs.msg import PacmodCmd\nstate = 0\n#Target Values\ncurrentVelocity = 0.0\npreviousVelocity = 0.05\ntargetVelocity = 2.2352 #5MPH in m/s\nsignVelocity = 2.2352\nwhiteLineVelocity = 2.2352\nerror = 0.0\nintegralError = 0.0\nderivativeError = 0.0\npreviousError = 0.0\npreviousIntegralError = 0.0\nKp = 0.8\nKi = 0.2\nKd = 0.0\nthrottle = 0.0\nprevTime = 0.0\ntime = 0.0\ncontrolMax = 10.0\ncontrolMin = 0.0\nMPH2MPS = 0.44704\nintegralMax = 2*MPH2MPS\n\ndef state_callback(msg):\n global state\n state = msg.data\n if state == 7:\n rospy.signal_shutdown('killed by selfdrive manager')\n sys.exit()\n \ndef speedCallBack(msg):\n global currentVelocity\n currentVelocity = msg.data\n \ndef whiteLineCallback(msg):\n global whiteLineVelocity\n whiteLineVelocity = msg.data\n \ndef signCallback(msg):\n global signVelocity\n signVelocity = msg.data\n\n\ndef velController():\n global currentVelocity, previousVelocity, targetVelocity, whiteLineVelocity, signVelocity, error, integralError, derivativeError, previousError, previousIntegralError, Kp, Ki, Kd, throttle, prevTime, time, controlMax, controlMin, MPH2MPS, integralMax\n prevTime = time\n time = rospy.get_time()\n dt = time-prevTime\n targetVelocity = (whiteLineVelocity + signVelocity)/2\n error = targetVelocity - currentVelocity\n previousIntegralError = integralError\n integralError = (error * (dt) + integralError)\n if integralError > integralMax:\n integralError = integralMax\n elif integralError < -integralMax:\n intregralError = -integralMax\n derivativeError = currentVelocity - previousVelocity\n ep = (Kp * error)\n ei = (Ki * integralError)\n ed = (Kd * derivativeError)\n u = ep + ei + ed\n throttle = scale(u)\n \n if u > controlMax:\n throttle = scale(controlMax)\n u -= previousIntegralError\n \n elif u < controlMin:\n throttle = scale(controlMin)\n u -= previousIntegralError\n previousError = error\n previousVelocity = currentVelocity\n return throttle\n \ndef scale(x):\n global controlMax\n u = (x*(1.0/controlMax))+.2\n if u > 1.0:\n u = 1.0\n return u\n\ndef constVelocity():\n global currentVelocity, previousVelocity, targetVelocity, error, integralError, derivativeError, previousError, previousIntegralError, Kp, Ki, Kd, throttle, prevTime, time, controlMax, controlMin, MPH2MPS, integralMax\n rospy.init_node('speed_controller', anonymous=True)\n accel_pub = rospy.Publisher('pacmod/as_rx/accel_cmd', PacmodCmd, queue_size = 10)\n brake_pub = rospy.Publisher('/pacmod/as_rx/brake_cmd', PacmodCmd, queue_size = 10)\n rospy.Subscriber('/pacmod/as_tx/vehicle_speed', Float64, speedCallBack)\n #rospy.Subscriber('/speed_applied', Float64, whiteLineCallBack)\n #rospy.Subscriber('/stop_speed', Float64, whiteLineCallBack)\n rospy.Subscriber('/selfdrive/state', Int8, state_callback)\n accel 
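Two properties of generate_keys above are worth spelling out: the private keys are derived deterministically from the validator index (the first four bytes of hash_eth2(str(i))), so this is strictly a testnet convenience — anyone can recompute every key — and the modulo on the index deals keys out round-robin across the client directories. A stand-in illustration of that distribution (names made up):

clients = ["alice", "bob"]
assignment = {i: clients[i % len(clients)] for i in range(6)}
print(assignment)
# {0: 'alice', 1: 'bob', 2: 'alice', 3: 'bob', 4: 'alice', 5: 'bob'}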
= PacmodCmd()\n accel.f64_cmd = 0\n brake = PacmodCmd()\n brake.f64_cmd = 0\n \n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n \n if state and not (10<=state<20 or state==3):\n brake.f64_cmd = 0\n brake_pub.publish(brake)\n accel.f64_cmd = velController()\n accel_pub.publish(accel)\n else:\n integralError = 0\n \n rate.sleep()\n \n \nif __name__ == '__main__':\n try:\n constVelocity()\n except rospy.ROSInterruptException:\n pass \n","sub_path":"catkin_ws/src/ay21_igvc_catkin/AY21_ws/src/igvc_sw/lane_following/constVelocity.py","file_name":"constVelocity.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"390555425","text":"# '''\r\n# Linked List hash table key/value pair\r\n# '''\r\nclass LinkedPair:\r\n def __init__(self, key, value):\r\n self.key = key\r\n self.value = value\r\n self.next = None\r\n\r\nclass HashTable:\r\n '''\r\n A hash table that with `capacity` buckets\r\n that accepts string keys\r\n '''\r\n def __init__(self, capacity):\r\n self.capacity = capacity # Number of buckets in the hash table\r\n self.storage = [None] * capacity\r\n\r\n\r\n def _hash(self, key):\r\n '''\r\n Hash an arbitrary key and return an integer.\r\n You may replace the Python hash with DJB2 as a stretch goal.\r\n '''\r\n #step1:\r\n #make a function to map the \"insert method\" to the array \"capacity\".\r\n #take the key value, turn it into a string, then for each character in the string,\r\n #return the unicode value of it. use that unicode number to make an equation.\r\n return hash(key)\r\n\r\n\r\n def _hash_djb2(self, key):\r\n '''\r\n Hash an arbitrary key using DJB2 hash\r\n\r\n OPTIONAL STRETCH: Research and implement DJB2\r\n '''\r\n hash = 5381\r\n for character in str(key):\r\n hash = hash << 3 + ord(character) << 3 #binary and bitshift\r\n\r\n return hash % self.capacity\r\n\r\n def _hash_mod(self, key):\r\n '''\r\n Take an arbitrary key and return a valid integer index\r\n within the storage capacity of the hash table.\r\n '''\r\n return self._hash(key) % self.capacity\r\n\r\n\r\n def insert(self, key, value):\r\n #get function\r\n hash_func = self._hash_djb2(key)\r\n #case1: look for key\r\n if not self.retrieve(key) == None:\r\n self.remove(key)\r\n #case2 if empty, use linked_list to insert (key, value)\r\n if self.storage[hash_func] == None:\r\n self.storage[hash_func] = LinkedPair(key,value)\r\n #case3 if collsion, then use a node pointer to point to first index, then insert at Null, then make a pointer point at null\r\n # headA -Next> , B -next> C -Next> Null\r\n #nodeA -nodeA.next> Null\r\n else:\r\n node=self.storage[hash_func]\r\n while node.next is not None: #iterator until node.next is null, then assign to node.\r\n node=node.next\r\n node.next=LinkedPair(key,value) #insert at\r\n\r\n\r\n def remove(self, key):\r\n #use the hash function to go straight to index in array.\r\n #Cas1 if index in storage is empty then return\r\n #case2 if something, loop thought index. found or not found\r\n # case2 If = if key is found middle or end, set previousNode.next to this node next. and set node_prev to this node. 
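Two bugs are worth flagging in velController above: the negative anti-windup branch assigns to a misspelled intregralError, so integralError is never clamped from below; and the first call computes dt against the module-level prevTime = 0.0, i.e. a dt spanning the whole Unix epoch, which saturates the integral term immediately. A hedged fix for the clamp, using the module-level names from the original:

integralError += error * dt
if integralError > integralMax:
    integralError = integralMax
elif integralError < -integralMax:
    integralError = -integralMax
# or in one line: integralError = max(-integralMax, min(integralMax, integralError))

Initializing prevTime with rospy.get_time() once inside constVelocity(), before the loop starts, would address the first-call dt.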
set node to node.next\r\n # case3: if key is found at beginning, then this index at sroage will be none.\r\n #\r\n # headNode -headNext> nodeA -ANext> nodeB -BNext> Null\r\n hash_function = self._hash_djb2(key)\r\n if self.storage[hash_function]==None:\r\n return\r\n else:\r\n node=self.storage[hash_function]\r\n prevous_node = None\r\n while node:\r\n if node.key ==key:\r\n if prevous_node is not None :\r\n prevous_node.next = node.next\r\n else:\r\n self.storage[hash_function] = None\r\n\r\n prevous_node = node\r\n node = node.next\r\n\r\n def retrieve(self, key):\r\n '''\r\n Retrieve the value stored with the given key.\r\n Returns None if the key is not found.\r\n Fill this in.\r\n #step 1\r\n '''\r\n #use the hash_func to go striaght to index\r\n hash_function = self._hash_djb2(key)\r\n #case1: if mapping of hash_function to index is empty, then return.\r\n #case2: iterator thought node until we find. then retrieve\r\n if self.storage[hash_function]==None:\r\n return None\r\n else:\r\n node=self.storage[hash_function]\r\n while node:\r\n if node.key==key:\r\n return node.value #return value if fond\r\n node=node.next\r\n return None\r\n\r\n def resize(self):\r\n '''\r\n Doubles the capacity of the hash table and\r\n rehash all key/value pairs.\r\n Fill this in.\r\n '''\r\n #copy capacity into a new temp variable\r\n #then recopy the key value back to the double size array\r\n temp = []\r\n for thisNode in self.storage:\r\n if not thisNode == None:\r\n node=thisNode\r\n while node:\r\n temp.append((node.key, node.value))\r\n node=node.next\r\n #make new storage\r\n self.storage = [None] * (self.capacity*2)\r\n for keyValue in temp:\r\n self.insert(keyValue[0],keyValue[1])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ht = HashTable(2)\r\n\r\n ht.insert(\"line_1\", \"Tiny hash table\")\r\n ht.insert(\"line_2\", \"Filled beyond capacity\")\r\n ht.insert(\"line_3\", \"Linked list saves the day!\")\r\n\r\n print(\"\")\r\n\r\n # Test storing beyond capacity\r\n print(ht.retrieve(\"line_1\"))\r\n print(ht.retrieve(\"line_2\"))\r\n print(ht.retrieve(\"line_3\"))\r\n\r\n # Test resizing\r\n old_capacity = len(ht.storage)\r\n ht.resize()\r\n new_capacity = len(ht.storage)\r\n\r\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\r\n\r\n # Test if data intact after resizing\r\n print(ht.retrieve(\"line_1\"))\r\n print(ht.retrieve(\"line_2\"))\r\n print(ht.retrieve(\"line_3\"))\r\n\r\n print(\"\")\r\n","sub_path":"src/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"84157999","text":"import cv2\nimport numpy as np\nimport time\n\nPATH = 'images/cvtest.jpg'\n\n\nif __name__ == \"__main__\":\n img = cv2.imread('images/cvtest.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blur = cv2.GaussianBlur(gray, (11, 11), 0)\n canny = cv2.Canny(gray, 150, 300)\n res = cv2.resize(canny, None, fx=0.125, fy=0.125, interpolation=cv2.INTER_CUBIC)\n \n cv2.imwrite('images/output_contour.png', canny)\n cv2.imwrite('images/output_contour_resized.png', res)\n cv2.imshow('canny', res)\n cv2.waitKey(0)\n\n print(img.shape) \n","sub_path":"vision/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"335154349","text":"\nfrom lib_tools import nodos\nfrom lib_tools import aristas\nfrom lib_tools import grafo\nimport math\nfrom random import 
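Two defects in the hash table above deserve a note. In remove(), deleting a key that sits at the head of a bucket sets the whole slot to None, silently dropping every later node in that chain; and _hash_djb2 computes hash << 3 + ord(character) << 3, where + binds tighter than <<, so it is not DJB2 (whose step is hash * 33 + c). Corrected sketches of both methods:

def _hash_djb2(self, key):
    # Real DJB2 step: hash = hash * 33 + byte, i.e. (hash << 5) + hash + byte.
    h = 5381
    for character in str(key):
        h = ((h << 5) + h + ord(character)) & 0xFFFFFFFF
    return h % self.capacity

def remove(self, key):
    # Unlink only the matching node and keep the rest of the chain intact.
    index = self._hash_djb2(key)
    node, prev = self.storage[index], None
    while node:
        if node.key == key:
            if prev is None:
                self.storage[index] = node.next  # new head, not None
            else:
                prev.next = node.next
            return
        prev, node = node, node.next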
random\n\n\n# Coordenadas\ncord_x = \"x\"\ncord_y = \"y\"\n\"\"\"\nFuncio que genera un grafo con nodos aleatorios y sus aristas son con una distancia de r o menor\na la que el usuario ingreso. \n\"\"\"\ndef Geo_S(n, r, directed = False, auto = False):\n GRAFO = grafo.Grafo()\n GRAFO.atrbt[grafo.DIRECTED] = directed\n #Iteracion que produce n nodosen el grafo\n for nodo in range(n):\n GRAFO.Producir_Vertices(nodos.Nodo(nodo,{cord_x: random(), cord_y: random()}))\n #Iteracion donde se busca que la distancia entre nodos se igual o menor a la que ingreso el usuario\n for i in range(n):\n for j in range(n):\n valores1 = (GRAFO.ID(i).atributos[cord_x], GRAFO.ID(i).atributos[cord_y])\n valores2 = (GRAFO.ID(j).atributos[cord_x], GRAFO.ID(j).atributos[cord_y])\n d = distancia(valores1, valores2)\n if d <= r:\n # Crea la arista si la distancia es la deseada\n GRAFO.Producir_Aristas(aristas.Arista(i,j), directed, auto)\n \n return GRAFO\n \n\n#Funcion Basica que calcula la distancia de un punto a otro. \ndef distancia(valores1, valores2):\n x1, y1 = valores1\n x2, y2 = valores2\n d = math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))\n return d\n\n\n\n ","sub_path":"lib_tools/Modelo_Geo_S.py","file_name":"Modelo_Geo_S.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"561185949","text":"# -*- coding: utf-8 -*- \nimport redis\nimport json\nimport urllib\nimport urllib2\nimport argparse \nimport platform\nimport sys\nimport os\n\nfrom datetime import datetime \n\nENABLE_PROXY = False # 本地访问时需要打开代理\nPROXY_SERVER = \"http://192.168.1.10:7777\" \nREDIS_HOST = \"172.31.44.160\"\nREDIS_PORT = 6379\nREDIS_DB = 1\nGOOGLE_HOT_URL = \"http://www.google.com/trends/hottrends/hotItems\"\nHOTWORDS_COUNT = 50\n\n\nYAHOO_KEYWORDS_FILE = 'yahoo-keywords.txt'\nYAHOO_KEYWORDS_COUNTRY = ['us', 'br', 'ca']\n\n# 抓取请求的发送间隔,为0表示连续发\nREQ_INTERVAL = 0\n\nwords_dict = {}\n\ncountry_dict = {\n \n 'us':'p1',\n 'ca':'p13',\n 'br':'p18',\n 'hk':'p10',\n 'tw':'p12',\n 'ru':'p14',\n 'de':'p15',\n 'fr':'p16',\n 'nl':'p17',\n 'id':'p19',\n 'mx':'p21',\n 'kr':'p23',\n 'tr':'p24',\n 'ph':'p25',\n 'es':'p26',\n 'it':'p27',\n 'vn':'p28',\n 'eg':'p29',\n 'in':'p3',\n 'ar':'p30',\n 'pl':'p31',\n 'co':'p32',\n 'th':'p33',\n 'my':'p34',\n 'ua':'p35',\n 'sa':'p36',\n 'ke':'p37',\n 'cl':'p38',\n 'ro':'p39',\n 'jp':'p4',\n 'za':'p40',\n 'be':'p41',\n 'se':'p42',\n 'cz':'p43',\n 'at':'p44',\n 'hu':'p45',\n 'ch':'p46',\n 'pt':'p47',\n 'gr':'p48',\n 'dk':'p49',\n 'sg':'p5',\n 'sf':'p50',\n 'no':'p51',\n 'ng':'p52',\n 'il':'p6',\n 'au':'p8',\n 'gb':'p9'\n}\n\nadd_cm_country = ['us', 'au', 'ca', 'in', 'th', 'za', 'br', 'sg', 'ng', 'my', 'ke', 'ph']\n\ndef get_url_opener(enable_proxy, proxy_server):\n proxy_handler = urllib2.ProxyHandler({\"http\" : proxy_server})\n null_proxy_handler = urllib2.ProxyHandler({})\n \n if enable_proxy:\n opener = urllib2.build_opener(proxy_handler)\n else:\n opener = urllib2.build_opener(null_proxy_handler) \n return opener\n\ndef req_google_words(url, country, hotdate):\n req_params = {}\n req_params[\"ajax\"] = 1\n req_params[\"htv\"] = \"l\"\n req_params[\"pn\"] = country\n req_params[\"htd\"] = hotdate\n\n opener = get_url_opener(ENABLE_PROXY, PROXY_SERVER) \n resp = opener.open(url, urllib.urlencode(req_params))\n \n resp_json = json.loads(resp.read().decode(\"UTF-8\"))\n return resp_json\n\ndef parse_google_words(resp_json): \n words = []\n for date_list in resp_json[\"trendsByDateList\"]:\n for trend in 
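In Geo_S above, the inner loop runs j over the full range, so each unordered pair is tested twice and i == j always passes the distance check (d = 0 <= r), leaving self-loop suppression entirely to the auto flag. For the undirected case, starting j at i + 1 visits each pair exactly once. A self-contained sketch of the pair generation (math.dist needs Python 3.8+):

import math
from random import random

def geo_edges(n, r):
    # n points in the unit square; connect pairs at distance <= r.
    pts = [(random(), random()) for _ in range(n)]
    edges = []
    for i in range(n):
        for j in range(i + 1, n):  # each unordered pair once, no self-loops
            if math.dist(pts[i], pts[j]) <= r:
                edges.append((i, j))
    return edges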
date_list[\"trendsList\"]:\n words.append(trend[\"title\"])\n return words\n\ndef fetch_hotword(country, country_code):\n print('request for country: %s \\n' % (country))\n\n hotwords = []\n \n # add special hot-word at the header of list\n #if country in add_cm_country:\n # hotwords.append('Clean Master\\'s data collection')\n \n next_day = datetime.now().strftime(\"%Y%m%d\")\n word_dict_key = 'hotword:' + country\n while len(hotwords) < HOTWORDS_COUNT:\n resp_json = req_google_words(GOOGLE_HOT_URL, country_code, next_day) \n\n for date_list in resp_json[\"trendsByDateList\"]:\n for trend in date_list[\"trendsList\"]:\n hotwords.append(trend[\"title\"])\n if not resp_json[\"lastPage\"]:\n next_day = resp_json[\"oldestVisibleDate\"]\n if len(hotwords) > 0 :\n words_dict[word_dict_key] = hotwords\n\n print('finish for country: %s \\n' % (country)) \n\ndef store_to_redis():\n pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)\n r = redis.Redis(connection_pool=pool)\n\n for k, word_list in words_dict.iteritems():\n r.delete(k)\n i = 1 \n for word in word_list:\n if word.lower() != \"ashley madison\":\n r.zadd(k, word, i)\n i = i + 1\n \n r.expire(k, 24 * 60 * 60)\n pool.disconnect()\n\ndef import_yahoo_keywords():\n \"\"\"\n Some countries use Yahoo keywords, reading keywords from local file.\n \n \n \"\"\"\n yahoo_keywords = []\n \n # if need using dos2unix command convert the file of Yahoo keywords first.\n if runOnLinux():\n ret = os.system(\"dos2unix \" + YAHOO_KEYWORDS_FILE)\n ret >>= 8\n if ret != 0:\n print(\"file fomrat convert error, please check dos2unix \" + YAHOO_KEYWORDS_FILE)\n sys.exit()\n \n f = open(YAHOO_KEYWORDS_FILE, \"r\") \n for line in f: \n yahoo_keywords.append(line[:-1]) \n f.close() \n \n pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)\n r = redis.Redis(connection_pool=pool)\n\n for country in YAHOO_KEYWORDS_COUNTRY:\n k = 'hotword:' + country\n r.delete(k)\n for word in yahoo_keywords:\n r.sadd(k, word)\n pool.disconnect()\n\ndef runOnLinux():\n curr_sys = platform.system()\n if curr_sys.lower() == 'linux':\n return True\n return False\n\ndef main():\n \n parser = argparse.ArgumentParser(description='This is a hotword script.')\n parser.add_argument('yahoo', help='Input file name', nargs='?')\n args = parser.parse_args()\n \n if args.yahoo: \n print('Start import Yahoo keywords:\\n')\n import_yahoo_keywords()\n print('End import Yahoo keywords:\\n') \n else:\n print('Start request hotwords:\\n')\n for k, v in country_dict.iteritems():\n fetch_hotword(k, v)\n if REQ_INTERVAL > 0:\n time.sleep(REQ_INTERVAL) \n store_to_redis()\n \n print('End request hotwords:\\n')\n \nif __name__ == '__main__' :\n main()","sub_path":"python/hotword.py","file_name":"hotword.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"529958481","text":"from detectron2.data import MetadataCatalog\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2 import model_zoo\nimport torch\nimport sys\nfrom imageTools import imageTool\nimport cv2\nimport numpy as np\nimport base64\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\n#----------------------------receive stdin----------------------------\n# base64 문자열으로 받기.\ninputs = sys.stdin.read()\n\nbinary_arry = base64.b64decode(inputs)\nbinary_np = 
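main() above calls time.sleep(REQ_INTERVAL), but hotword.py never imports time, so any non-zero REQ_INTERVAL raises NameError at runtime. (For reference, the Chinese comments on the constants say: enable the proxy when accessing locally; REQ_INTERVAL is the pause between fetch requests, with 0 meaning back-to-back.) The fix is one import, keeping the file's Python 2 idioms:

import time  # missing from the original imports; needed once REQ_INTERVAL > 0

for country, code in country_dict.iteritems():  # Python 2, as elsewhere in the file
    fetch_hotword(country, code)
    if REQ_INTERVAL > 0:
        time.sleep(REQ_INTERVAL)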
np.frombuffer(binary_arry, dtype=np.uint8)\n\n# data cv2 np convert\nim = cv2.imdecode(binary_np, cv2.IMREAD_ANYCOLOR)\n\n\n\n\n## ------------------------detectron2 start!!!--------------------------------\n#\ncfg = get_cfg()\n# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library\n# start path in configs [dir]\nfileName = \"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"\nmodel = model_zoo.get_config_file(fileName)\n\ncfg.merge_from_file(model)\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\ncfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n\n# Find a model from detectron2's model zoo. You can either use the https://dl.fbaipublicfiles.... url, or use the detectron2:// shorthand\ncfg.MODEL.WEIGHTS = \"detectron2://\"+model_zoo.get_weight_suffix(fileName)\npredictor = DefaultPredictor(cfg)\noutputs = predictor(im)\n\nboxes = outputs[\"instances\"].pred_boxes.tensor\npred = outputs['instances'].pred_classes\nscores = outputs[\"instances\"].scores\n\n\n\n#---------------------------------------------image Edit---------------------------------------------\n\n# Get weight of importance of echo instance, and Main instance index\nidx, weightlist = imageTool.get_weight(outputs, im, False)\n\n# instances가 한개도 없다면 그대로 출력\n# 있다면, 최대크기로 비율맞춰서 자른다, 단 자를 수 밖에 없는 경우는 사람을 기준으로 한다.\nif weightlist.size() == torch.Size([0]):\n #없으면 그대로.\n rate16_9 = im\nelse:\n mx1, my1, mx2, my2 = boxes[idx] # Main Instace box pos\n\n # concatenate close instace from Main_instance\n conlist = imageTool.getconInstances(boxes, idx, weightlist, 6)\n\n # combine img_box\n Y_S, Y_D, X_S, X_D = imageTool.combinde_img_box(boxes[conlist])\n\n # 사람중 가장 y축이 위에 있는 instance의 y축\n peoplelist = torch.where(pred[conlist] == 0)\n peoplelist = torch.where(pred[conlist] == 0)[0]\n if peoplelist.size() == torch.Size([0]):\n miny = Y_S\n else:\n miny = torch.min(boxes[conlist[peoplelist], 1], axis=0)\n miny = miny.values\n\n #비율 맞추기\n rate16_9 = imageTool.rate16_9(im,miny, Y_D, X_S, X_D)\n\n\n#------------------------------print result------------------------------\n# convert Base64\n_, imen = cv2.imencode('.jpeg', rate16_9)\nimenb = imen.tobytes()\nresult = base64.b64encode(imenb).decode()\nprint(result)\n","sub_path":"Detectron2-python-code/web_base64_rate.py","file_name":"web_base64_rate.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276412225","text":"import pytest\nimport logging\nfrom pyspark.sql import SQLContext, SparkSession\nfrom pyspark.sql.types import StructType, IntegerType, DateType\nfrom pyspark import SparkContext\n\ndef quiet_py4j():\n \"\"\" turn down spark logging for the test context \"\"\"\n logger = logging.getLogger('py4j')\n logger.setLevel(logging.WARN)\n\n@pytest.fixture(scope='session')\ndef sql_context():\n quiet_py4j()\n spark_context = SparkContext()\n sql_context = SQLContext(spark_context)\n return sql_context\n spark_context.stop()\n\n@pytest.fixture(scope='session')\ndef spark():\n print('In spark')\n quiet_py4j()\n return SparkSession.builder \\\n .master(\"local\") \\\n .appName(\"test\") \\\n .getOrCreate()","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"448663751","text":"class Coordinate(object):\n \"\"\"Represents an integral two-dimensional coordinate\"\"\"\n\n 
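In the conftest above, the sql_context fixture returns and only then calls spark_context.stop(), so the stop is dead code that never runs. The pytest idiom for setup plus teardown in one fixture is yield:

import pytest
from pyspark import SparkContext
from pyspark.sql import SQLContext

@pytest.fixture(scope='session')
def sql_context():
    quiet_py4j()
    spark_context = SparkContext()
    yield SQLContext(spark_context)  # hand the fixture to the tests
    spark_context.stop()             # now actually runs at session teardown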
def __init__(self, row, column):\n # We store them in standard notation: i for row, j for column\n self.i = row\n self.j = column\n\n def across(self, amount):\n \"\"\"Shifts the coordinate horizontally by `amount`. To go left, make\n `amount` negative\"\"\"\n if self.j + amount < 0:\n raise IndexError(\n \"You are not allowed a negative integral coordinate\"\n )\n self.j += amount\n return self\n\n def down(self, amount):\n \"\"\"Shifts the coordinate vertically by `amount`. To go up, make\n `amount` negative\"\"\"\n if self.i + amount < 0:\n raise IndexError(\n \"You are not allowed a negative integral coordinate\"\n )\n self.i += amount\n return self\n\n\nclass SquareMatrix(object):\n \"\"\"Represents a Square matrix\"\"\"\n\n def __init__(self, list_of_lists):\n # Must not be empty\n if len(list_of_lists) == 0:\n raise Exception(\"Matrix cannot be empty\")\n if len(list_of_lists[0]) == 0:\n raise Exception(\"Matrix cannot be empty\")\n\n # Must be square\n row_length = len(list_of_lists[0])\n for l in list_of_lists:\n if len(l) != row_length:\n raise Exception(\"Matrix must be square\")\n if row_length != len(list_of_lists):\n raise Exception(\"Matrix must be square\")\n\n self.dim = row_length\n self.rows = list_of_lists\n\n def get(self, coordinate):\n \"\"\"Returns the (zero-indexed) (i,j)th element of the matrix\"\"\"\n return self.rows[coordinate.i][coordinate.j]\n\n def replace(self, coord, value):\n \"\"\"Replaces element coord of the matrix with `value`\"\"\"\n if coord.i >= self.dim or coord.j >= self.dim:\n raise Exception(\"Your desired coordinate is out of bounds\")\n self.rows[coord.i][coord.j] = value\n","sub_path":"Python/util/matrices.py","file_name":"matrices.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"565192683","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n'''\ninp0 = [[8,3,4], [1, 5, 9], [6, 7, 2]]\n\n\ndef rotate90(board):\n newboard = [[0 for x in range(3)] for y in range(3)]\n for i in range(3):\n for j in range(3):\n newboard[j][2-i] = board[i][j]\n return newboard\ndef boardprinter(board):\n length = len(board)\n for i in range(length):\n print(board[i])\n print(\"\\n\" )\ndef reflectLR(board):\n newboard = [[0 for x in range(3)] for y in range(3)]\n for i in range(3):\n newboard[i][0] = board[i][2]\n newboard[i][2] = board[i][0]\n newboard[i][1] = board[i][1] #keeps the middle the same\n return newboard\n\ninp1 = rotate90(inp0)\ninp2 = rotate90(inp1)\ninp3 = rotate90(inp2)\ninp4 = reflectLR(inp0)\ninp5 = reflectLR(inp1)\ninp6 = reflectLR(inp2)\ninp7 = reflectLR(inp3)\ninplst = [inp0,inp1,inp2,inp3,inp4,inp5,inp6,inp7]\n\ndef identitycheck(inplst):\n #Just a function to check if all the magic boards are unique\n if len(inplst) == 1:\n print(\"no duplicates found\")\n return\n else:\n for i in range(1,len(inplst)):\n if inplst[0] == inplst[i]:\n print(\"duplicate\", inplst[i])\n return identitycheck(inplst[1:])\n'''\n# Complete the formingMagicSquare function below.\ndef formingMagicSquare(s):\n # computed this list of the inputs by rotating the given magic square 90 three times, and flipping each output by 180 to get all possible permutations\n # functions can be found above\n inplst = [[[8, 3, 4], [1, 5, 9], [6, 7, 2]],\n [[6, 1, 8], [7, 5, 3], [2, 9, 4]],\n [[2, 7, 6], [9, 5, 1], [4, 3, 8]],\n [[4, 9, 2], [3, 5, 7], [8, 1, 6]],\n [[4, 3, 8], [9, 5, 1], [2, 7, 6]],\n [[8, 1, 6], [3, 5, 7], [4, 9, 2]],\n [[6, 7, 2], 
[1, 5, 9], [8, 3, 4]],\n              [[2, 9, 4], [7, 5, 3], [6, 1, 8]]]\n\n    allcosts = []\n    for square in inplst: # check S with each of the magic squares\n        i = 0\n        j = 0\n        cost = []\n        for i in range(3): # check each tile of the square for cost, summed up\n            for j in range(3):\n                cost += [abs(square[i][j] - s[i][j])]\n        allcosts += [sum(cost)]\n    return min(allcosts)\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    s = []\n\n    for _ in range(3):\n        s.append(list(map(int, input().rstrip().split())))\n\n    result = formingMagicSquare(s)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","sub_path":"Medium/solutions/FormingaMagicSquare.py","file_name":"FormingaMagicSquare.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"191755303","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split\n\nfrom mol2_converter import read_mol2\nfrom prediction_demo_test import load_model\n\n__author__ = 'wjk'\n__date__ = '2018/8/6 0006 下午 4:23'\n\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n# Convert strings to ints to prepare for one-hot encoding; for ordinary data pandas has get_dummies() for automatic one-hot, and sklearn has OneHotEncoder as well\n\n\n\n# COLUMNS = ['atom_type', 'neibor_atom_type', 'charge', 'bA', 'bB', 'bC', 'bD']\n# FEATURES = [\"atom_type\", \"neibor_atom_type\"] # FEATURE_COLUMNS = [f for f in reader if not f in LABEL_COLUMN]\n# LABEL = [\"charge\"]\n\n# read_mol2(os.path.join('./mol2/', 'wat10_esp.mol2'), 'do.csv')\n\n\ndata = pd.read_csv('feature_water.csv')\natom = pd.read_csv('do.csv')['atom_type']\nprint(atom)\n# Replace NaN with the string 'None'\ndata = data.where(data.notnull(), 'None')\ndata.columns = ['atom_type', 'neibor_atom_type', 'bond_type', 'charge', 'bA', 'bB', 'bC', 'bD']\ncharge = data['charge']\nfeatures = data.drop(['charge','neibor_atom_type'], axis=1)\n\n# one_hot\nfeatures = features.values\n# Reshape charge into an (n, 1) matrix\ncharge = charge.values\ncharge = charge.reshape(-1, 1)\nfeatures[:, 0] = LabelEncoder().fit_transform(features[:, 0])\nfeatures[:, 1] = LabelEncoder().fit_transform(features[:, 1])\nfeatures[:, 2] = LabelEncoder().fit_transform(features[:, 2])\nfeatures[:, 3] = LabelEncoder().fit_transform(features[:, 3])\nfeatures[:, 4] = LabelEncoder().fit_transform(features[:, 4])\nfeatures[:, 5] = LabelEncoder().fit_transform(features[:, 5])\n\n# # One-hot encode the label-encoded columns\none_hot = OneHotEncoder()\nfeatures = one_hot.fit_transform(features).toarray()\n\n\n\n# Split the dataset\n# X_train, X_test, y_train, y_test = train_test_split(features, charge, test_size=1, random_state=0)\nX = features[-30:]\ny = charge[-30:]\n\ndf = load_model(X, y)\ndf = df.reindex(columns=list(['atom', 'prediction', 'charge']))\ndf['atom'] = atom\n\ndf.to_csv(\"test.csv\", mode='a', index=False, sep=',')  # head = 0 removes the header row","sub_path":"ChargePrediction/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"177232228","text":"import codecs\nimport os\nimport sys\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\n\"\"\"\n\nsetup, which does the packaging, must be imported\n\n\"\"\"\n\ndef read(fname):\n\n    \"\"\"\n\n    Define a read method that reads the long description from the directory.\n\n    We usually read the contents of the README file as the long description; PyPI shows it on your package's page.\n\n    You can also skip this method and simply write the content by hand.\n\n    PyPI supports files in .rst format. It does not yet support .md files;\n\n    PyPI automatically converts an .rst file to HTML and shows it on your package's info page.\n\n    \"\"\"\n    return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nNAME = \"vfcode\"\n\n\"\"\"The name - usually just your package's name.\"\"\"\n\nPACKAGES = [\"vfcode\",]\n\n\"\"\"The packages to include; there can be several - this is a list.\"\"\"\n\n\nDESCRIPTION = \"验证码生成库.\"\n\n\"\"\"A description of this package.\"\"\"\n\nLONG_DESCRIPTION = read(\"README.md\")\n\n\"\"\"See the read method above.\"\"\"\n\nKEYWORDS = \"验证码\"\n\n\"\"\"Some keywords for the current package, to make PyPI's categorization easier.\"\"\"\n\nAUTHOR = \"xin053\"\n\n\"\"\"Whoever wrote this package - put their name here.\"\"\"\n\nAUTHOR_EMAIL = \"13207130066.cool@163.com\"\n\n\"\"\"The author's email address.\"\"\"\n\nURL = \"https://github.com/xin053/vfcode\"\n\n\"\"\"Your project's homepage; if you have one, give it here - pointing straight at your package's PyPI page works too.\"\"\"\n\nVERSION = \"0.0.2\"\n\n\"\"\"The current package version; version it with whatever scheme you need.\"\"\"\n\nLICENSE = \"Apache License 2\"\n\n\"\"\"The license; I like Apache License 2, but you can swap in another.\"\"\"\n\nsetup(\n\n    name = NAME,\n\n    version = VERSION,\n\n    description = DESCRIPTION,\n\n    long_description = LONG_DESCRIPTION,\n\n    classifiers =\n    [\n\n        'License :: OSI Approved :: Apache Software License',\n\n        'Programming Language :: Python',\n\n        'Intended Audience :: Developers',\n\n        'Operating System :: OS Independent',\n\n    ],\n\n    keywords = KEYWORDS,\n\n    author = AUTHOR,\n\n    author_email = AUTHOR_EMAIL,\n\n    url = URL,\n\n    license = LICENSE,\n\n    packages = PACKAGES,\n\n    include_package_data=True,\n\n    zip_safe=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"447913997","text":"\"\"\"\nDiff collections, as sets\n\"\"\"\n__author__ = \"Dan Gunter \"\n__date__ = \"3/29/13\"\n\n# System\nimport logging\nimport re\nimport time\n\n# Package\nfrom pymatgen.db import util\nfrom pymatgen.db.query_engine import QueryEngine\nfrom pymatgen.db.dbconfig import normalize_auth\n\n_log = logging.getLogger(\"mg.vv.diff\")\n\n\nclass IID:\n    _value = 0\n\n    @classmethod\n    def next(cls):\n        cls._value += 1\n        return cls._value\n\n\nclass Differ:\n    \"\"\"Calculate difference between two collections, based solely on a\n    selected key.\n\n    As noted in :func:`diff`, this will not work with huge datasets, as it stores\n    all the keys in memory in order to do a \"set difference\" using Python sets.\n    \"\"\"\n\n    #: Keys in result dictionary.\n    MISSING, NEW, CHANGED = \"missing\", \"additional\", \"different\"\n\n    #: CHANGED result fields\n    CHANGED_MATCH_KEY = \"match type\"\n    CHANGED_MATCH_DELTA = \"delta\"\n    CHANGED_MATCH_EXACT = \"exact\"\n    CHANGED_OLD = \"old\"\n    CHANGED_NEW = \"new\"\n    CHANGED_DELTA = \"delta\"\n\n    #: for missing property\n    NO_PROPERTY = \"__MISSING__\"\n\n    def __init__(self, key=\"_id\", props=None, info=None, fltr=None, deltas=None):\n        \"\"\"Constructor.\n\n        :param key: Field to use for identifying records\n        :param props: List of fields to use for matching records\n        :param info: List of extra fields to retrieve from (and show) for each record.\n        :param fltr: Filter for records, a MongoDB query expression\n        :param deltas: {prop: delta} to check. 
'prop' is a string, 'delta' is an instance of :class:`Delta`.\n Any key for 'prop' not in parameter 'props' will get added.\n :type deltas: dict\n :raise: ValueError if some delta does not parse.\n \"\"\"\n self._key_field = key\n self._props = [] if props is None else props\n self._info = [] if info is None else info\n self._filter = fltr if fltr else {}\n self._prop_deltas = {} if deltas is None else deltas\n self._all_props = list(set(self._props[:] + list(self._prop_deltas.keys())))\n\n def diff(self, c1, c2, only_missing=False, only_values=False, allow_dup=False):\n \"\"\"Perform a difference between the 2 collections.\n The first collection is treated as the previous one, and the second\n is treated as the new one.\n\n Note: this is not 'big data'-ready; we assume all the records can fit in memory.\n\n :param c1: Collection (1) config file, or QueryEngine object\n :type c1: str or QueryEngine\n :param c2: Collection (2) config file, or QueryEngine object\n :type c2: str or QueryEngine\n :param only_missing: Only find and return self.MISSING; ignore 'new' keys\n :param only_values: Only find and return self.CHANGED; ignore new or missing keys\n :param allow_dup: Allow duplicate keys, otherwise fail with ValueError\n :return: dict with keys self.MISSING, self.NEW (unless only_missing is True), & self.CHANGED,\n each a list of records with the key and\n any other fields given to the constructor 'info' argument.\n The meaning is: 'missing' are keys that are in c1 not found in c2\n 'new' is keys found in c2 that are not found in c1, and 'changed' are records\n with the same key that have different 'props' values.\n \"\"\"\n # Connect.\n _log.info(\"connect.start\")\n if isinstance(c1, QueryEngine):\n engines = [c1, c2]\n else:\n engines = []\n for cfg in c1, c2:\n settings = util.get_settings(cfg)\n if not normalize_auth(settings):\n _log.warn(\n \"Config file {} does not have a username/password\".format(cfg)\n )\n settings[\"aliases_config\"] = {\"aliases\": {}, \"defaults\": {}}\n engine = QueryEngine(**settings)\n engines.append(engine)\n _log.info(\"connect.end\")\n\n # Query DB.\n keys = [set(), set()]\n eqprops = [{}, {}]\n numprops = [{}, {}]\n\n # Build query fields.\n fields = dict.fromkeys(self._info + self._all_props + [self._key_field], True)\n if not \"_id\" in fields: # explicitly remove _id if not given\n fields[\"_id\"] = False\n\n # Initialize for query loop.\n info = {} # per-key information\n has_info, has_props = bool(self._info), bool(self._all_props)\n has_numprops, has_eqprops = bool(self._prop_deltas), bool(self._props)\n _log.info(\"query.start query={} fields={}\".format(self._filter, fields))\n t0 = time.time()\n\n # Main query loop.\n for i, coll in enumerate(engines):\n _log.debug(\"collection {:d}\".format(i))\n count, missing_props = 0, 0\n for rec in coll.query(criteria=self._filter, properties=fields):\n count += 1\n # Extract key from record.\n try:\n key = rec[self._key_field]\n except KeyError:\n _log.critical(\n \"Key '{}' not found in record: {}. 
Abort.\".format(\n self._key_field, rec\n )\n )\n return {}\n if not allow_dup and key in keys[i]:\n raise ValueError(\"Duplicate key: {}\".format(key))\n keys[i].add(key)\n # Extract numeric properties.\n if has_numprops:\n pvals = {}\n for pkey in self._prop_deltas.keys():\n try:\n pvals[pkey] = float(rec[pkey])\n except KeyError:\n # print(\"@@ missing {} on {}\".format(pkey, rec))\n missing_props += 1\n continue\n except (TypeError, ValueError):\n raise ValueError(\n \"Not a number: collection={c} key={k} {p}='{v}'\".format(\n k=key, c=(\"old\", \"new\")[i], p=pkey, v=rec[pkey]\n )\n )\n numprops[i][key] = pvals\n # Extract properties for exact match.\n if has_eqprops:\n try:\n propval = tuple([(p, str(rec[p])) for p in self._props])\n except KeyError:\n missing_props += 1\n # print(\"@@ missing {} on {}\".format(pkey, rec))\n continue\n eqprops[i][key] = propval\n\n # Extract informational fields.\n if has_info:\n if key not in info:\n info[key] = {}\n for k in self._info:\n info[key][k] = rec[k]\n\n # Stop if we don't have properties on any record at all\n if 0 < count == missing_props:\n _log.critical(\n \"Missing one or more properties on all {:d} records\".format(count)\n )\n return {}\n # ..but only issue a warning for partially missing properties.\n elif missing_props > 0:\n _log.warn(\n \"Missing one or more properties for {:d}/{:d} records\".format(\n missing_props, count\n )\n )\n t1 = time.time()\n _log.info(\"query.end sec={:f}\".format(t1 - t0))\n\n # Compute missing and new keys.\n if only_values:\n missing, new = [], []\n else:\n _log.debug(\"compute_difference.start\")\n missing, new = keys[0] - keys[1], []\n if not only_missing:\n new = keys[1] - keys[0]\n _log.debug(\"compute_difference.end\")\n\n # Compute mis-matched properties.\n if has_props:\n changed = self._changed_props(\n keys,\n eqprops,\n numprops,\n info,\n has_eqprops=has_eqprops,\n has_numprops=has_numprops,\n )\n else:\n changed = []\n\n # Build result.\n _log.debug(\"build_result.begin\")\n result = {}\n if not only_values:\n result[self.MISSING] = []\n for key in missing:\n rec = {self._key_field: key}\n if has_info:\n rec.update(info.get(key, {}))\n result[self.MISSING].append(rec)\n if not only_missing:\n result[self.NEW] = []\n for key in new:\n rec = {self._key_field: key}\n if has_info:\n rec.update(info.get(key, {}))\n result[self.NEW].append(rec)\n result[self.CHANGED] = changed\n _log.debug(\"build_result.end\")\n\n return result\n\n def _changed_props(\n self,\n keys=None,\n eqprops=None,\n numprops=None,\n info=None,\n has_numprops=False,\n has_eqprops=False,\n ):\n changed = []\n _up = lambda d, v: d.update(v) or d # functional dict.update()\n for key in keys[0].intersection(keys[1]):\n # Numeric property comparisons.\n if has_numprops:\n for pkey in self._prop_deltas:\n oldval, newval = numprops[0][key][pkey], numprops[1][key][pkey]\n if self._prop_deltas[pkey].cmp(oldval, newval):\n change = {\n self.CHANGED_MATCH_KEY: self.CHANGED_MATCH_DELTA,\n self._key_field: key,\n \"property\": pkey,\n self.CHANGED_OLD: \"{:f}\".format(oldval),\n self.CHANGED_NEW: \"{:f}\".format(newval),\n \"rule\": self._prop_deltas[pkey],\n self.CHANGED_DELTA: \"{:f}\".format(newval - oldval),\n }\n changed.append(_up(change, info[key]) if info else change)\n # Exact property comparison.\n if has_eqprops:\n if not eqprops[0][key] == eqprops[1][key]:\n change = {\n self.CHANGED_MATCH_KEY: self.CHANGED_MATCH_EXACT,\n self._key_field: key,\n self.CHANGED_OLD: eqprops[0][key],\n self.CHANGED_NEW: eqprops[1][key],\n 
}\n            changed.append(_up(change, info[key]) if info else change)\n        return changed\n\n\nclass Delta:\n    \"\"\"Delta between two properties.\n\n    Syntax:\n        +-      Change in sign, 0 not included\n        +-=     Change in sign, + to 0 or - to 0 included\n        +-X     abs(new - old) > X\n        +X-Y    (new - old) > X or (old - new) > Y\n        +-X=    abs(new - old) >= X\n        +X-Y=   (new - old) >= X or (old - new) >= Y\n        +X[=]   Just look in '+' direction\n        -Y[=]   Just look in '-' direction\n        ...%    Instead of (v2 - v1), use 100*(v2 - v1)/v1\n    \"\"\"\n\n    _num = \"\\d+(\\.\\d+)?\"\n    _expr = re.compile(\n        \"(?:\"\n        \"\\+(?P<X>{n})?-(?P<Y>{n})?|\"  # both + and -\n        \"\\+(?P<X2>{n})?|\"  # only +\n        \"-(?P<Y2>{n})?\"  # only -\n        \")\"\n        \"(?P<eq>=)?(?P<pct>%)?\".format(n=_num)\n    )\n\n    def __init__(self, s):\n        \"\"\"Constructor.\n\n        :param s: Expression string\n        :type s: str\n        :raises: ValueError if it doesn't match the syntax\n        \"\"\"\n        # Match expression.\n        m = self._expr.match(s)\n        if m is None:\n            raise ValueError(\"Bad syntax for delta '{}'\".format(s))\n        if m.span()[1] != len(s):\n            p = m.span()[1]\n            raise ValueError(\"Junk at end of delta '{}': {}\".format(s, s[p:]))\n\n        # Save a copy of orig.\n        self._orig_expr = s\n\n        # Initialize parsed values.\n        self._sign = False\n        self._dx, self._dy = 0, 0\n        self._pct = False  # %change\n        self._eq = False  # >=,<= instead of >, <\n\n        # Set parsed values.\n        d = m.groupdict()\n        # print(\"@@ expr :: {}\".format(d))\n        if all((d[k] is None for k in (\"X\", \"Y\", \"X2\", \"Y2\"))):\n            # Change in sign only\n            self._sign = True\n            self._eq = d[\"eq\"] is not None\n        elif d[\"X\"] is not None and d[\"Y\"] is None:\n            raise ValueError(\"Missing value for negative delta '{}'\".format(s))\n        else:\n            if d[\"X2\"] is not None:\n                # Positive only\n                self._dx = float(d[\"X2\"])\n                self._dy = None\n            elif d[\"Y2\"] is not None:\n                # Negative only\n                self._dx = None\n                self._dy = -float(d[\"Y2\"])\n            else:\n                # Both\n                self._dy = -float(d[\"Y\"])\n                self._dx = float(d[\"X\"] or d[\"Y\"])\n            self._eq = d[\"eq\"] is not None\n            self._pct = d[\"pct\"] is not None\n        # print(\"@@ dx,dy eq,pct = {},{} {},{}\".format(self._dx, self._dy, self._eq, self._pct))\n\n        # Pre-calculate comparison function.\n        if self._sign:\n            self._cmp = self._cmp_sign\n        elif self._pct:\n            self._cmp = self._cmp_val_pct\n        else:\n            self._cmp = self._cmp_val_abs\n\n        self._json_id = None  # for repeated serialization\n\n    def __str__(self):\n        return self._orig_expr\n\n    def as_json(self):\n        if self._json_id:\n            # only serialize fully the first time\n            return {\"delta\": {\"id\": self._json_id}}\n        dtype = \"pct\" if self._pct else \"abs\"\n        incl = self._eq\n        self._json_id = IID.next()\n        return {\n            \"delta\": {\n                \"plus\": self._dx,\n                \"minus\": self._dy,\n                \"type\": dtype,\n                \"endpoints\": incl,\n                \"id\": self._json_id,\n            }\n        }\n\n    def cmp(self, old, new):\n        \"\"\"Compare numeric values with delta expression.\n\n        Returns True if delta matches (is as large or larger than) this class' expression.\n\n        Delta is computed as (new - old).\n\n        :param old: Old value\n        :type old: float\n        :param new: New value\n        :type new: float\n        :return: True if delta between old and new is large enough, False otherwise\n        :rtype: bool\n        \"\"\"\n        return self._cmp(old, new)\n\n    def _cmp_sign(self, a, b):\n        if self._eq:\n            return (a < 0 <= b) or (a > 0 >= b)\n        return (a < 0 < b) or (a > 0 > b)\n\n    def _cmp_val_abs(self, a, b):\n        return self._cmp_val(b - a)\n\n    def _cmp_val_pct(self, a, b):\n        if a == 0:\n            return False\n        return self._cmp_val(100.0 * (b - a) / a)\n\n    def _cmp_val(self, delta):\n        oor = False  # oor = out-of-range\n        if self._eq:\n            if 
self._dx is not None:\n oor |= delta >= self._dx\n if self._dy is not None:\n oor |= delta <= self._dy\n else:\n if self._dx is not None:\n oor |= delta > self._dx\n if self._dy is not None:\n oor |= delta < self._dy\n return oor\n","sub_path":"pymatgen/db/vv/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":15398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"521707917","text":"__author__ = 'guopei'\n\nfrom Node import NormalNode, InvGammaNode\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n\n # Read in Data\n data = [float(line) for line in open(\"faculty.dat\", 'r')]\n\n # Use point estimators from the data to come up with starting values.\n estimated_mean = np.mean(data)\n estimated_var = np.var(data)\n\n #mean_of_mean\n mom_node = NormalNode(init_value=5, candsd=0.5, name=\"Mean_of_Mean_Node\", mean=5, var=1, observed = True)\n vom_node = InvGammaNode(init_value=1./9, candsd=0.5, name=\"Var_of_Mean_Node\", alpha=10, beta=5, observed = True)\n aov_node = InvGammaNode(init_value=11, candsd=0.5, name=\"Alpha_of_Var_Node\", alpha=10, beta=5, observed = True)\n bov_node = InvGammaNode(init_value=2.5, candsd=0.5, name=\"Beta_of_Var_Node\", alpha=10, beta=5, observed = True)\n mean_node = NormalNode(init_value=estimated_mean, candsd=0.2, name=\"Mean_Node\", mean=mom_node, var=vom_node)\n var_node = InvGammaNode(init_value=estimated_var, candsd=0.15, name=\"Var_Node\", alpha=aov_node, beta=bov_node)\n\n node_list = []\n\n data_nodes = []\n for datum in data:\n data_node = NormalNode(init_value = datum, candsd=0, name=\"Data\", observed=True, mean=mean_node, var=var_node)\n data_nodes.append(data_node)\n node_list.append(data_node)\n\n node_list.extend([mean_node, var_node, vom_node, mom_node, bov_node, aov_node ])\n\n nrounds = 10000\n burn = 100\n\n fig = plt.figure()\n fig.suptitle('Var of Mean Distribution', fontsize=14, fontweight='bold')\n\n mom_data = []\n for num in xrange(4):\n\n node_list[-4+num].observed = False\n\n for i in xrange(burn):\n for node in node_list:\n if node.observed == False:\n node.sample()\n\n for i in xrange(nrounds):\n for node in node_list:\n if node.observed == False:\n node.sample()\n\n\n\n mom_data.append(vom_node.sample_values[burn:])\n vom_node.sample_values = []\n\n plt.hist(mom_data,\n bins=20,\n normed=True,\n color=['Green', 'Blue', 'Yellow', 'Purple'],\n label=['vom_node', '+mom_node' , '+bov_node', '+aov_node' ])\n\n plt.legend(loc='upper right')\n plt.xlim(0,1)\n\n #plt.show()\n plt.savefig(\"Var_of_Mean_Compare.png\")\n\n\n\n\n\n","sub_path":"Faculty.py","file_name":"Faculty.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359291383","text":"import sys\r\nimport os\r\nimport mysql.connector\r\n\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\n\r\n# this function does an SQL request, then write data to PARCOURS2\r\ndef sql_request(db_con):\r\n\tcurs = db_con.cursor()\r\n\treq = \"SELECT lati, longi from parcours2\"\r\n\tcurs.execute(req)\r\n\trows = curs.fetchall()\r\n\ti = 0\r\n\r\n\tfile = open(\"PARCOURS2\", \"w\")\r\n\tfor row in rows:\r\n\t\tif i == 0:\r\n\t\t\tstr2write = \" new google.maps.LatLng({0}, {1})\\n\".format(row[0], row[1])\t\r\n\t\telse: \r\n\t\t\tstr2write = \",new google.maps.LatLng({0}, {1})\\n\".format(row[0], row[1])\r\n\t\t\r\n\t\tprint(str2write)\r\n\t\tfile.write(str2write)\r\n\t\ti += 1\r\n\tfile.close()\r\n\r\n# 
merge parcours1, parcours2 and parcours3 to page\r\ndef merge_files(files_to_merge, out):\r\n\tfor i in range(len(files_to_merge)):\r\n\t\tfile_to_read = open(files_to_merge[i], \"r\")\r\n\t\tdata = file_to_read.read()\r\n\t\tfile_to_read.close()\r\n\r\n\t\tfile_to_write = open(out, \"a\")\r\n\t\tfile_to_write.write(data)\r\n\t\tfile_to_write.close()\r\n\r\n# format data from page to real HTML\r\ndef format_html(page):\r\n\thtml = open(page).read()\r\n\tsoup = bs(html, 'html.parser')\r\n\r\n\tprettyHTML = soup.prettify()\r\n\tprint(prettyHTML)\r\n\tfile = open(\"page.html\", \"w\")\r\n\tfile.write(prettyHTML)\r\n\tfile.close()\r\n\r\nif __name__ == '__main__':\r\n\tconn = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"root\", database=\"ratp\")\r\n\tsql_request(conn)\r\n\ttry :\r\n\t\tos.remove(\"page.html\")\r\n\texcept:\r\n\t\tpass\r\n\tfiles = [\"PARCOURS1\", \"PARCOURS2\", \"PARCOURS3\"]\r\n\tmerge_files(files, \"page\")\r\n\tconn.close()\r\n\r\n\tformat_html(\"page\")\r\n\tos.remove(\"page\")","sub_path":"request_sql.py","file_name":"request_sql.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645802497","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport json\nimport requests\nimport pymysql.cursors\nfrom bs4 import BeautifulSoup\nimport pdb\n\ntopic = [\n\t\t\t{'topic_id':'60443', 'topic_name':'爱情'},\n\t\t\t{'topic_id':'62355', 'topic_name':'喜剧'},\n\t\t\t{'topic_id':'62356', 'topic_name':'动画'},\n\t\t\t{'topic_id':'62359', 'topic_name':'剧情'},\n\t\t\t{'topic_id':'60454', 'topic_name':'科幻'},\n\t\t\t{'topic_id':'62360', 'topic_name':'动作'},\n\t\t\t{'topic_id':'62358', 'topic_name':'经典'},\n\t\t\t{'topic_id':'60979', 'topic_name':'悬疑'},\n\t\t\t{'topic_id':'61527', 'topic_name':'青春'},\n\t\t\t{'topic_id':'62364', 'topic_name':'犯罪'},\n\t\t\t{'topic_id':'62363', 'topic_name':'惊悚'},\n\t\t\t{'topic_id':'60434', 'topic_name':'文艺'},\n\t\t\t{'topic_id':'60334', 'topic_name':'纪录片'},\n\t\t\t{'topic_id':'209', 'topic_name':'搞笑'},\n\t\t\t{'topic_id':'60521', 'topic_name':'励志'},\n\t\t\t{'topic_id':'62369', 'topic_name':'恐怖'},\n\t\t\t{'topic_id':'62371', 'topic_name':'战争'},\n\t\t\t{'topic_id':'62370', 'topic_name':'短片'},\n\t\t\t{'topic_id':'62372', 'topic_name':'魔幻'},\n\t\t\t{'topic_id':'61810', 'topic_name':'黑色幽默'},\n\t\t\t{'topic_id':'62376', 'topic_name':'传记'},\n\t\t\t{'topic_id':'62375', 'topic_name':'情色'},\n\t\t\t{'topic_id':'62374', 'topic_name':'动画短片'},\n\t\t\t{'topic_id':'62377', 'topic_name':'感人'},\n\t\t\t{'topic_id':'62378', 'topic_name':'暴力'},\n\t\t\t{'topic_id':'62383', 'topic_name':'浪漫'},\n\t\t\t{'topic_id':'61554', 'topic_name':'女性'},\n\t\t\t{'topic_id':'62381', 'topic_name':'同��'},\n\t\t\t{'topic_id':'62386', 'topic_name':'史诗'},\n\t\t\t{'topic_id':'62387', 'topic_name':'童话'},\n\t\t\t{'topic_id':'62388', 'topic_name':'烂片'},\n\t\t\t{'topic_id':'62389', 'topic_name':'cult'}\n\t\t]\n\n# Connect to the database\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='hanfeng',\n db='douban_db',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n\ndef add_movie_record(movie):\n\t# 将电影信息添加到数据表中\n\ttry:\n\t\tname = movie['name'].strip(' ')\n\t\tlink = movie['link'].strip(' ')\n\t\tmovie_desc = movie['desc'].strip(' ')\n\t\trating = movie['rating'].strip(' ')\n\t\timglink = movie['imgLink'].strip(' ')\n\t\twith connection.cursor() as cursor:\n\t\t\tsql = \"INSERT INTO douban_movies VALUES (%s, %s, %s, %s, 
%s);\"\n\t\t\tcursor.execute(sql, (name, link, movie_desc, rating, imglink))\n\t\tconnection.commit()\n\texcept Exception as e:\n\t\traise e\n\ndef is_existed(movie):\n\t# 判断该部电影是否已存在于数据库中\n\ttry:\n\t\twith connection.cursor() as cursor:\n\t\t\tsql = \"SELECT name FROM douban_movies WHERE name=%s;\"\n\t\t\tcursor.execute(sql, movie['name'])\n\t\t\tresult = cursor.fetchone()\n\t\t\tif result is None:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\texcept Exception as e:\n\t\traise e\n\ndef get_tags():\n\t# 获取豆瓣电影分类标签\n\ttagList = []\n\turl = \"http://movie.douban.com/tag/\"\n\ttry:\n\t\tr = requests.get(url)\n\texcept Exception as e:\n\t\traise e\n\telse:\n\t\tbsObj = BeautifulSoup(r.content, 'html.parser')\n\t\ttabList = bsObj.findAll('table', {'class':'tagCol'})\n\t\tfor tab in tabList:\n\t\t\tfor td in tab.tbody.findAll('td'):\n\t\t\t\ttagList.append(td.a.text)\n\t\treturn tagList\n\ndef get_movies(params):\n\t# 根据参数获取豆瓣电影信息\n\tmovieList = []\n\turl = \"https://www.douban.com/j/tag/items\"\n\theaders = {\n\t\t'content-type': 'application/json',\n\t\t'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\n\t}\n\t# params = {'start':start, 'limit':limit, 'topic_id':topic_id, 'topic_name':topic_name, 'mod':'movie'}\n\ttry:\n\t\tresponse = requests.get(url, params=params, headers=headers)\n\t\tresponse.raise_for_status()\n\t\t# pdb.set_trace()\n\texcept Exception as e:\n\t\traise e\n\telse:\n\t\t# pdb.set_trace()\n\t\tobj = response.json()\n\t\thtml = obj.get('html')\n\t\tbsObj = BeautifulSoup(html, 'html.parser')\n\t\tdList = bsObj.findAll('dl')\n\t\tfor dl in dList:\n\t\t\ttry:\n\t\t\t\tmovieObj = {}\n\t\t\t\tmovieObj['name'] = dl.dd.find('a', {'class':'title'}).text\n\t\t\t\tmovieObj['link'] = dl.dd.find('a', {'class':'title'}).get('href')\n\t\t\t\tmovieObj['desc'] = dl.dd.find('div', {'class':'desc'}).text\n\t\t\t\tmovieObj['rating'] = dl.dd.find('span', {'class':'rating_nums'}).text\n\t\t\t\tmovieObj['imgLink'] = dl.dt.find('img').get('src')\n\t\t\t\tmovieList.append(movieObj)\n\t\t\texcept Exception as e:\n\t\t\t\tcontinue\n\t\treturn movieList\n\n\ndef main():\n\tfor t in topic:\n\t\ttopic_id = t['topic_id']\n\t\ttopic_name = t['topic_name']\n\t\tfor s in range(0, 1000, 10):\n\t\t\tparams = {'start':s, 'limit':10, 'topic_id':topic_id, 'topic_name':topic_name, 'mod':'movie' }\n\t\t\tmovies = get_movies(params)\n\t\t\tfor m in movies:\n\t\t\t\tif is_existed(m):\n\t\t\t\t\tadd_movie_record(m)\n\t\t\t\t\tprint(m)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"This record has been in the douban_movies table.\")\n\t\t\t\t\tcontinue\n\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"douban-movies-crawler.py","file_name":"douban-movies-crawler.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"550932670","text":"import numpy as np\nfrom numba import jit\nfrom scipy.ndimage import convolve\n\nclass Playground:\n \n \n def __init__(self, height, width, alpha = 0.2):\n \n assert (alpha < 1) and (alpha > 0)\n assert (height >= 3) and (type(height) == int)\n assert (width >= 3) and (type(width) == int)\n \n self.height = height\n self.width = width\n self.square = self.height * self.width\n self.alpha = alpha\n \n self.typical_shape = (self.height, self.width)\n self.player_field = np.full(self.typical_shape, '*')\n \n def fully_generate(self, x_point, y_point):\n self.mines = np.zeros(self.typical_shape, dtype = 
'bool')\n self.place_mines(x_point, y_point)\n \n self.find_values()\n \n def place_mines(self, x_point, y_point):\n self.amount_of_mines = place_mines(self.height, self.width, self.alpha, self.mines, x_point, y_point)\n \n def find_values(self):\n mask = np.ones((3,3))\n mask[1][1] = 0\n self.values = convolve(self.mines.astype(np.uint8), mask, mode='constant', cval=0.0)\n \n def show(self):\n print()\n for y in range(self.height):\n for x in range(self.width):\n print(self.player_field[y, x], end=\"\")\n print()\n print()\n \n def enter(self, x, y):\n \n result = 0\n \n if self.player_field[y, x] == '*':\n if self.mines[y, x]:\n self.player_field[y, x] = '!'\n return -1\n \n result += 1\n self.player_field[y, x] = str(self.values[y, x])\n \n if self.values[y, x] == 0:\n if x > 0: result += self.enter(x-1,y) \n if y > 0: result += self.enter(x,y-1)\n if x < self.width - 1: result += self.enter(x+1,y)\n if y < self.height - 1: result += self.enter(x,y+1)\n if (x > 0 and y > 0): result += self.enter(x-1,y-1)\n if (x > 0 and y < self.height - 1): result += self.enter(x-1,y+1)\n if (x < self.width - 1 and y > 0): result += self.enter(x+1,y-1)\n if (x < self.width - 1 and y < self.height - 1): result += self.enter(x+1,y+1)\n \n return result\n\n \n@jit(nopython=True)\ndef place_mines(height, width, alpha, mines, x, y):\n amount_of_mines = int(np.round(height * width * alpha))\n positions = np.arange(height * width)\n positions = np.delete(positions, y*width + x)\n np.random.shuffle(positions)\n for pos in positions[:amount_of_mines]:\n mines[pos // width, pos % width] = True\n \n return amount_of_mines\n\n\nclass Game:\n \n def __init__(self, height, width, alpha = 0.2):\n self.height = height\n self.width = width\n self.alpha = alpha\n \n def start(self, mode = 'player'):\n assert mode == 'player' or mode == 'bot'\n self.mode = mode\n self.pg = Playground(self.height, self.width, self.alpha)\n self.closed = self.height * self.width\n self.game_runned = True\n self.field_generated = False\n self.won = False\n self.lose = False\n \n def do_step(self, x, y):\n if not self.field_generated:\n self.pg.fully_generate(x, y)\n self.mines_amount = self.pg.amount_of_mines\n\n self.field_generated = True\n if self.game_runned:\n result = self.pg.enter(int(x), int(y))\n if result == -1:\n self.fail_game()\n else:\n self.closed -= result\n if self.closed == self.mines_amount:\n self.win_game()\n \n def fail_game(self):\n self.game_runned = False\n self.lose = True\n if self.mode == 'player':\n print(\"You have failed!\")\n \n def win_game(self):\n self.game_runned = False\n self.won = True\n if self.mode == 'player':\n print(\"You have won!\")\n \n def show(self):\n if self.mode == 'player':\n self.pg.show()\n else:\n frames = np.empty((self.height * self.width, 5, 5), dtype = 'str')\n new_df = np.zeros((self.height + 4, self.width + 4), dtype = 'str')\n new_df[2:-2, 2:-2] = self.pg.player_field\n for y in range(self.height):\n for x in range(self.width):\n frames[y * self.width + x] = new_df[y:y+5, x:x+5].reshape(1, 5, 5)\n return frames","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"37617594","text":"import argparse\nimport os\nimport time\n\n\ndef parse():\n parser = argparse.ArgumentParser(description='crzINX. Simple http server')\n parser.add_argument('-b', '--bind', type=str, help='Host name. 
Default: 0.0.0.0', default='0.0.0.0')\n parser.add_argument('-p', '--port', type=int, help='Port number. Example: 8080. Default: 8080', default=8080)\n parser.add_argument('-r', '--rootdir', type=str, help='ROOTDIR. Example: /home/user/web_prj', default=os.getcwd())\n parser.add_argument('-c', '--cpu', type=int, help='NCPU. CPU number', default=1)\n args = parser.parse_args()\n host = args.bind\n port = args.port\n root_dir = os.path.normpath(args.rootdir)\n cpu_number = args.cpu\n is_dir = os.path.isdir(root_dir)\n if not is_dir:\n print('{} this directory does not exist. Would you want to create it?'.format(root_dir))\n answer = raw_input('--> ')\n if (answer == 'y') or (answer == 'Y') or (answer == 'yes') or (answer == 'Yes'):\n try:\n os.makedirs(root_dir)\n except IOError as e:\n print(e)\n else:\n print(\"Success!\")\n else:\n print(\"Current dir will use as root. {}\".format(os.getcwd()))\n print(\"Args:\\nhost - {}\\nport - {}\\nroot_dir - {}\\ncpu_number - {}\".format(host, port, root_dir, cpu_number))\n return host, port, root_dir, cpu_number\n\n\ndef init_logger(root_dir):\n log_dir = os.path.normpath(root_dir + \"/aelogs\")\n is_dir = os.path.isdir(log_dir)\n access_log_path = os.path.normpath(log_dir + \"/access.log\")\n error_log_path = os.path.normpath(log_dir + \"/error.log\")\n access_log = False\n error_log = False\n if not is_dir:\n try:\n os.makedirs(log_dir)\n access_log = open(access_log_path, 'w')\n error_log = open(error_log_path, 'w')\n except IOError as e:\n print(e)\n exit(1)\n else:\n try:\n access_log = open(access_log_path, 'a')\n error_log = open(error_log_path, 'a')\n except IOError as e:\n print (e)\n exit(1)\n return access_log, error_log\n\n\ndef logger(msg, f):\n f.write(time.ctime() + \" : \" + msg + \"\\n\")\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128266929","text":"import copy\n\nimport requests\n\nfrom common.block import Block\nfrom common.io_mem_pool import get_transactions_from_memory, store_transactions_in_memory\nfrom common.node import Node\nfrom node.transaction_validation.script import StackScript\n\nFILENAME = \"src/doc/mem_pool\"\n\n\nclass TransactionException(Exception):\n def __init__(self, expression, message):\n self.expression = expression\n self.message = message\n\n\nclass OtherNode(Node):\n def __init__(self, ip: str, port: int):\n super().__init__(ip, port)\n\n def send_transaction(self, transaction_data: dict) -> requests.Response:\n return self.post(\"transactions\", transaction_data)\n\n\nclass Transaction:\n def __init__(self, blockchain: Block):\n self.blockchain = blockchain\n self.transaction_data = {}\n self.inputs = []\n self.outputs = []\n self.is_valid = False\n self.is_funds_sufficient = False\n\n def receive(self, transaction: dict):\n self.transaction_data = transaction\n self.inputs = transaction[\"inputs\"]\n self.outputs = transaction[\"outputs\"]\n\n def execute_script(self, unlocking_script, locking_script):\n unlocking_script_list = unlocking_script.split(\" \")\n locking_script_list = locking_script.split(\" \")\n transaction_data = copy.deepcopy(self.transaction_data)\n if \"transaction_hash\" in transaction_data:\n transaction_data.pop(\"transaction_hash\")\n stack_script = StackScript(transaction_data)\n for element in unlocking_script_list:\n if element.startswith(\"OP\"):\n class_method = getattr(StackScript, element.lower())\n 
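# dispatch (added note): an \"OP\"-prefixed token selects the StackScript method of the same lowercased name\n                # via getattr (e.g. a hypothetical \"OP_DUP\" token would call StackScript.op_dup); any other token is data pushed onto the stack.\n                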
class_method(stack_script)\n            else:\n                stack_script.push(element)\n        for element in locking_script_list:\n            if element.startswith(\"OP\"):\n                class_method = getattr(StackScript, element.lower())\n                class_method(stack_script)\n            else:\n                stack_script.push(element)\n\n    def validate(self):\n\n        for tx_input in self.inputs:\n            transaction_hash = tx_input[\"transaction_hash\"]\n            output_index = tx_input[\"output_index\"]\n            try:\n                locking_script = self.blockchain.get_locking_script_from_utxo(transaction_hash, output_index)\n            except Exception:\n                raise TransactionException(f\"{transaction_hash}:{output_index}\", \"Could not find locking script for utxo\")\n            try:\n                self.execute_script(tx_input[\"unlocking_script\"], locking_script)\n                self.is_valid = True\n            except Exception:\n                print('Transaction script validation failed')\n                raise TransactionException(f\"UTXO ({transaction_hash}:{output_index})\", \"Transaction script validation failed\")\n\n    def get_total_amount_in_inputs(self) -> int:\n        total_in = 0\n        for tx_input in self.inputs:\n            transaction_data = self.blockchain.get_transaction_from_utxo(tx_input[\"transaction_hash\"])\n            utxo_amount = transaction_data[\"outputs\"][tx_input[\"output_index\"]][\"amount\"]\n            total_in = total_in + utxo_amount\n        return total_in\n\n    def get_total_amount_in_outputs(self) -> int:\n        total_out = 0\n        for tx_output in self.outputs:\n            amount = tx_output[\"amount\"]\n            total_out = total_out + amount\n        return total_out\n\n    def validate_funds(self):\n        inputs_total = self.get_total_amount_in_inputs()\n        outputs_total = self.get_total_amount_in_outputs()\n        try:\n            assert inputs_total == outputs_total\n            self.is_funds_sufficient = True\n        except AssertionError:\n            print('Transaction inputs and outputs did not match')\n            raise TransactionException(f\"inputs ({inputs_total}), outputs ({outputs_total})\",\n                                       \"Transaction inputs and outputs did not match\")\n\n    def broadcast(self):\n        node_list = [OtherNode(\"127.0.0.1\", 5001), OtherNode(\"127.0.0.1\", 5002)]\n        for node in node_list:\n            try:\n                node.send_transaction(self.transaction_data)\n            except requests.ConnectionError:\n                pass\n\n    def store(self):\n        if self.is_valid and self.is_funds_sufficient:\n            current_transactions = get_transactions_from_memory()\n            current_transactions.append(self.transaction_data)\n            store_transactions_in_memory(current_transactions)\n","sub_path":"src/node/transaction_validation/transaction_validation.py","file_name":"transaction_validation.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"18249889","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb  2 15:12:28 2019\n\n@author: ry4n\n\"\"\"\n\n# user imports salary, amount they would like to save, and cost of their home\nannual_salary = float(input(\"Please Enter your annual salary: \"))\nsave_perc = float(input(\"Please Enter the percent of salary to save, as a decimal: \"))\ntotal_cost = float(input(\"Please Enter the cost of your dream home: \"))\nsemi_annual_raise = float(input(\"Please Enter the semi-annual raise as decimal: \"))\n\n# Setup of variables\ndown_payment = 0.25 * total_cost\nmonthly_salary = annual_salary/12\ncurrent_savings = 0\nr = 0.04  # assumed annual rate of return on savings\nportion_saved = save_perc * monthly_salary\nt = 0\n\n# Setup of algorithm\n\nwhile current_savings < down_payment:\n    current_savings = current_savings*(1 + r/12)  # one month of returns at annual rate r\n    current_savings += portion_saved\n    t += 1\n    if t % 6 == 0:  # every six months the salary (and with it the monthly savings) grows by the raise\n        monthly_salary *= (1 + semi_annual_raise)\n        portion_saved = save_perc * monthly_salary\n    if current_savings >= down_payment:\n        print(\"Number of months: \", t)\n    
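\n# quick check (added; illustrative numbers): saving 1,000 a month at r = 0.04 compounds to roughly\n# 12,222 after 12 months -- an ordinary annuity, 1000*((1 + 0.04/12)**12 - 1)/(0.04/12).\n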
\nprint(\"That is roughly \", t/12, \"years\")","sub_path":"ps1b.py","file_name":"ps1b.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"137334951","text":"from potnanny.extensions import db\nfrom potnanny.apps.outlet.models import Outlet\nfrom potnanny.utils import WeekdayMap\nimport json\nimport re\n\n\nclass Schedule(db.Model):\n __tablename__ = 'schedules'\n id = db.Column(db.Integer, primary_key=True)\n outlet_id = db.Column(db.Integer, db.ForeignKey('outlets.id'))\n on_time = db.Column(db.String(16), nullable=False, server_default='')\n off_time = db.Column(db.String(16), nullable=False, server_default='')\n days = db.Column(db.Integer, nullable=False, server_default=\"127\")\n custom = db.Column(db.Integer, nullable=False, server_default=\"0\")\n active = db.Column(db.Boolean(), nullable=False, server_default='1')\n \n outlet = db.relationship(\"Outlet\", \n backref=db.backref(\"children\", \n cascade=\"all,delete\"))\n \n def __init__(self, oid, ontime, offtime, days, cust):\n self.outlet_id = oid\n self.on_time = ontime\n self.off_time = offtime\n self.days = days\n self.custom = cust\n\n def __repr__(self):\n d = \",\".join(self.run_days())\n return \"%s %s/%s (%s)\" % (self.outlet.name, self.on_time,\n self.off_time, d)\n\n def as_dict(self):\n return {'id': self.id, \n 'outlet': self.outlet, \n 'on_time': self.on_time,\n 'off_time': self.off_time,\n 'days': self.days,\n 'active': self.active,\n }\n \n def run_days(self):\n results = [];\n dow = WeekdayMap(show_first=2).reverse_ordered_list()\n if self.days == 127:\n results.append('Every Day')\n else:\n for item in dow:\n if (self.days & item[1]):\n results.append(item[0])\n\n return results\n\n def runs_on(self, wkday):\n dow = WeekdayMap().reverse_ordered_list()\n for d in dow:\n k, v = d\n if re.search(wkday, k, re.IGNORECASE):\n return True\n\n return False\n\n \n","sub_path":"potnanny/apps/schedule/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"343742827","text":"import pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import silhouette_samples, silhouette_score\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn import metrics\r\nimport sklearn.metrics as sm\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nimport scipy.cluster.hierarchy as sch\r\nimport plotly.express as px\r\nimport scipy\r\nfrom scipy.cluster.hierarchy import dendrogram, linkage\r\nfrom scipy.cluster.hierarchy import fcluster\r\nfrom scipy.cluster.hierarchy import cophenet\r\nfrom scipy.spatial.distance import pdist\r\nimport matplotlib.cm as cm\r\nimport numpy as np\r\n\r\n\r\n#Dataset\r\nmobility_covid = pd.read_csv('E:\\\\PHD\\\\Courses\\\\Data-Mining\\\\Project\\\\datasets\\\\Clustering-new\\\\Mobility+Covid-19\\\\total.csv')\r\n#print(mobility_covid.head())\r\nmobility_covid= mobility_covid.groupby('Country/Region').mean().reset_index()\r\nmobility_covid.to_csv('E:\\\\PHD\\\\Courses\\\\Data-Mining\\\\Project\\\\datasets\\\\Clustering-new\\\\Mobility+Covid-19\\\\total_avg.csv')\r\n#print(mobility_covid.head())\r\ncluster = mobility_covid.drop(['Unnamed: 
0','Country/Region','deaths','pop','day_x','month_x','year_x','case_fatality_rate','infection_rate','mortality_rate','grocery_and_pharmacy_percent_change_from_baseline','parks_percent_change_from_baseline','residential_percent_change_from_baseline','transit_stations_percent_change_from_baseline','workplaces_percent_change_from_baseline'], axis=1)\r\n#print(cluster.head())\r\n#cluster.to_csv('E:\\\\PHD\\\\Courses\\\\Data-Mining\\\\Project\\\\datasets\\\\Clustering-new\\\\Mobility+Covid-19\\\\cluster.csv')\r\n\r\n#Make Scaling\r\ncluster = StandardScaler().fit_transform(cluster)\r\n#print(cluster)\r\n\r\n\r\n#Silhoutte Analysis to get the number of clusters for KMeans Clustering Algorithm\r\nrange_n_clusters = [2, 3, 4, 5]\r\n\r\nfor n_clusters in range_n_clusters:\r\n # Create a subplot with 1 row and 2 columns\r\n fig, (ax1, ax2) = plt.subplots(1, 2)\r\n fig.set_size_inches(18, 7)\r\n ax1.set_xlim([-0.1, 1])\r\n ax1.set_ylim([0, len(cluster) + (n_clusters + 1) * 10])\r\n\r\n # Initialize the clusterer with n_clusters value and a random generator\r\n # seed of 10 for reproducibility.\r\n clusterer = KMeans(n_clusters=n_clusters, random_state=10)\r\n #cluster_labels = clusterer.fit_predict(cluster1)\r\n cluster_labels = clusterer.fit_predict(cluster)\r\n # The silhouette_score gives the average value for all the samples.\r\n # This gives a perspective into the density and separation of the formed\r\n # clusters\r\n #silhouette_avg = silhouette_score(cluster1, cluster_labels)\r\n silhouette_avg = silhouette_score(cluster, cluster_labels)\r\n print(\"For n_clusters =\", n_clusters,\r\n \"The average silhouette_score is :\", silhouette_avg)\r\n\r\n # Compute the silhouette scores for each sample\r\n #sample_silhouette_values = silhouette_samples(cluster1, cluster_labels)\r\n sample_silhouette_values = silhouette_samples(cluster, cluster_labels)\r\n\r\n y_lower = 10\r\n for i in range(n_clusters):\r\n # Aggregate the silhouette scores for samples belonging to\r\n # cluster i, and sort them\r\n ith_cluster_silhouette_values = \\\r\n sample_silhouette_values[cluster_labels == i]\r\n\r\n ith_cluster_silhouette_values.sort()\r\n\r\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\r\n y_upper = y_lower + size_cluster_i\r\n\r\n color = cm.nipy_spectral(float(i) / n_clusters)\r\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\r\n 0, ith_cluster_silhouette_values,\r\n facecolor=color, edgecolor=color, alpha=0.7)\r\n # Label the silhouette plots with their cluster numbers at the middle\r\n ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\r\n\r\n # Compute the new y_lower for next plot\r\n y_lower = y_upper + 10 # 10 for the 0 samples\r\n\r\n ax1.set_title(\"The silhouette plot for the various clusters.\")\r\n ax1.set_xlabel(\"The silhouette coefficient values\")\r\n ax1.set_ylabel(\"Cluster label\")\r\n\r\n # The vertical line for average silhouette score of all the values\r\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\r\n\r\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\r\n ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\r\n\r\n # 2nd Plot showing the actual clusters formed\r\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\r\n #ax2.scatter(cluster1[:, 0], cluster1[:, 1], marker='.', s=30, lw=0, alpha=0.7,\r\n # c=colors, edgecolor='k')\r\n\r\n ax2.scatter(cluster[:, 0], cluster[:, 1], marker='.', s=30, lw=0, alpha=0.7,\r\n c=colors, edgecolor='k')\r\n\r\n\r\n # Labeling the clusters\r\n centers = clusterer.cluster_centers_\r\n # Draw 
white circles at cluster centers\r\n ax2.scatter(centers[:, 0], centers[:, 1], marker='o',\r\n c=\"white\", alpha=1, s=200, edgecolor='k')\r\n\r\n for i, c in enumerate(centers):\r\n ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,\r\n s=50, edgecolor='k')\r\n\r\n ax2.set_title(\"The visualization of the clustered data.\")\r\n ax2.set_xlabel(\"Feature space for the 1st feature\")\r\n ax2.set_ylabel(\"Feature space for the 2nd feature\")\r\n\r\n plt.suptitle((\"Silhouette analysis for KMeans clustering on sample data \"\r\n \"with n_clusters = %d\" % n_clusters),\r\n fontsize=14, fontweight='bold')\r\n\r\nplt.show()\r\n\r\n\r\n#KMeans with n_cluster = 2\r\nkmeans = KMeans(n_clusters=6)\r\nkmeans.fit(cluster)\r\n#kmeans.fit(cluster1)\r\nlabels = mobility_covid['Country/Region']\r\nclusters = kmeans.predict(cluster)\r\n#clusters = kmeans.predict(cluster1)\r\nprint(\"clusters \",clusters)\r\nprint(labels)\r\n# assign the clustering labels\r\nmobility_covid['cluster'] = clusters\r\nKmeans = mobility_covid.groupby('cluster').mean().reset_index()\r\n#print(Kmeans)\r\nprint(mobility_covid[mobility_covid['cluster'] == 0]['Country/Region'].values.tolist(), end=\" \")\r\nprint(mobility_covid[mobility_covid['cluster'] == 1]['Country/Region'].values.tolist(), end=\" \")\r\nprint(mobility_covid[mobility_covid['cluster'] == 2]['Country/Region'].values.tolist(), end=\" \")\r\nprint(mobility_covid[mobility_covid['cluster'] == 3]['Country/Region'].values.tolist(), end=\" \")\r\nprint(mobility_covid[mobility_covid['cluster'] == 4]['Country/Region'].values.tolist(), end=\" \")\r\n#print(mobility_covid[mobility_covid['cluster'] == 5]['Country/Region'].values.tolist(), end=\" \")\r\n\r\n#Evaluation\r\n#ari = metrics.cluster.adjusted_rand_score(labels,clusters)\r\n#print(\"Adjusted Rand Index \",ari)\r\n#nmi = metrics.cluster.normalized_mutual_info_score(labels,clusters)\r\n#print(\"Normalized Mutual Information \",nmi)\r\n#m = metrics.cluster.adjusted_mutual_info_score(labels,clusters)\r\n#print(\"adjusted_mutual_info_score \",m)\r\n\r\ncluster_labels = kmeans.fit_predict(cluster)\r\n#cluster_labels = kmeans.fit_predict(cluster1)\r\n\r\n#silhouette = sm.silhouette_score(cluster1, cluster_labels)\r\nsilhouette = sm.silhouette_score(cluster, cluster_labels)\r\nprint(\"The average silhouette_score is :\", silhouette)\r\n\r\n#accuracy_score = sm.accuracy_score(cluster, cluster_labels)\r\n#print(\"The accuracy_score is :\", accuracy_score)\r\n\r\n#Plotting (Before Clustering)\r\n#plt.scatter(cluster1[:, 0],cluster1[:, 1])\r\nplt.scatter(cluster[:, 0],cluster[:, 1])\r\nplt.xlabel(\"Confirmed Cases\")\r\n#plt.ylabel(\"Grocery_percent_change_from_baseline\")\r\n#plt.ylabel(\"Parks_percent_change_from_baseline\")\r\nplt.ylabel(\"Retail_percent_change_from_baseline\")\r\nplt.title(\"The data before Clustering\")\r\nplt.show()\r\n\r\ncenter = kmeans.cluster_centers_\r\nprint(center)\r\n\r\nplt.scatter(center[0][0],center[0][1],marker = '*',s=200,color='y')\r\nplt.scatter(center[1][0],center[1][1],marker = '*',s=200,color='y')\r\nplt.scatter(center[2][0],center[2][1],marker = '*',s=200,color='y')\r\nplt.scatter(center[3][0],center[3][1],marker = '*',s=200,color='y')\r\nplt.scatter(center[4][0],center[4][1],marker = '*',s=200,color='y')\r\n#plt.scatter(center[5][0],center[5][1],marker = '*',s=200,color='y')\r\n\r\n\r\nplt.scatter(cluster[cluster_labels==0,0],cluster[cluster_labels==0,1],s=50,color='r', label = 'Cluster 1')\r\nplt.scatter(cluster[cluster_labels==1,0],cluster[cluster_labels==1,1],s=50,color='g', label = 
'Cluster 2')\r\nplt.scatter(cluster[cluster_labels==2,0],cluster[cluster_labels==2,1],s=50,color='b', label = 'Cluster 3')\r\nplt.scatter(cluster[cluster_labels==3,0],cluster[cluster_labels==3,1],s=50,color='c', label = 'Cluster 4')\r\nplt.scatter(cluster[cluster_labels==4,0],cluster[cluster_labels==4,1],s=50,color='m', label = 'Cluster 5')\r\n#plt.scatter(cluster[cluster_labels==5,0],cluster[cluster_labels==5,1],s=50,color='k', label = 'Cluster 6')\r\n\r\n\r\nplt.text(cluster[0, 0],cluster[0, 1],labels[0],fontsize=8)\r\nplt.text(cluster[1, 0],cluster[1, 1],labels[1],fontsize=8)\r\nplt.text(cluster[2, 0],cluster[2, 1],labels[2],fontsize=8)\r\nplt.text(cluster[3, 0],cluster[3, 1],labels[3],fontsize=8)\r\nplt.text(cluster[4, 0],cluster[4, 1],labels[4],fontsize=8)\r\nplt.text(cluster[5, 0],cluster[5, 1],labels[5],fontsize=8)\r\nplt.text(cluster[6, 0],cluster[6, 1],labels[6],fontsize=8)\r\nplt.text(cluster[7, 0],cluster[7, 1],labels[7],fontsize=8)\r\nplt.text(cluster[8, 0],cluster[8, 1],labels[8],fontsize=8)\r\nplt.text(cluster[9, 0],cluster[9, 1],labels[9],fontsize=8)\r\nplt.text(cluster[10, 0],cluster[10, 1],labels[10],fontsize=8)\r\nplt.text(cluster[11, 0],cluster[11, 1],labels[11],fontsize=8)\r\nplt.text(cluster[12, 0],cluster[12, 1],labels[12],fontsize=8)\r\nplt.text(cluster[13, 0],cluster[13, 1],labels[13],fontsize=8)\r\n#plt.text(cluster[14, 0],cluster[14, 1],labels[14],fontsize=8)\r\nplt.xlabel(\"Confirmed Cases\")\r\n\r\n#plt.ylabel(\"Grocery_percent_change_from_baseline\")\r\n#plt.ylabel(\"Parks_percent_change_from_baseline\")\r\nplt.ylabel(\"Retail_percent_change_from_baseline\")\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"Cluster_mobility_covid.py","file_name":"Cluster_mobility_covid.py","file_ext":"py","file_size_in_byte":9760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"67367932","text":"# -*- coding: utf-8 -*-\n# @Author: Puffrora\n# @Date: 2019-10-14 11:01:24\n# @Last Modified by: Puffrora\n# @Last Modified time: 2019-10-14 19:08:06\n\n\n# 双指针left right 窗口移动\n# 当 A[right] = 1 时 left 不变 right 继续移���\n# 当 A[right] = 0 时,\n# \t0 的数量在 K 的范围内 left 不变 right 继续移动\n# \t0 的数量 > K\n# \t\t当 A[left] == 0 时 即 left 指向了一个零 只需要 left 右移一格 就可以减少一个零\n# \t\t当 A[left] == 1 时 即此时窗口内包了 K 个零 需要先移动至下个零再右移一格才能减少一个零\n\nclass Solution:\n\tdef longestOnes(self, A, K):\n\t\tleft, right = 0, 0\n\t\tres = 0\n\t\twhile right < len(A):\n\t\t\tif A[right] == 0:\n\t\t\t\tif K == 0:\n\t\t\t\t\twhile A[left] == 1:\n\t\t\t\t\t\tleft += 1\n\t\t\t\t\tleft += 1\n\t\t\t\telse:\n\t\t\t\t\tK -= 1\n\t\t\tres = max(res, right-left+1)\n\t\t\tright += 1\n\t\t\t\n\t\treturn res\n","sub_path":"Leetcode/leetcode1004 最大连续1的个数III.py","file_name":"leetcode1004 最大连续1的个数III.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"170652003","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport func \n#定义网络\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv1d(1,4,kernel_size=2),\n nn.BatchNorm1d(4),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv1d(4, 16, kernel_size=2),\n nn.BatchNorm1d(16),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.layer3 = nn.Sequential(\n nn.Conv1d(16, 16, kernel_size=2),\n nn.BatchNorm1d(16),\n nn.ReLU(),\n 
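# shape note (added): from the length-500 signal produced by fc0, every Conv1d(kernel_size=2) trims one step\n            # and every MaxPool1d(2,2) halves the length: 500 -> 249 -> 124 -> 61 -> 30, which matches fc1's in_features=30.\n            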
nn.MaxPool1d(kernel_size=2, stride=2))\n        self.layer4 = nn.Sequential(\n            nn.Conv1d(16, 1, kernel_size=2),\n            nn.BatchNorm1d(1),\n            nn.ReLU(),\n            nn.MaxPool1d(kernel_size=2, stride=2))\n        self.fc0 = nn.Linear(1, 500)\n        self.fc1 = nn.Linear(30, 1)\n    def forward(self, x):\n        out = self.fc0(x)\n        out = self.layer1(out)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = self.layer4(out)\n        out = self.fc1(out)\n        return out\n# load the trained network\nnet=Net()\nnet.load_state_dict(torch.load('Mynet.pkl'))\noptimizer = torch.optim.SGD(net.parameters(), lr=0.02)\nloss_func = torch.nn.MSELoss() \n# generate the test dataset\nt=np.random.rand(400)*1400\nt=np.sort(t)\nn=np.size(t)\n\nans=func.NARMA(t,n)\nans=torch.from_numpy(ans).float()\nans=ans.reshape(400,1,1)\nt=torch.from_numpy(t).float()\nt=t.reshape(400,1,1)\n\n#dataset=[torch.from_numpy(t).float(),torch.from_numpy(ans).float()]\n\n# evaluate the network\nnet.eval()\ntest_out=net(t)\nloss = loss_func(test_out,ans)\nprint(loss)\n\n","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"210756006","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# load the dataset\niris = pd.read_csv('iris.csv', sep=',')\n\n# chart legend\ncolors = ['red', 'green', 'blue']\ngatunki = iris['species'].unique()\n\n\nfor i in range(0, 3):\n    gatunki_df = iris[iris['species'] == gatunki[i]]\n    plt.scatter(\n        gatunki_df['sepal_length'],\n        gatunki_df['sepal_width'],\n        color=colors[i],\n        alpha=0.7,\n        label=gatunki[i]\n    )\n\nplt.xlabel('sepal length (cm)')\nplt.ylabel('sepal width (cm)')\nplt.legend()\n\nplt.show()\n","sub_path":"wd9/zad04.py","file_name":"zad04.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"22728091","text":"# LecturaDelTablero takes care of reading the board from the file\n# It receives the size of the columns and the rows\n\n\nclass LecturaDelTablero:\n    def __init__(self, archivo=\"Tablero.txt\"):\n        self.tablero = leerTablero(archivo)\n        self.fil = len(self.tablero)\n        self.col = len(self.tablero[0])\n\n    def __str__(self):\n        salida = \"\"\n        for f in range(self.fil):\n            for c in range(self.col):\n                if self.tablero[f][c] == 0:\n                    salida += \"  \"\n                if self.tablero[f][c] == 1:\n                    salida += \"x \"\n                if self.tablero[f][c] == 2:\n                    salida += \"I \"\n                if self.tablero[f][c] == 3:\n                    salida += \"F \"\n                if self.tablero[f][c] == 4:\n                    salida += \"- \"\n            salida += \"\\n\"\n        return salida\n\n    def camino(self, lista):\n        del lista[-1]\n        for i in range(len(lista)):\n            self.tablero[lista[i][0]][lista[i][1]] = 4\n\n\nclass Nodo:\n    def __init__(self, pos=[0, 0], padre=None):\n        self.pos = pos\n        self.padre = padre\n        self.h = distancia(self.pos, pos_f)  # pos_f: the goal position, expected as a module-level global\n\n        if self.padre == None:\n            self.g = 0\n        else:\n            self.g = self.padre.g + 1\n        self.f = self.g + self.h\n\n\nclass AEstrella:\n    def __init__(self, tablero):\n        self.tablero = tablero\n\n        # Start and goal nodes.\n        self.inicio = Nodo(buscarPos(2, tablero))\n        self.fin = Nodo(buscarPos(3, tablero))\n\n        # Create the open and closed lists.\n        self.abierta = []\n        self.cerrada = []\n\n        # Add the initial node to the closed list.\n        self.cerrada.append(self.inicio)\n\n        # Add its neighbors to the open list\n        self.abierta += self.vecinos(self.inicio)\n\n        # Keep searching while the goal has not yet entered the open list.\n        while self.objetivo():\n            self.buscar()\n\n        self.camino = self.camino()\n\n    # Returns a list of the walkable neighboring nodes.\n    
def vecinos(self, nodo):\n vecinos = []\n # basicamente: si los nodos (casillas) alrededor no son pared u\n # obstaculo entonces agregar como posibles vecinos\n\n # abajo\n if self.tablero.tablero[nodo.pos[0] + 1][nodo.pos[1]] != 1:\n vecinos.append(Nodo([nodo.pos[0] + 1, nodo.pos[1]], nodo))\n\n # arriba\n if self.tablero.tablero[nodo.pos[0] - 1][nodo.pos[1]] != 1:\n vecinos.append(Nodo([nodo.pos[0] - 1, nodo.pos[1]], nodo))\n\n # izquierda\n if self.tablero.tablero[nodo.pos[0]][nodo.pos[1] - 1] != 1:\n vecinos.append(Nodo([nodo.pos[0], nodo.pos[1] - 1], nodo))\n\n # derecha\n if self.tablero.tablero[nodo.pos[0]][nodo.pos[1] + 1] != 1:\n vecinos.append(Nodo([nodo.pos[0], nodo.pos[1] + 1], nodo))\n\n return vecinos\n\n # Pasa el elemento de f menor de la lista abierta a la cerrada.\n def f_menor(self):\n a = self.abierta[0]\n n = 0\n for i in range(1, len(self.abierta)):\n if self.abierta[i].f < a.f:\n a = self.abierta[i]\n n = i\n self.cerrada.append(self.abierta[n])\n del self.abierta[n]\n\n # Comprueba si un nodo está en una lista.\n def en_lista(self, nodo, lista):\n for i in range(len(lista)):\n if nodo.pos == lista[i].pos:\n return 1\n return 0\n\n # Gestiona los vecinos del nodo seleccionado.\n def ruta(self):\n for i in range(len(self.nodos)):\n if self.en_lista(self.nodos[i], self.cerrada):\n continue\n elif not self.en_lista(self.nodos[i], self.abierta):\n self.abierta.append(self.nodos[i])\n else:\n if self.select.g + 1 < self.nodos[i].g:\n for j in range(len(self.abierta)):\n if self.nodos[i].pos == self.abierta[j].pos:\n del self.abierta[j]\n self.abierta.append(self.nodos[i])\n break\n\n # Analiza el último elemento de la lista cerrada.\n def buscar(self):\n self.f_menor()\n self.select = self.cerrada[-1]\n self.nodos = self.vecinos(self.select)\n self.ruta()\n\n # Comprueba si el objetivo objetivo está en la lista abierta.\n def objetivo(self):\n for i in range(len(self.abierta)):\n if self.fin.pos == self.abierta[i].pos:\n return 0\n return 1\n\n # Retorna una lista con las posiciones del camino a seguir.\n def camino(self):\n for i in range(len(self.abierta)):\n if self.fin.pos == self.abierta[i].pos:\n objetivo = self.abierta[i]\n\n camino = []\n while objetivo.padre != None:\n camino.append(objetivo.pos)\n objetivo = objetivo.padre\n camino.reverse()\n return camino\n\n\n# ---------------------------------------------------------------------\n\n\n# Funciones\n# ---------------------------------------------------------------------\n\n# Devuelve la posición de \"x\" en una lista.\n# El simbolo para el nodo inicial es \"I\"\n# El simbolo para el nodo final es \"F\"\n\ndef buscarPos(x, tablero):\n for f in range(tablero.fil):\n for c in range(tablero.col):\n if tablero.tablero[f][c] == x:\n return [f, c]\n return 0\n\n\n# Distancia entre dos puntos.\ndef distancia(a, b):\n return abs(a[0] - b[0]) + abs(a[1] - b[1]) # Valor absoluto.\n\n\n# Quita el ultimo caracter de una lista.\ndef quitarUltimo(lista):\n for i in range(len(lista)):\n lista[i] = lista[i][:-1]\n return lista\n\n\n# Covierte una cadena en una lista.\ndef listarCadena(cadena):\n lista = []\n for i in range(len(cadena)):\n if cadena[i] == \"-\":\n lista.append(0)\n if cadena[i] == \"x\":\n lista.append(1)\n if cadena[i] == \"I\":\n lista.append(2)\n if cadena[i] == \"F\":\n lista.append(3)\n return lista\n\n\n# Lee un archivo de texto y lo convierte en una lista.\ndef leerTablero(archivo):\n mapa = open(archivo, \"r\")\n mapa = mapa.readlines()\n mapa = quitarUltimo(mapa)\n for i in range(len(mapa)):\n mapa[i] = 
listarCadena(mapa[i])\n    return mapa\n\n\n# ---------------------------------------------------------------------\n\ndef main():\n    tablero = LecturaDelTablero()\n    globals()[\"pos_f\"] = buscarPos(3, tablero)\n    A = AEstrella(tablero)\n    tablero.camino(A.camino)\n    print(tablero)\n    return 0\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Proyecto_Bravo_Ruiz/A_Estrella/A_Estrella.py","file_name":"A_Estrella.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"38586590","text":"import numpy\nfrom scipy.stats import skewnorm\nfrom sklearn.cluster import KMeans  # missing import: KMeans is used by kmeans_thresholding below\n\nTH_SKEWNORM_PPF = 0.99\nPPF_TUNE_X0 = 0.707\nTH_SKEWNESS = -5\nMIN_SCALE = 0.01\nMIN_SCALING_FACTOR = 1e-18\nOT_DISCARD_PERCENT = 0.001\n\n\ndef css_thresholding(x, percent=OT_DISCARD_PERCENT):\n    x_np = numpy.array(x)\n    x_np[x_np < MIN_SCALING_FACTOR] = MIN_SCALING_FACTOR\n    x_sorted = numpy.sort(x_np)\n    x2 = x_sorted**2\n    Z = x2.sum()\n    energy_loss = 0\n    for i in range(x2.size):\n        energy_loss += x2[i]\n        if energy_loss / Z > percent:\n            break\n    th = (x_sorted[i-1] + x_sorted[i]) / 2 if i > 0 else 0\n    return th\n\n\ndef find_closest_index(x_sorted, target):\n    for i in range(len(x_sorted)):\n        if x_sorted[i] <= target <= x_sorted[i + 1]:\n            break\n    return i if (target - x_sorted[i] > x_sorted[i + 1] - target) else (i + 1)\n\n\ndef sep_two_skewed_normals(x, th_init):\n    x0 = x[x < th_init]\n    x1 = x[x >= th_init]\n\n    if x0.size == 0:\n        return th_init, (x.min() - 1, x1.mean(), 0.01, x1.std(), 0, 0)\n    if x1.size == 1:\n        a1 = TH_SKEWNESS\n        m1 = x0.mean()\n        s1 = MIN_SCALE\n    else:\n        a1, m1, s1 = skewnorm.fit(x1)\n        if a1 > TH_SKEWNESS:\n            a1, m1, s1 = skewnorm.fit(x1, f0=TH_SKEWNESS)\n    if x0.size == 1:\n        a0 = TH_SKEWNESS\n        m0 = x0.mean()\n        s0 = MIN_SCALE\n    else:\n        a0, m0, s0 = skewnorm.fit(x0)\n        if a0 > TH_SKEWNESS:\n            a0, m0, s0 = skewnorm.fit(x0, f0=TH_SKEWNESS)\n\n    num_x0_last = x0.size\n    num_change = 1\n    x_sorted = sorted(x)\n    nums_x0 = [num_x0_last, ]\n    while num_change:\n        # E, binary search for new th\n        i0 = int(x0.size/2)\n        i1 = x.size - int(x1.size/2)\n        while i1 - i0 > 1:\n            i = int((i0 + i1) / 2)\n            p0 = skewnorm.pdf(x_sorted[i], a0, m0, s0) - skewnorm.pdf(x_sorted[i], a1, m1, s1)\n            if p0 > 0:\n                i0 = i\n            else:\n                i1 = i\n\n        th = (x_sorted[i0] + x_sorted[i1]) / 2\n\n        x0 = x[x < th]\n        x1 = x[x >= th]\n\n        # M\n        if x0.size == 0:\n            break\n        if x1.size == 1:\n            a1 = TH_SKEWNESS\n            m1 = x0.mean()\n            s1 = MIN_SCALE\n        else:\n            a1, m1, s1 = skewnorm.fit(x1)\n            if a1 > TH_SKEWNESS:\n                a1, m1, s1 = skewnorm.fit(x1, f0=TH_SKEWNESS)\n        if x0.size == 1:\n            a0 = TH_SKEWNESS\n            m0 = x0.mean()\n            s0 = MIN_SCALE\n        else:\n            a0, m0, s0 = skewnorm.fit(x0)\n            if a0 > TH_SKEWNESS:\n                a0, m0, s0 = skewnorm.fit(x0, f0=TH_SKEWNESS)\n\n        # update\n        num_change = x0.size - num_x0_last\n        num_x0_last = x0.size\n        if num_x0_last not in nums_x0:\n            nums_x0.append(num_x0_last)\n        else:\n            break\n\n    th = min(skewnorm.ppf(TH_SKEWNORM_PPF, a0, m0, s0), th)\n    # extreme case that under very weak L1 constraint, negligible cluster is fitted with large sigma\n    if s1 > 0.1 and s0 / s1 > 10:\n        th = min(skewnorm.ppf(1e-4, a1, m1, s1), th)\n    return th, (m0, m1, s0, s1, a0, a1)\n\n\ndef em_thresholding(x, alpha=1e-3):\n    x_np = numpy.array(x)\n    x_np[x_np < MIN_SCALING_FACTOR] = MIN_SCALING_FACTOR\n    x_sorted = sorted(x_np)\n    prune_th_init = alpha * x_np.max()\n    if prune_th_init < x_sorted[2]:\n        prune_th_init = (x_sorted[1] + x_sorted[2]) / 2\n    x_log10 = numpy.log10(x_np)\n    th_log10, (l0, l1, s0, s1, a0, a1) = sep_two_skewed_normals(x_log10, 
numpy.log10(prune_th_init))\n th = numpy.power(10, th_log10)\n if th > x_np.max(): # Failure case, seldom\n th = prune_th_init\n return th, (l0, l1, s0, s1, a0, a1)\n\n\ndef kmeans_thresholding(x):\n kmeans = KMeans(n_clusters=2, random_state=0).fit(numpy.log10(numpy.array(x)).reshape(-1, 1))\n th = numpy.power(10, kmeans.cluster_centers_.mean())\n log10_info = sorted([_[0] for _ in kmeans.cluster_centers_])\n return th, log10_info\n\n\n\"\"\"\nimport math\nfrom scipy.optimize import newton\nINV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\nPERCENT_FOR_ESTIMATION = 0.9\n\ndef normal_pdf(x, m, s):\n return INV_SQRT_2PI / s * numpy.exp(-0.5 * (x - m) ** 2 / s ** 2)\n\n\ndef calculate_th(m0, s0, m1, s1, w0):\n th_init = m1 + (m0 - m1) * s1 / (s1 + s0)\n try:\n th = newton(\n lambda x: w0 * (x - m0) * normal_pdf(x, m0, s0) / s0 ** 3 + (1 - w0) * (x - m1) * normal_pdf(x, m1,\n s1) / s1 ** 3,\n th_init\n )\n except:\n th = th_init\n return th\n\n\ndef approximate_th(m0, s0, m1, s1, w0):\n #approximate threshold with the equal point for better efficiency\n\n s0_sq = s0 * s0\n s1_sq = s1 * s1\n m0_sq = m0 * m0\n m1_sq = m1 * m1\n M = w0 * s1 / (1 - w0) / s0\n a = 0.5 * (1 / s0_sq - 1 / s1_sq)\n b = m1 / s1_sq - m0 / s0_sq\n c = 0.5 * (m0_sq / s0_sq - m1_sq / s1_sq) - math.log(M)\n\n th = 0.5 * (-b - math.sqrt(b * b - 4 * a * c)) / a\n if th < m0 or th > m1:\n th = 0.5 * (-b + math.sqrt(b * b - 4 * a * c)) / a\n return th\n\n\ndef em_two_gaussians(x, th_init, tol=1e-3):\n x0 = x[x < th_init]\n x1 = x[x >= th_init]\n\n if x0.size == 0:\n return th_init, (x.min() - 1, x1.mean(), 10 * tol, x1.std(), 0)\n\n m0 = x0.mean()\n m1 = x1.mean()\n s0 = x0.std()\n s1 = x1.std()\n w0 = x0.size / x.size\n\n if x1.size < 2:\n return th_init, (m0, m1, s0, 10 * tol, w0)\n elif x0.size < 2:\n return th_init, (m0, m1, 10 * tol, s1, w0)\n\n num_x0_last = x0.size\n num_change = 1\n while num_change:\n # E\n th = calculate_th(m0, s0, m1, s1, w0)\n # th = approximate_th(m0, s0, m1, s1, w0)\n x0 = x[x < th]\n x1 = x[x >= th]\n\n # avoid singular values\n if x0.size < 2 or x1.size < 2:\n break\n\n # M\n m0 = x0.mean()\n m1 = x1.mean()\n s0 = x0.std()\n s1 = x1.std()\n w0 = x1.size / x.size\n\n # update\n num_change = x0.size - num_x0_last\n num_x0_last = x0.size\n\n # singular break for too large th\n if th > m1:\n x_sorted = sorted(x)\n th = (x_sorted[num_x0_last - 1] + x_sorted[num_x0_last]) / 2\n\n # non-overlap constraint\n th = min(th, m1 - 3 * s1)\n if th < m0 + 3 * s0:\n th = min(th, th_init)\n return th, (m0, m1, s0, s1, w0)\n\"\"\"\n","sub_path":"netslim/thresholding.py","file_name":"thresholding.py","file_ext":"py","file_size_in_byte":6365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"170355108","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[9]:\n\n\ndf = pd.read_excel(r'E:/ML Projects/BMI.xlsx')\n\n\n# In[10]:\n\n\ndf.head()\n\n\n# In[11]:\n\n\ndf.isnull().sum()\n\n\n# In[12]:\n\n\ndf.dtypes\n\n\n# In[13]:\n\n\ndf = df.drop(['Gender'],axis=1)\n\n\n# In[14]:\n\n\ndf.head()\n\n\n# In[17]:\n\n\nx = df.iloc[:,:-1].values\ny = df.iloc[:,-1].values\n\n\n# In[18]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[19]:\n\n\nx_train,x_test,y_train,y_test = train_test_split(x,y, test_size=0.25,\n random_state=0)\n\n\n# In[20]:\n\n\nfrom sklearn.linear_model import 
LinearRegression\n\n\n# In[21]:\n\n\nclassfier = LinearRegression()\n\n\n# In[22]:\n\n\nclassfier.fit(x_train,y_train)\n\n\n# In[23]:\n\n\ny_pred = classfier.predict(x_test)\n\n\n# In[24]:\n\n\ny_pred\n\n\n# In[25]:\n\n\nfrom sklearn.metrics import r2_score\n\n\n# In[26]:\n\n\nr2 = r2_score(y_test,y_pred)\n\n\n# In[27]:\n\n\nr2\n\n\n# In[28]:\n\n\nx.shape\n\n\n# In[29]:\n\n\nadj_r2 = (1-(1-r2)*(49/45))\n\n\n# In[30]:\n\n\nadj_r2\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Project - Body mass Index.py","file_name":"Project - Body mass Index.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"150671047","text":"import collections\n\n\nclass Solution:\n def checkInclusion(self, s1, s2):\n mp1 = collections.defaultdict(int)\n for c in s1:\n mp1[c] += 1\n\n window = 0\n for i, c in enumerate(s2):\n while mp1[c] == 0 and window:\n mp1[s2[i - window]] += 1\n window -= 1\n if mp1[c] > 0:\n mp1[c] -= 1\n window += 1\n if window == len(s1):\n return True\n return False\n","sub_path":"Python/permutation-in-string.py","file_name":"permutation-in-string.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"94527854","text":"N = 12 \ntemp = [0] * 12 \nmaior = 0.0\nmenor = 0.0 \n\n# primeiro mes deve ser lido seprado para atribuir a maior e a menor temperatura do momemto\ntemp[0] = float(input(\"Digite a temperatura do mes de janeiro: \"))\nmaior = temp[0]\nmenor = temp[0]\n\"\"\" MAIORES E MENORES TEMPERATURAS \"\"\"\ncont = 1\nwhile cont < N:\n temp[cont] = float(input(\"Digite a temperatura do proximo mes: \"))\n if temp[cont] >= maior:\n maior = temp[cont]\n elif temp[cont] <= menor:\n menor = temp[cont]\n cont += 1\n\"\"\" MESES DA MAIORES E MENORES TEMPERATURAS \"\"\"\ncont = 0\nwhile cont < N:\n if temp[cont] == maior:\n print(\"meses das maiores temperaturas: \", cont + 1)\n cont += 1\n #-----------------#\ncont = 0\nwhile cont < N:\n if temp[cont] == menor:\n print(\"meses das menores temperaturas: \", cont + 1)\nprint(maior, menor)\n","sub_path":"Vetores_Matrizes/vetores - exercícios/exercícios 2/exer4.py","file_name":"exer4.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"639172137","text":"#OOPS : Object is a Container and Class is also a Container\n\nclass MobilePhone:\n\n name = \"iPhone7\"\n brand = \"Apple\"\n\n # Constructor\n def __init__(self, ram, mem, col):\n self.ram = ram\n self.mem = mem\n self.color = col\n\n\n\n\n\n# Write Operation\n# 1. Create an Attribute if its not thr\n# 2. 
Update the value if its thr\n\nMobilePhone.screen = \"5 inches\"\nMobilePhone.screen = \"4 inches\"\n\nprint(MobilePhone.name,\" belongs to \", MobilePhone.brand,\" and has a screen size of\", MobilePhone.screen)\n\nprint(MobilePhone.__dict__)\n\n# Creation of an Object\nm1 = MobilePhone(\"4 GB\",\"128 GB\",\"Black\")\n\nm1.price = 60000\n\n\n#Ref Variable can access the property of class if it is not thr in the object\nprint(m1.__dict__)\nprint(m1.name)\n\n# print(MobilePhone.ram)\n# Object can access Class's Property\n# Class cannot access Object's Property\n","sub_path":"venv/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"391718311","text":"# coding=utf-8\n\nimport zAI\nfrom zAI import zImage\nimport tkinter as tk\n\nzAI.utils.set_backend_key(key_name='MICROSOFT_AZURE_VISION_API_KEY', key_value='API KEY', save=True)\nzAI.utils.set_backend_key(key_name='MICROSOFT_AZURE_URL', key_value='END POINT', save=True)\n\nimageLink = 'people.jpg'\nimage = zImage(imageLink)\nimage.find_faces(backend='local')\n\nmyCloseup = image.extract_face(n=3, margin=15) # margin is the number of pixels we will expand\nmyCloseup.display()\nmyCloseup.save(\"./face.jpg\")\n","sub_path":"By Day/201910/20191010/zAI 라이브러리 응용/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"402032217","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-fat/egg/sc/social/viewcounter/tests/tests.py\n# Compiled at: 2010-08-18 13:21:09\nimport unittest, doctest\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom zope.testing import doctestunit\nfrom zope.component import testing, queryUtility\nfrom Testing import ZopeTestCase as ztc\nfrom Products.Five import zcml\nfrom Products.Five import fiveconfigure\nfrom Products.PloneTestCase import PloneTestCase as ptc\nfrom Products.PloneTestCase.layer import PloneSite\nfrom Products.PloneTestCase.layer import onsetup\nfrom Products.CMFPlone.utils import getToolByName\nfrom sc.social.viewcounter.pageview import Base\nfrom sc.social.viewcounter.pageview import SCOPED_SESSION_NAME\nfrom sc.social.viewcounter.pageview import session\nfrom sc.social.viewcounter.pageview import PageView\nfrom zope.app.cache.interfaces.ram import IRAMCache\nimport sc.social.viewcounter\n\ndef InvalidateCache(function_name):\n cache = queryUtility(IRAMCache)\n if cache:\n cache.invalidate(function_name)\n\n\n@onsetup\ndef setup_product():\n fiveconfigure.debug_mode = True\n zcml.load_config('testing.zcml', sc.social.viewcounter.tests)\n zcml.load_config('configure.zcml', sc.social.viewcounter)\n fiveconfigure.debug_mode = False\n\n\nsetup_product()\nptc.setupPloneSite(extension_profiles=['sc.social.viewcounter:default'])\n\nclass TestCase(ptc.PloneTestCase):\n __module__ = __name__\n baseContents = []\n\n def afterSetUp(self):\n \"\"\"\n \"\"\"\n objects = []\n self.loginAsPortalOwner()\n baseObject = self.portal\n for typeName in ['News Item', 'Document']:\n objects.extend(self.createBaseContent(baseObject, typeName, quantity=5))\n\n self.baseContents = [ (o.UID(), ('/').join(o.getPhysicalPath()), o.portal_type) for o in objects ]\n\n def createBaseContent(self, parent, typeName='News Item', quantity=5):\n 
\"\"\" \n \"\"\"\n wt = getToolByName(parent, 'portal_workflow')\n objects = []\n for item in range(quantity):\n oId = '%s_%04d' % (typeName, item)\n oTitle = oId\n oId = parent.invokeFactory(typeName, id=oId, title=oTitle)\n oContent = parent[oId]\n if item % 2:\n oContent.setSubject(['odd content'])\n else:\n oContent.setSubject(['even content'])\n oContent.reindexObject()\n wt.doActionFor(oContent, 'publish')\n objects.append(oContent)\n\n return objects\n\n def invalidateReportCache(self):\n \"\"\"Invalidate report cache\n \"\"\"\n InvalidateCache('sc.social.viewcounter.browser.viewcounter._reportPageViews')\n\n def populateViewCounter(self, cleanBefore=False, periods=[], invalidate=True):\n \"\"\" Populate viewcounter database\n in order to have data for this test\n \"\"\"\n contents = self.baseContents\n testSession = session()\n if cleanBefore:\n Base.metadata.drop_all(bind=testSession.bind)\n Base.metadata.create_all(bind=testSession.bind)\n vcBrowserView = self.portal.restrictedTraverse('vc_reports')\n contents = zip(range(len(self.baseContents), 0, -1), contents)\n user_name = 'foo'\n user_ip = '42.42.42.42'\n if not periods:\n periods = [\n 'lastHour', 'lastDay', 'lastWeek', 'lastMonth']\n for period in periods:\n timeRange = getattr(vcBrowserView, period)\n (tStart, tEnd) = timeRange\n seconds = (tEnd - tStart).days * 86400 + (tEnd - tStart).seconds\n accesses = []\n for (mult, content) in contents:\n (object_uid, object_path, object_type) = content\n accesses.extend([ (tStart + timedelta(seconds=seconds / mult * i),\n PageView(object_uid, object_path, object_type, user_ip, user_name))\n for i in range(1, mult) ])\n for access in accesses:\n (dt, pv) = access\n pv.access_datetime = dt\n testSession.add(pv)\n\n if invalidate:\n self.invalidateReportCache()\n\n class layer(PloneSite):\n __module__ = __name__\n\n @classmethod\n def tearDown(cls):\n pass\n\n\ndef test_suite():\n return unittest.TestSuite([ztc.ZopeDocFileSuite('docs/integration.txt', package='sc.social.viewcounter', test_class=TestCase, optionflags=doctest.REPORT_ONLY_FIRST_FAILURE), ztc.ZopeDocFileSuite('docs/caching.txt', package='sc.social.viewcounter', test_class=TestCase, optionflags=doctest.REPORT_ONLY_FIRST_FAILURE), ztc.ZopeDocFileSuite('docs/portlet.txt', package='sc.social.viewcounter', test_class=TestCase, optionflags=doctest.REPORT_ONLY_FIRST_FAILURE), ztc.FunctionalDocFileSuite('docs/browser.txt', package='sc.social.viewcounter', test_class=TestCase, optionflags=doctest.REPORT_ONLY_FIRST_FAILURE)])\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')","sub_path":"pycfiles/sc.social.viewcounter-1.0.7-py2.4/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247240709","text":"import math\r\n\r\n\r\nclass Node:\r\n\r\n def __init__(self, data):\r\n self.left = None\r\n self.right = None\r\n self.data = data\r\n\r\n# Insert method to create nodes\r\n def insert(self, data):\r\n if self.data:\r\n if data < self.data:\r\n if self.left is None:\r\n self.left = Node(data)\r\n else:\r\n self.left.insert(data)\r\n elif data > self.data:\r\n if self.right is None:\r\n self.right = Node(data)\r\n else:\r\n self.right.insert(data)\r\n else:\r\n self.data = data\r\n\r\n def createBalancedTree(self, data):\r\n return self.BalancedTree(data, 0, len(data)-1)\r\n\r\n def BalancedTree(self, data, start, end):\r\n if end < start:\r\n return None\r\n mid = 
math.floor((start+end)/2)\r\n tree = Node(data[mid])\r\n tree.left = self.BalancedTree(data, start, mid - 1)\r\n tree.right = self.BalancedTree(data, mid + 1, end)\r\n return tree\r\n\r\n def createlist(self, root):\r\n res = []\r\n if root:\r\n res = self.createlist(root.left)\r\n res.append(root.data)\r\n res = res + self.createlist(root.right)\r\n return res\r\n\r\n# Print the Tree\r\n def PrintTree(self):\r\n if self.left:\r\n print(f\"Вершина {self.data}\")\r\n print(f\"Лево {self.left.data}\")\r\n self.left.PrintTree()\r\n if self.right:\r\n print(f\"Вершина {self.data}\")\r\n print(f\"Право {self.right.data}\")\r\n self.right.PrintTree()\r\n\r\nroot = Node(12)\r\nroot.insert(1)\r\nroot.insert(4)\r\nroot.insert(9)\r\nroot.insert(10)\r\nroot.insert(5)\r\nroot.insert(8)\r\nroot.insert(11)\r\nroot.insert(2)\r\nroot.insert(14)\r\nroot.insert(3)\r\nroot.insert(7)\r\nroot.insert(20)\r\nroot.insert(16)\r\nlistTree = root.createlist(root)\r\nBalance = root.createBalancedTree(listTree)\r\nprint(Balance.PrintTree())\r\n\r\n","sub_path":"node2.py","file_name":"node2.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213295054","text":"from utils import *\n\nname = 'build_heap'\n\ndef heapify(A, n, i):\n \"Recursively sift down i if A[i] is smaller than either of it's children.\"\n largest = i\n left = 2 * largest + 1\n right = 2 * largest + 2\n if left < n and A[largest] < A[left]:\n largest = left\n if right < n and A[largest] < A[right]:\n largest = right\n if largest != i:\n A[i], A[largest] = A[largest], A[i]\n heapify(A, n, largest)\n\ndef build_heap(A):\n \"Build a max heap by heapifying n/2 elements in reverse.\"\n n = len(A)\n for i in range(n//2, -1, -1):\n heapify(A, n, i)\n return A\n\nif __name__ == '__main__':\n print(rosalind_pretty(build_heap(list(Array(Input(name))[1]))))\n","sub_path":"Python/build_heap.py","file_name":"build_heap.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633539202","text":"import pandas as pd\nimport globalparameter\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.feature_selection import RFECV\n\ncosine_similarity_column_precision = []\ncosine_similarity_column_recall = []\nwork_year_column_precision = []\nwork_year_column_recall = []\nhighest_degree_column_precision = []\nhighest_degree_column_recall = []\nexp_time_column_precision = []\nexp_time_column_recall = []\n\n\ndef calculateprecisionandrecall(extractnumber):\n length = extractnumber\n df = pd.read_csv(globalparameter.path + '/test1.csv', sep=',')\n id_column = df['normalized_id']\n index_column = df['index_normalized']\n cosine_similarity_column = df['normalized_cosine_similarity']\n work_year_column = df['normalized_work_year']\n highest_degree_column = df['normalized_highest_degree']\n exp_time_column = df['normalized_exp_time']\n user_data = pd.DataFrame(\n {'index': index_column, 'id': id_column, 'cosine_similarity': cosine_similarity_column,\n 'work_year': work_year_column,\n 'highest_degree': highest_degree_column, 'exp_time': exp_time_column})\n id_manual_top500 = user_data['id'][:length].tolist()\n id_manual_other = user_data['id'][length:globalparameter.total_number].tolist()\n calculatewiththreshold(user_data, id_manual_top500, 
id_manual_other)\n\ndef calculatewiththreshold(user_data, id_manual_top500, id_manual_other):\n\n    threshold = 0\n    step = 0.001\n    for i in range(1000):\n        positive_test_cosine_similarity = user_data[user_data['cosine_similarity']>=threshold]['id'].tolist()\n        negative_test_cosine_similarity = user_data[user_data['cosine_similarity']<threshold]['id'].tolist()\n        positive_test_work_year = user_data[user_data['work_year']>=threshold]['id'].tolist()\n        negative_test_work_year = user_data[user_data['work_year']<threshold]['id'].tolist()\n        positive_test_highest_degree = user_data[user_data['highest_degree']>=threshold]['id'].tolist()\n        negative_test_highest_degree = user_data[user_data['highest_degree']<threshold]['id'].tolist()\n        positive_test_exp_time = user_data[user_data['exp_time']>=threshold]['id'].tolist()\n        negative_test_exp_time = user_data[user_data['exp_time']<threshold]['id'].tolist()\n[1,2,3,4,5,6,7,8,9] -> [1,2,7,4,5,6,3,8,9]\n[12,13,14] -> [12,14,13]\n[9,2,4,7,3] -> [2,7,4,3,9]\n\nThere is no need for a precheck. The array will always be not null and will always contain at least \none number.\n\nYou should not modify the input array!\n'''\n\n# Swapping Value Function\n\n\ndef adj_arr(arr):\n    new_arr = []\n    i = 0\n    while i < len(arr):\n        j = 0\n        str_i = list(str(arr[i]))\n        while j < len(str_i):\n            if str_i[j] == '7':\n                str_i[j] = '3'\n            elif str_i[j] == '3':\n                str_i[j] = '7'\n            j += 1\n        new_arr.append(int(''.join(str_i)))\n        i += 1\n    return new_arr\n\n\ndef sort_twisted37(arr):\n    return adj_arr(sorted(adj_arr(arr)))\n\n# Test Cases\n\nprint(sort_twisted37([1, 2, 3, 4, 5, 6, 7, 8, 9]))\nprint(sort_twisted37([12, 13, 14]))\nprint(sort_twisted37([9, 2, 4, 7, 3]))\n","sub_path":"Python/Sorting_on_planet_Twisted37.py","file_name":"Sorting_on_planet_Twisted37.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"578142529","text":"# read_embark_fields_json_file.py 2/4/19 sm\n\"\"\"This module will read and validate the JSON that defines the EmbArk XML.\"\"\"\n\nfrom __future__ import print_function\n\nimport json\nfrom get_embark_xml_definitions import get_fields_definition, \\\n    get_item_xpath, get_field_name, get_field_required, \\\n    get_field_duplicates_allowed, get_field_xpath, get_field_default, \\\n    get_does_not_start_with, get_starts_with, get_validation_rule, get_constant\n\n\ndef _read_embark_fields_file(filename):\n    \"\"\" read json from json file \"\"\"\n    try:\n        with open(filename, 'r') as input_source:\n            data = json.load(input_source)\n        input_source.close()\n    except IOError:\n        print('Cannot open ' + filename)\n        raise\n    return data\n\n\ndef _validate_embark_fields_file(embark_field_definitions):\n    \"\"\" validate format of json file \"\"\"\n    get_item_xpath(embark_field_definitions)\n    fields_definition = get_fields_definition(embark_field_definitions)\n    for field in fields_definition:\n        try:\n            get_field_name(field)\n            get_field_required(field)\n            get_field_duplicates_allowed(field)\n            get_field_xpath(field)\n            get_field_default(field)\n            get_does_not_start_with(field)\n            get_starts_with(field)\n            get_validation_rule(field)\n            get_constant(field)\n        except ValueError:\n            # print('Error attempting to validate JSON file.')\n            raise\n\n\ndef read_embark_fields_json_file(filename=\"./EmbArkXMLFields.json\"):\n    \"\"\" calls routines to read and validate json file \"\"\"\n    embark_field_definitions = \"\"\n    try:\n        embark_field_definitions = _read_embark_fields_file(filename)\n        _validate_embark_fields_file(embark_field_definitions)\n    except ValueError:\n        print('ValueError in read_embark_fields_json_file encountered.')\n        raise\n    return 
embark_field_definitions\n","sub_path":"read_embark_fields_json_file.py","file_name":"read_embark_fields_json_file.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"561372128","text":"# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2012-2019 Virtual Cable S.L.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of Virtual Cable S.L. nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n.. 
moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com\n\"\"\"\nimport logging\nimport typing\n\nfrom uds.core.services import Publication\nfrom uds.core.util.state import State\n\n# Not imported at runtime, just for type checking\nif typing.TYPE_CHECKING:\n from .service import LiveService\n\nlogger = logging.getLogger(__name__)\n\n\nclass LivePublication(Publication):\n \"\"\"\n This class provides the publication of a oVirtLinkedService\n \"\"\"\n\n suggestedTime = (\n 2 # : Suggested recheck time if publication is unfinished in seconds\n )\n\n _name: str = ''\n _reason: str = ''\n _templateId: str = ''\n _state: str = 'r'\n\n def service(self) -> 'LiveService':\n return typing.cast('LiveService', super().service())\n\n def marshal(self) -> bytes:\n \"\"\"\n returns data from an instance of Sample Publication serialized\n \"\"\"\n return '\\t'.join(\n ['v1', self._name, self._reason, self._templateId, self._state]\n ).encode('utf8')\n\n def unmarshal(self, data: bytes) -> None:\n \"\"\"\n deserializes the data and loads it inside instance.\n \"\"\"\n logger.debug('Data: %s', data)\n vals = data.decode('utf8').split('\\t')\n if vals[0] == 'v1':\n self._name, self._reason, self._templateId, self._state = vals[1:]\n\n def publish(self) -> str:\n \"\"\"\n Realizes the publication of the service\n \"\"\"\n self._name = self.service().sanitizeVmName(\n 'UDSP ' + self.dsName() + \"-\" + str(self.revision())\n )\n self._reason = '' # No error, no reason for it\n self._state = 'running'\n\n try:\n self._templateId = self.service().makeTemplate(self._name)\n except Exception as e:\n self._state = 'error'\n self._reason = str(e)\n return State.ERROR\n\n return State.RUNNING\n\n def checkState(self) -> str:\n \"\"\"\n Checks state of publication creation\n \"\"\"\n if self._state == 'running':\n try:\n if self.service().checkTemplatePublished(self._templateId) is False:\n return State.RUNNING\n self._state = 'ok'\n except Exception as e:\n self._state = 'error'\n self._reason = str(e)\n\n if self._state == 'error':\n return State.ERROR\n\n if self._state == 'ok':\n return State.FINISHED\n\n self._state = 'ok'\n return State.FINISHED\n\n def reasonOfError(self) -> str:\n \"\"\"\n If a publication produces an error, here we must notify the reason why\n it happened. 
This will be called just after publish or checkState\n if they return State.ERROR\n\n Returns an string, in our case, set at checkState\n \"\"\"\n return self._reason\n\n def destroy(self) -> str:\n \"\"\"\n This is called once a publication is no more needed.\n\n This method do whatever needed to clean up things, such as\n removing created \"external\" data (environment gets cleaned by core),\n etc..\n\n The retunred value is the same as when publishing, State.RUNNING,\n State.FINISHED or State.ERROR.\n \"\"\"\n # We do not do anything else to destroy this instance of publication\n try:\n self.service().removeTemplate(self._templateId)\n except Exception as e:\n self._state = 'error'\n self._reason = str(e)\n return State.ERROR\n\n return State.FINISHED\n\n def cancel(self) -> str:\n \"\"\"\n Do same thing as destroy\n \"\"\"\n return self.destroy()\n\n # Here ends the publication needed methods.\n # Methods provided below are specific for this publication\n # and will be used by user deployments that uses this kind of publication\n\n def getTemplateId(self) -> str:\n \"\"\"\n Returns the template id associated with the publication\n \"\"\"\n return self._templateId\n","sub_path":"server/src/uds/services/OpenNebula/publication.py","file_name":"publication.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406225352","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom inary.actionsapi import cmaketools\nfrom inary.actionsapi import inarytools\nfrom inary.actionsapi import shelltools\nfrom inary.actionsapi import get\n\nWorkDir = \"openal-soft-%s\" % get.srcVERSION()\n\ndef setup():\n options = \"-DALSA=1 \\\n -DPULSEAUDIO=1 \\\n -DOSS=1 \\\n -DEXAMPLES=OFF\"\n\n if get.buildTYPE() == \"emul32\":\n options += \" -DCMAKE_INSTALL_PREFIX=/emul32 \\\n -DLIB_SUFFIX=32\"\n shelltools.export(\"CFLAGS\", \"%s -m32\" % get.CFLAGS())\n\n cmaketools.configure(options)\n\ndef build():\n cmaketools.make()\n\ndef install():\n cmaketools.rawInstall(\"DESTDIR=%s\" % get.installDIR())\n\n # is there any \"libdir\" prefix for cmake ?\n if get.buildTYPE() == \"emul32\":\n from distutils.dir_util import copy_tree\n copy_tree(\"%s/emul32/lib32/\" % get.installDIR(), \"%s/usr/lib32\" % get.installDIR())\n inarytools.removeDir(\"/emul32\")\n inarytools.dosed(\"%s/usr/lib32/pkgconfig/openal.pc\" % get.installDIR(), \"emul32\", \"usr\")\n return\n\n inarytools.dodoc(\"COPYING\", \"alsoftrc.sample\")\n","sub_path":"multimedia/sound/openal/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"499731387","text":"import numpy as np\n# import tensorflow_datasets as tfds\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth', 100)\n\ntraining_sentences = []\ntraining_labels = []\n\ntesting_sentences = []\ntesting_labels = []\n\n# imdb, info = tfds.load(\"imdb_reviews\", with_info=True, as_supervised=True)\n#\n# train_data, test_data = imdb['train'], imdb['test']\n#\n# for s, l in train_data:\n# training_sentences.append(str(s.numpy()))\n# training_labels.append(l.numpy())\n#\n# for s, l in 
test_data:\n# testing_sentences.append(str(s.numpy()))\n# testing_labels.append(l.numpy())\n\n\n\ndf_gen_1 = pd.read_csv('forum_content.csv', names=[\"link\", \"content\", \"label\"])\ndf_gen_2 = pd.read_csv('forum_content_gen.csv', names=[\"link\", \"content\", \"label\"])\ndf_am_1 = pd.read_csv('forum_content_literature_am.csv', names=[\"link\", \"content\", \"label\"])\ndf_am_2 = pd.read_csv('forum_content_am.csv', names=[\"link\", \"content\", \"label\"])\n\nall_df = pd.concat([df_gen_1,df_gen_2,df_am_1,df_am_2], ignore_index=True)\n\nall_df = all_df.sample(frac = 1)\nall_df.reset_index(inplace=True, drop=True)\nall_df.dropna(inplace=True)\ntrain, test = train_test_split(all_df, test_size=0.2)\n\n#str(s.tonumpy()) is needed in Python3 instead of just s.numpy()\n\nfor row in train.iterrows():\n content = row[1]['content']\n content = content.replace(\"\\\\r\\\\n\", \" \")\n training_sentences.append(content)\n training_labels.append(row[1]['label'])\n\nfor row in test.iterrows():\n content = row[1]['content']\n content = content.replace(\"\\\\r\\\\n\", \" \")\n testing_sentences.append(content)\n testing_labels.append(row[1]['label'])\n\ntraining_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)\n\nvocab_size = 10000\nembedding_dim = 16\nmax_length = 120\ntrunc_type='post'\noov_tok = \"\"\n\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\nsequences = tokenizer.texts_to_sequences(training_sentences)\npadded = pad_sequences(sequences,maxlen=max_length, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences,maxlen=max_length)\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\nprint(decode_review(padded[1]))\nprint(training_sentences[1])\n","sub_path":"cbow_embeddings.py","file_name":"cbow_embeddings.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"180212529","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Core app url config\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import url\n\nfrom core import views\n\ncategory_regex = \"|\".join([c[0] for c in settings.CATEGORIES])\n\nurlpatterns = [\n url(r'^(?P%s)/(?P[a-z0-9-]+)/$' % category_regex,\n views.SinglePostView.as_view(),\n name='single_post'),\n\n url(r'^(?P%s)/$' % category_regex,\n views.PostIndexView.as_view(),\n name='posts_index'),\n\n url(r'^(?P[\\w-]+)/$',\n views.SinglePageView.as_view(),\n name='single_page'),\n\n url(r'^$',\n views.LocaleRedirectView.as_view(),\n {'url': '/news'},\n name='first_page'),\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"403980706","text":"import discord\nfrom discord.ext import commands\nimport config\nimport asyncio\nimport aiohttp\nimport config\n\nclass Stats:\n def __init__(self, bot):\n self.bot = bot\n\n async def send_stats(self):\n tokens = (\n (\"https://discordbots.org/api/bots/%s/stats\", 
config.orgtoken),\n (\"https://bots.discord.pw/api/bots/%s/stats\", config.pwtoken),\n (\"https://botsfordiscord.com/api/v1/bots/%s\", config.botsfordiscordtoken)\n )\n\n payload = {\"Content-Type\": \"application/json\", \"server_count\": len(self.bot.guilds)}\n for url, token in tokens:\n headers = {\"Authorization\": token}\n await self.bot.http_session.post(url % self.bot.user.id, json=payload, headers=headers)\n\n\n async def on_guild_join(self, guild):\n await self.send_stats()\n\n async def on_guild_remove(self, guild):\n await self.send_stats()\n\n\n\n\ndef setup(bot):\n bot.add_cog(Stats(bot))\n","sub_path":"cogs/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"271252190","text":"from feedly.activity import AggregatedActivity\nfrom feedly.aggregators.base import RecentVerbAggregator\nfrom feedly.feeds.base import BaseFeed\nfrom feedly.serializers.aggregated_activity_serializer import AggregatedActivitySerializer\nfrom feedly.storage.cassandra import AGGREGATED_FEED_STORE\nimport copy\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass AggregatedFeed(BaseFeed):\n '''\n An aggregated feed made for relatively small feeds\n It uses a sequential scan to detect if the group already exists\n Don't use this for feeds with a large max length\n '''\n max_length = 100\n serializer_class = AggregatedActivitySerializer\n column_family = AGGREGATED_FEED_STORE\n key_format = 'aggregated_feed_%s'\n\n def __init__(self, user_id, redis=None):\n '''\n User id (the user for which we want to read/write notifications)\n '''\n # input validation\n if not isinstance(user_id, int):\n raise ValueError('user id should be an int, found %r' % user_id)\n # support for different serialization schemes\n self.serializer = self.get_serializer()\n # support for pipelining redis\n self.user_id = user_id\n\n # write the key locations\n self.format_dict = dict(user_id=user_id)\n self.key = self.key_format % user_id\n\n def add_many(self, activities):\n '''\n Note this function is very specific to notifications, this won't\n get you good performance characteristics in applications with longer\n lists\n\n Add many works as follows:\n - retrieve all aggregated activities\n - add the new activities to the existing ones\n - update the values in Redis by sending several deletes and adds\n\n Trim the sorted set to max length\n Denormalize the unseen count\n Send a pubsub publish\n '''\n columns = []\n remove_activities = {}\n aggregator = self.get_aggregator()\n\n # first stick the new activities in groups\n aggregated_activities = aggregator.aggregate(activities)\n\n # get the current aggregated activities\n current_activities = self[:self.max_length]\n current_activities_dict = dict(\n [(a.group, a) for a in current_activities])\n\n # see what we need to update\n for activity in aggregated_activities:\n if activity.group in current_activities_dict:\n # update existing\n current_activity = current_activities_dict[activity.group]\n old_activity = copy.deepcopy(current_activity)\n for a in activity.activities:\n current_activity.append(a)\n new_activity = current_activity\n # we should only do this the first time, verify things go well\n if old_activity.group in remove_activities:\n raise ValueError('Thierry didnt expect this to happen')\n remove_activities[old_activity.group] = old_activity\n else:\n # create a new activity\n new_activity = activity\n 
current_activities.append(new_activity)\n\n # add the data to the to write list\n value = self.serialize_activity(new_activity)\n columns.append(value)\n\n # pipeline all our writes to improve performance\n # TODO: removed map just to be sure\n # first remove the old notifications\n # delete_results = self.remove_many(remove_activities.values())\n\n # add the data in batch\n for instance in columns:\n self.column_family.insert(instance)\n self.trim()\n # return the current state of the notification feed\n return current_activities\n\n def get_aggregator(self):\n '''\n Returns the class used for aggregation\n '''\n aggregator_class = RecentVerbAggregator\n aggregator = aggregator_class()\n return aggregator\n\n def contains(self, activity):\n # get all the current aggregated activities\n aggregated = self[:self.max_length]\n activities = sum([list(a.activities) for a in aggregated], [])\n # make sure we don't modify things in place\n activities = copy.deepcopy(activities)\n activity = copy.deepcopy(activity)\n\n # we don't care about the time of the activity, just the contents\n activity.time = None\n for activity in activities:\n activity.time = None\n\n present = activity in activities\n return present\n\n def remove_many(self, aggregated_activities):\n '''\n Efficiently remove many activities\n '''\n scores = []\n for activity in aggregated_activities:\n if not isinstance(activity, AggregatedActivity):\n raise ValueError('we can only remove aggregated activities')\n score = self.get_activity_score(activity)\n scores.append(score)\n results = RedisSortedSetCache.remove_by_scores(self, scores)\n return results\n\n","sub_path":"feedly/feeds/aggregated_feed.py","file_name":"aggregated_feed.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"99437808","text":"from collections import Sequence\n\ndef flatten_helper(lst, new_lst):\n for element in lst:\n if isinstance(element, list):\n flatten_helper(element, new_lst)\n else:\n if element == None or (isinstance(element, Sequence) and len(element) == 0):\n continue\n else:\n new_lst.append(element)\n\ndef flatten(iterable):\n new_lst = []\n flatten_helper(iterable, new_lst)\n return new_lst\n","sub_path":"python/flatten-array/flatten_array.py","file_name":"flatten_array.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"370054628","text":"# Licensed to Elasticsearch B.V. under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Elasticsearch B.V. licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#\thttp://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport unittest.mock as mock\nfrom unittest import TestCase\n\nfrom esrally import config, exceptions, racecontrol\n\n\nclass RaceControlTests(TestCase):\n def test_finds_available_pipelines(self):\n expected = [\n [\"from-sources-complete\", \"Builds and provisions Elasticsearch, runs a benchmark and reports results.\"],\n [\"from-sources-skip-build\", \"Provisions Elasticsearch (skips the build), runs a benchmark and reports results.\"],\n [\"from-distribution\", \"Downloads an Elasticsearch distribution, provisions it, runs a benchmark and reports results.\"],\n [\"benchmark-only\", \"Assumes an already running Elasticsearch instance, runs a benchmark and reports results\"],\n ]\n\n self.assertEqual(expected, racecontrol.available_pipelines())\n\n def test_prevents_running_an_unknown_pipeline(self):\n cfg = config.Config()\n cfg.add(config.Scope.benchmark, \"race\", \"pipeline\", \"invalid\")\n cfg.add(config.Scope.benchmark, \"mechanic\", \"distribution.version\", \"5.0.0\")\n\n with self.assertRaises(exceptions.SystemSetupError) as ctx:\n racecontrol.run(cfg)\n self.assertRegex(ctx.exception.args[0], r\"Unknown pipeline \\[invalid\\]. List the available pipelines with [\\S]+? list pipelines.\")\n\n @mock.patch.dict(os.environ, {\"RALLY_RUNNING_IN_DOCKER\": \"true\"})\n def test_passes_benchmark_only_pipeline_in_docker(self):\n mock_pipeline = mock.Mock()\n test_pipeline_name = \"benchmark-only\"\n racecontrol.Pipeline(\"benchmark-only\", \"Mocked benchmark-only pipeline for unittest\", mock_pipeline)\n cfg = config.Config()\n cfg.add(config.Scope.benchmark, \"race\", \"pipeline\", \"benchmark-only\")\n\n racecontrol.run(cfg)\n\n mock_pipeline.assert_called_once_with(cfg)\n\n del racecontrol.pipelines[test_pipeline_name]\n\n @mock.patch.dict(os.environ, {\"RALLY_RUNNING_IN_DOCKER\": \"true\"})\n def test_fails_without_benchmark_only_pipeline_in_docker(self):\n mock_pipeline = mock.Mock()\n test_pipeline_name = \"unit-test-pipeline\"\n racecontrol.Pipeline(\"unit-test-pipeline\", \"Pipeline intended for unit-testing\", mock_pipeline)\n cfg = config.Config()\n cfg.add(config.Scope.benchmark, \"race\", \"pipeline\", \"unit-test-pipeline\")\n\n with self.assertRaises(exceptions.SystemSetupError) as ctx:\n racecontrol.run(cfg)\n\n self.assertEqual(\n \"Only the [benchmark-only] pipeline is supported by the Rally Docker image.\\n\"\n \"Add --pipeline=benchmark-only in your Rally arguments and try again.\\n\"\n \"For more details read the docs for the benchmark-only pipeline in \"\n \"https://esrally.readthedocs.io/en/latest/pipelines.html#benchmark-only\\n\",\n ctx.exception.args[0])\n del racecontrol.pipelines[test_pipeline_name]\n\n def test_runs_a_known_pipeline(self):\n mock_pipeline = mock.Mock()\n test_pipeline_name = \"unit-test-pipeline\"\n\n racecontrol.Pipeline(\"unit-test-pipeline\", \"Pipeline intended for unit-testing\", mock_pipeline)\n\n cfg = config.Config()\n cfg.add(config.Scope.benchmark, \"race\", \"pipeline\", \"unit-test-pipeline\")\n cfg.add(config.Scope.benchmark, \"mechanic\", \"distribution.version\", \"\")\n\n racecontrol.run(cfg)\n\n mock_pipeline.assert_called_once_with(cfg)\n\n # ensure we remove it again from the list of registered pipelines to avoid unwanted side effects\n del 
racecontrol.pipelines[test_pipeline_name]\n","sub_path":"tests/racecontrol_test.py","file_name":"racecontrol_test.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"262919139","text":"#%%\nimport pickle\nimport os\nimport numpy as np\nimport random\nfrom sklearn.neighbors import KNeighborsRegressor\n\n\nclass MLPlay:\n def __init__(self):\n \n self.ball_served = False\n self.previous_ball = (0,0)\n with open(os.path.join(os.path.dirname(__file__),'save','KNN_R_2300.pickle'),'rb') as f:\n self.model = pickle.load(f)\n \n def update(self, scene_info):\n \n if (scene_info[\"status\"] == \"GAME_OVER\" or\n scene_info[\"status\"] == \"GAME_PASS\"):\n return \"RESET\"\n if not self.ball_served:\n self.ball_served = True\n command = \"SERVE_TO_RIGHT\"\n else:\n Ball_x = scene_info[\"ball\"][0]\n Ball_y = scene_info[\"ball\"][1]\n Vector_x = scene_info[\"ball\"][0] - self.previous_ball[0]\n Vector_y = scene_info[\"ball\"][1] - self.previous_ball[1]\n Platform = scene_info[\"platform\"][0]\n if Vector_x > 0:\n if Vector_y > 0: Direction = 0\n\n else: Direction = 1\n else:\n if Vector_y > 0: Direction = 2\n else: Direction = 3\n \n X = np.array([Ball_x, Ball_y, Vector_x, Vector_y, Direction]).reshape((1, -1)) # 展開成一列\n y = self.model.predict(X)\n\n # print(y)\n\n if scene_info[\"platform\"][0]+20 + 5 < y:\n command = \"MOVE_RIGHT\"\n elif scene_info[\"platform\"][0]+20 - 5 > y:\n command = \"MOVE_LEFT\"\n else:\n command = random.choice((\"MOVE_RIGHT\",\"MOVE_LEFT\",\"NONE\"))\n\n self.previous_ball = scene_info[\"ball\"]\n return command\n\n def reset(self):\n self.ball_served = False \n\n\n# %%\n","sub_path":"games/arkanoid/ml/play_KNNmodel_regression.py","file_name":"play_KNNmodel_regression.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"98726880","text":"#!/usr/bin/python3\n\nimport logging\nimport os\nimport v4l2\nimport fcntl\nimport mmap\nimport multiprocessing as mp\nimport time\n#import asyncore\nimport selectors\n\nimport bufferman\nimport numpy\nfrom PIL import Image\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s'\n\t\t\t,datefmt='%I:%M:%S'\n#\t\t\t,filename=\"aalog.log\"\n\t\t\t,level=logging.DEBUG)\n\ndef opendev(vfn):\n\tdfile = os.open(vfn, os.O_RDWR | os.O_NONBLOCK, 0)\n\treturn dfile\n\t\ndef closedev(dfile):\n\tos.close(dfile)\n\nclass asyncamtest():\n\tdef __init__(self,cq,rq,vdev):\n\t\tself.vdev = vdev\n\t\tself.cq = cq\n\t\tself.rq = rq\n\t\tself.lgr = logging.getLogger('selectorscamtest')\n\t\tself._buffers = []\n\t\tself._mmaps = []\n\t\tself.asel = selectors.DefaultSelector()\n\t\tself.asel.register(self.vdev, selectors.EVENT_READ | selectors.EVENT_WRITE, self.vfileio)\n\t\tself.lgr.info(\"__init__ completes\")\n\n\tdef runloop(self):\n\t\tself.lgr.info(\"runloop starts\")\n\t\tself.running = True\n\t\twhile self.running:\n\t\t\tif not self.cq.empty():\n\t\t\t\tself.msgin(self.cq.get())\n#\t\t\tasyncore.loop(0.2, map = self.cmap, count = 3)\n\t\t\tevents = self.asel.select(.2)\n\t\t\tself.lgr.info(\"runloop select returns %d events\" % len(events))\n\t\t\tfor key, mask in events:\n\t\t\t\tcallback = key.data\n\t\t\t\tcallback(mask)\t\t\n\t\tself.releaseBuffs()\n\t\tself.lgr.info(\"runloop finishes\")\n\n\tdef vfileio(self, mask):\n\t\tma = \"\" if selectors.EVENT_READ & mask == 0 else \"have read \" \n\t\tmb = \"\" if selectors.EVENT_WRITE & 
mask == 0 else \"have write \"\n\t\tself.lgr.info(\"fileio \" + ma + mb)\n\t\tif ma != \"\":\n\t\t\tself.handle_read()\n\n\tdef msgin(self, msgin):\n\t\tcmd = msgin['cmd']\n\t\tself.lgr.info(\"msgin gets %s\" % cmd)\n\t\tif cmd == 'done':\n\t\t\tself.running = False\n\t\t\tself.lgr.info(\"msgin running unset\")\n\t\t\tmsgin['resu'] = 'closing'\n\t\t\tself.rq.put(msgin)\n\t\telif cmd == 'takepics':\n\t\t\tself.takepics(msgin)\n\t\telse:\n\t\t\tself.lgr.info(\"msgin What> - %s\" % cmd)\n\t\t\tqi['resu'] = 'What? - %s' % cms\n\t\t\tself.rq.put(msgin)\n\n\tdef takepics(self, msgin):\n\t\tself.lgr.info(\"prepare to take piccy \")\n\t\tself._bufferMode = v4l2.V4L2_MEMORY_MMAP\n\t\tself.fpixformat = v4l2.V4L2_PIX_FMT_YUYV\n\t\trbuffcount = msgin['buffcount']\n\t\tvformat = self.getFormat()\n\t\tself.allocBuffs(rbuffcount, vformat)\n\t\tprocparams={}\n\t\tprocparams['rotact'] = 0\n#\t\tprocparams['basefilename'] = \"%s%s%%04d\" % ('zz', camsettings['sequnamef'])\n\t\tprocparams['basefilename'] = \"fred%04d\"\n\t\tprocparams['savetype'] = \"JPEG\"\n\t\tself._buffman = bufferman.bufferman(self, vformat, vformat.fmt.pix.pixelformat, procparams)\n\t\tself.stype = v4l2.v4l2_buf_type(v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE)\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_STREAMON, self.stype)\n\t\tself.lgr.info(\"takepiccy rolling\")\n\t\t\n\tdef handle_read(self):\n\t\t\t\t#but first check we've not closed it recently....\n\t\tif self.vdev is None:\n\t\t\tself.lgr.debug(\"camera ready to read but file is None\")\n\t\t\treturn\n\t\t\n\t\tself.lgr.debug(\"camera ready to read....\")\n\t\tself._dqbuf.type = v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE\n\t\tself._dqbuf.memory = v4l2.V4L2_MEMORY_MMAP\n\t\tself._dqbuf.reserved = 0\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_DQBUF, self._dqbuf)\n#\t\tself.lgr.debug(v4camSupport.expandBufferFlags(self._dqbuf.flags))\n\t\ttstamp = time.strftime(\"%Y:%m:%d %H:%M:%S\")\n\t\tself._buffman.makeSmartImage(self._dqbuf.index, 5, tstamp)\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_STREAMOFF, self.stype)\n\t\tself._buffman.releaseSmartImageBuff()\n\t\tself.releaseBuffs()\n\t\tself.lgr.info(\"camera read finishes\")\n\n\tdef getFormat(self):\n\t\tvFormat = v4l2.v4l2_format()\n\t\tvFormat.type = v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_G_FMT, vFormat) # lets just see what we get.....\n\t\tself.lgr.info(\"video frame format before: - linestride \" + str(vFormat.fmt.pix.bytesperline) \n\t\t\t+ \", imageInfo:\" + str(vFormat.fmt.pix.width) + \"/\" + str(vFormat.fmt.pix.height))\n\t\tvFormat.type = v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE\n\t\tvFormat.fmt.pix.width = 640\n\t\tvFormat.fmt.pix.height = 480\n\t\tvFormat.fmt.pix.pixelformat = self.fpixformat\n\t\tvFormat.fmt.pix.field = v4l2.V4L2_FIELD_NONE\n\t\tvFormat.fmt.pix.bytesperline = 0\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_S_FMT, vFormat)\n\t\tself.lgr.info(\"video frame format now set - linestride \" + str(vFormat.fmt.pix.bytesperline) \n\t\t\t+ \", imageInfo:\" + str(vFormat.fmt.pix.width) + \"/\" + str(vFormat.fmt.pix.height) + \" is \" + str(vFormat.fmt.pix.sizeimage) \n\t\t\t+ \" from \" + str(vFormat.fmt.pix.width * vFormat.fmt.pix.height))\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_G_FMT, vFormat)\n\t\tself.lgr.info(\"video frame format REALLY set - linestride \" + str(vFormat.fmt.pix.bytesperline) \n\t\t\t+ \", imageInfo:\" + str(vFormat.fmt.pix.width) + \"/\" + str(vFormat.fmt.pix.height) + \" is \" + str(vFormat.fmt.pix.sizeimage) \n\t\t\t+ \" from \" + str(vFormat.fmt.pix.width * vFormat.fmt.pix.height))\n#\t\t\t+ \", 
colour space: \" + colorSpaces.get(vFormat.fmt.pix.colorspace)\n#\t\t\t+ \", pixel format: \" + pixelFormats[ vFormat.fmt.pix.pixelformat])\n\t\n\t\treturn vFormat\n\n\tdef allocBuffs(self, buffcount, vformat):\n\t\tbuffRequ = v4l2.v4l2_requestbuffers()\n\t\tbuffRequ.count = buffcount\n\t\tbuffRequ.type = v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE\n\t\tbuffRequ.memory = self._bufferMode\n\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_REQBUFS, buffRequ)\n\t\tself.lgr.info(\"agent::allocBuffs requested \" + str(buffcount) + \", got \" + str(buffRequ.count))\n\n\t\tself._buffers = []\n\t\tself._mmaps = []\n\t\tfor bi in range(0, buffRequ.count):\n\t\t\tabuf = v4l2.v4l2_buffer()\n\t\t\tself._buffers.append(abuf)\n\t\t\tabuf.type = v4l2.V4L2_BUF_TYPE_VIDEO_CAPTURE\n\t\t\tabuf.memory = self._bufferMode\n\t\t\tabuf.index = bi\n\t\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_QUERYBUF, abuf)\n\t\t\tabuf.length = vformat.fmt.pix.sizeimage\n\t\t\tif self._bufferMode == v4l2.V4L2_MEMORY_MMAP:\n\t\t\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_QBUF, abuf)\n\t\t\t\tself._mmaps.append(mmap.mmap(self.vdev, abuf.length, mmap.MAP_SHARED, \n\t\t\t\t\tmmap.PROT_READ | mmap.PROT_WRITE, offset=abuf.m.offset))\n\t\t\telse:\n\t\t\t\tabufarea = ctypes.c_int(55)\n\t\t\t\tabuf.m.userptr = ctypes.byref(abufarea)\n\t\t\t\tfcntl.ioctl(self.vdev, v4l2.VIDIOC_QBUF, abuf)\n\n\t\tself._dqbuf = v4l2.v4l2_buffer()\n\n\tdef releaseBuffs(self):\n\t\tif self._bufferMode == v4l2.V4L2_MEMORY_MMAP:\n\t\t\tfor bi in range(0, len(self._mmaps)):\n\t\t\t\tself._mmaps[bi].close()\n\t\t\tself._mmaps = []\n\t\tself._buffers = []\n\t\n\t\t\ndef runCamera(cq, rq, vfile):\n\tlgr = logging.getLogger('runCamera')\n\tlgr.info(\"camera process started\")\n\tcdev = opendev(vfile)\n\tlgr.info('camera file opened')\n\tct = asyncamtest(cq,rq,cdev)\n\tct.runloop()\n\tif not cdev is None:\n\t\tclosedev(cdev)\n\t\tlgr.info('camera closed')\n\ttime.sleep(0.5)\n\tlgr.info(\"camera process finished\")\n\nif __name__==\"__main__\":\n\tlogging.info(\"now attempting to run usb camera using multiple processes and memory mapped IO.......\")\n\timport argparse\n\tparser = argparse.ArgumentParser(description=\"test app for multi processing with memory mapped IO\")\n\tparser.add_argument( \"-v\", \"--video\"\n\t\t, type=int\n\t\t, help=\"number of the video device to be tested (from /dev/videox) default is video0\")\n\targs = parser.parse_args()\n\tvnum = args.video if args.video else 0\n\tvfilename = '/dev/video%d' % vnum\n\ttry:\n\t\tdfile = opendev(vfilename)\n\texcept IOError:\n\t\tlogging.critical(\"Unable to open device - IOError\" + vfilename)\n\t\tquit()\n\texcept:\n\t\tlogging.critical(\"Device open failed for \" + vfilename)\n\t\tquit()\n\n\tlogging.info(\"device %s opened OK - start camera process\" % vfilename)\n\tclosedev(dfile)\n\tcomq = mp.Queue()\n\trespq = mp.\tQueue()\n\tcamproc = mp.Process(target=runCamera, name = \"camtest video%d\" % vnum\n\t\t\t\t, args= (comq, respq, vfilename))\n\tcamproc.start()\n\tcomq.put({'cmd': 'takepics', 'buffcount':4})\n\ttime.sleep(4)\n\tcomq.put({'cmd':'done'})\n\tresp = respq.get()\n\tprint(\"->\" + str(resp))\n\t\n\tcamproc.join()\n\tlogging.info(\"byeeee\")","sub_path":"imcol/testv4l2.py","file_name":"testv4l2.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"325554172","text":"#libraries\nimport boto3\nimport subprocess\n\n#instance variables\naws_region = \"test\"\ntags_to_find = ['Backup','Name']\n\n#code\nsession = 
boto3.Session(profile_name='default')\n\nec2 = boto3.client('ec2')\n\nresponse = ec2.describe_instances(Filters=[{'Name' : 'tag-key','Values' : tags_to_find }])\n\nprint(response)\n","sub_path":"Python/scan-tagged-instances.py","file_name":"scan-tagged-instances.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"229632170","text":"from mininet.topo import Topo\nfrom mininet.nodelib import NAT\n\n\n\nclass MyTopo(Topo):\n\n def build(self):\n self.hostNum = 5\n self.switchNum = 6\n self.addTopo()\n\n\n\n def addTopo(self):\n # DB :\n host8 = self.addHost('h8', ip=\"192.168.1.8\", mac='00:00:00:00:00:08')\n host6 = self.addHost('h6', ip=\"192.168.1.6\", mac='00:00:00:00:00:06')\n host7 = self.addHost('h7', ip=\"193.168.1.7\", mac='00:00:00:00:00:07')\n\n switch5 = self.addSwitch('s5', ip=\"193.168.2.1\", datapath='user')\n switch6 = self.addSwitch('s6', ip=\"192.168.2.2\", datapath='user')\n\n gateway3 = self.addSwitch('g3', ip=\"192.1.1.2\", dpid='C')\n gateway4 = self.addSwitch('g4', ip=\"193.1.1.1\", dpid='D')\n\n # host - switch\n self.addLink(switch5, host7, 1, 1)\n self.addLink(switch6, host8, 2, 1)\n self.addLink(switch6, host6, 1, 1)\n\n # switch - switch\n self.addLink(switch5, switch6, 2, 4)\n\n # switch - gateway\n self.addLink(switch5, gateway4, 3, 1)\n self.addLink(switch6, gateway3, 3, 1)\n\n # gateway - gateway\n self.addLink(gateway3, gateway4, 2, 2)\n\n # gateway - NAT\n\n\n\ntopos = {'mytopo': (lambda: MyTopo())}","sub_path":"topo/d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276700131","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2011 Luke Benstead\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\" A code completion parser for Python \"\"\"\n\nfrom io import StringIO\nimport tokenize\nimport keyword\nimport builtins\n\nfrom token import DEDENT, NEWLINE\n\nclass ScopeType:\n MODULE = 1\n CLASS = 2\n METHOD = 3\n\nKEYWORDS_THAT_INHERIT_SCOPE = [ \"if\", \"else\", \"for\", \"elif\", \"try\", \"except\", \"do\", \"while\", \"with\" ]\nKEYWORDS_THAT_ARE_IGNORED = [ \"raise\", \"assert\", \"break\", \"continue\", \"throw\", \"print\", \"pass\", \"return\" ]\n\nclass Scope(object):\n def __init__(self, name, scope_type, parent=None):\n self.name = name\n self.scope_type = scope_type\n self.parent = parent\n \n self.variables = set()\n self.methods = set()\n self.types = set()\n self.keywords = set()\n \n if scope_type == ScopeType.MODULE:\n self.methods = set([ x for x in dir(builtins) if getattr(builtins, x).__class__ == isinstance.__class__ ])\n self.types = set([ x for x in dir(builtins) if isinstance(getattr(builtins, x), type) ])\n self.keywords = set(keyword.kwlist)\n self.modules = set()\n self.inherited_scopes = set()\n\n self.children = {}\n \n def inherit(self, scope):\n import copy\n self.inherited_scopes.add(copy.deepcopy(scope))\n\n def get_variables(self):\n result = list(self.variables)\n for scope in self.inherited_scopes:\n if isinstance(scope, Scope):\n result.extend(scope.get_variables())\n else:\n #FIXME: look up class scope\n pass\n return set(result)\n \n def get_methods(self):\n result = list(self.methods)\n for scope in self.inherited_scopes:\n if isinstance(scope, Scope): \n result.extend(scope.get_methods())\n else:\n #FIXME: look up class scope\n pass \n return set(result)\n \n def get_types(self):\n result = list(self.types)\n for scope in self.inherited_scopes:\n if isinstance(scope, Scope): \n result.extend(scope.get_types())\n else:\n #FIXME: look up class scope\n pass \n return set(result) \n\n def get_modules(self):\n result = list(self.modules)\n for scope in self.inherited_scopes:\n if isinstance(scope, Scope):\n result.extend(scope.get_modules())\n else:\n pass\n return set(result)\n \nclass ObjectScope(Scope):\n def __init__(self, parent):\n super(ObjectScope, self).__init__(\"object\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(object)\n methods = [ x for x in attrs if callable(getattr(object, x, None)) ]\n variables = [ x for x in attrs if not callable(getattr(object, x, None)) ]\n \n self.variables = set(variables)\n self.methods = set(methods)\n self.types = set()\n self.keywords = set()\n\nclass ListScope(Scope):\n def __init__(self, parent):\n super(ListScope, self).__init__(\"list\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(list)\n methods = [ x for x in attrs if callable(getattr(list, x, None)) if not x.startswith(\"__\") ]\n variables = [ x for x in attrs if not callable(getattr(list, x, None)) if not x.startswith(\"__\") ]\n \n self.variables = set(variables)\n self.methods = set(methods)\n\nclass TupleScope(Scope):\n def __init__(self, parent):\n super(TupleScope, self).__init__(\"tuple\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(tuple)\n methods = [ x for x in attrs if callable(getattr(tuple, x, None)) if not x.startswith(\"__\")]\n variables = [ x for x in attrs if not callable(getattr(tuple, x, None)) if not x.startswith(\"__\") ]\n \n 
self.variables = set(variables)\n self.methods = set(methods)\n\nclass IntScope(Scope):\n def __init__(self, parent):\n super(IntScope, self).__init__(\"int\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(int)\n methods = [ x for x in attrs if callable(getattr(int, x, None)) if not x.startswith(\"__\")]\n variables = [ x for x in attrs if not callable(getattr(int, x, None)) if not x.startswith(\"__\") ]\n \n self.variables = set(variables)\n self.methods = set(methods)\n\nclass StrScope(Scope):\n def __init__(self, parent):\n super(StrScope, self).__init__(\"str\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(str)\n methods = [ x for x in attrs if callable(getattr(str, x, None)) if not x.startswith(\"_\")]\n variables = [ x for x in attrs if not callable(getattr(str, x, None)) if not x.startswith(\"_\") ]\n \n self.variables = set(variables)\n self.methods = set(methods)\n\nclass DictScope(Scope):\n def __init__(self, parent):\n super(DictScope, self).__init__(\"dict\", ScopeType.CLASS, parent=parent)\n \n attrs = dir(dict)\n methods = [ x for x in attrs if callable(getattr(dict, x, None)) if not x.startswith(\"__\")]\n variables = [ x for x in attrs if not callable(getattr(dict, x, None)) if not x.startswith(\"__\") ]\n \n self.variables = set(variables)\n self.methods = set(methods)\n \n \nclass FileParser(object):\n def __init__(self, file_contents, current_line=None):\n self._line_no = 0\n self._global = Scope(\"__global__\", ScopeType.MODULE)\n self._current_scope = self._global\n self._current_line = current_line\n self._active_scope = self.get_global_scope()\n self._do_parse(file_contents)\n\n def _parse_to_end(self):\n tokens = []\n ignore_rest = False\n while True:\n tok_type, token, line = self._get_next_token()\n \n if tok_type == tokenize.COMMENT:\n #print(\"COMMENT\")\n ignore_rest = True\n \n if not ignore_rest:\n tokens.append((tok_type, token))\n\n if tok_type == NEWLINE or token == \"\\n\":\n break;\n return tokens\n \n def _parse_class(self):\n tok_type, token, line = self._get_next_token()\n \n class_name = token\n class_scope = Scope(token, ScopeType.CLASS, parent=self._current_scope)\n self._current_scope.types.add(class_name) #Store this class as a type\n self._current_scope.children[class_name] = class_scope\n self._current_scope = class_scope\n #print(\"New scope: %s at line %s\" % (self._current_scope.name, self._line_no))\n tokens = self._parse_to_end()\n\n #We have an open bracket, this means the class has parents\n if tokens and tokens[0][1] == \"(\":\n tokens = tokens[1:]\n for tok_type, token in tokens[:]: \n if tokens[0][1] == \":\": \n break\n \n tokens = tokens[1:]\n if token == \")\": break\n if token == \",\": continue\n \n self._current_scope.inherited_scopes.add(token)\n\n #If at this point tokens[0] is a colon, we need to check and see if there are any other statements\n #after it, if so, we need to dedent\n if tokens and tokens[0][1] == \":\":\n if len(tokens) > 1 and tokens[1][1] != \"\\n\":\n #Ignore everything after the colon, but dedent, this is likely something like:\n # class B(object): pass\n self._dedent()\n\n def _parse_method(self):\n tok_type, token, line = self._get_next_token()\n\n method_name = token\n \n method_scope = Scope(token, ScopeType.METHOD, parent=self._current_scope)\n \n self._current_scope.methods.add(method_name) #Store this class as a type \n self._current_scope.children[method_name] = method_scope\n self._current_scope = method_scope\n \n #print(\"New scope: %s at line %s\" % (self._current_scope.name, 
self._line_no))\n\n tokens = self._parse_to_end()\n \n tokens = [ x for x in tokens if x[1] not in (\"(\", \",\", \")\", \":\") ]\n\n\n if not tokens: \n return\n\n class_scope = self._find_parent_scope_of_type(ScopeType.CLASS)\n if class_scope: #This is a method that has a parent class\n #Grab the first argument (normally \"self\")\n first_token_type, first_token = tokens[0] \n \n tokens = tokens[1:] #Remove from the tokens list\n #print(\"CLASS VAR: \", class_scope.name, first_token)\n \n #add it to the variables list\n self._current_scope.variables.add(first_token)\n #set the scope for the variable as that of the parent class (so self.whatever works)\n assert(isinstance(class_scope, Scope))\n self._current_scope.children[first_token] = class_scope\n \n #The type of all other args are anybody's guess, so just treat them as \"object\"s\n for tok_type, token in tokens:\n #generic object scope\n self._current_scope.children[token] = ObjectScope(parent=self._current_scope)\n self._current_scope.variables.add(token)\n\n def _parse_with(self):\n while True:\n tok_type, token_str, line = self._get_next_token()\n if tok_type == NEWLINE:\n break\n \n #If we find the \"as\" token, we know the next token is the variable name\n if token_str == \"as\":\n tok_type, token_str, line = self._get_next_token()\n if tok_type == NEWLINE:\n break;\n self._current_scope.variables.add(token_str)\n break\n \n self._parse_to_end()\n\n\n def _parse_from_import(self):\n pass\n \n def _parse_import(self):\n tokens = self._parse_to_end()\n last_token = None\n rename_last_token_to_next_token = False\n for tok_type, token in tokens:\n if token in [\",\"]:\n continue\n elif token == \"as\" and last_token:\n rename_last_token_to_next_token = True\n continue\n elif rename_last_token_to_next_token:\n #If the last token was \"as\" then we rename the token before to the new name\n self._current_scope.modules.remove(last_token)\n self._current_scope.modules.add(token)\n self._current_scope.children[token] = self._current_scope.children[last_token]\n del self._current_scope.children[last_token]\n rename_last_token_to_next_token = False\n else:\n self._current_scope.modules.add(token)\n self._current_scope.children[token] = ObjectScope(parent=self._current_scope)\n last_token = token\n\n def _parse_statement(self, lvalue_type, lvalue):\n \"\"\" FIXME handle multiple lvalues\"\"\"\n \n tokens = [(lvalue_type, lvalue)] + self._parse_to_end()\n\n is_assignment_statement = False\n equals_position = None\n i = 0\n for token in tokens:\n if token[1] == \"=\":\n is_assignment_statement = True\n equals_position = i\n break\n i += 1\n \n is_assignment_to_member = lvalue == \"self\"\n \n scope = self._current_scope\n \n if is_assignment_statement:\n lvalue_tokens = [ x for x in tokens[:equals_position] if x[1] not in (\",\",) ]\n rvalue_tokens = [ x for x in tokens[equals_position + 1:] if x[1] not in (\",\",) ]\n if is_assignment_to_member:\n class_scope = self._find_parent_scope_of_type(ScopeType.CLASS)\n if not class_scope:\n #print(\"Don't understand this: \" + str(tokens))\n if self._current_scope:\n #print(self._current_scope.name)\n #print(self._current_scope.parent.name)\n pass\n else:\n #print(\"NO CURRENT SCOPE!\")\n pass\n return \n else:\n scope = class_scope\n assert(isinstance(scope, Scope))\n if len(lvalue_tokens) == 1 or is_assignment_to_member:\n if is_assignment_to_member:\n lvalue_name = lvalue_tokens[2][1] # [ 'self', '.', 'something' ]\n else:\n lvalue_name = lvalue_tokens[0][1]\n scope.variables.add(lvalue_name)\n 
if rvalue_tokens[0][1] == \"[\":\n scope.children[lvalue_name] = ListScope(self._current_scope)\n elif rvalue_tokens[0][1] == \"(\":\n scope.children[lvalue_name] = TupleScope(self._current_scope)\n elif rvalue_tokens[0][1] == \"{\":\n scope.children[lvalue_name] = DictScope(self._current_scope) \n elif rvalue_tokens[0][0] == tokenize.NUMBER:\n scope.children[lvalue_name] = IntScope(self._current_scope)\n elif rvalue_tokens[0][0] == tokenize.STRING:\n scope.children[lvalue_name] = StrScope(self._current_scope)\n else:\n #print(\"TODO: Handle assignment: \", lvalue_tokens, \"=\", rvalue_tokens)\n pass\n\n def _find_parent_scope_of_type(self, scope_type):\n current = self._current_scope\n while current.scope_type != scope_type and current.parent:\n current = current.parent\n \n if current.scope_type != scope_type:\n return None\n \n return current\n \n def _dedent(self):\n to_dedent = self._dedent_stack.pop()\n \n if to_dedent:\n if self._current_scope.parent:\n self._current_scope = self._current_scope.parent\n #print(\"New scope: %s at line %s\" % (self._current_scope.name, self._line_no))\n else:\n #print(\"Ignoring dedent at %s\" % self._line_no)\n pass\n \n def _get_next_token(self):\n while True:\n tok_type, token, (lineno, indent), end, line = next(self._gen) \t\n #print(token)\n if token == \"\\n\" or tok_type == tokenize.NEWLINE: \n self._line_no += 1\n elif \"\\n\" in token:\n self._line_no += token.count(\"\\n\")\n\n if tok_type == DEDENT:\n self._dedent()\n continue\n else:\n break\n\n return tok_type, token, line\n \n def _do_parse(self, file_contents):\n buf = StringIO(file_contents)\n self._gen = tokenize.generate_tokens(buf.readline)\n \n in_block_without_scope = 0\n self._line_no = 0\n self._dedent_stack = []\n \n while True:\n try:\n tok_type, token, line = self._get_next_token()\n \n# print(line, lineno, self._current_line)\n if self._current_line == self._line_no:\n self._active_scope = self._current_scope\n\n \n if token == \"#\" or tok_type == tokenize.COMMENT:\n self._parse_to_end()\n elif tok_type == tokenize.STRING:\n self._parse_to_end()\n elif token == \"class\":\n #print(\"Pushing dedent: %s\" % True) \n self._dedent_stack.append(True)\n self._parse_class()\n elif token == \"def\":\n #print(\"Pushing dedent: %s\" % True)\n self._dedent_stack.append(True)\n self._parse_method()\n elif token == \"from\":\n self._parse_from_import()\n elif token == \"import\":\n self._parse_import()\n elif token in KEYWORDS_THAT_INHERIT_SCOPE: \n tokens = self._parse_to_end()\n token_types = [x[0] for x in tokens ]\n block_finished = False\n if tokenize.COLON in token_types:\n colon_idx = token_types.index(tokenize.COLON)\n tokens_after_colon = [ x for x in token_types[colon_idx+1:] if x not in (tokenize.NEWLINE,) ]\n if len(tokens_after_colon):\n #print(\"IF LINE: \", tokens_after_colon)\n block_finished = True \n #print(\"Pushing dedent: %s\" % block_finished)\n self._dedent_stack.append(block_finished)\n \n elif token in KEYWORDS_THAT_ARE_IGNORED:\n self._parse_to_end()\n elif token in keyword.kwlist:\n #print(\"Unhandled keyword: '\" + token + \"'\")\n self._parse_to_end()\n else:\n if token.strip():\n self._parse_statement(tok_type, token)\n\n except StopIteration:\n break\n \n def get_global_scope(self):\n return self._global\n \n def get_active_scope(self):\n return self._active_scope or self.get_global_scope()\n\nclass Completer(object):\n def __init__(self):\n self._parsers = {}\n self._active_parser = None\n \n def parse_file(self, name, file_content, line):\n try:\n parser 
= FileParser(file_content, current_line=line)\n except (IndentationError, tokenize.TokenError):\n pass\n else:\n self._parsers[name] = parser\n if name in self._parsers:\n self._active_parser = name\n \n def get_completions(self, match):\n \"\"\"\n \t\tGet the completions for match, using the location of the\n \t\tcurrent_line to detect the current scope\n \t\"\"\"\n \t\n #print(\"Completing: \" + match)\n if not self._active_parser:\n return []\n \n parser = self._parsers[self._active_parser]\n scope_at_line = parser.get_active_scope()\n \n parts = match.split(\".\")\n all_possible = set()\n #print(\"Scope: \" + scope_at_line.name)\n all_possible.update(scope_at_line.get_variables())\n all_possible.update(scope_at_line.get_methods())\n all_possible.update(scope_at_line.get_types()) \n all_possible.update(scope_at_line.get_modules())\n \n if match in all_possible:\n all_possible.remove(match) #Don't include the match\n \n matches = []\n \n global_matches = set() \n global_matches.update(parser.get_global_scope().get_variables())\n global_matches.update(parser.get_global_scope().get_methods())\n global_matches.update(parser.get_global_scope().get_types())\n global_matches.update(parser.get_global_scope().get_modules())\n \n if match in global_matches:\n global_matches.remove(match) #Don't include the match \n \n for possible in global_matches:\n if possible.startswith(match) or not match.strip():\n matches.append(possible)\n \n for part in parts: \n if part in all_possible and part in scope_at_line.children:\n scope_at_line = scope_at_line.children[part]\n #print(\"Looking at scope: \" + scope_at_line.name)\n matches = []\n \n all_possible = set()\n all_possible.update(scope_at_line.get_variables())\n all_possible.update(scope_at_line.get_methods())\n all_possible.update(scope_at_line.get_types())\n all_possible.update(scope_at_line.get_modules())\n\n if match in all_possible:\n all_possible.remove(match) #Don't include the match\n else:\n for possible in all_possible:\n if possible.startswith(part):\n matches.append(possible)\n break\n \n return sorted(list(set(matches)))\n\nc = Completer()\ndef complete(file_content, match, line):\n c.parse_file(\"test\", file_content, line)\n return [ { 'abbr' : x } for x in c.get_completions(match) ]\n\nif __name__ == '__main__':\n sample = \"\"\"\n class A(object):\n class_var = 2\n \n def __init__(self):\n self._var_1 = None\n var1, var2 = (a, b)\n \n def _private(self):\n pass\n \n def public(self, other):\n self._var_1 = 1\n var2 = 2\n \n def submethod(_something):\n pass\n \n with open(\"x.txt\") as f:\n data = f.read()\n \n if self == other:\n g = 1\n \n try:\n pass\n except Something:\n pass\n except Something, e:\n pass\n except (Something, SomethingElse):\n pass\n except (Something, SomethingElse), e:\n pass\n \n class B: pass\n class C(object): pass\n \n def main():\n a = A()\n \"\"\"\n \n matches = complete(sample, \"va\", 6)\n #print(\"Matches are: %s\" % matches)\n matches = complete(sample, \"se\", 6)\n #print(\"Matches are: %s\" % matches)\n matches = complete(sample, \"self.\", 6)\n #print(\"Matches are: %s\" % matches)\n\n parser = FileParser(sample)\n global_scope = parser.get_global_scope()\n \n assert global_scope.name == \"__global__\"\n assert \"A\" in global_scope.types\n assert \"B\" in global_scope.types\n assert \"main\" in global_scope.methods\n\n assert \"A\" in global_scope.children\n assert \"__init__\" in global_scope.children[\"A\"].methods\n assert \"_private\" in global_scope.children[\"A\"].methods\n assert \"public\" 
in global_scope.children[\"A\"].methods\n assert \"class_var\" in global_scope.children[\"A\"].variables\n \n assert \"a\" in global_scope.children[\"main\"].variables\n assert \"submethod\" in global_scope.children[\"A\"].children[\"public\"].methods\n assert \"f\" in global_scope.children[\"A\"].children[\"public\"].variables\n \n assert \"self\" in global_scope.children[\"A\"].children[\"public\"].variables\n assert \"self\" in global_scope.children[\"A\"].children[\"public\"].children\n \n #Self should inherit the parent class scope\n assert \"public\" in global_scope.children[\"A\"].children[\"public\"].children[\"self\"].methods\n \n #Unknown variables should inherit the \"object\" scope and so should contain __class__ etc.\n assert \"__class__\" in global_scope.children[\"A\"].children[\"public\"].children[\"other\"].methods\n\n assert \"g\" in global_scope.children[\"A\"].children[\"public\"].variables\n","sub_path":"pythoncodecompletion/code_complete.py","file_name":"code_complete.py","file_ext":"py","file_size_in_byte":23840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323399846","text":"from glob import glob\nimport argparse\n\nimport pandas as pd\nimport numpy as np\n\nfrom imet.n04_dataset import DATA_ROOT, N_CLASSES\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('--predictions', type=str, default='*/test.h5')\n arg('--threshold', type=float, default=0.13)\n arg('--output', type=str, default='submission.csv')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n lst = []\n for fn in sorted(glob(args.predictions)):\n df = pd.read_hdf(fn, 'prob')\n lst.append(df.drop('id', axis=1).values)\n\n if len(lst) > 1:\n print(f'avg {len(lst)} checkpoints')\n arr = np.mean(np.array(lst), axis=0)\n else:\n arr = lst[0]\n\n attrs = []\n for i in range(arr.shape[0]):\n index = np.where(arr[i] > args.threshold)[0]\n attrs.append(' '.join(index.astype(str)))\n\n print(attrs[:10])\n\n subm = pd.DataFrame()\n subm['id'] = df['id']\n subm['attribute_ids'] = attrs\n subm.to_csv(args.output, index=False)\n print(subm.shape)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"imet/n06_make_submission.py","file_name":"n06_make_submission.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645612304","text":"\"\"\"\n======================\n@author:LiXuejin\n\n@time:2019/10/31:22:55\n\n@email:lixuejin@fang.com\n======================\n\"\"\"\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom scripts.context import LOG_PATH_FILE\n\n\nclass MyLogger(object):\n\n def __init__(self):\n # Create a log collector\n self.my_log = logging.getLogger(\"log\")\n # Set the log collector's level\n self.my_log.setLevel(\"INFO\")\n # Define the log collector's output channels\n self.console_handler = logging.StreamHandler() # output to the console\n # output to a file\n self.file_handler = RotatingFileHandler(filename=LOG_PATH_FILE,\n mode=\"a\",\n maxBytes=1024 * 1024,\n backupCount=3,\n encoding=\"utf-8\")\n\n # Set the level of each output channel\n self.console_handler.setLevel(\"INFO\")\n self.file_handler.setLevel(\"INFO\")\n # Set the log output format\n formatter = logging.Formatter(\"%(asctime)s - [%(levelname)s] - %(module)s - %(name)s - \"\n \"%(lineno)d - [log message]:%(message)s\")\n self.console_handler.setFormatter(formatter)\n self.file_handler.setFormatter(formatter)\n\n # Attach the channels to the collector\n self.my_log.addHandler(self.console_handler)\n self.my_log.addHandler(self.file_handler)\n\n def get_logger(self):
\n return self.my_log\n\n\nlogger = MyLogger().get_logger()\n\nif __name__ == '__main__':\n logger = MyLogger().get_logger()\n logger.info(\"test\")\n","sub_path":"unittest_exercise/scripts/mylogger.py","file_name":"mylogger.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255494372","text":"import requests\nimport urllib3\nimport time\nimport sys\nimport os\nfrom bs4 import BeautifulSoup\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nheaders = {\n \"Connection\":\"close\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Sec-Fetch-User\": \"?1\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Referer\": \"https://dns.aizhan.com/\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cookie\": \"_csrf=938dc864deae8c2955c6ef0de7c6d296061de204633d782e219644443cc6b513a%3A2%3A%7Bi%3A0%3Bs%3A5%3A%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22Dy-kdF9NOyJpwnn-jefIbnP8JchiuwId%22%3B%7D; Hm_lvt_b37205f3f69d03924c5447d020c09192=1581833140; allSites=www.zjrarj.com%2C0; Hm_lpvt_b37205f3f69d03924c5447d020c09192=1581841964\"\n}\nex_str = '暂无域名解析到该IP' # the site's Chinese 'no domain resolves to this IP' message; kept as-is so the match still works\nfil_href_list = []\nhref_list = []\nfile_name = sys.argv[1]\ntar_file = open(file_name,'r')\nfile_lines = tar_file.readlines()\ntry:\n os.mkdir('./output')\nexcept:\n pass\n\nfor target in file_lines:\n tar = target.strip()\n href_list = [] # reset per target so each output file only lists this host's records\n ori_url = \"https://dns.aizhan.com/\" + tar + \"/\"\n url_num = 1\n cou = 0\n res = requests.get(url=ori_url + str(url_num) + '/',headers=headers,verify=False)\n bs = BeautifulSoup(res.text,'lxml')\n\n if ex_str in str(bs):\n print(tar + ':' + ex_str)\n else:\n while ex_str not in str(bs):\n for item in bs.find_all('a'):\n if 'nofollow' in str(item) and 'ICP' not in str(item):\n href_list.append(str(item.get('href')))\n cou += 1\n\n # finished collecting the current page, move on to the next one\n url_num += 1\n url = ori_url + str(url_num) + '/'\n res = requests.get(url=url, headers=headers, verify=False)\n bs = BeautifulSoup(res.text, 'lxml')\n time.sleep(1)\n\n res_file_name = '[' + str(cou) + '] ' + tar + '.txt'\n res_file = open('./output/' + res_file_name,'w')\n\n for res in href_list:\n res_file.write(res + '\\\\n')\n res_file.close()\n\n print('[+] ' + tar + ': collected ' + str(cou) + ' records')","sub_path":"dns_aizhan.py","file_name":"dns_aizhan.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"605003489","text":"def main():\n number = 729\n print(f'Binary form of {number} is {bin(number)}')\n\n print('\\tBinary -> Gray')\n gray = binary_to_gray(number)\n print(f'Number {bin(number)} converted to Gray code: {bin(gray)}')\n\n print('\\tGray -> Binary')\n binary = gray_to_binary(gray)\n print(f'Number in Gray code {bin(gray)} converted to binary: {bin(binary)}')\n\n\ndef binary_to_gray(number):\n \"\"\"Convert binary to Gray code\"\"\"\n if isinstance(number, str):\n number = int(number, 2)\n\n return number ^ (number >> 1)\n\n\ndef gray_to_binary(number):\n \"\"\"Convert Gray code to binary\"\"\"\n if isinstance(number, str):\n number = int(number, 2)\n\n mask = number >> 1\n while mask:\n number ^= mask\n mask >>= 1\n return number\n\n\nif __name__ == '__main__':\n 
main()\n\n","sub_path":"Lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"282126182","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-10-10 上午12:01\n# @Author : sadscv\n# @File : split_train_valid_test.py\n\nimport json\ndata_split = open('data_split.json', 'w+')\n\nvalid_data = open('valid_data.json', 'w+')\nwith open('data.json','r+') as data:\n file = json.load(data)\n out = {}\n out['images'] = []\n valid_out = {}\n valid_out['images'] = []\n for i in range(len(file['images'])):\n tmp_dict = file['images'][i]\n print('count:{}, info:{}'.format(i, tmp_dict))\n if i >= 200000 and i < 205000:\n tmp_dict['split'] = 'val'\n print(i, tmp_dict)\n valid_out['images'].append(tmp_dict)\n if i >= 205000:\n tmp_dict['split'] = 'test'\n print(i, tmp_dict)\n out['images'].append(tmp_dict)\n out['ix_to_word'] = file['ix_to_word']\n json.dump(out, data_split)\n json.dump(valid_out, valid_data)\n\n# with open('data_split.json','r') as data:\n# file = json.load(data)\n# count = 0\n# for i in file['images']:\n# print('count:{}, info:{}'.format(i, 1))","sub_path":"scripts/split_train_valid_test.py","file_name":"split_train_valid_test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92393952","text":"import sys\nimport random\nm = [[0]*3 for i in range(3)]\nidx1 = 0\nidx2 = 0\n\nwhile not(idx1 == (len(m) - 1) and idx2 == (len(m) - 1)):\n\tdirec = random.randint(0, 1)\n\tif direc == 0 and not idx1 == (len(m) - 1): \n\t\tidx1 += 1\n\t\tm[idx1][idx2] = 1\n\telif direc == 1 and not idx2 == (len(m) - 1):\n\t\tidx2 += 1\n\t\tm[idx1][idx2] = 1\n\telse:\n\t\tbreak\n\nl = [int (i) for i in sys.argv[1:]]\n\nn = len(l)\ncount = [1]*n\nfor i in range (1 , n):\n\tfor j in range(0 , i):\n\t\tif l[i] > l[j] and count[i] < count[j] + 1 :\n\t\t\tcount[i] = count[j] + 1\n\nprint(max(count))","sub_path":"1.Nature-Inspired-Computing-lab/LIS-genetic.py","file_name":"LIS-genetic.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"221405042","text":"# encoding: utf-8\n\"\"\"\nauthor: willy au\ndate: 15 Nov. 
2017\n\nTime complexity of heap sort algorithm:\n + Average -> O(n.log(n))\n + Worst -> O(n.log(n))\n\nSpace complexity: the array is modified in place, therefore:\n + O(1)\n\nWe use a max heap to sort the array here.\nFor an array of size n, indices run from 0 to n-1.\nThe parent of node i is given by (i-1)//2.\nThe children of node i are given by indices 2*i+1 and 2*i+2.\n\"\"\"\n\n\n\ndef climb(heap, i):\n \"\"\" Assume data from 0 to i-1 is a max_heap \"\"\"\n if i > 0:\n key = heap[i]\n while key > heap[(i-1)//2]:\n heap[i] = heap[(i-1)//2]\n i = (i-1)//2\n if i == 0:\n break\n heap[i] = key\n\n\ndef exchange(heap, i, j):\n heap[i], heap[j] = heap[j], heap[i]\n\n\ndef descent(heap, start, end):\n if start < end:\n i = start\n key = heap[i]\n found = False\n while not found and i <= (end-1)//2:\n i_left, i_right = 2*i+1, 2*i+2\n # edge case with only one child\n if i_left == end:\n i_next = i_left\n # else check which child to pick\n else:\n if heap[i_left] > heap[i_right]:\n i_next = i_left\n else:\n i_next = i_right\n if key >= heap[i_next]:\n found = True\n else:\n heap[i] = heap[i_next]\n i = i_next\n heap[i] = key\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(1, n):\n climb(array, i)\n for i in range(1, n):\n exchange(array, 0, n-i)\n descent(array, 0, n-i-1)\n\n","sub_path":"algo/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"254833022","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: AS\n\"\"\"\n\ndef shop_list(dishes, person = 1):\n cook_book = {}\n with open('cook_book.txt') as f:\n for line in f:\n dish = line.strip() # line 1 is the dish name\n qty = f.readline().strip() # number of ingredients\n ingredients_list = [] \n for i in range(int(qty)): # loop over the ingredients\n ingredient = f.readline().strip().split(' | ') \n ingredients_list.append(dict(zip(['ingredients_name', 'qty', 'measure'], ingredient)))\n f.readline() # read the separator line\n cook_book[dish] = ingredients_list\n shop_dict = {}\n ingr_list = []\n for dish in dishes:\n if dish in cook_book.keys(): # check that the dish is a key in the cook_book dict and fetch its ingredients\n for i in range(len(cook_book[dish])):\n ingr_list.append(cook_book[dish][i])\n key_ingr_list = ['measure', 'quantity']\n for i in range(len(ingr_list)):\n qty_msr = [ingr_list[i]['measure'], int(ingr_list[i]['qty'])*person]\n ingr_name = ingr_list[i]['ingredients_name']\n qty_msr_dict = dict(zip(key_ingr_list, qty_msr))\n ingr_qty_msr_dict = {ingr_name : qty_msr_dict}\n shop_dict.update(ingr_qty_msr_dict)\n print(shop_dict)\n \nshop_list(['Омлет', 'Запеченный картофель'], 5) # dish names stay in Russian to match the entries in cook_book.txt\n","sub_path":"HW-open_file/cook_book_final2.py","file_name":"cook_book_final2.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136954974","text":"import unittest\n\nfrom robarray import RobArray\n\nclass TestCase(unittest.TestCase):\n \n def test_should_set_and_get_element(self):\n arr = RobArray(1)\n \n arr[0] = 0\n \n self.assertEqual(0, arr[0])\n\n\n def test_should_return_correct_length(self):\n arr = RobArray(5)\n\n self.assertEqual(5, len(arr))\n \n \n def test_should_clear_values_with_99(self):\n arr = RobArray(1)\n value = 99\n \n arr.clear(value)\n\n for i in range(len(arr)):\n self.assertEqual(value, arr[i])\n\n \n def test_should_iterate_over_all_elements(self):\n arr = RobArray(5, value=0)\n\n count = 0\n for element in arr:
\n count += 1\n\n self.assertEqual(len(arr), count)","sub_path":"practice/arrays/test_array.py","file_name":"test_array.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522411801","text":"from typing import List\n\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n result = nums[0]\n for i in range(1, len(nums)):\n result = result ^ nums[i]\n return result\n\n\nt = Solution()\nprint(t.singleNumber([4, 1, 2, 1, 2]))\n","sub_path":"src/Single Number.py","file_name":"Single Number.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"569880893","text":"# -*- coding: utf-8 -*-\r\n\r\nimport math\r\nfrom torch import FloatTensor, LongTensor, arange\r\n\r\n\r\n# %% Parameter class\r\nclass Parameter(object):\r\n \"\"\"Object representing a model parameter along with its gradient.\r\n\r\n Args:\r\n pytorch_tensor (Tensor): Float or LongTensor that the parameter should\r\n be initialized to.\r\n\r\n Attributes:\r\n data (Tensor): tensor containing the parameter value.\r\n grad (Tensor): tensor of the same shape as data containing the current\r\n gradient.\r\n \"\"\"\r\n\r\n def __init__(self, pytorch_tensor):\r\n self.data = pytorch_tensor.clone() # tensors passed by reference\r\n self.grad = pytorch_tensor.clone().fill_(0) # maintains Float or Long\r\n\r\n def zero_grad(self):\r\n \"\"\"Set gradient to zero.\r\n \"\"\"\r\n self.grad.fill_(0)\r\n\r\n\r\n# %% Module class and child classes\r\n\r\nclass Module (object):\r\n \"\"\" Base class for Modules.\r\n Modules are intended as the elementary building blocks making up complex\r\n neural networks.\r\n \"\"\"\r\n def forward(self, * input):\r\n raise NotImplementedError\r\n\r\n def backward(self, * gradwrtoutput):\r\n raise NotImplementedError\r\n\r\n def param(self):\r\n return []\r\n\r\n def zero_grad(self):\r\n pass\r\n\r\n\r\nclass Linear(Module):\r\n \"\"\"Fully-connected linear layer.\r\n\r\n Args:\r\n input_units (int): number of input units to the layer.\r\n output_units (int): number of output units to the layer.\r\n bias (bool): indicate whether a bias should be added to each output.\r\n Default: True.\r\n nonlinearity (str): non-linear activation layer which the output is fed\r\n to. This will affect weight initialization. Default: sigmoid (for\r\n an initialization gain of 1).\r\n\r\n Attributes:\r\n input_units (int): number of input units to the layer.\r\n output_units (int): number of output units to the layer.\r\n \"\"\"\r\n\r\n def __init__(self, input_units, output_units, bias=True, nonlinearity=None):\r\n super(Linear, self).__init__()\r\n self.input_units = input_units\r\n self.output_units = output_units\r\n self.weights = Parameter(FloatTensor(output_units, input_units))\r\n self.input = None # last input used for forward pass\r\n\r\n if bias:\r\n self.bias = Parameter(FloatTensor(output_units))\r\n else:\r\n self.bias = None\r\n\r\n if nonlinearity is None:\r\n nonlinearity = 'sigmoid'\r\n\r\n self._initialize_parameters(nonlinearity)\r\n\r\n def _initialize_parameters(self, nonlinearity):\r\n \"\"\"Initialize weights and biases of fully-connected layer so that the\r\n variance of the activations is controlled.\r\n Args:\r\n nonlinearity (str): 'sigmoid', 'tanh', 'relu' or None. Scales the\r\n variance of the parameters to account for the effect of the\r\n non-linear activation. 
This makes most sense if the same non-\r\n linear activation is used throughout the network.\r\n\r\n \"\"\"\r\n # Variance correction associated with nonlinear activation of network\r\n nonlinearity = nonlinearity.lower()\r\n if nonlinearity == 'sigmoid':\r\n self.init_gain = 1.0\r\n elif nonlinearity == 'tanh':\r\n self.init_gain = 5.0 / 3.0\r\n elif nonlinearity == 'relu':\r\n self.init_gain = math.sqrt(2.0)\r\n else:\r\n raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\r\n\r\n # Control variance of activations (not of gradients)\r\n uniform_corr = math.sqrt(3) # account for uniform distribution\r\n stdv = uniform_corr * self.init_gain / math.sqrt(self.input_units)\r\n # TMP this is to match PyTorch\r\n stdv = 1/ math.sqrt(self.input_units)\r\n self.weights.data.uniform_(-stdv, stdv)\r\n if self.bias is not None:\r\n self.bias.data.uniform_(-stdv, stdv)\r\n\r\n def param(self):\r\n if self.bias is not None:\r\n return [self.weights, self.bias]\r\n else:\r\n return [self.weights]\r\n\r\n def zero_grad(self):\r\n for p in self.param():\r\n p.zero_grad()\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n Applies forward pass of fully connected layer to input x\r\n\r\n Args:\r\n x (FloatTensor): if 2D must have size Nb x input_units, where Nb\r\n is the batch size. If x is 1D, it is assumed that Nb=1.\r\n\r\n Returns:\r\n FloatTensor: Nb x output_units, always 2D.\r\n\r\n \"\"\"\r\n # store current input for backward pass\r\n self.input = x.clone().view(-1, self.input_units)\r\n\r\n if self.bias is not None:\r\n # automatic broadcasting for bias:\r\n return self.input.mm(self.weights.data.t()) + self.bias.data.view(-1, self.output_units)\r\n else:\r\n return self.input.mm(self.weights.data.t())\r\n\r\n def backward(self, dl_dout):\r\n \"\"\"\r\n Applies backward pass of fully connected layer starting from gradient\r\n with respect to current output.\r\n\r\n Args:\r\n dl_dout (FloatTensor): if 2D, must have size Nb x output_units,\r\n where Nb is the batch size. 
If 1D, it is assumed that Nb=1.\r\n Contains the derivative of the batch loss with respect to\r\n each output unit, for each batch sample, of the current\r\n backward pass.\r\n\r\n Returns:\r\n FloatTensor: Nb x input_units tensor containing the derivative of\r\n the batch loss with respect to each input unit, for each batch\r\n sample, of the current backward pass.\r\n \"\"\"\r\n ndim = len(list(dl_dout.size()))\r\n assert ndim > 0, \"dl_dout argument cannot be empty\"\r\n Nb = 1 # case where dl_dout is 1D, only one sample\r\n if ndim > 1:\r\n Nb = dl_dout.size(0)\r\n\r\n # Gradient increment for weights (broadcasting for batch-processing)\r\n # (sum contributions of all samples in the batch)\r\n grad_inc = (dl_dout.view(Nb, self.output_units, 1) *\r\n self.input.view(Nb, 1, self.input_units)).sum(0)\r\n self.weights.grad.add_(grad_inc)\r\n\r\n # Gradient increment for bias\r\n # (sum of contributions of all samples in the batch)\r\n if self.bias is not None:\r\n self.bias.grad.add_(dl_dout.view(Nb, self.output_units).sum(0))\r\n\r\n # Return dl_din\r\n return dl_dout.view(Nb, self.output_units).mm(self.weights.data)\r\n\r\n\r\nclass ReLU(Module):\r\n \"\"\"Rectified linear unit activation layer.\r\n \"\"\"\r\n def __init__(self):\r\n super(ReLU, self).__init__()\r\n self.input = None # last input used for forward pass\r\n\r\n def forward(self, x):\r\n # store current input for backward pass\r\n self.input = x.clone()\r\n\r\n out = x.clone()\r\n out[x < 0] = 0\r\n return out\r\n\r\n def backward(self, dl_dout):\r\n return dl_dout * self._grad(self.input)\r\n\r\n def _grad(self, x):\r\n dout_dx = x.clone().fill_(1)\r\n dout_dx[x < 0] = 0\r\n return dout_dx\r\n\r\n @staticmethod\r\n def nonlin_str():\r\n return \"relu\"\r\n\r\n\r\nclass Tanh(Module):\r\n \"\"\"Hyperbolic tangent activation layer.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(Tanh, self).__init__()\r\n self.input = None # last input used for forward pass\r\n\r\n def forward(self, x):\r\n # store current input for backward pass\r\n self.input = x.clone()\r\n return x.tanh()\r\n\r\n def backward(self, dl_dout):\r\n return dl_dout * self._grad(self.input)\r\n\r\n def _grad(self, x):\r\n return 1-x.tanh().pow(2)\r\n\r\n @staticmethod\r\n def nonlin_str():\r\n return \"tanh\"\r\n\r\n\r\nclass Sigmoid(Module):\r\n \"\"\"Sigmoid activation layer.\r\n \"\"\"\r\n def __init__(self):\r\n super(Sigmoid, self).__init__()\r\n self.input = None\r\n\r\n def forward(self, x):\r\n # store current input for backward pass\r\n self.input = x.clone()\r\n return x.sigmoid()\r\n\r\n def backward(self, dl_dout):\r\n return dl_dout * self._grad(self.input)\r\n\r\n def _grad(self, x):\r\n return (x.exp() + x.mul(-1).exp() + 2).pow(-1)\r\n\r\n @staticmethod\r\n def nonlin_str():\r\n return \"sigmoid\"\r\n\r\n\r\nclass Sequential(Module):\r\n\r\n def __init__(self, moduleList):\r\n super(Sequential, self).__init__()\r\n self.moduleList = moduleList\r\n self.input = None\r\n\r\n def forward(self, x):\r\n self.input = x.clone()\r\n output = x.clone()\r\n for m in self.moduleList:\r\n output = m.forward(output)\r\n return output\r\n\r\n def backward(self, dl_dout):\r\n dl_din = dl_dout.clone()\r\n for m in self.moduleList[::-1]:\r\n dl_din = m.backward(dl_din)\r\n return dl_din\r\n\r\n def param(self):\r\n par_list = []\r\n for m in self.moduleList:\r\n par_list.extend(m.param())\r\n return par_list\r\n\r\n def zero_grad(self):\r\n for m in self.moduleList:\r\n m.zero_grad()\r\n\r\n\r\nclass LogSoftMax(Module):\r\n \"\"\" Layer that applies logarithm 
and softmax component-wise.\r\n \"\"\"\r\n def __init__(self):\r\n super(LogSoftMax, self).__init__()\r\n self.input = None\r\n\r\n def forward(self, x):\r\n self.input = x.clone()\r\n # shift by max for numerical stability\r\n x_norm = x - x.max(dim=1, keepdim=True)[0]\r\n e_x = x_norm.exp()\r\n return x_norm - e_x.sum(dim=1, keepdim=True).log()\r\n\r\n def backward(self, dl_dout):\r\n # shift by max for numerical stability\r\n x_norm = self.input - self.input.max(1, keepdim=True)[0]\r\n e_x = x_norm.exp() # b_size x dim\r\n softmax_x = e_x / e_x.sum(dim=1, keepdim=True) # div uses broadcasting to keep b_size x dim\r\n return (-softmax_x * dl_dout.sum(dim=1, keepdim=True)) + dl_dout # mul uses braodcasting\r\n\r\n\r\n# %% Loss functions\r\nclass Loss(object):\r\n \"\"\" Base class for Loss functions.\r\n \"\"\"\r\n def loss(self, output, target):\r\n raise NotImplementedError\r\n\r\n def backward(self, output, target):\r\n \"\"\"\r\n Returns derivative of loss(output, target) with respect to output.\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\nclass MSELoss(Loss):\r\n \"\"\" Mean squared error loss\r\n \"\"\"\r\n def loss(self, output, target):\r\n \"\"\"Computes the sum of the squared differences between each sample of\r\n the batch and sums over all batch samples.\r\n\r\n Args:\r\n output (FloatTensor): model output, of size Nb x d1 x d2 x ...\r\n where Nb is the batch size.\r\n target (Tensor): must have the same size as output. Converted to\r\n FloatTensor automatically.\r\n\r\n Returns:\r\n float: a scalar floating-point value.\r\n \"\"\"\r\n return (output-target.float()).pow(2).sum()/output.size(0) # sum over all samples and entries\r\n\r\n def backward(self, output, target):\r\n return 2 * (output-target.float()) / output.size(0)\r\n\r\n\r\nclass NLLLoss(Loss):\r\n \"\"\" Negative log likelihood loss.\r\n \"\"\"\r\n\r\n def loss(self, output, target):\r\n \"\"\"\r\n \"\"\"\r\n # Get dimension\r\n ndim = len(list(output.size()))\r\n assert ndim == 2, \"output argument must have size Nb x d\"\r\n Nb = output.size(0)\r\n out_dim = output.size(1)\r\n # sum the \"on-target\" activations across the batch:\r\n return - output.view(-1)[arange(0, Nb).long()*out_dim + target.long()].sum()/Nb\r\n\r\n def backward(self, output, target):\r\n dl_din = FloatTensor(output.size()).fill_(0)\r\n # Get dimension\r\n ndim = len(list(output.size()))\r\n assert ndim == 2, \"output argument must have size Nb x d\"\r\n\r\n Nb = output.size(0)\r\n for i in range(Nb):\r\n dl_din[i, target[i]] = -1/Nb\r\n return dl_din\r\n\r\n\r\nclass CrossEntropyLoss(Loss):\r\n \"\"\" Cross entropy loss.\r\n\r\n Equivalent to using NLLLoss and adding a final LogSoftMax layer to\r\n the network.\r\n\r\n Attributes:\r\n nll (NLLLoss): NLL loss is used internally, coupled with self.lsm\r\n lsm (LogSoftMax): internal LogSoftMax Module to simulate adding an\r\n extra LogSoftMax layer to the network being trained.\r\n \"\"\"\r\n def __init__(self):\r\n super(CrossEntropyLoss, self).__init__()\r\n self.nll = NLLLoss()\r\n self.lsm = LogSoftMax()\r\n\r\n def loss(self, output, target):\r\n return self.nll.loss(self.lsm.forward(output), target)\r\n\r\n def backward(self, output, target):\r\n # input -> LSM -> s -> NLL -> output\r\n dl_ds = self.nll.backward(output, target)\r\n return self.lsm.backward(dl_ds)\r\n\r\n\r\n# %% Optimizer class\r\nclass Optimizer(object):\r\n \"\"\" Base class for optimizers.\r\n \"\"\"\r\n def __init__(self, params):\r\n self.params = params\r\n\r\n def step(self, * input):\r\n raise 
NotImplementedError\r\n\r\n\r\nclass SGD(Optimizer):\r\n \"\"\" Stochastic gradient descend with fixed learning rate and momentum.\r\n\r\n Args:\r\n params (iterable of type Parameter): iterable yielding the parameters\r\n (Parameter objects) of the model to optimize, typically a list.\r\n lr (float): strictly positive learning rate or gradient step length\r\n momentum (float): non-negative weight of the momentum or inertia term\r\n (Rumelhart et al., Nature 1986). If set to 0, vanilla SGD is\r\n performed. Default: 0.\r\n\r\n Attributes:\r\n params (iterable of type Parameter): iterable yielding the parameters\r\n of the model to optimize, typically a list of Parameter objects.\r\n lr (float): learning rate or gradient step length.\r\n momentum (float): momentum or inertia term.\r\n step_buf (dict): contains the previous increment for each parameter if\r\n momentum is non-zero.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, params, lr, momentum=0):\r\n super(SGD, self).__init__(params)\r\n assert lr > 0, \"learning rate should be strictly positive\"\r\n self.lr = lr\r\n assert momentum >= 0, \"momentum term should be non-negative\"\r\n self.momentum = momentum\r\n if self.momentum > 0:\r\n self.step_buf = {}\r\n for p in self.params:\r\n self.step_buf[p] = FloatTensor(p.grad.size()).zero_()\r\n\r\n def step(self):\r\n \"\"\"Updates the parameters of model using either vanilla SGD (if\r\n momentum is zero) or SGD with inertia. Parameter steps are used for the\r\n next iteration if an inertia term is present.\r\n \"\"\"\r\n for p in self.params:\r\n param_step = self.lr * p.grad\r\n if self.momentum > 0:\r\n param_step.add_(self.momentum * self.step_buf[p])\r\n self.step_buf[p] = param_step.clone()\r\n p.data.add_(-param_step)\r\n\r\n\r\n# %% Train and test functions\r\n\r\ndef compute_nb_errors(model, data_input, data_target, one_hot=False, batch_size=100):\r\n \"\"\"Compute number of classification errors of a given model.\r\n\r\n Args:\r\n model (Module): Module trained for classification\r\n data_input (FloatTensor): must have 2D size Nb x Nin, where Nb\r\n is the batch size and Nin the number of input units of model.\r\n data_target (FloatTensor): must have 2D size Nb x Nout, where Nout\r\n is the number of output units of the model, which should match the\r\n number of classes. One-hot encoding must be used, i.e.\r\n data_target[i,j]=1 if data sample i belongs to class j, and\r\n data_target[i,j]=-1 otherwise.\r\n one_hot (bool): specify if one-hot encoding was used for the target.\r\n Default: False.\r\n batch_size (int): batch size which should be used for an efficient\r\n forward pass. Does not necessarily need to be a divider of the\r\n number of data samples, althgough this is often desirable for\r\n statistical reasons. Note that this parameter does not influence\r\n model training at all. 
Default: 100.\r\n \"\"\"\r\n Ndata = data_input.size(0)\r\n if one_hot:\r\n data_label = data_target.max(dim=1)[1]\r\n nb_errors = 0\r\n for b_start in range(0, data_input.size(0), batch_size):\r\n # account for boundary effects:\r\n bsize_eff = batch_size - max(0, b_start + batch_size - Ndata)\r\n # batch output has size Nbatch x 2 if one_hot=True, Nbatch otherwise:\r\n batch_output = model.forward(data_input.narrow(0, b_start, bsize_eff))\r\n if one_hot:\r\n pred_label = batch_output.max(dim=1)[1] # has size Nbatch\r\n nb_err_batch = 0\r\n for k in range(bsize_eff):\r\n if data_label[b_start+k] != pred_label[k]:\r\n nb_err_batch = nb_err_batch + 1\r\n else:\r\n nb_err_batch = (batch_output.max(1)[1] !=\r\n data_target.narrow(0, b_start, bsize_eff)).long().sum()\r\n # conversion to Long solves serious overflow problem; otherwise the above results are treated as 1-byte short ints\r\n nb_errors += nb_err_batch\r\n return nb_errors\r\n\r\n\r\ndef train_model(model, train_input, train_target, criterion, optimizer, n_epochs=50, batch_size=100, log_loss=False, one_hot=None):\r\n \"\"\"Train model.\r\n\r\n Args:\r\n model (Module)\r\n train_input (FloatTensor)\r\n train_target (Tensor): converted to float if needed\r\n criterion (Loss): loss function\r\n optimizer (Optimizer): optimizer\r\n n_epochs (int)\r\n batch_size (int)\r\n log_loss (bool): set to True to print training progress a few times.\r\n one_hot (bool): if specified, used to print the train error.\r\n Default:None\r\n\r\n \"\"\"\r\n Nprint_stdout = 5 # number of times loss is printed to standard output\r\n Ntrain = train_input.size(0)\r\n for i_ep in range(n_epochs):\r\n ep_loss = 0.0\r\n for b_start in range(0, Ntrain, batch_size):\r\n model.zero_grad()\r\n\r\n # account for boundary effects\r\n bsize_eff = batch_size - max(0, b_start + batch_size - Ntrain)\r\n\r\n # forward pass\r\n output = model.forward(train_input.narrow(0, b_start, bsize_eff))\r\n batch_loss = criterion.loss(output, train_target.narrow(0, b_start, bsize_eff))\r\n ep_loss += batch_loss\r\n\r\n # backward pass\r\n dl_dout = criterion.backward(output, train_target.narrow(0, b_start, bsize_eff))\r\n dl_dx = model.backward(dl_dout)\r\n\r\n # parameter update\r\n optimizer.step()\r\n\r\n # print progress\r\n err_str = \"\" # training error rate to be displayed\r\n if one_hot is not None:\r\n ep_err = compute_nb_errors(model, train_input, train_target, one_hot)\r\n err_str = \"(error rate {:3.2g} %)\".format(100*ep_err/Ntrain)\r\n\r\n if log_loss and i_ep % round(n_epochs/Nprint_stdout) == 0:\r\n print(\"epoch {:d}/{:d}: training loss {:4.3g} {:s}\"\r\n \"\".format(i_ep+1, n_epochs, ep_loss, err_str))\r\n\r\n# %% Create toy example MLP\r\ndef create_miniproject2_model(nonlin_activ=ReLU):\r\n \"\"\"Create the neural network used in the validation of mini-project 2\r\n \"\"\"\r\n nonlin = nonlin_activ.nonlin_str()\r\n\r\n fc1 = Linear(2, 25, nonlinearity=nonlin)\r\n fc2 = Linear(25, 25, nonlinearity=nonlin)\r\n fc3 = Linear(25, 25, nonlinearity=nonlin)\r\n fc_out = Linear(25, 2)\r\n model = Sequential([fc1, nonlin_activ(),\r\n fc2, nonlin_activ(),\r\n fc3, nonlin_activ(),\r\n fc_out])\r\n return model\r\n","sub_path":"MiniProjet2/Proj2_Cleres_Lesimple_Rensonnet/nn_group14.py","file_name":"nn_group14.py","file_ext":"py","file_size_in_byte":19682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307937757","text":"#Lists are data types in python\n#They are the equivalent of arrays in other languages\nmy_list = 
[]\nmy_other_list = list()\n\nlist_a = [\"a\",\"b\",\"c\",\"d\"] #list of strings\nlist_b = [1,2,3,4,5,6] #list of numbers\nlist_c = [1,\"west\",34,\"longitude\"] #mixed list\n\n#nesting lists\nlist_d = [ [\"a\",\"b\",\"c\",\"d\"], [1,2,3,4,5,6], [1,\"west\",34,\"longitude\"] ]\n\n#Two lists can be joined using the extend() method\nlist_a = [\"a\",\"b\",\"c\",\"d\"]\nlist_b = [1,2,3,4,5,6]\n\n#join list_b to list_a\nlist_a.extend(list_b)\n\nprint(list_a)\nprint(list_b)\n\n#append() function\n#used to append values to the list\nlist_e = [\"a\",\"b\",\"c\",\"d\"]\nlist_e.append(\"e\")\nprint(list_e)\n\n#arranging lists using the reverse() and sort() methods\nlist_e = [\"a\",\"b\",\"c\",\"d\"]\nlist_e.reverse() #reverse() method reverses the list order\nprint(list_e) #['d', 'c', 'b', 'a']\n\n#sort() method\nlist_f = [\"e\",\"d\",\"c\",\"b\",\"a\"]\nlist_f.sort()\nprint(list_f)\n\n#reverse and sort methods only work on the lists they were called on and have no return values\n\n\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"338087251","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport pymongo\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\nlinks = []\n\napp = MongoClient('localhost', 27017)\ndb2 = app[\"database_3\"]\ncollection2 = db2[\"events\"]\n\nwith open('urls.txt', 'r') as file:\n for row in file:\n res = requests.get(row.strip())\n if res.ok:\n result = BeautifulSoup(res.text, 'html.parser')\n capital = result.find('tr', {'id': 'places_capital__row'}).find('td', {'class': 'w2p_fw'})\n pays = result.find('tr', {'id': 'places_country__row'}).find('td', {'class': 'w2p_fw'})\n population = result.find('tr', {'id': 'places_population__row'}).find('td', {'class': 'w2p_fw'})\n data = {\"Pays\": pays.text, \"Capital\": capital.text, \"Population\": population.text, \"Date\": datetime.now()}\n collection2.insert_one(data)\n print(\"Data inserted\")\n time.sleep(2) ### delay to avoid spamming and to make the event-arrival scenario more realistic ","sub_path":"scraping2.py","file_name":"scraping2.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"1331395","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 10:48:20 2021\n\n@author: juliafoyer\n\"\"\"\n\nimport random\nimport numpy as np\nimport scanpy as sc\nimport anndata as an\nimport scipy.stats as st\nfrom scipy.spatial import KDTree\nimport math\nfrom numba import njit\nfrom numba import jit\nfrom typing import *\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\ndef get_gene_identity(adata):\n \"\"\" Expand the count matrix into per-spot lists of gene indices, one entry per observed count\n \n Parameters:\n ----------\n \"\"\"\n n_spots,n_genes = adata.shape\n gene_id_list = []\n X = adata.X\n for spot in range(n_spots):\n gene_id_list.append([])\n for gene in range(n_genes):\n gene_id_list[spot] += [gene] *int(X[spot,gene])\n return gene_id_list\n\ndef get_ids_dt_wt_test(gene_ids: List[List[int]],\n umi_factors: List[np.ndarray],\n n_genes: int,\n K: int)->Tuple[np.ndarray,np.ndarray]:\n \"\"\" Accumulate document-topic (dt) and word-topic (wt) count matrices from per-UMI factor assignments\n \n Parameters:\n ----------\n \"\"\"\n \n n_spots = len(gene_ids)\n dt = np.zeros((n_spots, K))\n wt = np.zeros((K, n_genes))\n \n for spot in range(n_spots):\n for gene,factor in zip(gene_ids[spot],umi_factors[spot]):
\n dt[spot,factor] += 1\n wt[factor,gene] += 1\n \n return dt,wt\n\ndef get_theta_test(dt):\n \"\"\" Normalize the per-spot factor counts dt into factor proportions theta\n \n Parameters:\n ----------\n \"\"\"\n n_spots, K = dt.shape\n theta = np.zeros((n_spots, K))\n for spot in range(n_spots):\n for factor in range(K):\n theta[spot, factor] = dt[spot, factor] / np.sum(dt, axis=1)[spot]\n return theta\n\ndef remove_false_neighbours_test(dist,indx):\n \"\"\" Drop neighbour entries whose distance is infinite (KDTree padding for missing neighbours)\n \n Parameters:\n ----------\n \"\"\"\n nbr_filter = lambda xs,ds : [x for x,d in zip(xs,ds) if not np.isinf(d)]\n new_idx = [nbr_filter(i,d) for i,d in zip(indx,dist)]\n new_dist = [nbr_filter(d,d) for d in dist]\n return new_dist,new_idx\n\ndef get_E_test(indx_sel: List[List[int]])->int:\n \"\"\" Count the unique undirected edges in the neighbourhood graph\n \n Parameters:\n ----------\n \"\"\"\n # make list to hold neighbor index pairs\n indx_tuples = []\n # iterate over neighborhoods\n for spot,nbrhd in enumerate(indx_sel):\n # iterate over neighbors in neighborhood\n for nbr in nbrhd[1::]:\n # make a pair (spot,neighbor)\n pair = [spot,nbr]\n # sort pair to store edges the same way\n # (a,b) and (b,a) will now be (a,b) and (a,b)\n pair.sort()\n # store pair, convert to tuple\n indx_tuples.append(tuple(pair))\n # apply set to pair list to remove duplicates\n # will only hold unique edges now\n indx_tuples = set(indx_tuples)\n # return length\n return len(indx_tuples)\n\n","sub_path":"test_ST_LDA_functions.py","file_name":"test_ST_LDA_functions.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"457433841","text":"import requests, json\n\n\ndegree_sign= u'\\N{DEGREE SIGN}'\n\ndef show_Main_Menu():\n\tprint('Connection Status: Connected'\n\t'\\n Welcome to WEATHER NOW')\n\tprint('1 - Find current weather for a specific location')\n\tprint('2 - Quit')\n\n\treturn int(input('What would you like to do? '))\n\n\ndef getZipCode():\n\tzipcode = input(\"Please enter a 5 digit Zip Code: \")\n\twhile len(zipcode) != 5:\n\t\tzipcode = input(\"Oops! You did not enter a valid zip code. Please enter a valid zip code: \")
Please enter a valid zip code: \")\n\t\t\n\treturn zipcode\n\n\ndef create_url():\n\tfull_url = \"\"\n\turl = \"http://api.openweathermap.org/data/2.5/weather?zip=\"\n\tzipcode = getZipCode()\n\tapi = \"e4f9c3989f6e3d0f337d8f18c8995bef\"\n\t\n\tfull_url = url + zipcode + \"&units=imperial&us&appid=\" + api\n\n\treturn full_url\n\n\ndef connect():\n\tfull_url = (\"http://api.openweathermap.org/data/2.5/weather?zip=32566&units=imperial&us&appid=e4f9c3989f6e3d0f337d8f18c8995bef\")\n\ttry:\n\t\treq = requests.get(full_url)\n\t\treturn True\n\t\t\n\texcept:\n\t\tprint(\"***An unexpected error has occurred, connection to Weather Service is currently unavailable***\")\n\t\treturn False\n\n\ndef get_weather():\n\tfull_url = create_url()\n\treq = requests.get(full_url)\n\twx = req.json()\n\n\tif wx[\"cod\"] != \"404\": \n \n\t\tlocation_name = wx[\"name\"]\n\n\t\ttempDetails = wx[\"main\"]\n\t\tcurrent_temperature = tempDetails[\"temp\"] \n\t\tcurrent_humidiy = tempDetails[\"humidity\"] \n\t\tcurrent_pressure = tempDetails[\"pressure\"] \n\n\t\twindDetails = wx[\"wind\"]\n\t\tcurrent_wind = windDetails[\"speed\"]\n \n\t\tcurrent_statement = wx[\"weather\"] \n\t\tweather_description = current_statement[0][\"description\"] \n\n\t\tprint(\"Current weather for \" + location_name + \": \" + str(weather_description) +\n\t    \t\"\\n Temperature: \" + str(current_temperature) + degree_sign + \"F\" +\n\t\t\t\"\\n Humidity: \" + str(current_humidiy) + \"%\" +\n\t\t\t\"\\n Wind: \" + str(current_wind) + \"mph\" +\n\t\t\t\"\\n Atmospheric Pressure: \" + str(current_pressure) + \" hPa\")\n\n\telse: \n\t\tprint(\"An unexpected error has occurred, please ensure you have entered the correct zip code and try again\") \n\t\tshow_Main_Menu()\n\n\ndef run_Weather():\n\tif connect() == True:\n\t\twhile (True):\n\t\t\tmenu_option = show_Main_Menu()\n\n\t\t\tif (menu_option == 1):\n\t\t\t\tconnect()\n\t\t\t\tget_weather()\n\n\t\t\telif (menu_option == 2):\n\t\t\t\tprint('Thank you! See you soon!')\n\t\t\t\tbreak\n\t\t\t\n\t\t\telse:\n\t\t\t\tprint('Please select a valid option')\n\n\t\t\tinput('Press enter to return to the Menu')\n\telse:\n\t\tprint('Please Try Again Later')\n\n\nrun_Weather()","sub_path":"Final_Project_Blanchard/weather_final.py","file_name":"weather_final.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455448185","text":"import csv\nimport io\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView, ListView\nfrom rest_framework import generics\nfrom tablib import Dataset\n\nfrom .forms import UploadCSVForm\nfrom .models import Employee, Roster, Shift, Upload\nfrom .serializers import EmployeeSerializer, RosterSerializer, ShiftSerializer\n\n\ndef index(request):\n    return HttpResponse(\"Hello, world. 
You're at the shiftmanager index.\")\n\n\nclass EmployeeListCreate(generics.ListCreateAPIView):\n    queryset = Employee.objects.all()\n    serializer_class = EmployeeSerializer\n\n\nclass ShiftListCreate(generics.ListCreateAPIView):\n    queryset = Shift.objects.all()\n    serializer_class = ShiftSerializer\n\n\nclass RosterListCreate(generics.ListCreateAPIView):\n    queryset = Roster.objects.all()\n    serializer_class = RosterSerializer\n\n\ndef upload_csv(request):\n    if request.method == 'POST':\n        form = UploadCSVForm(request.POST, request.FILES)\n        if form.is_valid():\n            form.save()\n            filename = request.FILES['upload'].name\n            if filename.find('employees') > -1:\n                parse_csv(request.FILES['upload'], 'employees')\n            elif filename.find('shifts') > -1:\n                parse_csv(request.FILES['upload'], 'shifts')\n\n            return HttpResponse(f\"Successfully uploaded {filename}\")\n    else:\n        form = UploadCSVForm()\n    return render(request, 'upload_csv.html', {'form': form})\n\n\ndef parse_csv(data, upload_type):\n    data.seek(0)\n    decoded_file = data.read().decode('utf-8')\n    io_string = io.StringIO(decoded_file)\n    reader = csv.reader(io_string, delimiter=',', quotechar='\"')\n\n    # Skip header\n    next(reader)\n\n    if upload_type == 'employees':\n        for row in reader:\n            try:\n                Employee.objects.get(first_name=row[0], last_name=row[1])\n            except Employee.DoesNotExist as e:\n                Employee(first_name=row[0], last_name=row[1]).save()\n\n    if upload_type == 'shifts':\n        for row in reader:\n            try:\n                Shift.objects.get(\n                    date=row[0],\n                    start_time=row[1],\n                    end_time=row[2],\n                    break_length=row[3],\n                )\n            except Shift.DoesNotExist as e:\n                Shift(\n                    date=row[0],\n                    start_time=row[1],\n                    end_time=row[2],\n                    break_length=row[3],\n                ).save()\n","sub_path":"shiftmanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"350159434","text":"def calculate_marks(solutions, students_answers):\n    id_questions = list(solutions.keys())\n    ret = {} # Dictionary to be returned\n    # Iterate from student to student\n    for student in students_answers:\n        mark = 0\n        print(\"\\nStudent ID: {0} ------------\".format(student))\n        print(\"Student {0} answers: {1}\".format(student, students_answers[student]))\n        # Iterate through student answers\n        for answer_key in students_answers[student]:\n            print(\"Student answer to question {0}: {1}\".format(answer_key, students_answers[student][answer_key]))\n            if not (answer_key in id_questions):\n                print(\n                    \"Question ID {0} is not in the answer sheet, valid IDs are: {1}\".format(answer_key, id_questions))\n                pass\n            elif students_answers[student][answer_key] == solutions[answer_key]:\n                print (\"Answer to question {0} is {1}. +1\".format(answer_key, students_answers[student][answer_key]))\n                mark += 1\n            else:\n                print (\"Answer to question {0} is {1}. -0.25\".format(answer_key, solutions[answer_key]))\n                mark -= 0.25\n\n        ret[student] = mark\n\n    print ('\\nSTUDENTS\\' MARKS')\n    return ret\n\n\nif __name__ == \"__main__\":\n    \"\"\"Sample main to test functionality. 
Compute the marks of a couple of students\"\"\"\n print(calculate_marks({1: 'a', 2: 'b'}, {1: {1: 'a', 2: 'c'}, 2: {1: 'b', 2: 'b'}}))\n","sub_path":"Python Exercises/Exercise6/calculate_marks.py","file_name":"calculate_marks.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"437241767","text":"#信誉值随轮次变化,第6轮和第15轮报告错误信息,从0开始\n#0.139, 0.256, 0.253, 0.144, 0.077, 0.073, 0.058对应0%,5%,10%,15%,20%,25%,30%\nimport math\nimport openpyxl\nimport random\nimport random_g\n\n#读数据\ndata = openpyxl.load_workbook('car-rsu.xlsx')\ncar_name = data.worksheets[0]\n#行数\nn = car_name.max_row #有多少个rsu\n\nrsu_car = [0] * n #每个rsu有多少辆车\n\ncar_num = 0\n\n#print(car_name.cell(17,1).value)\n\nfor i in range(1, n + 1):\n\ttemp = car_name.cell(i,1).value\n\trsu_car[i - 1] = temp\n\t#print(type(temp))\n\tcar_num = car_num + temp\n#print(car_num)\n\ncar = []#车辆信息[a, b, c ,d]分别代表信誉值,数据可信度,是否正确,类别\n#car = [a] * car_num\nfor i in range(0, car_num):\n\tcar.append([0]*4)\n\ndef deal_data():#为每辆车赋值(是否正确,可信度->信誉值)\n\tper = [0.139, 0.256, 0.253, 0.144, 0.077, 0.073, 0.058]#不同的比例\n\tcar_per = []\n\ttemp = 0\n\tfor i in range(0, 6):\n\t\ttemp = temp + int(per[i] * car_num)\n\t\tcar_per.append([temp, i])\n\tcar_per.append([car_num, 6])#不同等级的分布\n\t#print(car_per)\n\tv = random_g.random_nr(car_num, car_num)#车辆id随机排序\n\t#print(mv)\n\n\t#为每辆车分配等级\n\tfor i in v:\n\t\tfor j in car_per:\n\t\t\tif i > j[0] - 1:\n\t\t\t\tcar[i][3] = j[1]\n\n\t#将恶意车辆置为1,诚实车辆置为0\n\t#根据车辆类别有不同概率作恶\n\tfor i in car:\n\t\tif i[3] == 0:\n\t\t\ti[2] = 0\n\t\telif i[3] == 1:\n\t\t\tif random.random() < 0.05:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\telif i[3] == 2:\n\t\t\tif random.random() < 0.1:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\telif i[3] == 3:\n\t\t\tif random.random() < 0.15:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\telif i[3] == 4:\n\t\t\tif random.random() < 0.2:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\telif i[3] == 5:\n\t\t\tif random.random() < 0.25:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\telif i[3] == 6:\n\t\t\tif random.random() < 0.3:\n\t\t\t\ti[2] = 1\n\t\t\telse:\n\t\t\t\ti[2] = 0\n\t\n\t#为车辆分配信息可信度\n\tfor i in range(0, car_num):\n\t\tif car[i][2] == 0:\n\t\t\tcar[i][1] = round(random.uniform(0.7, 0.9),2)\n\t\telse:\n\t\t\tcar[i][1] = round(random.uniform(0.5, 0.8),2)\n\n\t#车辆当前信誉值\n\tfor i in range(0, car_num):\n\t\tcar[i][0] = int((math.exp(1 / math.log(1 / (car[i][1] - 0.1))) - 1) * 10)\n\n\ndef rsu_de():#把车辆分给不同的rsu\n\tleft = 0#起始位置\n\tcar_data = []\n\tfor i in range(0, n):#n个rsu\n\t\tcar_n = rsu_car[i] #这个rsu的车辆数\n\t\tright = left + car_n - 1#中止位置\n\t\tcar_r = []\n\t\tfor i in range(0, car_n):\n\t\t\tcar_r.append([0]*3)\n\n\t\tfor j in range(0, car_n):\n\t\t\tcar_r[j][0] = car[left + j][0]\n\t\t\tcar_r[j][1] = car[left + j][1]\n\t\t\tcar_r[j][2] = car[left + j][2]\n\t\tleft = right + 1\n\t\tcar_data.append(car_r)\n\t#print(car_data[0])\n\t#print(car_data[n - 1])\n\treturn(car_data)\n\n\ndef rsu_bys(car): #单个rsu计算贝叶斯\n\te = 0.999 #先验概率\n\tp = 1\n\tpe = 1\n\tfor i in car:\n\t\tif i[2] == 0:#是正确事件\n\t\t\tp = p * i[1]\n\t\t\tpe = pe * (1 - i[1])\n\t\t\t#print(i[1])\n\t\telse:#是错误事件\n\t\t\tp = p *(1 - i[1])\n\t\t\tpe = pe * i[1]\n\t\t\t#print(i[1])\n\t#print(e * p, (1 - e) * pe)\n\tcr = (e * p) / (e * p + ((1 - e) * pe))\n\t#print(cr, crx)\n\treturn cr\n\n#计算每个rsu得到的可信度\ndef rsu_all(d, celi):\n\tfor i in range(0, n):\n\t\tceli[i] = round(rsu_bys(d[i]),4)\n\t#print(celi)\n\treturn 
celi\n\n#计算事件是否可信\ndef event(celi):\n\tt = 0\n\tf = 0\n\tfor i in range(0, n):\n\t\tif celi[i] >= 0.5:\n\t\t\tt = t + 1\n\t\telse:\n\t\t\tf = f + 1\n\tif t > f:\n\t\treturn 0, t, f#事件可信\n\telse:\n\t\treturn 1, t, f #事件不可信\n\n\n\n\t\n\n#计算该车辆的信誉值\ndef change_repu():\n\trepu = [5]\n\tre = 5\n\toffset = 0\n\tfor i in range(0, 25):\n\t\tdeal_data()\n\t\td = rsu_de()\n\n\t\tceli = []\n\t\tfor a in range(0, n):\n\t\t\tceli.append([0])\n\n\t\trsu_all(d, celi)\n\t\t\n\t\t#res = event(celi)[0]\n\t\tt = event(celi)[1]\n\t\tf = event(celi)[2]\n\t\t\n\t\tif i == 5:\n\t\t\toffset = round(-1 * abs(f - t) / (t + f) * (math.log(re + 1) + 1),4)\n\t\telif i == 14:\n\t\t\toffset = round(-1 * abs(f - t) / (t + f) * (math.log(re + 1) + 1),4)\n\t\telse:\n\t\t\toffset = round(abs(t - f) / (t + f) * 1 / (math.log(re + 1) + 1),4)\n\n\t\tprint(offset)\n\n\n\t\tre = re + offset\n\t\trepu.append(round(re,4))\n\treturn(repu)\n\n#change_repu()\nprint(change_repu())","sub_path":"graduation/2018级/fuzhao/code/code/repu/repu_value.py","file_name":"repu_value.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"622579123","text":"'''\n\tDescription: Preprocess data\n\tAuthor: Ho Ming Poon\n'''\nimport os\nimport json\nimport zipfile\nimport csv\nimport random\nimport re, string, unicodedata\nimport nltk\nimport contractions\nimport inflect\nfrom bs4 import BeautifulSoup\nfrom nltk import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import LancasterStemmer, WordNetLemmatizer\nenglishWords = set(nltk.corpus.words.words())\n\n\ndef unzip(zipFile,extractTo):\n\t# Unzip zip files\n\tzip_ref = zipfile.ZipFile(zipFile, 'r')\n\tzip_ref.extractall(extractTo)\n\tzip_ref.close()\n\n\ndef createFolder(directory):\n\t# Creates new directory\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n\ndef strip_html(text):\n\t# Removes html tags from text\n\tsoup = BeautifulSoup(text, \"html.parser\")\n\treturn soup.get_text()\n\n\ndef remove_between_square_brackets(text):\n\t# Removes brackets from text\n\treturn re.sub('\\[[^]]*\\]', '', text)\n\n\ndef remove_underscore(text):\n\t# Removes underscores from text\n\treturn text.replace('_','')\n\n\ndef replace_contractions(text):\n\t# Replace contractions in text'''\n\treturn contractions.fix(text)\n\n\ndef remove_non_ascii(words):\n\t# Remove non-ASCII characters from list of tokenized words\n\tnew_words = []\n\tfor word in words:\n\t\tnew_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n\t\tnew_words.append(new_word)\n\treturn new_words\n\n\ndef to_lowercase(words):\n\t# Convert all characters to lowercase from list of tokenized words\n\tnew_words = []\n\tfor word in words:\n\t\tnew_word = word.lower()\n\t\tnew_words.append(new_word)\n\treturn new_words\n\n\ndef remove_punctuation(words):\n\t# Remove punctuation from list of tokenized words except period\n\tnew_words = []\n\tfor word in words:\n\t\tif word != \".\":\n\t\t\tnew_word = re.sub(r'[^\\w\\s]', '', word)\n\t\t\tif new_word != '':\n\t\t\t\tnew_words.append(new_word)\n\t\telse:\n\t\t\tnew_words.append(word)\n\treturn new_words\n\n\ndef replace_numbers(words):\n\t# Replace all interger occurrences in list of tokenized words with textual representation\n\tp = inflect.engine()\n\tnew_words = []\n\tfor word in words:\n\t\tif word.isdigit():\n\t\t\tnew_word = 
p.number_to_words(word)\n\t\t\tnew_words.append(new_word)\n\t\telse:\n\t\t\tnew_words.append(word)\n\treturn new_words\n\n\ndef remove_stopwords(words):\n\t# Remove stop words from list of tokenized words\n\tnew_words = []\n\tfor word in words:\n\t\tif word not in stopwords.words('english'):\n\t\t\tnew_words.append(word)\n\treturn new_words\n\n\ndef remove_nonEnglish(words):\n\t# Remove non-english words from text\n\tnew_words = []\n\tfor word in words:\n\t\tfor w in nltk.wordpunct_tokenize(word):\n\t\t\tif w.lower() in englishWords or not w.isalpha():\n\t\t\t\tnew_words.append(w)\n\treturn new_words\n\n\ndef stem_words(words):\n\n\t# Stem words in list of tokenized words\n\tstemmer = LancasterStemmer()\n\tstems = []\n\tfor word in words:\n\t\tstem = stemmer.stem(word)\n\t\tstems.append(stem)\n\treturn stems\n\n\ndef lemmatize_verbs(words):\n\t# Lemmatize verbs in list of tokenized words\n\tlemmatizer = WordNetLemmatizer()\n\tlemmas = []\n\tfor word in words:\n\t\tlemma = lemmatizer.lemmatize(word, pos='v')\n\t\tlemmas.append(lemma)\n\treturn lemmas\n\n\ndef denoise_text(text):\n\t# Denoise text\n\ttext = strip_html(text)\n\ttext = remove_between_square_brackets(text)\n\ttext = remove_underscore(text)\n\ttext = replace_contractions(text)\n\treturn text\n\n\ndef normalize(words):\n\t# Normalize text\n\twords = remove_non_ascii(words)\n\twords = to_lowercase(words)\n\twords = remove_punctuation(words)\n\t#words = replace_numbers(words)\n\twords = remove_stopwords(words)\n\twords = remove_nonEnglish(words)\n\treturn words\n\n\ndef stem_and_lemmatize(words):\n\t# Stem/Lemmatize text\n\tstems = stem_words(words)\n\t#lemmas = lemmatize_verbs(words)\n\treturn stems\n\n\ndef preprocess(numOfArticles,file,directory):\n\n\toutFile = open(file,\"w\")\n\n\ttry:\n\t\tfor i in range(len(directory)):\n\t\t\tcounter = 0\n\t\t\tnumOfFiles = len([name for name in os.listdir(\"./\" + directory[i] + \"/\" + directory[i] + \"Data\")])\n\t\t\tfor k in range(1,numOfFiles+1):\n\t\t\t\tif counter < numOfArticles:\n\t\t\t\t\tdata = open(\"./\" + directory[i] + \"/\" + directory[i] + \"Data/news_\" + str(k) + \".json\")\n\t\t\t\t\td = json.loads(data.readline())\n\t\t\t\t\ttext = d.get(\"text\")\n\t\t\t\t\ttext = denoise_text(text)\n\t\t\t\t\twords = nltk.word_tokenize(text)\n\t\t\t\t\twords = normalize(words)\n\t\t\t\t\t#words = stem_and_lemmatize(words)\n\t\t\t\t\ttext = \"\"\n\t\t\t\t\tfor word in words:\n\t\t\t\t\t\tif word != \".\":\n\t\t\t\t\t\t\ttext = text + \" \" + word\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttext = text + word\n\t\t\t\t\tif len(text) <= 10000 and len(text) >= 100:\n\t\t\t\t\t\toutFile.write(directory[i] + \",\" + text[1:] + \"\\n\")\n\t\t\t\t\t\tcounter += 1\n\t\t\t\t\tdata.close()\n\t\t\tprint(\"Successfully preprocessed \" + str(counter) + \" \" + directory[i] + \" data ...\")\n\t\toutFile.close()\n\n\texcept Exception:\n\t\tprint(\"Failed to preprocess data ...\")\n\n\ndef run(file):\n\n\tif not os.path.exists(\"./world_news/world_newsData\"):\n\t\tunzip(\"./world_news/world_newsData.zip\",\"./world_news/world_newsData\")\n\n\tdirectory = [\"world_news\"]\n\n\t# Preprocess data\n\tpreprocess(40000,file,directory)\n\n\nrun(\"world_news.csv\")","sub_path":"corpus/preprocess7.py","file_name":"preprocess7.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272850338","text":"import urllib.request\n\nurl_base = 'http://yann.lecun.com/exdb/mnist/'\nkey_file = {\n 
'train_img':'train-images-idx3-ubyte.gz',\n 'train_label':'train-labels-idx1-ubyte.gz',\n 'test_img':'t10k-images-idx3-ubyte.gz',\n 'test_label':'t10k-labels-idx1-ubyte.gz'\n}\n\ndataset_dir = './mnist_data' #データを保存する場所\n\nfor v in key_file.values():\n file_path = dataset_dir + '/' + v\n urllib.request.urlretrieve(url_base + v, file_path)","sub_path":"mnistdl.py","file_name":"mnistdl.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"588395670","text":"\"\"\"A Bluetooth passive coordinator.\n\nReceives data from advertisements but can also poll.\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Callable, Coroutine\nimport logging\nfrom typing import Any, Generic, TypeVar\n\nfrom bleak import BleakError\n\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.debounce import Debouncer\nfrom homeassistant.util.dt import monotonic_time_coarse\n\nfrom . import BluetoothChange, BluetoothScanningMode, BluetoothServiceInfoBleak\nfrom .passive_update_coordinator import PassiveBluetoothDataUpdateCoordinator\n\nPOLL_DEFAULT_COOLDOWN = 10\nPOLL_DEFAULT_IMMEDIATE = True\n\n_T = TypeVar(\"_T\")\n\n\nclass ActiveBluetoothDataUpdateCoordinator(\n PassiveBluetoothDataUpdateCoordinator, Generic[_T]\n):\n \"\"\"A coordinator that receives passive data from advertisements but can also poll.\n\n Unlike the passive processor coordinator, this coordinator does call a parser\n method to parse the data from the advertisement.\n\n Every time an advertisement is received, needs_poll_method is called to work\n out if a poll is needed. This should return True if it is and False if it is\n not needed.\n\n def needs_poll_method(\n svc_info: BluetoothServiceInfoBleak,\n last_poll: float | None\n ) -> bool:\n return True\n\n If there has been no poll since HA started, `last_poll` will be None.\n Otherwise it is the number of seconds since one was last attempted.\n\n If a poll is needed, the coordinator will call poll_method. This is a coroutine.\n It should return the same type of data as your update_method. The expectation is\n that data from advertisements and from polling are being parsed and fed into\n a shared object that represents the current state of the device.\n\n async def poll_method(svc_info: BluetoothServiceInfoBleak) -> YourDataType:\n return YourDataType(....)\n\n BluetoothServiceInfoBleak.device contains a BLEDevice. 
You should use this in\n your poll function, as it is the most efficient way to get a BleakClient.\n\n Once the poll is complete, the coordinator will call _async_handle_bluetooth_poll\n which needs to be implemented in the subclass.\n \"\"\"\n\n def __init__(\n self,\n hass: HomeAssistant,\n logger: logging.Logger,\n *,\n address: str,\n mode: BluetoothScanningMode,\n needs_poll_method: Callable[[BluetoothServiceInfoBleak, float | None], bool],\n poll_method: Callable[\n [BluetoothServiceInfoBleak],\n Coroutine[Any, Any, _T],\n ]\n | None = None,\n poll_debouncer: Debouncer[Coroutine[Any, Any, None]] | None = None,\n connectable: bool = True,\n ) -> None:\n \"\"\"Initialize the coordinator.\"\"\"\n super().__init__(hass, logger, address, mode, connectable)\n # It's None before the first successful update.\n # Set type to just T to remove annoying checks that data is not None\n # when it was already checked during setup.\n self.data: _T = None # type: ignore[assignment]\n\n self._needs_poll_method = needs_poll_method\n self._poll_method = poll_method\n self._last_poll: float | None = None\n self.last_poll_successful = True\n\n # We keep the last service info in case the poller needs to refer to\n # e.g. its BLEDevice\n self._last_service_info: BluetoothServiceInfoBleak | None = None\n\n if poll_debouncer is None:\n poll_debouncer = Debouncer(\n hass,\n logger,\n cooldown=POLL_DEFAULT_COOLDOWN,\n immediate=POLL_DEFAULT_IMMEDIATE,\n function=self._async_poll,\n )\n else:\n poll_debouncer.function = self._async_poll\n\n self._debounced_poll = poll_debouncer\n\n def needs_poll(self, service_info: BluetoothServiceInfoBleak) -> bool:\n \"\"\"Return true if time to try and poll.\"\"\"\n if self.hass.is_stopping:\n return False\n poll_age: float | None = None\n if self._last_poll:\n poll_age = monotonic_time_coarse() - self._last_poll\n return self._needs_poll_method(service_info, poll_age)\n\n async def _async_poll_data(\n self, last_service_info: BluetoothServiceInfoBleak\n ) -> _T:\n \"\"\"Fetch the latest data from the source.\"\"\"\n if self._poll_method is None:\n raise NotImplementedError(\"Poll method not implemented\")\n return await self._poll_method(last_service_info)\n\n async def _async_poll(self) -> None:\n \"\"\"Poll the device to retrieve any extra data.\"\"\"\n assert self._last_service_info\n\n try:\n self.data = await self._async_poll_data(self._last_service_info)\n except BleakError as exc:\n if self.last_poll_successful:\n self.logger.error(\n \"%s: Bluetooth error whilst polling: %s\", self.address, str(exc)\n )\n self.last_poll_successful = False\n return\n except Exception: # pylint: disable=broad-except\n if self.last_poll_successful:\n self.logger.exception(\"%s: Failure while polling\", self.address)\n self.last_poll_successful = False\n return\n finally:\n self._last_poll = monotonic_time_coarse()\n\n if not self.last_poll_successful:\n self.logger.debug(\"%s: Polling recovered\", self.address)\n self.last_poll_successful = True\n\n self._async_handle_bluetooth_poll()\n\n @callback\n def _async_handle_bluetooth_poll(self) -> None:\n \"\"\"Handle a poll event.\"\"\"\n self.async_update_listeners()\n\n @callback\n def _async_handle_bluetooth_event(\n self,\n service_info: BluetoothServiceInfoBleak,\n change: BluetoothChange,\n ) -> None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n super()._async_handle_bluetooth_event(service_info, change)\n\n self._last_service_info = service_info\n\n # See if its time to poll\n # We use bluetooth events to trigger the poll so that we scan 
as soon as\n # possible after a device comes online or back in range, if a poll is due\n if self.needs_poll(service_info):\n self.hass.async_create_task(self._debounced_poll.async_call())\n\n @callback\n def _async_stop(self) -> None:\n \"\"\"Cancel debouncer and stop the callbacks.\"\"\"\n self._debounced_poll.async_cancel()\n super()._async_stop()\n","sub_path":"homeassistant/components/bluetooth/active_update_coordinator.py","file_name":"active_update_coordinator.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"335549033","text":"import pygeoj\nfrom sys import argv\n\n\ndef match(file1_features, file2_features, label_for_file1, label_for_file2, show_diffs):\n num_without_id = 0\n num_not_matched = 0\n num_diff = 0\n num_ok = 0\n\n for feature1 in file1_features:\n id1 = None\n if 'id' not in feature1.properties:\n if 'GEOID' in feature1.properties:\n id1 = feature1.properties['GEOID']\n else:\n print(label_for_file1 + ': Found feature with no ID')\n for prop in feature1.properties:\n print('\\t1[' + prop + ']=\"' + str(feature1.properties[prop]) + '\"')\n num_without_id += 1\n else:\n id1 = feature1.properties['id']\n\n if id1 is not None:\n matched_feature2 = None\n for feature2 in file2_features:\n id2 = None\n if 'id' in feature2.properties:\n id2 = feature2.properties['id']\n elif 'GEOID' in feature2.properties:\n id2 = feature2.properties['GEOID']\n\n if id2 is not None and id1 == id2:\n matched_feature2 = feature2\n\n if matched_feature2 is None:\n print(label_for_file1 + ': feature with id ' + id1 + ' not in ' + label_for_file2)\n num_not_matched += 1\n\n elif show_diffs:\n if diff(feature1, matched_feature2, label_for_file1, label_for_file2, id1):\n num_diff += 1\n else:\n num_ok += 1\n else:\n num_ok += 1\n\n return num_without_id, num_not_matched, num_diff, num_ok\n\n\ndef print_diff_header(already_has_differences, file_label, areaid):\n if not already_has_differences:\n print(file_label + ': Diff[id=' + areaid + ']:')\n\n\ndef diff(feature1, feature2, label_for_file1, label_for_file2, areaid):\n has_differences = False\n for prop in feature1.properties:\n if prop not in feature2.properties:\n if prop != 'date_history' and (not prop.endswith('_history') or len(feature1.properties[prop]) > 0):\n print_diff_header(has_differences, label_for_file1, areaid)\n has_differences = True\n print('\\tMissing from ' + label_for_file2 + ': ' + prop)\n elif feature1.properties[prop] != feature2.properties[prop]:\n if not prop.endswith('_history'):\n print_diff_header(has_differences, label_for_file1, areaid)\n has_differences = True\n print('\\tProperty Difference: ' + prop + '[' + label_for_file1 + ']=\"' + str(\n feature1.properties[prop]) + '\", [' + label_for_file2 + ']=\"' + str(feature2.properties[prop]) + '\"')\n\n for prop in feature2.properties:\n if prop not in feature1.properties:\n if prop != 'date_history':\n print_diff_header(has_differences, label_for_file1, areaid)\n has_differences = True\n print('\\tMissing from ' + label_for_file1 + ': ' + prop)\n\n return has_differences\n\n\ndef diff_geojson(file1, file2):\n # file1 = '/Users/jfeldman/projects/covid19-clean-copy/docs/data/2020-04-06-cases-healthcare-history.geojson'\n # file2 = '/Users/jfeldman/projects/covid19/docs/data/2020-04-06-cases-healthcare-history.geojson'\n\n file1_geojson = pygeoj.load(filepath=file1)\n file2_geojson = pygeoj.load(filepath=file2)\n\n num1_without_id, num1_notin_2, num_diff, num_ok = 
match(file1_geojson, file2_geojson, '1', '2', True)\n num2_without_id, num2_notin_1, x, y = match(file2_geojson, file1_geojson, '2', '1', False)\n print(str(num1_without_id) + ' had no ID in 1')\n print(str(num2_without_id) + ' had no ID in 2')\n print(str(num1_notin_2) + ' features in 1 not in 2')\n print(str(num2_notin_1) + ' features in 2 not in 1')\n print(str(num_diff) + ' matched but were different')\n print(str(num_ok) + ' matched completely')\n\n\nif len(argv) < 3:\n print(\"USAGE: python compare-geojson.py \")\nelse:\n diff_geojson(argv[1], argv[2])\n","sub_path":"scripts/compare-geojson.py","file_name":"compare-geojson.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92684485","text":"\"\"\"\nProvides classes for declarative settings definition\n\n\nNotes:\n===============================================================================\nTerm 'settings set' means any declaration of settings, i.e. class,\ninherited from ModelSettings or ApplicationSettings.\n\n\nSettings declaration:\n===============================================================================\nTo define your own settings set, inherit ModelSettings or ApplicationSettings:\n\n >>> import django_wide\n >>>\n >>> class MyModelSettings(django_wide.ModelSettingSet):\n >>> pagination_limit = django_wide.IntegerField(default=10)\n >>> my_option = django_wide.BooleanField()\n >>>\n >>> class MyAppSettings(django_wide.ApplicationSettingSet):\n >>> admin_email = django_wide.EmailField()\n\nWhen you inherit ModelSettings, new settings set will be registered.\nTo apply this settings set to your model, assign your ModelSettings\ninheritor class (or instance, it does not matter) to any attribute\nof model class. Model may have several settings sets.\n\n >>> from django.db import models\n >>>\n >>> class MyModel(models.Model):\n >>>\n >>> # you can just assign settings class\n >>> settings = MyModelSettings\n >>>\n >>> # or instantiate this class if you prefer such syntax :)\n >>> alternative_settings = MyModelSettings()\n\nAlso you can define model settings sets directly inside your model:\n\n >>> from django.db import models\n >>>\n >>> class MyModel(models.Model):\n >>>\n >>> class settings(django_wide.ModelSettingSet):\n >>> one_more_option = django_wide.IntegerField()\n\nNote that if you define model settings set, but not assign it to any model,\nit won't be created (in database) at all. You must to bind model settings to\nany model.\n\nWith application-wide settings, things are easier.\nDeclaration syntax is same, and your settings class should be only created.\nIf class created - then your settings created and can be used in runtime.\nYou can place your app settings definition into any module that will be\nimported during Django initialization. 
Also you can place (and I recommend\nto do so) in app_settings module - it will be auto-discovered by django_wide.\n\n\nSettings reading:\n===============================================================================\n\n >>> MyModel.settings.my_option\n False\n >>> MyModel.settings.pagination_limit\n 10\n >>> MyAppSettings.admin_email\n 'admin@mysite.com'\n\n\nSettings writing:\n===============================================================================\n\n >>> MyModel.settings.pagination_limit = 15\n >>> MyModel.settings.pagination_limit\n 15\n # setting is assigned in memory, but not committed to database,\n # so after server restart value from DB will be restored.\n # Call save() to commit changes:\n >>> MyModel.settings.save()\n\n\"\"\"\n__author__ = 'faddey'\n\nimport os\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils import six\nfrom django.utils.importlib import import_module\nfrom django.apps import apps\n\nfrom django_wide.fields import SettingBaseField\n\n\n__all__ = ['ModelSettingSet', 'ApplicationSettingSet']\n\ndef _inherit_fields(base_classes, attrs):\n \"\"\"\n This procedure collects settings fields from base classes, makes\n fields copies if necessary and updates attrs with complete field set.\n \"\"\"\n\n def sorted_keys(fields_dict):\n return sorted(\n fields_dict,\n key=lambda _key: fields_dict[_key].declaration_order\n )\n\n # get fields from base classes\n base_fields = {}\n for base_cls in reversed(base_classes):\n if hasattr(base_cls, '_declared_fields'):\n base_fields.update(base_cls._declared_fields)\n\n # forbid shadowing fields by non-fields:\n for key, f in attrs.items():\n if not isinstance(f, SettingBaseField) and key in base_fields:\n raise ImproperlyConfigured(\n \"Cannot shadow field '{}' by non-field value {}.\"\n .format(key, attrs[key])\n )\n\n # get new fields\n declared_fields = {\n key: f\n for key, f in attrs.items()\n if isinstance(f, SettingBaseField)\n }\n\n # define order of keys\n keys = sorted_keys(base_fields)\n for key in sorted_keys(declared_fields):\n if key not in base_fields:\n keys.append(key)\n\n # compose resulting field set\n fields = {\n key: declared_fields.get(key, base_fields.get(key)).copy()\n for key in keys\n }\n\n # update attrs dict, save fields to '_declared_fields'\n # it is also overrides fields as descriptors. 
if we won't do this,\n # attribute access will return base fields, to own fields.\n attrs.update(fields)\n attrs['_declared_fields'] = fields\n\n return attrs, fields\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Model-wide settings #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nclass ModelSettingsMetaclass(type):\n \"\"\"\n Sets keys for all fields, so fields can determine\n how to access its data in the database.\n\n Saves all fields into '_declared_fields' attribute\n for simple introspecting.\n\n Saves all produced settings sets to registry\n for initialization in DjangoWideConfig.ready()\n \"\"\"\n\n def __new__(mcs, name, bases, attrs):\n super_new = super(ModelSettingsMetaclass, mcs).__new__\n\n if bases == (object, ):\n # it is SettingsDefinition class itself\n # so we should return itself, not instance\n return super_new(mcs, name, bases, attrs)\n\n if '_declared_fields' in attrs:\n raise ImproperlyConfigured(\n \"'_declared_fields' is reserved by django_wide, \"\n \"so don't use this attribute in your settings definitions.\"\n )\n\n attrs, fields = _inherit_fields(bases, attrs)\n\n return super_new(mcs, name, bases, attrs)\n\n\n@six.add_metaclass(ModelSettingsMetaclass)\nclass ModelSettingSet(object):\n\n def __new__(cls):\n # Instantiation of this class should create new fields dictionary,\n # because each instance will be used for different models, and\n # we need to provide access to different field instances.\n super_new = super(ModelSettingSet, cls).__new__\n return super_new(ModelSettingsMetaclass(\n cls.__name__,\n (cls, ),\n {}\n ))\n\n ### THIS OLD CODE SAVED FOR COMMENTS WHICH WILL BE REFACTORED LATER\n # @transaction.atomic\n # def _update_definition(self):\n # \"\"\"\n # Run this method once after full Django initialization.\n # It is checks if setting definition was changed after previous\n # project launch and updates settings database.\n # \"\"\"\n #\n # # define procedure that creates value object for the setting\n # # because we use this procedure twice...\n # def create_value_object(field):\n # value_kwargs = {}\n # if 'default' in field.kwargs:\n # value_kwargs['value'] = field.kwargs['default']\n # values_manager = field.instance.value_type.model_class().objects\n # new_value = values_manager.create(**value_kwargs)\n # field.instance.value_id = new_value.id\n #\n # model_type = ContentType.objects.get_for_model(self._model)\n # existing_settings = list(ModelSetting.objects.filter(model=model_type))\n #\n # # initialize field.instance: now we can define all except 'value_id'\n # for field in self._declared_fields.values():\n # field.instance.model = model_type\n # field.instance.value_type = ContentType.objects.get_for_model(\n # field._setting_model\n # )\n #\n # # remove outdated settings:\n # # if key of setting not present in declared settings,\n # # we should remove it\n # for setting in existing_settings:\n # if setting.key not in self._declared_fields:\n # # i'm not sure whether content type framework\n # # cascade deletes objects linked via GenericForeignKey\n # # so i delete them manually\n # setting.value_object.delete()\n # setting.delete()\n # existing_settings = {s.key: s for s in existing_settings if s.pk}\n #\n # # finalize fields initialization algorithm:\n # # if setting with same key already exists:\n # # if type of setting changed:\n # # remove old value of old type\n # # create new value of new type\n # # update existing setting type\n # # else (setting with this key does not exists):\n 
# # create value for this setting\n # # create setting itself\n # for field in self._declared_fields.values():\n # # if this field already exists, we should only update\n # # its type (if required)\n # ex_setting = existing_settings.get(field.instance.key)\n #\n # if ex_setting:\n # # field already exists\n # if ex_setting.value_type_id != field.instance.value_type_id:\n # # value type differs, so old value should be removed\n # ex_setting.value_object.delete()\n # # create new value:\n # create_value_object(field)\n # else:\n # field.instance.value_id = ex_setting.value_id\n #\n # # since this field already exists, we should\n # # set id of existing setting to this declared setting\n # field.instance.id = ex_setting.id\n # else:\n # # field does not exists yet, so we should\n # # to create new value for it\n # create_value_object(field)\n #\n # # finally, save setting instance since it is fully initialized\n # field.instance.save()\n\n def save(self):\n for field in self._declared_fields.values():\n if field.was_changed:\n field.instance.value_object.save()\n field.was_changed = False\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Application-wide settings #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nclass ApplicationSettingsMetaclass(type):\n\n def __new__(mcs, name, bases, attrs):\n super_new = super(ApplicationSettingsMetaclass, mcs).__new__\n\n if bases == (object, ):\n return super_new(mcs, name, bases, attrs)\n\n if '_declared_fields' in attrs:\n raise ImproperlyConfigured(\n \"'_declared_fields' is reserved by django_wide, \"\n \"so don't use this attribute in your settings definitions.\"\n )\n\n app_label = _get_app_label(attrs['__module__'])\n registry = ApplicationSettingSet._registry\n if app_label not in registry:\n registry[app_label] = {}\n\n if name in registry[app_label]:\n raise ImproperlyConfigured(\n \"App-wide settings set with name '{}' \"\n \"for app '{}' defined twice.\"\n .format(name, app_label)\n )\n\n attrs, fields = _inherit_fields(bases, attrs)\n\n new_cls = super_new(mcs, name, bases, attrs)\n\n registry[app_label][name] = new_cls\n\n return new_cls\n\n def __setattr__(self, key, value):\n fields = self._declared_fields\n if key not in fields:\n super_self = super(ApplicationSettingsMetaclass, self)\n return super_self.__setattr__(key, value)\n descriptor = fields[key]\n return descriptor.__set__(None, value)\n\n\n@six.add_metaclass(ApplicationSettingsMetaclass)\nclass ApplicationSettingSet(object):\n\n _registry = {}\n\n @classmethod\n def save(cls):\n for f in cls._declared_fields.values():\n if f.was_changed:\n f.instance.value_object.save()\n f.was_changed = False\n\n\ndef _get_app_label(module_path):\n module_path = import_module(module_path).__file__\n pairs = [\n (app.label, app.path)\n for app in apps.get_app_configs()\n ]\n longest_prefix = ''\n found_label = None\n for label, path in pairs:\n prefix = os.path.commonprefix([\n module_path, os.path.join(path, '')\n ])\n if len(prefix) > len(longest_prefix):\n longest_prefix = prefix\n found_label = label\n return found_label\n\n","sub_path":"django_wide/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"195835042","text":"# ---------------------------------------------------------------\n# Problem 2 | Southern California Regional ICPC 2018 | Eshan Uniyal\n# December 2018, Python 3\n# 
http://socalcontest.org/history/2018/SC2018ICPCProblems.pdf\n# ---------------------------------------------------------------\n\nimport timer\n\n# Sample Input:\n\"\"\"\nLost is Close to Lose\n\"Better Documents Inc. wants to add Typo Checking in to the next generation of word processors,\" he said.\n***\n\"\"\"\n\ndef main():\n \"\"\"main function\"\"\"\n # takes 0.007 seconds to check sample input; should be able to compute 100 lines of text well below 2 second limit\n\n # taking input\n lines = []\n\n while True:\n\n line = input()\n\n if line == '***':\n break\n\n else:\n lines.append(line)\n\n timer.start()\n\n # creating a set of unique words in text\n words = set([])\n\n for line in lines:\n\n line = line.replace('.', ' ') # to replace periods with whitespace as outlined in problem\n\n for word in line.split(' '):\n\n # formatting words\n formatted_word = ''\n\n for character in word:\n if character.isalpha():\n formatted_word += character\n\n # adding words to set\n if len(formatted_word) > 0:\n words.add(formatted_word.lower())\n\n # creating dictionary to store words and similar sets\n words_dict = {word: [] for word in words}\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n\n for word in words_dict:\n converted_cores = []\n\n # versions converted by deleting a single character\n for i in range(0, len(word)):\n new_word = word[0 : i] + word[i + 1 :]\n converted_cores.append(new_word)\n\n for letter in alphabet:\n\n # versions created by inserting a single character\n for i in range(0, len(word)):\n\n new_word = word[0 : i] + letter + word[i :]\n converted_cores.append(new_word)\n\n converted_cores.append(word + letter) # to cover edge case\n\n # versions created by replacing a single character\n for i in range(0, len(word)):\n\n new_word = word[0: i] + letter + word[i + 1:]\n converted_cores.append(new_word)\n\n # versions created by transposing any two adjacent letters\n if len(word) > 1:\n for i in range(0, len(word) - 1):\n\n new_word = word[0 : i] + word[i + 1] + word[i] + word[i + 2: ]\n converted_cores.append(new_word)\n\n words_dict[word] = converted_cores\n # print(word, converted_cores)\n\n # checking for similar word cores\n similar_words = {}\n\n for word, cores in words_dict.items():\n for core in cores:\n\n if core in words_dict and core != word:\n\n try:\n similar_words[word].append(core)\n except:\n similar_words[word] = [core]\n\n # creating a solution list for sorting\n solutions = []\n\n for word, cores in similar_words.items():\n\n cores.sort()\n solution = [word] + cores\n solutions.append(solution)\n\n solutions.sort()\n\n # printing output\n for solution in solutions:\n print(f\"{solution[0]}: {' '.join(solution[1 : ])}\")\n\nmain()\ntimer.end()","sub_path":"Southern_California_Regional_ICPC_2018/problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"249378682","text":"# playing to see the relation between the thomas-fermi energy and the dot potential\n\nfrom thomas_fermi import *\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Model parameters\n# potential profile\nV_L1 = 5e-3 \nV_L2 = 5e-3 \n\n# lead voltages\nmu_L1 = 10e-3\nmu_L2 = 10.1e-3\n\n# mu_D vs V_D\nV_D_vec = np.linspace(0,5e-3,100)\n\nN = 0\nK = calculate_K(1e-3,3)\nmu_D0_vec = np.zeros(V_D_vec.size)\nE_TF0_vec = np.zeros(V_D_vec.size)\nfor i in range(V_D_vec.size):\n V_D = V_D_vec[i]\n V = np.array([V_L1, V_D, V_L2])\n \n mu_D, n = 
solve_TF(mu_L1,mu_L2,N,V,K)\n mu_D0_vec[i] = mu_D\n E_TF0_vec[i] = calculate_E_TF(mu_L1,mu_L2,mu_D,n,V,K) \n\nN = 1\nK = calculate_K(1e-3,3)\nmu_D1_vec = np.zeros(V_D_vec.size)\nE_TF1_vec = np.zeros(V_D_vec.size)\nfor i in range(V_D_vec.size):\n V_D = V_D_vec[i]\n V = np.array([V_L1, V_D, V_L2])\n \n mu_D, n = solve_TF(mu_L1,mu_L2,N,V,K)\n mu_D1_vec[i] = mu_D\n E_TF1_vec[i] = calculate_E_TF(mu_L1,mu_L2,mu_D,n,V,K) \n\nN = 2\nK = calculate_K(1e-3,3)\nmu_D2_vec = np.zeros(V_D_vec.size)\nE_TF2_vec = np.zeros(V_D_vec.size)\nfor i in range(V_D_vec.size):\n V_D = V_D_vec[i]\n V = np.array([V_L1, V_D, V_L2])\n \n mu_D, n = solve_TF(mu_L1,mu_L2,N,V,K)\n mu_D2_vec[i] = mu_D\n E_TF2_vec[i] = calculate_E_TF(mu_L1,mu_L2,mu_D,n,V,K) \n\nf, ax = plt.subplots(1)\nax.plot(V_D_vec,E_TF0_vec)\nax.plot(V_D_vec,E_TF1_vec)\nax.plot(V_D_vec,E_TF2_vec)\nax.set_xlabel('Gate voltage')\nax.set_ylabel('Thomas-Fermi energy')\nplt.show()\n","sub_path":"junk/markov/tf_energy_play.py","file_name":"tf_energy_play.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"316645524","text":"# program0805.py\nfname = \"program0805.py\"\nfile = None\ntry:\n file = open(fname, \"r\", encoding=\"utf-8\")\n for line in file:\n print(line, end=\"\")\nexcept FileNotFoundError:\n print(\"您要读取的文件不存在,请确认\")\nelse:\n print(\"文件读取正常结束\")\nfinally:\n print(\"文件正常关闭\")\n if file != None:\n file.close()\n","sub_path":"ch10a/ex1005.py","file_name":"ex1005.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"208827313","text":"#!/usr/bin/env python3\n\n# import libraries\nimport os\nimport psycopg2\n\n# import additional files\nimport config\n\ndef db_connect(connection_name):\n\t# establish database connection\n\tglobal db_conn\n\tdb_conn = psycopg2.connect(config.DB_URL)\n\tglobal db_crsr\n\tdb_crsr = db_conn.cursor()\n\tprint(f'database connected: {connection_name}')\n\n\treturn True\n\ndef db_stats():\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = False\"\"\")\n\tunverified_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True\"\"\")\n\tverified_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'br1'\"\"\")\n\tbr1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'eun1'\"\"\")\n\teun1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'euw1'\"\"\")\n\teuw1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'jp1'\"\"\")\n\tjp1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'kr'\"\"\")\n\tkr_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'la1'\"\"\")\n\tla1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'la2'\"\"\")\n\tla2_redditors = 
int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'na1'\"\"\")\n\tna1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'oc1'\"\"\")\n\toc1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'ru'\"\"\")\n\tru_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM flaired_redditors WHERE riot_verified = True AND riot_region = 'tr1'\"\"\")\n\ttr1_redditors = int(db_crsr.fetchone()[0])\n\n\tdb_crsr.execute(\"\"\"SELECT COUNT(*) FROM guide_submissions\"\"\")\n\tguide_submissions = int(db_crsr.fetchone()[0])\n\n\t# print connection properties\n\tprint(f'postgres connection info: {db_conn.get_dsn_parameters()}'\n\t\t+ f'\\nunverified redditors: {unverified_redditors}'\n\t\t+ f'\\nverified redditors: {verified_redditors}'\n\t\t+ f'\\nbr1: {br1_redditors}'\n\t\t+ f'\\neun1: {eun1_redditors}'\n\t\t+ f'\\neuw1: {euw1_redditors}'\n\t\t+ f'\\njp1: {jp1_redditors}'\n\t\t+ f'\\nkr: {kr_redditors}'\n\t\t+ f'\\nla1: {la1_redditors}'\n\t\t+ f'\\nla2: {la2_redditors}'\n\t\t+ f'\\nna1: {na1_redditors}'\n\t\t+ f'\\noc1: {oc1_redditors}'\n\t\t+ f'\\nru: {ru_redditors}'\n\t\t+ f'\\ntr1: {tr1_redditors}'\n\t\t+ f'\\nguide submissions: {guide_submissions}')\n\n# TABLE flaired_redditors\n# db_id SERIAL PRIMARY KEY\n# reddit_username VARCHAR(30) NOT NULL\n# riot_region VARCHAR(6) NOT NULL\n# riot_summoner_name VARCHAR(30) NOT NULL\n# riot_summoner_id VARCHAR(100) DEFAULT NULL\n# riot_verification_key VARCHAR(6) NOT NULL\n# riot_verified BOOLEAN DEFAULT False\n# riot_verified_rank VARCHAR(40) DEFAULT NULL\n# custom_flair VARCHAR(60) DEFAULT NULL\n\n# TABLE guide_submissions\n# db_id SERIAL PRIMARY KEY\n# reddit_id VARCHAR(7) NOT NULL\n# title VARCHAR(300) NOT NULL\n# author VARCHAR(30) NOT NULL\n# full_selftext VARCHAR(40000) NOT NULL\n# created_utc NUMERIC(10) NOT NULL\n# keyword_1 VARCHAR(50) DEFAULT NULL\n# keyword_2 VARCHAR(50) DEFAULT NULL\n# keyword_3 VARCHAR(50) DEFAULT NULL\n# keyword_4 VARCHAR(50) DEFAULT NULL\n# keyword_5 VARCHAR(50) DEFAULT NULL","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"363416514","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/9 12:00\n# @Author : Ruby\n# @FileName: hello.py\n# @Software: PyCharm\n\n\n# print(\"hello world\")\n# print(\"hello world\")\nfrom selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.get('http://www.baidu.com')\ndriver.find_element_by_id('kw').send_keys('selenium')\ndriver.find_element_by_id('su').click()\n#driver.close()\n#driver.quit()\n","sub_path":"hello/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"267945591","text":"from urllib.parse import urljoin\r\n\r\nfrom ..core import *\r\nfrom ..vparsers import *\r\nfrom ..utils import *\r\n\r\n\r\nclass HomeInvestParser(ConditionalWebpageParser):\r\n url = \"https://homeinvest.pl/\"\r\n method = \"GET\"\r\n \r\n page_limit = 15 #request params\r\n schema = [\r\n DataUnit(label=\"Inwestycja\", parser=DOMTextExtractor(), id=\"_inv\"),\r\n DataUnit(label=\"Budynek\", parser=DOMTextExtractor(), 
id=\"building\"),\r\n DataUnit(label=\"Nr lokalu\", parser=DOMTextExtractor(), id=\"number\"),\r\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\r\n DataUnit(label=\"Piętro\", parser=FloorParser(DOMTextExtractor()), id=\"floor\"),\r\n DataUnit(label=\"Powierzchnia\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\r\n DataUnit(label=\"Etap\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Rzut PDF\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\"),\r\n DataUnit(label=\"Rzut 3D\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Zapytaj o ofertę\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Dostępność\", parser=StatusParser(DOMTextExtractor()), id=\"status\")\r\n ]\r\n \r\n @attributeerror_wrapper(return_value=True)\r\n def is_last_page(self, soup):\r\n last_link = soup.find(\"ul\", {\"class\": \"pagination\"}).find_all(\"li\")[-1]\r\n return \"disabled\" in last_link.get(\"class\", [])\r\n \r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n records = soup.find(\"table\", {\"class\": \"homeinvest-wyniki\"})\\\r\n .find_all(\"tr\")[1:]\r\n return records\r\n \r\n def split_record(self, record):\r\n return record.find_all(\"td\")\r\n \r\n def get_request_params(self):\r\n params = super().get_request_params()\r\n params[\"limit\"] = self.page_limit\r\n self._start = getattr(self, \"_start\", -self.page_limit) + self.page_limit\r\n params[\"start\"] = self._start\r\n return params\r\n \r\n def modify_record(self, record, soup):\r\n record[\"fid\"] = record[\"number\"]\r\n record[\"plan\"] = urljoin(self.url, record[\"plan\"])\r\n return record\r\n","sub_path":"parsers/homeinvest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"153091977","text":"# Training\n# TRAIN_WIDER_PATH = \"../face-detection-project-prakash/WIDER_train/images/\"\n# TRAIN_WIDER_PATH = \"../FaceDetection-DSFD/data/ImageNet/train/\"\n# TRAIN_WIDER_PATH = \"../FaceDetection-DSFD/data/ImageNet/val/\"\nTRAIN_WIDER_PATH = \"../FDDB/\"\n# TRAIN_WIDER_PATH = \"./WIDER_train/\"\n\n# Validation\n# VAL_WIDER_PATH = \"../face-detection-project-prakash/WIDER_val/\"\nVAL_WIDER_PATH = None\n\n# Testing\n#TEST_WIDER_PATH = \"./WIDER_test/\"\nTEST_WIDER_PATH = None\n\n# Ground Truth\n# GROUND_TRUTH_PATH = \"../face-detection-project-prakash/wider_face_split/\"\n# GROUND_TRUTH_PATH = \"../FaceDetection-DSFD/\"\n# GROUND_TRUTH_PATH = \"../floating_head_generation/\"\nGROUND_TRUTH_PATH = \"../FDDB/\"\n\n# GROUND_TRUTH_FILENAME = \"wider_face_train_bbx_gt.txt\"\n# GROUND_TRUTH_FILENAME = \"imagenet_bbox_randomized_train.txt\"\n# GROUND_TRUTH_FILENAME = \"imagenet_bbox_randomized_val.txt\"\n# GROUND_TRUTH_FILENAME = \"category_3_without_contour.txt\"\nGROUND_TRUTH_FILENAME = 'fddb_wider_annot.txt'\n\n# Output\nOUTPUT_PATH = \"./output_cat1/\"\n\n\n# SHARD SIZE\n# SHARD_SIZE = 13000 #for train\nSHARD_SIZE = 4000 #for val\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"604840042","text":"#!/usr/bin/python\n\nimport sys,os,inspect\n\n# Add the path of this file to the python import path. 
I wonder if there's a better way to do this?\nsys.path.append(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])))\nfrom aci_model_plugin import ACIModelPlugin\n\nclass ActionModule(ACIModelPlugin):\n\tdef run(self, tmp=None, task_vars=None, **kwargs):\n\t\tresult = super(ActionModule, self).run(tmp, task_vars)\n\n\t\tmodule_args = result['invocation']['module_args']\n\n\t\tnew_module_args = self._create_common_module_args(\n\t\t\t\"uni/tn-%s\"%(module_args['tenant_name']),\n\t\t\tmodule_args)\n\n\t\tnew_module_args['body'] = self._create_mo('fvTenant',\n\t\t\tname=module_args['tenant_name'])\n\n\t\treturn self._execute_module(module_name='aci_model', module_args=new_module_args, tmp=tmp, task_vars=task_vars)","sub_path":"roles/aci-model/action_plugins/aci_tenant.py","file_name":"aci_tenant.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"473779027","text":"class Solution(object):\n def convert(self, suffix):\n \tstack = []\n \tlist = suffix.split(' ')\n \ti = 0\n \tpre = ''\n \twhile i\n div.block-container {\n max-width: 1200px !important;\n }\n\n'''\nst.markdown(css, unsafe_allow_html=True)\n\n\ndef section(num: int, s: str) -> None:\n st.markdown(f'#### [{num}. {s}](https://nlp100.github.io/ja/ch04.html#{num}-{s})')\n\n\nst.markdown('## 第4章: 形態素解析')\n\n\nsection(30, '形態素解析結果の読み込み')\n\nst.markdown('''\n> 形態素解析結果(neko.txt.mecab)を読み込むプログラムを実装せよ.ただし,各形態素は表層形(surface),基本形(base),品詞(pos),品詞細分類1(pos1)をキーとするマッピング型に格納し,1文を形態素(マッピング型)のリストとして表現せよ.第4章の残りの問題では,ここで作ったプログラムを活用せよ\n''')\n\nst.markdown('''\n **MeCab version: 0.996** \n `./neko.txt.mecab` が存在していると仮定 \n 各行は `表層形\\t品詞,品詞細分類1,品詞細分類2,品詞細分類3,活用形,活用型,原形,読み,発音` の形式 \n''')\n\n\nwith st.echo():\n with open('./neko.txt.mecab') as f:\n lines = f.readlines()\n\n # 最終結果が入るリストを定義.\n sentences: list[list[dict]] = []\n\n # 1文中の形態素が格納されるリストを定義.\n # 1文を読み切るたびに結果に追加して、自身は初期化する.\n sentence: list[dict] = []\n\n for line in lines:\n if line.strip() == 'EOS':\n if not sentence:\n continue\n sentences.append(sentence)\n sentence = []\n else:\n surface, others = line.split('\\t')\n elms = others.split(',')\n pos, pos1, base = elms[0], elms[1], elms[6]\n sentence.append({\n 'surface': surface,\n 'pos': pos,\n 'pos1': pos1,\n 'base': base,\n })\n else:\n if sentence:\n sentences.append(sentence)\n\nst.markdown('1文目(吾輩は..)の確認')\nst.write(sentences[1])\n\n\nsection(31, '動詞')\n\nst.markdown('''\n> 動詞の表層形をすべて抽出せよ.\n''')\n\nwith st.echo():\n verb_surface_set = set(d['surface'] for d in itertools.chain(*sentences) if d['pos'] == '動詞')\n\nst.info(\n '\\n\\n'.join([\n '愚直に書くと以下の様に2重for-loopで書くことになります.',\n 'ただ今回の場合文章毎に処理する必要がなく、全形態素について処理すればいいだけなので **itertools.chain** で'\n '2重リストをフラットにして一度に処理してしまうのが良さそうです. 動詞を判定する条件も簡単なのでジェネレータ式で簡単に書きます. ',\n '**set()** の引数には Iterator をそのまま書けるのでジェネレータ式を直接書いています. '\n ])\n)\n\n\nwith st.echo():\n # 愚直解法\n verb_surface_set_verbose = set()\n for sentence in sentences:\n for morph in sentence:\n if morph['pos'] == '動詞':\n verb_surface_set_verbose.add(morph['surface'])\n\n\nassert verb_surface_set == verb_surface_set_verbose\n\n\nsection(32, '動詞の基本形')\n\nst.markdown('''\n> 動詞の基本形をすべて抽出せよ.\n''')\n\nwith st.echo():\n verb_base_set = set(d['base'] for d in itertools.chain(*sentences) if d['pos'] == '動詞')\n\nst.info(\n '\\n\\n'.join([\n '31. 
動詞 と同様です.',\n ])\n)\n\n\nsection(33, '「AのB」')\n\nst.markdown('''\n> 2つの名詞が「の」で連結されている名詞句を抽出せよ.\n''')\n\nwith st.echo():\n a_of_b_list: list[str] = []\n # 文を跨いで連結されていても意味がないので、文ごとの処理となります\n for sentence in sentences:\n # 連続した3つの形態素に興味があるので、0番目から最後から3番目の形態素までのfor-loopにします.\n for i in range(len(sentence)-2):\n morph1 = sentence[i]\n morph2 = sentence[i+1]\n morph3 = sentence[i+2]\n # 全ての条件を and でつなぐときには all関数を使うと可読性が向上することがあります\n if all([\n morph1['pos'] == '名詞',\n morph2['surface'] == 'の' and morph2['pos1'] == '連体化',\n morph3['pos'] == '名詞',\n ]):\n a_of_b_list.append(f'{morph1[\"surface\"]}{morph2[\"surface\"]}{morph3[\"surface\"]}')\n\nst.write('先頭の5つ確認')\nst.write(a_of_b_list[:5])\n\nsection(34, '名詞の連接')\n\nst.markdown('''\n> 名詞の連接(連続して出現する名詞)を最長一致で抽出せよ.\n''')\n\nwith st.echo():\n # 最長の連接を与える形態素のリストを用意\n max_morphs: list[dict] = []\n for sentence in sentences:\n # カウンタを用意\n i = 0\n # 一時的な連接を保持する形態素のリスト\n morphs: list[dict] = []\n while i < len(sentence):\n morph = sentence[i]\n if morph['pos'] == '名詞':\n # 名詞が続く場合は一時変数に追加\n morphs.append(morph)\n else:\n # 名詞が終わった場合は、暫定の最長のものと比較して更新可否判定\n if morphs and len(morphs) > len(max_morphs):\n max_morphs = morphs\n morphs = []\n i += 1\n\nst.write('確認')\nst.write(''.join(morph['surface'] for morph in max_morphs))\n\n\nsection(35, '単語の出現頻度')\n\nst.markdown('''\n> 文章中に出現する単語とその出現頻度を求め,出現頻度の高い順に並べよ\n''')\n\nst.warning(\n '語/単語の定義について十分な理解がなく、以下のコードは全て形態素 := 単語 とみなしたようなコードになっています\\n\\n'\n '助詞等を排除する前処理や複数の連続する名詞の形態素を一定のルールでマージするような処理が必要な可能性があります.'\n)\n\n\nwith st.echo():\n # 何かをカウントするときには、自動的にキーを初期化してくれる defaultdict を使用すると便利です\n d: defaultdict = defaultdict(int)\n # 前述の通り文章ごとに考える必要がないので itertools.chain でフラットにして1重のfor-loopで処理\n for morph in itertools.chain(*sentences):\n d[morph['surface']] += 1\n # 出現数でソート\n occurrences = list(sorted([(k, v) for k, v in d.items()], key=lambda x: x[1], reverse=True))\n\nst.write('先頭5単語を確認')\nst.write(occurrences[:5])\n\nst.info('最後の出現数でソートする部分 ( **occurrences** を計算する部分 ) のコードですが、愚直解法は以下のようになります。これを簡潔に書いたものと思ってください.')\n\nwith st.echo():\n # カウンタ(辞書) を、出現単語と出現回数を示すタプルのリストに変換する\n tuples: list[Tuple[str, int]] = []\n for k, v in d.items():\n tuples.append((k, v))\n # タプルのリストを タプルの1番目の値(:= 出現回数) を元にソートします\n occurrences_verbose = sorted(tuples, key=lambda t: t[1], reverse=True)\n\nassert occurrences == occurrences_verbose\n\nsection(36, '頻度上位10語')\n\nst.markdown('''\n> 出現頻度が高い10語とその出現頻度をグラフ(例えば棒グラフなど)で表示せよ\n''')\n\n\nst.write('pandasと[altair](https://altair-viz.github.io/)を使います')\n\nwith st.echo():\n df = pd.DataFrame({\n '単語': [x[0] for x in occurrences[:10]],\n '出現回数': [x[1] for x in occurrences[:10]],\n })\n c = alt.Chart(df).mark_bar().encode(\n x=alt.X(\"単語:O\", sort='-y'),\n y=alt.Y(\"出現回数:Q\"),\n ).properties(height=500)\n st.altair_chart(c, use_container_width=True)\n\n\nsection(37, '「猫」と共起頻度の高い上位10語')\n\nst.markdown('''\n> 「猫」とよく共起する(共起頻度が高い)10語とその出現頻度をグラフ(例えば棒グラフなど)で表示せよ\n''')\n\nwith st.echo():\n # 猫と共起する単語のカウンタを初期化\n cat_d: defaultdict = defaultdict(int)\n for sentence in sentences:\n for i, morph in enumerate(sentence):\n # 自身が '猫' でなくて、前後いずれかが '猫' のものがカウント対象\n if morph['surface'] != '猫' and (\n (i > 0 and sentence[i-1]['surface'] == '猫') or\n (i < len(sentence) - 1 and sentence[i+1]['surface'] == '猫')\n ):\n cat_d[morph['surface']] += 1\n # 出現数でソート (先述した方法)\n cat_co_occurrences = list(sorted([(k, v) for k, v in cat_d.items()], key=lambda x: x[1], reverse=True))\n # グラフ描画\n df = pd.DataFrame({\n '単語': [x[0] for x in cat_co_occurrences[:10]],\n '出現回数': [x[1] for x in cat_co_occurrences[:10]],\n })\n c = 
alt.Chart(df).mark_bar().encode(\n x=alt.X(\"単語:O\", sort='-y'),\n y=alt.Y(\"出現回数:Q\"),\n ).properties(height=500)\n st.altair_chart(c, use_container_width=True)\n\n\nsection(38, 'ヒストグラム')\n\nst.markdown('''\n> 単語の出現頻度のヒストグラムを描け.ただし,横軸は出現頻度を表し,1から単語の出現頻度の最大値までの線形目盛とする.縦軸はx軸で示される出現頻度となった単語の異なり数(種類数)である\n''')\n\nwith st.echo():\n df = pd.DataFrame({\n '単語': [x[0] for x in occurrences],\n '出現回数': [x[1] for x in occurrences],\n })\n c = alt.Chart(df).mark_bar().encode(\n x=alt.X(\n \"出現回数:Q\",\n bin=alt.Bin(step=1, extent=[0, occurrences[0][1]]),\n ),\n y=alt.Y(\"count():Q\"),\n ).properties(height=500)\n st.altair_chart(c, use_container_width=True)\n\n\nsection(39, 'Zipfの法則')\n\nst.markdown('''\n> 単語の出現頻度順位を横軸,その出現頻度を縦軸として,両対数グラフをプロットせよ\n''')\n\nwith st.echo():\n c = alt.Chart(df).mark_bar().encode(\n x=alt.X(\n \"出現回数:Q\",\n scale=alt.Scale(type='log')\n ),\n y=alt.Y(\n \"count():Q\",\n scale=alt.Scale(type='log')\n ),\n ).properties(height=500)\n st.altair_chart(c, use_container_width=True)\n","sub_path":"2023/nagura/ch04/4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"546492375","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/xadmin/migrations/0004_auto_20180722_1448.py\n# Compiled at: 2018-07-22 02:49:51\n# Size of source mod 2**32: 783 bytes\nfrom __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.utils.timezone\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('xadmin', '0003_auto_20160715_0100')]\n operations = [\n migrations.AlterField(model_name='log',\n name='action_time',\n field=models.DateTimeField(db_index=True, default=(django.utils.timezone.now), editable=False, verbose_name='action time')),\n migrations.AlterField(model_name='log',\n name='object_id',\n field=models.CharField(blank=True, db_index=True, max_length=191, null=True, verbose_name='object id'))]","sub_path":"pycfiles/xadmin_croxlink2-0.7.0.1-py3.6/0004_auto_20180722_1448.cpython-36.py","file_name":"0004_auto_20180722_1448.cpython-36.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"33110625","text":"def junta_nome_sobrenome(x,y):\n i=0\n z=[]\n while i<=(n-1):\n z.append(x[i]+y[i])\n i+=1\n return z\nx=[\"enrico\",\"joao\", \"pedro\"]\ny=[\" de deus\", \" da silva\", \" augustu\"]\nn=len(x)\n","sub_path":"backup/user_116/ch50_2020_03_26_00_31_55_780131.py","file_name":"ch50_2020_03_26_00_31_55_780131.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"189895405","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nfrom accounts.views import profile\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'views.home', name='home'),\n # url(r'^/', include('foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 
'views.home', name='home'),\n url(r'^test/$', 'views.test'),\n url(r'^sso_process/$', 'views.sso_process'),\n url(r'^accounts/', include('accounts.urls')),\n url(r'^workspace/', include('workspace.urls')),\n url(r'^ctc/', include('ctc.urls')),\n url(r'^grp/', include('grp.urls')),\n url(r'^base.html/$', TemplateView.as_view(template_name =\"base.html\")),\n)\n","sub_path":"srportal/spweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"362847212","text":"\n# Define model\n\nfrom architecture import model_architecture\nfrom data import SIZE_X, SIZE_Y, IMG_CH\n\nimage_shape = (SIZE_Y, SIZE_X, IMG_CH)\nmodel = model_architecture(image_shape)\n\nmodel.compile(loss = 'mse', optimizer = 'adam', metrics=['mae'])\n\n# Train model\n\nfrom data import train_generator, validation_generator\nfrom data import load_data\n\n# Load data\ntrain_samples, validation_samples = load_data()\n\n# Define parameters\nbatch_size = 64\nnb_epoch = 40\nsamples_per_epoch = len(train_samples)\nnb_val_samples = len(validation_samples)\n\nsteps_per_epoch = 100 #int( len(train_samples) / batch_size )\nvalidation_steps = 10 #int( len(validation_samples) / batch_size )\n\n# Define history callbacks\nfrom visualisation import LossHistory\nhistory = LossHistory()\n\n# Fit\nprint(\"Training network..\")\nfor epoch in range(nb_epoch):\n\n # Fit one epoch \n non_zero_bias = 1/(1 + epoch / 5.)\n #non_zero_bias = 1.\n print(\"Non zero bias = \" + str(non_zero_bias)) \n \n # Define data generators \n train = train_generator(train_samples, batch_size, non_zero_bias)\n validation = train_generator(validation_samples, batch_size)\n\n model.fit_generator(train,\n steps_per_epoch = steps_per_epoch, \n initial_epoch = epoch,\n epochs = epoch + 1, \n verbose = 1,\n validation_data = validation, \n validation_steps = validation_steps, \n callbacks=[history])\n\nprint(\"Network trained!\")\n\n# Plot loss graph\nfrom visualisation import loos_graph\nloos_graph(history)\n\n# Save model\n\nmodel.save('model.h5')\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"217735474","text":"# flask app\nfrom hailfast import app\nfrom flask import request, render_template\nimport requests\n\n# keys\nfrom API_KEY import api_token, APPLICATION_ID, REST_API_KEY, MASTER_KEY\n\n# parse\nfrom parse_rest.connection import register\nfrom parse_rest.datatypes import Object, GeoPoint\n\n#mongolab\n#from pymongo import MongoClient\n\n# misc\nfrom datetime import datetime\nfrom pytz import timezone\nimport pytz\nimport re\nfrom math import radians, cos, sin, asin, sqrt\n#import pudb\n\nregister(APPLICATION_ID, REST_API_KEY)\nYO_API = \"https://api.justyo.co/yo/\"\neastern = timezone('US/Eastern')\nutc = pytz.utc\n\nclass IntersectionYo(Object):\n pass\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n\n # 6367 km is the radius of the Earth\n # 3958.75 mi is radius of the Earth\n mi = 3958.75 * c\n return mi\n\ndef 
getIntersection(lat, lon, radius):\n now = datetime.now(utc)\n dateEST = now.astimezone(eastern)\n now_Hour = dateEST.strftime('%H')\n now_Day = dateEST.weekday()\n\n if (now_Day < 4):\n # weekday\n now_Day = 'weekDay'\n else:\n # weekend\n now_Day = 'weekEnd'\n\n dayHour = now_Day + now_Hour\n\n currentLocation = GeoPoint(lat, lon)\n nearbyIntersects = IntersectionYo.Query.filter(point__nearSphere=currentLocation).limit(95)\n\n maxValue = [-1,'default']\n\n for intersection in nearbyIntersects:\n distance = haversine(lon, lat, intersection.point.longitude, intersection.point.latitude)\n if (distance < radius):\n val = float(getattr(intersection, dayHour))\n \n if (val > maxValue[0]):\n maxValue[0] = val\n maxValue[1] = intersection.intersectionID\n else:\n break\n\n # Get Lat Long of maxValue\n if (maxValue[0] == -1):\n return None\n else:\n result = IntersectionYo.Query.get(intersectionID=maxValue[1])\n return result.Name\n\n\ndef send_yo(username, link):\n \"\"\"Yo a username\"\"\"\n\n #print username\n #print link\n\n requests.post(\n YO_API,\n data={'api_token': api_token, 'username': username, 'link': link})\n\n\n@app.route('/')\ndef main():\n \"\"\"Index Controller\"\"\"\n return render_template('index.html')\n\n\n@app.errorhandler(404)\ndef handle_error(e):\n return render_template('404.html')\n\n\n@app.route('/noresult')\ndef noresult():\n now = datetime.now(utc)\n dateEST = now.astimezone(eastern)\n now_str = dateEST.strftime('%B %d %I:%M %p (EST)');\n return render_template('noresult.html',DATE_ = now_str)\n\n\n@app.route('/response')\ndef response():\n fiveHundoFt = request.args.get('fiveHundoFt')\n fiveHundoFt = re.sub(r\"_\",\" \",fiveHundoFt)\n fiveHundoFt = re.sub(r\"And\",\"&\",fiveHundoFt)\n #print fiveHundoFt\n\n quarterMi = request.args.get('quarterMi')\n quarterMi = re.sub(r\"_\",\" \",quarterMi)\n quarterMi = re.sub(r\"And\",\"&\",quarterMi)\n\n #print quarterMi\n\n now = datetime.now(utc)\n dateEST = now.astimezone(eastern)\n now_str = dateEST.strftime('%B %d %I:%M %p');\n\n return render_template('response.html',\n DATE_ = now_str,\n FIVEHUNDO_ = fiveHundoFt,\n QUARTER_ = quarterMi\n )\n\n@app.route('/yo')\ndef yo():\n \"\"\"Handle callback request\"\"\"\n username = request.args.get('username')\n location = request.args.get('location')\n splitted = location.split(';')\n latitude = float(splitted[0])\n longitude = float(splitted[1])\n\n fiveHundoFt = getIntersection(latitude,longitude, 0.10) # about 500 feet or 2 blocks\n\n quarterMi = getIntersection(latitude, longitude, 0.25) # roughly 5 blocks\n\n\n if quarterMi is None:\n send_yo(username, 'http://hailfast.herokuapp.com/noresult')\n else:\n link = \"http://hailfast.herokuapp.com/response?fiveHundoFt={0}&quarterMi={1}\".format(fiveHundoFt, quarterMi)\n send_yo(username, link)\n return 'OK'","sub_path":"hailfast/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"341185742","text":"# 1163 Last Substring in Lexicographical Order (Hard)\n\n# Given a string s, return the last substring of s in lexicographical order.\n\n\n# Example 1:\n\n# Input: s = \"abab\"\n# Output: \"bab\"\n# Explanation: The substrings are[\"a\", \"ab\", \"aba\", \"abab\", \"b\", \"ba\", \"bab\"]. 
The lexicographically maximum substring is \"bab\".\n# Example 2:\n\n# Input: s = \"leetcode\"\n# Output: \"tcode\"\n\n\n# Constraints:\n\n# 1 <= s.length <= 4 * 10^5\n# s contains only lowercase English letters.\n\nclass Solution:\n def lastSubstring(self, s: str) -> str:\n # Two-pointer scan: i is the best suffix start so far, j a challenger, k the matched length.\n i, j, k = 0, 1, 0\n n = len(s)\n while j + k < n:\n if s[i+k] == s[j+k]:\n k += 1\n continue\n elif s[i+k] > s[j+k]:\n # Challenger loses: positions j through j+k are all beaten, so skip past them.\n j = j + k + 1\n else:\n # Candidate loses: everything it covered is beaten; restart the challenger just after the new candidate.\n i = max(i + k + 1, j)\n j = i + 1\n k = 0\n return s[i:]\n","sub_path":"2021_problems/1163_lastsubstringinlexicographicalorder_hard.py","file_name":"1163_lastsubstringinlexicographicalorder_hard.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"408411907","text":"rocketScale = int(input(\"Rocket Scale (ODD NUMBERS ONLY) : \"))\n\nLEFT_WALL = \"/\"\nRIGHT_WALL = \"\\\\\"\nSPACE = \" \"\nBASE = \"=\"\nCORNER = \"+\"\nVERTICAL_WALL = \"|\"\n\n\ndef tip():\n for i in range(0, rocketScale):\n print(SPACE * int(rocketScale - i) + LEFT_WALL + SPACE * i * 2 + RIGHT_WALL)\n print(CORNER + BASE * rocketScale * 2 + CORNER)\n\n\ndef body():\n for i in range(0, rocketScale):\n print(VERTICAL_WALL + SPACE * (rocketScale * 2) + VERTICAL_WALL)\n print(CORNER + BASE * rocketScale * 2 + CORNER)\n\n\ndef thruster():\n if rocketScale == 1:\n print(\" /\\\\ \")\n else:\n for i in range(0, int((rocketScale - 1) / 2)):\n print(SPACE * int((rocketScale / 2) - i) + LEFT_WALL + SPACE * i * 2 + RIGHT_WALL, end=\"\")\n print(SPACE * (int((rocketScale / 2) - i)) * 2 + LEFT_WALL + SPACE * i * 2 + RIGHT_WALL + SPACE * int(rocketScale - i))\n\n\ntip()\nbody()\nbody()\nbody()\nthruster()\n","sub_path":"Modular Programming/week02/lab02_justTheTip.py","file_name":"lab02_justTheTip.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"335415976","text":"class Group(object):\r\n def __init__(self, _name):\r\n self.name = _name\r\n self.groups = []\r\n self.users = []\r\n\r\n def add_group(self, group):\r\n self.groups.append(group)\r\n\r\n def add_user(self, user):\r\n self.users.append(user)\r\n\r\n def get_groups(self):\r\n return self.groups\r\n\r\n def get_users(self):\r\n return self.users\r\n\r\n def get_name(self):\r\n return self.name\r\n\r\n\r\nparent = Group(\"parent\")\r\nparent.add_user(\"parent_user_1\")\r\nparent.add_user(\"parent_user_2\")\r\n\r\nchild = Group(\"child\")\r\nchild.add_user(\"child_user_1\")\r\nchild.add_user(\"child_user_2\")\r\n\r\nsub_child = Group(\"sub_child\")\r\nsub_child_user = \"sub_child_user\"\r\nsub_child.add_user(sub_child_user)\r\n\r\nsub_sub_child_1 = Group(\"sub_sub_child_1\")\r\nsub_sub_child_2 = Group(\"sub_sub_child_2\")\r\nsub_sub_child_2.add_user(\"sub_sub_child_2_user\")\r\n\r\nsub_child.add_group(sub_sub_child_1)\r\nsub_child.add_group(sub_sub_child_2)\r\nchild.add_group(sub_child)\r\nparent.add_group(child)\r\n\r\n\r\ndef is_user_in_group(user, group):\r\n \"\"\"\r\n Return True if user is in the group, False otherwise.\r\n\r\n Args:\r\n user(str): user name/id\r\n group(class:Group): group to check user membership against\r\n \"\"\"\r\n name = user if '/' not in user else user[0:user.find('/')]\r\n if name in group.get_users() :\r\n return True\r\n \r\n for sub_group in group.get_groups() :\r\n if is_user_in_group(name, sub_group) :\r\n return True\r\n \r\n return False\r\n\r\nprint(is_user_in_group('sub_child_user/123', parent))\r\n# 
True\r\nprint(is_user_in_group('unknown/123', parent))\r\n# False\r\nprint(is_user_in_group('parent_user_1/123', parent))\r\n# True\r\nprint(is_user_in_group('child_user_1/123', parent))\r\n# True\r\nprint(is_user_in_group('sub_sub_child_2_user/123', parent))\r\n# True","sub_path":"P1 - Data Structures/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"314604082","text":"#! /usr/bin/env python \n# -*- coding: utf-8 -*- \n#Author:Chao Email:helloworldchao@outlook.com Version:0.5\n\nimport shutil, os, time\nfrom os.path import getsize, join, isdir, isfile\n\ndef copyFolder(sourceDir, targetDir):\n\n\tsizeS = 0\n\tsizeT = 0\n\t\n\t# Walk the source and target directories separately to work out their total sizes\n\tfor root, dir, file in os.walk(sourceDir):\n\t\tsizeS += sum([getsize(join(root, name)) for name in file])\n\t\n\tfor root, dir, file in os.walk(targetDir):\n\t\tsizeT += sum([getsize(join(root, name)) for name in file])\n\t\n\t# If the sizes differ, the source directory has changed and needs to be re-synced\n\tif(sizeS != sizeT):\n\t\t# Delete an existing target directory before syncing so that copytree does not raise an error\n\t\tif(os.path.exists(targetDir)):\n\t\t\tshutil.rmtree(targetDir)\n\t\t\n\t\tshutil.copytree(sourceDir, targetDir)\n\n\t\nif __name__ == \"__main__\":\n\t\n\tsourceDir = \"C:\\\\Users\\\\Vincent\\\\Documents\\\\Rockstar Games\\\\GTA V\" # source directory\n\ttargetDir = \"F:\\\\OneDrive\\\\GTA5\" # target directory\n\n\twhile True:\n\t\tcopyFolder(sourceDir, targetDir)\n\t\ttime.sleep(300) # check in the background every 5 minutes whether the source directory has changed\n","sub_path":"Auto-Sync.py","file_name":"Auto-Sync.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"243855545","text":"import numpy as np\n\ndef getEntropyFromHC(HC_T):\n Temps = HC_T[:, 0]\n specific_heat = HC_T[:, 1] \n \n Entropy_array=[]\n \n Entropy=np.log(2)\n Entropy_array.append(Entropy)\n \n for T in range(len(Temps)-1):\n dEntropy=0\n for t in range(T, 0 , -1):\n dT = Temps[t-1] - Temps[t] \n dEntropy+= dT * specific_heat[t]/Temps[t]\n Entropy= ( np.log(2) - dEntropy ) #+ 1./Temps[0] * E0\n Entropy_array.append(Entropy)\n return Entropy_array\n","sub_path":"Applications/getEntropyFromHC.py","file_name":"getEntropyFromHC.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"539228697","text":"\"\"\"\n Created by Amirk on 2018-07-21.\n\"\"\"\nfrom app.libs.error import APIExcption\n\n\nclass Success(APIExcption):\n code = 201\n error_code = 0\n msg = 'ok'\n\n\nclass DeleteSuccess(Success):\n code = 202\n error_code = 1\n\n\nclass ServerError(APIExcption):\n code = 500\n error_code = 1000\n msg = \"sorry, the server made an unknown error ‘(*>﹏<*)′ \"\n\n\nclass ClientTypeError(APIExcption):\n code = 400\n error_code = 1006\n msg = 'client is invalid'\n\n\nclass ParameterException(APIExcption):\n code = 400\n error_code = 1001\n\n\nclass NotFound(APIExcption):\n code = 404\n error_code = 1001\n msg = \"The resource is not found \"\n\n\nclass AuthFailed(APIExcption):\n code = 401\n error_code = 1005\n msg = 'authorization failed'\n\n\nclass Forbidden(APIExcption):\n code = 403\n error_code = 1004\n msg = 'Forbidden permissions'\n","sub_path":"genger/app/libs/error_code.py","file_name":"error_code.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"641525971","text":"#DSA-Tryout\n\nimport sys\n\nsys.setrecursionlimit(10000) #This is to 
overcome default python recursion limit\n\ndef fibonacci(num):\n if num == 0:\n return 0\n elif num == 1:\n return 1\n \n if num in memo:\n return memo[num]\n else:\n value = fibonacci(num - 1) + fibonacci(num - 2)\n memo[num] = value\n return value\n\nmemo={} #global dictionary to store the fibonacci numbers already computed\nprint(\"Fibonacci number:\", fibonacci(55))\n","sub_path":"Data-Structures-and-Algorithms/Day-6/Exercises/Exercise-26.py","file_name":"Exercise-26.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"300996738","text":"import socket\nimport threading\nimport json\nimport queue\nimport time\n\ndef server(q):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(connect)\n test = {\n \"type\": \"search\",\n \"name\": \"room\"\n }\n write(s, test)\n result = read(s)\n if not result['msg']:\n test = {\n 'type': 'create',\n 'name': 'room',\n 'address': address['server']\n }\n write(s, test)\n read(s)\n q.put(read(s))\n s.close()\n\ndef client(q):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(connect)\n test = {\n \"type\": \"search\",\n \"name\": \"room\"\n }\n write(s, test)\n result = read(s)\n if result['msg']:\n test = {\n 'type': 'join',\n 'name': 'room',\n 'address': address['client']\n }\n write(s, test)\n q.put(read(s))\n s.close()\n\ndef error(q): \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(connect)\n msg = {\"wha\": \"ha ha\"}\n write(s, msg)\n read(s)\n\n\ndef write(s, data):\n msg = json.dumps(data)\n print(msg)\n s.send(msg.encode())\n\ndef read(s):\n msg = s.recv(1024)\n print(msg.decode())\n return json.loads(msg.decode())\n\naddress = {\n 'server': {'lan': '192.168.1.9', 'port':8888},\n 'client': {'lan': '192.168.1.37', 'port': 54312}\n}\nconnect = ('127.0.0.1', 8122)\n\nq = queue.Queue()\n# Start the server thread first so the room exists, then the client that joins it;\n# each thread puts the message it receives onto the shared queue.\nthreading.Thread(target=server, args=(q,)).start()\ntime.sleep(1)\nthreading.Thread(target=client, args=(q,)).start()\ncmsg = q.get()\nsmsg = q.get()\nprint(cmsg['address']['port'] == address['server']['port'])\nprint(smsg['address']['port'] == address['client']['port'])\n\n","sub_path":"server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"601676013","text":"import importlib\nimport logging\nimport os\nimport traceback\n\nfrom apps import zero_app\nfrom ui import Printer, Menu\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ListWithMetadata(list):\n ordering_alias = None\n\n\nclass AppManager(object):\n subdir_menus = {}\n \"\"\" Example of subdir_menus:\n {'apps/network_apps': <ui.menu.Menu object>, \n ...\n 'apps/system_apps': <ui.menu.Menu object>}\n \"\"\"\n app_list = {}\n \"\"\"Example of app_list:\n {'apps/network_apps/wpa_cli': <app module>, \n 'apps/system_apps/system': <app module>, \n ...\n 'apps/network_apps/network': <app module>}\n \"\"\"\n\n def __init__(self, app_directory, i, o):\n self.app_directory = app_directory\n self.i = i\n self.o = o\n\n def load_all_apps(self):\n base_menu = Menu([], self.i, self.o, \"Main app menu\",\n exitable=False) # Main menu for all applications.\n base_menu.exit_entry = [\"Exit\", \"exit\"]\n base_menu.process_contents()\n self.subdir_menus[self.app_directory] = base_menu\n for path, subdirs, modules in app_walk(self.app_directory):\n for subdir in subdirs:\n # First, we create subdir menus (not yet linking because they're not created in correct order) and put them in subdir_menus.\n subdir_path = os.path.join(path, subdir)\n 
self.subdir_menus[subdir_path] = Menu([], self.i, self.o, subdir_path)\n for _module in modules:\n # Then, we load modules and store them along with their paths\n try:\n module_path = os.path.join(path, _module)\n app = self.load_app(module_path)\n logger.info(\"Loaded app {}\".format(module_path))\n self.app_list[module_path] = app\n except Exception as e:\n logger.error(\"Failed to load app {}\".format(module_path))\n logger.error(traceback.format_exc())\n Printer([\"Failed to load\", os.path.split(module_path)[1]], self.i, self.o, 2)\n for subdir_path in self.subdir_menus:\n # Now it's time to link menus to parent menus\n if subdir_path == self.app_directory:\n continue\n parent_path = os.path.split(subdir_path)[0]\n ordering = self.get_ordering(parent_path)\n parent_menu = self.subdir_menus[parent_path]\n subdir_menu = self.subdir_menus[subdir_path]\n subdir_menu_name = self.get_subdir_menu_name(subdir_path)\n # Inserting by the ordering given\n parent_menu_contents = self.insert_by_ordering([subdir_menu_name, subdir_menu.activate],\n os.path.split(subdir_path)[1], parent_menu.contents,\n ordering)\n parent_menu.set_contents(parent_menu_contents)\n for app_path in self.app_list:\n # Last thing is attaching applications to the menu structure created.\n app = self.app_list[app_path]\n subdir_path, app_dirname = os.path.split(app_path)\n ordering = self.get_ordering(subdir_path)\n menu_name = app.menu_name if hasattr(app, \"menu_name\") else app_dirname.capitalize()\n self.bind_callback(app, app_path, menu_name, ordering, subdir_path)\n return base_menu\n\n def bind_callback(self, app, app_path, menu_name, ordering, subdir_path):\n if hasattr(app, \"callback\") and callable(app.callback): # for function based apps\n subdir_menu = self.subdir_menus[subdir_path]\n subdir_menu_contents = self.insert_by_ordering([menu_name, app.callback], os.path.split(app_path)[1],\n subdir_menu.contents, ordering)\n subdir_menu.set_contents(subdir_menu_contents)\n return True\n if hasattr(app, \"on_start\") and callable(app.on_start): # for class based apps\n subdir_menu = self.subdir_menus[subdir_path]\n subdir_menu_contents = self.insert_by_ordering([menu_name, app.on_start], os.path.split(app_path)[1],\n subdir_menu.contents, ordering)\n subdir_menu.set_contents(subdir_menu_contents)\n else:\n logger.debug(\"App \\\"{}\\\" has no callback; loading silently\".format(menu_name))\n\n def load_app(self, app_path):\n app_import_path = app_path.replace('/', '.')\n # If user runs in single-app mode and by accident\n # autocompletes the app name too far, it shouldn't fail\n main_py_string = \".main.py\"\n if app_import_path.endswith(main_py_string):\n app_import_path = app_import_path[:-len(main_py_string)]\n app = importlib.import_module(app_import_path + '.main', package='apps')\n if is_class_based_module(app):\n zero_app_subclass = get_zeroapp_class_in_module(app)\n app = zero_app_subclass(self.i, self.o)\n else:\n app.init_app(self.i, self.o)\n return app\n\n def get_subdir_menu_name(self, subdir_path):\n \"\"\"This function gets a subdirectory path and imports __init__.py from it. It then gets _menu_name attribute from __init__.py and returns it. 
\n If failed to either import __init__.py or get the _menu_name attribute, it returns the subdirectory name.\"\"\"\n subdir_import_path = subdir_path.replace('/', '.')\n try:\n subdir_object = importlib.import_module(subdir_import_path + '.__init__')\n return subdir_object._menu_name\n except Exception as e:\n logger.error(\"Exception while loading __init__.py for subdir {}\".format(subdir_path))\n logger.error(e)\n return os.path.split(subdir_path)[1].capitalize()\n\n def get_ordering(self, path, cache=None):\n \"\"\"This function gets a subdirectory path and imports __init__.py from it. It then gets _ordering attribute from __init__.py and returns it. It also caches the attribute for faster initialization.\n If failed to either import __init__.py or get the _ordering attribute, it returns an empty list.\"\"\"\n if cache is None:\n cache = {}\n if path in cache:\n return cache[path]\n import_path = path.replace('/', '.')\n ordering = []\n try:\n imported_module = importlib.import_module(import_path + '.__init__')\n ordering = imported_module._ordering\n logger.debug(\"Found ordering for {} directory!\".format(import_path))\n except ImportError as e:\n logger.error(\"Exception while loading __init__.py for directory {}\".format(path))\n logger.debug(e)\n except AttributeError as e:\n pass\n finally:\n cache[path] = ordering\n return ordering\n\n def insert_by_ordering(self, to_insert, alias, l, ordering):\n if alias in ordering:\n # HAAAAAAAAAAAAAAXXXXXXXXXX\n to_insert = ListWithMetadata(to_insert)\n # Marking the object we're inserting with its alias\n # so that we won't mix up ordering of elements later\n to_insert.ordering_alias = alias\n if not l: # No conditions to check\n l.append(to_insert)\n return l\n for e in l:\n if hasattr(e, \"ordering_alias\"):\n if ordering.index(e.ordering_alias) > ordering.index(alias):\n l.insert(l.index(e), to_insert)\n return l\n else:\n pass # going to next element\n else:\n l.insert(l.index(e), to_insert)\n return l\n l.append(to_insert)\n return l # Catch-all\n\n\ndef app_walk(base_dir):\n \"\"\"Example of app_walk(directory): \n [('./apps', ['ee_apps', 'media_apps', 'test', 'system_apps', 'skeleton', 'network_apps'], ['__init__.pyc', '__init__.py']),\n ('./apps/ee_apps', ['i2ctools'], ['__init__.pyc', '__init__.py']),\n ('./apps/ee_apps/i2ctools', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py']),\n ('./apps/media_apps', ['mocp', 'volume'], ['__init__.pyc', '__init__.py']),\n ('./apps/media_apps/mocp', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py']),\n ('./apps/media_apps/volume', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py'])]\n \"\"\"\n walk_results = []\n modules = []\n subdirs = []\n for element in os.listdir(base_dir):\n full_path = os.path.join(base_dir, element)\n if os.path.isdir(full_path):\n if is_subdir(full_path):\n subdirs.append(element)\n results = app_walk(full_path)\n for result in results:\n walk_results.append(result)\n elif is_module_dir(full_path):\n modules.append(element)\n walk_results.append((base_dir, subdirs, modules))\n return walk_results\n\n\ndef get_zeroapp_class_in_module(module_):\n if 'init_app' in dir(module_):\n return None\n module_content = [item for item in dir(module_) if not item.startswith('__')]\n for item in module_content:\n class_ = getattr(module_, item)\n try:\n if issubclass(class_, zero_app.ZeroApp):\n return class_\n except Exception as e:\n pass # todo : check why isinstance(class_, ClassType)==False in python2\n return None\n\n\ndef 
is_class_based_module(module_):\n return get_zeroapp_class_in_module(module_) is not None\n\n\ndef is_module_dir(dir_path):\n contents = os.listdir(dir_path)\n return \"main.py\" in contents and \"do_not_load\" not in contents\n\n\ndef is_subdir(dir_path):\n contents = os.listdir(dir_path)\n return \"__init__.py\" in contents and \"main.py\" not in contents and \"do_not_load\" not in contents\n","sub_path":"apps/app_manager.py","file_name":"app_manager.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"385984134","text":"import os\nimport pandas\nimport re\nimport json\nimport shutil\n\n\"\"\"\nitems = os.listdir(\"../data/all_csv/\")\n\npattern_left = r'[0-9]+(?= \\-)'\npattern_right = r'(?<=\\- )[0-9]+'\n#newlist = []\nfor name in items:\n\tname = \"../data/all_csv/{}\".format(name)\n\tif name.endswith('csv'):\n\t\tdata = pandas.read_csv(name, '#')\n\t\trow = data.iloc[1]\n\t\tfor k in row.keys():\n\t\t\tif isinstance(row[k], str) and re.match(r'[0-9]+ \\- [0-9]+', row[k]):\n\t\t\t\tif \"score\" in k:\n\t\t\t\t\tcandidates_left = []\n\t\t\t\t\tcandidates_right = []\n\t\t\t\t\tfor i in range(len(data)):\n\t\t\t\t\t\tgroups = re.search(pattern_left, data.iloc[i][k]):\n\t\t\t\t\t\tcandidates_left.append(re.search(pattern_left, data.iloc[i][k]).group())\t\t\t\t\t\t\n\t\t\t\t\t\tcandidates_right.append(re.search(pattern_right, data.iloc[i][k]).group())\n\t\t\t\t\tdata.rename(columns = {k: 'aggregate ' + k}, inplace = True)\n\t\t\t\t\tdata[k + \" (left)\"] = candidates_left\n\t\t\t\t\tdata[k + \" (right)\"] = candidates_right\n\t\t\t\t\tprint data\n\t\t\t\telif k == \"result\":\n\t\t\t\t\tcandidates_left = []\n\t\t\t\t\tcandidates_right = []\n\t\t\t\t\tfor i in range(len(data)):\n\t\t\t\t\t\tcandidates_left.append(re.search(pattern_left, data.iloc[i][k]).group())\t\t\t\t\t\t\n\t\t\t\t\t\tcandidates_right.append(re.search(pattern_right, data.iloc[i][k]).group())\n\t\t\t\t\tdata.rename(columns = {'result': 'aggregate result'}, inplace = True)\n\t\t\t\t\tdata[k + \" (left)\"] = candidates_left\n\t\t\t\t\tdata[k + \" (right)\"] = candidates_right\n\t\t\t\t\tprint data\n\t\t\t\telif k in [\"height\", \"record\"]:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint k\n\t\t\t\t\tprint row[k]\n\t\t\t\t\tprint\n\nwith open('../data/complex_ids.json') as f:\n\tcomplex_id = set(json.load(f))\n\nwith open('/tmp/clean_files.txt') as f:\n\tfiles = map(lambda x:x.strip(), f.readlines())\n\ncleaned_complex = set(files) & set(complex_id)\n\nwith open('../data/cleaned_complex_ids.json', 'w') as f:\n\tjson.dump(list(cleaned_complex), f)\n\"\"\"\nwith open('../READY/r1_training_all.json') as f:\n\tfiles_1 = json.load(f)\nwith open('../READY/r2_training_all.json') as f:\n\tfiles_2 = json.load(f)\nfiles_1.update(files_2)\n\nfiles = set(files_1)\n\nitems = os.listdir(\"../data/all_csv/\")\nfor name in items:\n\tif name.endswith('csv'):\n\t\tif name not in files:\n\t\t\tfull_name = \"../data/all_csv/{}\".format(name)\n\t\t\ttrash_name = \"../data/trash_csv/{}\".format(name)\n\t\t\tshutil.move(full_name, trash_name)\n","sub_path":"code/show_columns.py","file_name":"show_columns.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"8272924","text":"# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom spacy.util import get_lang_class\nimport pytest\n\n\n@pytest.mark.parametrize(\"text\", [\"-0.23\", \"+123,456\", 
\"±1\"])\n@pytest.mark.parametrize(\"lang\", [\"en\", \"xx\"])\ndef test_issue2782(text, lang):\n \"\"\"Check that like_num handles + and - before number.\"\"\"\n cls = get_lang_class(lang)\n nlp = cls()\n doc = nlp(text)\n assert len(doc) == 1\n assert doc[0].like_num\n","sub_path":"python/spaCy/2018/12/test_issue2782.py","file_name":"test_issue2782.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"599910103","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/vehiclePreview20/vehicle_preview_buying_panel.py\nimport time\nfrom collections import namedtuple\nimport BigWorld\nfrom CurrentVehicle import g_currentPreviewVehicle\nfrom adisp import process\nfrom constants import RentType, GameSeasonType\nfrom gui import DialogsInterface\nfrom gui.ClientUpdateManager import g_clientUpdateManager\nfrom gui.Scaleform.daapi.settings.views import VIEW_ALIAS\nfrom gui.Scaleform.daapi.view.dialogs import I18nConfirmDialogMeta, DIALOG_BUTTON_ID\nfrom gui.Scaleform.daapi.view.lobby.store.browser.ingameshop_helpers import isIngameShopEnabled\nfrom gui.Scaleform.daapi.view.lobby.techtree.techtree_dp import g_techTreeDP\nfrom gui.Scaleform.daapi.view.lobby.vehiclePreview20.vehicle_preview_dp import DefaultVehPreviewDataProvider\nfrom gui.Scaleform.daapi.view.meta.VehiclePreviewBuyingPanelMeta import VehiclePreviewBuyingPanelMeta\nfrom gui.Scaleform.locale.MENU import MENU\nfrom gui.Scaleform.locale.RES_ICONS import RES_ICONS\nfrom gui.Scaleform.locale.TOOLTIPS import TOOLTIPS\nfrom gui.Scaleform.locale.VEHICLE_PREVIEW import VEHICLE_PREVIEW\nfrom gui.app_loader import g_appLoader\nfrom gui.game_control.wallet import WalletController\nfrom gui.hangar_cameras.hangar_camera_common import CameraRelatedEvents\nfrom gui.impl import backport\nfrom gui.impl.gen import R\nfrom gui.ingame_shop import canBuyGoldForVehicleThroughWeb, showBuyVehicleOverlay, showBuyGoldForBundle\nfrom gui.referral_program import showGetVehiclePage\nfrom gui.shared import event_dispatcher, g_eventBus\nfrom gui.shared import events, EVENT_BUS_SCOPE\nfrom gui.shared.event_dispatcher import showVehicleRentDialog\nfrom gui.shared.events import HasCtxEvent\nfrom gui.shared.formatters import icons, text_styles, formatPrice\nfrom gui.shared.economics import getPriceTypeAndValue\nfrom gui.shared.formatters import getItemPricesVO, getItemUnlockPricesVO, chooseItemPriceVO\nfrom gui.shared.tooltips.formatters import getActionPriceData\nfrom gui.shared.gui_items.items_actions import factory\nfrom gui.shared.gui_items.gui_item_economics import ItemPrice\nfrom gui.shared.money import Currency, MONEY_UNDEFINED\nfrom gui.shared.utils.functions import makeTooltip\nfrom shared_utils import findFirst, first\nfrom helpers import dependency\nfrom helpers import time_utils\nfrom helpers.i18n import makeString as _ms\nfrom items_kit_helper import lookupItem, BOX_TYPE, showItemTooltip\nfrom items_kit_helper import OFFER_CHANGED_EVENT, getActiveOffer, mayObtainForMoney, mayObtainWithMoneyExchange\nfrom skeletons.gui.game_control import IVehicleComparisonBasket\nfrom skeletons.gui.game_control import ITradeInController, IRestoreController, IHeroTankController\nfrom skeletons.gui.goodies import IGoodiesCache\nfrom skeletons.gui.lobby_context import ILobbyContext\nfrom skeletons.gui.shared import IItemsCache\nfrom web_client_api.common import ItemPackTypeGroup, ItemPackEntry\n_ButtonState = 
namedtuple('_ButtonState', ('enabled', 'itemPrice', 'label', 'isAction', 'actionTooltip', 'tooltip', 'title', 'isMoneyEnough', 'isUnlock', 'isPrevItemsUnlock'))\n\ndef _buildBuyButtonTooltip(key):\n return makeTooltip(TOOLTIPS.vehiclepreview_buybutton_all(key, 'header'), TOOLTIPS.vehiclepreview_buybutton_all(key, 'body'))\n\n\nclass VehiclePreviewBuyingPanel(VehiclePreviewBuyingPanelMeta):\n _itemsCache = dependency.descriptor(IItemsCache)\n _goodiesCache = dependency.descriptor(IGoodiesCache)\n _comparisonBasket = dependency.descriptor(IVehicleComparisonBasket)\n _tradeIn = dependency.descriptor(ITradeInController)\n _restores = dependency.descriptor(IRestoreController)\n _heroTanks = dependency.descriptor(IHeroTankController)\n _lobbyContext = dependency.descriptor(ILobbyContext)\n\n def __init__(self, skipConfirm=False):\n super(VehiclePreviewBuyingPanel, self).__init__()\n heroTankCD = self._heroTanks.getCurrentTankCD()\n self._vehicleCD = g_currentPreviewVehicle.item.intCD\n self._vehicleLevel = g_currentPreviewVehicle.item.level\n self._actionType = None\n self._skipConfirm = skipConfirm\n self._disableBuyButton = False\n self.__previewDP = DefaultVehPreviewDataProvider()\n self.__isHeroTank = heroTankCD and heroTankCD == self._vehicleCD\n self.__price = None\n self.__title = None\n self.__description = None\n self.__items = None\n self.__offers = None\n self.__currentOffer = None\n self.__styleByGroup = {}\n self.__vehicleByGroup = {}\n self.__endTime = None\n self.__oldPrice = MONEY_UNDEFINED\n self.__buyParams = None\n self.__backAlias = None\n self.__timeCallbackID = None\n self.__timeLeftIcon = icons.makeImageTag(RES_ICONS.MAPS_ICONS_LIBRARY_TIME_ICON, 16, 16)\n self.__cachedVehiclesVOs = None\n self.__cachedItemsVOs = None\n self.__cachedCollapsedItemsVOs = None\n g_techTreeDP.load()\n return\n\n def onBuyOrResearchClick(self):\n vehicle = g_currentPreviewVehicle.item\n if self.__items is not None:\n self.__purchasePackage()\n elif self.__offers is not None:\n self.__purchaseOffer()\n elif canBuyGoldForVehicleThroughWeb(vehicle):\n self.__purchaseSingleVehicle(vehicle)\n elif self.__isHeroTank:\n self.__purchaseHeroTank()\n else:\n self.__research()\n return\n\n def setTimerData(self, endTime, oldPrice):\n self.__oldPrice = oldPrice\n if endTime is not None:\n self.__endTime = endTime\n self.__onLeftTimeUpdated()\n self.__updateBtnState()\n return\n\n def setBuyParams(self, buyParams):\n self.__buyParams = buyParams\n\n def setBackAlias(self, backAlias):\n self.__backAlias = backAlias\n\n def setPackItems(self, packItems, price, title):\n self.__title = title if title is not None else ''\n self.__price = price\n self.__items = packItems\n self.__styleByGroup.clear()\n self.__vehicleByGroup.clear()\n vehiclesItems, items = self.__previewDP.separateItemsPack(self.__items)\n for item in items:\n if item.type in ItemPackTypeGroup.STYLE and item.groupID not in self.__styleByGroup:\n self.__styleByGroup[item.groupID] = item.id\n\n for vehicleItem in vehiclesItems:\n self.__vehicleByGroup[vehicleItem.id] = vehicleItem.groupID\n\n vehiclesVOs, itemsVOs, collapseItemsVOs = self.__previewDP.getItemsPackData(g_currentPreviewVehicle.item, items, vehiclesItems)\n self.__cachedVehiclesVOs = vehiclesVOs\n self.__cachedItemsVOs = itemsVOs\n self.__cachedCollapsedItemsVOs = collapseItemsVOs\n self.__update()\n return\n\n def onCarouselVehilceSelected(self, intCD):\n self._vehicleCD = intCD\n g_currentPreviewVehicle.selectVehicle(intCD)\n\n def setOffers(self, offers, title, description):\n 
self.__offers = offers\n self.__title = title\n self.__description = description\n selectedID = getActiveOffer(self.__offers).id\n offersData = self.__previewDP.getOffersData(self.__offers, selectedID) if len(self.__offers) > 1 else []\n if offersData:\n self.as_setOffersDataS(offersData)\n self.onOfferSelected(selectedID)\n\n def onOfferSelected(self, offerID):\n self.__currentOffer = findFirst(lambda o: o.id == offerID, self.__offers)\n if self.__currentOffer:\n vehicle = g_currentPreviewVehicle.item\n crew = self.__currentOffer.crew\n g_eventBus.handleEvent(HasCtxEvent(ctx={'vehicleItems': [ItemPackEntry(id=vehicle.intCD, groupID=crew.groupID)],\n 'crewItems': [crew]}, eventType=OFFER_CHANGED_EVENT))\n self.__buyParams = self.__currentOffer.buyParams\n self.__price = self.__currentOffer.buyPrice\n self.as_setBuyDataS(self.__previewDP.getOffersBuyingPanelData(self.__getBtnData()))\n description = self.__description or self.__getCurrentOfferDescription() or {}\n self.as_setSetTitleTooltipS(makeTooltip(**description))\n\n def showTooltip(self, intCD, itemType):\n toolTipMgr = g_appLoader.getApp().getToolTipMgr()\n if itemType == BOX_TYPE:\n toolTipMgr.onCreateComplexTooltip(makeTooltip(TOOLTIPS.VEHICLEPREVIEW_BOXTOOLTIP_HEADER, TOOLTIPS.VEHICLEPREVIEW_BOXTOOLTIP_BODY), 'INFO')\n return\n try:\n try:\n itemId = int(intCD)\n except ValueError:\n itemId = intCD\n\n rawItem = [ item for item in self.__items if item.id == itemId and item.type == itemType ][0]\n item = lookupItem(rawItem, self._itemsCache, self._goodiesCache)\n showItemTooltip(toolTipMgr, rawItem, item)\n except IndexError:\n return\n\n def updateData(self, useCompactData):\n self.__update(collapseItems=useCompactData)\n\n def _populate(self):\n super(VehiclePreviewBuyingPanel, self)._populate()\n g_clientUpdateManager.addMoneyCallback(self.__updateBtnState)\n g_clientUpdateManager.addCallbacks({'stats.freeXP': self.__updateBtnState,\n 'inventory': self.__updateBtnState,\n 'serverSettings.blueprints_config': self.__onBlueprintsModeChanged})\n g_currentPreviewVehicle.onVehicleUnlocked += self.__updateBtnState\n g_currentPreviewVehicle.onChanged += self.__onVehicleChanged\n self._restores.onRestoreChangeNotify += self.__onRestoreChanged\n self._lobbyContext.getServerSettings().onServerSettingsChange += self.__onServerSettingsChanged\n self.addListener(CameraRelatedEvents.VEHICLE_LOADING, self.__onVehicleLoading, EVENT_BUS_SCOPE.DEFAULT)\n\n def _dispose(self):\n g_clientUpdateManager.removeObjectCallbacks(self)\n g_currentPreviewVehicle.onVehicleUnlocked -= self.__updateBtnState\n g_currentPreviewVehicle.onChanged -= self.__onVehicleChanged\n self._restores.onRestoreChangeNotify -= self.__onRestoreChanged\n self._lobbyContext.getServerSettings().onServerSettingsChange -= self.__onServerSettingsChanged\n self.removeListener(CameraRelatedEvents.VEHICLE_LOADING, self.__onVehicleLoading, EVENT_BUS_SCOPE.DEFAULT)\n self.__stopTimer()\n self.__styleByGroup.clear()\n self.__vehicleByGroup.clear()\n super(VehiclePreviewBuyingPanel, self)._dispose()\n\n def __update(self, collapseItems=False):\n if self.__cachedVehiclesVOs:\n g_currentPreviewVehicle.selectVehicle(self.__cachedVehiclesVOs[0]['intCD'])\n self.as_setSetVehiclesDataS({'vehicles': self.__cachedVehiclesVOs})\n if collapseItems and self.__cachedCollapsedItemsVOs:\n self.as_setSetItemsDataS({'items': self.__cachedCollapsedItemsVOs})\n elif self.__cachedItemsVOs:\n self.as_setSetItemsDataS({'items': self.__cachedItemsVOs})\n self.__updateBtnState()\n\n def __getOfferByID(self, 
offerID):\n return findFirst(lambda o: o.buy_params['transactionID'] == offerID, self.__offers)\n\n def __isReferralWindow(self):\n return self.__backAlias == VIEW_ALIAS.REFERRAL_PROGRAM_WINDOW\n\n def __getConfirmationDialogKey(self):\n key = 'buyConfirmation'\n if self.__isReferralWindow():\n key = 'referralReward'\n return key\n\n def __buyRequestConfirmation(self, key='buyConfirmation'):\n return DialogsInterface.showDialog(meta=I18nConfirmDialogMeta(key=key, messageCtx={'product': self.__title or '\"This Pack\"',\n 'price': formatPrice(self.__price, reverse=True, useIcon=True)}, focusedID=DIALOG_BUTTON_ID.SUBMIT))\n\n def __onVehicleLoading(self, ctxEvent):\n vehicle = g_currentPreviewVehicle.item\n if vehicle is None:\n return\n else:\n groupID = self.__vehicleByGroup.get(vehicle.intCD)\n if not ctxEvent.ctx.get('started') and groupID in self.__styleByGroup:\n customizationStyle = self.__styleByGroup[groupID]\n style = self._itemsCache.items.getItemByCD(customizationStyle)\n if style is not None and not style.isRentable:\n g_currentPreviewVehicle.previewStyle(style)\n return\n\n def __updateBtnState(self, *args):\n item = g_currentPreviewVehicle.item\n if item is None:\n return\n else:\n btnData = self.__getBtnData()\n self._actionType = self.__previewDP.getBuyType(item)\n if self.__items:\n buyingPanelData = self.__previewDP.getItemPackBuyingPanelData(item, btnData, self.__items)\n elif self.__offers:\n buyingPanelData = self.__previewDP.getOffersBuyingPanelData(btnData)\n else:\n buyingPanelData = self.__previewDP.getBuyingPanelData(item, btnData, self.__isHeroTank)\n buyingPanelData.update({'isReferralEnabled': self.__isReferralWindow()})\n self.as_setBuyDataS(buyingPanelData)\n return\n\n def __onVehicleChanged(self, *args):\n if g_currentPreviewVehicle.isPresent():\n self._vehicleCD = g_currentPreviewVehicle.item.intCD\n if not self.__price:\n self.__updateBtnState()\n\n def __onRestoreChanged(self, vehicles):\n if g_currentPreviewVehicle.isPresent():\n if self._vehicleCD in vehicles:\n self.__updateBtnState()\n\n def __onServerSettingsChanged(self, diff):\n if self._lobbyContext.getServerSettings().isIngameDataChangedInDiff(diff, 'isEnabled'):\n self.__updateBtnState()\n\n def __onBlueprintsModeChanged(self, _):\n self.__updateBtnState()\n\n def __getBtnData(self):\n if self.__price is not None:\n return self.__getBtnDataPack()\n else:\n vehicle = g_currentPreviewVehicle.item\n return self.__getBtnDataUnlockedVehicle(vehicle) if vehicle.isUnlocked else self.__getBtnDataLockedVehicle(vehicle)\n\n def __getBtnDataPack(self):\n tooltip = ''\n actionTooltip = None\n currency = self.__price.getCurrency()\n if self._disableBuyButton:\n tooltip = _buildBuyButtonTooltip('endTime')\n enabled = False\n else:\n enabled = self.__walletAvailableForCurrency(currency) and (isIngameShopEnabled() if currency == Currency.GOLD else mayObtainForMoney(self.__price) or mayObtainWithMoneyExchange(self.__price))\n if self.__currentOffer and self.__currentOffer.bestOffer and self.__currentOffer.eventType:\n actionTooltip = self.__getBestOfferTooltipData(self.__currentOffer.eventType)\n if self.__isReferralWindow():\n buttonLabel = backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.obtain())\n elif self.__items:\n buttonLabel = backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.buyItemPack())\n elif self.__offers and self.__currentOffer:\n buttonLabel = backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.rent())\n self.__title = self.__getCurrentOfferTitle()\n 
else:\n buttonLabel = backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.buy())\n return _ButtonState(enabled=enabled, itemPrice=getItemPricesVO(ItemPrice(price=self.__price, defPrice=self.__oldPrice)), label=buttonLabel, isAction=self.__oldPrice.isDefined() or actionTooltip is not None, actionTooltip=actionTooltip, tooltip=tooltip, title=self.__title, isMoneyEnough=True, isUnlock=False, isPrevItemsUnlock=True)\n\n def __getBtnDataUnlockedVehicle(self, vehicle):\n money = self._itemsCache.items.stats.money\n money = self._tradeIn.addTradeInPriceIfNeeded(vehicle, money)\n notEnoughMoneyTooltip = ''\n actionTooltip = getActionPriceData(vehicle)\n exchangeRate = self._itemsCache.items.shop.exchangeRate\n priceType, price = getPriceTypeAndValue(vehicle, money, exchangeRate)\n itemPrice = chooseItemPriceVO(priceType, price)\n currency = price.getCurrency(byWeight=True)\n walletAvailable = self.__walletAvailableForCurrency(currency)\n isAction = False\n minRentPricePackage = vehicle.getRentPackage()\n if minRentPricePackage:\n isAction = minRentPricePackage['rentPrice'] != minRentPricePackage['defaultRentPrice']\n elif not vehicle.isRestoreAvailable():\n isAction = vehicle.buyPrices.getSum().isActionPrice()\n mayObtain = self.__isHeroTank or walletAvailable and vehicle.mayObtainWithMoneyExchange(money, exchangeRate)\n isBuyingAvailable = not vehicle.isHidden or vehicle.isRentable or vehicle.isRestorePossible()\n isMoneyEnough = True\n if walletAvailable:\n if currency == Currency.GOLD:\n if not mayObtain:\n if isBuyingAvailable:\n notEnoughMoneyTooltip = _buildBuyButtonTooltip('notEnoughGold')\n isMoneyEnough = False\n if isIngameShopEnabled():\n mayObtain = True\n elif not mayObtain and isBuyingAvailable:\n notEnoughMoneyTooltip = _buildBuyButtonTooltip('notEnoughCredits')\n isMoneyEnough = False\n if self._disableBuyButton:\n mayObtain = False\n isMoneyEnough = False\n return _ButtonState(enabled=mayObtain, itemPrice=itemPrice, label=backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.restore()) if vehicle.isRestorePossible() else backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.buy()), isAction=isAction, actionTooltip=actionTooltip, tooltip=notEnoughMoneyTooltip, title=self.__title, isMoneyEnough=isMoneyEnough, isUnlock=False, isPrevItemsUnlock=True)\n\n def __getBtnDataLockedVehicle(self, vehicle):\n stats = self._itemsCache.items.stats\n tooltip = ''\n nodeCD = vehicle.intCD\n _, isXpEnough = g_techTreeDP.isVehicleAvailableToUnlock(nodeCD, self._vehicleLevel)\n unlocks = self._itemsCache.items.stats.unlocks\n isNext2Unlock, unlockProps = g_techTreeDP.isNext2Unlock(nodeCD, unlocked=set(unlocks), xps=stats.vehiclesXPs, freeXP=stats.freeXP, level=self._vehicleLevel)\n isAvailableToUnlock = isXpEnough and isNext2Unlock\n if not isAvailableToUnlock:\n if not isXpEnough:\n tooltip = _buildBuyButtonTooltip('notEnoughXp')\n elif any((bool(cd in unlocks) for cd in g_techTreeDP.getTopLevel(nodeCD))):\n tooltip = _buildBuyButtonTooltip('parentModuleIsLocked')\n else:\n tooltip = _buildBuyButtonTooltip('parentVehicleIsLocked')\n return _ButtonState(enabled=isAvailableToUnlock, itemPrice=getItemUnlockPricesVO(unlockProps), label=backport.text(R.strings.vehicle_preview.buyingPanel.buyBtn.label.research()), isAction=unlockProps.discount > 0, actionTooltip=None, tooltip=tooltip, title=self.__title, isMoneyEnough=isXpEnough, isUnlock=True, isPrevItemsUnlock=isNext2Unlock)\n\n def __getBestOfferTooltipData(self, eventType=None):\n return 
VEHICLE_PREVIEW.BUYINGPANEL_OFFER_RENT_FRONTLINE_TOOLTIP_BEST_OFFER if eventType == 'frontline' else None\n\n def __getCurrentOfferTitle(self):\n if self.__offers and self.__currentOffer:\n if self.__currentOffer.eventType == 'frontline':\n firstRent = first(self.__currentOffer.rent)\n if len(self.__offers) > 1 or firstRent and firstRent.get('season') is not None:\n return _ms(backport.text(R.strings.vehicle_preview.buyingPanel.offer.rent.title.frontline.ordinal()))\n return _ms(backport.text(R.strings.vehicle_preview.buyingPanel.offer.rent.title.frontline.single_cycle()), cycles=self.__currentOffer.name)\n return self.__title\n\n def __getCurrentOfferDescription(self):\n return {'header': backport.text(R.strings.vehicle_preview.buyingPanel.offer.rent.frontline.description.header()),\n 'body': backport.text(R.strings.vehicle_preview.buyingPanel.offer.rent.frontline.description.body.credits())} if self.__currentOffer and self.__currentOffer.eventType == 'frontline' else None\n\n def __startTimer(self, interval):\n self.__stopTimer()\n self.__timeCallbackID = BigWorld.callback(interval, self.__onLeftTimeUpdated)\n\n def __stopTimer(self):\n if self.__timeCallbackID is not None:\n BigWorld.cancelCallback(self.__timeCallbackID)\n self.__timeCallbackID = None\n return\n\n def __setUsageLeftTime(self, leftTime):\n self.as_updateLeftTimeS(formattedTime='{} {}'.format(self.__timeLeftIcon, text_styles.tutorial(time_utils.getTillTimeString(leftTime, MENU.VEHICLEPREVIEW_TIMELEFT))), hasHoursAndMinutes=True)\n\n def __setShortLeftTime(self, leftTime):\n self.as_updateLeftTimeS(formattedTime='{} {}'.format(self.__timeLeftIcon, text_styles.tutorial(time_utils.getTillTimeString(leftTime, MENU.VEHICLEPREVIEW_TIMELEFTSHORT))), hasHoursAndMinutes=True)\n\n def __setDateLeftTime(self):\n gmTime = time_utils.getTimeStructInLocal(self.__endTime)\n monthName = _ms(MENU.datetime_months(gmTime.tm_mon))\n fmtValues = _ms('%s %s %s' % (gmTime.tm_mday, monthName, gmTime.tm_year))\n tooltip = makeTooltip(header=TOOLTIPS.VEHICLEPREVIEW_SHOPPACK_DATETIMETOOLTIP_HEADER, body=_ms(TOOLTIPS.VEHICLEPREVIEW_SHOPPACK_DATETIMETOOLTIP_BODY, namePack=text_styles.neutral(self.__title), date=fmtValues))\n self.as_setSetTitleTooltipS(tooltip)\n self.as_updateLeftTimeS(formattedTime='')\n\n def __timeOver(self):\n self.__endTime = None\n self._disableBuyButton = True\n formattedTime = '{} {}'.format(icons.makeImageTag(RES_ICONS.MAPS_ICONS_LIBRARY_ALERTICON2, vSpace=-2), text_styles.alert(MENU.VEHICLEPREVIEW_ENDTIME))\n self.as_updateLeftTimeS(formattedTime=formattedTime)\n self.__updateBtnState()\n return\n\n def __onLeftTimeUpdated(self):\n leftTime = self.__endTime - time_utils.getServerUTCTime()\n self.__timeCallbackID = None\n if leftTime < 0:\n self.__timeOver()\n elif leftTime > time_utils.ONE_DAY:\n self.__setDateLeftTime()\n self.__startTimer(leftTime - time_utils.ONE_DAY)\n else:\n gmTime = time.gmtime(leftTime)\n if gmTime.tm_min == 0:\n self.__setShortLeftTime(leftTime)\n else:\n self.__setUsageLeftTime(leftTime)\n self.__startTimer(gmTime.tm_sec + 1)\n return\n\n @process\n def __purchasePackage(self):\n if self.__items is not None:\n requestConfirmed = yield self.__buyRequestConfirmation(self.__getConfirmationDialogKey())\n if requestConfirmed:\n if self.__isReferralWindow():\n inventoryVehicle = self._itemsCache.items.getItemByCD(g_currentPreviewVehicle.item.intCD)\n showGetVehiclePage(inventoryVehicle, self.__buyParams)\n return\n goldPrice = self.__price.get(Currency.GOLD, 0)\n if goldPrice > 
self._itemsCache.items.stats.gold:\n showBuyGoldForBundle(goldPrice, self.__buyParams)\n else:\n showBuyVehicleOverlay(self.__buyParams)\n return\n\n def __purchaseOffer(self):\n rent = self.__currentOffer.rent\n cycles = [ r['cycle'] for r in rent if r.get('cycle') ]\n seasons = [ r['season'] for r in rent if r.get('season') ]\n showVehicleRentDialog(g_currentPreviewVehicle.item.intCD, RentType.SEASON_CYCLE_RENT if cycles else RentType.SEASON_RENT, cycles if cycles else seasons, GameSeasonType.EPIC if self.__currentOffer.eventType == 'frontline' else None, self.__currentOffer.buyPrice, self.__currentOffer.buyParams)\n return\n\n def __purchaseSingleVehicle(self, vehicle):\n event_dispatcher.showVehicleBuyDialog(vehicle)\n\n def __purchaseHeroTank(self):\n url = self._heroTanks.getCurrentRelatedURL()\n self.fireEvent(events.OpenLinkEvent(events.OpenLinkEvent.SPECIFIED, url=url))\n\n def __research(self):\n if self._actionType == factory.UNLOCK_ITEM:\n unlockProps = g_techTreeDP.getUnlockProps(self._vehicleCD, self._vehicleLevel)\n factory.doAction(factory.UNLOCK_ITEM, self._vehicleCD, unlockProps, skipConfirm=self._skipConfirm)\n else:\n factory.doAction(factory.BUY_VEHICLE, self._vehicleCD, False, None, VIEW_ALIAS.VEHICLE_PREVIEW_20, skipConfirm=self._skipConfirm)\n return\n\n def __walletAvailableForCurrency(self, currency):\n return self._itemsCache.items.stats.currencyStatuses.get(currency) == WalletController.STATUS.AVAILABLE\n","sub_path":"source/res/scripts/client/gui/Scaleform/daapi/view/lobby/vehiclePreview20/vehicle_preview_buying_panel.py","file_name":"vehicle_preview_buying_panel.py","file_ext":"py","file_size_in_byte":24830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"19817534","text":"import discord\nfrom discord.ext import commands\nimport json\n\n# Get configuration.json\nwith open(\"configuration.json\", \"r\") as config: \n\tdata = json.load(config)\n\ttoken = data[\"token\"]\n\tprefix = data[\"prefix\"]\n\n\nclass Greetings(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\tself._last_member = None\n\n# Intents\nintents = discord.Intents.default()\n\nbot = commands.Bot(prefix, intents = intents)\n\n# Load cogs\ninitial_extensions = [\n\t\"Cogs.onCommandError\",\n\t\"Cogs.help\",\n\t\"Cogs.ping\"\n]\n\nprint(initial_extensions)\n\nif __name__ == '__main__':\n\tfor extension in initial_extensions:\n\t\ttry:\n\t\t\tbot.load_extension(extension)\n\t\texcept Exception as e:\n\t\t\tprint(f\"Failed to load extension {extension}: {e}\")\n\n# @commands.Cog.listener()\n# async def on_member_join(self, member):\n@bot.event\nasync def on_message(message):\n\t# Inspect the message text, not the Message object itself\n\tif not message.content.startswith(\"!mzk\"):\n\t\treturn\n\tmessage_cut = message.content[4:]\n\tusr_cmd = message_cut.lower()\n\n\tif usr_cmd == 'konnichiwa':\n\t\tawait message.channel.send('Doumo')\n\t# Overriding on_message suppresses command handling unless we forward the message\n\tawait bot.process_commands(message)\n\n@bot.event\nasync def on_ready():\n\tprint(f\"We have logged in as {bot.user}\")\n\tawait bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name =f\"{bot.command_prefix}help\"))\n\tprint(discord.__version__)\n\nbot.run(token)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"339750949","text":"\nfrom deepdiff import DeepDiff, extract\n\nimport re\nimport os\n\nimport yaml\n\nfrom desired_state.util import make_matcher\nfrom desired_state.rule import select_rules, select_rules_recursive, 
Action\nfrom desired_state.diff import deduplicate_rules, get_rule_action_subtree\n\nfrom pprint import pformat\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef load_rule(name):\n with open(os.path.join(HERE, 'rules', f'{name}.yml')) as f:\n return yaml.safe_load(f.read())\n\n\ndef load_state(name, version):\n with open(os.path.join(HERE, 'states', name, f'{version}.yml')) as f:\n return yaml.safe_load(f.read())\n\n\ndef run_diff_get_action(a, b, rules):\n\n diff = DeepDiff(a, b, ignore_order=True)\n matching_rules = select_rules_recursive(diff, rules['rules'], a, b)\n dedup_matching_rules = deduplicate_rules(matching_rules)\n assert len(dedup_matching_rules) >= 1, \"No rules found\"\n assert len(dedup_matching_rules) == 1, \"More than on rule found: \" + pformat(dedup_matching_rules)\n action, subtree = get_rule_action_subtree(dedup_matching_rules[0], a, b)\n return action, subtree\n\n\ndef run_diff_get_actions(a, b, rules):\n\n diff = DeepDiff(a, b, ignore_order=True)\n matching_rules = select_rules_recursive(diff, rules['rules'], a, b)\n dedup_matching_rules = deduplicate_rules(matching_rules)\n return [get_rule_action_subtree(x, a, b) for x in dedup_matching_rules]\n\ndef test_rule1():\n\n rule = yaml.safe_load(r'''\n rule_selector: root.routers.index\n inventory: all\n create:\n - tasks: create_tasks.yml\n retrieve:\n - tasks: get_tasks.yml\n update:\n - tasks: create_tasks.yml\n delete:\n - tasks: del_tasks.yml\n ''')\n\n\ndef test_rule2():\n\n rule = yaml.safe_load(r'''\n rule_selector: root.routers.index\n inventory_selector: node.name\n create:\n - role: create_role\n retrieve:\n - role: get_role\n update:\n - role: update_role\n delete:\n - role: delete_role\n ''')\n\n\ndef test_rules_change():\n\n t1 = yaml.safe_load('''\n routers:\n - name: R1\n - name: R2\n ''')\n\n t2 = yaml.safe_load('''\n routers:\n - name: R1\n - name: R3\n ''')\n\n rules = yaml.safe_load(r'''\n rules:\n - rule_selector: root.routers.index\n inventory_selector: node.name\n ''')\n\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'name': 'R3'}\n\n\ndef test_rules_add():\n\n t1 = load_state('delete_value', 'B')\n t2 = load_state('delete_value', 'A')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'interfaces': [{'ip_address': '1.1.1.1', 'name': 'eth1'}], 'name': 'R1'}\n\n\ndef test_rules_delete():\n\n t1 = load_state('delete_value', 'A')\n t2 = load_state('delete_value', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'interfaces': [{'name': 'eth1'}], 'name': 'R1'}\n\n\ndef test_rules_rename():\n '''\n '''\n\n t1 = load_state('rename_item', 'A')\n t2 = load_state('rename_item', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'interfaces': [{'ip_address': '1.1.1.1', 'name': 'eth1'}], 'name': 'R2'}\n\n\ndef test_rules_list_insert_element():\n '''\n This case tests insertion into a list. 
It should cause one add\n instead of multiple changes.\n '''\n\n t1 = load_state('add_list_value', 'B')\n t2 = load_state('add_list_value', 'A')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n print(pformat(actions))\n\n assert len(actions) == 1\n assert actions[0][0] == Action.CREATE\n assert actions[0][1] == {'name': 'R2'}\n\n\ndef test_rules_list_remove_element():\n '''\n This case tests insertion into a list. It should cause one remove\n instead of multiple changes.\n '''\n\n t1 = load_state('add_list_value', 'A')\n t2 = load_state('add_list_value', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n print(pformat(actions))\n\n assert len(actions) == 1\n assert actions[0][0] == Action.DELETE\n assert actions[0][1] == {'name': 'R2'}\n\n\ndef test_rules_dictionary_add_item():\n '''\n This case tests insertion into a list. It should cause one add\n instead of multiple changes.\n '''\n\n t1 = load_state('add_dict_value', 'A')\n t2 = load_state('add_dict_value', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n print(actions)\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'name': 'R1', 'router-id': '1.1.1.1'}\n\n\ndef test_rules_dictionary_remove_item():\n '''\n This case tests insertion into a list. It should cause one add\n instead of multiple changes.\n '''\n\n t1 = load_state('add_dict_value', 'A')\n t2 = load_state('add_dict_value', 'B')\n rules = load_rule('routers_simple')\n\n action, subtree = run_diff_get_action(t2, t1, rules)\n\n assert action == Action.UPDATE\n assert subtree == {'name': 'R1'}\n\n\ndef test_empty_add_item():\n '''\n Tests the case to add an item to an empty file.\n '''\n\n t1 = load_state('empty_add_item', 'A')\n t2 = load_state('empty_add_item', 'B')\n rules = load_rule('routers_simple')\n\n action, subtree = run_diff_get_action(t1, t2, rules)\n\n assert action == Action.CREATE\n assert subtree == {'name': 'R1'}\n\n\ndef test_empty_remove_item():\n '''\n Tests the case to remove everything\n '''\n\n t1 = load_state('empty_add_item', 'B')\n t2 = load_state('empty_add_item', 'A')\n rules = load_rule('routers_simple')\n\n action, subtree = run_diff_get_action(t1, t2, rules)\n\n assert action == Action.DELETE\n assert subtree == {'name': 'R1'}\n\n\ndef test_reorder_list():\n '''\n '''\n\n t1 = load_state('reorder_list', 'A')\n t2 = load_state('reorder_list', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n assert len(actions) == 0\n\n\ndef test_rules_rename_key():\n '''\n Changing a key in a subtree should cause a single update.\n '''\n\n t1 = load_state('rename_key', 'A')\n t2 = load_state('rename_key', 'B')\n rules = load_rule('routers_simple')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n print(pformat(actions))\n\n assert len(actions) == 1\n assert actions[0][0] == Action.UPDATE\n assert actions[0][1] == {'interfaces': [{'ip_address': '1.1.1.1', 'name': 'eth1'}], 'label': 'R1'}\n\n\ndef test_rules_rename_key2():\n '''\n Changing the root key of a subtree should cause a delete and a create operation.\n '''\n\n t1 = load_state('rename_key2', 'A')\n t2 = load_state('rename_key2', 'B')\n rules = load_rule('router_switch')\n\n actions = run_diff_get_actions(t1, t2, rules)\n\n print(pformat(actions))\n\n assert len(actions) == 2\n assert actions[0][0] == Action.CREATE\n assert actions[0][1] == {'interfaces': [{'ip_address': 
'1.1.1.1', 'name': 'eth1'}], 'name': 'R1'}\n assert actions[1][0] == Action.DELETE\n assert actions[1][1] == {'interfaces': [{'ip_address': '1.1.1.1', 'name': 'eth1'}], 'name': 'R1'}\n","sub_path":"tests/test_rules.py","file_name":"test_rules.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"523761395","text":"# Program ini dibuat oleh Muhammad Athallah (2020)\n\n# Inisiasi nilai koordinat\nsumbuX = 0\nsumbuY = 0\n# Memakai manipulasi string\nout = \"Karakter Meong Brosss berada di koordinat (0,0)-\"\n\n# Bagian input\nprint(\"### MEONG BROSSS (Speedrunning Mode) ###\\n\")\nperintah = int(input(\"Masukkan banyak perintah yang ingin diberikan: \"))\nprint()\n\n# Mekanisme Program\nfor i in range(perintah):\n masukan = input(\"Masukkan perintah: \")\n if masukan == \"HOME\":\n break # perintah untuk exit mekanisme program\n elif masukan == \"U\":\n sumbuY += 1 # bergerak ke utara\n elif masukan == \"S\":\n sumbuY -= 1 # bergerak ke selatan\n elif masukan == \"T\":\n sumbuX += 1 # bergerak ke timur\n elif masukan == \"B\":\n sumbuX -= 1 # bergerak ke barat\n # Bagian concatenate string output\n out += \"(\" + str(sumbuX) + \",\" + str(sumbuY) + \")\"\n # Cek apakah sudah di akhir string\n if (i != perintah-1):\n out += \"-\"\n\n# Bagian Output\nprint()\nprint(out)\nprint()\nprint(\"### PROGRAM SELESAI ###\")\n","sub_path":"Lab 2/trace_meong.py","file_name":"trace_meong.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16183871","text":"from datetime import date, datetime\nfrom flask import Flask, flash, redirect, render_template, request, session, abort, Response, make_response\nfrom functools import wraps, update_wrapper\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail, Message\nimport os\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n import dao\n maxComic = dao.getMaxComic()\n return serveComic(maxComic) \n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\", **locals())\n\n@app.route(\"/characters\")\ndef characters():\n return render_template(\"characters.html\", **locals())\n\n@app.route(\"/archive\")\ndef archive():\n import dao\n comics = dao.getAllComics()\n maxChapter = dao.getMaxChapter()\n return render_template('archive.html', comics=comics, maxChapter=maxChapter) \n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n@app.route('/contact/sent', methods=['GET', 'POST'])\ndef sent():\n mail_settings = { \n \"MAIL_SERVER\": request.environ['MAIL_SERVER'],\n \"MAIL_PORT\" : 465,\n \"MAIL_USE_TLS\" : False,\n \"MAIL_USE_SSL\" : True,\n \"MAIL_USERNAME\" : request.environ['MAIL_USERNAME'],\n \"MAIL_PASSWORD\" : request.environ['MAIL_PASSWORD']\n } \n app.config.update(mail_settings)\n mail = Mail(app)\n\n ADMINS=[request.environ['MAIL_USERNAME'], \"contact@magicalheroinescomic.com\"]\n if request.method=='POST':\n reply_to = request.form.get('email')\n message = request.form.get('message')\n subject = \"Magical Heroines Contact Request from \" + request.form.get('name')\n msg = Message(subject, recipients=ADMINS)\n msg.sender=(request.form.get('name'), reply_to) \n msg.body=message\n mail.send(msg)\n return render_template(\"thankyou.html\")\n\n@app.route('/privacy')\ndef privacy():\n return render_template(\"privacy.html\")\n\n@app.route('/comic/')\ndef serveComic(curComicInput=None):\n 
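# resolve the requested comic id (falling back to the latest) and build the first/prev/next/last navigation links\n    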
import dao\n\n d0 = date(2019, 1, 11)\n today = date.today()\n delta = today - d0\n\n curComic = None\n try:\n curComic = long(curComicInput)\n except ValueError:\n return page_not_found(None)\n\n maxComic = dao.getMaxComic()\n if curComic is None:\n curComic = maxComic\n\n comic = dao.getComic(curComic)\n if comic is None:\n return page_not_found(None)\n \n nextComic = dao.getNextComic(comic.comicId)\n firstComic = dao.getMinComic()\n prevComic = dao.getPrevComic(comic.comicId)\n lastComic = int(maxComic)\n if int(comic.comicId) == int(maxComic):\n lastComic = None \n nextComic = None\n if int(comic.comicId) == firstComic:\n firstComic = None \n prevComic = None \n\n return render_template('home.html', comic=comic, firstComic=firstComic, prevComic=prevComic, nextComic=nextComic, lastComic=lastComic)\n\n@app.route('/about/preamble')\ndef preamble():\n return render_template('preamble.html')\n\n@app.route('/rss.xml')\ndef rss():\n import dao\n comics = dao.getLatestComics(100)\n rss = render_template('rss.xml', comics=comics)\n return Response(rss, mimetype='text/xml')\n\n@app.route('/sitemap.xml')\ndef sitemap():\n return render_template('sitemap.xml')\n\n@app.route('/robots.txt')\ndef robots_txt():\n return render_template(\"robots.txt\")\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n \nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"303723677","text":"# ### PGUIWEBPAGE #### #\n\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage\nfrom PyQt5 import QtGui as qtg\nfrom PyQt5 import QtWebKitWidgets as qtwk\n\nfrom . 
import messages as msg\n\nclass PguiWebPage(QWebEnginePage):\n \"\"\"Subclassed QWebEnginePage,\n representing the actual web page object in the browser.\n\n This was subclassed so that some functions can be overridden.\n \"\"\"\n\n def __init__(self, config, parent=None, profile=None, debug=None):\n \"\"\"Constructor for the class\"\"\"\n self.debug = debug or (lambda x: None)\n self.config = config\n if not profile:\n super().__init__(parent)\n else:\n super().__init__(profile, parent)\n \n debug(f\"Profile is: {self.profile()}\")\n\n self.featurePermissionRequested.connect(self.onFeaturePermissionRequested)\n\n def onFeaturePermissionRequested(self, url, feature):\n if feature in (QWebEnginePage.MediaAudioCapture, \n QWebEnginePage.MediaVideoCapture, \n QWebEnginePage.MediaAudioVideoCapture):\n self.setFeaturePermission(url, feature, QWebEnginePage.PermissionGrantedByUser)\n else:\n self.setFeaturePermission(url, feature, QWebEnginePage.PermissionDeniedByUser)\n\n def javaScriptConsoleMessage(self, level, message, line, sourceid):\n \"\"\"\n Handle console.log messages from javascript.\n Overridden from QWebEnginePage so that we can\n send javascript errors to debug.\n \"\"\"\n\n self.debug(f'Javascript Error in \"{sourceid}\" line {line}: {message}')\n\n def javaScriptConfirm(self, frame, msg):\n \"\"\"\n Handle javascript confirm() dialogs.\n Overridden from QWebEnginePage so that we can (if configured)\n force yes/no on these dialogs.\n \"\"\"\n\n if self.config.force_js_confirm == \"accept\":\n return True\n\n elif self.config.force_js_confirm == \"deny\":\n return False\n \n else:\n return super().javaScriptConfirm(self, frame, msg)\n\n def javaScriptAlert(self, frame, msg):\n if not self.config.suppress_alerts:\n return super().javaScriptAlert(frame, msg)\n\n def certificateError(self, error):\n \"\"\"\n Handle SSL errors in the browser.\n Overridden from QWebEnginePage.\n Called whenever the browser encounters an SSL error.\n Checks the ssl_mode and responds accordingly.\n Doesn't seem to get called in Qt 5.4\n \"\"\"\n\n self.debug(\"certificate error\")\n \n if self.config.ssl_mode == 'ignore':\n self.debug(\"Certificate error ignored\")\n self.debug(error.errorDescriptforce_js_confirmion())\n return True\n \n else:\n self.setHtml(\n msg.CERTIFICATE_ERROR.format(\n url=error.url().toString(),\n start_url=self.config.start_url))\n\n def renderProcessTerminated(self, *args):\n self.debug(\"RenderProcessTerminated: {}\".format(args))\n super().renderProcessTerminated(args)\n\n# ### END PGUIWEBPAGE DEFINITION ### #\n","sub_path":"pguiwebpage.py","file_name":"pguiwebpage.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257595950","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'hotelsite.views.home', name='home'),\n url(r'^about/', 'hotelsite.views.about', name='about'),\n url(r'^reviews/', 'hotelsite.views.reviews', name='reviews'),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"hotelreviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"420646436","text":"import argparse\nimport unittest\nimport sys\nfrom test.time_logging_test_runner import TimeLoggingTestRunner\n\ntests_abbreviations = {\n 'ea': 'test_example_a',\n 'aa': 
'test_answer_part_a',\n 'eb': 'test_example_b',\n 'ab': 'test_answer_part_b'}\n\n\ndef run_tests(prefix='test', pattern='test*.py', verbose=False):\n verbosity_level = 2 if verbose else 1\n loader = unittest.TestLoader()\n loader.testMethodPrefix = prefix\n tests = loader.discover('test', pattern)\n runner = TimeLoggingTestRunner(verbosity=verbosity_level)\n runner.run(tests)\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='AdventOfCode2018')\n parser.add_argument('-a', '--all', action='store_true', help='Will run all the tests')\n parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')\n parser.add_argument('-d', '--day', type=str, help='The day to test.')\n parser.add_argument('-t', '--tests', choices=tests_abbreviations.keys(),\n help='Which tests to run: ' + str(tests_abbreviations))\n\n args = parser.parse_args()\n\n if args.all:\n run_tests(verbose=args.verbose)\n elif args.day is not None:\n file_pattern = 'test_day' + args.day + '.py'\n if args.tests is not None:\n prefix = tests_abbreviations[args.tests]\n run_tests(prefix=prefix, pattern=file_pattern, verbose=args.verbose)\n else:\n run_tests(pattern=file_pattern, verbose=args.verbose)\n else:\n run_tests(verbose=args.verbose)\n\n\nif __name__ == '__main__':\n assert sys.version_info >= (3,7)\n main()\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"628721010","text":"# encoding: utf-8\n\n###########################################################################################################\n#\n#\n#\tReporter Plugin\n#\n#\tRead the docs:\n#\thttps://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Reporter\n#\n#\n###########################################################################################################\n\n\nfrom GlyphsApp.plugins import *\n\nclass ShowVerticalMetrics(ReporterPlugin):\n\n\tdef settings(self):\n\t\tself.menuName = Glyphs.localize({'en': u'Vertical Metrics', 'de': u'Vertikalmaße'})\n\t\tself.verticalMetrics = (\n\t\t\t\"hheaAscender\",\n\t\t\t\"hheaDescender\",\n\t\t\t# \"hheaLineGap\",\n\t\t\t\"typoAscender\",\n\t\t\t\"typoDescender\",\n\t\t\t# \"typoLineGap\",\n\t\t\t\"winAscent\",\n\t\t\t\"winDescent\"\n\t\t)\n\t\t\n\tdef background(self, layer):\n\t\tdefaultColor = NSColor.greenColor()\n\t\tdefaultColor.set()\n\t\tthisMaster = layer.associatedFontMaster()\n\t\theightsAlreadyUsed = []\n\t\txPosition = self.controller.viewPort.origin.x - self.controller.selectedLayerOrigin.x\n\t\tif thisMaster:\n\t\t\tfor thisMetric in self.verticalMetrics:\n\t\t\t\theight = thisMaster.customParameters[thisMetric]\n\t\t\t\tif height:\n\t\t\t\t\tif thisMetric == \"winDescent\":\n\t\t\t\t\t\theight *= -1\n\n\t\t\t\t\talignment = \"bottomright\"\n\t\t\t\t\tif height in heightsAlreadyUsed:\n\t\t\t\t\t\talignment = \"topright\"\n\t\t\t\t\t\tif \"win\" in thisMetric:\n\t\t\t\t\t\t\talignment = \"bottomleft\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tzoomFactor = self.getScale()\n\t\t\t\t\t\theightsAlreadyUsed.append(height)\n\t\t\t\t\t\tline = NSBezierPath.bezierPath()\n\t\t\t\t\t\tline.moveToPoint_( NSPoint(-50000, height) )\n\t\t\t\t\t\tline.lineToPoint_( NSPoint(+50000, height) )\n\t\t\t\t\t\tline.setLineWidth_( 1.0/zoomFactor )\n\t\t\t\t\t\tline.setLineDash_count_phase_( [1.0/zoomFactor, 3.0/zoomFactor], 2, 3.5/zoomFactor )\n\t\t\t\t\t\tline.stroke()\n\t\t\t\t\t\n\t\t\t\t\tself.drawTextAtPoint(\n\t\t\t\t\t\t\" 
\"+thisMetric+\" \", \n\t\t\t\t\t\tNSPoint(\n\t\t\t\t\t\t\t(xPosition+80)/zoomFactor, \n\t\t\t\t\t\t\theight+2/zoomFactor if \"bottom\" in alignment else height,\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\tfontColor=defaultColor,\n\t\t\t\t\t\talign=alignment\n\t\t\t\t\t\t)\n\n","sub_path":"ShowVerticalMetrics.glyphsReporter/Contents/Resources/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"646572813","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\n\n\n\n\nimport bpy\n\n\n\n# アドオン情報\nbl_info = {\n\t\"name\" : \"Sculpt status header\",\n\t\"author\" : \"bookyakuno\",\n\t\"version\" : (0, 1),\n\t\"blender\" : (2, 77),\n\t\"location\" : \"Sculpt Mode > 3DView > header > Left\",\n\t\"description\" : \"Sculpt smart status\",\n\t\"warning\" : \"\",\n\t\"wiki_url\" : \"\",\n\t\"tracker_url\" : \"\",\n\t\"category\" : \"UI\"\n}\n\n\n\n# ヘッダーに項目追加\ndef sculpt_header(self, context):\n\n\tlayout = self.layout\n\t\n\tif context.sculpt_object:\n\t\n\t\tsculpt = context.tool_settings.sculpt\n\t\t\n\t\tcol = layout.column(align=True)\n\t\trow = col.row(align=True)\n\t\t# シンメトリー\n\t\trow.prop(sculpt, \"use_symmetry_x\", text=\"X\", toggle=True)\n\t\trow.prop(sculpt, \"use_symmetry_y\", text=\"Y\", toggle=True)\n\t\trow.prop(sculpt, \"use_symmetry_z\", text=\"Z\", toggle=True)\n\t\n\n\n# \t\ttoolsettings = context.tool_settings\n# \t\tsettings = self.paint_settings(context)\n# \t\tbrush = settings.brush\n# \n# \n# \t\tcol.template_ID_preview(settings, \"brush\", new=\"brush.add\", rows=3, cols=8)\n\t\n\t\n\t\n\t\t# Dynatopo\n\t\trow.separator()\n\t\tif context.sculpt_object.use_dynamic_topology_sculpting:\n\t\t\trow.operator(\"sculpt.dynamic_topology_toggle\", icon='CANCEL', text=\"\")\n\t\telse:\n\t\t\trow.operator(\"sculpt.dynamic_topology_toggle\", icon='MOD_REMESH', text=\"\")\n\n\n\n\n\n\t# アドオンを有効にしたときの処理\ndef register():\n\t# オペレーターなどを登録\n# \tbpy.utils.register_module(__name__)\n\t# ヘッダーメニューに項目追加\n\tbpy.types.VIEW3D_HT_header.prepend(sculpt_header)\n\n# アドオンを無効にしたときの処理\ndef unregister():\n\t# オペレーターなどを解除\n# \tbpy.utils.unregister_module(__name__)\n\t# ヘッダーメニューの項目解除\n\tbpy.types.VIEW3D_HT_header.remove(sculpt_header)\n\n\n\n# このスクリプトを単独で実行した時に実行\nif __name__ == '__main__':\n\tregister()\n","sub_path":"Sculpt_status_header.py","file_name":"Sculpt_status_header.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"420873359","text":"import time\nimport requests\n\nfor i in range(10):\n try:\n requests.get(\"http://127.0.0.1:5112/shutdown\")\n time.sleep(5)\n break\n except Exception as e:\n print(f\"ERROR: ({type(e)}) - 
{e}\")\n","sub_path":"challenge-2-hotels-filter/solutions/better_solution/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"15784598","text":"## run 10 motif split simulation script\nimport logging\n\nimport click \nimport numpy as np\nfrom pathlib import Path\n\nimport torch\nfrom torch import optim\n\nfrom raptgen import models\nfrom raptgen.models import CNN_Mul_VAE, LSTM_Mul_VAE, CNNLSTM_Mul_VAE\nfrom raptgen.models import CNN_AR_VAE, LSTM_AR_VAE, CNNLSTM_AR_VAE\nfrom raptgen.models import CNN_PHMM_VAE, LSTM_PHMM_VAE, CNNLSTM_PHMM_VAE\n\nfrom raptgen.data import SequenceGenerator, SingleRound\n\nimport os\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndefault_path = str(Path(f\"{dir_path}/../out/simlulation/paired\").resolve())\n\n@click.command(help='run experiment with paired motif',\n context_settings=dict(show_default=True))\n@click.option(\"--n-seq\", help = \"the number of the sequence to generate\", type = int, default = 5000)\n@click.option(\"--seed\", help = \"seed for seqeunce generation reproduction\", type = int, default = 0)\n@click.option(\"--epochs\", help = \"the number of training epochs\", type = int, default = 1000)\n@click.option(\"--threshold\", help = \"the number of epochs with no loss update to stop training\", type = int, default = 50)\n@click.option(\"--use-cuda/--no-cuda\", help = \"use cuda if available\", is_flag=True, default = True)\n@click.option(\"--cuda-id\", help = \"the device id of cuda to run\", type = int, default = 0)\n@click.option(\"--save-dir\", help = \"path to save results\", type = click.Path(), default=default_path)\n@click.option(\"--reg-epochs\", help = \"the number of epochs to conduct state transition regularization\", type = int, default=50)\n@click.option(\"--multi\", help = \"the number of training for multiple times\", type = int, default=1)\n@click.option(\"--only-cnn/--all-models\", help = \"train all encoder types or not\", type = bool, default=False)\ndef main(n_seq, seed, epochs, threshold, cuda_id, use_cuda, save_dir,reg_epochs, multi, only_cnn):\n logger = logging.getLogger(__name__)\n \n logger.info(f\"saving to {save_dir}\")\n save_dir = Path(save_dir).expanduser()\n save_dir.mkdir(exist_ok = True, parents=True)\n\n # generate sequences\n fwd_adapter = \"AAAAA\"\n rev_adapter = \"GGGGG\"\n\n generator = SequenceGenerator(\n num_motifs = 1,\n seed=seed, \n fix_random_region_length=True, \n error_rate=0, \n generate_motifs=True, \n add_primer=True, \n forward_primer=fwd_adapter,\n reverse_primer=rev_adapter, \n middle_insert_range=[2, 6],\n one_side_proba=0.5,\n paired=True)\n \n reads, motif_indices, paired_indices = generator.sample(n_seq)\n with open(save_dir/\"seqences.txt\",\"w\") as f:\n for index, read in zip(motif_indices, reads):\n f.write(f\"{index}, {read}\\n\")\n with open(save_dir/\"motifs.txt\",\"w\") as f:\n for motif in generator.motifs:\n f.write(f\"{motif}\\n\") \n\n experiment = SingleRound(\n reads,\n forward_adapter = fwd_adapter,\n reverse_adapter = rev_adapter)\n\n # training \n train_loader, test_loader = experiment.get_dataloader(use_cuda=use_cuda)\n device = torch.device(f\"cuda:{cuda_id}\" if (use_cuda and torch.cuda.is_available()) else \"cpu\")\n \n train_kwargs = {\n \"epochs\" : epochs,\n \"threshold\" : threshold,\n \"device\" : device,\n \"train_loader\" : train_loader,\n \"test_loader\" : test_loader,\n \"save_dir\" : save_dir,\n \"beta_schedule\" : 
True, \n \"force_matching\" : True,\n \"force_epochs\" : reg_epochs,\n }\n\n # evaluate models\n target_len = experiment.random_region_length\n\n results = dict()\n for i in range(multi):\n eval_models = [\n CNN_Mul_VAE (target_len=target_len, embed_size=2),\n CNN_AR_VAE (embed_size=2),\n CNN_PHMM_VAE (motif_len=target_len, embed_size=2)\n ]\n if not only_cnn:\n eval_models.extend([\n LSTM_Mul_VAE (target_len=target_len, embed_size=2),\n LSTM_AR_VAE (embed_size=2),\n LSTM_PHMM_VAE (motif_len=target_len, embed_size=2),\n CNNLSTM_Mul_VAE(target_len=target_len, embed_size=2),\n CNNLSTM_AR_VAE(embed_size=2),\n CNNLSTM_PHMM_VAE(motif_len=target_len, embed_size=2)])\n for model in eval_models:\n model_str = str(type(model)).split(\"\\'\")[-2].split(\".\")[-1].lower()\n if multi > 1:\n model_str += f\"_{i}\"\n model_str += \".mdl\"\n print(f\"training {model_str}\")\n optimizer = optim.Adam(model.parameters())\n model = model.to(device)\n\n train_kwargs.update({\n \"model\" : model,\n \"model_str\" : model_str,\n \"optimizer\" : optimizer})\n results[model_str] = models.train(**train_kwargs)\n\n torch.cuda.empty_cache()\n\nif __name__ == \"__main__\":\n Path(\"./.log\").mkdir(parents=True, exist_ok=True)\n formatter = '%(levelname)s : %(name)s : %(asctime)s : %(message)s'\n logging.basicConfig(\n filename='.log/logger.log',\n level=logging.DEBUG,\n format=formatter)\n \n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger('').addHandler(console)\n main()\n","sub_path":"scripts/paired.py","file_name":"paired.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223107270","text":"import config\n\nimport mongoengine as mdb\n\n\nclass Category(mdb.Document):\n \"\"\"Stores a category label and its optional budget\"\"\"\n\n CATEGORIES = (\n \"Groceries\",\n \"Costco\",\n \"Dining\",\n \"Transportation\",\n \"Entertainment\",\n \"Shopping\",\n \"Healthcare\",\n \"Bills\",\n \"TBD\",\n )\n\n label = mdb.StringField(required=True, choices=CATEGORIES)\n\n budget = mdb.DecimalField(required=False)\n\n\nclass CategoryMap(mdb.Document):\n \"\"\"Stores mapping of merchant to category\"\"\"\n\n category = mdb.ReferenceField(Category, required=True)\n\n merchant = mdb.StringField(required=True, max_length=80)\n\n\nclass Account(mdb.Document):\n \"\"\"Stores all accounts of interest, including closed accounts\"\"\"\n\n BANKS = {\n \"CapitalOne\",\n \"Chase\",\n \"Tangerine\",\n \"RBC\",\n \"Fidelity\",\n }\n\n CARD_TYPES = {\n \"VISA\",\n \"MASTERCARD\",\n \"AMEX\",\n \"DEBIT\",\n }\n\n # Financial institution backing the account\n bank = mdb.StringField(required=True, choices=BANKS)\n\n # The type of card\n card_type = mdb.StringField(required=True, choices=CARD_TYPES)\n\n # Last four digits of the card\n last_four = mdb.IntField(required=True, min_length=4, max_length=4)\n\n # The date the account was closed, if no longer active\n closed_date = mdb.DateTimeField()\n\n # descriptive information like \"Costco\" or \"Amazon Canadian Visa\"\n description = mdb.StringField(max_length=80)\n\n\nclass Transaction(mdb.Document):\n \"\"\"Describes a single transaction event\"\"\"\n\n # dollar amount\n amount = mdb.DecimalField(required=True)\n\n # descriptive account name (human readable)\n account = mdb.ReferenceField(Account, required=True)\n\n # date of transaction\n date = mdb.DateTimeField(required=True)\n\n # full description either from original statement or provided by user\n 
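# optional free-text, limited to 80 characters\n    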
description = mdb.StringField(required=False, max_length=80)\n\n category = mdb.ReferenceField(Category)\n\n # TODO - not sure if this date index works\n meta = {\n 'ordering': ['-date'],\n 'indexes': ['date'],\n }\n\n def csv_row(self):\n return \",\".join([\n self.date.strftime(config.time_format),\n self.category.label,\n self.account.description,\n self.description,\n '{:.2f}'.format(self.amount),\n ]) + '\\n'\n\n @staticmethod\n def csv_header():\n return \",\".join([\n 'date (YYYY-MM-DD)',\n 'category',\n 'account',\n 'description',\n 'amount',\n ]) + '\\n'\n\n @staticmethod\n def query(start_date, end_date, category_filters=None):\n \"\"\"Returns a list of filtered and mutated objects\"\"\"\n\n if category_filters:\n categories = Category.objects(\n label__in=category_filters)\n\n results = Transaction.objects(\n date__gte=start_date,\n date__lt=end_date,\n category__in=categories)\n else:\n results = Transaction.objects(\n date__gte=start_date,\n date__lt=end_date)\n\n return results\n","sub_path":"compoundfin/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"265245798","text":"import argparse\n\nfrom nfl_json.download import Download\n\ndef main():\n parser = argparse.ArgumentParser(description='Download new JSON game data from NFL.com')\n parser.add_argument('--year', type=int, default=None, \n help='Filter to a specific year.')\n parser.add_argument('--players', action='store_true', \n help='Force an update on the players JSON file.')\n args = parser.parse_args()\n \n download = Download()\n download.main(year=args.year, force_players_update=args.players)\n\nif __name__ == '__main__':\n main()","sub_path":"nfl_json/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"454624185","text":"\"\"\"\nThis spider is a StaffCare spider created on top of the ATSSpider\nscrapy crawl staffcare -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.staffcare.com/job-search/\"\n\nsample url:\nhttp://www.staffcare.com/job-search/\n\"\"\"\n\nfrom re import compile, sub\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, NormalizedJoin, ConvertDateString\nfrom brightcorp.lib.utils import extract_first\n\n\nclass StaffCare(ATSSpider):\n\n name = \"staffcare\"\n url_fragmentanchor = '?ps=90&page=1'\n ref_re = compile(r\"jId=(\\d+)\")\n table_map_data = {\n 'Job Title': 'title',\n 'State': 'location',\n 'Specialty': 'jobcategory',\n 'Date Posted': 'date',\n }\n download_delay = 0.4\n\n def parse(self, response):\n sel = Selector(response)\n tableheads = sel.xpath(\n '//table[@class=\"tbl-lined\"]/tr/th/a/text()'\n ).extract()\n meta_xpaths = {}\n for th in tableheads:\n if th in self.table_map_data:\n meta_xpaths[self.table_map_data[th]] = \"./td[\" + str(\n tableheads.index(th) + 2) + \"]//text()\"\n\n jobs = sel.xpath('//table[@class=\"tbl-lined\"]//tr')\n for job in jobs:\n job_url = job.xpath('./td/a/@href').extract()\n if job_url:\n job_url = urljoin(response.url, job_url[0])\n meta = {}\n for mx in meta_xpaths:\n meta[mx] = job.xpath(meta_xpaths[mx]).extract()\n\n yield Request(\n job_url, callback=self.parse_job_callback(), 
meta=meta\n )\n\n next_url = extract_first(sel.xpath('//a[@title=\"Next\"]/@href'))\n if next_url:\n next_url = urljoin(response.url, next_url)\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n for k, v in response.meta.iteritems():\n if k in self.table_map_data.values():\n if k == 'date':\n loader.add_value(k, v, ConvertDateString('%m/%d/%Y'))\n else:\n loader.add_value(k, v)\n\n loader.add_value('url', response.url)\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n\n loader.add_xpath(\n 'duration', '//label[text()=\"Duration:\"]/following-sibling::text()'\n )\n loader.add_xpath(\n 'description',\n [\n '//h3[contains(text(), \"Job Description & Requirements\")]',\n '//h3[contains(text(), \"Job Description & Requirements\")]/following-sibling::node()',\n ]\n )\n loader.add_xpath(\n 'qualifications',\n [\n '//div[@id=\"divRequiredQualifications\"]',\n '//div[@id=\"divPreferredQualifications\"]',\n ]\n )\n loader.add_xpath(\n 'benefits', '//div[@id=\"divJobBenefits\"]', NormalizedJoin()\n )\n loader.add_xpath(\n 'company_description', '//div[@id=\"divCompany\"]', NormalizedJoin()\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/staffcare.py","file_name":"staffcare.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"608646535","text":"import argparse\nimport configparser\nimport os\nimport core \nimport random\nimport Utility.Plotter as PL\n\n# Main function! Runs the program!\ndef main():\n\tprint (\"\\n\\n######################################################\\n\")\n\tprint (\"ARNF V1.0b \\n\")\n\tprint (\"######################################################\\n\")\t\n\n\tprint (\"Parsing the arguements...\\n\")\n\n\targParser = argparse.ArgumentParser(description=\"A tool to study Artificial Regulatory Networks.\")\n\targParser.add_argument('-gc', '--generateConfig',\n\t\thelp=\"Generates the default config.ini file. Note: removes the existing file.\", action=\"store_true\")\n\targParser.add_argument('-gus', '--generateUniqueSeq',\n\t\thelp=\"Generates the default inputs.ini file. Note: removes the existing file.\", action=\"store_true\")\n\targParser.add_argument('-sp', '--simplePlot',\n\t\thelp=\"Plots the files on the output folder. Can have these values: Task, Evolution\")\n\targs = argParser.parse_args()\n\n\tif args.generateConfig:\n\t\tgenerateConfig()\n\t\tprint(\"ARNF: config.ini generated. Exiting!\\n\")\n\t\texit(1)\n\n\tif args.generateUniqueSeq:\n\t\tgenerateUniqueSeq()\n\t\tprint(\"ARNF: inputs.ini generated. Exiting!\\n\")\n\t\texit(1)\n\n\tif args.simplePlot:\n\t\tPlotter = PL.Plotter()\n\t\tPlotter.simplePlot(args.simplePlot)\n\t\tprint(\"ARNF: Plot generated. Exiting!\\n\")\n\t\texit(1)\t\n\n\tprint (\"Running the core...\\n\")\n\n\tif not os.path.isfile(\"config.ini\"):\n\t\tprint(\"ARNF: config.ini file does not exist! Exiting!\\n\")\n\t\texit(1)\n\n\tconfigParser = configparser.ConfigParser()\n\tconfigParser.read(\"config.ini\")\n\trandom.seed(configParser['CORE']['seed'])\n\tcoreObj = core.Core()\n\tcoreObj.run()\n\n# Core configuration. 
It's here cause we don't have a seperate directory for the application core for the sake of simplicity\ndef gConfig():\n\tconf = {\n\t\t\t\"runs\": \"10\",\n\t\t\t\"task\": \"ClosedWorld\",\n\t\t\t\"genome\" : \"Linear\",\n\t\t\t\"seed\" : 100,\n\t\t\t\"network\" : \"ARNB\",\n\t\t\t\"evolver\" : \"EOne\",\n\t\t\t\"elitism\" : True,\n\t\t}\n\treturn conf\n\n\t\n# Dynamically generates the config file\ndef generateConfig():\n\tdirList = [\"Genome\", \"Task\", \"Evolver\", \"Network\"]\n\n\tparser = configparser.ConfigParser()\n\tparser['CORE'] = gConfig()\n\n\tfor eachDir in dirList:\n\t\tfor subdirs, dirs, files in os.walk(eachDir):\n\t\t\tfor file in files:\n\t\t\t\tname = file.split('.') \n\t\t\t\tif name[1] == \"py\" and not (file[0:8] == \"Abstract\"):\n\t\t\t\t\tmodule = __import__(eachDir + \".\" + name[0])\n\t\t\t\t\tmy_class = getattr(getattr(module, name[0]), name[0])\n\t\t\t\t\tparser[(eachDir + '.' +name[0]).upper()] = my_class.gConfig()\n\n\twith open('config.ini', 'w') as configfile:\n\t\tparser.write(configfile)\n\tpass\n\ndef generateUniqueSeq():\n\tparser = configparser.ConfigParser()\n\tresults = {}\n\tfor i in range(1000):\n\t\tstring = \"\"\n\t\tfor j in range(32):\n\t\t\tstring += str(random.randint(0,1))\n\t\tresults[i] = string\n\ttemp = {}\n\tcounter = 0\n\tfor index, seq in results.items():\n\t\tfound = 0\n\t\tfor key, value in temp.items():\n\t\t\tif value == seq:\n\t\t\t\tfound = 1\n\t\t\t\tbreak\n\t\tif found == 0:\n\t\t\ttemp[counter] = seq\n\t\t\tcounter += 1\n\n\tparser[\"INP\"] = temp\n\n\twith open('inputs.ini', 'w') as configfile:\n\t\tparser.write(configfile)\n\tpass\n\nmain()","sub_path":"arnf.py","file_name":"arnf.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"149699349","text":"import os\nimport pandas as pd\nfrom Datasets.src.predict_genre import main as predict_genre\n\nTestSub_df = pd.DataFrame(columns = ['movie','genre','subtitles'])\nfor subdir in os.listdir('Datasets\\TestDataset'):\n for file in os.listdir(os.path.join('Datasets\\TestDataset', subdir)):\n with open(\"Datasets/TestDataset/\"+subdir+'/'+file, 'r',encoding='Latin-1') as subs:\n data = ' '.join([line for line in subs.readlines()]).replace('\\n', ' ')\n TestSub_df = TestSub_df.append({'movie':os.path.splitext(file)[0],'genre':subdir,'subtitles':data},ignore_index=True)\nTestSub_df.to_csv('TestSub.csv')\n","sub_path":"TheGoodTheBad&TheAI/PreprocessTestSub.py","file_name":"PreprocessTestSub.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128048433","text":"# Динамическая типизация двумерных циклов\n\"\"\"\nЗадача короля\n\"\"\"\n\n\"\"\"\nНаибольшая общая подпоследовательность (длина этой подпоследователньости)\n\"\"\"\n# его скорость О(M)*N\ndef lcs(A, B):\n \"\"\"\n\n :param A: Массивы чисел\n :param B: Массивы чисел\n F = длина наибольшей общей подпоследовательности\n :return:\n \"\"\"\n F = [[0] * (len(B) + 1) for i in range(len(A) + 1)]\n for i in range(len(A) + 1):\n for j in range(len(B) + 1):\n if A[i - 1] == B[j - 1]:\n F[i][j] = 1 + F[i - 1][j - 1]\n else:\n F[i][j] = max(F[i-1][j], F[i][j - 1])\n return F[-1][-1] # из последней строки последний элемент\nA = [1, 2, 3, 4, 5]\nB = [1, 2, 3]\nprint(lcs(A, B))\n\n# Наибольшая возрастающая подпоследованность\n\"\"\"\nFi = НВП для части А[0:i], которая заканчивается и содержит элемент аi = A[i-1]\n\"\"\"\nC = [3, 2, 7, 4, 5, 6]\ndef 
gis(C):\n \"\"\"\n :param C: Длина наибольшей общей возрастающей подпоследовательности\n :return:\n \"\"\"\n F = [0] * (len(C) + 1)\n for i in range(1, len(C) + 1):\n m = 0 # максимум\n for j in range(0, i):\n if C[i - 1] > C[j - 1] and F[j - 1] > m:\n m = F[j]\n F[i] = m + 1\n return F[len(C)]\n\n\nprint(gis(C))","sub_path":"МФТИ Лекции/Лекция №11 Двумерное программирование.py","file_name":"Лекция №11 Двумерное программирование.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347254261","text":"\"\"\"Python script to run FAIRSEQ models on TPU\n\nThis file mimics pytorch/fairseq/train.py, but contains some changes that work\n well with TPUs. Example bash script:\n\n\n```bash\nexport XRT_TPU_CONFIG=\"tpu_worker;0;$TPU_IP_ADDRESS:8470\"\npython fairseq_train_tpu.py \\\n $path_data \\\n --arch=transformer_vaswani_wmt_en_de_big \\\n --max-sentences=$batch_size \\\n --max-sentences-valid=$batch_size \\\n --max-source-positions=128 \\\n --max-target-positions=128 \\\n --required-batch-size-multiple=$batch_size \\\n --max-tokens=4096 \\\n --no-save \\\n --attention-dropout=0.1 \\\n --no-progress-bar \\\n --criterion=label_smoothed_cross_entropy \\\n --log-interval=100 \\\n --source-lang=en \\\n --lr-scheduler=inverse_sqrt \\\n --min-lr 1e-09 \\\n --skip-invalid-size-inputs-valid-test \\\n --target-lang=de \\\n --label-smoothing=0.1 \\\n --update-freq=1 \\\n --optimizer adam \\\n --warmup-init-lr 1e-07 \\\n --lr 0.0005 \\\n --warmup-updates 4000 \\\n --share-all-embeddings \\\n --dropout 0.3 \\\n --weight-decay 0.0 \\\n --valid-subset=valid \\\n --max-epoch=5 \\\n --num_cores=8 \\\n --metrics_debug \\\n --pad_to_length=128 \\\n --log_steps=100\n```\n\nHere, TPU specific flags are\n --num_cores\n --metrics_debug\n --pad_to_length\n --log_steps\n\n\"\"\"\n\nimport argparse\nimport sys\nimport os\nimport math\nimport collections\nfrom datetime import datetime\nimport utils\n\nutils.initialize_path('fairseq')\n\nimport torch\n\nimport torch_xla\nimport torch_xla_py.data_parallel as dp\nimport torch_xla_py.utils as xu\nimport torch_xla_py.xla_model as xm\n\nfrom fairseq.data import data_utils\n# Overwriting collate_tokens to guarantee constant size input tensors\n# This is reducing the number of graph recompiles\ncollate_tokens_gpu = data_utils.collate_tokens\nimport train as fairseq_train\n\n\ndef collate_tokens_tpu(values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False):\n # Copied over from fairseq.data_utils, and modified so that num_columns\n # in the output tensor is not too variable.\n\n # correcting columns\n global PAD_TO_LENGTH\n size = max(v.size(0) for v in values)\n if size > PAD_TO_LENGTH:\n xu.eprint(\n 'I had to change PAD_TO_LENGTH from {} to {}, this is going to trigger graph recompiles'\n .format(PAD_TO_LENGTH, size))\n PAD_TO_LENGTH = size\n size = PAD_TO_LENGTH\n # done correcting\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\n\ndata_utils.collate_tokens = collate_tokens_tpu\n\nfrom fairseq import options, tasks, checkpoint_utils, progress_bar, utils\nfrom fairseq.trainer import Trainer\nfrom fairseq.data import iterators\nfrom 
fairseq.meters import StopwatchMeter, AverageMeter\n\n\ndef parse_args():\n # We need to control certain flags here.\n # e.g. parallelization needs to be suppressed and deferred to torch_xla flags\n # e.g. input tensor shapes need to be controlled via\n # max_sentences, required_batch_size_multiple\n parser = options.get_training_parser()\n parser.add_argument('--num_cores', type=int, default=8)\n parser.add_argument('--pad_to_length', type=int, default=64)\n parser.add_argument('--log_steps', type=int, default=20)\n parser.add_argument('--use_gpu', action='store_true')\n parser.add_argument('--metrics_debug', action='store_true')\n FLAGS = options.parse_args_and_arch(parser)\n if not FLAGS.use_gpu:\n if FLAGS.fp16:\n raise RuntimeError(\n '--fp16 was provided, this is controlled by env var XLA_USE_BF16')\n if FLAGS.distributed_world_size > 1:\n xu.eprint('suppressing \"distributed_world_size\"')\n FLAGS.distributed_world_size = 1\n if FLAGS.distributed_init_method is not None:\n xu.eprint('suppressing \"distributed_init_method\"')\n FLAGS.distributed_init_method = None\n if FLAGS.max_sentences != FLAGS.required_batch_size_multiple:\n batch_size = max(\n filter(lambda r: r is not None,\n [FLAGS.max_sentences, FLAGS.required_batch_size_multiple]))\n xu.eprint(\n '\"max_sentences\" and \"required_batch_size_multiple\" must be equal'\n ' to have good performance on TPUs. Using {}'.format(batch_size))\n FLAGS.max_sentences = batch_size\n FLAGS.required_batch_size_multiple = batch_size\n if FLAGS.max_sentences_valid is not None and FLAGS.max_sentences_valid != FLAGS.max_sentences:\n FLAGS.max_sentences_valid = FLAGS.max_sentences\n xu.eprint('\"max_sentences_valid\" and \"max_sentences\" must be equal'\n ' to have good performance on TPUs. Using {}'.format(\n FLAGS.max_sentences))\n if FLAGS.max_tokens is not None:\n xu.eprint('\"max_tokens\" needs to be None for better TPU performance')\n FLAGS.max_tokens = None\n return FLAGS\n\n\ndef prepare_task(args, devices):\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(','):\n task.load_dataset(valid_sub_split, combine=True, epoch=0)\n\n # Build models and criteria to print some metadata\n model_parallel = dp.DataParallel(\n lambda: task.build_model(args), device_ids=devices)\n model, criterion = task.build_model(args), task.build_criterion(args)\n print(model)\n print('| model {}, criterion {}'.format(args.arch,\n criterion.__class__.__name__))\n print('| num. model params: {} (num. 
trained: {})'.format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n ))\n del model, criterion\n\n # Build trainers\n trainers = {\n device: Trainer(args, task, model, task.build_criterion(args), xla=True)\n for device, model in zip(model_parallel.devices, model_parallel.models)\n }\n trainer = trainers[devices[0]]\n lr = trainer.get_lr()\n\n # TODO(taylanbil): for now, this next line is only creating the iterator.\n # validate its behavior with the case where a checkpoint actually exists.\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)\n valid_subsets = args.valid_subset.split(',')\n return task, trainers, model_parallel, epoch_itr, lr, valid_subsets\n\n\ndef main_tpu(args):\n\n def log_step(step_type, device, step, tracker=None, metrics_debug=False):\n msg = '{}/ {}, device {}, step {}'.format(step_type, utils.now(), device,\n step)\n if tracker:\n rates = tracker.rate(), tracker.global_rate()\n msg += ', Rate={:.2f}, Global Rate={:.2f}'.format(*rates)\n return msg\n\n def train_loop_fn(model, loader, device, context):\n trainer = trainers[str(device)]\n stats = None\n tracker = xm.RateTracker()\n for i, samples in loader:\n if i and not (i % args.log_steps):\n print(\n log_step(\n 'training',\n device,\n i,\n tracker=tracker,\n metrics_debug=args.metrics_debug))\n _log_output = trainer.train_step(samples)\n xm.optimizer_step(trainer.optimizer)\n tracker.add(len(samples) * args.max_sentences) # n_batches * batch_size\n stats = fairseq_train.get_training_stats(trainer)\n return tracker, stats\n\n def valid_loop_fn(model, loader, device, context):\n trainer = trainers[str(device)]\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss']:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n for i, sample in loader:\n if not (i % args.log_steps):\n print(\n log_step(\n 'validation',\n device,\n i,\n tracker=None,\n metrics_debug=args.metrics_debug))\n log_output = trainer.valid_step(sample)\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue\n extra_meters[k].update(v)\n stats = fairseq_train.get_valid_stats(trainer)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n return stats\n\n def validate_subset(args, trainers, task, epoch_itr, subset):\n print('Validating the subset \"{}\"'.format(subset))\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n list(trainers.values())[0].get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_workers=args.num_workers).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args,\n itr,\n epoch_itr.epoch,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple')\n stats_per_device = model_parallel(valid_loop_fn, progress)\n valid_losses = [stats['loss'].avg for stats in stats_per_device]\n print('validation stats on subset \"{}\" - {}'.format(subset, utils.now()))\n for stats in stats_per_device:\n progress.print(stats, 
tag=subset, step=trainer.get_num_updates())\n return valid_losses\n\n def validate(args, trainers, task, epoch_itr, subsets):\n valid_losses = {\n subset: validate_subset(args, trainers, task, epoch_itr, subset)\n for subset in subsets\n }\n return valid_losses\n\n def initialize_loader_for_epoch(args, epoch_itr):\n if epoch_itr.epoch <= len(args.update_freq):\n update_freq = args.update_freq[epoch_itr.epoch - 1]\n else:\n update_freq = args.update_freq[-1]\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=False, shuffle=(epoch_itr.epoch >= args.curriculum))\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch, no_progress_bar='simple')\n return progress\n\n def keep_training(lr, epoch_itr, trainers):\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n max_update = args.max_update or math.inf\n lr = min(trainer.get_lr() for trainer in trainers.values())\n n_updates = max(trainer.get_num_updates() for trainer in trainers.values())\n return ((lr > FLAGS.min_lr) and (epoch_itr.epoch < max_epoch) and\n (n_updates < max_update))\n\n xu.eprint('Args')\n for key, val in args.__dict__.items():\n xu.eprint('\\t{} {}'.format(key, val))\n xu.eprint('---------')\n\n devices = xm.get_xla_supported_devices(max_devices=args.num_cores)\n task, trainers, model_parallel, epoch_itr, lr, valid_subsets = prepare_task(\n args, devices)\n\n train_meter = StopwatchMeter()\n train_meter.start()\n while keep_training(lr, epoch_itr, trainers):\n # TRAINING\n print('Epoch {} begin {}'.format(epoch_itr.epoch + 1, utils.now()))\n progress = initialize_loader_for_epoch(args, epoch_itr)\n out = model_parallel(train_loop_fn, progress)\n trackers, stats_ = zip(*out)\n print('Epoch {} Training stats:'.format(epoch_itr.epoch))\n for device, trainer in trainers.items():\n stats = fairseq_train.get_training_stats(trainer)\n print('device {}'.format(device))\n progress.print(stats, tag=device)\n print('Epoch {} Tracker Rates:'.format(epoch_itr.epoch))\n for tracker in trackers:\n rates = tracker.rate(), tracker.global_rate()\n print('\\tRate={:.2f}, Global Rate={:.2f}'.format(*rates))\n print('Epoch {} end {}'.format(epoch_itr.epoch, utils.now()))\n if args.metrics_debug:\n print(torch_xla._XLAC._xla_metrics_report())\n\n # VALIDATION\n if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:\n valid_losses = validate(args, trainers, task, epoch_itr, valid_subsets)\n\n # only use average first validation loss from the first device\n # to update the learning rate\n vloss = valid_losses[valid_subsets[0]][0]\n print('old learning rate: {}'.format(lr))\n lr = trainers[devices[0]].lr_step(epoch_itr.epoch, vloss)\n print('new learning rate: {}'.format(lr))\n\n # save checkpoint\n if epoch_itr.epoch % args.save_interval == 0:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, vloss)\n\n if args.metrics_debug:\n print(torch_xla._XLAC._xla_metrics_report())\n\n train_meter.stop()\n print('| done training in {:.1f} seconds'.format(train_meter.sum))\n\n\nif __name__ == '__main__':\n # override certain args so that we use XLA parallelism instead of torch.\n FLAGS = parse_args()\n if FLAGS.use_gpu:\n data_utils.collate_tokens = collate_tokens_gpu\n fairseq_train.cli_main()\n else:\n PAD_TO_LENGTH = FLAGS.pad_to_length\n 
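# collate_tokens_tpu (installed at import time) pads every batch to PAD_TO_LENGTH so XLA sees fixed input shapes\n    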
main_tpu(FLAGS)\n","sub_path":"fairseq_train_tpu.py","file_name":"fairseq_train_tpu.py","file_ext":"py","file_size_in_byte":13403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"290676036","text":"# Python program to implement client side of chat room. \nimport socket \nimport select \nimport sys \n\n# Function to encode \ndef encoder(key, clear): \n enc = [] \n \n for i in range(len(clear)): \n key_c = key[i % len(key)] \n enc_c = chr((ord(clear[i]) +\n ord(key_c)) % 256) \n \n enc.append(enc_c) \n stri=\"\"\n stri = stri.join(enc) \n return stri\n \n# Function to decode \ndef decoder(key, enc): \n dec = [] \n \n \n for i in range(len(enc)): \n key_c = key[i % len(key)] \n dec_c = chr((256 + ord(enc[i]) -\n ord(key_c)) % 256) \n \n dec.append(dec_c) \n return \"\".join(dec) \n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \nif len(sys.argv) != 4: \n print(\"Correct usage: script, IP address, port number, user ID\")\n exit() \nIP_address = str(sys.argv[1]) \nPort = int(sys.argv[2]) \nserver.connect((IP_address, Port)) \nuserId = str(sys.argv[3])\nserver.send(userId.encode())\nserver_resp = server.recv(1024)\nserver_resp = server_resp.decode()\nprint(decoder(\"vigenerecipher\",server_resp)) \n# Vigenère cipher \n \n\n \nwhile True: \n \n # maintains a list of possible input streams \n sockets_list = [sys.stdin, server] \n \n \"\"\" There are two possible input situations. Either the \n user wants to give manual input to send to other people, \n or the server is sending a message to be printed on the \n screen. Select returns from sockets_list, the stream that \n is reader for input. So for example, if the server wants \n to send a message, then the if condition will hold true \n below.If the user wants to send a message, the else \n condition will evaluate as true\"\"\"\n read_sockets,write_socket, error_socket = select.select(sockets_list,[],[]) \n \"\"\"The above line waits for I/O in all three \"\"\"\n \n for socks in read_sockets: \n if socks == server: \n message = socks.recv(1024)\n message = message.decode()\n print(decoder(\"vigenerecipher\",message)) \n else: \n message = sys.stdin.readline() \n msg = message\n msg = encoder(\"vigenerecipher\",msg)\n server.send(msg.encode()) \n sys.stdout.write(\"\")\n sys.stdout.write(message) \n sys.stdout.flush() \nserver.close() \n","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"536937708","text":"AVAILABLE_EXPS = {'forward_model', 'inverse_model',\n 'tandem_net', 'vae', 'gan', 'inn'}\n\n\ndef get_configs(experiment):\n if experiment not in AVAILABLE_EXPS:\n raise NotImplementedError\n\n configs = {\n 'forward_model':\n {'model_name': 'forward_model',\n 'input_dim': 4,\n 'output_dim': 3,\n 'epochs': 100,\n 'weight_decay': 1e-5,\n 'learning_rate': 1e-3},\n\n 'inverse_model':\n {'model_name': 'inverse_net',\n 'input_dim': 3,\n 'output_dim': 4,\n 'epochs': 100,\n 'weight_decay': 1e-5,\n 'learning_rate': 1e-3},\n\n 'tandem_net':\n {'model_name': 'tandem_net',\n 'input_dim': 3,\n 'output_dim': 3,\n 'epochs': 100,\n 'weight_decay': 1e-5,\n 'learning_rate': 1e-3},\n\n 'vae':\n {'model_name': 'vae',\n 'input_dim': 4,\n 'latent_dim': 5,\n 'epochs': 500,\n 'weight_decay': 1e-5,\n 'learning_rate': 1e-3},\n\n 'gan':\n {'model_name': 'gan',\n 'input_dim': 3,\n 'hidden_dim': 128,\n 'output_dim': 4,\n 'noise_dim': 1,\n 'epochs': 500,\n 
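# the generator and the discriminator use separate learning rates below\n         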
'weight_decay': 1e-5,\n 'g_learning_rate': 1e-3,\n 'd_learning_rate': 1e-4},\n\n 'inn':\n {'model_name': 'inn',\n 'input_dim': 4,\n 'hidden_dim': 128,\n 'output_dim': 3,\n 'latent_dim': 2, \n 'ndim_total': 16,\n 'epochs': 2000,\n 'weight_decay': 1e-5,\n 'learning_rate': 5e-4},\n\n }\n\n return configs[experiment]\n\n \n ","sub_path":"Model/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323050536","text":"import asyncio\nimport aiohttp\nimport discord\nimport collections\nimport random\nimport functools\n\nfrom . import helpers\n\n\n__all__ = ('Client',)\n\n\nWaiter = collections.namedtuple('Waiter', ('event', 'predicate'))\n\n\nclass Client:\n\n \"\"\"\n Helper for communicating with the discord REST, Gateway and Voice APIs.\n\n Parameters:\n token: str\n using for authorization\n https://discordapp.com/developers/applications/me/create\n session: aiohttp.ClientSession\n used for requesting\n loop: asyncio.BaseEventLoop\n used for asynchronous tasks\n \"\"\"\n\n __slots__ = ('_session', '_client', '_listeners', '_waiters', '_voices',\n '_started', '_ready', '_initials', '_unavailable', '_acking',\n '_bot', '_loop')\n\n _inspect = staticmethod(lambda presence: (presence, 250))\n\n def __init__(self, token, bot = True, session = None, loop = None):\n\n if not loop:\n\n loop = asyncio.get_event_loop()\n\n if not session:\n\n session = aiohttp.ClientSession(loop = loop)\n\n if bot:\n\n token = 'Bot ' + token\n\n rest = discord.rest.Client(\n session,\n token = token,\n loop = loop\n )\n\n client = discord.Client(\n rest,\n token,\n callback = self._handle,\n loop = loop\n )\n\n self._session = session\n\n self._client = client\n\n self._listeners = {}\n\n self._waiters = collections.defaultdict(list)\n\n self._voices = {}\n\n self._started = asyncio.Event(loop = loop)\n\n self._ready = asyncio.Event(loop = loop)\n\n self._initials = []\n\n self._unavailable = []\n\n self._acking = None\n\n self._bot = bot\n\n self._loop = loop\n\n @property\n def voices(self):\n\n \"\"\"\n Active voice clients.\n \"\"\"\n\n return self._voices\n\n @property\n def ready(self):\n\n \"\"\"\n Set when the cache is complete.\n \"\"\"\n\n return self._ready\n\n @property\n def unavailable(self):\n\n \"\"\"\n All unavailable guild ids.\n \"\"\"\n\n return self._unavailable\n\n @property\n def cache(self):\n\n \"\"\"\n Client's structure cache.\n \"\"\"\n\n return self._client.cache\n\n @property\n def closed(self):\n\n \"\"\"\n Whether the client has manually stopped operations.\n \"\"\"\n\n return self._client.closed\n\n @property\n def loop(self):\n\n \"\"\"\n Event loop for internal operations.\n \"\"\"\n\n return self._loop\n\n def on(self, name):\n\n \"\"\"\n Decorator setting function called after receiving the event.\n \"\"\"\n\n def wrapper(function):\n\n self._listeners[name] = function\n\n return function\n\n return wrapper\n\n def wait(self, name, track = None):\n\n \"\"\"\n Decorator creating a predicated waiter for the event.\n The waiter is just an event that can be waited with a timeout.\n \"\"\"\n\n def wrapper(predicate):\n\n event = asyncio.Event(loop = self._loop)\n\n waiter = Waiter(event, predicate)\n\n waiters = self._waiters[name]\n\n waiters.append(waiter)\n\n tracked = track if track else event\n\n async def observe():\n\n await tracked.wait()\n\n waiters.remove(waiter)\n\n self._loop.create_task(observe())\n\n return waiter\n\n return wrapper\n\n async def 
gateway(self,\n count = None,\n shards = None,\n inspect = lambda id: {}):\n\n \"\"\"\n Connect and spawn shards.\n\n Parameters:\n count: int\n the shard count\n shards: list[id]\n id: int\n the shard id\n inspect:\n callable returning initial presence\n \"\"\"\n\n for control in (self._started, self._ready):\n\n control.clear()\n\n url, suggested = await self._client.get_gateway(bot = self._bot)\n\n if not count:\n\n count = suggested\n\n if not inspect:\n\n inspect = lambda id: {}\n\n if not shards:\n\n shards = range(count)\n\n shards = {id: inspect(id) for id in shards}\n\n for index, (id, presence) in enumerate(shards.items()):\n\n await asyncio.sleep(index * 6, loop = self._loop)\n\n shard = await self._client.start(\n self._session,\n url,\n id,\n count,\n functools.partial(self._inspect, presence)\n )\n\n await shard.ready.wait()\n\n self._started.set()\n\n async def voice(self, guild_id, channel_id, timeout = 60):\n\n \"\"\"\n Update the client's voice state.\n\n Raises ValueError if not connected to gateway or missing the shard.\n Raised ValueError if trying to disconnect while not connected to voice.\n \"\"\"\n\n await self._ready.wait()\n\n shard = random.choice(self._client.shards)\n\n shard_id = discord.utils.shard_id(guild_id, shard.info.count)\n\n for shard in self._client.shards:\n\n if shard.info.id == shard_id:\n\n break\n\n else:\n\n raise ValueError('Missing shard {}.'.format(shard_id))\n\n execute = shard.update_voice_state(guild_id, channel_id, False, False)\n\n if channel_id:\n\n try:\n\n client = self._voices[guild_id]\n\n except KeyError:\n\n user_id = self._client.cache.user.id\n\n state = None\n\n track_0 = asyncio.Event(loop = self._loop)\n\n @self.wait('voice state update', track = track_0)\n async def waiter_0(shard_, guild_, state_, fake_):\n\n nonlocal state\n\n state = state_\n\n return guild_.id == guild_id and state.user_id == user_id\n\n endpoint = token = None\n\n track_1 = asyncio.Event(loop = self._loop)\n\n @self.wait('voice server update', track = track_1)\n async def waiter_1(shard_, guild_, endpoint_, token_):\n\n nonlocal endpoint, token\n\n endpoint, token = endpoint_, token_\n\n return guild_.id == guild_id\n\n await execute\n\n coroutines = (waiter_0.event.wait(), waiter_1.event.wait())\n\n gathered = asyncio.gather(*coroutines, loop = self._loop)\n\n try:\n\n await asyncio.wait_for(gathered, timeout = timeout)\n\n finally:\n\n for track in (track_0, track_1):\n\n track.set()\n\n session_id = state.session_id\n\n naive, *junk = endpoint.split(':', 1)\n\n url = 'wss://' + naive\n\n client = discord.voice.Client(\n self._session,\n url,\n token,\n user_id,\n guild_id,\n session_id,\n loop = self._loop\n )\n\n await client.start()\n\n await client.ready.wait()\n\n self._voices[guild_id] = client\n\n else:\n\n await execute\n\n else:\n\n try:\n\n client = self._voices.pop(guild_id)\n\n except KeyError:\n\n raise ValueError('Not in voice.')\n\n else:\n\n await client.close()\n\n finally:\n\n await execute\n\n return client\n\n async def close(self):\n\n \"\"\"\n Close all voices.\n Close the discord client.\n Close the aiohttp session.\n \"\"\"\n\n for voice in self._voices.values():\n\n await voice.close()\n\n await self._client.close()\n\n await self._session.close()\n\n async def get_audit_logs(self, guild_id, **payload):\n\n \"\"\"\n Get the audit logs.\n \"\"\"\n\n audit_logs = await self._client.get_audit_logs(\n guild_id,\n **payload\n )\n\n return audit_logs\n\n async def get_channel(self, channel_id):\n\n \"\"\"\n Get the channel.\n 
\"\"\"\n\n channel = await self._client.get_channel(\n channel_id\n )\n\n return channel\n\n async def get_channels(self, guild_id):\n\n \"\"\"\n Get the guild channels.\n \"\"\"\n\n channels = await self._client.get_guild_channels(\n guild_id\n )\n\n return channels\n\n async def create_channel(self, guild_id, **payload):\n\n \"\"\"\n Create a guild channel.\n\n Payload Parameters:\n name: str\n channel name (2-100 characters)\n type: str\n \"voice\" or \"text\"\n bitrate: int\n the bitrate (in bits) of the voice channel (voice only)\n user_limit: int\n the user limit of the voice channel (voice only)\n permission_overwrites: list of dict\n the channel's permission overwrites\n \"\"\"\n\n channel = await self._client.create_guild_channel(\n guild_id,\n **payload\n )\n\n return channel\n\n async def update_channel(self, channel_id, **payload):\n\n \"\"\"\n Update the channel.\n\n Payload Parameters:\n name: str\n channel name\n position: int\n channel position\n topic: str\n channel topic\n bitrate: int\n channel bitrate\n user_limit: int\n channel user limit\n \"\"\"\n\n channel = await self._client.update_channel(\n channel_id,\n **payload\n )\n\n return channel\n\n async def update_overwrite(self, channel_id, overwrite_id, **payload):\n\n \"\"\"\n Update the channel permission overwrite.\n\n Payload Parameters:\n allow: int\n the bitwise value of all allowed permissions\n deny: int\n the bitwise value of all disallowed permissions\n type: str\n 'member' for a user or 'role' for a role\n \"\"\"\n\n await self._client.update_channel_overwrites(\n channel_id,\n overwrite_id,\n **payload\n )\n\n async def delete_overwrite(self, channel_id, overwrite_id):\n\n \"\"\"\n Delete the permissions overwrite.\n \"\"\"\n\n await self._client.delete_channel_overwrite(\n channel_id,\n overwrite_id\n )\n\n async def update_channel_positions(self, guild_id, **payload):\n\n \"\"\"\n Update the channels' positions.\n\n Payload Parameters:\n positions: dict of str, int pairs\n the corresponding channel ids and their desired positions\n \"\"\"\n\n await self._client.update_channel_positions(\n guild_id,\n **payload\n )\n\n async def delete_channel(self, channel_id):\n\n \"\"\"\n Delete the channel.\n \"\"\"\n\n await self._client.delete_channel(\n channel_id,\n **payload\n )\n\n async def ack_message(self, channel_id, message_id):\n\n token = await self._client.ack_message(\n channel_id,\n message_id,\n token = self._acking\n )\n\n self._acking = token\n\n async def get_message(self, channel_id, message_id):\n\n \"\"\"\n Get the message.\n \"\"\"\n\n message = await self._client.get_message(\n channel_id,\n message_id\n )\n\n return message\n\n async def get_messages(self, channel_id, **payload):\n\n \"\"\"\n Get the messages.\n\n Payload Parameters:\n around: str\n get messages around this message id\n before: str\n get messages before this message id\n after: str\n get messages after this message id\n limit: int\n max number of messages | (50)\n\n This is an async generator.\n \"\"\"\n\n limit = payload.pop('limit', 50)\n\n before = payload.pop('before', None)\n\n while limit > 0:\n\n messages = await self._client.get_channel_messages(\n channel_id,\n before = before,\n limit = min(limit, 100),\n **payload\n )\n\n if not messages:\n\n break\n\n before = messages[-1].id\n\n limit -= len(messages)\n\n for message in messages:\n\n yield message\n\n async def create_message(self, channel_id, **payload):\n\n \"\"\"\n Create a message.\n\n Payload Parameters:\n content: str\n the message contents\n nonce: str\n 
handled by wait unless specified\n used for optimistic message sending\n tts: bool\n true if this is a text-to-speech message\n files: tuple of tuple pairs of str, bytes\n the contents of the files being sent\n embed: dict\n embedded rich content\n \"\"\"\n\n message = await self._client.create_message(\n channel_id,\n **payload\n )\n\n return message\n\n async def update_message(self, channel_id, message_id, **payload):\n\n \"\"\"\n Update the message.\n\n Payload Parameters:\n content: str\n the message contents\n embed: dict\n embedded rich content\n \"\"\"\n\n message = await self._client.update_message(\n channel_id,\n message_id,\n **payload\n )\n\n return message\n\n async def delete_message(self, channel_id, message_id):\n\n \"\"\"\n Delete the message.\n \"\"\"\n\n await self._client.delete_message(\n channel_id,\n message_id\n )\n\n async def delete_messages(self, channel_id, message_ids):\n\n \"\"\"\n Delete the messages.\n \"\"\"\n\n await self._client.delete_messages(\n channel_id,\n message_ids\n )\n\n async def get_reactions(self, channel_id, message_id, emoji, **payload):\n\n \"\"\"\n Get the reactions.\n\n Payload Parameters:\n before: str\n get users before this user id\n after:\n get users after this user id\n limit:\n integer\tmax number of users | 100\n\n This is an async generator.\n \"\"\"\n\n limit = payload.pop('limit', 100)\n\n after = payload.pop('after', None)\n\n while limit > 0:\n\n users = await self._client.get_reactions(\n channel_id,\n message_id,\n emoji,\n after = after,\n limit = min(limit, 100),\n **payload\n )\n\n if not users:\n\n break\n\n after = users[-1].id\n\n limit -= len(users)\n\n for user in users:\n\n yield user\n\n async def create_reaction(self, channel_id, message_id, emoji):\n\n \"\"\"\n Create a reaction.\n \"\"\"\n\n await self._client.create_reaction(\n channel_id,\n message_id,\n emoji\n )\n\n async def delete_reaction(self, channel_id, message_id, emoji, user_id):\n\n \"\"\"\n Delete the user's reaction.\n \"\"\"\n\n if user_id == self._client.cache.user.id:\n\n await self._client.delete_current_user_reaction(\n channel_id,\n message_id,\n emoji\n )\n\n else:\n\n await self._client.delete_user_reaction(\n channel_id,\n message_id,\n emoji,\n user_id\n )\n\n async def clear_reactions(self, channel_id, message_id):\n\n \"\"\"\n Delete all reactions.\n \"\"\"\n\n await self._client.clear_reactions(\n channel_id,\n message_id\n )\n\n async def get_invite(self, invite_code):\n\n \"\"\"\n Get the invite.\n \"\"\"\n\n invite = await self._client.get_invite(\n invite_code\n )\n\n return invite\n\n async def get_invites(self, channel_id):\n\n \"\"\"\n Get the channel's invites.\n \"\"\"\n\n invites = await self._client.get_channel_invites(\n channel_id\n )\n\n return invites\n\n async def create_invite(self, channel_id, **payload):\n\n \"\"\"\n Create an invite.\n\n Payload Parameters:\n max_age: int\n duration of invite in seconds before expiry or 0 for never\n max_uses: int\n max number of uses or 0 for unlimited\n temporary: bool\n whether this invite only grants temporary membership\n unique: bool\n if true, don't try to reuse a similar invite\n \"\"\"\n\n invite = await self._client.create_channel_invite(\n channel_id,\n **payload\n )\n\n return invite\n\n async def delete_invite(self, invite_code):\n\n \"\"\"\n Delete the invite.\n \"\"\"\n\n invite = await self._client.delete_invite(\n invite_code\n )\n\n return invite\n\n async def accept_invite(self, invite_code):\n\n \"\"\"\n Accept the invite.\n \"\"\"\n\n invite = await 
self._client.accept_invite(\n invite_code\n )\n\n return invite\n\n async def start_typing(self, channel_id):\n\n \"\"\"\n Trigger the typing indicator.\n \"\"\"\n\n await self._client.trigger_typing_indicator(\n channel_id\n )\n\n async def get_pins(self, channel_id):\n\n \"\"\"\n Get the pinned messages.\n \"\"\"\n\n messages = await self._client.get_pinned_messages(\n channel_id\n )\n\n return messages\n\n async def create_pin(self, channel_id, message_id):\n\n \"\"\"\n Add a pinned message.\n \"\"\"\n\n await self._client.add_pinned_channel_message(\n channel_id,\n message_id\n )\n\n async def delete_pin(self, channel_id, message_id):\n\n \"\"\"\n Remove a pinned message.\n \"\"\"\n\n await self._client.delete_pinned_channel_message(\n channel_id,\n message_id\n )\n\n async def get_emoji(self, guild_id, emoji_id):\n\n \"\"\"\n Get the emoji.\n \"\"\"\n\n emoji = await self._client.get_guild_emoji(\n guild_id,\n emoji_id\n )\n\n return emoji\n\n async def get_emojis(self, guild_id):\n\n \"\"\"\n Get the emojis.\n \"\"\"\n\n emojis = await self._client.get_guild_emojis(\n guild_id\n )\n\n return emojis\n\n async def create_emoji(self, guild_id, **payload):\n\n \"\"\"\n Create an emoji.\n\n Payload Parameters:\n name: str\n name of the emoji\n image: base64\n the 128x128 emoji image\n roles: list of str\n list of role ids for which this emoji will be whitelisted\n \"\"\"\n\n emoji = await self._client.create_guild_emoji(\n guild_id,\n **payload\n )\n\n return emoji\n\n async def update_emoji(self, guild_id, emoji_id, **payload):\n\n \"\"\"\n Update the emoji.\n\n Payload Parameters:\n name: str\n name of the emoji\n roles: list of str\n list of role ids to which this emoji will be whitelisted\n \"\"\"\n\n emoji = await self._client.update_guild_emoji(\n guild_id,\n emoji_id,\n **payload\n )\n\n return emoji\n\n async def delete_emoji(self, guild_id, emoji_id):\n\n \"\"\"\n Delete the emoji.\n \"\"\"\n\n await self._client.delete_guild_emoji(\n guild_id,\n emoji_id\n )\n\n async def get_guild(self, guild_id):\n\n \"\"\"\n Get the guild.\n \"\"\"\n\n guild = await self._client.get_guild(\n guild_id\n )\n\n return guild\n\n async def get_guilds(self, **payload):\n\n \"\"\"\n Get the guilds.\n\n Payload Parameters:\n before: str\n get guilds before this guild id\n after: str\n get guilds after this guild id\n limit: int\n max number of guilds to return\n \"\"\"\n\n guilds = await self._client.get_current_user_guilds(\n **payload\n )\n\n return guilds\n\n async def create_guild(self, **payload):\n\n \"\"\"\n Create a guild.\n\n Payload Parameters:\n name: str\n name of the guild\n region: str\n voice region id\n icon: base64\n jpeg image for the guild icon\n verification_level: int\n verification level\n default_message_notifications: int\n default message notification level\n explicit_content_filter: int\n explicit content filter level\n roles: list of dict\n new guild roles\n channels: list of dict\n new guild's channels\n \"\"\"\n\n guild = await self._client.create_guild(\n **payload\n )\n\n return guild\n\n async def update_guild(self, guild_id, **payload):\n\n \"\"\"\n Update the guild.\n\n Payload Parameters:\n name: str\n guild name\n region: str\n guild voice region id\n verification_level: int\n guild verification level\n default_message_notifications: int\n default message notifications setting\n afk_channel_id: str\n id for afk channel\n icon: base64\n jpeg image bytes for the guild icon\n owner_id: str\n user id to transfer guild ownership to\n splash: str\n jpeg image bytes for 
the guild splash\n \"\"\"\n\n guild = await self._client.update_guild(\n guild_id,\n **payload\n )\n\n return guild\n\n async def delete_guild(self, guild_id):\n\n \"\"\"\n Delete the guild.\n \"\"\"\n\n await self._client.delete_guild(\n guild_id\n )\n\n async def leave_guild(self, guild_id):\n\n \"\"\"\n Leave the guild.\n \"\"\"\n\n await self._client.leave_guild(\n guild_id\n )\n\n async def get_member(self, guild_id, user_id):\n\n \"\"\"\n Get the guild's member.\n \"\"\"\n\n member = await self._client.get_guild_member(\n guild_id,\n user_id\n )\n\n return member\n\n async def get_members(self, guild_id, **payload):\n\n \"\"\"\n Get the members.\n\n Payload Parameters:\n limit: int\n max number of members | 1\n after: str\n the highest user id in the previous page\n \"\"\"\n\n limit = payload.pop('limit', 1)\n\n after = payload.pop('after', None)\n\n while limit > 0:\n\n members = await self._client.get_guild_members(\n guild_id,\n after = after,\n limit = min(limit, 1000),\n **payload\n )\n\n if not members:\n\n break\n\n after = members[-1].user.id\n\n limit -= len(members)\n\n for member in members:\n\n yield member\n\n async def create_member(self, guild_id, user_id, **payload):\n\n \"\"\"\n Add a guild member.\n\n Payload Parameters:\n access_token: string\n an oauth2 access token granted with the guilds.join to the\n bot's application for the user you want to add to the guild\n nick: str\n value to set users nickname\n roles: list of dict\n list of roles the member is assigned\n requires the 'manage_roles' permission\n mute: bool\n if the user is muted\n deaf: bool\n if the user is deafened\n \"\"\"\n\n member = await self._client.add_guild_member(\n guild_id,\n user_id,\n **payload\n )\n\n return member\n\n async def update_member(self, guild_id, user_id, **payload):\n\n \"\"\"\n Update the guild member.\n\n Payload Parameters:\n nick: str\n value to set users nickname to\n roles: list of str\n list of role ids the member is assigned\n mute: bool\n if the user is muted\n deaf: bool\n if the user is deafened\n channel_id: str\n id of channel to move user to (if they are connected to voice)\n \"\"\"\n\n tasks = []\n\n if user_id == self._client.cache.user.id:\n\n try:\n\n nick = payload.pop('nick')\n\n except KeyError:\n\n pass\n\n else:\n\n await self._client.update_current_user_nick(\n guild_id,\n nick = nick\n )\n\n await self._client.update_guild_member(\n guild_id,\n user_id,\n **payload\n )\n\n await asyncio.gather(*tasks, loop = self._loop)\n\n async def delete_member(self, guild_id, user_id):\n\n \"\"\"\n Remove a member.\n \"\"\"\n\n await self._client.remove_guild_member(\n guild_id,\n user_id\n )\n\n async def add_role(self, guild_id, user_id, role_id):\n\n \"\"\"\n Add a role.\n \"\"\"\n\n await self._client.add_guild_member_role(\n guild_id,\n user_id,\n role_id\n )\n\n async def remove_role(self, guild_id, user_id, role_id):\n\n \"\"\"\n Remove a role.\n \"\"\"\n\n await self._client.remove_guild_member_role(\n guild_id,\n user_id,\n role_id\n )\n\n async def get_bans(self, guild_id):\n\n \"\"\"\n Get the bans.\n \"\"\"\n\n bans = await self._client.get_guild_bans(\n guild_id\n )\n\n return bans\n\n async def create_ban(self, guild_id, user_id, **payload):\n\n \"\"\"\n Create a ban.\n\n Payload Parameters:\n delete_message_days: int\n number of days to delete messages for\n \"\"\"\n\n helpers.translate(\n payload,\n ('delete_message_days', 'days')\n )\n\n await self._client.create_guild_ban(\n guild_id,\n user_id,\n **payload\n )\n\n async def delete_ban(self, 
guild_id, user_id):\n\n \"\"\"\n Remove the ban for a user.\n \"\"\"\n\n await self._client.remove_guild_ban(\n guild_id,\n user_id\n )\n\n async def get_roles(self, guild_id):\n\n \"\"\"\n Get the roles.\n \"\"\"\n\n roles = await self._client.get_guild_roles(\n guild_id\n )\n\n return roles\n\n async def create_role(self, guild_id, **payload):\n\n \"\"\"\n Create a role.\n\n Payload Parameters:\n name: str\n name of the role\n permissions: int\n bitwise of the enabled/disabled permissions\n color: int\n RGB color value\n hoist: bool\n whether the role should be displayed separately in the sidebar\n mentionable: bool\n whether the role should be mentionable\n \"\"\"\n\n role = await self._client.create_guild_role(\n guild_id,\n **payload\n )\n\n return role\n\n async def update_role_positions(self, guild_id, **payload):\n\n \"\"\"\n Update the roles' positions.\n\n Payload Parameters:\n positions: dict of str, int pairs\n the corresponding role ids and their desired positions\n \"\"\"\n\n roles = await self._client.update_guild_role_positions(\n guild_id,\n **payload\n )\n\n return roles\n\n async def update_role(self, guild_id, role_id, **payload):\n\n \"\"\"\n Update the guild role.\n\n Payload Parameters:\n name: str\n name of the role\n permissions: int\n bitwise of the enabled/disabled permissions\n color: int\n RGB color value\n hoist: bool\n whether the role should be displayed separately in the sidebar\n mentionable: bool\n whether the role should be mentionable\n \"\"\"\n\n role = await self._client.update_guild_role(\n guild_id,\n role_id,\n **payload\n )\n\n return role\n\n async def delete_role(self, guild_id, role_id):\n\n \"\"\"\n Delete the role.\n \"\"\"\n\n await self._client.delete_guild_role(\n guild_id,\n role_id\n )\n\n async def get_prune(self, guild_id, **payload):\n\n \"\"\"\n Get the prune count.\n\n Payload Parameters:\n days: int\n \tnumber of days to count prune for\n \"\"\"\n\n count = await self._client.get_guild_prune_count(\n guild_id,\n **payload\n )\n\n return count\n\n async def begin_prune(self, guild_id, **payload):\n\n \"\"\"\n Begin the prune.\n\n Payload Parameters:\n days: int\n \tnumber of days to count prune for\n \"\"\"\n\n count = await self._client.begin_guild_prune(\n guild_id,\n **payload\n )\n\n return count\n\n async def get_voice_regions(self, guild_id):\n\n \"\"\"\n Get the guild's voice regions.\n \"\"\"\n\n regions = await self._client.get_guild_voice_regions(\n guild_id\n )\n\n return regions\n\n async def get_invites_all(self, guild_id):\n\n \"\"\"\n Get the invites.\n \"\"\"\n\n invites = await self._client.get_guild_invites(\n guild_id\n )\n\n return invites\n\n async def get_integrations(self, guild_id):\n\n \"\"\"\n Get the integrations.\n \"\"\"\n\n integrations = await self._client.get_guild_integrations(\n guild_id\n )\n\n return integrations\n\n async def create_integration(self, guild_id, **payload):\n\n \"\"\"\n Create an integration.\n\n Payload Parameters:\n type: str\n the integration type\n id: str\n the integration id\n \"\"\"\n\n integration = await self._client.create_guild_integration(\n guild_id,\n **payload\n )\n\n return integration\n\n async def update_integration(self, guild_id, integration_id, **payload):\n\n \"\"\"\n Update the integration.\n\n Payload Parameters:\n behavior: int\n integer\tthe behavior when an integration subscription lapses\n grace_period: int\n period (in seconds) where the\n integration will ignore lapsed subscriptions\n emoticons: bool\n whether emoticons should be synced for\n this 
integration (twitch only currently)\n \"\"\"\n\n await self._client.update_guild_integration(\n guild_id,\n integration_id,\n **payload\n )\n\n async def delete_integration(self, guild_id, integration_id):\n\n \"\"\"\n Delete the guild integration.\n \"\"\"\n\n await self._client.delete_guild_integration(\n guild_id,\n integration_id\n )\n\n async def sync_integration(self, guild_id, integration_id):\n\n \"\"\"\n Sync the integration.\n \"\"\"\n\n await self._client.sync_guild_integration(\n guild_id,\n integration_id\n )\n\n async def get_embed(self, guild_id):\n\n \"\"\"\n Get the embed.\n \"\"\"\n\n embed = await self._client.get_guild_embed(\n guild_id\n )\n\n return embed\n\n async def update_embed(self, guild_id):\n\n \"\"\"\n Update the embed.\n \"\"\"\n\n embed = await self._client.update_guild_embed(\n guild_id\n )\n\n return embed\n\n async def get_user(self, user_id):\n\n \"\"\"\n Get the user.\n \"\"\"\n\n if user_id == self._client.cache.user.id:\n\n user = await self._client.get_current_user()\n\n else:\n\n user = await self._client.get_user(user_id)\n\n return user\n\n async def update_user(self, **payload):\n\n \"\"\"\n Update the current user.\n\n Payload Parameters:\n username: str\n the user's username\n if changed, may cause the user's discriminator to be randomized\n avatar: base64\n if passed, modifies the user's avatar\n \"\"\"\n\n user = await self._client.update_current_user(\n **payload\n )\n\n return user\n\n async def get_dms(self):\n\n \"\"\"\n Get the dm channels.\n \"\"\"\n\n channels = await self._client.get_user_dms()\n\n return channels\n\n async def create_dm(self, **payload):\n\n \"\"\"\n Create a dm channel.\n\n Payload Parameters:\n recipient_id: str\n the recipient to open a DM channel with\n \"\"\"\n\n channel = await self._client.create_dm(\n **payload\n )\n\n return channel\n\n async def create_group_dm(self, **payload):\n\n \"\"\"\n Create a group dm channel.\n\n Payload Parameters:\n access_tokens: list of str\n \taccess tokens of users that have\n granted your app the gdm.join scope\n nicks: dict\n a dict of str (id), str (nick) pairs\n \"\"\"\n\n channel = await self._client.create_group_dm(\n **payload\n )\n\n return channel\n\n async def get_connections(self):\n\n \"\"\"\n Get the connections.\n \"\"\"\n\n connections = await self._client.get_user_connections()\n\n return connections\n\n async def get_voice_regions_all(self):\n\n \"\"\"\n Get the voice regions.\n \"\"\"\n\n regions = await self._client.get_voice_regions()\n\n return regions\n\n async def get_webhook(self, webhook_id, webhook_token = None):\n\n \"\"\"\n Get the webhook.\n \"\"\"\n\n webhook = await self._client.get_webhook(\n webhook_id,\n webhook_token = webhook_token\n )\n\n return webhook\n\n async def get_webhooks(self, channel_id):\n\n \"\"\"\n Get the channel's webhooks.\n \"\"\"\n\n webhooks = await self._client.get_channel_webhooks(\n channel_id\n )\n\n return webhooks\n\n async def get_guild_webhooks_all(self, guild_id):\n\n \"\"\"\n Get the guild's webhooks.\n \"\"\"\n\n webhooks = await self._client.get_guild_webhooks(\n guild_id\n )\n\n return webhooks\n\n async def create_webhook(self, channel_id, **payload):\n\n \"\"\"\n Create a webhook.\n\n Payload Parameters:\n name: str\n name of the webhook\n avatar: base64\n image for the default webhook avatar\n \"\"\"\n\n webhook = await self._client.create_webhook(\n channel_id,\n **payload\n )\n\n return webhook\n\n async def update_webhook(self, webhook_id, webhook_token = None, **payload):\n\n \"\"\"\n Update the 
webhook.\n\n Payload Parameters:\n name: str\n the default name of the webhook\n avatar: bytes or base64\n image for the default webhook avatar\n \"\"\"\n\n webhook = await self._client.update_webhook(\n webhook_id,\n webhook_token = webhook_token,\n **payload\n )\n\n return webhook\n\n async def delete_webhook(self, webhook_id, webhook_token = None):\n\n \"\"\"\n Delete the webhook.\n \"\"\"\n\n await self._client.delete_webhook(\n webhook_id,\n webhook_token = webhook_token\n )\n\n async def execute_webhook(self, webhook_id, webhook_token, **payload):\n\n \"\"\"\n Execute the webhook.\n\n Payload Parameters:\n wait: bool\n waits for server confirmation of message send before response,\n and returns the created message body (defaults to false)\n content: str\n the message contents (up to 2000 characters)\n username: str\n override the default username of the webhook\n avatar_url: str\n override the default avatar of the webhook\n files: tuple of tuple pairs of str, bytes\n the contents of the files being sent\n embeds: list of dict\n embed objects for embedded rich content\n \"\"\"\n\n message = await self._client.execute_webhook(\n webhook_id,\n webhook_token,\n **payload\n )\n\n return message\n\n async def get_information(self):\n\n \"\"\"\n Get the current user's application information.\n \"\"\"\n\n information = await self._client.get_information()\n\n return information\n\n def _handle(self, event, shard, *args):\n\n try:\n\n handler = getattr(self, '_handle_' + event)\n\n except AttributeError:\n\n pass\n\n else:\n\n self._loop.create_task(handler(shard, *args))\n\n def _emit(self, event, *args):\n\n tasks = []\n\n try:\n\n listener = self._listeners[event]\n\n except KeyError:\n\n pass\n\n else:\n\n tasks.append(listener(*args))\n\n async def assess(waiter):\n\n result = await waiter.predicate(*args)\n\n if result:\n\n waiter.event.set()\n\n for waiter in self._waiters[event]:\n\n tasks.append(assess(waiter))\n\n gathered = asyncio.gather(*tasks, loop = self._loop)\n\n asyncio.ensure_future(gathered, loop = self._loop)\n\n async def _handle_READY(self, shard):\n\n if not self._ready.is_set():\n\n initials = (*self._client.cache.guilds.values(),)\n\n self._initials.extend(guild.id for guild in initials)\n\n if not self._bot:\n\n asyncio.ensure_future(\n asyncio.gather(\n *(\n self._handle_GUILD_CREATE(shard, initial)\n for initial\n in initials\n ),\n loop = self._loop\n ),\n loop = self._loop\n )\n\n self._emit('ready', shard)\n\n async def _handle_RESUMED(self, shard, trace):\n\n self._emit('resume', shard)\n\n async def _handle_CHANNEL_CREATE(self, shard, guild, channel):\n\n self._emit('channel create', shard, guild, channel)\n\n async def _handle_CHANNEL_UPDATE(self, shard, guild, channel, fake):\n\n self._emit('channel update', shard, guild, channel, fake)\n\n async def _handle_CHANNEL_DELETE(self, shard, guild, channel):\n\n self._emit('channel delete', shard, guild, channel)\n\n async def _handle_CHANNEL_PINS_UPDATE(self, shard, guild, channel, timestamp):\n\n self._emit('pins update', shard, guild, channel, timestamp)\n\n async def _handle_GUILD_CREATE(self, shard, guild):\n\n try:\n\n self._unavailable.remove(guild.id)\n\n except ValueError:\n\n if guild.large:\n\n @self.wait('guild chunk')\n async def waiter_0(shard_, guild_):\n\n return (\n guild_.id == guild.id\n and len(guild_.members) >= guild_.member_count\n )\n\n await shard.request_guild_members(guild.id, '', 0)\n\n await waiter_0.event.wait()\n\n await self._started.wait()\n\n try:\n\n
self._initials.remove(guild.id)\n\n except ValueError:\n\n self._emit('guild create', shard, guild)\n\n else:\n\n if not self._initials:\n\n self._ready.set()\n\n self._emit('guild cache', shard, guild)\n\n else:\n\n self._emit('guild available', shard, guild)\n\n async def _handle_GUILD_UPDATE(self, shard, guild, fake):\n\n self._emit('guild update', shard, guild, fake)\n\n async def _handle_GUILD_DELETE(self, shard, guild):\n\n if guild.unavailable:\n\n self._unavailable.append(guild.id)\n\n self._emit('guild unavailable', shard, guild)\n\n else:\n\n self._emit('guild delete', shard, guild)\n\n async def _handle_GUILD_BAN_ADD(self, shard, guild, user):\n\n self._emit('guild ban', shard, guild, user)\n\n async def _handle_GUILD_BAN_REMOVE(self, shard, guild, user):\n\n self._emit('guild unban', shard, guild, user)\n\n async def _handle_GUILD_EMOJIS_UPDATE(self, shard, guild, fake):\n\n self._emit('emojis update', shard, guild, fake)\n\n async def _handle_GUILD_INTEGRATIONS_UPDATE(self, shard, guild):\n\n self._emit('integrations update', shard, guild)\n\n async def _handle_GUILD_MEMBER_ADD(self, shard, guild, member):\n\n self._emit('member create', shard, guild, member)\n\n async def _handle_GUILD_MEMBER_REMOVE(self, shard, guild, member):\n\n self._emit('member delete', shard, guild, member)\n\n async def _handle_GUILD_MEMBER_UPDATE(self, shard, guild, member, fake):\n\n self._emit('member update', shard, guild, member, fake)\n\n async def _handle_GUILD_MEMBERS_CHUNK(self, shard, guild):\n\n self._emit('guild chunk', shard, guild)\n\n async def _handle_GUILD_ROLE_CREATE(self, shard, guild, role):\n\n self._emit('role create', shard, guild, role)\n\n async def _handle_GUILD_ROLE_UPDATE(self, shard, guild, role, fake):\n\n self._emit('role update', shard, guild, role, fake)\n\n async def _handle_GUILD_ROLE_DELETE(self, shard, guild, role):\n\n self._emit('role delete', shard, guild, role)\n\n async def _handle_MESSAGE_CREATE(self, shard, guild, channel, message):\n\n self._emit('message create', shard, guild, channel, message)\n\n async def _handle_MESSAGE_UPDATE(self, shard, guild, channel, message, fake):\n\n self._emit('message update', shard, guild, channel, message, fake)\n\n async def _handle_MESSAGE_DELETE(self, shard, guild, channel, message):\n\n self._emit('message delete', shard, guild, channel, message)\n\n async def _handle_MESSAGE_DELETE_BULK(self, shard, guild, channel, messages):\n\n self._emit('messages delete', shard, guild, channel, messages)\n\n async def _handle_MESSAGE_REACTION_ADD(self, shard, guild, channel, message, emoji, user):\n\n self._emit('reaction create', shard, guild, channel, message, emoji, user)\n\n async def _handle_MESSAGE_REACTION_REMOVE(self, shard, guild, channel, message, emoji, user):\n\n self._emit('reaction delete', shard, guild, channel, message, emoji, user)\n\n async def _handle_MESSAGE_REACTION_REMOVE_ALL(self, shard, guild, channel, message):\n\n self._emit('reactions clear', shard, guild, channel, message)\n\n async def _handle_PRESENCE_UPDATE(self, shard, guild, member, fake):\n\n self._emit('presence update', shard, guild, member, fake)\n\n async def _handle_TYPING_START(self, shard, guild, channel, user, timestamp):\n\n self._emit('typing start', shard, guild, channel, user, timestamp)\n\n async def _handle_USER_UPDATE(self, shard, user, fake):\n\n self._emit('user update', shard, user, fake)\n\n async def _handle_VOICE_STATE_UPDATE(self, shard, guild, state, fake):\n\n if state.user_id == self._client.cache.user.id:\n\n if not
state.channel_id:\n\n try:\n\n client = self._voices.pop(state.guild_id)\n\n except KeyError:\n\n pass\n\n else:\n\n await client.close()\n\n self._emit('voice state update', shard, guild, state, fake)\n\n async def _handle_VOICE_SERVER_UPDATE(self, shard, guild, endpoint, token):\n\n self._emit('voice server update', shard, guild, endpoint, token)\n\n async def _handle_WEBHOOKS_UPDATE(self, shard, guild, channel):\n\n self._emit('webhooks update', shard, guild, channel)\n","sub_path":"discord/driver/engine/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":45082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378123679","text":"from django.core.management import BaseCommand\nfrom django.conf import settings\nfrom django.db import transaction\nfrom deposit.models import MemberDepositRecord\n\nfrom operation.models import Customer, Member\nfrom zerocar.utils import ShowProcess, DepositGrades\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n count, fixed = 0, 0\n customer_queryset = Customer.objects.all()\n process_bar = ShowProcess(customer_queryset.count())\n for customer in customer_queryset:\n process_bar.show_process()\n normal_deposit_queryset = customer.memberdepositrecord_set.filter(\n state=MemberDepositRecord.States.NORMAL, is_curr=1)\n normal_deposit_count, deposit_total_amount = normal_deposit_queryset.count(), \\\n sum([d.amount for d in normal_deposit_queryset])\n with transaction.atomic():\n member = Member.objects.select_for_update().filter(customer=customer).first()\n if not member:\n print(f'Customer {customer.pk}:{customer.mobile} has no member object - data error!')\n answer = input(f'y (delete the customer) / n (create a member record for the customer), y/n\\n')\n if answer == 'y':\n try:\n customer.delete()\n except:\n print(f'Customer {customer.mobile} has related data and cannot be deleted; creating member data instead')\n member = Member()\n member.customer = customer\n member.save()\n continue\n else:\n member = Member()\n member.customer = customer\n member.save()\n\n # case: the member has no deposit grade but normal deposit records exist\n if member.deposit_grade is None:\n if normal_deposit_count == 0:\n continue\n if deposit_total_amount not in [settings.DEPOSIT_LV1, settings.DEPOSIT_LV2]:\n print(f'Customer {customer.mobile} has an abnormal violation deposit amount: {deposit_total_amount}; cannot repair, please take note!')\n count += 1\n continue\n\n print(f'Customer {customer.mobile} has no violation deposit grade but has normal deposit records totalling {deposit_total_amount}!')\n lv = MemberDepositRecord.AMOUNT_MAP_TO_GRADE.get(deposit_total_amount)\n answer = input(f'Repair the violation deposit grade to [{lv}]? y/n\\n')\n if answer == 'y':\n member.deposit_grade = lv\n member.save()\n fixed += 1\n count += 1\n continue\n\n # case: the member has a deposit grade but the deposit records do not match it\n if member.deposit_grade:\n if (member.deposit_grade == DepositGrades.LEVEL1\n and deposit_total_amount == settings.DEPOSIT_LV1) or (\n member.deposit_grade == DepositGrades.LEVEL2\n and deposit_total_amount == settings.DEPOSIT_LV2):\n continue\n\n if not normal_deposit_count:\n print(\n f'Customer {customer.mobile} has violation deposit grade {member.deposit_grade} but no normal deposit records!')\n answer = input(f'Set the violation deposit grade to None? y/n\\n')\n if answer == 'y':\n member.deposit_grade = None\n member.save()\n fixed += 1\n count += 1\n continue\n\n if (member.deposit_grade == DepositGrades.LEVEL1\n and deposit_total_amount == settings.DEPOSIT_LV2) or (\n member.deposit_grade == DepositGrades.LEVEL2\n and deposit_total_amount == settings.DEPOSIT_LV1):\n print(\n f'Customer {customer.mobile} has violation deposit grade {member.deposit_grade} but {deposit_total_amount} in deposits!'\n )\n lv = MemberDepositRecord.AMOUNT_MAP_TO_GRADE.get(deposit_total_amount)\n answer = 
input(f'Set the violation deposit grade to [{lv}]? y/n\\n')\n if answer == 'y':\n member.deposit_grade = lv\n member.save()\n fixed += 1\n count += 1\n continue\n\n if deposit_total_amount not in [settings.DEPOSIT_LV1, settings.DEPOSIT_LV2]:\n print(f'Customer {customer.mobile} has an abnormal violation deposit amount: {deposit_total_amount}; cannot repair, please take note!')\n continue\n\n print(f'Customer {customer.mobile} has an abnormal violation deposit, but no cause was found!')\n return f'Found {count} customers with incorrect violation deposits, repaired {fixed}'\n","sub_path":"simplegit/zerocar-master/operation/management/commands/repair_member_deposit.py","file_name":"repair_member_deposit.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"424120739","text":"from __future__ import print_function\nimport csv, multiprocessing, cv2, os\nimport numpy as np\nimport urllib.request\n\nclass AppURLopener(urllib.request.FancyURLopener):\n version = \"Mozilla/5.0\"\n\nopener = AppURLopener()\ndef url_to_image(url):\n resp = urllib.request.urlopen(url)#opener.open(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)\n return image\n\n\ndef download_and_resize(country, im_id, im_url):\n try:\n save_dir = os.path.join('./images/', country)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n save_path = os.path.join(save_dir,im_url.split('/')[-1])\n\n if os.path.isfile(save_path):\n save_path = os.path.join(save_dir,im_id,im_url.split('/')[-1])\n\n if not os.path.isfile(save_path):\n print(save_path, im_url)\n img = url_to_image(im_url)\n cv2.imwrite(save_path,img)\n else:\n print('Already saved: ' + save_path)\n except Exception as e:\n #print(e)\n with open(\"./log/bad.txt\", \"a\") as bad:\n bad.write(save_path)\n bad.write(\"\\n\")\n\ndef main():\n country_dir = './countries/'\n for filename in os.listdir(country_dir):\n \n # splitext drops the '.csv' suffix (str.strip would remove characters, not the suffix)\n country = os.path.splitext(filename)[0]\n\n print('Processing...', country)\n\n if country=='':\n country = 'undefined'\n\n with open(country_dir+filename, 'r') as train_f:\n train_reader = csv.reader(train_f)\n header = train_reader.__next__()\n pool = multiprocessing.Pool(processes=2*multiprocessing.cpu_count())\n results = [pool.apply_async( download_and_resize, [ country, image_id, image_data[0] ] )\n for image_id, image_data in enumerate(train_reader)]\n pool.close()\n pool.join()\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"285842473","text":"import pymongo\r\nimport os\r\nfrom vyka_stock.util.dataconstants import DataConstants\r\nfrom vyka_stock.db.financeutils import FinanceUtils\r\nfrom vyka_stock.util.loggermanager import LoggerManager\r\nfrom vyka_stock.config import vykaconstants as CONSTANTS\r\nfrom datetime import datetime\r\nfrom pandas.io.json import json_normalize #package for flattening json in pandas df\r\nimport json \r\nfrom pymongo.errors import BulkWriteError\r\nfrom arctic import CHUNK_STORE, Arctic\r\n\r\nclass MongoUtils(object):\r\n\r\n def __init__(self):\r\n self.logger = LoggerManager().getLogger(__name__)\r\n\r\n def write_time_series_to_disk(self,fname,data_frame,last_spot_price = 0,zach_rank = 'NA',\r\n engine = 'arctic', db_server = None,\r\n db_port = None, username = None, password = None,\r\n filter_out_matching = None, timeout = 10, connection_string = None, add_asoftms = True, \r\n lib_type = \"CHUNK_STORE\",ticker_symbol = 
None):\r\n \"\"\"Writes a Pandas data frame to disk; only the 'arctic' (Arctic/MongoDB) engine is currently implemented\r\n\r\n Parameters\r\n ---------\r\n fname : str\r\n path of file\r\n data_frame : DataFrame\r\n data frame to be written to disk\r\n engine : str\r\n 'hdf5_fixed' - use HDF5 fixed format, very quick, but cannot append to this\r\n 'hdf5_table' - use HDF5 table format, slower but can append to\r\n 'arctic' - use Arctic/MongoDB database\r\n 'redis' - use Redis\r\n append_data : bool\r\n False - write a fresh copy of data on disk each time\r\n True - append data to disk\r\n db_server : str\r\n Database server for arctic (default: '127.0.0.1')\r\n timeout : int\r\n Number of seconds to do timeout\r\n \"\"\" \r\n c = None\r\n try: \r\n if (engine == 'arctic'): \r\n socketTimeoutMS = 30 * 1000\r\n ticker_symbol = os.path.basename(ticker_symbol).replace('.', '_')\r\n \r\n print(\"Load Arctic/MongoDB library: \" + ticker_symbol)\r\n\r\n if username is not None and password is not None:\r\n c = pymongo.MongoClient(db_server, connect=False, username=username, password=password)\r\n else:\r\n c = pymongo.MongoClient(connection_string)\r\n\r\n store = Arctic(c,socketTimeoutMS=socketTimeoutMS, serverSelectionTimeoutMS=socketTimeoutMS,\r\n connectTimeoutMS=socketTimeoutMS)\r\n\r\n database = None\r\n\r\n try:\r\n database = store[fname]\r\n except:\r\n pass\r\n\r\n if database is None:\r\n if (lib_type == \"CHUNK_STORE\"):\r\n store.initialize_library(fname,lib_type=CHUNK_STORE)\r\n else:\r\n # Defaults to version store \r\n store.initialize_library(fname, audit=False)\r\n print(\"Created MongoDB library: \" + fname)\r\n else:\r\n print(\"Got MongoDB library: \" + fname)\r\n \r\n # Mongo/Arctic column names cannot contain '.', so replace the dotted\r\n # field names (e.g. 'ask.fmt' -> 'ask_fmt') in a single pass\r\n data_frame.columns = [col.replace('.', '_') for col in data_frame.columns]\r\n
\r\n if(add_asoftms):\r\n data_frame['date'] = datetime.now()\r\n if(ticker_symbol is not None):\r\n data_frame['ticker_symbol'] = ticker_symbol\r\n if(zach_rank is not None):\r\n data_frame['zack_rank'] = zach_rank\r\n if(last_spot_price is not None):\r\n data_frame['last_spot_price']= last_spot_price\r\n # can duplicate values if we have existing dates\r\n #print(data_frame.head())\r\n\r\n financeUtils = FinanceUtils()\r\n returned_df = financeUtils.getTheoriticalValue(data_frame)\r\n\r\n try:\r\n library = store[fname]\r\n except Exception:\r\n store.initialize_library(fname, lib_type=CHUNK_STORE)\r\n library = store[fname]\r\n\r\n returned_df = returned_df.applymap(str)\r\n if(ticker_symbol in library.list_symbols()):\r\n if(lib_type == \"CHUNK_STORE\"):\r\n library.append(ticker_symbol, returned_df)\r\n else:\r\n library.append(ticker_symbol,returned_df, metadata={ticker_symbol: str(datetime.now())}) \r\n else:\r\n if(lib_type == \"CHUNK_STORE\"):\r\n library.write(ticker_symbol, returned_df, chunk_size='D')\r\n else:\r\n library.write(ticker_symbol,returned_df, metadata={ticker_symbol: str(datetime.now())}) \r\n #library.write(fname,data_frame) \r\n print(\"Written MongoDB library: \" + ticker_symbol)\r\n except BulkWriteError as exc:\r\n print(\" ------- \",exc.details)\r\n finally:\r\n # the client only exists if the 'arctic' branch ran\r\n if c is not None:\r\n c.close()\r\n\r\n\r\n def unix_time_millis(self,dt):\r\n epoch = datetime.utcfromtimestamp(0)\r\n return (dt - epoch).total_seconds() * 1000.0\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\" Nothing \")\r\n mongoUtil = MongoUtils()\r\n with open('C:\\\\vyka\\\\vyka_stock\\\\vyka_stock\\\\util\\\\sample.json','r') as f:\r\n d = json.load(f)\r\n data_frame_sample = json_normalize(d)\r\n # note: you need to set up Man-AHL's Arctic and MongoDB database for this to work\r\n # write to Arctic (to MongoDB) - by default uses Arctic's VersionStore\r\n #mongoUtil.write_time_series_to_disk(\"options_data_puts\",data_frame_sample, engine='arctic',\r\n # connection_string = CONSTANTS.ATLAS_MONGO_CONNECTION_STRING, add_asoftms = True )\r\n","sub_path":"vyka_stock/db/mongoutils.py","file_name":"mongoutils.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"122387845","text":"from random import choice, randint as rnd\n\nfrom tkinter import Tk, Canvas, BOTH, mainloop, CENTER, Frame\n\nimport math\n\nroot = Tk()\nfr = Frame(root)\nroot.geometry('800x600')\ncanvas = Canvas(root, bg='white')\ncanvas.pack(fill=BOTH, expand=1)\nspeed = 1\ndT = 10\n\n\nclass Target():\n \"\"\"A circular target; you get a prize for hitting it\"\"\"\n def __init__(self, score):\n self.score = score\n self.prize = 1\n self.speed_x = rnd(-5, 5) / 5 * speed\n self.speed_y = rnd(-5, 5) / 5 * speed\n self.x = rnd(100, 700)\n self.y = rnd(100, 500)\n self.r = rnd(2, 50)\n self.colors = ['red', 'orange', 'yellow', 'green', 'blue']\n self.color = choice(self.colors)\n self.id = 
canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color,\n width=0)\n\n def appear(self):\n canvas.delete(self.id)\n self.speed_x = rnd(-5, 5) / 5 * speed\n self.speed_y = rnd(-5, 5) / 5 * speed\n self.x = rnd(100, 700)\n self.y = rnd(100, 500)\n self.r = rnd(2, 50)\n self.colors = ['red', 'orange', 'yellow', 'green', 'blue']\n self.color = choice(self.colors)\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color,\n width=0)\n\n def move(self):\n canvas.delete(self.id)\n\n if self.x > 700 - self.r and self.speed_x > 0 or self.x < self.r and self.speed_x < 0:\n self.speed_x *= rnd(1, 10) / (-5)\n\n if self.y > 600 - self.r and self.speed_y > 0 or self.y < self.r and self.speed_y < 0:\n self.speed_y *= rnd(1, 10) / (-5)\n\n self.x += self.speed_x\n self.y += self.speed_y\n\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color,\n width=0)\n\n\nclass Ball:\n def __init__(self):\n self.x = 0\n self.y = 0\n self.elastic = 0\n self.g = 0\n self.live = 0\n self.r = 0\n self.vx = 0\n self.vy = 0\n self.color = choice(['blue', 'green', 'red', 'brown'])\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n def appear(self, vx, vy):\n self.x = 20\n self.y = 450\n self.elastic = 0.6\n self.g = 0.1\n self.live = 1000\n self.r = 5\n self.vx = vx\n self.vy = vy\n self.color = choice(['blue', 'green', 'red', 'brown'])\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n def move(self):\n canvas.delete(self.id)\n\n self.vy -= self.g\n self.y -= self.vy\n self.x += self.vx\n\n if self.y > 500:\n self.vy += self.g\n self.vy *= -self.elastic\n self.vx *= self.elastic\n self.y = 500\n\n if self.x > 800:\n self.vx *= -self.elastic\n self.vy *= self.elastic\n self.x = 800\n\n if self.live <= 0:\n canvas.delete(self.id)\n print('bI')\n else:\n self.live -= 1\n\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n def hit_test(self, obj):\n return abs(obj.x - self.x) <= (self.r + obj.r) and abs(obj.y - self.y) <= (self.r + obj.r)\n\n\nclass Gun:\n def __init__(self):\n self.x = 20\n self.y = 450\n self.len_x = 20\n self.len_y = 20\n self.id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='black',\n width=7)\n\n def drawing(self, power, angle):\n canvas.delete(self.id)\n self.len_x = max(power, 3) * 10 * math.cos(angle)\n self.len_y = -max(power, 3) * 10 * math.sin(angle)\n\n if power != 0:\n self.id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='orange',\n width=7)\n else:\n self.id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='black',\n width=7)\n\n\nclass Game():\n def __init__(self):\n self.bullet = 0\n self.score = 0\n self.score_text = canvas.create_text(\n 10,\n 10,\n text=self.score,\n justify=CENTER,\n font=\"Verdana 10\")\n self.gun = Gun()\n self.gun_x = 20\n self.preparation = 0\n self.gun_y = 450\n self.angle = 0\n self.power = 0\n self.targets = [Target(self.score) for numb in range(5)]\n self.balls = []\n for target in self.targets:\n target.appear()\n\n def hittest(self):\n for ball in self.balls:\n # iterate over a copy: hit targets are removed below\n for target in list(self.targets):\n if 
(ball.x - target.x) ** 2 + (ball.y - target.y) ** 2 < (ball.r + target.r) ** 2:\n self.score += 1\n canvas.delete(self.score_text)\n self.score_text = canvas.create_text(\n 10,\n 10,\n text=self.score,\n justify=CENTER,\n font=\"Verdana 10\")\n\n canvas.delete(target.id)\n self.targets.remove(target)\n\n def new_ball(self, event):\n self.balls.append(Ball())\n this_ball = self.balls[len(self.balls) - 1]\n vx = self.power * math.cos(self.angle)\n vy = self.power * math.sin(self.angle)\n this_ball.appear(vx, -vy)\n self.power = 0\n self.preparation = 0\n self.bullet += 1\n\n def shot_prepare(self, event):\n self.preparation = 1\n\n def targeting(self, event):\n self.angle = math.atan((event.y - self.gun_y) / (event.x - self.gun_x))\n\n def ball_to_old(self):\n numb = 0\n\n while numb < len(self.balls):\n self.balls[numb].move()\n\n if self.balls[numb].live <= 0:\n canvas.delete(self.balls[numb].id)\n self.balls.pop(numb)\n else:\n # only advance when nothing was removed, so no ball is skipped\n numb += 1\n\n def power_up(self):\n if self.power < 15 and self.preparation == 1:\n self.power += 0.1\n\n def targets_dynamics(self):\n for target in self.targets:\n target.move()\n\n def end_check(self):\n if len(self.targets) == 0:\n self.end_text = canvas.create_text(\n 400,\n 300,\n text='you destroyed the targets in ' + str(self.bullet) + ' shots',\n justify=CENTER,\n font=\"Verdana 25\")\n else:\n root.after(dT, self.main)\n\n def main(self):\n self.targets_dynamics()\n # mouse bindings: aim on motion, charge on press, fire on release\n canvas.bind('<Motion>', self.targeting)\n canvas.bind('<Button-1>', self.shot_prepare)\n self.power_up()\n canvas.bind('<ButtonRelease-1>', self.new_ball)\n self.ball_to_old()\n self.gun.drawing(self.power, self.angle)\n self.hittest()\n self.end_check()\n\n\ngame = Game()\ngame.main()\nmainloop()\n","sub_path":"gun.py","file_name":"gun.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"235968343","text":"'''\r\n(Extra Credit) \r\nUsing the UserDict module, create a class called AnyCaseDict, \r\nwhich will be just like a dictionary but keys that are strings will refer to the same value, \r\nregardless of the case of the key. 
\r\nFor example, the following keys: \"Color\", \"color\", \"COLOR\" and \"coloR\" will refer to the same value.\r\n(Hint: Override the built-in __setitem__ and __getitem__ methods.)\r\n'''\r\n\r\n# Reference\r\n# https://docs.python.org/3/library/collections.html\r\n# UserDict object's \"data\" attribute: A real dictionary used to store the contents of the UserDict class.\r\n\r\n# import UserDict from the collections module\r\nfrom collections import UserDict\r\n\r\n# check the contents of UserDict\r\ndir(UserDict)\r\n\r\n# create a new class\r\nclass AnyCaseDict(UserDict):\r\n # convert string keys to lower case so all casings refer to the same value\r\n def __setitem__(self, key, value):\r\n try:\r\n keylow = key.lower()\r\n self.data[keylow] = value\r\n except AttributeError:\r\n self.data[key] = value\r\n \r\n def __getitem__(self, key):\r\n try:\r\n keylow = key.lower()\r\n return self.data[keylow]\r\n except AttributeError:\r\n return self.data[key]\r\n\r\n# create an AnyCaseDict instance (named d so the built-in dict is not shadowed)\r\nd = AnyCaseDict()\r\n\r\n# input values\r\nd['Color'] = 1\r\nd['TEXT'] = 2\r\nd[10] = 3\r\nd['20'] = 4\r\nprint(d) # {'color': 1, 'text': 2, 10: 3, '20': 4}\r\n\r\n# access values with keys\r\nd['Color'] # 1\r\nd['color'] # 1\r\nd['COLOR'] # 1\r\nd['coloR'] # 1\r\n\r\nd['Text'] # 2\r\nd['text'] # 2\r\nd['TEXT'] # 2\r\nd['texT'] # 2\r\n\r\nd[10] # 3\r\nd['10'] # KeyError\r\n\r\nd['20'] # 4\r\nd[20] # KeyError","sub_path":"Module_10/Daisuke_X442.3_Assignment_10_3.py","file_name":"Daisuke_X442.3_Assignment_10_3.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"564955535","text":"import numpy as np\nimport cv2\nimport time\nimport multiprocessing\nimport picamera\nfrom joblib import Parallel, delayed\n\n# start clock / time test for speed\nstart_time = time.time()\n\n#############################################################################\n\n# function that uses ORB feature matching (SURF-like) to detect matches between a template and a scene\n\ndef image_pro(img, img2):\n\n # Initiate ORB detector (on OpenCV 3+ this is cv2.ORB_create())\n orb = cv2.ORB()\n \n # compute the descriptors with ORB\n kp, des = orb.detectAndCompute(img, None)\n kp2, des2 = orb.detectAndCompute(img2,None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher()\n \n # Match descriptors.\n matches = bf.knnMatch(des,des2, k = 2)\n\n # keep only matches that pass the ratio test\n good = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n\n # return the number of good matches found for this template\n match = len(good)\n return match\n\n##############################################################################\n\n# uses the number of matches returned above to approximate distance and location. 
\n\ndef distance(match):\n if max(match) >= 20 and max(match) == match[0]: \n print(\"within 1 ft of marker 1\")\n elif max(match) >= 20 and max(match) == match[1]: \n print(\"within 1 ft of marker 2\")\n elif max(match) >= 20 and max(match) == match[2]: \n print(\"within 1 ft of marker 3\")\n elif max(match) >= 20 and max(match) == match[3]: \n print(\"within 1 ft of marker 4\")\n elif max(match) < 20 and max(match) >= 15 and max(match) == match[0]: \n print(\"within 3 ft of marker 1\")\n elif max(match) < 20 and max(match) >= 15 and max(match) == match[1]: \n print(\"within 3 ft of marker 2\")\n elif max(match) < 20 and max(match) >= 15 and max(match) == match[2]: \n print(\"within 3 ft of marker 3\")\n elif max(match) < 20 and max(match) >= 15 and max(match) == match[3]: \n print(\"within 3 ft of marker 4\")\n\n################################################################################\n\n# camera code to take photograph that will be searched \ncamera = picamera.PiCamera()\ncamera.capture('scene.jpg')\n\n# img = the images to look for / img2 = scene to be searched \nimg = [cv2.imread('1.png',1), cv2.imread('2.png',1), cv2.imread('3.png',1), cv2.imread('4.png',1)]\nimg2 = cv2.imread('scene.jpg')\n\n# gets cpu core count for use in a later function \ncores = multiprocessing.cpu_count()\n\n# input range \ninputs = range(len(img))\n\n# runs the loop in parallel on multiple cores to reduce computation time on the raspberry pi\nmatch = Parallel(n_jobs = cores, backend = 'threading' )(delayed(image_pro)(img[i], img2) for i in inputs)\n\n# prints the approximate distance and location\noutput = distance(match) \n\n# print statement to get program runtime | using this for optimization \nprint(\"%s seconds\" % str(time.time() - start_time))\n","sub_path":"orb.py","file_name":"orb.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"98333296","text":"# coding: utf-8\n\n\"\"\"\n ETSI GS MEC 012 - Radio Network Information API\n\n The ETSI MEC ISG MEC012 Radio Network Information API described using OpenAPI. 
# noqa: E501\n\n OpenAPI spec version: 2.1.1\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass S1BearerNotificationS1UeInfo(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'associate_id': 'list[AssociateId]',\n 'ecgi': 'list[Ecgi]',\n 's1_bearer_info': 'list[S1BearerInfoS1BearerInfoDetailed]',\n 'temp_ue_id': 'CellChangeNotificationTempUeId'\n }\n\n attribute_map = {\n 'associate_id': 'associateId',\n 'ecgi': 'ecgi',\n 's1_bearer_info': 's1BearerInfo',\n 'temp_ue_id': 'tempUeId'\n }\n\n def __init__(self, associate_id=None, ecgi=None, s1_bearer_info=None, temp_ue_id=None): # noqa: E501\n \"\"\"S1BearerNotificationS1UeInfo - a model defined in Swagger\"\"\" # noqa: E501\n self._associate_id = None\n self._ecgi = None\n self._s1_bearer_info = None\n self._temp_ue_id = None\n self.discriminator = None\n if associate_id is not None:\n self.associate_id = associate_id\n self.ecgi = ecgi\n self.s1_bearer_info = s1_bearer_info\n if temp_ue_id is not None:\n self.temp_ue_id = temp_ue_id\n\n @property\n def associate_id(self):\n \"\"\"Gets the associate_id of this S1BearerNotificationS1UeInfo. # noqa: E501\n\n 0 to N identifiers to associate the information for a specific UE or flow. # noqa: E501\n\n :return: The associate_id of this S1BearerNotificationS1UeInfo. # noqa: E501\n :rtype: list[AssociateId]\n \"\"\"\n return self._associate_id\n\n @associate_id.setter\n def associate_id(self, associate_id):\n \"\"\"Sets the associate_id of this S1BearerNotificationS1UeInfo.\n\n 0 to N identifiers to associate the information for a specific UE or flow. # noqa: E501\n\n :param associate_id: The associate_id of this S1BearerNotificationS1UeInfo. # noqa: E501\n :type: list[AssociateId]\n \"\"\"\n\n self._associate_id = associate_id\n\n @property\n def ecgi(self):\n \"\"\"Gets the ecgi of this S1BearerNotificationS1UeInfo. # noqa: E501\n\n E-UTRAN Cell Global Identifier. # noqa: E501\n\n :return: The ecgi of this S1BearerNotificationS1UeInfo. # noqa: E501\n :rtype: list[Ecgi]\n \"\"\"\n return self._ecgi\n\n @ecgi.setter\n def ecgi(self, ecgi):\n \"\"\"Sets the ecgi of this S1BearerNotificationS1UeInfo.\n\n E-UTRAN Cell Global Identifier. # noqa: E501\n\n :param ecgi: The ecgi of this S1BearerNotificationS1UeInfo. # noqa: E501\n :type: list[Ecgi]\n \"\"\"\n if ecgi is None:\n raise ValueError(\"Invalid value for `ecgi`, must not be `None`\") # noqa: E501\n\n self._ecgi = ecgi\n\n @property\n def s1_bearer_info(self):\n \"\"\"Gets the s1_bearer_info of this S1BearerNotificationS1UeInfo. # noqa: E501\n\n S1 bearer information as defined below. # noqa: E501\n\n :return: The s1_bearer_info of this S1BearerNotificationS1UeInfo. # noqa: E501\n :rtype: list[S1BearerInfoS1BearerInfoDetailed]\n \"\"\"\n return self._s1_bearer_info\n\n @s1_bearer_info.setter\n def s1_bearer_info(self, s1_bearer_info):\n \"\"\"Sets the s1_bearer_info of this S1BearerNotificationS1UeInfo.\n\n S1 bearer information as defined below. # noqa: E501\n\n :param s1_bearer_info: The s1_bearer_info of this S1BearerNotificationS1UeInfo. 
# noqa: E501\n        :type: list[S1BearerInfoS1BearerInfoDetailed]\n        \"\"\"\n        if s1_bearer_info is None:\n            raise ValueError(\"Invalid value for `s1_bearer_info`, must not be `None`\")  # noqa: E501\n\n        self._s1_bearer_info = s1_bearer_info\n\n    @property\n    def temp_ue_id(self):\n        \"\"\"Gets the temp_ue_id of this S1BearerNotificationS1UeInfo.  # noqa: E501\n\n\n        :return: The temp_ue_id of this S1BearerNotificationS1UeInfo.  # noqa: E501\n        :rtype: CellChangeNotificationTempUeId\n        \"\"\"\n        return self._temp_ue_id\n\n    @temp_ue_id.setter\n    def temp_ue_id(self, temp_ue_id):\n        \"\"\"Sets the temp_ue_id of this S1BearerNotificationS1UeInfo.\n\n\n        :param temp_ue_id: The temp_ue_id of this S1BearerNotificationS1UeInfo.  # noqa: E501\n        :type: CellChangeNotificationTempUeId\n        \"\"\"\n\n        self._temp_ue_id = temp_ue_id\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.swagger_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n        if issubclass(S1BearerNotificationS1UeInfo, dict):\n            for key, value in self.items():\n                result[key] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, S1BearerNotificationS1UeInfo):\n            return False\n\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        return not self == other\n","sub_path":"python-client-generated/swagger_client/models/s1_bearer_notification_s1_ue_info.py","file_name":"s1_bearer_notification_s1_ue_info.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"253728737","text":"from django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('events.views',\n    url(r'^my-gigs/$', 'my_gigs', name='my_gigs'),\n    url(r'^dashboard/$', 'venue_dashboard', name='venue_dashboard'),\n    url(r'^(?P<pk>\d+)/$', 'event_detail', name='event_detail'),\n    url(r'^(?P<pk>\d+)/edit/$', 'event_edit', name='event_edit'),\n    url(r'^add/$', 'event_add', name='event_add'),\n)\n","sub_path":"smallslive/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"615558948","text":"from os import system\r\n\r\nfrom classesJeu import Main, Paquet\r\n\r\njeu = True\r\nwhile jeu:\r\n    paquet = Paquet()\r\n    paquet.melange()\r\n    players = [Main(True), Main(False)]\r\n\r\n    for player in players:\r\n        for i in range(2):\r\n            player.ajouteCarte(paquet.distribue())\r\n    \r\n    for player in players:\r\n        player.affiche()\r\n    \r\n    gameOver = False\r\n    while not gameOver:\r\n        # player's turn\r\n        resp = input(\"Extra card (C) or Pass (P)?\")\r\n        if resp == \"C\":\r\n            players[0].ajouteCarte(paquet.distribue())\r\n            players[0].calculePoints()\r\n            if players[0].points > 21:\r\n                
print(f\"Vous avez perdu ! avec un score de {players[0].points}\")\r\n gameOver = True\r\n break\r\n # partie Croupier\r\n if int(players[1].points) < 17:\r\n players[1].ajouteCarte(paquet.distribue())\r\n players[1].calculePoints()\r\n if players[1].points > 21:\r\n print(\"Le croupier depasse 21, Vous avez gagné!\")\r\n gameOver = True\r\n break\r\n # fin de la manche\r\n for player in players:\r\n player.affiche(\"all\")\r\n if players[0].points < players[1].points:\r\n print(\"votre score est inferieur a celui du croupier\")\r\n elif players[0].points == players[1].points:\r\n print(\"meme score que le croupier\")\r\n else:\r\n print(\"votre score est superieur a celui du croupier\")\r\n gameOver = True\r\n if input(\"continuer [O/N]?\") == \"N\":\r\n jeu = False\r\n else:\r\n system(\"cls\")\r\n","sub_path":"exo/nsi terminal/exo/exo 3 - jeu du blackJack/moteurJeu.py","file_name":"moteurJeu.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"49381059","text":"import sys\nimport binascii\nimport struct\n\n# first param is start addr (e.g. 0x4000)\nstart = int(sys.argv[1], 0)\n# second param is firmware file name\nfn = sys.argv[2]\n\nwith open(fn, 'r') as f:\n fw = f.read()\nchecksum_1 = 0 # sum(x)\nchecksum_2 = 0 # -sum(x)\nfor i in range(start, len(fw)-2, 2):\n checksum_1 += struct.unpack('!H', fw[i:i+2])[0]\n checksum_2 += -struct.unpack('!H', fw[i:i + 2])[0]\n checksum_1_packed = struct.pack('!H', checksum_1 & 0xFFFF)\n checksum_2_packed = struct.pack('!H', checksum_2 & 0xFFFF)\n if i > 2 and checksum_1_packed == fw[i+2:i+4]:\n print(\"Checksum by sum found: {}\".format(hex(i+2)))\n if i > 2 and checksum_2_packed == fw[i+2:i+4]:\n print(\"Checksum by -sum found: {}\".format(hex(i+2)))\n\n# end = 0x4e5ec\n# struct.pack('!H', sum([struct.unpack('!H', fw[addr:addr+2])[0] for addr in range(0x4000, end, 2)]) & 0xFFFF)\n# fw[end:end+2]\n","sub_path":"tools/checksum-search.py","file_name":"checksum-search.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"578256708","text":"\"\"\"\nSeeder de l'application\n(génère un jeu d'essai aléatoire)\n\"\"\"\n\nfrom services import TravelService, CityService\nfrom models import City, Travel, Accomodation, Transport, Session\nfrom faker import Factory\nimport random\nimport datetime\n\n\nclass Seeder:\n \"\"\"\n Seeder\n \"\"\"\n\n def __init__(self, number_of_cities=60, number_of_travels=30):\n \"\"\"\n Constructor\n :param session: Session\n :param number_of_cities: integer\n :param number_of_travels: integer\n :return: void\n \"\"\"\n self.session = Session()\n self.travel_service = TravelService()\n self.city_service = CityService()\n self.fake = Factory.create()\n self.number_of_cities = number_of_cities\n self.number_of_travels = number_of_travels\n\n def run(self):\n \"\"\"\n Point d'entrée\n :return: void\n \"\"\"\n self.seed_cities(self.number_of_cities)\n self.seed_travels(self.number_of_travels)\n self.session.close()\n\n def seed_cities(self, number):\n \"\"\"\n Crée des enregistrements aléatoires dans la table des villes\n :param number: nombre de villes\n :return: void\n \"\"\"\n for i in range(0, number):\n city = City()\n city.name = self.fake.city()\n city.country = self.fake.country()\n city.is_capital = self.fake.pybool()\n self.city_service.save(city)\n\n capitals = self.city_service.all_capitals()\n non_capitals = 
self.city_service.all_non_capitals()\n\n        for city in non_capitals:\n            city.capital_id = random.choice(capitals).id\n            self.city_service.save(city)\n\n    def seed_travels(self, number):\n        \"\"\"\n        Creates random records in the travels table\n        :param number: number of travels\n        :return: void\n        \"\"\"\n        cities = self.city_service.all()\n\n        for i in range(0, number):\n            travel = Travel()\n            travel.start = self.fake.date_time_this_year(before_now=True, after_now=False)\n            travel.end = travel.start + datetime.timedelta(days=random.randrange(1, 10, 1))\n            travel.review = self.fake.text(max_nb_chars=200)\n            travel.city_id = random.choice(cities).id\n\n            for j in range(0, 3):\n                transport = Transport()\n                transport.type = random.choice(['Train', 'Plane', 'Car', 'Boat', 'Bus'])\n                transport.price = random.randrange(10, 150, 5)\n                transport.duration = random.randrange(1, 24, 1)\n                travel.transports.append(transport)\n\n            for k in range(0, 3):\n                accomodation = Accomodation()\n                accomodation.name = self.fake.company()\n                accomodation.type = random.choice(['Camping', 'Hotel', 'Cottage', 'Apartment'])\n                accomodation.price = random.randrange(50, 500, 10)\n                travel.accomodations.append(accomodation)\n\n            self.travel_service.save(travel)\n\n\nif __name__ == '__main__':\n    seeder = Seeder()\n    seeder.run()","sub_path":"seeder.py","file_name":"seeder.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"608882279","text":"__author__ = 'v8'\nimport numpy as np\nimport warnings\nfrom functools import reduce  # reduce is not a builtin in Python 3\n\nwarnings.simplefilter(\"ignore\")\n\ndef forward(X, transition, emission, hmm_model):\n    \"\"\"Log-space forward algorithm.\n\n    :param X: sequence of observation indices\n    :param transition: log transition probability matrix\n    :param emission: log emission probability matrix\n    :param hmm_model: model object exposing the state set\n    :return: (log-likelihood of X, forward matrix f)\n    \"\"\"\n    f = np.empty((len(hmm_model.states), len(X)+1))\n    f.fill(float('-inf'))\n    f[0,0] = 0.\n    for i in range(1, f.shape[1]):\n        for l in range(1, f.shape[0]):\n            f[l,i] = emission[l, X[i-1]] + reduce(np.logaddexp, [f[k,i-1] + transition[k,l] for k in range(f.shape[0])])\n\n    p_x = reduce(np.logaddexp, f[:,-1]+transition[:,-1])\n    return p_x, f\n\n\ndef backward(X, transition, emission, hmm_model):\n    \"\"\"Log-space backward algorithm.\n\n    :param X: sequence of observation indices\n    :param transition: log transition probability matrix\n    :param emission: log emission probability matrix\n    :param hmm_model: model object exposing the state set\n    :return: (log-likelihood of X, backward matrix b)\n    \"\"\"\n    b = np.empty((len(hmm_model.states), len(X)))\n    b.fill(float('-inf'))\n    b[:,-1] = transition[:,-1]\n\n    for i in range(len(X) - 2, -1, -1):\n        for k in range(b.shape[0]):\n            # calculating b_k(i)\n            sub_sums = [(emission[l, X[i+1]] +\n                         transition[k, l] +\n                         b[l, i+1]) for l in range(b.shape[0])]\n            # take the log of the sum\n            b[k, i] = reduce(np.logaddexp, sub_sums)\n\n    p_x = reduce(np.logaddexp, transition[0,:] + b[:,0] + emission[:,X[0]])\n    return p_x, b\n\n\ndef update_transition_matrix(p_x, f, b, X, A, transition, emission):\n    \"\"\"E-step accumulation of expected transition counts into A.\n\n    :param p_x: log-likelihood of X\n    :param f: forward matrix\n    :param b: backward matrix\n    :param X: sequence of observation indices\n    :param A: transition count accumulator\n    :param transition: log transition probability matrix\n    :param emission: log emission probability matrix\n    :return: the updated accumulator A\n    \"\"\"\n    for k in range(1,A.shape[0]-1):\n        for l in range(A.shape[1]):\n            A[k,l] += np.exp(reduce(np.logaddexp,\n                                    [f[k,i] + transition[k,l] + emission[l,X[i+1]] + b[l,i+1] for i in range(len(X)-1)]) - p_x)\n    # add pseudocount to indicate the transition from BG2 to end state\n    #A[-2,-1] += 1.0\n    return A\n\n\ndef update_emission_matrix(p_x, f, b, X, E):\n    \"\"\"E-step accumulation of expected emission counts into E.\n\n    :param p_x: log-likelihood of X\n    :param f: forward matrix\n    :param b: backward matrix\n    :param X: sequence of observation indices\n    :param E: emission count accumulator\n    :return: the updated accumulator E\n    \"\"\"\n    for s in range(4):\n        for k in range(1, E.shape[0]):\n            E[k,s] += np.exp(reduce(np.logaddexp, [(f[k,i] + b[k, i]) for i in np.argwhere(np.array(X) == s)[:,0]]) - p_x)\n    return 
E\n","sub_path":"algorithms3.py","file_name":"algorithms3.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"387514698","text":"from oandapyV20 import API\nimport oandapyV20.endpoints.instruments as instruments\nimport oandapyV20.endpoints.pricing as pricing\nimport pandas as pd\nimport os\n\naccountID = os.environ[\"oanda_accountID\"]\naccess_token = os.environ[\"oanda_access_token\"]\napi = API(access_token=access_token)\n\n\ndef now():\n\n ps = pricing.PricingStream(accountID=accountID, params={\"instruments\": \"USD_JPY\"})\n\n for R in api.request(ps):\n if \"bids\" in R.keys():\n return R[\"bids\"][0][\"price\"]\n ps.terminate(\"maxrecs records received\")\n\n\ndef candles():\n\n params = {\n \"alignmentTimezone\": \"Japan\",\n \"count\": 25,\n \"granularity\": \"D\"\n }\n\n r = instruments.InstrumentsCandles(instrument=\"USD_JPY\", params=params)\n api.request(r)\n\n data = []\n for raw in r.response['candles']:\n data.append([raw['time'], raw['volume'], raw['mid']['o'], raw['mid']['h'], raw['mid']['l'],\n raw['mid']['c']])\n\n # リストからデータフレームへ変換\n df = pd.DataFrame(data)\n df.columns = ['time', 'volume', 'open', 'high', 'low', 'close']\n df = df.set_index('time')\n\n # date型を綺麗にする\n df.index = pd.to_datetime(df.index)\n return df","sub_path":"Readfx.py","file_name":"Readfx.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"174516769","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nHLT = 0b00000001\nLDI = 0b10000010\nPRN = 0b01000111\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nCMP = 0b10100111\nJEQ = 0b01010101\nJNE = 0b01010110\nJMP = 0b01010100\nRET = 0b00010001\nCALL = 0b01010000\nFL = 6\nSC = 7\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n # flags for greater less and equal\n self.E = 0b00000001\n self.G = 0b00000010\n self.L = 0b00000100\n self.flags = {\"e\": False, \"g\": False, \"l\": False}\n #set array of 8 zeroes\n self.reg = [0] * 8\n #set array of 256 zeroes\n self.ram = [0] * 256\n #declaring special register for stack pointer\n self.reg[7] = len(self.ram) - 1\n self.reg[FL] = 0b00000000\n #program counter\n self.pc = 0\n #boolian for stopping the program and starting it.\n self.halted = False\n #Stack Counter\n self.sc = 0\n\n #return the element stored in the address of ram\n def ram_read(self, address):\n return self.ram[address]\n\n def ram_write(self, address, val):\n #overwrite the value at the index provided\n self.ram[address] = val\n\n def load(self, filename):\n \"\"\"Load a program into memory.\"\"\"\n #address counter\n address = 0\n #passes the filename into the open function to be evaluated\n with open(filename) as fp:\n #for each line in the page\n for line in fp:\n #split the comment by # into a list\n comment_split = line.split(\"#\")\n #removes any unnecisary spaces\n num = comment_split[0].strip()\n\n #if the number is a string\n if num == '': # ignore blanks\n continue\n #convert the value into an integer\n val = int(num, 2)\n self.ram_write(address, val)\n address += 1\n\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"# XXX:\n #if the operation is ADD\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n #elif op == \"SUB\": etc\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU 
state. You might want to call this\n        from run() if you need help debugging.\n        \"\"\"\n\n        print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n            self.pc,\n            #self.fl,\n            #self.ie,\n            self.ram_read(self.pc),\n            self.ram_read(self.pc + 1),\n            self.ram_read(self.pc + 2)\n        ), end='')\n\n        for i in range(8):\n            print(\" %02X\" % self.reg[i], end='')\n\n\n\n    def run(self):\n        \"\"\"Run the CPU.\"\"\"\n\n        while not self.halted:\n\n            #set variable to the value at the program counter in ram\n            instruction_to_execute = self.ram_read(self.pc)\n\n            #set the operands to the next two values in ram after the instruction being executed\n            operand_a = self.ram_read(self.pc + 1)\n            operand_b = self.ram_read(self.pc + 2)\n            #pass the current instruction and the next two lines through execute_instruction\n            self.execute_instruction(instruction_to_execute, operand_a, operand_b)\n            #print(self.pc)\n    def execute_instruction(self, instruction, operand_a, operand_b):\n        #if the instruction is HLT stop the program and increase the counter\n\n        if instruction == HLT:\n            self.halted = True\n            self.pc += 1\n        #CMP sets flags for each different possible outcome\n        #FL bits: 00000LGE\n        elif instruction == CMP:\n            #print(f'cmp operands: {self.reg[operand_a], self.reg[operand_b]}')\n            #clear any flags left over from a previous comparison\n            self.flags[\"e\"] = False\n            self.flags[\"g\"] = False\n            self.flags[\"l\"] = False\n\n            if self.reg[operand_a] == self.reg[operand_b]:\n                self.flags[\"e\"] = True\n\n            elif self.reg[operand_a] > self.reg[operand_b]:\n                self.flags[\"g\"] = True\n\n            else:\n                self.flags[\"l\"] = True\n\n            self.pc += 3\n\n        #set pc to the desired register\n        elif instruction == JMP:\n\n            self.pc = self.reg[operand_a]\n\n        #set pc to desired register if the equal flag is set to true\n        elif instruction == JEQ:\n            #print(\"jeq hit\")\n            if self.flags[\"e\"] == True:\n                #print(\"JEQ is true\")\n                self.pc = self.reg[operand_a]\n                self.flags['e'] = False\n            else:\n                self.pc += 2\n\n        #set pc to desired register if the equal flag is set to false\n        elif instruction == JNE:\n            if self.flags[\"e\"] == False:\n                #print(\"JNE is True\")\n                self.pc = self.reg[operand_a]\n            else:\n                self.pc += 2\n        #if the instruction is POP pull the value off the top of the stack into the given register\n        elif instruction == POP:\n\n            val = self.ram[self.reg[SC] + 1]\n            self.reg[SC] += 1\n            self.reg[operand_a] = val\n            self.pc += 2\n\n        elif instruction == PUSH:\n            self.ram[self.reg[SC]] = self.reg[operand_a]\n\n            self.reg[SC] -= 1\n            self.pc += 2\n\n        #if the instruction is LDI set the value at self.reg[operand_a] equal to the second operand\n        elif instruction == LDI:\n            self.reg[operand_a] = operand_b\n            #print(self.reg[operand_a])\n            self.pc += 3\n        elif instruction == CALL:\n            #push the address of the next instruction onto the stack, then jump to the subroutine\n            self.ram[self.reg[SC]] = self.pc + 2\n            self.reg[SC] -= 1\n            self.pc = self.reg[operand_a]\n        elif instruction == RET:\n            #pop the return address off the stack and jump back to it\n            self.pc = self.ram[self.reg[SC] + 1]\n            self.reg[SC] += 1\n        #if the instruction is PRN then print the value in the provided register and move to the next command\n        elif instruction == PRN:\n            print(self.reg[operand_a])\n            self.pc += 2\n        #if the instruction is MUL set the value in the register[operand_a] equal to the product of the two registers provided\n        elif instruction == MUL:\n            #print('mult hit')\n            self.reg[operand_a] = self.reg[operand_a] * self.reg[operand_b]\n            self.pc += 3\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"361353439","text":"# %load q04_find_top_10/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q03_better_event.build import q03_better_event,q02_country_operations, q01_rename_columns\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \n\ndef q04_find_top_10(olympic , 
summer_column , winter_column , total_column ):\n top_countries = []\n top_summer_countries = []\n top_winter_countries = []\n common_countries = []\n \n for x in list(olympic[total_column].loc[:144].nlargest(10).index.values):\n top_countries.append(olympic['Country_Name'][x])\n \n for x in list(olympic[summer_column].loc[:144].nlargest(10).index.values):\n top_summer_countries.append(olympic['Country_Name'][x])\n\n \n for x in list(olympic[winter_column].loc[:144].nlargest(10).index.values):\n top_winter_countries.append(olympic['Country_Name'][x])\n \n common_countries = list(set(top_winter_countries) & set(top_summer_countries) & set(top_countries))\n \n return top_summer_countries, top_winter_countries ,top_countries ,common_countries\n\n\n\n\n","sub_path":"q04_find_top_10/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90939381","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport logging\n\nimport numpy as np\nfrom utils.dsdb import dasquery\n\n\ndef get_name(str):\n str = str.split(\"_dipoleRecoil\")[0].split(\"_Tune\")[0].split(\"_amcatnlo\")[0].split(\"_13TeV\")[0]\n str = str.replace(\"/\", \"\")\n return str\n\n\ndef has_expr(dataset, expression=\"ext\"):\n name = get_das_name(dataset)\n return expression in name\n\n\ndef get_dataset(dataset):\n ds = dataset[\"dataset\"]\n if len(ds) == 1:\n return ds[0]\n else:\n raise NotImplementedError\n\n\ndef get_das_name(dataset):\n ds = get_dataset(dataset)\n return ds[\"name\"]\n\n\ndef get_creation_date(dataset):\n if dataset is None:\n return -1\n else:\n ds = get_dataset(dataset)\n return ds[\"creation_date\"]\n\n\ndef get_gensim(dataset, identifier=\"GEN-SIM\"):\n name = get_das_name(dataset)\n while identifier not in name:\n query = dasquery(\"parent dataset=%s\" % name)\n if len(query) > 0:\n dataset = query[0][\"parent\"][0] # % FIXME! 
- dataset can have more than one parent\n name = dataset[\"name\"]\n else:\n return None\n return dataset\n\n\ndef dataset_has_name(dataset, name=\"new_pmx\", match_case=True):\n das_name = get_das_name(dataset)\n if not match_case:\n name, das_name = name.lower(), das_name.lower()\n return name in das_name\n\n\ndef get_datasets_by_name(datasets, name=\"new_pmx\", parent=\"\", match_case=True):\n if len(parent) > 0:\n out = []\n for dataset in datasets:\n parent_dataset = get_gensim(dataset, parent)\n if parent_dataset is None:\n logging.info(f\"dropping {get_das_name(dataset)} because it has no {parent}\")\n continue\n a, b = name, parent_dataset[\"name\"]\n if not match_case:\n a, b = a.lower(), b.lower()\n if a in b:\n out.append(dataset)\n else:\n out = [dataset for dataset in datasets if dataset_has_name(dataset, name, match_case)]\n return out\n\n\ndef get_version(dataset, regex=\"v(\\d+)/\"):\n if not isinstance(dataset, str):\n dataset = get_das_name(dataset)\n\n version = re.search(regex, dataset)\n if version:\n return int(version.group(1))\n else:\n return 0\n\n\ndef sort_datasets_by_expressions(datasets, expressions):\n idx = np.argsort(-np.array(expressions).astype(np.int))\n return list(np.array(datasets)[idx])\n\n\ndef get_highest_version(datasets):\n sorted_datasets = sort_by_version(datasets, regex=\".+ver(\\d+).+\")\n return sorted_datasets[0]\n\n\ndef sort_by_version(datasets, regex=\"v(\\d+)/\"):\n versions = []\n for dataset in datasets:\n versions.append(get_version(dataset, regex=regex))\n return sort_datasets_by_expressions(datasets, versions)\n\n\ndef sort_by_ps_weights(datasets):\n ps_weights = []\n for dataset in datasets:\n ps_weights.append(has_expr(dataset, expression=\"PSWeights\"))\n return sort_datasets_by_expressions(datasets, ps_weights)\n\n\ndef sort_by_ext(datasets):\n ext = []\n for dataset in datasets:\n ext.append(has_expr(dataset, expression=\"ext\"))\n return sort_datasets_by_expressions(datasets, ext)\n\n\ndef sort_by_new_pmx(datasets):\n new_pmx = []\n for dataset in datasets:\n new_pmx.append(has_expr(dataset, expression=\"new_pmx\"))\n return sort_datasets_by_expressions(datasets, new_pmx)\n\n\ndef get_latest(datasets):\n out = []\n\n datasets = sort_by_ps_weights(datasets)\n datasets = sort_by_ext(datasets)\n datasets = sort_by_version(datasets)\n datasets = sort_by_new_pmx(datasets)\n\n gen_sim = [get_gensim(dataset) for dataset in datasets]\n\n if any(_ is None for _ in gen_sim):\n for dataset in datasets:\n dataset[\"dataset\"][0][\"name\"] += \"_nogen\"\n out.append(dataset)\n else:\n creation_dates = []\n for _ in gen_sim:\n _dataset = dasquery(_[\"name\"])[0]\n creation_dates.append(get_creation_date(_dataset))\n\n sorted_idx = np.argsort(-np.array(creation_dates).astype(np.int))\n burned = set([])\n for idx, creation_date in zip(sorted_idx, creation_dates):\n if creation_date == -1 or creation_date in burned:\n continue\n out.append(datasets[idx])\n burned.add(creation_date)\n return out\n\n\ndef clear_datasets(datasets):\n out = []\n keywords = [\n \"percentMaterial\",\n \"FlatPU\",\n \"BSandPUSummer16\",\n \"pilot\",\n \"ForMUOVal\",\n \"MUOTrackFix\",\n \"PU2017RECOPF\",\n \"PU2017RECOSIM\",\n ]\n for dataset in datasets:\n name = get_das_name(dataset)\n if any(keyword in name for keyword in keywords):\n continue\n out.append(dataset)\n return out\n\n\ndef same_dataset_different_version(datasets):\n unified_das_names = []\n for dataset in datasets:\n das_name = get_das_name(dataset)\n unified_das_name = re.sub(\"v(\\d+)/\", \"\", 
das_name)\n unified_das_names.append(unified_das_name)\n return len(set(unified_das_names)) == 1\n\n\ndef clear_pu(datasets):\n out = []\n for dataset in datasets:\n if (\n len(get_datasets_by_name([dataset], \"new_pmx\")) > 0\n or len(get_datasets_by_name([dataset], \"new_pmx\", parent=\"/MINIAODSIM\")) > 0\n or len(get_datasets_by_name([dataset], \"PU2017\", parent=\"/MINIAODSIM\")) > 0\n or len(get_datasets_by_name([dataset], \"PU2017\", parent=\"/AODSIM\")) > 0\n ):\n out.append(dataset)\n else:\n logging.info(f\"pu cleaning dropped:, {get_das_name(dataset)}\")\n return out\n\n\ndef get_datasets(das_name, year):\n datasets_out = []\n datasets = dasquery(das_name)\n datasets = clear_datasets(datasets)\n if len(datasets) == 0:\n logging.info(f\"No datasets avalable for: {das_name}\")\n elif len(datasets) == 1:\n datasets_out = [datasets[0]]\n else:\n logging.info(\"== Make decision to choose out of:\")\n logging.info(\"\\n\".join([get_das_name(ds) for ds in datasets]))\n # if \"TuneCP5_PSweights\" in das_name:\n # from IPython import embed\n #\n # embed()\n # has_psweights = get_datasets_by_name(datasets, \"PSWeights\", match_case=False)\n # if len(has_psweights) > 0:\n # datasets = has_psweights\n\n datasets_out = get_latest(datasets)\n if same_dataset_different_version(datasets_out):\n datasets_out = datasets_out[:1]\n else:\n logging.info(\"NONTRIVIAL\")\n logging.info(\"== chosen:\")\n logging.info(\"\\n\".join([get_das_name(ds) for ds in datasets_out]))\n dataset = {\"keys\": [get_das_name(ds) for ds in datasets_out]}\n return dataset\n\n\ndef split_das_name(das_name):\n values = das_name.split(\"/\")\n if len(values) == 4:\n _, name, tag, sim = values\n else:\n print(das_name)\n print(values)\n raise NotImplementedError\n return name, tag, sim\n\n\ndef merge_das_name(*args):\n return \"/\".join([\"\", *args])\n\n\ndef get_20xx(name, year, ppd, sim=\"nano\"):\n tag = ppd[year][sim][\"name\"] + \"*\" + ppd[year][sim][\"gt\"] + \"*\"\n das_name = merge_das_name(name, tag, sim.upper() + \"AODSIM\")\n datasets = get_datasets(das_name, year)\n return datasets\n\n\ndef change_campaign(datasets, year, ppd, sim=\"nano\"):\n names = []\n xs = {}\n changed_datasets = []\n for dataset in datasets:\n name, tag, _ = split_das_name(dataset[\"keys\"][0])\n if name not in names:\n names.append(name)\n xs[name] = dataset[\"xs\"]\n for name in names:\n print(name)\n dataset = get_20xx(name, year, ppd, sim)\n dataset[\"xs\"] = xs[name]\n changed_datasets.append(dataset)\n\n return changed_datasets\n\n\ndef change_campaign_data(datasets, year, ppd, sim=\"nano\"):\n changed_datasets = []\n for dataset in datasets:\n name, tag, _ = split_das_name(dataset[\"keys\"][0])\n run = tag.split(\"-\")[0]\n das_name = merge_das_name(\n name, \"-\".join([run + \"*\", ppd[year][sim][\"name\"] + \"*\"]), sim.upper() + \"AOD\"\n )\n if all(_ in das_name for _ in [\"MINIAOD\", \"Run2018D\"]):\n if any(_ in das_name for _ in [\"EGamma\", \"SingleMuon\"]):\n das_name = merge_das_name(\n name, \"-\".join([run + \"*\", \"22Jan2019\" + \"*\"]), sim.upper() + \"AOD\"\n )\n else:\n das_name = merge_das_name(\n name, \"-\".join([run + \"*\", \"Prompt\" + \"*\"]), sim.upper() + \"AOD\"\n )\n _ = dasquery(das_name)\n _dataset = get_highest_version(_)\n if _dataset is None:\n raise NotImplementedError\n dataset[\"keys\"] = [get_das_name(_dataset)]\n changed_datasets.append(dataset)\n return changed_datasets\n\n\nsimple_channels = {\n \"SingleElectron\": [\"e\"],\n \"EGamma\": [\"e\", \"ee\"],\n \"SingleMuon\": [\"mu\"],\n 
\"DoubleEG\": [\"ee\"],\n \"DoubleMuon\": [\"mumu\"],\n \"MuonEG\": [\"emu\"],\n}\n\n\ndef to_dict(dataset, is_data=False):\n aux = {}\n xs = None\n keys = dataset[\"keys\"]\n if \"xs\" in dataset:\n xs = dataset[\"xs\"]\n if \"aux\" in dataset:\n aux = dataset[\"aux\"]\n\n if len(keys) == 0:\n return {\"name\": \"NON_EXISTENT\"}\n das_name = keys[0]\n if dataset[\"is_data\"]:\n physics_name, run, format = das_name.split(\"/\")[1:]\n channels = simple_channels[physics_name]\n run_name = re.search(\"Run\\d+(\\w)[-_]\", run).group(1)\n aux.update({\"run\": run_name, \"channels\": channels})\n name = \"_\".join([\"data\", run_name, \"+\".join(channels)])\n else:\n name = get_name(das_name)\n return {\n \"name\": name,\n \"is_data\": dataset[\"is_data\"],\n \"aux\": aux,\n \"keys\": keys,\n \"misc\": {\"xs\": xs},\n }\n\n\ndef dictify_datasets(datasets):\n dictified = []\n for dataset in datasets:\n dictified.append(to_dict(dataset))\n return dictified\n","sub_path":"boostedhiggs/utils/das.py","file_name":"das.py","file_ext":"py","file_size_in_byte":9920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"110331217","text":"\"\"\"\nGiven an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\n\"\"\"\n\n\ndef contains_duplicate(nums):\n seen = set()\n for num in nums:\n if num in seen:\n return True\n seen.add(num)\n\n return False\n\n\nif __name__ == \"__main__\":\n nums = [1, 2, 3, 1]\n print(contains_duplicate(nums))\n","sub_path":"fast-track/arrays/3_contains_duplicate.py","file_name":"3_contains_duplicate.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"491106248","text":"##===---------- test_sycl_queue_manager.py - dpctl -------*- Python -*----===##\n##\n## Data Parallel Control (dpCtl)\n##\n## Copyright 2020 Intel Corporation\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n##\n##===----------------------------------------------------------------------===##\n###\n### \\file\n### Defines unit test cases for the SyclQueueManager class in sycl_core.pyx.\n##===----------------------------------------------------------------------===##\n\nimport dpctl\nimport unittest\n\n\nclass TestGetNumPlatforms(unittest.TestCase):\n @unittest.skipIf(not dpctl.has_sycl_platforms(), \"No SYCL platforms available\")\n def test_dpctl_get_num_platforms(self):\n if dpctl.has_sycl_platforms:\n self.assertGreaterEqual(dpctl.get_num_platforms(), 1)\n\n\n@unittest.skipIf(not dpctl.has_sycl_platforms(), \"No SYCL platforms available\")\nclass TestDumpMethods(unittest.TestCase):\n def test_dpctl_dump(self):\n try:\n dpctl.dump()\n except Exception:\n self.fail(\"Encountered an exception inside dump().\")\n\n def test_dpctl_dump_device_info(self):\n q = dpctl.get_current_queue()\n try:\n q.get_sycl_device().dump_device_info()\n except Exception:\n self.fail(\"Encountered an 
exception inside dump_device_info().\")\n\n\n@unittest.skipIf(not dpctl.has_sycl_platforms(), \"No SYCL platforms available\")\nclass TestIsInDeviceContext(unittest.TestCase):\n    def test_is_in_device_context_outside_device_ctxt(self):\n        self.assertFalse(dpctl.is_in_device_context())\n\n    @unittest.skipUnless(dpctl.has_gpu_queues(), \"No OpenCL GPU queues available\")\n    def test_is_in_device_context_inside_device_ctxt(self):\n        with dpctl.device_context(\"opencl:gpu:0\"):\n            self.assertTrue(dpctl.is_in_device_context())\n\n    @unittest.skipUnless(dpctl.has_gpu_queues(), \"No OpenCL GPU queues available\")\n    @unittest.skipUnless(dpctl.has_cpu_queues(), \"No OpenCL CPU queues available\")\n    def test_is_in_device_context_inside_nested_device_ctxt(self):\n        with dpctl.device_context(\"opencl:cpu:0\"):\n            with dpctl.device_context(\"opencl:gpu:0\"):\n                self.assertTrue(dpctl.is_in_device_context())\n            self.assertTrue(dpctl.is_in_device_context())\n        self.assertFalse(dpctl.is_in_device_context())\n\n\n@unittest.skipIf(not dpctl.has_sycl_platforms(), \"No SYCL platforms available\")\nclass TestGetCurrentDevice(unittest.TestCase):\n    def test_get_current_device_type_outside_device_ctxt(self):\n        self.assertNotEqual(dpctl.get_current_device_type(), None)\n\n    def test_get_current_device_type_inside_device_ctxt(self):\n        self.assertNotEqual(dpctl.get_current_device_type(), None)\n\n        with dpctl.device_context(\"opencl:gpu:0\"):\n            self.assertEqual(dpctl.get_current_device_type(), dpctl.device_type.gpu)\n\n        self.assertNotEqual(dpctl.get_current_device_type(), None)\n\n    @unittest.skipUnless(dpctl.has_cpu_queues(), \"No OpenCL CPU queues available\")\n    def test_get_current_device_type_inside_nested_device_ctxt(self):\n        self.assertNotEqual(dpctl.get_current_device_type(), None)\n\n        with dpctl.device_context(\"opencl:cpu:0\"):\n            self.assertEqual(dpctl.get_current_device_type(), dpctl.device_type.cpu)\n\n            with dpctl.device_context(\"opencl:gpu:0\"):\n                self.assertEqual(dpctl.get_current_device_type(), dpctl.device_type.gpu)\n            self.assertEqual(dpctl.get_current_device_type(), dpctl.device_type.cpu)\n\n        self.assertNotEqual(dpctl.get_current_device_type(), None)\n\n\n@unittest.skipIf(not dpctl.has_sycl_platforms(), \"No SYCL platforms available\")\nclass TestGetCurrentQueueInMultipleThreads(unittest.TestCase):\n    def test_num_current_queues_outside_with_clause(self):\n        self.assertEqual(dpctl.get_num_activated_queues(), 0)\n\n    @unittest.skipUnless(dpctl.has_gpu_queues(), \"No OpenCL GPU queues available\")\n    @unittest.skipUnless(dpctl.has_cpu_queues(), \"No OpenCL CPU queues available\")\n    def test_num_current_queues_inside_with_clause(self):\n        with dpctl.device_context(\"opencl:cpu:0\"):\n            self.assertEqual(dpctl.get_num_activated_queues(), 1)\n            with dpctl.device_context(\"opencl:gpu:0\"):\n                self.assertEqual(dpctl.get_num_activated_queues(), 2)\n        self.assertEqual(dpctl.get_num_activated_queues(), 0)\n\n    @unittest.skipUnless(dpctl.has_gpu_queues(), \"No OpenCL GPU queues available\")\n    @unittest.skipUnless(dpctl.has_cpu_queues(), \"No OpenCL CPU queues available\")\n    def test_num_current_queues_inside_threads(self):\n        from threading import Thread\n\n        def SessionThread(self):\n            self.assertEqual(dpctl.get_num_activated_queues(), 0)\n            with dpctl.device_context(\"opencl:gpu:0\"):\n                self.assertEqual(dpctl.get_num_activated_queues(), 1)\n\n        Session1 = Thread(target=SessionThread, args=(self,))\n        Session2 = Thread(target=SessionThread, args=(self,))\n        with dpctl.device_context(\"opencl:cpu:0\"):\n            self.assertEqual(dpctl.get_num_activated_queues(), 1)\n            
Session1.start()\n            Session2.start()\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"dpctl/tests/test_sycl_queue_manager.py","file_name":"test_sycl_queue_manager.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"455022420","text":"from custom_func.crypt_func import *\r\nfrom custom_func.sign_func import *\r\n\r\n# encryption algorithm configuration\r\nencrypt = [{'name': 'AES(ECB)_TO_string', 'function': aes_encode_to_string, 'mode': 'ECB',\r\n            'comment': 'AES encrypt and encode to string, ECB mode'},\r\n           {'name': 'AES(CBC)_TO_string', 'function': aes_encode_to_string, 'mode': 'CBC',\r\n            'comment': 'AES encrypt and encode to string, CBC mode'},\r\n           {'name': 'AES(ECB)_TO_base64', 'function': aes_encode_to_b64, 'mode': 'ECB',\r\n            'comment': 'AES encrypt then base64 encode, ECB mode'},\r\n           {'name': 'AES(CBC)_TO_base64', 'function': aes_encode_to_b64, 'mode': 'CBC',\r\n            'comment': 'AES encrypt then base64 encode, CBC mode'},\r\n           {'name': 'AES(ECB)_TO_base64_URLEncode', 'function': aes_encode_to_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'AES encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'AES(CBC)_TO_base64_URLEncode', 'function': aes_encode_to_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'AES encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'DES(ECB)_TO_base64', 'function': des_encode_to_b64, 'mode': 'ECB',\r\n            'comment': 'DES encrypt then base64 encode, ECB mode'},\r\n           {'name': 'DES(CBC)_TO_base64', 'function': des_encode_to_b64, 'mode': 'CBC',\r\n            'comment': 'DES encrypt then base64 encode, CBC mode'},\r\n           {'name': 'DES(ECB)_TO_base64_URLEncode', 'function': des_encode_to_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'DES encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'DES(CBC)_TO_base64_URLEncode', 'function': des_encode_to_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'DES encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'DES3(ECB)_TO_base64', 'function': des3_encode_to_b64, 'mode': 'ECB',\r\n            'comment': 'DES3 encrypt then base64 encode, ECB mode'},\r\n           {'name': 'DES3(CBC)_TO_base64', 'function': des3_encode_to_b64, 'mode': 'CBC',\r\n            'comment': 'DES3 encrypt then base64 encode, CBC mode'},\r\n           {'name': 'DES3(ECB)_TO_base64_URLEncode', 'function': des3_encode_to_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'DES3 encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'DES3(CBC)_TO_base64_URLEncode', 'function': des3_encode_to_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'DES3 encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'RSA_TO_base64', 'function': rsa_encode_to_b64,\r\n            'mode': '200', 'comment': 'RSA encrypt then base64 encode'}]\r\n\r\n# decryption algorithm configuration\r\ndecrypt = [{'name': 'AES(ECB)_FROM_string', 'function': aes_decode_from_string, 'mode': 'ECB',\r\n            'comment': 'decryption counterpart of AES encrypt-and-encode-to-string, ECB mode'},\r\n           {'name': 'AES(CBC)_FROM_string', 'function': aes_decode_from_string, 'mode': 'CBC',\r\n            'comment': 'decryption counterpart of AES encrypt-and-encode-to-string, CBC mode'},\r\n           {'name': 'AES(ECB)_FROM_base64', 'function': aes_decode_from_b64, 'mode': 'ECB',\r\n            'comment': 'decryption counterpart of AES encrypt-then-base64-encode, ECB mode'},\r\n           {'name': 'AES(CBC)_FROM_base64', 'function': aes_decode_from_b64, 'mode': 'CBC',\r\n            'comment': 'decryption counterpart of AES encrypt-then-base64-encode, CBC mode'},\r\n           {'name': 'AES(ECB)_FROM_base64_URLEncode', 'function': aes_decode_from_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'decryption counterpart of AES encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'AES(CBC)_FROM_base64_URLEncode', 'function': aes_decode_from_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'decryption counterpart of AES encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'DES(ECB)_FROM_base64', 'function': des_decode_from_b64, 'mode': 'ECB',\r\n            'comment': 'decryption counterpart of DES encrypt-then-base64-encode, ECB mode'},\r\n           {'name': 
'DES(CBC)_FROM_base64', 'function': des_decode_from_b64, 'mode': 'CBC',\r\n            'comment': 'decryption counterpart of DES encrypt-then-base64-encode, CBC mode'},\r\n           {'name': 'DES(ECB)_FROM_base64_URLEncode', 'function': des_decode_from_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'decryption counterpart of DES encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'DES(CBC)_FROM_base64_URLEncode', 'function': des_decode_from_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'decryption counterpart of DES encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'DES3(ECB)_FROM_base64', 'function': des3_decode_from_b64, 'mode': 'ECB',\r\n            'comment': 'decryption counterpart of DES3 encrypt-then-base64-encode, ECB mode'},\r\n           {'name': 'DES3(CBC)_FROM_base64', 'function': des3_decode_from_b64, 'mode': 'CBC',\r\n            'comment': 'decryption counterpart of DES3 encrypt-then-base64-encode, CBC mode'},\r\n           {'name': 'DES3(ECB)_FROM_base64_URLEncode', 'function': des3_decode_from_b64_url_encode,\r\n            'mode': 'ECB', 'comment': 'decryption counterpart of DES3 encrypt, base64 encode, then URLEncode, ECB mode'},\r\n           {'name': 'DES3(CBC)_FROM_base64_URLEncode', 'function': des3_decode_from_b64_url_encode,\r\n            'mode': 'CBC', 'comment': 'decryption counterpart of DES3 encrypt, base64 encode, then URLEncode, CBC mode'},\r\n           {'name': 'RSA_FROM_base64', 'function': rsa_decode_from_b64,\r\n            'mode': '256', 'comment': 'decryption counterpart of RSA encrypt-then-base64-encode'}]\r\n\r\n# custom function configuration\r\ncustoms_func = [\r\n    {'name': 'public_md5_sign_one', 'function': public_md5_sign_one, 'comment': 'generic signing method one, format: p1v1p2p2p3v3signKey'},\r\n    {'name': 'public_md5_sign_two', 'function': public_md5_sign_two, 'comment': 'generic signing method two, format: signKeyp1v1p2p2p3v3'},\r\n    {'name': 'public_md5_sign_three', 'function': public_md5_sign_three, 'comment': 'generic signing method three, format: signKeyp1v1p2p2p3v3signKey'}\r\n    ]\r\n","sub_path":"config/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"219408872","text":"import pytest\nimport multiprocessing\nimport subprocess\nimport time\nfrom random import randint\n\nfrom pade.core import new_ams\nfrom pade.core.sniffer import Sniffer\nfrom pade.misc.utility import start_loop\n\n\nclass start_loop_test:\n    \"\"\"\n    Starts and stops the reactor thread for agents under test\n    \"\"\"\n\n    def __init__(self, agents):\n        self.agents = agents\n\n    def __enter__(self):\n        self.p = multiprocessing.Process(\n            target=start_loop, args=(self.agents,))\n        self.p.start()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.p.terminate()\n\n\n@pytest.fixture(scope='session')\ndef start_runtime():\n    \"\"\"\n    Starts AMS (no sniffer) and returns its access point\n    \"\"\"\n\n    processes = []\n    ams_dict = {'name': 'localhost', 'port': randint(9000, 60000)}\n\n    # Start AMS in a subprocess\n    commands = ['python', new_ams.__file__, 'pade_user',\n                'email@', '12345', str(ams_dict['port'])]\n    p = subprocess.Popen(commands, stdin=subprocess.PIPE)\n    processes.append(p)\n\n    # Delay before tests to start AMS\n    time.sleep(5.0)\n\n    # Start tests\n    yield ams_dict\n\n    # Terminate runtime\n    for p in processes:\n        p.terminate()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"483485726","text":"class Node(object):\n    def __init__(self, next=None, val=None):\n        self.next = next\n        self.val = val\n\n\nhead = Node(None, 0)\ncurr = head\nfor i in range(1, 10):\n    curr.next = Node(val=i)\n    curr = curr.next\n    # print(curr.val)\n\n\ndef p(h: Node):\n    cur = h\n    while cur is not None:\n        print(cur.val, ' => ', end='')\n        cur = cur.next\n    print()\n\n\np(head)\n\n\ndef reverse(h: 
Node):\n if not h:\n return\n pre = h\n cur = h.next\n pre.next = None\n while cur:\n next = cur.next\n cur.next = pre\n pre = cur\n cur = next\n return pre\n\n\n# new_head = reverse(head)\n# p(new_head)\n# p(head)\n\ndef recursive(h: Node):\n if h is None or h.next is None:\n return h\n else:\n next = h.next\n h.next = None\n newhead = recursive(next)\n next.next = h\n return newhead\n\n\nnew_head = recursive(head)\np(new_head)\n","sub_path":"misc/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247389097","text":"import os, io\nimport json\nimport argparse\nfrom collections import OrderedDict\n#import torch\nfrom fastext import FastVector \nimport re\nfrom utils import bool_flag, initialize_exp\nfrom utils import load_external_embeddings\nfrom models import build_model\nfrom trainer import Trainer\nfrom word_translation import DIC_EVAL_PATH, load_identical_char_dico, load_dictionary\nfrom gensim.models import KeyedVectors\nimport numpy as np\nimport itertools\nimport nltk\nVALIDATION_METRIC = 'mean_cosine-csls_knn_10-S2T-10000'\nimport sys \n\n\n# main\nparser = argparse.ArgumentParser(description='Supervised training')\nparser.add_argument(\"--seed\", type=int, default=-1, help=\"Initialization seed\")\nparser.add_argument(\"--verbose\", type=int, default=2, help=\"Verbose level (2:debug, 1:info, 0:warning)\")\nparser.add_argument(\"--exp_path\", type=str, default=\"\", help=\"Where to store experiment logs and models\")\nparser.add_argument(\"--cuda\", type=bool_flag, default=True, help=\"Run on GPU\")\nparser.add_argument(\"--export\", type=bool_flag, default=True, help=\"Export embeddings after training\")\nparser.add_argument(\"--phrase\", type=bool_flag, default=False, help=\"Respect Phrases\")\n# data\nparser.add_argument(\"--src_lang\", type=str, default='en', help=\"Source language\")\nparser.add_argument(\"--tgt_lang\", type=str, default='es', help=\"Target language\")\nparser.add_argument(\"--emb_dim\", type=int, default=300, help=\"Embedding dimension\")\nparser.add_argument(\"--max_vocab\", type=int, default=200000, help=\"Maximum vocabulary size\")\n# training refinement\nparser.add_argument(\"--n_iters\", type=int, default=50000, help=\"Number of iterations\")\n# dictionary creation parameters (for refinement)\nparser.add_argument(\"--dico_train\", type=str, default=\"default\", help=\"Path to training dictionary (default: use identical character strings)\")\nparser.add_argument(\"--dico_method\", type=str, default='csls_knn_10', help=\"Method used for dictionary generation (nn/invsm_beta_30/csls_knn_10)\")\nparser.add_argument(\"--dico_build\", type=str, default='S2T&T2S', help=\"S2T,T2S,S2T|T2S,S2T&T2S\")\nparser.add_argument(\"--dico_threshold\", type=float, default=0, help=\"Threshold confidence for dictionary generation\")\nparser.add_argument(\"--dico_max_rank\", type=int, default=10000, help=\"Maximum dictionary words rank (0 to disable)\")\nparser.add_argument(\"--dico_min_size\", type=int, default=0, help=\"Minimum generated dictionary size (0 to disable)\")\nparser.add_argument(\"--dico_max_size\", type=int, default=0, help=\"Maximum generated dictionary size (0 to disable)\")\n# reload pre-trained embeddings\nparser.add_argument(\"--src_emb\", type=str, default='', help=\"Reload source embeddings\")\nparser.add_argument(\"--tgt_emb\", type=str, default='', help=\"Reload target 
embeddings\")\nparser.add_argument(\"--query\", type=str, default='', help=\"Reloud source query file\")\nparser.add_argument(\"--query_morph\", type=str, default='', help=\"Reloud source query file\")\nparser.add_argument(\"--model\", type=str, default='', help=\"[multi,fastext]\")\nparser.add_argument(\"--src_align\", type=str, default='', help=\"Reload source alignments\")\nparser.add_argument(\"--tgt_align\", type=str, default='', help=\"Reload target alignments\")\nparser.add_argument(\"--rank\", type=str, default='', help=\"top-rank translations\")\n\nparser.add_argument(\"--normalize_embeddings\", type=str, default=\"\", help=\"Normalize embeddings before training\")\n\n\n# parse parameters\nparams = parser.parse_args()\n\n# check parameters\n#assert not params.cuda or torch.cuda.is_available()\nassert params.dico_train in [\"identical_char\", \"default\"] or os.path.isfile(params.dico_train)\nassert params.dico_build in [\"S2T\", \"T2S\", \"S2T|T2S\", \"S2T&T2S\"]\nassert params.dico_max_size == 0 or params.dico_max_size < params.dico_max_rank\nassert params.dico_max_size == 0 or params.dico_max_size > params.dico_min_size\nprint('generating Indri query format ..')\n\nen2sw = {}\nfrom nltk import PorterStemmer \nstemmer = PorterStemmer()\n\nq2text={}\nd2text={}\nd2morph={}\nq2docs={}\nq2docs2score={}\nq2morph = {}\nwith open(params.dico_train) as f:\n for line in f:\n word, trans = line.rstrip().split(' ')\n if(word in en2sw):\n en2sw[word].append(trans)\n else:\n en2sw[word] = [trans]\n\ndef get_en2sw(en):\n if en in en2sw:\n return en2sw[en]\n else:\n return [en]\n\ndef parse_morph_file(filename):\n with open(filename) as fin:\n i = 0\n for line in fin:\n query_id, line = line.split()\n line = line[2:-2]\n #dicts = [json.loads(x) for x in re.split(r'(\\{.*?\\})', line) if len(x)>1]\n print(line)\n dictionary = json.loads(line)\n dictionary = (get_en2sw(dictionary['word']), str(dictionary['number']), str(dictionary['tense']))\n print(dictionary)\n q2morph[query_id] = dictionary\n i += 1\n return q2morph \n\ndef parse_doc_morph_file(filename):\n with open(filename) as fin:\n i = 0\n morph_info = []\n for line in fin:\n line.strip()\n line = line[2:-3]\n dicts = [json.loads(x) for x in re.split(r'(\\{.*?\\})', line) if len(x)>1]\n dicts = [(dictionary['word'], str(dictionary['number']), str(dictionary['tense'])) for dictionary in dicts]\n morph_info.extend(dicts)\n return morph_info\n\nif __name__ == '__main__':\n q2morph = parse_morph_file(params.query_morph)\n with open('result/result.file') as f:\n for line in f.readlines():\n tokens = line.split(' ')\n qid = tokens[0]\n did = tokens[2]\n score = tokens[4]\n if qid in q2docs:\n q2docs[qid].append(did)\n q2docs2score[qid].append((did,float(score)))\n else:\n q2docs[qid] = [did]\n q2docs2score[qid] = [(did,float(score))]\n\n for filename in os.listdir('docs.morph/'):\n if filename.endswith(\".txt\"): \n #with open(os.path.join('docs.morph/', filename)) as f:\n #data = f.read().replace('\\r\\n',' ').replace('\\n',' ').replace('\\t',' ')\n #m = re.search('MATERIAL_BASE(.+?)\\.', filename)\n #if m:\n # filename = 'MATERIAL_BASE'+m.group(1)\n docid = filename[:-4]\n filename = os.path.join('docs.morph', filename)\n d2morph[docid]=parse_doc_morph_file(filename)#nltk.pos_tag(nltk.word_tokenize(data.encode('utf-8')))\n try:\n os.remove(\"result/result.morphology\")\n except:\n pass\n\n\n for q in q2docs2score:\n doc_list = []\n for did, score in q2docs2score[q]:\n flag = False\n words, number, tense = q2morph[q]\n for word in words:\n 
#print(word)\n if word == 'viazi':\n print(word, number, tense)\n if (word, number, tense) in d2morph[did]:\n if (did, score) not in doc_list:\n flag = True\n if flag:\n doc_list.append((did,score)) \n sorted_doc_list = sorted(doc_list,key=lambda tup: -tup[1]) \n with open(\"result/result.morphology\", 'a') as f:\n rank = 1\n for doc,score in sorted_doc_list:\n f.write(q+\" Q0 \"+ doc + \" \"+str(rank)+\" \" + str(score)+ \" indri\\n\")\n rank +=1\n\n print('done')\n \"\"\"\n Learning loop for Procrustes Iterative Refinement\n \"\"\"\n\n\n","sub_path":"data/tl/ANALYSIS-EN/src/morph.py","file_name":"morph.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"569009346","text":"import voluptuous as vol\n\nDOMAIN = \"octopus_energy\"\n\nCONFIG_MAIN_API_KEY = \"Api key\"\nCONFIG_MAIN_ACCOUNT_ID = \"Account Id\"\nCONFIG_SMETS1 = \"SMETS1\"\n\nCONFIG_TARGET_NAME = \"Name\"\nCONFIG_TARGET_HOURS = \"Hours\"\nCONFIG_TARGET_TYPE = \"Type\"\nCONFIG_TARGET_START_TIME = \"Start time\"\nCONFIG_TARGET_END_TIME = \"End time\"\n\nDATA_CONFIG = \"CONFIG\"\nDATA_COORDINATOR = \"COORDINATOR\"\nDATA_CLIENT = \"CLIENT\"\nDATA_RATES = \"RATES\"\n\nREGEX_HOURS = \"^[0-9]+(\\.[0-9]+)*$\"\nREGEX_TIME = \"^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$\"\nREGEX_ENTITY_NAME = \"^[a-z0-9_]+$\"\nREGEX_TARIFF_PARTS = \"^([A-Z])-([0-9A-Z]+)-([A-Z0-9-]+)-([A-Z])$\"\n\nDATA_SCHEMA_ACCOUNT = vol.Schema({\n vol.Required(CONFIG_MAIN_API_KEY): str,\n vol.Required(CONFIG_MAIN_ACCOUNT_ID): str,\n vol.Optional(CONFIG_SMETS1): bool,\n})\n\nDATA_SCHEMA_TARGET = vol.Schema({\n vol.Required(CONFIG_TARGET_NAME): str,\n vol.Required(CONFIG_TARGET_HOURS): str,\n vol.Required(CONFIG_TARGET_TYPE, default=\"Continuous\"): vol.In({\n \"Continuous\": \"Continuous\",\n \"Intermittent\": \"Intermittent\"\n }),\n vol.Optional(CONFIG_TARGET_START_TIME): str,\n vol.Optional(CONFIG_TARGET_END_TIME): str,\n})","sub_path":"custom_components/octopus_energy/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367393100","text":"\nimport os\nimport platform\nimport copy\nimport json\nimport pyblish.api\n\n\nclass SubmitDeadlineStandIn(pyblish.api.InstancePlugin):\n\n order = pyblish.api.ExtractorOrder + 0.2\n hosts = [\"maya\"]\n label = \"Deadline Stand-In\"\n\n families = [\n \"reveries.standin\",\n ]\n\n targets = [\"deadline\"]\n\n def process(self, instance):\n import reveries\n\n reveries_path = reveries.__file__\n script_file = os.path.join(os.path.dirname(reveries_path),\n \"scripts\",\n \"deadline_standin_by_frame.py\")\n\n instance.data[\"submitted\"] = True\n\n context = instance.context\n\n if not all(result[\"success\"] for result in context.data[\"results\"]):\n self.log.warning(\"Atomicity not held, aborting.\")\n return\n\n # Context data\n\n username = context.data[\"user\"]\n comment = context.data.get(\"comment\", \"\")\n project = context.data[\"projectDoc\"]\n asset = context.data[\"assetDoc\"][\"name\"]\n\n fpath = context.data[\"currentMaking\"]\n workspace = context.data[\"workspaceDir\"]\n maya_version = context.data[\"mayaVersion\"]\n\n project_id = str(project[\"_id\"])[-4:].upper()\n project_code = project[\"data\"].get(\"codename\") or project_id\n fname = os.path.basename(fpath)\n\n batch_name = \"({projcode}): [{asset}] {filename}\".format(\n projcode=project_code,\n asset=asset,\n filename=fname\n )\n\n # Instance 
data\n\n subset = instance.data[\"subset\"]\n version = instance.data[\"versionNext\"]\n\n deadline_pool = instance.data[\"deadlinePool\"]\n deadline_prio = instance.data[\"deadlinePriority\"]\n deadline_group = instance.data.get(\"deadlineGroup\")\n\n frame_start = int(instance.data[\"startFrame\"])\n frame_end = int(instance.data[\"endFrame\"])\n frame_step = int(instance.data[\"byFrameStep\"])\n\n frames = \"{start}-{end}x{step}\".format(\n start=frame_start,\n end=frame_end,\n step=frame_step,\n )\n\n job_name = \"{subset} v{version:0>3}\".format(\n subset=subset,\n version=version,\n )\n\n # Assemble payload\n\n payload = {\n \"JobInfo\": {\n \"Plugin\": \"MayaBatch\",\n \"BatchName\": batch_name, # Top-level group name\n \"Name\": job_name,\n \"UserName\": username,\n \"MachineName\": platform.node(),\n \"Comment\": comment,\n \"Pool\": deadline_pool,\n \"Priority\": deadline_prio,\n \"Group\": deadline_group,\n\n \"Frames\": frames,\n \"ChunkSize\": 1,\n\n \"ExtraInfo0\": project[\"name\"],\n },\n \"PluginInfo\": {\n\n \"ScriptJob\": True,\n \"ScriptFilename\": script_file,\n\n # Input\n \"SceneFile\": fpath,\n # Resolve relative references\n \"ProjectPath\": workspace,\n # Mandatory for Deadline\n \"Version\": maya_version,\n },\n # Mandatory for Deadline, may be empty\n \"AuxFiles\": [],\n \"IdOnly\": True\n }\n\n if instance.data.get(\"hasYeti\"):\n # Change Deadline group for Yeti\n payload[\"JobInfo\"][\"Group\"] = \"yeti_render\"\n\n # Environment\n\n environment = self.assemble_environment(instance)\n\n if instance.data.get(\"hasAtomsCrowds\"):\n # Change Deadline group for AtomsCrowd\n payload[\"JobInfo\"][\"Group\"] = \"atomscrowd\"\n else:\n # AtomsCrowd module path is available for every machine by\n # default, so we must remove it if this renderLayer does\n # not require AtomsCrowd plugin. Or the license will not\n # be enough for other job that require Atoms.\n module_path = environment[\"MAYA_MODULE_PATH\"]\n filtered = list()\n for path in module_path.split(\";\"):\n if \"AtomsMaya\" not in path:\n filtered.append(path)\n environment[\"MAYA_MODULE_PATH\"] = \";\".join(filtered)\n\n parsed_environment = {\n \"EnvironmentKeyValue%d\" % index: u\"{key}={value}\".format(\n key=key,\n value=environment[key]\n ) for index, key in enumerate(environment)\n }\n payload[\"JobInfo\"].update(parsed_environment)\n\n self.log.info(\"Submitting.. 
%s\" % instance)\n self.log.info(json.dumps(\n payload, indent=4, sort_keys=True)\n )\n\n # Submit\n\n submitter = context.data[\"deadlineSubmitter\"]\n index = submitter.add_job(payload)\n\n # Publish script\n\n payload = copy.deepcopy(payload)\n\n script_file = os.path.join(os.path.dirname(reveries_path),\n \"scripts\",\n \"deadline_publish.py\")\n # Clean up\n payload[\"JobInfo\"].pop(\"Frames\")\n payload[\"JobInfo\"].pop(\"ChunkSize\")\n # Update\n payload[\"JobInfo\"].update({\n \"Name\": \"|| Publish: \" + payload[\"JobInfo\"][\"Name\"],\n \"JobDependencies\": index,\n \"InitialStatus\": \"Active\",\n })\n payload[\"PluginInfo\"].update({\n \"ScriptJob\": True,\n \"ScriptFilename\": script_file,\n })\n\n submitter.add_job(payload)\n\n def assemble_environment(self, instance):\n \"\"\"Compose submission required environment variables for instance\n\n Return:\n environment (dict)\n\n \"\"\"\n submitter = instance.context.data[\"deadlineSubmitter\"]\n environment = submitter.instance_env(instance)\n\n # From current environment\n for var in [\n \"MAYA_MODULE_PATH\",\n \"ARNOLD_PLUGIN_PATH\",\n ]:\n environment[var] = os.getenv(var, \"\")\n\n # Remote data json file path\n environment[\"REMOTE_DATA_PATH\"] = instance.data[\"remoteDataPath\"]\n\n return environment\n","sub_path":"plugins/maya/publish/submit_deadline_standin.py","file_name":"submit_deadline_standin.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"332558787","text":"\"\"\"\n\"\"\"\nimport argparse\nimport glob\nimport logging\nimport re\nimport os\n\nimport cv2\nfrom mpi4py import MPI\nimport numpy as np\nfrom PIL import Image\nimport pkg_resources\nfrom tqdm import tqdm\n\n\nCOMM = MPI.COMM_WORLD\nRANK = COMM.Get_rank()\nSIZE = COMM.Get_size()\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('input', default=None, type=str,\n help='path to the trakem2 input file')\n parser.add_argument('output', default=None, type=str,\n help='path to write output images')\n parser.add_argument('--range', default=None, type=int, nargs=2,\n help='start and end of the image sequence to process')\n parser.add_argument('--fiji', default='fiji', type=str,\n help='path to ImageJ-linux64 executable')\n return parser.parse_args(args)\n\n\ndef split_aligntxt(align_txt, output_dir):\n \"\"\"Split a trakem2 input file evenly across MPI ranks.\n\n Parameters\n ----------\n align_txt : str\n Path to the trakem2 input file.\n output_dir : str\n Path to write subdivided trakem2 inputs.\n \"\"\"\n # Read in trakem2 commands from the contiguous input file.\n with open(align_txt, 'r') as f:\n lines = f.readlines()\n\n get_key = lambda x: int(re.search(r'S_(\\d+)_', x).group(1))\n key_line_dict = {get_key(l):l for l in lines}\n\n # For each input, extract the z-index of the input and group files on the\n # same z-plane.\n key_set = np.asarray(list(key_line_dict.keys()))\n key_sublists = np.array_split(key_set, SIZE)\n\n # For each rank, write the assigned commands to an independent file.\n for i, keys in enumerate(key_sublists):\n with open(os.path.join(output_dir, 'align_%d.txt' % i), 'w') as f:\n for key in keys:\n f.writelines(key_line_dict[key])\n\n\ndef get_keys(align_txt):\n \"\"\"Get the z-indices of a list of images.\n\n Parameters\n ----------\n align_txt : str\n Path to the trakem2 input file.\n\n Return\n ------\n key_sublists : list of list of int\n List of image indices split evenly into SIZE groups.\n 
\"\"\"\n # Get filenames to be exported.\n with open(align_txt, 'r') as f:\n lines = f.readlines()\n\n # Extract the image index.\n get_key = lambda x: int(re.search(r'S_(\\d+).*', x).group(1))\n keys = np.asarray([get_key(l) for l in lines])\n keys.sort()\n\n # Split images evenly amongst MPI ranks.\n key_sublists = np.array_split(keys, SIZE)\n return key_sublists\n\n\ndef mpi_export(input, output, image_range=None, fiji='fiji'):\n \"\"\"Export a trakem2 project as images.\n\n Parameters\n ----------\n input : str\n Path to the trakem2 input file.\n output : str\n Path to write subdivided trakem2 inputs.\n image_range :\n fiji : str\n Path to an ImageJ-linux64 executable.\n \"\"\"\n # On rank 0, create the output directory, get the path to the ImageJ\n # script to run, and split the commands enevly among the ranks.\n if RANK == 0:\n os.makedirs(output, exist_ok=True)\n resource_path = 'ext/export.bsh'\n bsh_path = pkg_resources.resource_filename(__name__, resource_path)\n logging.warning('macro: %s', bsh_path)\n\n # Select and split a subset of images if a range is passed.\n if not image_range:\n key_sublist = get_keys(input)\n else:\n sub_range = image_range\n keys = np.asarray(range(sub_range[0], sub_range[1]))\n key_sublist = np.array_split(keys, SIZE)\n\n begin = get_keys(input)[0][0]\n else:\n pass\n key_sublist = None\n bsh_path = None\n begin = None\n\n # Synchronize and send data to all ranks.\n bsh_path = COMM.bcast(bsh_path, 0)\n key_sublist = COMM.scatter(key_sublist, 0)\n begin = COMM.bcast(begin, 0)\n\n # Set up the ImageJ command to run.\n print(key_sublist)\n command = '%s -Xms6g -Xmx6g --headless -Dinput=%s -Doutput=%s -Drange=%s -Dbegin=%d -- --no-splash %s' % (\n fiji, input, output, '%d,%d' % (key_sublist[0], key_sublist[-1]), begin, bsh_path)\n\n # Run the command on each rank.\n print(command)\n os.system(command)\n\n\ndef main():\n args = parse_args()\n mpi_export(args.input,\n args.output,\n args.range,\n fiji=args.fiji)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"happyneuron/trakem2/mpi_export.py","file_name":"mpi_export.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"94170390","text":"from ..dataset import DataSet\nfrom .basenode import BaseNode\nimport numpy as np\nfrom functools import reduce\n\ndef _ch_idx(channels, names):\n '''Construct a set of channel indices, given a list of mixed integer indices\n and string names.'''\n if channels is None:\n return set([])\n else:\n return set([names.index(ch) if type(ch) == str else ch\n for ch in channels])\n\nclass EEGMontage(BaseNode):\n '''\n This node can be used to specify an EEG montage, e.g. which electrodes\n record EEG and EOG, which reference(s) to use and which channels are\n considered 'bad' and should not be used. Multiple reference types\n can be specified.\n\n TODO: option to load montage from file\n\n Channels can be specified either as a string name, or an integer index.\n\n Parameters\n ----------\n eeg : list of channels (default [])\n By default, all channels not specified as EOG or reference are regarded\n as EEG channels. Use this parameter to specify a subset of the channels\n to be regarded as EEG. Specify `None` to indicate no EEG channels are\n present.\n\n eog : list of channels (default None)\n Use this parameter to specify a subset of the channels to be regarded\n as EOG. 
When EOG channels are specified, the `calc_reog` parameter can\n        be used to calculate the radial EOG channel. Specify `None` or an empty\n        list to indicate no EOG channels are present.\n\n    bads : list of channels (default None)\n        Use this parameter to specify a subset of channels that are considered\n        'bad' and should not be used. These could for example be electrodes\n        with bad contact, or unusual artifacts. Bad channels are set to all\n        zeros and are not used to compute the CAR. Specify `None` or an empty\n        list to indicate no bad channels are present.\n\n    ref : list of channels (default [] = CAR)\n        Set to a single channel to use a single electrode as reference. Set to\n        a list of channels to use the mean of multiple electrodes as reference.\n        Set to an empty list to use CAR (common average reference, e.g. the\n        mean of all EEG channels). Specify `None` to indicate no referencing\n        should be done (for example if the signal has already been referenced).\n\n    bipolar : dict: str -> (channel, channel) (default None)\n        Specify electrode pairs, take the difference between them as signal.\n        Electrode pairs are specified in a dictionary, where each value is a\n        pair of two channels. The difference between them will be computed and\n        stored as a new channel, whose name is specified as the corresponding\n        string key in the dictionary.\n\n    heog : (channel, channel) (default None)\n        Specifies that these two channels record the horizontal EOG. The signal\n        will be referenced bipolarly and stored as a new channel named 'hEOG'.\n\n    veog : (channel, channel) (default None)\n        Specifies that these two channels record the vertical EOG. The signal\n        will be referenced bipolarly and stored as a new channel named 'vEOG'.\n\n    calc_reog : bool (default False)\n        When set to `True`, the rEOG component is computed by taking the mean\n        of the EOG channels and subtracting the EEG reference. This only works\n        if EOG channels have been specified and the reference is not set to\n        `None`. The name of the rEOG channel is 'rEOG'.\n\n    drop : list of channels (default None)\n        Specifies channels to be dropped from the recording. For example, use\n        this to remove channels that are not connected to any electrode.\n\n    drop_ref : bool (default False)\n        By default, the reference channels are kept. 
Set this parameter to\n `True` to drop the reference channels.\n '''\n def __init__(self, eeg=[], eog=None, bads=None, ref=[], bipolar=None,\n heog=None, veog=None, calc_reog=False, drop=None, drop_ref=False):\n BaseNode.__init__(self)\n\n assert (eeg is None or hasattr(eeg, '__iter__')), \\\n 'Parameter eeg should either be None or a list'\n assert (eog is None or hasattr(eog, '__iter__')), \\\n 'Parameter eog should either be None or a list'\n assert (bads is None or hasattr(bads, '__iter__')), \\\n 'Parameter bads should either be None or a list'\n assert (ref is None or hasattr(ref, '__iter__')), \\\n 'Parameter ref should either be None or a list'\n assert (bipolar is None or type(bipolar) == dict), \\\n 'Parameter bipolar should either be None or a dictionary'\n if bipolar is not None:\n for channels in list(bipolar.values()):\n assert len(channels) == 2, ('Bipolar channels should be a '\n 'dictionary containing tuples as '\n 'values')\n assert (heog is None or (hasattr(heog, '__iter__') and len(heog) == 2)), \\\n 'Parameter heog should either be None or a tuple'\n assert (veog is None or (hasattr(veog, '__iter__') and len(veog) == 2)), \\\n 'Parameter veog should either be None or a tuple'\n\n self.eeg = eeg\n self.eog = None if eog == [] else eog\n self.bads = None if bads == [] else bads\n self.ref = ref\n self.bipolar = None if bipolar == {} else bipolar\n self.heog = heog\n self.veog = veog\n self.calc_reog = calc_reog\n self.drop = None if drop == [] else drop\n self.drop_ref = drop_ref\n\n def apply_(self, d):\n self.all_channels = set(range(d.data.shape[0]))\n\n # EEG channels\n if self.eeg == []:\n self.eeg_idx = set(self.all_channels)\n elif self.eeg is not None:\n self.eeg_idx = _ch_idx(self.eeg, d.feat_lab[0])\n else:\n self.eeg_idx = set([])\n\n # Channels to drop\n self.drop_idx = _ch_idx(self.drop, d.feat_lab[0])\n # Remove dropped channels from EEG index\n self.eeg_idx -= self.drop_idx\n\n # Other EOG channels\n self.eog_idx = _ch_idx(self.eog, d.feat_lab[0])\n # EOG channels are not EEG channels\n self.eeg_idx -= self.eog_idx\n\n # hEOG and vEOG channels\n \n self.heog_idx = _ch_idx(self.heog, d.feat_lab[0])\n # hEOG channels are EOG channels\n self.eog_idx = self.eog_idx.union(self.heog_idx)\n self.eeg_idx -= self.heog_idx\n \n self.veog_idx = _ch_idx(self.veog, d.feat_lab[0])\n # vEOG channels are EOG channels\n self.eog_idx = self.eog_idx.union(self.veog_idx)\n self.eeg_idx -= self.veog_idx\n\n # Bad channels\n self.bads_idx = _ch_idx(self.bads, d.feat_lab[0])\n\n # Reference channels\n self.ref_idx = _ch_idx(self.ref, d.feat_lab[0])\n # Ref channels are not EEG channels\n self.eeg_idx -= self.ref_idx\n # Ref channels are not EOG channels\n self.eog_idx -= self.ref_idx\n\n # Bipolar references\n if self.bipolar is not None:\n self.bipolar_idx = {}\n for name, channels in list(self.bipolar.items()):\n self.bipolar_idx[name] = _ch_idx(channels, d.feat_lab[0])\n self.bipolar_idx_set = \\\n reduce(lambda a,b: a.union(b), list(self.bipolar_idx.values()))\n else:\n self.bipolar_idx = {}\n self.bipolar_idx_set = set([])\n\n # Collect all the channels used as reference at some point\n self.drop_ref_idx = set.union(self.ref_idx, self.veog_idx,\n self.heog_idx, self.bipolar_idx_set)\n\n # Start applying references\n data = d.data.copy()\n\n # Set bad channels to zero\n if self.bads is not None:\n data[list(self.bads_idx), :] = 0\n\n # Calculate reference signal\n if self.ref is None:\n ref = None\n elif self.ref == []:\n # CAR\n ref = np.mean(d.data[list(self.eeg_idx - 
self.bads_idx), :], axis=0)\n else:\n ref = np.mean(d.data[list(self.ref_idx), :], axis=0)\n\n # Reference signal (do not reference the reference and bad channels)\n if ref is not None:\n data[list(self.all_channels-self.ref_idx-self.bads_idx), :] -= ref\n\n # Bipolar channels\n if self.bipolar is None:\n bipolar = None\n else:\n bipolar = {}\n for name, channels in list(self.bipolar_idx.items()):\n channels = list(channels)\n bipolar[name] = data[channels[0],:] - data[channels[1],:]\n\n # Calculate hEOG and vEOG\n if self.heog is None:\n heog = None\n else:\n heog = (data[list(self.heog_idx)[0], :] -\n data[list(self.heog_idx)[1], :])\n\n if self.veog is None:\n veog = None\n else:\n veog = (data[list(self.veog_idx)[0], :] -\n data[list(self.veog_idx)[1], :])\n\n # Calculate the rEOG if possible (and desired)\n if self.calc_reog:\n assert len(self.eog_idx) > 0, \\\n 'Must specify EOG channels in order to calculate rEOG'\n reog = np.mean(data[list(self.eog_idx),:], axis=0)\n else:\n reog = None\n\n\n # Drop ref channels from EEG and EOG list if requested\n if self.drop_ref:\n drop_idx = self.drop_idx.union(self.drop_ref_idx)\n else:\n drop_idx = self.drop_idx\n\n # Drop the channels that should be dropped\n data = data[list(self.all_channels - drop_idx), :]\n ch_names = [d.feat_lab[0][ch] for ch in self.all_channels\n if ch not in drop_idx]\n\n # Put everything in a DataSet\n data = [data]\n \n if bipolar is not None:\n for name, channel in list(bipolar.items()):\n data.append(channel[np.newaxis, :])\n ch_names.append(name)\n\n if heog is not None:\n data.append(heog[np.newaxis, :])\n ch_names.append('hEOG')\n if veog is not None:\n data.append(veog[np.newaxis, :])\n ch_names.append('vEOG')\n if reog is not None:\n data.append(reog[np.newaxis, :])\n ch_names.append('rEOG')\n if ref is not None and not self.drop_ref:\n data.append(ref[np.newaxis, :])\n ch_names.append('REF')\n\n data = np.vstack(data)\n\n return DataSet(data=data, feat_lab=[ch_names], default=d)\n\n","sub_path":"psychic/nodes/eeg_montage.py","file_name":"eeg_montage.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"62847882","text":"#!/usr/bin/env python\nimport argparse\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\nimport pickle\nimport optuna\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn.metrics\n\nimport util\nfrom gnn import GNN\n\ndef objective_with_dataset(dataset):\n\n def objective(trial):\n num_layers = trial.suggest_int('num_layers', 1, 10)\n num_mlp_layers = trial.suggest_int('num_mlp_layers', 1, 10)\n hidden_dim = trial.suggest_int('hidden_dim', 16, 128)\n final_dropout = trial.suggest_uniform('final_dropout', 0, 0.5)\n graph_pooling_type = trial.suggest_categorical('graph_pooling_type', ['max', 'average', 'sum'])\n neighbor_pooling_type = trial.suggest_categorical('neighbor_pooling_type', ['max', 'average', 'sum'])\n batchsize = trial.suggest_int('batchsize', 16, 128)\n\n device = chainer.get_device(0)\n # Classification\n model = GNN(num_layers, num_mlp_layers, dataset.graphs[0].node_features.shape[1],\n hidden_dim, dataset.graphs[0].node_features.shape[1], final_dropout,\n graph_pooling_type, neighbor_pooling_type, \"Regression\")\n\n # choose the using device\n model.to_device(device)\n device.use()\n\n # Setup an optimizer\n optimizer = 
chainer.optimizers.Adam()\n optimizer.setup(model)\n\n # split the dataset into traindata and testdata\n train, test = chainer.datasets.split_dataset_random(dataset, int(dataset.__len__() * 0.9))\n train_iter = chainer.iterators.SerialIterator(train, batchsize)\n test_iter = chainer.iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)\n\n # Set up a trainer\n updater = training.updaters.StandardUpdater(train_iter, optimizer, device=device, converter=dataset.converter)\n trainer = training.Trainer(updater, (300, 'epoch'), out= \"result/hypara/regression\")\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(extensions.Evaluator(test_iter, model, device=device, converter=dataset.converter))\n\n trainer.extend(extensions.LogReport(filename='log_{}.dat'.format(trial.number)))\n trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss_{}.png'.format(trial.number)))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy_{}.png'.format(trial.number)))\n\n # Run the training\n trainer.run()\n\n # save the model ?\n # chainer.serializers.save_npz('./result/hypara/regression/{0}.model'.format(trial.number), model)\n\n # return the AUC\n graphs, target = dataset.converter(test, device)\n with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):\n y_pred = model(graphs)\n y_pred.to_cpu()\n y_pred = y_pred.array\n target = chainer.cuda.to_cpu(target)\n\n try:\n value = sklearn.metrics.mean_squared_error(target, y_pred)\n except ValueError:\n value = 10e9\n\n return value\n\n return objective\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='The simple implementation of GIN using sparse matrix multiplication')\n parser.add_argument('--dataset', type=str, default=\"mixed\",\n help='name of dataset (default: mixed)')\n parser.add_argument('--degree_as_tag', type=str, default=\"binary\",\n \t\thelp='let the input node features be the degree of nodes (heuristics for unlabeled graph)')\n parser.add_argument('--dataset_num', type=int, default=10000, help='# of dataset')\n group = parser.add_argument_group('deprecated arguments')\n group.add_argument('--gpu', '-g', dest='device', type=int, nargs='?', const=0, help='GPU ID (negative value indicates CPU)')\n args = parser.parse_args()\n\n # load Grapdata\n device = chainer.get_device(0)\n dataset = util.GraphData(args.dataset, args.degree_as_tag, \"Regression\", args.dataset_num, device)\n\n study_name = 'regression-study'\n study = optuna.create_study(study_name=study_name, storage='sqlite:///regression.db', load_if_exists=True)\n study.optimize(objective_with_dataset(dataset), n_trials=100)\n\n print(study.best_params, study.best_value)\n hist_df = study.trials_dataframe()\n hist_df.to_csv(\"hypara_search.csv\")\n","sub_path":"hypara_regression.py","file_name":"hypara_regression.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378863832","text":"#!/usr/bin/env python\nfrom tools.load import LoadMatrix\nfrom numpy import random\nlm=LoadMatrix()\n\nrandom.seed(17)\nfrom tools.multiclass_shared import prepare_data\n[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)\n\nparameter_list = [[traindat, label_traindat, testdat, label_testdat]]\n\ndef 
evaluation_multiclassovrevaluation_modular (traindat, label_traindat, testdat, label_testdat):\n\tfrom shogun.Features import MulticlassLabels\n\tfrom shogun.Evaluation import MulticlassOVREvaluation,ROCEvaluation\n\tfrom modshogun import MulticlassLibLinear,RealFeatures,ContingencyTableEvaluation,ACCURACY\n\tfrom shogun.Mathematics import Math\n\t\n\tMath.init_random(1)\n\n\tground_truth_labels = MulticlassLabels(label_traindat)\n\tsvm = MulticlassLibLinear(1.0,RealFeatures(traindat),MulticlassLabels(label_traindat))\n\tsvm.train()\n\tpredicted_labels = svm.apply()\n\t\n\tbinary_evaluator = ROCEvaluation()\n\tevaluator = MulticlassOVREvaluation(binary_evaluator)\n\tmean_roc = evaluator.evaluate(predicted_labels,ground_truth_labels)\n\t#print mean_roc\n\t\n\tbinary_evaluator = ContingencyTableEvaluation(ACCURACY)\n\tevaluator = MulticlassOVREvaluation(binary_evaluator)\n\tmean_accuracy = evaluator.evaluate(predicted_labels,ground_truth_labels)\n\t#print mean_accuracy\n\n\treturn mean_roc, mean_accuracy, predicted_labels, svm\n\n\nif __name__=='__main__':\n\tprint('MulticlassOVREvaluation')\n\tevaluation_multiclassovrevaluation_modular(*parameter_list[0])\n\n","sub_path":"examples/undocumented/python_modular/evaluation_multiclassovrevaluation_modular.py","file_name":"evaluation_multiclassovrevaluation_modular.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"131230580","text":"'''\nMOD03: geolocation data\n- Produce latitude/longitude data set ('Latitude', 'Longitude')\n- Show color bar for all graphs\n- Sun field geometry (imshow the values over an area)\n - Viewing zenith angle ('SensorZenith')\n - Relative azimuthal ('SensorAzimuth'-'SolarAzimuth')\n - Solar zenith angle ('SolarZenith')\n- Function to crop area out of modis file from lat lon\n'''\n\nfrom read_MODIS_02 import * #includes matplotlib and numpy\n\n# #used by get functions(filename can be modified by choose_file function)\n# geo_files = {'high_lat' :'/Users/vllgsbr2/Desktop/MODIS_Training/Data/high_latitude/MOD03.A2018248.0450.061.2018248114733.hdf',\n# 'toronto' :'/Users/vllgsbr2/Desktop/MODIS_Training/Data/toronto_09_05_18/MOD03.A2018248.1630.061.2018248230625.hdf',\n# 'maracaibo' :'/Users/vllgsbr2/Desktop/MODIS_Training/Data/venezuela_08_21_18/MOD03.A2018233.1545.061.2018233214936.hdf',\n# 'twhs' :'/Users/vllgsbr2/Desktop/MODIS_Training/Data/03032015TWHS/MOD03.A2015062.1645.061.2017319034323.hdf'}\n# filename = geo_files['high_lat']\n\nfieldnames_list = ['SolarZenith', 'SensorZenith', 'SolarAzimuth',\\\n 'SensorAzimuth', 'Latitude', 'Longitude']\n\n#create dictionaries for angles (used by get functions)\nsolar_zenith = {}\nsensor_zenith = {}\nsolar_azimuth = {}\nsensor_azimuth = {}\n\ndef get_solarZenith(filename):\n #obtain field information to grab scales/offsets\n SD_field_rawData = 1 #0 SD, 1 field & 2 returns raw data\n solar_zenith['scale_factor'] = get_data(filename, fieldnames_list[0], SD_field_rawData).attributes()['scale_factor']\n\n #correct values by scales/offsets\n SD_field_rawData = 2 #0 SD, 1 field & 2 returns raw data\n solar_zenith['corrected_raw_data'] = get_data(filename, fieldnames_list[0], SD_field_rawData) * solar_zenith['scale_factor']\n\n return solar_zenith['corrected_raw_data']\n\ndef get_sensorZenith(filename):\n #obtain field information to grab scales/offsets\n SD_field_rawData = 1 #0 SD, 1 field & 2 returns raw data\n sensor_zenith['scale_factor'] = get_data(filename, fieldnames_list[1], 
SD_field_rawData).attributes()['scale_factor']\n\n #correct values by scales/offsets\n SD_field_rawData = 2 #0 SD, 1 field & 2 returns raw data\n sensor_zenith['corrected_raw_data'] = get_data(filename, fieldnames_list[1], SD_field_rawData) * sensor_zenith['scale_factor']\n\n return sensor_zenith['corrected_raw_data']\n\ndef get_solarAzimuth(filename):\n #obtain field information to grab scales/offsets\n SD_field_rawData = 1 #0 SD, 1 field & 2 returns raw data\n solar_azimuth['scale_factor'] = get_data(filename, fieldnames_list[2], SD_field_rawData).attributes()['scale_factor']\n\n #correct values by scales/offsets\n SD_field_rawData = 2 #0 SD, 1 field & 2 returns raw data\n solar_azimuth['corrected_raw_data'] = get_data(filename, fieldnames_list[2], SD_field_rawData) * solar_azimuth['scale_factor']\n\n return solar_azimuth['corrected_raw_data']\n\ndef get_sensorAzimuth(filename):\n #obtain field information to grab scales/offsets\n SD_field_rawData = 1 #0 SD, 1 field & 2 returns raw data\n sensor_azimuth['scale_factor'] = get_data(filename, fieldnames_list[3], SD_field_rawData).attributes()['scale_factor']\n\n #correct values by scales/offsets\n SD_field_rawData = 2 #0 SD, 1 field & 2 returns raw data\n sensor_azimuth['corrected_raw_data'] = get_data(filename, fieldnames_list[3], SD_field_rawData) * sensor_azimuth['scale_factor']\n\n return sensor_azimuth['corrected_raw_data']\n\ndef get_relativeAzimuth(filename):\n relative_azimuth = get_sensorAzimuth(filename) - get_solarAzimuth(filename)\n return relative_azimuth\n\ndef get_lat(filename):\n SD_field_rawData = 2\n lat = get_data(filename, fieldnames_list[4], SD_field_rawData)\n\n return lat\n\ndef get_lon(filename):\n SD_field_rawData = 2\n lon = get_data(filename, fieldnames_list[5], SD_field_rawData)\n\n return lon\n\ndef get_scattering_angle(filename):\n vza = np.deg2rad(get_sensorAzimuth(filename))\n sza = np.deg2rad(get_solarZenith(filename))\n raa = np.deg2rad(get_relativeAzimuth(filename))\n cos_scat = np.cos(sza) * np.cos(vza) + np.sin(sza) * np.sin(vza) * np.cos(raa)\n scatter_angle = np.rad2deg(np.arccos(cos_scat))\n\n return scatter_angle\n\nif __name__ == '__main__':\n\n filename_MOD_03 = '/home/javi/MODIS_Training/MOD03.A2019121.1515.061.2019122040718.hdf'\n scat_angle = get_scattering_angle(filename_MOD_03)\n plt.imshow(scat_angle, cmap='jet')\n plt.colorbar()\n plt.show()\n\n # #plot\n # fig, axes = plt.subplots(ncols=3)\n # cmap = 'jet'\n #\n # plot_1 = axes[0].imshow(get_solarZenith(filename_MOD_03), cmap = cmap)\n # axes[0].set_title('Solar Zenith Angle\\n[degrees]')\n #\n # plot_2 = axes[1].imshow(get_sensorZenith(filename_MOD_03), cmap = cmap)\n # axes[1].set_title('Sensor Zenith Angle\\n[degrees]')\n #\n # plot_3 = axes[2].imshow(get_relativeAzimuth(filename_MOD_03), cmap = cmap, vmin=-260, vmax=-210)\n # axes[2].set_title('Relative Azimuthal Angle\\n[degrees]')\n #\n # fig.colorbar(plot_1, ax=axes[0])\n # fig.colorbar(plot_2, ax=axes[1])\n # fig.colorbar(plot_3, ax=axes[2])\n #\n # fig1, axes1 = plt.subplots(ncols=2)\n #\n # plot_11 = axes1[0].imshow(get_lon(), cmap = cmap)\n # axes1[0].set_title('Longitude\\n[degrees]')\n # plot_22 = axes1[1].imshow(get_lat(), cmap = cmap)\n # axes1[1].set_title('Latitude\\n[degrees]')\n #\n # fig1.colorbar(plot_1, ax=axes1[0])\n # fig1.colorbar(plot_2, ax=axes1[1])\n #\n # plt.show()\n\n # #debugging tools\n # file = SD('/Users/vllgsbr2/Desktop/MODIS_Training/Data/03032015TWHS/MOD03.A2015062.1645.061.2017319034323.hdf')\n # data = file.select('EV_500_Aggr1km_RefSB')\n # 
pprint.pprint(data.attributes()) #tells me scales, offsets and bands\n # pprint.pprint(file.datasets()) # shows data fields in file from SD('filename')\n","sub_path":"test_thresholds/read_MODIS_03.py","file_name":"read_MODIS_03.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458151349","text":"__author__ = 'Tkwk33'\n\nimport time\n\nwith open(\"enable1.txt\", \"r\") as f:\n data = set([e.strip().lower() for e in f])\n\n\ndef two_word_pali(dic, counter=0):\n for firstw in dic:\n for secondw in dic:\n string = firstw + secondw\n if string == string[::-1]:\n counter += 1\n print('{} {}'.format(firstw, secondw))\n print('There are {} palindromes in the dictionary.'.format(counter))\n\nif __name__ == '__main__':\n start = time.time()\n print('Searching...')\n two_word_pali(data)\n print('and it took {} to find them.'.format(time.time()-start))\n\n\n","sub_path":"Two words Palindromes.py","file_name":"Two words Palindromes.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"73108691","text":"# import Kratos\n\nimport os\nimport sys\n\nsys.path.append(os.path.join('..', '..', '..'))\n\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.GeoMechanicsApplication import *\n\n# Import Kratos \"wrapper\" for unittests\nimport KratosMultiphysics.KratosUnittest as KratosUnittest\n\n# Import the tests o test_classes to create the suits\nfrom generalTests import KratosGeoMechanicsGeneralTests\nfrom test_excavation import KratosGeoMechanicsExcavationTests\nfrom test_interface import KratosGeoMechanicsInterfaceTests\nfrom test_reset_displacement import KratosGeoMechanicsResetDisplacementTests\nfrom test_benchmark_set_1 import KratosGeoMechanicsBenchmarkSet1\nfrom test_benchmark_set_2 import KratosGeoMechanicsBenchmarkSet2\nfrom test_soil_structure_interactions import KratosGeoMechanicsSoilStructureInteractionTests\nfrom test_water_pressure import KratosGeoMechanicsWaterPressureTests\nfrom test_dynamics import KratosGeoMechanicsDynamicsTests\nfrom test_elements import KratosGeoMechanicsElementTypeTests\nfrom test_steady_state_groundwater_flow import KratosGeoMechanicsSteadyStateGroundWaterFlowTests\nfrom test_soil_weight import KratosGeoMechanicsSoilWeightTests\n\n\ndef AssambleTestSuites(is_team_city):\n ''' Populates the test suites to run.\n\n Populates the test suites to run. 
At least, it should populate the suites:\n    \"small\", \"nightly\" and \"all\"\n\n    Return\n    ------\n\n    suites: A dictionary of suites\n        The set of suites with their test_cases added.\n    '''\n\n    # Create an array with the selected tests (Small tests):\n    # smallSuite will contain the following tests:\n    # - testSmallExample\n\n    small_test_cases = [\n        KratosGeoMechanicsGeneralTests,\n        KratosGeoMechanicsExcavationTests,\n        KratosGeoMechanicsInterfaceTests,\n        KratosGeoMechanicsResetDisplacementTests,\n        KratosGeoMechanicsSoilStructureInteractionTests,\n        KratosGeoMechanicsWaterPressureTests,\n        KratosGeoMechanicsBenchmarkSet1,\n        KratosGeoMechanicsBenchmarkSet2,\n        KratosGeoMechanicsElementTypeTests,\n        KratosGeoMechanicsSteadyStateGroundWaterFlowTests,\n        KratosGeoMechanicsSoilWeightTests\n    ]\n\n    # Create an array with the selected tests\n    # nightSuite will contain the following tests:\n    # - testSmallExample\n    # - testNightlyFirstExample\n    # - testNightlySecondExample\n\n    night_test_cases = [KratosGeoMechanicsDynamicsTests]\n    night_test_cases.extend(small_test_cases)\n\n    # Create an array that contains all the tests from every testCase\n    # in the list:\n\n    all_test_cases = []\n    all_test_cases.extend(night_test_cases)\n\n    # add the tests to the corresponding suite\n    if is_team_city:\n\n        smallSuite = unittest.TestSuite()\n        nightSuite = unittest.TestSuite()\n        allSuite = unittest.TestSuite()\n\n        for test in small_test_cases:\n            smallSuite.addTests(unittest.TestLoader().loadTestsFromTestCase(\n                test))\n\n        for test in night_test_cases:\n            nightSuite.addTests(unittest.TestLoader().loadTestsFromTestCase(\n                test))\n\n        for test in all_test_cases:\n            allSuite.addTests(unittest.TestLoader().loadTestsFromTestCase(\n                test))\n\n        suites = allSuite\n    else:\n        suites = KratosUnittest.KratosSuites\n        smallSuite = suites['small']\n        nightSuite = suites['nightly']\n        allSuite = suites['all']\n\n        smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases(small_test_cases))\n        nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases(night_test_cases))\n        allSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases(all_test_cases))\n\n    return suites\n\n\nif __name__ == '__main__':\n    is_team_city = False\n\n    try:\n        from teamcity import is_running_under_teamcity\n        from teamcity.unittestpy import TeamcityTestRunner\n\n        is_team_city = is_running_under_teamcity()\n    except ImportError:\n        pass\n\n    if is_team_city:\n        import unittest\n        runner = TeamcityTestRunner()\n        runner.run(AssambleTestSuites(is_team_city))\n    else:\n        KratosUnittest.runTests(AssambleTestSuites(is_team_city))\n\n","sub_path":"applications/GeoMechanicsApplication/tests/test_GeoMechanicsApplication.py","file_name":"test_GeoMechanicsApplication.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"530849143","text":"# -*- coding:utf-8 -*-\n# draw simple figures\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef simple_line_plot(x, y, figure_no):\n    plt.figure(figure_no)\n    plt.plot(x, y)\n    plt.xlabel(\"x values\")\n    plt.ylabel('y values')\n    plt.title(\"Simple Line\")\n\n\ndef simple_dots(x, y, figure_no):\n    plt.figure(figure_no)\n    plt.plot(x, y, \"or\")\n    plt.xlabel(\"x values\")\n    plt.ylabel('y values')\n    plt.title(\"Simple dots\")\n\n\ndef simple_scatters(x, y, figure_no):\n    plt.figure(figure_no)\n    plt.scatter(x, y)\n    plt.xlabel(\"x values\")\n    plt.ylabel('y values')\n    plt.title(\"Simple Scatter\")\n\n\ndef scatter_with_color(x, y, labels, figure_no):\n    
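# c=labels maps each integer class label to a distinct color\n    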
plt.figure(figure_no)\n    plt.scatter(x, y, c=labels)\n    plt.xlabel(\"x values\")\n    plt.ylabel('y values')\n    plt.title(\"Scatter with color\")\n\n\ndef x_y_axis_labeling(x, y, x_labels, y_labels, figure_no):\n    plt.figure(figure_no)\n    plt.plot(x, y, \"+r\")\n    plt.margins(0.2)\n    plt.xticks(x, x_labels, rotation='vertical')\n    plt.yticks(y, y_labels)\n\n\ndef plot_heat_map(x, figure_no):\n    plt.figure(figure_no)\n    plt.pcolor(x)\n    plt.colorbar()\n\n\nif __name__ == \"__main__\":\n    plt.close(\"all\")\n    # generate a line plot and a simple dot plot from sample x, y data\n    x = np.arange(1, 100, dtype=float)\n    y = np.array([np.power(xx, 2) for xx in x])\n\n    figure_no = 1\n    simple_line_plot(x, y, figure_no=figure_no)\n    figure_no += 1\n    simple_dots(x, y, figure_no=figure_no)\n\n    # generate scatter plots from sample x, y data\n    x = np.random.uniform(size=100)\n    y = np.random.uniform(size=100)\n\n    figure_no += 1\n    simple_scatters(x, y, figure_no)\n    figure_no += 1\n    label = np.random.randint(2, size=100)\n    scatter_with_color(x, y, label, figure_no)\n\n    x = np.array(range(1, 6))\n    y = np.array(range(100, 600, 100))\n    x_labels = [\"e1\", \"e2\", \"e3\", \"e4\", \"e5\"]\n    y_labels = [\"w1\", \"w2\", \"w3\", \"w4\", \"w5\"]\n    x_y_axis_labeling(x, y, x_labels, y_labels, 5)\n\n    x = np.random.normal(loc=0.5, scale=0.2, size=(10, 10))\n    plot_heat_map(x, 6)\n\n    plt.show()\n","sub_path":"python数据科学指南/第二章/matplotlibtest.py","file_name":"matplotlibtest.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"460933474","text":"from docker.docker import Docker\nfrom docker.config import Configuration,base_args\nimport argparse\n\ndef get_args():\n    parser = base_args()\n    parser.add_argument(\"-visual\", action='store_true',\n                        help='if you want to visualize your result, choose it.')\n    parser.add_argument(\"-save\", action='store_true',\n                        help='if you want to save your result, choose it.')\n    return parser\n\nif __name__ == \"__main__\":\n    args = get_args()\n    cfg = Configuration(args,'test')\n    doc = Docker(cfg)\n    doc.test(visualize=cfg.visual,save_result=cfg.save)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"585818218","text":"import numpy as np\n\n\n\nclass ECA:\n    def __init__(self, rule):\n        self.rule = rule  # int\n        self.wolfram = \"\".join(np.binary_repr(rule, 8))\n        self.fdict = {}\n        for i in range(8):\n            self.fdict[\"\".join(np.binary_repr(i, 3))] = self.wolfram[7-i]\n    \n    def simulate(self, init, time):\n        history = [init]\n        n = len(init)\n        state = init\n        for _ in range(time):\n            new_state = \"\"\n            for i in range(len(state)):\n                st = str(state[(i-1)%n]) + str(state[i%n]) + str(state[(i+1)%n])\n                new_state += self.fdict[st]\n            state = new_state\n            history.append(state)\n        return history\n    \ndef left_window(history, index, length):\n    window = []\n    for ind, _line in enumerate(history):\n        shift = _line[ind:] + _line[:ind]\n        print(\"line:\", _line)\n        print(\"shift:\", shift)\n        window.append(shift[index:index+length])\n    return window\n\n    \ndef right_window(history, index, length):\n    window = []\n    for ind, _line in enumerate(history):\n        shift = _line[-ind:] + _line[:-ind]\n        print(\"line:\", _line)\n        print(\"shift:\", shift)\n        window.append(shift[index:index+length])\n    return window\n\ndef window(history, index, length):\n    window = []\n    for _line in history:\n        window.append(_line[index:index+length])\n    return window\n    \n    \ndef check_automaton(in1, in2, automaton, 
time_threshold):\n state1, state2 = in1, in2\n history1 = automaton.simulate(state1, time_threshold)\n history2 = automaton.simulate(state2, time_threshold)\n left_window(history1, 0, 10)\n\n\ndef maj(x):\n m = sum([int(i) for i in x])/len(x)\n if m==0.5:\n return m\n if m < 0.5:\n return -1\n if m > 0.5:\n return 1\n\ndef generate_data(grid_size):\n out1, out2 = 1, 1\n while out1*out2 == 1:\n m1, m2 = 0.5, 0.5\n while m1 == 0.5:\n in1 = np.bitwise_xor(np.random.randint(0, 2, size=grid_size), np.random.randint(0, 2, size=grid_size))\n m1 = maj(in1)\n while m2 == 0.5:\n in2 = np.bitwise_xor(np.random.randint(0, 2, size=grid_size), np.random.randint(0, 2, size=grid_size))\n m2 = maj(in2)\n out1, out2 = maj(in1), maj(in2)\n return in1, in2, out1, out2\n \nRULES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, \n 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 50, 51, 54, 56, 57, 58, 60, 62, 72, 73, 74, 76, 77, 78, \n 90, 94, 104, 105, 106, 108, 110, 122, 126, 128, 130, 132, 134, 136, 138, 140, 142, 146, 150, 152, 154, 156, \n 160, 162, 164, 168, 170, 172, 178, 184, 200, 204, 232]\n\nrule = 10\n\ngrid_size = 15\ntime_threshold = 400\nrepeat=1\n\nautomaton = ECA(rule)\nhistory = automaton.simulate(\"011110101011111111\", 20)\nfor h in history:\n print(h)\nprint()\nfor w in right_window(history, 0, 8):\n print(w)\n# for i in range(repeat):\n# in1, in2, out1, out2 = generate_data(grid_size)\n# if not i % 20:\n# print(f\"{i}/{repeat}\")\n# for rule in RULES:\n# automaton = ECA(rule) \n# if check_automaton(in1, in2, automaton, time_threshold):\n# RULES.pop(RULES.index(rule))\n# print(RULES) \n\n\n \n \n ","sub_path":"computation_in_CAs/majority_task.py","file_name":"majority_task.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"171334871","text":"from copy import deepcopy\nfrom sys import version_info\n\nimport numpy as np\n\nfrom occamypy import Vector\n\n\nclass VectorNumpy(Vector):\n \"\"\"In-core python vector class\"\"\"\n \n def __init__(self, in_content):\n \"\"\"\n VectorNumpy constructor: arr = np.array\n This class stores array with C memory order (i.e., row-wise sorting)\n \"\"\"\n if isinstance(in_content, str): # Header file name passed to constructor\n self.arr = np.load(in_content, allow_pickle=True)\n elif isinstance(in_content, np.ndarray): # Numpy array passed to constructor\n if np.isfortran(in_content):\n raise TypeError('Input array not a C contiguous array!')\n self.arr = np.array(in_content, copy=False)\n self.ax_info = None\n elif isinstance(in_content, tuple): # Tuple size passed to constructor\n # self.arr = np.zeros(tuple(reversed(in_vec)))\n self.arr = np.empty(in_content)\n self.ax_info = None\n else: # Not supported type\n raise ValueError(\"ERROR! Input variable not currently supported!\")\n \n super(VectorNumpy, self).__init__()\n # Number of elements per axis (tuple). 
Checking also the memory order\n self.shape = self.arr.shape # If fortran the first axis is the \"fastest\"\n self.ndim = self.arr.ndim # Number of axes integer\n self.size = self.arr.size # Total number of elements\n \n def getNdArray(self):\n \"\"\"Function to return Ndarray of the vector\"\"\"\n return self.arr\n \n def norm(self, N=2):\n \"\"\"Function to compute vector N-norm using Numpy\"\"\"\n return np.linalg.norm(self.getNdArray().ravel(), ord=N)\n \n def zero(self):\n \"\"\"Function to zero out a vector\"\"\"\n self.getNdArray().fill(0)\n return self\n \n def max(self):\n \"\"\"Function to obtain maximum value in the vector\"\"\"\n return self.getNdArray().max()\n \n def min(self):\n \"\"\"Function to obtain minimum value in the vector\"\"\"\n return self.getNdArray().min()\n \n def set(self, val):\n \"\"\"Function to set all values in the vector\"\"\"\n self.getNdArray().fill(val)\n return self\n \n def scale(self, sc):\n \"\"\"Function to scale a vector\"\"\"\n self.getNdArray()[:] *= sc\n return self\n \n def addbias(self, bias):\n self.getNdArray()[:] += bias\n return self\n \n def rand(self, snr=1.):\n \"\"\"Fill vector with random number (~U[1,-1]) with a given SNR\"\"\"\n rms = np.sqrt(np.mean(np.square(self.getNdArray())))\n amp_noise = 1.0\n if rms > 0. and rms != np.inf:\n amp_noise = np.sqrt(3. / snr) * rms # sqrt(3*Power_signal/SNR)\n self.getNdArray()[:] = amp_noise * (2. * np.random.random(self.getNdArray().shape) - 1.)\n return self\n \n def clone(self):\n \"\"\"Function to clone (deep copy) a vector from a vector or a Space\"\"\"\n vec_clone = deepcopy(self) # Deep clone of vector\n # Checking if a vector space was provided\n if vec_clone.getNdArray().size == 0: # this is the shape of np.ndarray!\n vec_clone.arr = np.zeros(vec_clone.shape, dtype=self.getNdArray().dtype)\n return vec_clone\n \n def cloneSpace(self):\n \"\"\"Function to clone vector space only (vector without actual vector array by using empty array of size 0)\"\"\"\n vec_space = VectorNumpy(np.empty(0, dtype=self.getNdArray().dtype))\n # Cloning space of input vector\n vec_space.ndim = self.ndim\n vec_space.shape = self.shape\n vec_space.size = self.size\n return vec_space\n \n def checkSame(self, other):\n \"\"\"Function to check dimensionality of vectors\"\"\"\n return self.shape == other.shape\n \n def abs(self):\n self.getNdArray()[:] = np.abs(self.getNdArray())\n return self\n \n def sign(self):\n self.getNdArray()[:] = np.sign(self.getNdArray())\n return self\n \n def reciprocal(self):\n self.getNdArray()[:] = 1. 
/ self.getNdArray()\n return self\n \n def maximum(self, other):\n if np.isscalar(other):\n self.getNdArray()[:] = np.maximum(self.getNdArray(), other)\n return self\n elif isinstance(other, VectorNumpy):\n if not self.checkSame(other):\n raise ValueError('Dimensionality not equal: self = %s; other = %s' % (self.shape, other.shape))\n self.getNdArray()[:] = np.maximum(self.getNdArray(), other.getNdArray())\n return self\n else:\n raise TypeError(\"Provided input has to be either a scalar or a %s!\" % self.whoami)\n \n def conj(self):\n self.getNdArray()[:] = np.conjugate(self.getNdArray())\n return self\n \n def transpose(self):\n other = VectorNumpy(tuple(reversed(self.shape)))\n other[:] = self.getNdArray().T\n return other\n \n def pow(self, power):\n \"\"\"Compute element-wise power of the vector\"\"\"\n self.getNdArray()[:] = self.getNdArray() ** power\n return self\n \n def real(self):\n \"\"\"Return the real part of the vector\"\"\"\n self.getNdArray()[:] = self.getNdArray().real\n return self\n \n def imag(self, ):\n \"\"\"Return the imaginary part of the vector\"\"\"\n self.getNdArray()[:] = self.getNdArray().imag\n return self\n \n def copy(self, other):\n \"\"\"Function to copy vector from input vector\"\"\"\n # Checking whether the input is a vector or not\n if not isinstance(other, VectorNumpy):\n raise TypeError(\"Provided input vector not a %s!\" % self.whoami)\n # Checking dimensionality\n if not self.checkSame(other):\n raise ValueError('Dimensionality not equal: self = %s; other = %s' % (self.shape, other.shape))\n # Element-wise copy of the input array\n self.getNdArray()[:] = other.getNdArray()\n return self\n \n def scaleAdd(self, other, sc1=1.0, sc2=1.0):\n \"\"\"Function to scale a vector\"\"\"\n # Checking whether the input is a vector or not\n if not isinstance(other, VectorNumpy):\n raise TypeError(\"Provided input vector not a %s!\" % self.whoami)\n # Checking dimensionality\n if not self.checkSame(other):\n raise ValueError('Dimensionality not equal: self = %s; other = %s' % (self.shape, other.shape))\n # Performing scaling and addition\n self.getNdArray()[:] = sc1 * self.getNdArray() + sc2 * other.getNdArray()\n return self\n \n def dot(self, other):\n \"\"\"Function to compute dot product between two vectors\"\"\"\n # Checking whether the input is a vector or not\n if not isinstance(other, VectorNumpy):\n raise TypeError(\"Provided input vector not a %s!\" % self.whoami)\n # Checking size (must have same number of elements)\n if self.size != other.size:\n raise ValueError(\"Vector size mismatching: self = %d; other = %d\" % (self.size, other.size))\n # Checking dimensionality\n if not self.checkSame(other):\n raise ValueError('Dimensionality not equal: self = %s; other = %s' % (self.shape, other.shape))\n return np.vdot(self.getNdArray().ravel(), other.getNdArray().ravel())\n \n def multiply(self, other):\n \"\"\"Function to multiply element-wise two vectors\"\"\"\n # Checking whether the input is a vector or not\n if not isinstance(other, VectorNumpy):\n raise TypeError(\"Provided input vector not a %s!\" % self.whoami)\n # Checking size (must have same number of elements)\n if self.size != other.size:\n raise ValueError(\"Vector size mismatching: self = %s; other = %s\" % (self.size, other.size))\n # Checking dimensionality\n if not self.checkSame(other):\n raise ValueError('Dimensionality not equal: self = %s; other = %s' % (self.shape, other.shape))\n # Performing element-wise multiplication\n self.getNdArray()[:] = np.multiply(self.getNdArray(), 
other.getNdArray())\n return self\n \n def isDifferent(self, other):\n \"\"\"Function to check if two vectors are identical using built-in hash function\"\"\"\n # Checking whether the input is a vector or not\n if not isinstance(other, VectorNumpy):\n raise TypeError(\"Provided input vector not a %s!\" % self.whoami)\n # Using Hash table for python2 and numpy built-in function array_equal otherwise\n if version_info[0] == 2:\n # First make both array buffers read-only\n self.arr.flags.writeable = False\n other.arr.flags.writeable = False\n chcksum1 = hash(self.getNdArray().data)\n chcksum2 = hash(other.getNdArray().data)\n # Remake array buffers writable\n self.arr.flags.writeable = True\n other.arr.flags.writeable = True\n isDiff = (chcksum1 != chcksum2)\n else:\n isDiff = (not np.array_equal(self.getNdArray(), other.getNdArray()))\n return isDiff\n \n def clipVector(self, low, high):\n \"\"\"Function to bound vector values based on input vectors low and high\"\"\"\n if not isinstance(low, VectorNumpy):\n raise TypeError(\"Provided input low vector not a %s!\" % self.whoami)\n if not isinstance(high, VectorNumpy):\n raise TypeError(\"Provided input high vector not a %s!\" % self.whoami)\n self.getNdArray()[:] = np.minimum(np.maximum(low.getNdArray(), self.getNdArray()), high.getNdArray())\n return self\n","sub_path":"occamypy/numpy/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"397171933","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse\nfrom django.core import serializers\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views import generic\nfrom django.views.decorators.cache import never_cache\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\n\nfrom datetime import datetime\nimport simplejson as json\n\nfrom psq_django_project.celery import debug_task\nfrom celery.task.control import revoke\nfrom celery.result import AsyncResult\n\n\nfrom .models import Category, Question, Page, Choice, UserProfile, Document, Spectra, PSQFasta, PSQResults\nfrom .forms import CategoryForm, PageForm, UserProfileForm, DocumentForm, SpectraForm\nfrom .tasks import make_fasta, test_task, search_mzml\nfrom .bing_search import bing_search\nfrom .helpers import get_category_list\n\n\ndef index(request):\n\n category_list = Category.objects.order_by('-likes')[:5]\n page_list = Page.objects.order_by('-views')[:5]\n latest_question_list = Question.objects.order_by('-pub_date')[:5]\n\n context_dict = {'categories': category_list,\n 'pages': page_list,\n 'latest_questions': latest_question_list,\n 'time_now': datetime.now()}\n\n\n # Cookies\n visits = request.session.get('visits')\n if not visits:\n visits = 1\n reset_last_visit_time = False\n\n\n last_visit = request.session.get('last_visit')\n if last_visit:\n last_visit_time = datetime.strptime(last_visit[:-7], \"%Y-%m-%d %H:%M:%S\")\n\n if (datetime.now() - last_visit_time).seconds > 0:\n visits += 1\n reset_last_visit_time = True\n\n else:\n reset_last_visit_time = True\n\n if reset_last_visit_time:\n request.session['last_visit'] = str(datetime.now())\n request.session['visits'] = visits\n\n 
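# also expose the running visit counter to the template context\n    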
context_dict['visits'] = visits\n\n response = render(request, 'poll/index.html', context_dict)\n\n return response\n\n\ndef about(request):\n\n if request.session.get('visits'):\n count = request.session.get('visits')\n\n else:\n count = 0\n\n context_dict = {'visits': count}\n\n return render(request, template_name='poll/about.html', context=context_dict)\n\n@never_cache\ndef jobs(request):\n\n # USING CELERY API\n import requests as rq\n import pandas as pd\n import simplejson as json\n\n context_dict = {}\n\n server = 'http://localhost:5555'\n ext_1 = '/api/tasks?state=SUCCESS&limit=10'\n ext_2 = '/api/tasks?state=STARTED&limit=10'\n ret_1 = rq.get(server + ext_1, )\n ret_2 = rq.get(server + ext_2, )\n\n if ret_1.status_code == 200:\n\n p1 = pd.read_json(ret_1.text).transpose()\n\n\n\n #p.loc['state']\n #print(p.loc['state'])\n\n html_table_1 = p1.to_html(index=False).replace(' 1:\n filename = sys.argv[1]\nelse:\n filename = \"data.pickle\"\n\nrate = Rate(200)\n\nclass Hertz:\n def __init__(self, format, rollover):\n self.rollover = rollover\n self.format = format\n self.reset()\n\n def increment(self):\n self.count += 1\n\n if (self.count % self.rollover) == 0:\n now = time.monotonic()\n hz = self.count/(now - self.start_hz)\n self.count = 0\n self.start_hz = now\n print(self.format.format(hz))\n\n def reset(self):\n self.start_hz = time.monotonic()\n self.count = 0\n\ntry:\n start = time.monotonic()\n\n imuhz = Hertz(\">> imu {:0.1f} hz\",100)\n camhz = Hertz(\">> camera {:0.1f} hz\",20)\n\n while True:\n agmpt = s.read()\n if agmpt is None:\n print(f\"{Fore.RED}*** oops: No IMU ***{Fore.RESET}\")\n continue\n\n imuhz.increment()\n dt = time.monotonic() - start\n\n if (imuhz.count % 5) == 0:\n ok,f = camera.read()\n if ok:\n f = cv2.flip(f, -1) # Flip camera vertically, bad mounting\n # print(f.shape)\n ff = comp.compress(f)\n if ff is None:\n print(f\"{Fore.RED}*** oops: No Camera ***{Fore.RESET}\")\n continue\n\n agmpt += ((ff, f.shape), dt,)\n # print(f\">> image[{f.shape}]: {len(ff)}\")\n # camhz.increment()\n else:\n # append timestamp\n agmpt += (dt,)\n data.append(agmpt)\n rate.sleep()\n\nexcept KeyboardInterrupt:\n print(\"ctrl-C\")\nfinally:\n s.close()\n camera.close()\n # cv2.destroyAllWindows()\n\n if len(data) > 0:\n print(f\">> Collected {len(data)} data points, saving to {filename}\")\n savePickle(data, filename)\n #\n # if len(cam_data) > 0:\n # savePickle(cam_data, \"cam.pickle\")\n\n print(\"\\n\\nbye ...\\n\")\n","sub_path":"software/camera-imu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647436541","text":"from tkinter import *\nfrom random import *\n\nclass MoveBall:\n def __init__(self):\n window = Tk()\n window.title('Move a Ball')\n \n self.width = 200\n self.height = 200\n self.canvas = Canvas(window, width = self.width, height = self.height)\n self.canvas.pack()\n \n frame = Frame(window)\n frame.pack()\n Button(frame, text = 'left', command = self.left).pack(side = LEFT)\n Button(frame, text = 'right', command = self.right).pack(side = LEFT)\n Button(frame, text = 'up', command = self.up).pack(side = LEFT)\n Button(frame, text = 'down', command = self.down).pack(side = LEFT)\n \n self.x, self.y = randint(0, 200), randint(0, 200)\n self.canvas.create_oval(self.x, self.y, self.x + 10, self.y + 10, fill = 'black', tags = 'ball')\n \n self.d = 5\n \n window.mainloop()\n\n '''\n def isOver(self):\n if self.x > 190 or self.y > 190:\n 
isOver = True\n    '''\n    \n    def left(self):\n        self.canvas.move('ball', -self.d, 0)\n        #self.canvas.update()\n    \n    def right(self):\n        self.canvas.move('ball', self.d, 0)\n    \n    def up(self):\n        # canvas y grows downward, so moving up needs a negative y offset\n        self.canvas.move('ball', 0, -self.d)\n    \n    def down(self):\n        self.canvas.move('ball', 0, self.d)\n    \nMoveBall()\n    \n    \n    \n","sub_path":"python_advance/tkinter/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"469254657","text":"import bpy\nfrom ... base_types.node import AnimationNode\n\nclass ReplicateStringsNode(bpy.types.Node, AnimationNode):\n    bl_idname = \"an_ReplicateStringsNode\"\n    bl_label = \"Replicate Text\"\n\n    inputNames = { \"Text\" : \"text\",\n                   \"Amount\" : \"amount\" }\n\n    outputNames = { \"Text\" : \"text\" }\n\n    def create(self):\n        self.inputs.new(\"an_StringSocket\", \"Text\")\n        socket = self.inputs.new(\"an_IntegerSocket\", \"Amount\")\n        socket.setMinMax(0, 1000000)\n        socket.value = 2\n        self.outputs.new(\"an_StringSocket\", \"Text\")\n\n    def getExecutionCode(self):\n        return \"$text$ = %text% * %amount%\"\n","sub_path":"nodes/text/replicate_strings.py","file_name":"replicate_strings.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496023025","text":"\n\n\n\n# Your code here\n.5\n\n\n\n# Your code here\np_w = .6\np_o = .1\n.3*p_w\n\n# Your code here\np_g95 = .35\np_g98 = .25\np_diesel = .4\np_f_g95 = .6\np_f_g98 = .5\np_f_d = .3\n#buys 95 AND fills tank\np_g95 * p_f_g95 #.21\n#next customer fills tank p(buys_gas) * p(fills it)\np_f_n_g95 = p_g95 * p_f_g95\np_f_n_g98 = p_g98 * p_f_g98\np_f_n_d = p_diesel * p_f_d\np_f_n_g95 + p_f_n_g98 + p_f_n_d\n\n\n\n\n\n# Your code here\np_ao = .4\np_bo = .25\np_co = .35\n#all flights\np_ao * p_bo * p_co\n#at least one\np_atleast_1 = p_ao + p_bo + p_co\n#exactly one\n#p_ao - p_ao_n_bo - p_ao_n_co + p_bo - p_ao_n_bo - p_bo_n_co + p_co - p_ao_n_co - p_bo_n_co - 3 * (p_ao_n_bo_n_co)\n","sub_path":"week_3/YelpProject/law_total_prob.py","file_name":"law_total_prob.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"442742229","text":"\n# apply the binary search template\n\ndef search(self, nums, target: int) -> int:\n    if len(nums) <= 0:\n        return -1\n    l, r = 0, len(nums) - 1\n    while l < r:\n        mid = (r - l) // 2 + l\n        if nums[mid] == target:\n            return mid\n        if nums[mid] > nums[l]:\n            if nums[l] <= target <= nums[mid]:\n                r = mid\n            else:\n                ''' +1 here, because the branch above uses the <= comparison '''\n                l = mid + 1\n        else:\n            '''note: this must be mid+1, because under our comparison scheme mid belongs to the left segment'''\n            if nums[mid + 1] <= target <= nums[r]:\n                l = mid + 1\n            else:\n                r = mid\n    return l if nums[l] == target else -1","sub_path":"Week_03/搜索二维矩阵.py","file_name":"搜索二维矩阵.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"584538990","text":"# Flask libraries in use\nfrom flask import Flask\nfrom flask import session\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import render_template\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask import jsonify\n\n# Import keys\nimport secret\n\n# Other libraries\nimport json\nimport facebook\nimport requests\nimport datetime\nfrom pymongo import MongoClient\nfrom config import DevelopmentConfig\n\n# Functions\nimport search_engines\nimport authentication\nfrom validate_event 
import validate\n\n# Base de datos\n#from modelo import db\n#from modelo import Evento\n\n# Incializacion App FLASK\napp = Flask(__name__)\napp.config.from_object(DevelopmentConfig)\ncsrf = CSRFProtect()\nmongo = MongoClient('localhost',27017)\ndb = mongo.eventos\n\n# Vector de Categoras - Las categorias en este vector seran renderizadas para su asignacion a los eventos\ncategorias = ['Rock','Clasica','Electrónica','HipHop','Salsa','Reggaeton','Reggae','Jazz','Vallenato','Merengue','Negocios','Ciencias','Literatura','Artes','Tecnología','Salud','Festivales Culturales','Teatro','Ferias Comerciales','Cine','Eventos Deportivos']\n\n\n@app.route('/')\ndef index():\n return redirect(url_for('login'))\n\n\n# Ruta /login\n@app.route('/login')\ndef login():\n return render_template('login.html')\n\n# Ruta /logout\n@app.route('/logout')\ndef logout():\n # Cierra session cada vez que entra al login\n session.pop('nombre', None)\n session.pop('token', None)\n session.pop('id', None)\n\n return redirect(url_for('login'))\n\n# Ruta /admin\n@app.route('/admin', methods = ['GET','POST'])\ndef admin():\n\n if request.method == 'GET':\n if 'nombre' in session:\n #muestra datos de la session\n return render_template('admin.html', nombre=session['nombre'])\n else:\n return render_template('no_access.html')\n\n if request.method == 'POST':\n\n # Agrega datos a la sesion\n for i in request.form.lists():\n session[i[0]] = i[1][0]\n\n return json.dumps({'OK':200})\n\n# Ruta /events\n@app.route('/eventos_facebook', methods = ['GET', 'POST'])\ndef events():\n\n if request.method == 'GET':\n\n if 'token' in session:\n\n event_json_list = []\n # Time\n tnow = datetime.datetime.now()\n # App Data\n app_id = secret.APP_ID\n app_secret = secret.APP_SECRET\n # Tokens\n token_user = session['token']\n # Search events by cities and trust organizers.\n cities = ['Bucaramanga','Floridablanca','Piedecuesta','Girón']\n organizers_txt = open('./static/files/creators',\"r\")\n organizers_ids = [line.split(\",\")[1] for line in organizers_txt.readlines()]\n organizers_txt.close()\n\n # Authentication for facebook\n graph_user = authentication.as_user(token_user)\n graph_app = authentication.as_app(app_id,app_secret)\n\n # Result\n new_ids = search_engines.search_all_events(graph_user,graph_app,cities,tnow,organizers_ids)\n\n for j,event_id in enumerate(new_ids):\n evento_query = db.evento.find_one({'facebookId':event_id})\n\n if evento_query is None:\n event = facebook.GraphAPI(access_token=token_user, version=\"2.7\").get_object(id=event_id,\n fields='name,'\n 'start_time,'\n 'end_time,'\n 'description,'\n 'owner,'\n 'picture,'\n 'cover,'\n 'place,'\n 'updated_time,'\n 'is_draft,'\n 'type,'\n 'attending_count,'\n 'category,'\n 'declined_count,'\n 'interested_count,'\n 'is_canceled,'\n 'is_page_owned,'\n 'maybe_count,'\n 'noreply_count,'\n 'admins,'\n 'comments,'\n 'ticket_uri'\n )\n event_json = {\n \"title\":validate(event,['name'],1),\n \"url\": None,\n \"description\": validate(event, ['description'], 1),\n \"dateInit\":validate(event,['start_time'],1),\n \"dateEnd\":validate(event,['end_time'],1),\n \"facebookId\": validate(event, ['id'], 1),\n \"imageUrl\": validate(event, ['cover', 'source'], 2),\n \"location\":{\n \"city\": validate(event, ['place', 'location', 'city'], 3),\n \"country\": validate(event, ['place', 'location', 'country'], 3),\n \"street\": validate(event, ['place', 'location', 'street'], 3),\n \"name\": validate(event, ['place', 'name'], 2),\n \"zip\": validate(event, ['place', 'location', 'zip'], 3),\n 
\"geometry\":{\n \"coordinates\":[validate(event,['place','location','longitude'],3),validate(event,['place','location','latitude'],3)],\n \"type\":None\n },\n },\n \"_id\": None,\n \"verify\": None,\n \"noactive\":validate(event,['is_canceled'],1),\n \"subscription\": None,\n \"rate\": [validate(event, ['attending_count'], 1),\n validate(event, ['declined_count'], 1),\n validate(event, ['maybe_count'], 1),\n validate(event, ['noreply_count'], 1),\n validate(event, ['interested_count'], 1)],\n \"lastUpdateFB\":validate(event,['updated_time'],1),\n \"verifyPageFB\":validate(event,['is_page_owned'],1),\n \"creatorFB\": validate(event, ['owner', 'name'], 2),\n \"category\": validate(event,['category'],1),\n \"ticket_uri\":validate(event,['ticket_uri'],1),\n \"typeFB\":validate(event,['type'],1)\n\n\n }\n\n event_json_list.append(event_json)\n\n # Vector de Categoras - Las categorias en este vector seran renderizadas para su asignacion a los eventos\n categorias = ['Rock','Clasica','Electrónica','HipHop','Salsa','Reggaeton','Reggae','Jazz','Vallenato','Merengue','Negocios','Ciencias','Literatura','Artes','Tecnología','Salud','Festivales Culturales','Teatro','Ferias Comerciales','Cine','Eventos Deportivos']\n\n return render_template('eventos.html', eventos= event_json_list, categorias = categorias)\n\n else:\n return render_template('no_access.html')\n\n if request.method == 'POST':\n if 'nombre' in session:\n db.evento.insert_one(request.json)\n return json.dumps({'status':200})\n\n@app.route('/nuevo_evento', methods = ['GET'])\ndef nuevo_evento():\n\n esqueleto_evento = {\n \"title\": \"Titulo\",\n \"url\": \"url\",\n \"description\": \"Descripcion\",\n \"dateInit\":\"2017-12-17T08:00:00-0500\",\n \"dateEnd\": \"2017-12-17T08:00:00-0500\",\n \"facebookId\": \"Facebook id\",\n \"imageUrl\": \"Url de la Imagen\",\n \"city\": \"Ciudad\",\n \"country\": \"Pais\",\n \"street\": \"Direccion\",\n \"name\": \"name\",\n \"zip\": \"zip\",\n\n \"coordinates\":\"[ Longitud, Latitud]\",\n \"type\": \"tipo\",\n\n\n\n \"verify\": \"None\"\n\n\n\n\n }\n return render_template('nuevo_evento.html', esqueleto_evento=esqueleto_evento, categorias = categorias)\n\n@app.route('/organizadores_facebook', methods=[\"GET\",\"POST\"])\ndef organizadores_facebook():\n\n if request.method == \"POST\":\n pass\n\n if request.method == \"GET\":\n return render_template(\"organizadores_facebook.html\")\n\n\nif __name__ == '__main__':\n csrf.init_app(app)\n app.run(port=8000)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"436838782","text":"from keras.models import load_model\nfrom loss import softmax_cross_entropy_with_logits\nfrom datetime import datetime\nfrom glob import glob\nfrom shutil import move\nfrom os import mkdir, rmdir\nfrom os.path import basename\nfrom SavedState import SavedState, readSavedState\nimport numpy as np\nimport schedule\n\ndef train(modelName):\n timeStamp, numFiles = moveFilesForProcessing(modelName)\n \n if numFiles == 0:\n rmdir(timeStamp)\n return\n \n gameStates = np.zeros((numFiles, 9, 9, 65))\n pis = np.zeros((numFiles, 140))\n values = np.zeros((numFiles, 1))\n count = 0\n for file in glob('./' + timeStamp + '/' + modelName + '*'):\n savedState = readSavedState(file)\n gameStates[count] = savedState.gameState\n pis[count] = savedState.pi\n values[count] = savedState.value\n \n y = {'valueHead' : values, 'policyHead': pis}\n model = 
load_model('./models/' + modelName + '.h5', custom_objects={'softmax_cross_entropy_with_logits': softmax_cross_entropy_with_logits})\n model.save('./depModels/' + modelName + '-' + datetime.now().strftime(\"%d-%b-%Y-%H-%M-%S-%f\") + '.h5')\n \n model.fit(gameStates, y, batch_size=32, epochs=1)\n model.save('./models/' + modelName + '.h5')\n \n moveFilesDoneProcessing(timeStamp)\n \ndef moveFilesForProcessing(modelName):\n timeStamp = datetime.now().strftime(\"%d-%b-%Y-%H-%M-%S-%f\") + '-' + modelName\n mkdir(timeStamp)\n numFiles = 0\n for f in glob('./positions/' + modelName + '*'):\n tempFolder = datetime.now().strftime(\"%d-%b-%Y-%H-%M-%S-%f\")\n move(f, timeStamp + '/' + basename(f))\n numFiles = numFiles + 1\n \n return timeStamp, numFiles\n\ndef moveFilesDoneProcessing(timeStamp):\n mkdir('processed/' + timeStamp)\n for f in glob('./' + timeStamp + '/*'):\n move(f, 'processed/' + timeStamp + '/' + basename(f))\n rmdir(timeStamp)\n \ndef constTrain():\n schedule.every().hour.do(train, args=('model1'))","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272902208","text":"\"\"\" represents the trigger for the new flir mode \r\n\r\n Author : T. Wittmann - COEMM4\r\n Date : 08.05.2014 'pythonized' version\r\n\"\"\"\r\n\r\nfrom pytsl import get_logger, wait, load\r\n\r\ndef FLIR_S01():\r\n \"\"\"\r\n Subroutine: FLIR.S01 Name: NEW FLIR MODE TRIGGER\r\n \"\"\" \r\n logger = get_logger()\r\n\r\n NEW_FLIR_MODE_DEMANDED_CS1_T = load('ATK#ATK_15#ATK_15_HT_001_CSG01_AC_HAT_MESS_1#NEW_FLIR_MODE_DEMANDED@ATK_15_HT_001_0049_0_0')\r\n\r\n with logger.task(\"NEW FLIR MODE TRIGGER\", workload=2):\r\n # ==== STEP 1 ====\r\n logger.work(\"The new mode trigger will be set for 3 cycles = 240 ms.\")\r\n NEW_FLIR_MODE_DEMANDED_CS1_T.stimulate(True)\r\n wait(240)\r\n # WriteStepResult()\r\n # ==== STEP 2 ====\r\n logger.work(\"The trigger will be reset and the transition time is waited\")\r\n NEW_FLIR_MODE_DEMANDED_CS1_T.stimulate(False)\r\n # SPEC: The transition time to or from an operational mode is 2 sec maximum\r\n # wait(2000)\r\n # WriteStepResult()","sub_path":"cd/pytsl/test/example/FLIR_S01.py","file_name":"FLIR_S01.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"473911252","text":"\r\n\r\nmyfileloc = 'C:\\\\Users\\\\9888120\\\\Desktop\\\\My codes\\\\'\r\n\r\nimport sys\r\nsys.path.append(myfileloc+'utility.py')\r\nsys.path.append(myfileloc+'load_MasterDictionary.py')\r\nsys.path.append(myfileloc+'word_analysis.py')\r\nsys.path.append(myfileloc+'dictionary.py')\r\nsys.path.append(myfileloc+'fgindicator.py')\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split, cross_val_score,GridSearchCV\r\n\r\nfrom sklearn.metrics import confusion_matrix,cohen_kappa_score\r\nfrom sklearn.metrics import classification_report\r\n\r\nfrom utility import *\r\n\r\nimport pickle\r\n\r\ncount_tf_nd = pickle.load( open( \"count_tf_nd.p\", \"rb\" ) )\r\ndf_label = pickle.load( open( \"df_label.p\", \"rb\" ) )\r\ndf_names = pickle.load( open( \"df_names.p\", \"rb\" ) )\r\n\r\nfgi_index_df = pickle.load(open( \"monthlyfgi.p\", \"rb\" ) )\r\n\r\n\r\n\r\n\r\nshifted_fgi_index_df = fgi_index_df.shift(6)\r\nshifted_fgi_index_df = shifted_fgi_index_df.dropna()\r\n\r\n\r\ndf = 
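constTrain above calls schedule.every().hour.do(train, args=('model1')); in the schedule library Job.do has the signature do(job_func, *args, **kwargs), so ('model1') is just the string 'model1' and args= would be forwarded to train() as an unexpected keyword. Positional forwarding plus a polling loop is the usual pattern (the loop and its time import are an assumption about how the module is driven; schedule never fires jobs without run_pending()):

import time

# schedule forwards *args straight to the job function; there is no
# `args=` keyword on Job.do(), and queued jobs only run when
# run_pending() is polled.
def constTrain():
    schedule.every().hour.do(train, 'model1')
    while True:
        schedule.run_pending()
        time.sleep(60)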
pd.concat([shifted_fgi_index_df, df_label],axis=1)\r\n\r\n\r\n\r\n\r\nfrom sklearn.metrics import classification_report, roc_auc_score, auc,roc_curve\r\nfrom sklearn.model_selection import ParameterGrid\r\n\r\nimport random\r\nseed = 999\r\nrandom.seed(seed)\r\n\r\n\r\nindices= range(len(count_tf_nd))\r\n\r\n#X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(count_tf_nd,df_label, indices, train_size=0.8, stratify=df_label, random_state=seed, shuffle = True)\r\nX_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(count_tf_nd,df_label, indices, train_size=0.8, shuffle = False)\r\n\r\n\r\n### FGI prediction\r\ndf_fgi_test = df.iloc[idx_test]\r\ndf_fgi_train = df.iloc[idx_train]\r\n\r\n\r\n\r\ndf_fgi_test['fgi_forecast'] = df_fgi_test['fgi'].apply(binaryfun)\r\ndf_fgi_train['fgi_forecast'] = df_fgi_train['fgi'].apply(binaryfun)\r\n\r\n\r\ny_test_pred = df_fgi_test['fgi_forecast']\r\ny_train_pred = df_fgi_train['fgi_forecast']\r\n\r\nplot_actual_prediction(y_train, y_train_pred, title = 'Prediction using FGI (Org dictionary) in Train set: Actual vs prediction')\r\n\r\n\r\nplot_actual_prediction(y_test, y_test_pred, title = 'Prediction using FGI (Org dictionary) in Test set: Actual vs prediction')\r\n\r\n\r\n\r\n# train set\r\nconfusionmatrix_train = confusion_matrix(y_train, y_train_pred)\r\nprint(confusionmatrix_train)\r\n\r\nprint(classification_report(y_train, y_train_pred))\r\n\r\n\r\n\r\n# test set\r\nconfusionmatrix_test = confusion_matrix(y_test, y_test_pred)\r\nprint(confusionmatrix_test)\r\n\r\n\r\nprint(classification_report(y_test, y_test_pred))\r\n\r\nplot_all_confusion_matrix(confusionmatrix_train)\r\n\r\nplot_all_confusion_matrix(confusionmatrix_test)\r\n\r\n\r\n\r\n\r\n## machine learning\r\n\r\nimport parfit.parfit as pf\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\ngrid = {\r\n 'C': np.logspace(-5, 5, 10),\r\n 'penalty': ['l1']\r\n}\r\n\r\nparamGrid = ParameterGrid(grid)\r\nlog_bestModel_l1, bestScore, allModels, allScores = pf.bestFit(LogisticRegression(), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\n#from sklearn.model_selection import GridSearchCV\r\n\r\n\r\n#log_bestModel_l1 = GridSearchCV(estimator = LogisticRegression(), param_grid = grid, scoring='roc_auc', cv=10)\r\n#log_bestModel_l1.fit(X_train, y_train)\r\n\r\n\r\n#pickle.dump(log_bestModel_l1, open( \"log_bestModel_l1_nonshuffle.p\", \"wb\" ) )\r\n\r\n#log_bestModel_l1 = pickle.load(open( \"log_bestModel_l1_nonshuffle.p\", \"rb\" ) )\r\n\r\ny_test_pred = log_bestModel_l1.predict(X_test)\r\n\r\ny_train_pred = log_bestModel_l1.predict(X_train)\r\n\r\n\r\n\r\n\r\nmodels = [log_bestModel_l1]\r\nmodel_names = ['Logistic Regression L1']\r\nplot_ROC(models,model_names, X_test, y_test)\r\n\r\n\r\n\r\n\r\ngrid = {\r\n 'C': np.logspace(-5, 5, 10),\r\n 'penalty': ['l2']\r\n}\r\n\r\nparamGrid = ParameterGrid(grid)\r\nlog_bestModel_l2, bestScore, allModels, allScores = pf.bestFit(LogisticRegression(), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\n#pickle.dump(log_bestModel_l2, open( \"log_bestModel_l2_nonshuffle.p\", \"wb\" ) )\r\n#log_bestModel_l2 = pickle.load(open( \"log_bestModel_l2_nonshuffle.p\", \"rb\" ) )\r\n\r\nmodels = [log_bestModel_l2]\r\nmodel_names = ['Logistic Regression L2']\r\nplot_ROC(models,model_names, X_test, y_test)\r\n\r\n\r\n\r\n\r\ny_test_pred = log_bestModel_l2.predict(X_test)\r\n\r\ny_train_pred = 
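The commented-out lines above sketch the scikit-learn-only alternative to parfit for the L1 search; completed, it would look like the following (solver='liblinear' is an assumption, named explicitly because the newer default solvers do not support the l1 penalty):

# GridSearchCV equivalent of the parfit search: exhaustive 10-fold CV
# over C, scored by ROC AUC, refit on the best parameters.
log_cv = GridSearchCV(estimator=LogisticRegression(penalty='l1', solver='liblinear'),
                      param_grid={'C': np.logspace(-5, 5, 10)},
                      scoring='roc_auc', cv=10)
log_cv.fit(X_train, y_train)
y_test_pred = log_cv.predict(X_test)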
log_bestModel_l2.predict(X_train)\r\n\r\n\r\n########## svm\r\nfrom sklearn.svm import SVC,LinearSVC\r\ngrid = {'C': np.logspace(-5, 5, 10),'penalty': ['l1'], 'loss': ['squared_hinge'], 'dual': [False]}\r\n\r\nparamGrid = ParameterGrid(grid)\r\nsvm_linear_l1_bestModel, bestScore, allModels, allScores = pf.bestFit(LinearSVC(), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\n\r\n#pickle.dump(svm_linear_l1_bestModel, open( \"svm_linear_l1_bestModel_nonshuffle.p\", \"wb\" ) )\r\n#svm_linear_l1_bestModel = pickle.load(open( \"svm_linear_l1_bestModel_nonshuffle.p\", \"rb\" ) )\r\n\r\ny_test_pred = svm_linear_l1_bestModel.predict(X_test)\r\n\r\ny_train_pred = svm_linear_l1_bestModel.predict(X_train)\r\n\r\n\r\n\r\n\r\n\r\nmodels = [svm_linear_l1_bestModel]\r\nmodel_names = ['SVM linear L1']\r\nplot_ROC(models,model_names, X_test, y_test,linear_svm = True)\r\n\r\n\r\ngrid = {'C': np.logspace(-5, 5, 10),'penalty': ['l2'], 'loss': ['squared_hinge'], 'dual': [True]}\r\n\r\nparamGrid = ParameterGrid(grid)\r\nsvm_linear_l2_bestModel, bestScore, allModels, allScores = pf.bestFit(LinearSVC(), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\n#svm_linear_l2_bestModel = GridSearchCV(LinearSVC(), cv=10, param_grid={'C': np.logspace(-5, 5, 10),'penalty': ['l2'], 'loss': ['squared_hinge'], 'dual': [True]}, n_jobs = 4)\r\n#svm_linear_l2_bestModel.fit(X_train, y_train)\r\n\r\n\r\n\r\n\r\n#pickle.dump(svm_linear_l2_bestModel, open( \"svm_linear_l2_bestModel_nonshuffle.p\", \"wb\" ) )\r\n\r\n#svm_linear_l2_bestModel = pickle.load(open( \"svm_linear_l2_bestModel_nonshuffle.p\", \"rb\" ) )\r\n\r\ny_test_pred = svm_linear_l2_bestModel.predict(X_test)\r\n\r\ny_train_pred = svm_linear_l2_bestModel.predict(X_train)\r\n\r\n\r\n\r\nmodels = [svm_linear_l2_bestModel]\r\nmodel_names = ['SVM linear L2']\r\nplot_ROC(models,model_names, X_test, y_test,linear_svm = True)\r\n\r\n\r\n\r\n#np.logspace(-3, 3, 10)\r\ngrid = {'C': [1e-04, 1e-02, 1, 1e02, 1e04], 'kernel': ['linear']}\r\n\r\ngrid = {'C': np.logspace(-5, 5, 10), 'kernel': ['poly']}\r\n\r\nparamGrid = ParameterGrid(grid)\r\nsvm_linear_bestModel, bestScore, allModels, allScores = pf.bestFit(SVC(probability=True), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\nmodels = [svm_linear_bestModel]\r\nmodel_names = ['svm']\r\nplot_ROC(models,model_names, X_test, y_test)\r\n\r\n\r\n\r\n\r\n\r\n############## randam forest\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\nparam_grid = {\r\n 'bootstrap': [True],\r\n 'max_depth': [80, 90, 100, 110],\r\n 'max_features': [2, 3],\r\n 'min_samples_leaf': [3, 4, 5],\r\n 'min_samples_split': [8, 10, 12],\r\n 'n_estimators': [5, 10, 15, 20, 50, 100,200, 300, 1000]\r\n}\r\nparamGrid = ParameterGrid(param_grid)\r\nrf_bestModel, bestScore, allModels, allScores = pf.bestFit(RandomForestClassifier(), paramGrid, X_train, y_train, X_test, y_test, nfolds=10, metric = roc_auc_score, scoreLabel = \"AUC\")\r\n\r\n#rf_bestModel = GridSearchCV(RandomForestClassifier(), cv=10, param_grid={'n_estimators': [5, 10, 15, 20, 50, 100]}, n_jobs = 4)\r\n#rf_bestModel.fit(X_train, y_train)\r\n\r\n#pickle.dump(rf_bestModel, open( \"rf_bestModel_nonshuffle.p\", \"wb\" ) )\r\n\r\n#rf_bestModel = pickle.load(open( \"rf_bestModel_nonshuffle.p\", \"rb\" ) )\r\n\r\ny_test_pred = rf_bestModel.predict(X_test)\r\n\r\ny_train_pred = 
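plot_ROC is a helper from the local utility module, and the linear_svm=True flag passed for the LinearSVC models above presumably exists because LinearSVC has no predict_proba; its ROC curve has to come from decision_function. The idea in isolation (roc_curve and auc are already imported above):

# LinearSVC exposes decision_function but not predict_proba, so ROC
# points come from the signed margin rather than class probabilities.
scores = svm_linear_l2_bestModel.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, scores)
print('LinearSVC L2 AUC: %.3f' % auc(fpr, tpr))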
rf_bestModel.predict(X_train)\r\n\r\n\r\n\r\n\r\nmodels = [rf_bestModel]\r\nmodel_names = ['Random Forest']\r\nplot_ROC(models,model_names, X_test, y_test)\r\n\r\n\r\n\r\n# train set\r\nconfusionmatrix_train = confusion_matrix(y_train, y_train_pred)\r\nprint(confusionmatrix_train)\r\n\r\nprint(classification_report(y_train, y_train_pred))\r\n\r\n\r\n\r\n# test set\r\nconfusionmatrix_test = confusion_matrix(y_test, y_test_pred)\r\nprint(confusionmatrix_test)\r\n\r\n\r\nprint(classification_report(y_test, y_test_pred))\r\n\r\nprint(cohen_kappa_score(y_test, y_test_pred))\r\n\r\n\r\n\r\n\r\nplot_all_confusion_matrix(confusionmatrix_train)\r\n\r\nplot_all_confusion_matrix(confusionmatrix_test)\r\n\r\n\r\nplot_actual_prediction(y_train, y_train_pred, title = 'Random Forests in Train set: Actual vs prediction')\r\n\r\n\r\nplot_actual_prediction(y_test, y_test_pred, title = 'Random Forests in Test set: Actual vs prediction')\r\n\r\n\r\n\r\n","sub_path":"Project word represent bow CV04.py","file_name":"Project word represent bow CV04.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"175259518","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter.scrolledtext import ScrolledText\r\nfrom tkinter import ttk\r\nimport socket, threading, select, time, sys\r\n\r\nsckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsckt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n#server_address = ('127.0.0.1', 5000)\r\nserver_address = ('108.61.119.46', 5000)\r\n\r\nclass Window(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n self.master = master\r\n self.grid()\r\n self.online_users = []\r\n self.aliasname = ''\r\n self.init_window()\r\n self.connect_to_server()\r\n self.pm_tabs = []\r\n \r\n def init_window(self):# Builds the UI for the main window\r\n self.n = ttk.Notebook(root)\r\n \r\n self.n.bind_all(\"<>\", self.tabChangedEvent)\r\n \r\n root.title('Python Chat')\r\n w = 480\r\n h = 272\r\n sw = root.winfo_screenwidth()\r\n sh = root.winfo_screenheight()\r\n x = (sw - w)/2\r\n y = (sh - h)/2\r\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n \r\n self.textboxframe = ttk.Frame(self.n)\r\n self.textboxframe.grid(row=0, column=0, sticky=N+S+E+W)\r\n Grid.rowconfigure(root, 0, weight=1)\r\n Grid.columnconfigure(root, 0, weight=1) \r\n \r\n self.textReceive = ScrolledText(self.textboxframe, height=24, width=47, wrap = WORD)\r\n self.textReceive.grid(row = 0, column= 0, padx=(10,0), pady=(10,5), sticky=N+S+E+W)\r\n self.textReceive.config(state=DISABLED)\r\n Grid.rowconfigure(self.textboxframe, 0, weight=1)\r\n Grid.columnconfigure(self.textboxframe, 0, weight=1)\r\n \r\n self.userReceive = Listbox(self.textboxframe, width=12)\r\n self.userReceive.grid(row = 0, column= 1, rowspan=3, padx=(0,10), pady=(10,10), sticky=N+S+E+W)\r\n Grid.rowconfigure(self.textboxframe, 0, weight=1)\r\n Grid.columnconfigure(self.textboxframe, 0, weight=1)\r\n self.userReceive.insert(END, ' Online Users\\n')\r\n \r\n self.textEntry = ScrolledText(self.textboxframe, height=2, width=47, wrap = WORD)\r\n self.textEntry.grid(row = 2, column= 0, padx=(10,0), pady=(0,10), sticky=N+S+E+W)\r\n Grid.rowconfigure(self.textboxframe, 0, weight=1)\r\n Grid.columnconfigure(self.textboxframe, 0, weight=1)\r\n self.textEntry.bind('', self.check_username)\r\n \r\n self.n.add(self.textboxframe, text='Chat')\r\n self.n.grid(row=0, column=0, sticky=N+S+E+W)\r\n \r\n def 
tabChangedEvent(self,event):\r\n self.current_tab=event.widget.tab(event.widget.index(\"current\"),\"text\")\r\n \r\n \r\n def connect_to_server(self):# Attemps to connect to the server and starts the receiving thread\r\n try:\r\n sckt.connect(server_address)\r\n thread = threading.Thread(target=self.recv_loop, args=[sckt])\r\n thread.daemon = True\r\n thread.start()\r\n self.getuser_popup()\r\n except:\r\n messagebox.showinfo(message='Can\\'t connect to the server!\\nPlease try again later', icon='warning')\r\n quit()\r\n \r\n def check_username_pm(self, event=None):# Check if they have a username. If they have a username, check if they have a message \r\n user = self.aliasname\r\n message = self.pmEntry.get('1.0','end-1c')\r\n if user.isspace() or user == '':\r\n messagebox.showinfo(message='You must enter a username before you can chat', icon='warning')\r\n return 'break'\r\n elif message.isspace() or message == '':\r\n messagebox.showinfo(message='You must enter some text to chat', icon='warning')\r\n self.pmEntry.delete('1.0', END)\r\n return 'break'\r\n for cli in self.online_users:\r\n if cli in self.current_tab:\r\n billy = '/msg ' + cli + ' ' + message \r\n self.iterate_though_message(billy, user)\r\n return 'break' \r\n \r\n def check_username(self, event=None):# Check if they have a username. If they have a username, check if they have a message \r\n user = self.aliasname\r\n message = self.textEntry.get('1.0','end-1c')\r\n if user.isspace() or user == '':\r\n messagebox.showinfo(message='You must enter a username before you can chat', icon='warning')\r\n return 'break'\r\n elif message.isspace() or message == '':\r\n messagebox.showinfo(message='You must enter some text to chat', icon='warning')\r\n self.textEntry.delete('1.0', END)\r\n return 'break'\r\n self.iterate_though_message(message, user)\r\n return 'break'\r\n \r\n def iterate_though_message(self, message, user):# If there is a message check for / commands\r\n if message[0] == '/':\r\n self.check_for_user_command(message, user)\r\n else:\r\n self.sending(message, user)# If no / in the first position of the message is found, send the message\r\n \r\n def check_for_user_command(self, message, user):# After iterating through the message, check if the message contains any of these commands\r\n if '/nick ' in message: \r\n usr = message.split(' ',1)[1] # Grabs the username after /nick\r\n if usr == '' or ' ' in usr: # Checks format rules ie. 
if it contains spaces\r\n messagebox.showinfo(message='Username cannot contain spaces \\n Example: \"/nick username\"', icon='warning')\r\n return 'break'\r\n elif usr in self.online_users:# Checks if the username is taken\r\n messagebox.showinfo(message='That username is taken.', icon='warning')\r\n return 'break' \r\n else:\r\n self.send_username_to_server(user, usr)# If the username is not taken, send it to the server \r\n return 'break'\r\n elif '/nick' in message:\r\n messagebox.showinfo(message='You must enter a username \\n Example: \"/nick username\"', icon='warning')\r\n return 'break'\r\n else: \r\n if '/msg' in message:\r\n tabname = message.split(' ',2)\r\n out_msg = message.split()\r\n if len(out_msg) >= 3: \r\n self.add_tab_outgoing(tabname[1], user, message)\r\n else:\r\n messagebox.showinfo(message='You must enter some text to chat', icon='warning')\r\n \r\n def send_username_to_server(self, user, usr):# Sends the username to the server and updates the client title to include the new name\r\n sckt.send(user.encode('utf-8') + ':>/nick '.encode('utf-8') + usr.encode('utf-8'))\r\n self.textEntry.delete('1.0', END)\r\n self.master.title('Python Chat - %s' % usr)\r\n self.aliasname = usr\r\n\r\n def sending_pm(self, message, user):# Main sending method. Sends message to server to then be sent to all connected clients\r\n try:\r\n sckt.send(user.encode('utf-8')+ ':>'.encode('utf-8') + message.encode('utf-8'))\r\n self.textEntry.delete('1.0', END)\r\n except:\r\n messagebox.showinfo(message='Can\\'t send messages while not connected', icon='warning') \r\n \r\n def sending(self, message, user):# Main sending method. Sends message to server to then be sent to all connected clients\r\n try:\r\n sckt.send(user.encode('utf-8')+ ':>'.encode('utf-8') + message.encode('utf-8'))\r\n self.post_text(user + ':>' + message + '\\n')\r\n self.textEntry.delete('1.0', END)\r\n except:\r\n messagebox.showinfo(message='Can\\'t send messages while not connected', icon='warning')\r\n \r\n def recv_loop(self, connection):# The main receiving loop for incoming messages\r\n while True: \r\n (readable, writable, errored) = select.select([connection], [], [connection], 0.1)\r\n if readable or errored:\r\n data = connection.recv(1024)\r\n data = data.decode('utf-8')\r\n if not data:\r\n self.cant_connect()\r\n self.check_for_commands(data) \r\n \r\n def check_for_commands(self, data):# Looks for a string from the server to start the build_online_list method\r\n raw_msg = data.split('!',1)\r\n if data[0] == '?':\r\n self.build_online_list(data)\r\n elif data[0] == '!':\r\n self.post_pm(raw_msg)\r\n return 'break'\r\n else:\r\n self.post_text(data + '\\n') \r\n \r\n def post_pm(self, raw_msg):\r\n send_post = raw_msg[1][0:].split('/msg ')\r\n send_user = send_post[0].split(':>')\r\n send_msg = send_post[1].split(self.aliasname + \" \")\r\n done_send = send_user[0] + ':>' + send_msg[1]\r\n self.add_tab(send_user, done_send)\r\n \r\n def add_tab_outgoing(self, send_user, user, done_send):\r\n for client_id in self.online_users:\r\n # Check for new Client\r\n if send_user in client_id and send_user not in self.pm_tabs:\r\n # Create Client Tab\r\n self.pm_tabs.append(send_user)\r\n self.sending_user = send_user\r\n self.send_user = self.sending_user + 'frame'\r\n self.receive_user = self.sending_user + 'pmReceive'\r\n self.pm_Entry = self.sending_user + 'pmSend'\r\n self.pm_Close = self.sending_user + 'button'\r\n \r\n self.send_user= ttk.Frame(self.n)\r\n self.send_user.grid(row=0, column=0, rowspan=2, 
sticky=N+S+E+W)\r\n \r\n self.receive_user = ScrolledText(self.send_user, height=24, width=47, wrap = WORD)\r\n self.receive_user.grid(row = 0, column= 0, padx=(10,0), pady=(10,5), sticky=N+S+E+W)\r\n self.receive_user.config(state=DISABLED)\r\n \r\n self.pmEntry = ScrolledText(self.send_user, height=2, width=47, wrap = WORD)\r\n self.pmEntry.grid(row = 2, column= 0, padx=(10,0), pady=(0,10), sticky=N+S+E+W)\r\n self.pmEntry.bind('', self.check_username_pm)\r\n \r\n self.pm_Close = Button(self.send_user, width=7, text='Close tab', command=lambda:remove_on_close())\r\n self.pm_Close.grid(row = 0, column= 1, padx=(5,5), pady=(5,150), sticky=N+S+E+W)\r\n \r\n def remove_on_close():\r\n self.n.select()\r\n self.n.forget(self.n.select())\r\n for items in self.pm_tabs:\r\n if self.current_tab in items:\r\n self.pm_tabs.remove(self.current_tab)\r\n \r\n Grid.rowconfigure(self.send_user, 0, weight=1)\r\n Grid.columnconfigure(self.send_user, 0, weight=1)\r\n \r\n self.n.add(self.send_user, text = send_user)\r\n message = done_send[0:].split(' ',2)\r\n self.sending_pm(done_send, user)\r\n self.post_pm_controls(user + ':>' + message[2] + '\\n')\r\n self.n.select(self.send_user)\r\n self.pmEntry.delete('1.0', END)\r\n self.pmEntry.focus_force() \r\n \r\n elif send_user in client_id and send_user in self.pm_tabs:\r\n if client_id in self.current_tab:\r\n message = done_send[0:].split(' ',2)\r\n self.sending_pm(done_send, user)\r\n self.post_pm_controls(user + ':>' + message[2] + '\\n')\r\n self.pmEntry.delete('1.0', END)\r\n self.pmEntry.focus_force() \r\n \r\n def add_tab(self, send_user, done_send):\r\n for client_id in self.online_users:\r\n # Check for new Client\r\n if send_user[0] in client_id and send_user[0] not in self.pm_tabs:\r\n # Create Client Tab\r\n self.pm_tabs.append(send_user[0])\r\n self.sending_user = send_user[0]\r\n self.send_user = self.sending_user + 'frame'\r\n self.receive_user = self.sending_user + 'pmReceive'\r\n self.pm_Entry = self.sending_user + 'pmSend'\r\n self.pm_Close = self.sending_user + 'button'\r\n \r\n self.send_user= ttk.Frame(self.n)\r\n self.send_user.grid(row=0, column=0, rowspan=2, sticky=N+S+E+W)\r\n \r\n self.receive_user = ScrolledText(self.send_user, height=24, width=47, wrap = WORD)\r\n self.receive_user.grid(row = 0, column= 0, padx=(10,0), pady=(10,5), sticky=N+S+E+W)\r\n self.receive_user.config(state=DISABLED)\r\n \r\n self.pmEntry = ScrolledText(self.send_user, height=2, width=47, wrap = WORD)\r\n self.pmEntry.grid(row = 1, column= 0, padx=(10,0), pady=(0,10), sticky=N+S+E+W)\r\n self.pmEntry.bind('', self.check_username_pm)\r\n \r\n self.pm_Close = Button(self.send_user, width=7, text='Close tab', command=lambda:remove_on_close())\r\n self.pm_Close.grid(row = 0, column= 1, padx=(5,5), pady=(5,150), sticky=N+S+E+W)\r\n \r\n Grid.rowconfigure(self.send_user, 0, weight=1)\r\n Grid.columnconfigure(self.send_user, 0, weight=1)\r\n \r\n def remove_on_close():\r\n self.n.select()\r\n self.n.forget(self.n.select())\r\n for items in self.pm_tabs:\r\n if self.current_tab in items:\r\n self.pm_tabs.remove(self.current_tab)\r\n \r\n self.n.add(self.send_user, text = send_user[0])\r\n self.post_pm_controls(done_send + '\\n')\r\n self.pmEntry.delete('1.0', END)\r\n \r\n elif send_user[0] in client_id and send_user[0] in self.pm_tabs:\r\n self.post_pm_controls(done_send + '\\n')\r\n \r\n def build_online_list(self, data):# Builds the online users list\r\n raw_clients = data.split('=',1)[1]\r\n self.online_users = raw_clients.split(\",\")\r\n 
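A reliability caveat on the chat client's sending and receiving code above and below: socket.send may write only part of the buffer, and TCP is a byte stream, so recv(1024) on the other side can split or merge chat lines. sendall removes the partial-send case; proper message framing (newline- or length-prefixed) is the fuller fix:

# sendall() retries until the whole buffer is written, unlike send(),
# which may return after writing only a prefix. Terminating each message
# (e.g. with a newline) would also keep recv() from merging them.
payload = (user + ':>' + message).encode('utf-8')
sckt.sendall(payload)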
self.userReceive.delete(1, END)\r\n for items in self.online_users:\r\n self.userReceive.insert(END, ' ' + items)\r\n \r\n def getuser_popup(self):# Builds the UI for the username entry window \r\n self.top = Toplevel()\r\n self.top.transient(root)\r\n w = 210\r\n h = 145\r\n sw = self.top.winfo_screenwidth()\r\n sh = self.top.winfo_screenheight()\r\n x = (sw - w)/2\r\n y = (sh - h)/2\r\n self.top.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n \r\n self.enteruser = Entry(self.top, width=18)\r\n self.enteruser.place(x=32, y=30)\r\n self.enteruser.focus_force()\r\n self.enterusername = Label(self.top, text = 'Enter a username to chat')\r\n self.enterusername.place(x=26, y=5)\r\n self.changeuser = Label(self.top, text = 'You can change your username\\n later by typing /nick in the chat')\r\n self.changeuser.place(x=3, y=55)\r\n self.usernameButton = Button(self.top, text='Enter Chat', command = self.get_username, height=2, width=8)\r\n self.enteruser.bind('', self.get_username)\r\n self.usernameButton.place(x=58, y=90)\r\n \r\n def get_username(self, event=None):# Gets the initial username after hitting the enter chat button\r\n self.aliasname = self.enteruser.get()\r\n if self.aliasname == '':\r\n messagebox.showinfo(message='You must enter a username', icon='warning')\r\n elif ' ' in self.aliasname:\r\n messagebox.showinfo(message='Username cannot contain spaces', icon='warning')\r\n elif self.aliasname in self.online_users:\r\n messagebox.showinfo(message='Username is taken.', icon='warning')\r\n else:\r\n self.master.title('Python Chat - %s' % self.aliasname)\r\n sckt.send(self.aliasname.encode('utf-8') + ':>/nick '.encode('utf-8') + self.aliasname.encode('utf-8'))\r\n self.top.destroy()\r\n self.textEntry.focus_force()\r\n \r\n def post_pm_controls(self, pm):# Handles the state of the tabed text boxs as well as inserting text into the box\r\n self.receive_user.config(state=NORMAL)\r\n self.receive_user.insert(END, pm)\r\n self.receive_user.config(state=DISABLED)\r\n self.receive_user.see(END)\r\n \r\n def post_text(self, post):# Handles the state of the main text box as well as inserting text into the box\r\n self.textReceive.config(state=NORMAL)\r\n self.textReceive.insert(END, post)\r\n self.textReceive.config(state=DISABLED)\r\n self.textReceive.see(END)\r\n \r\nroot = Tk()\r\napp = Window(root)\r\nroot.mainloop()\r\n","sub_path":"PocketChatV0.6.1.py","file_name":"PocketChatV0.6.1.py","file_ext":"py","file_size_in_byte":17236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"445827232","text":"from flask import Flask, render_template\nfrom subprocess import call\n\napp = Flask(__name__)\n\nSENDER_CONFIG = '10101'\nSEND_CONFIG = ['-u', '-s']\nRECEIVER = {\n 'A': '1',\n 'B': '2',\n 'C': '3',\n 'D': '4',\n}\nSTATES = {\n 'on':'1',\n 'off':'0'\n}\n\ndef send_signal(receiver, state):\n call(['./send', SENDER_CONFIG, *SEND_CONFIG, receiver, state])\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/on/', methods=['POST'])\ndef on_all():\n for _, value in RECEIVER.items():\n send_signal(value, STATES['on'])\n return 'all on'\n\n@app.route('/off/', methods=['POST'])\ndef off_all():\n for _, value in RECEIVER.items():\n send_signal(value, STATES['off'])\n return 'all off'\n\n@app.route('/receiver///', methods=['POST'])\ndef switch(receiver_id, state_id):\n if receiver_id not in RECEIVER.keys():\n return '{} not in {}'.format(receiver_id, RECEIVER.keys()), 400\n elif state_id not in STATES.keys():\n return 
'{} not in {}'.format(state_id, STATES.keys()), 400\n else:\n send_signal(RECEIVER[receiver_id], STATES[state_id])\n return '{} {}'.format(receiver_id, state_id)\n\n\n","sub_path":"pi-433-remote.py","file_name":"pi-433-remote.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"89750997","text":"import numpy as np\nimport SimpleITK as sitk\nfrom tqdm import tqdm\nfrom preprocessing import normalize\nimport os\n\n\npath1 = \"Data/Brats2018/HGG/\"\npath2 = \"Data/Brats2018/LGG/\"\n\ndef load_data(path):\n my_dir = sorted(os.listdir(path))\n\n data = []\n gt = []\n\n for p in tqdm(my_dir):\n data_list = sorted(os.listdir(path+p))\n # print(\"sorted(os.listdir(path+p))\",sorted(os.listdir(path+p))) ['Brats18_2013_0_1_flair.nii.gz', 'Brats18_2013_0_1_seg.nii.gz', 'Brats18_2013_0_1_t1.nii.gz', 'Brats18_2013_0_1_t1ce.nii.gz', 'Brats18_2013_0_1_t2.nii.gz']\n\n img_itk = sitk.ReadImage(path + p + '/'+ data_list[0])\n # print(\"image path\",path + p + '/'+ data_list[0]) Data/Brats2018/LGG/Brats18_2013_0_1/Brats18_2013_0_1_flair.nii.gz\n flair = sitk.GetArrayFromImage(img_itk)\n # print(\"flair shape\",flair.shape) # (155, 240, 240)\n # print(\"flair dtype\",flair.dtype) # int16\n flair = normalize(flair)\n\n img_itk = sitk.ReadImage(path + p + '/'+ data_list[1])\n seg = sitk.GetArrayFromImage(img_itk)\n\n # print(\"seg shape\",seg.shape) # (155, 240, 240)\n # print(\"seg dtype\",seg.dtype) # uint8 / int16\n\n\n img_itk = sitk.ReadImage(path + p + '/'+ data_list[2])\n t1 = sitk.GetArrayFromImage(img_itk)\n t1 = normalize(t1)\n\n img_itk = sitk.ReadImage(path + p + '/'+ data_list[3])\n t1ce = sitk.GetArrayFromImage(img_itk)\n t1ce = normalize(t1ce)\n\n img_itk = sitk.ReadImage(path + p + '/'+ data_list[4])\n t2 = sitk.GetArrayFromImage(img_itk)\n t2 = normalize(t2)\n\n data.append([flair,t1,t1ce,t2])\n gt.append(seg)\n\n data = np.asarray(data,dtype=np.float32)\n gt = np.asarray(gt,dtype=np.uint8)\n return data,gt\n#\n#\n# # for HGG\n# data2,gt2 = load_data(path1) #HGG having 210 patients\n# for LGG\ndata2,gt2 = load_data(path2) #LGG having 75 patients\n\nprint(\"data2.shape\",data2.shape)\nprint(\"gt2.shape\",gt2.shape)\nprint(\"data2.dtype\",data2.dtype)\nprint(\"gt2.dtype\",gt2.dtype)\n\n#\nnp.save('LG_data.npy',data2)\nnp.save('LG_gt.npy',gt2)\n\n\n","sub_path":"dataLoad.py","file_name":"dataLoad.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598854104","text":"# Copyright (c) 2012-2015 Netforce Co. 
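dataLoad.py above imports normalize from a local preprocessing module that is not included in this record. A per-volume z-score over nonzero (brain) voxels is a common choice for BraTS data; the sketch below is an assumption about that helper, not its actual implementation:

# Hypothetical z-score normalization over nonzero voxels, a common BraTS
# preprocessing step; the real preprocessing.normalize may differ.
def normalize(volume):
    voxels = volume[volume > 0]
    if voxels.size == 0:
        return volume.astype(np.float32)
    out = volume.astype(np.float32)
    out[volume > 0] = (voxels - voxels.mean()) / (voxels.std() + 1e-8)
    return out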
Ltd.\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom netforce.model import Model, fields, get_model\n\n\nclass AllocWizard(Model):\n _name = \"account.alloc.wizard\"\n _transient = True\n _fields = {\n \"credit_id\": fields.Many2One(\"account.invoice\", \"Credit Note\", required=True, on_delete=\"cascade\"),\n \"type\": fields.Char(\"Type\"),\n \"lines\": fields.One2Many(\"account.alloc.wizard.line\", \"wiz_id\", \"Lines\"),\n \"amount_credit\": fields.Decimal(\"Outstanding Credit\", readonly=True),\n \"amount_alloc\": fields.Decimal(\"Total Amount to Credit\", readonly=True),\n \"amount_remain\": fields.Decimal(\"Remaining Credit\", readonly=True),\n }\n\n def default_get(self,field_names={},context={},**kw):\n if \"credit_id\" not in context:\n return {}\n credit_id = int(context[\"credit_id\"])\n cred = get_model(\"account.invoice\").browse(credit_id)\n contact_id = cred.contact_id.id\n lines = []\n for inv in get_model(\"account.invoice\").search_browse([[\"type\", \"=\", cred.type], [\"inv_type\", \"=\", \"invoice\"], [\"contact_id\", \"=\", contact_id], [\"state\", \"=\", \"waiting_payment\"], [\"currency_id\", \"=\", cred.currency_id.id]]):\n lines.append({\n \"invoice_id\": [inv.id, inv.name_get()[0][1]],\n \"date\": inv.date,\n \"amount_total\": inv.amount_total,\n \"amount_due\": inv.amount_due,\n })\n vals = {\n \"credit_id\": [cred.id, cred.name_get()[0][1]],\n \"lines\": lines,\n \"type\": cred.type,\n \"amount_credit\": cred.amount_credit_remain,\n \"amount_alloc\": 0,\n \"amount_remain\": cred.amount_credit_remain,\n }\n return vals\n\n def allocate(self, ids, context={}):\n obj = self.browse(ids)[0]\n for line in obj.lines:\n if not line.amount:\n continue\n vals = {\n \"credit_id\": obj.credit_id.id,\n \"invoice_id\": line.invoice_id.id,\n \"amount\": line.amount,\n }\n get_model(\"account.credit.alloc\").create(vals)\n return {\n \"next\": {\n \"name\": \"view_invoice\",\n \"active_id\": obj.credit_id.id,\n },\n \"flash\": \"Credit allocated.\",\n }\n\n def onchange_amount(self, context={}):\n data = context[\"data\"]\n amt = 0\n for line in data[\"lines\"]:\n amt += line.get(\"amount\", 0)\n data[\"amount_alloc\"] = amt\n data[\"amount_remain\"] = data[\"amount_credit\"] - amt\n return 
data\n\nAllocWizard.register()\n","sub_path":"day10/models/netforce_account/account_alloc_wizard.py","file_name":"account_alloc_wizard.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636675876","text":"#python 3.5, 2016-08-11, v1.1.1, enter a directory to make filenames lowercase\n#ignores .py and .db files as well as directories\n\nimport os\n\nfte = ['.py','.db'];\n\ndirectory = input('directory ')\nte = '2'\nwhile te != '':\n te = input('exclude ')\n if te == '':\n break\n if te[0:1] != '.':\n te = '.' + te\n fte.append(te)\n\nfor file in os.listdir(directory):\n filename, fileext = os.path.splitext(directory + '\\\\' + file)\n if not os.path.isdir(directory + '\\\\' + file) and fileext not in fte:\n os.rename(directory+'\\\\'+file, directory+'\\\\'+filename.lower()+fileext.lower())\n print(filename + fileext)\n\ninput('\\n\\nPress enter to exit.')\n","sub_path":"filelowercase.py","file_name":"filelowercase.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28400327","text":"from sanic.response import json\nfrom sanic.exceptions import ServerError\nfrom sanic import Blueprint\nfrom sanic.views import HTTPMethodView\n\nfrom dateutil.parser import parse\n\nfrom krx_dataserver.models import (\n asset,\n dailyprice,\n)\n\n\nbp_dailyprices = Blueprint('bp_dailyprice', url_prefix='/dailyprices')\n\n\n@bp_dailyprices.route('/')\nasync def get_dailyprices_list(request):\n query = dailyprice.select() \\\n .distinct(dailyprice.c.asset)\n scraped = await request.app.db.fetch_all(query=query)\n\n query = asset.select() \\\n .where(asset.c.code.in_(s['asset'] for s in scraped))\n rows = await request.app.db.fetch_all(query=query)\n\n return json({row['code']:row['name'] for row in rows})\n\n\n@bp_dailyprices.route('///')\nasync def get_dailyprices(request, asset, start, stop):\n # need to check the existence of asset\n try:\n start = parse(start).date()\n stop = parse(stop).date()\n except ValueError:\n # 416 Range Not Satisfiable\n raise ServerError('Date error', status_code=416)\n\n query = dailyprice.select() \\\n .where(dailyprice.c.asset == asset) \\\n .where(dailyprice.c.date >= start) \\\n .where(dailyprice.c.date < stop) \\\n .order_by(dailyprice.c.date)\n\n rows = await request.app.db.fetch_all(query=query)\n\n return json({row['date']: (\n row['open'],\n row['high'],\n row['low'],\n row['close'],\n row['volume'],\n ) for row in rows})\n\n\nclass MarketDataDailyPriceView(HTTPMethodView):\n\n async def get(self, request, asset):\n query = dailyprice.select()\n row = await request.app.db.fetch_one(query=query)\n print(row['asset'])\n return json({'received': True, 'asset': asset})\n\n def post(self, request, asset):\n pass\n\n\ndef setup_routes(app):\n\n @app.route('/hello')\n def hello(request):\n return json({'hello': 'world'})\n\n app.add_route(\n MarketDataDailyPriceView.as_view(),\n '/marketdata/dailyprices/')\n\n app.blueprint(bp_dailyprices)\n","sub_path":"krx-dataserver/krx_dataserver/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"62381774","text":"\ndef cond1(a, b, c):\n\tsum = a + b\n\tmul = a * b\n\n\tif a==0 and b!=0:\n\t\treturn b\n\telif b==0 and a!=0:\n\t\treturn a\n\telif a==0 and b==0:\n\t\tprint(\"Invalid numbers.\")\n\n\tif c==True:\n\t\treturn 
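filelowercase.py above has a path bug: os.path.splitext is applied to the already-joined path, so filename contains the directory, and the rename target becomes directory + '\\' + lowercased-full-path + extension, an invalid, doubled path that also lowercases the directory. Splitting the bare file name fixes it:

# Split only the bare name; if `filename` includes the directory, the
# os.rename target doubles the path and lowercases the directory too.
for file in os.listdir(directory):
    filename, fileext = os.path.splitext(file)
    full = os.path.join(directory, file)
    if not os.path.isdir(full) and fileext not in fte:
        os.rename(full, os.path.join(directory, filename.lower() + fileext.lower()))
        print(filename + fileext)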
sum\n\telse:\n\t\treturn mul\n\nprint (cond1(4,4,True))","sub_path":"cond2.py","file_name":"cond2.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"513816737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains testing utilities used throughout test scripts, including\ncommon functions and partial classes.\n\n\"\"\"\n\nimport os\nimport requests\nimport unittest\nimport numpy as np\nimport json\nimport pandas as pd\n\ndef get_root_path():\n '''Returns the path to the root repository directory.\n \n '''\n \n testing_path = os.path.dirname(os.path.realpath(__file__));\n root_path = os.path.split(testing_path)[0]\n \n return root_path;\n\ndef clean_up(dir_path):\n '''Cleans up the .fmu, .mo, .txt, .mat, .json files from directory.\n\n Parameters\n ----------\n dir_path : str\n Directory path to clean up\n \n '''\n\n files = os.listdir(dir_path)\n for f in files:\n if f.endswith('.fmu') or f.endswith('.mo') or f.endswith('.txt') or f.endswith('.mat') or f.endswith('.json'):\n os.remove(os.path.join(dir_path, f))\n \ndef run_tests(test_file_name):\n '''Run tests and save results for specified test file.\n \n Parameters\n ----------\n test_file_name : str\n Test file name (ends in .py)\n \n '''\n\n # Load tests\n test_loader = unittest.TestLoader()\n suite = test_loader.discover(os.path.join(get_root_path(),'testing'), pattern = test_file_name)\n num_cases = suite.countTestCases()\n # Run tests\n print('\\nFound {0} tests to run in {1}.\\n\\nRunning...'.format(num_cases, test_file_name))\n result = unittest.TextTestRunner(verbosity = 1).run(suite);\n # Parse and save results\n num_failures = len(result.failures)\n num_errors = len(result.errors)\n num_passed = num_cases - num_errors - num_failures\n log_json = {'TestFile':test_file_name, 'NCases':num_cases, 'NPassed':num_passed, 'NErrors':num_errors, 'NFailures':num_failures, 'Failures':{}, 'Errors':{}}\n for i, failure in enumerate(result.failures):\n log_json['Failures'][i]= failure[1]\n for i, error in enumerate(result.errors):\n log_json['Errors'][i]= error[1]\n log_file = os.path.splitext(test_file_name)[0] + '.log'\n with open(os.path.join(get_root_path(),'testing',log_file), 'w') as f:\n json.dump(log_json, f)\n \nclass partialTimeseries(object):\n '''This partial class implements common API testing timeseries data.\n \n '''\n \n def compare_ref_timeseries_df(self, df, ref_filepath):\n '''Compare a timeseries dataframe to a reference csv.\n \n Parameters\n ----------\n df : pandas DataFrame\n Test dataframe with \"time\" as index.\n ref_filepath : str\n Reference file path relative to testing directory.\n \n Returns\n -------\n None\n \n '''\n \n # Check time is index\n assert(df.index.name == 'time')\n # Perform test\n if os.path.exists(ref_filepath):\n # If reference exists, check it\n df_ref = pd.read_csv(ref_filepath, index_col='time') \n for key in df.columns:\n y_test = self.create_test_points(df[key]).get_values()\n y_ref = self.create_test_points(df_ref[key]).get_values()\n results = self.check_trajectory(y_test, y_ref)\n self.assertTrue(results['Pass'], '{0} Key is {1}.'.format(results['Message'],key))\n else:\n # Otherwise, save as reference\n df.to_csv(ref_filepath)\n \n return None\n \n def check_trajectory(self, y_test, y_ref):\n '''Check a numeric trajectory against a reference with a tolerance.\n \n Parameters\n ----------\n y_test : list-like of numerics\n Test trajectory\n y_ref : list-like of numerics\n Reference 
trajectory\n \n Returns\n -------\n result : dict\n Dictionary of result of check.\n {'Pass' : bool, True if ErrorMax <= tol, False otherwise.\n 'ErrorMax' : float or None, Maximum error, None if fail length check\n 'IndexMax' : int or None, Index of maximum error,None if fail length check\n 'Message' : str or None, Message if failed check, None if passed.\n }\n \n '''\n \n # Set tolerance\n tol = 1e-3\n # Initialize return dictionary\n result = {'Pass' : True,\n 'ErrorMax' : None,\n 'IndexMax' : None,\n 'Message' : None}\n # First, check that trajectories are same length\n if len(y_test) != len(y_ref):\n result['Pass'] = False\n result['Message'] = 'Test and reference trajectory not the same length.'\n else:\n # Initialize error arrays\n err_abs = np.zeros(len(y_ref))\n err_rel = np.zeros(len(y_ref))\n err_fun = np.zeros(len(y_ref))\n # Calculate errors\n for i in range(len(y_ref)):\n # Absolute error\n err_abs[i] = np.absolute(y_test[i] - y_ref[i])\n # Relative error\n if (abs(y_ref[i]) > 10 * tol):\n err_rel[i] = err_abs[i] / abs(y_ref[i])\n else:\n err_rel[i] = 0\n # Total error\n err_fun[i] = err_abs[i] + err_rel[i]\n # Assess error\n err_max = max(err_fun);\n i_max = np.argmax(err_fun);\n if err_max > tol:\n result['Pass'] = False\n result['ErrorMax'] = err_max,\n result['IndexMax'] = i_max,\n result['Message'] = 'Max error ({0}) in trajectory greater than tolerance ({1}) at index {2}. y_test: {3}, y_ref:{4}'.format(err_max, tol, i_max, y_test[i], y_ref[i])\n \n return result\n \n def create_test_points(self, s,n=500):\n '''Create interpolated points to test of a certain number.\n \n Useful to reduce number of points to test and to avoid failed tests from\n event times being slightly different.\n \n Parameters\n ----------\n s : pandas Series\n Series containing test points to create, with index as time floats.\n n : int, optional\n Number of points to create\n Default is 500\n \n Returns\n -------\n s_test : pandas Series\n Series containing interpolated data \n \n '''\n \n # Get data\n data = s.get_values()\n index = s.index.values\n # Make interpolated index\n t_min = index.min()\n t_max = index.max()\n t = np.linspace(t_min, t_max, n)\n # Interpolate data\n data_interp = np.interp(t,index,data)\n # Use at most 8 significant digits\n data_interp = [ float('{:.8g}'.format(x)) for x in data_interp ]\n # Make Series\n s_test = pd.Series(data=data_interp, index=t)\n \n return s_test\n\nclass partialTestAPI(partialTimeseries):\n '''This partial class implements common API tests for test cases.\n \n References to self attributes for the tests should be set in the setUp \n method of the particular testclass test. 
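Restating the pointwise criterion in check_trajectory above: err[i] = |y_test[i] - y_ref[i]|, with err[i]/|y_ref[i]| added whenever |y_ref[i]| > 10*tol, and the check fails when max(err) > tol = 1e-3. The same test vectorized (assuming y_test and y_ref are numpy arrays), purely as a compact equivalent of the loop:

# Vectorized equivalent of the loop in check_trajectory: total error is
# absolute error plus relative error, the latter only where the
# reference is comfortably away from zero.
err_abs = np.absolute(y_test - y_ref)
err_rel = np.zeros_like(err_abs)
mask = np.absolute(y_ref) > 10 * tol
err_rel[mask] = err_abs[mask] / np.absolute(y_ref[mask])
passed = (err_abs + err_rel).max() <= tol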
They are:\n\n url : str\n URL to deployed testcase.\n name_ref : str\n Name given to test\n inputs_ref : list of str\n List of names of inputs\n measurements_ref : list of str\n List of names of measurements\n step_ref : numeric\n Default simulation step\n \n '''\n \n def test_get_name(self):\n '''Test getting the name of test.\n \n '''\n\n name = requests.get('{0}/name'.format(self.url)).json()\n self.assertEqual(name, self.name_ref)\n \n def test_get_inputs(self):\n '''Test getting the input list of tests.\n \n '''\n \n inputs = requests.get('{0}/inputs'.format(self.url)).json()\n self.assertEqual(len(inputs), len(self.inputs_ref))\n for inp in inputs:\n self.assertTrue(inp in self.inputs_ref)\n self.assertTrue(inputs[inp]['Unit'] == self.inputs_ref[inp]['Unit'])\n self.assertTrue(inputs[inp]['Description'] == self.inputs_ref[inp]['Description'])\n self.assertTrue(inputs[inp]['Minimum'] == self.inputs_ref[inp]['Minimum'])\n self.assertTrue(inputs[inp]['Maximum'] == self.inputs_ref[inp]['Maximum'])\n\n def test_get_measurements(self):\n '''Test getting the measurement list of test.\n \n '''\n\n measurements = requests.get('{0}/measurements'.format(self.url)).json()\n self.assertEqual(len(measurements), len(self.measurements_ref))\n for measurement in measurements:\n self.assertTrue(measurement in self.measurements_ref)\n self.assertTrue(measurements[measurement]['Unit'] == self.measurements_ref[measurement]['Unit'])\n self.assertTrue(measurements[measurement]['Description'] == self.measurements_ref[measurement]['Description'])\n self.assertTrue(measurements[measurement]['Minimum'] == self.measurements_ref[measurement]['Minimum'])\n self.assertTrue(measurements[measurement]['Maximum'] == self.measurements_ref[measurement]['Maximum'])\n\n def test_get_step(self):\n '''Test getting the communication step of test.\n \n '''\n\n step = requests.get('{0}/step'.format(self.url)).json()\n self.assertEqual(step, self.step_ref)\n \n def test_set_step(self):\n '''Test setting the communication step of test.\n \n '''\n\n step = 101\n requests.put('{0}/step'.format(self.url), data={'step':step})\n step_set = requests.get('{0}/step'.format(self.url)).json()\n self.assertEqual(step, step_set)\n requests.put('{0}/step'.format(self.url), data={'step':self.step_ref})\n \n def test_reset(self):\n '''Test reseting of test.\n \n '''\n\n requests.put('{0}/reset'.format(self.url))\n \n def test_advance_no_data(self):\n '''Test advancing of simulation with no input data.\n\n This is a basic test of functionality. \n Tests for advancing with overwriting are done in the example tests.\n\n '''\n\n requests.put('{0}/reset'.format(self.url))\n y = requests.post('{0}/advance'.format(self.url), data=dict()).json()\n for key in y.keys():\n self.assertAlmostEqual(y[key], self.y_ref[key], places=3)\n\n def test_advance_false_overwrite(self):\n '''Test advancing of simulation with overwriting as false.\n\n This is a basic test of functionality. 
\n Tests for advancing with overwriting are done in the example tests.\n\n '''\n\n requests.put('{0}/reset'.format(self.url))\n if self.name == 'testcase1':\n u = {'oveAct_u':0, 'oveAct_activate':1500}\n elif self.name == 'testcase2':\n u = {'oveTSetRooHea_activate':0, 'oveTSetRooHea_u':273.15+22}\n requests.put('{0}/reset'.format(self.url))\n y = requests.post('{0}/advance'.format(self.url), data=u).json()\n for key in y.keys():\n self.assertAlmostEqual(y[key], self.y_ref[key], places=3)\n\n def test_get_forecast_default(self):\n '''Check that the forecaster is able to retrieve the data.\n \n Default forecast parameters for testcase used.\n\n '''\n\n requests.put('{0}/reset'.format(self.url))\n \n # Test case forecast\n forecast = requests.get('{0}/forecast'.format(self.url)).json()\n \n # Set reference file path\n ref_filepath = self.forecast_default_ref\n \n # Check the forecast\n df_forecaster = pd.DataFrame(forecast).set_index('time')\n\n self.compare_ref_timeseries_df(df_forecaster, ref_filepath)\n \n def test_put_and_get_parameters(self):\n '''Check PUT and GET of forecast settings.\n\n '''\n\n # Set forecast parameters\n ret = requests.put('{0}/forecast_parameters'.format(self.url), \n data=self.forecast_parameters_ref)\n \n # Get forecast parameters\n forecast_parameters = requests.get('{0}/forecast_parameters'.format(self.url)).json()\n \n # Check the forecast parameters\n self.assertDictEqual(forecast_parameters, self.forecast_parameters_ref)\n # Check the return on the put request\n self.assertDictEqual(ret.json(), self.forecast_parameters_ref)\n \n def test_get_forecast_with_parameters(self):\n '''Check that the forecaster is able to retrieve the data.\n \n Custom forecast parameters used.\n \n ''' \n\n requests.put('{0}/reset'.format(self.url))\n \n # Set forecast parameters\n requests.put('{0}/forecast_parameters'.format(self.url), \n data=self.forecast_parameters_ref)\n \n # Test case forecast\n forecast = requests.get('{0}/forecast'.format(self.url)).json()\n \n # Set reference file path\n ref_filepath = self.forecast_with_parameters_ref\n \n # Check the forecast\n df_forecaster = pd.DataFrame(forecast).set_index('time')\n\n self.compare_ref_timeseries_df(df_forecaster, ref_filepath)","sub_path":"testing/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":13253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75562965","text":"import pydiffvg\nimport torch\nimport skimage\nimport numpy as np\n\n# Use GPU if available\npydiffvg.set_use_gpu(torch.cuda.is_available())\n\ncanvas_width, canvas_height = 256, 256\n# https://www.w3schools.com/graphics/svg_polygon.asp\npoints = torch.tensor([[120.0, 30.0],\n [ 60.0, 218.0],\n [210.0, 98.0],\n [ 30.0, 98.0],\n [180.0, 218.0]])\npolygon = pydiffvg.Polygon(points = points, is_closed = True)\nshapes = [polygon]\npolygon_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]),\n fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]))\nshape_groups = [polygon_group]\nscene_args = pydiffvg.RenderFunction.serialize_scene(\\\n canvas_width, canvas_height, shapes, shape_groups)\n\nrender = pydiffvg.RenderFunction.apply\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 0, # seed\n None, # background_image\n *scene_args)\n# The output image is in linear RGB space. 
Do Gamma correction before saving the image.\npydiffvg.imwrite(img.cpu(), 'results/single_polygon/target.png', gamma=2.2)\ntarget = img.clone()\n\n# Move the polygon to produce initial guess\n# normalize points for easier learning rate\npoints_n = torch.tensor([[140.0 / 256.0, 20.0 / 256.0],\n [ 65.0 / 256.0, 228.0 / 256.0],\n [215.0 / 256.0, 100.0 / 256.0],\n [ 35.0 / 256.0, 90.0 / 256.0],\n [160.0 / 256.0, 208.0 / 256.0]], requires_grad=True)\ncolor = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True)\npolygon.points = points_n * 256\npolygon_group.color = color\nscene_args = pydiffvg.RenderFunction.serialize_scene(\\\n canvas_width, canvas_height, shapes, shape_groups)\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 1, # seed\n None, # background_image\n *scene_args)\npydiffvg.imwrite(img.cpu(), 'results/single_polygon/init.png', gamma=2.2)\n\n# Optimize for radius & center\noptimizer = torch.optim.Adam([points_n, color], lr=1e-2)\n# Run 100 Adam iterations.\nfor t in range(100):\n print('iteration:', t)\n optimizer.zero_grad()\n # Forward pass: render the image.\n polygon.points = points_n * 256\n polygon_group.color = color\n scene_args = pydiffvg.RenderFunction.serialize_scene(\\\n canvas_width, canvas_height, shapes, shape_groups)\n img = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n t+1, # seed\n None, # background_image\n *scene_args)\n # Save the intermediate render.\n pydiffvg.imwrite(img.cpu(), 'results/single_polygon/iter_{}.png'.format(t), gamma=2.2)\n # Compute the loss function. Here it is L2.\n loss = (img - target).pow(2).sum()\n print('loss:', loss.item())\n\n # Backpropagate the gradients.\n loss.backward()\n # Print the gradients\n print('points_n.grad:', points_n.grad)\n print('color.grad:', color.grad)\n\n # Take a gradient descent step.\n optimizer.step()\n # Print the current params.\n print('points:', polygon.points)\n print('color:', polygon_group.fill_color)\n\n# Render the final result.\npolygon.points = points_n * 256\npolygon_group.color = color\nscene_args = pydiffvg.RenderFunction.serialize_scene(\\\n canvas_width, canvas_height, shapes, shape_groups)\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 102, # seed\n None, # background_image\n *scene_args)\n# Save the images and differences.\npydiffvg.imwrite(img.cpu(), 'results/single_polygon/final.png')\n\n# Convert the intermediate renderings to a video.\nfrom subprocess import call\ncall([\"ffmpeg\", \"-framerate\", \"24\", \"-i\",\n \"results/single_polygon/iter_%d.png\", \"-vb\", \"20M\",\n \"results/single_polygon/out.mp4\"])\n","sub_path":"apps/single_polygon.py","file_name":"single_polygon.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"105363147","text":"\n# coding: utf-8\n\n# In[62]:\n\nimport numpy as np\nimport random\nimport string\nimport re\ndef build_matrix(text, mat):\n text = re.sub(\"[^a-z ]+\", \"\", text)\n #print(text)\n word_list = list(text.split())\n #print(word_list)\n print(text)\n corrupt=[]\n #corrupt_text\n for j in range(len(text)):\n letter=text[j]\n #for i in range(26):\n #if states[i]==str(first_letter):\n if letter==' ':\n corrupt.append(' ')\n continue\n else: \n k= random.uniform(0, 1)\n if k<0.2:\n values= mat[letter]\n random_index=random.randint(0, len(values)-1)\n corrupt.append(values[random_index])\n else:\n corrupt.append(letter)\n print(str(corrupt))\n 
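The corruption pass above is a simple noisy channel: each non-space letter is replaced, with probability 0.2, by a uniformly drawn neighbor from the keyboard-adjacency map mat. Factored into a standalone function it reads:

# Standalone restatement of the corruption model above: with probability
# p a letter is swapped for a random keyboard neighbor listed in `mat`.
def corrupt_text(text, mat, p=0.2):
    out = []
    for ch in text:
        if ch != ' ' and random.uniform(0, 1) < p:
            out.append(random.choice(mat[ch]))
        else:
            out.append(ch)
    return out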
states = []\n state_counter=[0]*26\n total=np.zeros((26, 1))\n start_probability = np.zeros((26, 1))\n transition_probability = np.zeros((26, 26))\n transition_counter = np.zeros((26, 26))\n generation_counter = np.zeros((26, 26))\n generation_probability = np.zeros((26, 26))\n \n for i in range(26):\n states.append(chr(ord('a') + i))\n print(states)\n\n for j in range(len(word_list)):\n word=word_list[j]\n #print(word)\n first_letter=word[0]\n for i in range(26):\n if states[i]==str(first_letter):\n #print('hi')\n state_counter[i]=state_counter[i]+1\n for i in range(26):\n start_probability[i]=float(state_counter[i]/26)\n print(start_probability)\n #calculate transition probability\n for j in range(len(word_list)):\n word=word_list[j]\n for i in range(len(word)-1):\n prev=states.index(word[i])\n nextl=states.index(word[i+1])\n transition_counter[prev][nextl]=transition_counter[prev][nextl]+1\n \n \n for i in range(26):\n total=np.sum(transition_counter,axis=1)\n for i in range(26): \n for j in range(26):\n transition_probability[i][j]=float(transition_counter[i][j]/total[i])\n #print(transition_counter) \n transition_probability[np.isnan(transition_probability)] = 0\n #print(transition_probability) \n \n #generation probability\n correct=list(text)\n print(correct)\n print(corrupt)\n for i in range(len(correct)):\n if (correct[i]==\" \"):\n continue\n else:\n r=states.index(correct[i])\n w=states.index(corrupt[i])\n generation_counter[r][w]=generation_counter[r][w]+1\n \n #print(generation_counter)\n for i in range(26):\n total=np.sum(generation_counter,axis=1)\n for i in range(26): \n for j in range(26):\n generation_probability[i][j]=float(generation_counter[i][j]/total[i])\n #implement Viterbi\n emission_probability = np.zeros((26, 26))\n emission_state = np.zeros((26, 1))\n test=list('an ruya kove sinner lpcation')\n for i in range(26):\n #print(generation_probability[i][states.index(test[0])])\n #print(start_probability[i])\n emission_probability[i][states.index(test[0])]= generation_probability[i][states.index(test[0])]*start_probability[i]\n print(emission_probability)\n \n \n \n \n \n\n\n# In[63]:\n\ndef main():\n f=open('text.txt',encoding=\"utf8\") \n text= f.read()\n corrupt_matrix= {'a': ['q','w','x','z','s'],\n 'b': ['c','v','n','n','f','g','h'],\n 'c': ['x','v','s','d','f'],\n 'd': ['e','s','f','x','c'],\n 'e': ['w','s','d','f','r'],\n 'f': ['r','d','c','v','g'],\n 'g': ['t','f','b','v','h'],\n 'h': ['y','g','b','n','j'],\n 'i': ['u','o','j','k'],\n 'j': ['u','i','h','k','n','m'],\n 'k': ['i','j','l','m'],\n 'l': ['o','k','p'],\n 'm': ['n','j','k'],\n 'n': ['j','h','b','m'],\n 'o': ['i','p','k','l'],\n 'p': ['o','l'],\n 'q': ['a','s','w'],\n 'r': ['e','d','f','t'],\n 's': ['w','a','d','z','x'],\n 't': ['r','f','g','y'],\n 'u': ['y','h','j','i'],\n 'v': ['f','c','g','b'],\n 'w': ['q','a','s','e'],\n 'x': ['z','s','d','c'],\n 'y': ['t','g','h','u'],\n 'z': ['a','s','x'],\n }\n \n build_matrix(text,corrupt_matrix)\nmain()\n\n\n# In[56]:\n\n\n\n\n# In[57]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"Assignments/ADBMS_2.py","file_name":"ADBMS_2.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"299343584","text":"import datetime\n\nimport pytest\nfrom tests.utils import file_response\n\nfrom city_scrapers.spiders.chi_mayors_bicycle_advisory_council import (\n ChiMayorsBicycleAdvisoryCouncilSpider\n)\n\ntest_response = 
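Note that the script above stops after filling the first column of emission_probability, so the Viterbi decode it announces is never finished. The standard completion, maximizing over predecessor states at each step and then backtracking, would look roughly like this (a sketch, assuming 1-D start probabilities and observations given as state indices):

# Sketch of the rest of the Viterbi decode: V[j][t] is the best path
# probability ending in state j at position t; `back` stores the argmax
# predecessor for backtracking.
def viterbi_decode(obs, start_p, trans_p, emit_p, states):
    n = len(states)
    V = np.zeros((n, len(obs)))
    back = np.zeros((n, len(obs)), dtype=int)
    for i in range(n):
        V[i][0] = start_p[i] * emit_p[i][obs[0]]
    for t in range(1, len(obs)):
        for j in range(n):
            scores = [V[i][t - 1] * trans_p[i][j] for i in range(n)]
            back[j][t] = int(np.argmax(scores))
            V[j][t] = max(scores) * emit_p[j][obs[t]]
    path = [int(np.argmax(V[:, -1]))]
    for t in range(len(obs) - 1, 0, -1):
        path.append(back[path[-1]][t])
    return [states[i] for i in reversed(path)]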
file_response('files/chi_mayors_bicycle_advisory_council.html')\nspider = ChiMayorsBicycleAdvisoryCouncilSpider()\nparsed_items = [item for item in spider.parse(test_response) if isinstance(item, dict)]\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_name(item):\n assert item['name'] == \"Mayor's Bicycle Advisory Council\"\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_description(item):\n assert item['event_description'] == 'MBAC focuses on a wide range of ' + \\\n 'bicycle issues: safety, education, enforcement, and ' + \\\n 'infrastructure investment. The Council will help identify issues, ' + \\\n 'discuss ideas and set priorities for bicycle planning in Chicago.'\n\n\ndef test_start():\n assert parsed_items[0]['start'] == {\n 'date': datetime.date(2018, 3, 7),\n 'time': datetime.time(15, 0),\n 'note': 'Start at 3 p.m. unless otherwise noted'\n }\n\n\ndef test_end():\n assert parsed_items[0]['end'] == {'date': datetime.date(2018, 3, 7), 'time': None, 'note': ''}\n\n\ndef test_id():\n assert parsed_items[0][\n 'id'\n ] == 'chi_mayors_bicycle_advisory_council/201803071500/x/mayor_s_bicycle_advisory_council'\n\n\ndef test_status():\n assert parsed_items[0]['status'] == 'passed'\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_location(item):\n assert item['location'] == {\n 'address': '121 N LaSalle Dr, Chicago, IL',\n 'name': 'City Hall, Room 1103',\n 'neighborhood': 'Loop',\n }\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_sources(item):\n listing_page = {'url': spider.BASE_URL, 'note': ''}\n archive_page = {'url': spider.BASE_URL + 'mbac-meeting-archives/', 'note': 'documents'}\n assert item['sources'] == [listing_page, archive_page]\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_documents(item):\n doc_types = ['agenda', 'meeting minutes', 'presentations']\n\n if item['start']['date'] == datetime.date(2015, 6, 11):\n doc_types.append('d. 
taylor comments')\n    elif item['start']['date'] == datetime.date(2015, 3, 12):\n        doc_types.remove('presentations')\n    elif item['start']['date'] == datetime.date(2018, 12, 12):\n        doc_types = []\n\n    assert [d['note'] for d in item['documents']] == doc_types\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_all_day(item):\n    assert item['all_day'] is False\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_classification(item):\n    assert item['classification'] == 'Advisory Committee'\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test__type(item):\n    assert item['_type'] == 'event'\n","sub_path":"tests/test_chi_mayors_bicycle_advisory_council.py","file_name":"test_chi_mayors_bicycle_advisory_council.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"456477422","text":"import matplotlib.pyplot as plt\n\n# create the figure object\nfig = plt.figure()\n\n# create the subplots (2 rows, 1 column)\nax1 = fig.add_subplot(2, 1, 1)\nax2 = fig.add_subplot(2, 1, 2)\n\n# x values\nx = list(range(0, 100))\n\n# first y series\ny = [v * v for v in x]\n\n# second y series\nz = [v * v * 2 for v in x]\n\n# line chart (row 1)\nax1.plot(x, y, 'b', label='test1')\n\n# bar chart (row 2)\nax2.bar(x, z, label='test2')\n\nplt.legend(loc='upper left')\nplt.title('test chart')\n\nplt.show()\n","sub_path":"section4/4-7-5.py","file_name":"4-7-5.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"1555750","text":"n = int(input(\"Enter the length of the sequence: \")) # Do not change this line\n# always add the last 3 numbers\n\nsum=0\nnum1=0\nnum2=0\nnum3=1\n\nfor i in range(0, n):\n    num1=num2\n    num2=num3\n    num3=sum\n    sum=num1+num2+num3\n    print(sum)","sub_path":"Sequence.py","file_name":"Sequence.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"8030150","text":"from math import log2\r\nimport sys\r\n\r\nfrom string import punctuation\r\n\r\ndef getInnerData(start, end, text):\r\n    innerText = \"\"\r\n    if text.find(start) != -1:\r\n        startword = text[text.find(start):text.find(end)]\r\n        innerText = startword[len(start):]\r\n    return innerText\r\n\r\n# removes punctuation characters (string.punctuation) from a token\r\ndef strip_punctuation(data):\r\n\treturn ''.join(content for content in data if content not in punctuation)\r\ndef dialog(traindata,testdata):\r\n    #traindata = open(r'C:\\Users\\manju\\Desktop\\Semester1\\NLP\\Assignment4\\DialogAct.train',encoding=\"utf8\")\r\n    dialogtraindata = traindata.read()\r\n    traindata.close()\r\n    #testdata = open(r'C:\\Users\\manju\\Desktop\\Semester1\\NLP\\Assignment4\\DialogAct.test',encoding=\"utf8\")\r\n    dialogtestdata = testdata.read()\r\n    testdata.close()\r\n    val1 = 0\r\n    val2 = 0\r\n    #stop_words = set(stopwords.words('english'))\r\n    trainlabelDict = dict()\r\n    dialogs = list(filter(None, dialogtraindata.split('Advisor:')))\r\n    for i in range(1,len(dialogs)):\r\n        label = getInnerData('[',']',dialogs[i])\r\n        lines = dialogs[i-1].split('\\n')\r\n        for line in lines:\r\n            if(\"Student:\" in line):\r\n                sentence = line[len(\"Student:\"):]\r\n                if label not in trainlabelDict:\r\n                    trainlabelDict[label] = list()\r\n                words = sentence.split(\" \")\r\n                for word in words :\r\n                    word = word.strip('\\n')\r\n                    word = strip_punctuation(word)\r\n                    if word != \"\":\r\n                        trainlabelDict[label].append(word)\r\n\r\n    testLabelDict = dict()\r\n    
testSentenceDict = dict()\r\n    testLinesDict = dict()\r\n    \r\n    dialogs = list(filter(None, dialogtestdata.split('Advisor:')))\r\n    \r\n    for i in range(1,len(dialogs)):\r\n        label = getInnerData('[',']',dialogs[i])\r\n        if('pull-select]' in label):\r\n            label = 'pull-select'\r\n\r\n        testLabelDict[i] =label\r\n        lines = dialogs[i-1].split('\\n')\r\n        testLinesDict[i] = lines\r\n        for line in lines:\r\n            if(\"Student:\" in line):\r\n                sentence = line[len(\"Student:\"):]\r\n                if i not in testSentenceDict:\r\n                    testSentenceDict[i]=list()\r\n                words = sentence.split(\" \")\r\n                for word in words :\r\n                    word = word.strip('\\n')\r\n                    word = strip_punctuation(word)\r\n                    if word != \"\":\r\n                        testSentenceDict[i].append(word)\r\n    \r\n    totalTrainWords = 0\r\n    labelprobdict = dict()\r\n    \r\n    for label in trainlabelDict:\r\n        totalTrainWords += len(trainlabelDict[label])\r\n    for label in trainlabelDict:\r\n        labelprobdict[label] = len(trainlabelDict[label]) / totalTrainWords\r\n    labelDictUnique = dict()\r\n    labelDictUniquenum = dict()\r\n    for label in trainlabelDict:\r\n        labelDictUnique[label] = list()\r\n        for word in trainlabelDict[label]:\r\n            if word not in labelDictUnique[label]:\r\n                labelDictUnique[label].append(word)\r\n        # vocabulary size per label, used as the Laplace-smoothing denominator\r\n        labelDictUniquenum[label] = len(labelDictUnique[label])\r\n    probWordDict = dict()\r\n    \r\n    finaldict = dict()\r\n    \r\n    for label in trainlabelDict.keys():\r\n        for word in trainlabelDict[label]:\r\n            if word not in probWordDict:\r\n                probWordDict[word] = dict()\r\n            if label not in probWordDict[word]:\r\n                probWordDict[word][label] = 0\r\n                value1 = trainlabelDict[label].count(word) + 1\r\n                value2 = len(trainlabelDict[label]) + labelDictUniquenum[label]\r\n                prob = log2(value1/value2)\r\n                probWordDict[word][label] += prob\r\n    \r\n    for dialogCount in testSentenceDict.keys():\r\n        probLabels = dict()\r\n        \r\n        for word in testSentenceDict[dialogCount]:\r\n            if word in probWordDict:\r\n                for label in probWordDict[word].keys():\r\n                    if label not in probLabels:\r\n                        probLabels[label] =0\r\n                    probLabels[label] +=probWordDict[word][label]\r\n        \r\n        maxValue = -sys.float_info.max\r\n        finalsense =''\r\n        for label in probLabels:\r\n            # log-likelihood of the words plus the log prior of the label\r\n            prob = probLabels[label] + log2(labelprobdict[label])\r\n            if prob > maxValue:\r\n                maxValue = prob\r\n                finalsense = label\r\n        finaldict[dialogCount] = finalsense\r\n\r\n    for key in finaldict:\r\n        if key in testLabelDict:\r\n            val2 += 1\r\n            if testLabelDict[key] == finaldict[key]:\r\n                val1 += 1\r\n    accuracy = (val1/val2)\r\n    \r\n    print(\"Accuracy =\",accuracy*100)\r\n    f = open('DialogAct.test.out','w')\r\n    for key in testLinesDict:\r\n        if key in finaldict:\r\n            for line in testLinesDict[key]:\r\n                if(\"Student:\" in line):\r\n                    f.write(line)\r\n                    f.write('\\n')\r\n            f.write(\"Advisor: [\" + finaldict[key]+\"]\\n\")\r\n    f.close()\r\n\r\ndef main():\r\n    input1 = sys.argv[1]\r\n    input2 = sys.argv[2]\r\n    trainfile = open(input1,encoding=\"utf8\")\r\n    testfile = open(input2,encoding=\"utf8\")\r\n    dialog(trainfile,testfile)\r\n    \r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"DialogAct.py","file_name":"DialogAct.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"413524772","text":"import boto3\nimport pyotp
\n\n\nclass OtpInterface:\n    def __init__(self):\n        # Create empty pyotp object\n        self.totp = pyotp.TOTP(pyotp.random_base32(), interval = 300)\n\n    # Returns the pyotp object\n    def getObj(self):\n        return self.totp\n\n    # Function to create pyotp object with an otp changing every 5 minutes. Sends the first OTP to the input phone number using AWS SNS service.\n    def send_otp(self, phone):\n        # Append country code to phone number\n        phone = \"+1\" + phone\n\n        # Create SNS client; boto3 reads the credentials from the environment or\n        # the AWS config files, so secret keys are never hard-coded in source\n        client = boto3.client(\n            \"sns\",\n            region_name=\"us-east-1\"\n        )\n        # Send SMS with current generated OTP\n        client.publish(\n            PhoneNumber=phone,\n            Message=str(self.totp.now())\n        )\n        return 'OTP Sent'\n\n    # Function to verify an OTP\n    def verify_otp(self, otp):\n        if self.totp is not None:\n            if(self.totp.verify(otp)):\n                self.totp = None\n                return 'verified'\n            else:\n                return 'OTP not verified'\n        else:\n            return 'Must send OTP first before verification'","sub_path":"BankingSystem/otp.py","file_name":"otp.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"174187659","text":"# Reverse digits of an integer.\n#\n# Example1: x = 123, return 321\n# Example2: x = -123, return -321\n\n\nclass Solution(object):\n    def reverse(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        y = 0\n        num = []\n        i = 1\n        if x < 0:\n            x = -1 * x\n            i = -1\n\n        while x != 0:\n            mod_x = x % 10\n            num.append(mod_x)\n            x = x // 10  # integer division; x / 10 would yield a float in Python 3\n\n        num_len = len(num)\n        for j in num:\n            y += j*pow(10, num_len-1)\n            num_len -= 1\n\n        # the result must fit in a signed 32-bit integer\n        if y*i < -2**31 or y*i > 2**31 - 1:\n            return 0\n\n        return y*i\n\n# Note:\n# Append last digit to list one by one and then run another loop to create the reversed number.\n","sub_path":"LeetCode/007-E-ReverseInteger.py","file_name":"007-E-ReverseInteger.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"652170456","text":"import os\n\nfrom . 
import storage\n\n\ndef test_get_abspath_fname_with_absolute_path():\n    # Prepare\n    fname = \"/my/absolute/path/myfile\"\n    expected_abspath = fname\n\n    # Execute\n    actual_abspath = storage.get_abspath(fname)\n\n    # Assert\n    assert expected_abspath == actual_abspath\n\n\ndef test_get_abspath_fname_only_filename(output_dir):\n    # Prepare\n    fname = \"myfile\"\n    expected_abspath = os.path.join(output_dir, fname)\n\n    # Execute\n    actual_abspath = storage.get_abspath(fname, output_dir)\n\n    # Assert\n    assert expected_abspath == actual_abspath\n    assert output_dir in actual_abspath\n","sub_path":"xain/helpers/storage_test.py","file_name":"storage_test.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"160836429","text":"# -*- coding:utf8 -*-\nfrom EmailSender import *\nfrom WebsiteAnjuke import *\nimport schedule\nimport time\n\nPRINT_INTERVAL = 5\nREFRESH_INTERVAL = 60\nHOME_SPIDER = None\nOLD_URL = None\n\n\ndef check_and_send():\n    global HOME_SPIDER, OLD_URL, PRINT_INTERVAL\n    PRINT_INTERVAL -= 1\n    if PRINT_INTERVAL == 0:\n        print(time.strftime(\"Note: %Y-%m-%d %H:%M:%S is running\", time.localtime()))\n        PRINT_INTERVAL = 5\n    latest_url = HOME_SPIDER.get_first_url()\n    if latest_url is None:\n        return False\n    # Anjuke added a uniqid field to its URLs, which also has to be filtered out when comparing\n    if OLD_URL is not None and latest_url is not None and latest_url[:50] == OLD_URL[:50]:\n        return False\n    detail_spider = DetailSpider(latest_url)\n    houseinfo = detail_spider.get_houseinfo()\n    if houseinfo is None:\n        return False\n    sender = EmailSender()\n    try:\n        sender.default_login()\n        sender.anjuke_send_house_info(houseinfo)\n        OLD_URL = latest_url\n        print('Note: Newest url is: ' + latest_url[:47])\n    except Exception as e:\n        print('Error: SMTP connected error')\n        print(repr(e))\n        return False\n    return True\n\nload_email_configure()\nHOME_SPIDER = HomeSpider(MEIDI_URL)\ncheck_and_send()\nschedule.every(REFRESH_INTERVAL).seconds.do(check_and_send)\nwhile True:\n    schedule.run_pending()\n    time.sleep(1)\n","sub_path":"code/MainAnjuke.py","file_name":"MainAnjuke.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"394033615","text":"from django.core.cache import get_cache\nfrom functools import wraps\nfrom django.utils.decorators import available_attrs\nimport logging\nfrom django.http import HttpResponse\nimport json\nimport math\n#Get a cache instance that is specific for our plugin\ncache = get_cache('rate_limiting')\n\ndef rate_limit_by_ip(how_many_hits=50, in_how_long=1, exception_list=[], uid=None):\n    \"\"\"\n    Django decorator to limit how often an ip can access a view.\n    Args:\n        how_many_hits :: How many hits are allowed in 'in_how_long' \n                         seconds before the limit is applied\n        in_how_long :: If there are 'how_many_hits' in this many seconds,\n                       BOOM limited\n        exception_list :: A list of IPs to exclude\n        uid :: A value to look for in the request to use as a \n               unique identifier for a request. 
If not set, the\n               requestor's IP address is used\n    Usage:\n        @rate_limit_by_ip\n        def my_view(request):\n            #I am less likely to have excessive database hits due to DDOS or\n            #shitty code\n\n    \"\"\"\n    def decorator(func):\n        @wraps(func, assigned=available_attrs(func))\n        def inner(request, *args, **kwargs):\n            remote_addr = None\n            if uid is None:\n                # fall back to the requestor's IP address as the unique identifier\n                remote_addr = request.META.get('REMOTE_ADDR', None)\n            else:\n                remote_addr = request.REQUEST.get(uid, None)\n                if remote_addr is None:\n                    error = {'error': 'Your request did not include a value for the unique id: ' + uid}\n                    return HttpResponse(json.dumps(error), status=400, content_type=\"application/json\")\n            cache_key = func.__name__ + '_' + str(remote_addr)\n            cache_key = str(cache_key)\n            retVal = None\n            #Allow for exceptions: excluded addresses bypass the rate limit entirely\n            if remote_addr in exception_list:\n                return func(request, *args, **kwargs)\n            count = cache.get(cache_key)\n            #We haven't seen them before\n            if count is None:\n                # Figure out how frequently a hit would have to occur in\n                # order to hit the limit\n                cacheTime = float(in_how_long) / float(how_many_hits)\n                #Put them in the cache\n                #Adjust caching time so we are caching in seconds and not\n                # fractions of seconds\n                cacheTime = math.floor(cacheTime)\n                if cacheTime < 1:\n                    cacheTime = 1\n                cache.set(cache_key, 'cached', cacheTime)\n                #Allow them in to the function\n                retVal = func(request, *args, **kwargs)\n            #We've seen them in the limit time, so sucks to be you\n            else:\n                #429 is the error code for 'Too Many Requests'\n                d = {'error': \"You've hit your rate limit\"}\n                retVal = HttpResponse(json.dumps(d), content_type=\"application/json\", status=429)\n            return retVal\n        return inner\n    return decorator\n\n\n","sub_path":"ratelimit.py","file_name":"ratelimit.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"453919180","text":"from model_3d import *\n# ------------------------ Functions ------------------------------------------#\ndef test():\n    \"\"\"\n    A simple test routine to draw the quadcopter model\n    \"\"\"\n    \n    # Creates a VPython Scene\n    scene = display(title='Quad_Test', x=0, y=0, width=800, height=600, center=(0,0,0),\n            background=(0,0,0), forward=(-1,-1,-1))\n\n    # Inertial Static Reference Frame\n    pointer = arrow(pos=(0,0,0), axis=(10,0,0), shaftwidth=0.1)\n    pointer = arrow(pos=(0,0,0), axis=(0,10,0), shaftwidth=0.1)\n    pointer = arrow(pos=(0,0,0), axis= (0,0,10), shaftwidth=0.1)\n\n    # Instantiate a quadcopter 3d model\n    pos = vector(3,0,3)\n    roll, pitch, yaw = 0., 0., 0.\n    quad = QuadcopterGraphic3d(pos, roll, pitch, yaw, scene, frame_length=3, frame_angle=pi/2)\n    \n    # Run indefinitely\n    while True:\n        # Run 30 times per second\n        rate(30)\n\n        # Set yaw (a half turn per second)\n        yaw = yaw + pi/30\n        quad.set_yaw(yaw)\n\n        # Set roll (a half turn per second)\n        # roll = roll + pi/30\n        # quad.set_roll(roll)\n\n        # Set pitch (a half turn per second)\n        # pitch = pitch + pi/100\n        # quad.set_pitch(pitch)\n\n        # Set position (moves on x direction)\n        pos = pos + vector(0,0,0.04)\n        quad.set_pos(pos)\n\nif __name__ == \"__main__\":\n    test()\n","sub_path":"python/py_quad_control/models/py/test_3d.py","file_name":"test_3d.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"498976130","text":"'''\nA number chain is created by continuously adding the square of the digits in a number 
to form a new number until it has been seen before.\n\nFor example,\n\n44 → 32 → 13 → 10 → 1 → 1\n85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89\n\nTherefore any chain that arrives at 1 or 89 will become stuck in an endless loop. \nWhat is most amazing is that EVERY starting number will eventually arrive at 1 or 89.\n\nHow many starting numbers below ten million will arrive at 89?\n'''\n\n# took a little long, but it gets it done relatively fast ~ 101 seconds\n# should have made a dictionary that marked whether a number goes to 1 or 89, would be much faster ~ 34 seconds (and could be more optimized but want to move on)\n\ndef chain(num):\n    strNum = str(num)\n    while num != 89 and num != 1:\n        n = 0\n        for c in strNum:\n            n = n + int(c)**2\n        num = n\n        strNum = str(num)\n    return num\n\ndef slow():\n    count = 0\n    for x in range(1,10000001):\n        if chain(x) == 89:\n            count = count + 1\n    print(count)\n\ndef fast():\n    count = 0\n    nums = {}\n    for x in range(1,10000001):\n        num = x\n        strNum = str(x)\n        while num != 89 and num != 1:\n            n = 0\n            for c in strNum:\n                n = n + int(c)**2\n            num = n\n            # memoisation: jump straight to the known endpoint if this number was seen before\n            if num in nums:\n                num = nums[num]\n            strNum = str(num)\n        if num == 89:\n            nums[x] = 89\n            count = count + 1\n        else:\n            nums[x] = 1\n\n    print(count)\n\ndef main():\n    # slow()\n    fast()\n\nif __name__ == '__main__':\n    import time\n    start = time.time()\n    main()\n    print(\"time:\", time.time()-start)","sub_path":"lvl_03/prob_092/prob92.py","file_name":"prob92.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"531899096","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import *\n\nx_size = 12\ny_size = 4\n\nclass Agent:\n    def __init__(self):\n        self.init()\n\n    def init(self):\n        self.x = 0\n        self.y = 0\n        self.action_list = [ self.up, self.down, self.left, self.right ]\n        self.actions = len(self.action_list)\n\n    def is_cliff(self):\n        return self.y == 0 and self.x != 0 and self.x != x_size - 1\n\n    def is_goal(self):\n        return self.y == 0 and self.x == x_size - 1\n\n    def get_bonus(self):\n        if self.is_goal():\n            return 0\n        if self.is_cliff():\n            return -100\n        return -1\n\n    def up(self):\n        y = self.y + 1\n        if y >= y_size:\n            y = y_size - 1\n        self.y = y\n        return self.get_bonus() \n\n    def down(self):\n        y = self.y - 1\n        if y < 0:\n            y = 0\n        self.y = y\n        return self.get_bonus() \n\n    def left(self):\n        x = self.x - 1\n        if x < 0:\n            x = 0\n        self.x = x\n        return self.get_bonus() \n\n    def right(self):\n        x = self.x + 1\n        if x >= x_size:\n            x = x_size - 1\n        self.x = x\n        return self.get_bonus() \n\n    def get_state(self):\n        return self.actions * (self.x + x_size * self.y)\n\ndef e_greedy_action(state, Q, actions, epsilon):\n    if random() <= epsilon:\n        return randint(actions)\n    else:\n        return np.argmax(Q[state:state+actions])\n\ndef learningTD(episodes, alpha=0.1, gamma=1, epsilon=0.1):\n    agent = Agent()\n\n    action_list = agent.action_list\n    actions = agent.actions\n\n    state_size = x_size * y_size * actions\n    Q = np.zeros(state_size)\n\n    array_R = []\n    for episode in range(episodes):\n        R = 0\n        while True:\n            agent.init()\n            s = agent.get_state()\n            a = e_greedy_action(s, Q, actions, epsilon)\n            print(\"a:\",a)\n\n            sa = s + a\n\n            while not agent.is_goal() and not agent.is_cliff():\n                r = action_list[a]()\n\n                R += r\n                s1 = agent.get_state()\n                a1 = e_greedy_action(s1, Q, actions, epsilon)\n                sa1 = s1 + a1\n\n                Q[sa] += alpha * (r + gamma * Q[sa1] - Q[sa])\n\n                sa = sa1\n                a = a1\n\n            print(agent.x, 
agent.y)\n            if agent.is_goal():\n                break\n\n        array_R.append(R)\n        print(\"TD episode: {}, R: {}\".format(episode, R))\n\n    return array_R\n\n\ndef learningQ(episodes, alpha=0.1, gamma=1, epsilon=0.1):\n    agent = Agent()\n\n    action_list = agent.action_list\n    actions = agent.actions\n\n    state_size = x_size * y_size * actions\n    Q = np.zeros(state_size)\n\n    array_R = []\n    for episode in range(episodes):\n        R = 0\n        while True:\n            agent.init()\n            s = agent.get_state()\n\n\n            while not agent.is_goal() and not agent.is_cliff():\n                a = e_greedy_action(s, Q, actions, epsilon)\n                sa = s + a\n\n                r = action_list[a]()\n                R += r\n\n                s1 = agent.get_state()\n\n                Q[sa] += alpha * (r + gamma * np.max(Q[s1:s1+actions]) - Q[sa])\n\n                s = s1\n\n            if agent.is_goal():\n                break\n\n        array_R.append(R)\n        print(\"Q episode: {}, R: {}\".format(episode, R))\n\n    return array_R\n\n\nepisodes = 1000\n\nR_TD = learningTD(episodes)\nx_TD = np.arange(len(R_TD))\nplt.plot(x_TD, R_TD, label=\"TD\")\n\nR_Q = learningQ(episodes)\nx_Q = np.arange(len(R_Q))\nplt.plot(x_Q, R_Q, label=\"Q\")\n\nplt.ylim(-200, 0)\nplt.legend()\n\nplt.show()\n","sub_path":"cliff.py","file_name":"cliff.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"621112093","text":"from keras.objectives import *\nimport keras.backend as K\nimport tensorflow as tf\n\n# Softmax cross-entropy loss function for segmentation\ndef softmax_sparse_crossentropy_ignoring_last_label(labels, x):\n    x = K.reshape(x, (-1, K.int_shape(x)[-1]))\n    log_softmax = tf.nn.log_softmax(x)\n\n    labels = K.one_hot(tf.cast(K.flatten(labels), tf.int32), K.int_shape(x)[-1]+1)\n    # drop the last (\"ignore\") label channel; tf.stack/tf.unstack replace the removed tf.pack/tf.unpack\n    labels = tf.stack(tf.unstack(labels, axis=-1)[:-1], axis=-1)\n\n    cross_entropy = -K.sum(labels * log_softmax, axis=1)\n    cross_entropy_mean = K.mean(cross_entropy)\n\n    return cross_entropy_mean\n","sub_path":"utils/loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"465476935","text":"from flask import Flask, render_template, request\nimport pysolr\nimport json\nimport urllib.request\nimport jwt\nfrom article import Article\napp = Flask(__name__)\n__CORE_NAME__ = 'article'\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/search')\ndef search():\n    return render_template('search.html')\n\n@app.route('/result',methods = ['POST', 'GET'])\ndef result():\n    if request.method == 'POST':\n        Url = request.form['Url']\n        article = Article()\n        article.addUrl(Url)\n        #article.download(Url)\n        #article.set_summary()\n        #article.set_category()\n        #article.add_solr()\n\n        result = {'title': article.get_title(),\n                  'text': article.get_text(),\n                  'category': article.get_category(),\n                  'thumbnail': article.get_thumbnailUrl(),\n                  'summary': article.get_summary()\n                  }\n        return render_template(\"result.html\",result = result)\n\n\n\n@app.route('/searchresult',methods = ['POST', 'GET'])\ndef searchResult():\n    if request.method == 'POST':\n        Url = request.form['Url']\n        search_term = Url.replace(' ', \"+\")\n        search_url = \"http://localhost:8983/solr/{0}/select?q={1}&wt=json&indent=true\".format(__CORE_NAME__,search_term)\n        search_resp = urllib.request.urlopen(search_url)\n        result = json.loads(search_resp.read())\n        result_response=result['response']['docs']\n        print(result_response)\n\n        #encoded = jwt.encode(result, 'secret', algorithm='HS256')\n        
#return json.dumps(encoded.decode(encoding=\"utf-8\"))\n        #\n\n        return render_template(\"searchresult.html\",result = result_response)\n\nif __name__ == '__main__':\n    app.run(debug = True)","sub_path":"web/web2.py","file_name":"web2.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"486699736","text":"import logging\nimport redis\nfrom flask import Flask, abort, make_response\nfrom json import dumps\nfrom re import fullmatch\n\nfrom sinking.cache.cache import RedisCache\nfrom sinking.utils import config\n\n\nerrors = {\n    '0-general': {\n        '1-unknown': '01:Internal or unknown error'\n    },\n    '1-sink-specific': {\n        '1-not-found': '11:No sink found with this ID'\n    },\n    '2-batch': {\n        '1-not-found': '21:No sink found with those IDs'\n    }\n}\n\n\ndef res(data, error=None):\n    response = make_response(dumps({\n        'success': error is None,\n        'error': error,\n        'data': data\n    }))\n    response.headers['Content-Type'] = 'application/json'\n    return response\n\n\ndef setup_routes(app, cache):\n    sink_id_regex = config.rules['collectors']['schema']['schema']['sink']['regex']\n\n    @app.route('/s/')\n    def list_sinks():\n        sinks = cache.list_keys()\n        return res({\n            'count': len(sinks),\n            'sinks': sinks\n        })\n\n    @app.route('/s/<sink_id>')\n    def get_sink(sink_id):\n        if None is fullmatch(sink_id_regex, sink_id):\n            abort(400)\n\n        data = cache.get(sink_id)\n        if data is None:\n            return res(data, errors['1-sink-specific']['1-not-found'])\n        return res(data)\n\n    @app.route('/batch/<sink_ids>')\n    def batch_sinks(sink_ids):\n        # If any id is not of the right format, crash\n        ids = sink_ids.split(',')\n        if any([\n            fullmatch(sink_id_regex, x) is None\n            for x in ids\n        ]):\n            abort(400)\n\n        result = {\n            'success': {},\n            'failure': {}\n        }\n        for sink_id in ids:\n            data = cache.get(sink_id)\n            if data is None:\n                result['failure'][sink_id] = errors['1-sink-specific']['1-not-found']\n            else:\n                result['success'][sink_id] = data\n\n        result['total_count'] = len(result['failure']) + len(result['success'])\n\n        if len(result['success']) == 0:\n            return res(result, errors['2-batch']['1-not-found'])\n        return res(result)\n\n\ndef boot(defaults, configuration):\n    log = logging.getLogger(__name__)\n\n    redis_cache = RedisCache(redis.Redis.from_url(\n        (configuration['redis'] if 'redis' in configuration else defaults['REDIS'])\n    ))\n\n    app = Flask(__name__)\n\n    if 'api' in configuration:\n        host = configuration['api']['host'] if 'host' in configuration['api']\\\n            else defaults['API_HOST']\n        port = configuration['api']['port'] if 'port' in configuration['api']\\\n            else defaults['API_PORT']\n    else:\n        host = defaults['API_HOST']\n        port = defaults['API_PORT']\n\n    setup_routes(app, redis_cache)\n    log.info(\"Booting API server at %s:%s\" % (host, port))\n    app.run(host, port)\n\n","sub_path":"sinking/api/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"79728159","text":"#encoding: utf-8\nfrom slimit.lexer import Lexer\n\ndef tokenize_js(code, need_type_info=False):\n    # with open(code, 'r') as f:\n    #     code = f.read()\n    lexer = Lexer()\n    lexer.input(code)\n\n    tokens = []\n    types = []\n    pos = []\n    while True:\n        token = lexer.token()\n        if not token:\n            break\n        tokens.append(token.value)\n        types.append(token.type)\n        pos.append([token.lineno, token.lexpos])\n\n    if need_type_info:\n        return tokens, types, pos\n    else:\n        return tokens, pos\n\n# if __name__==\"__main__\":\n#     
print(\"No code/function passed in, function below is used to show you a case:\")\n# print()\n# CodeExample = open('def.js','r',encoding='utf-8').read()\n# print()\n# tokens, types, pos = tokenize_js(CodeExample, True)\n# print(tokens)\n# print(types)\n# print(pos)","sub_path":"tokenizer/js_tokenizer.py","file_name":"js_tokenizer.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"111416301","text":"import socket\nfrom abbTypedef import robTarget, robInfo\n\n\ntarget = robTarget()\ncmdFlag = -1\n\nwhile True:\n client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n client.connect(('192.168.125.1',1025))\n\n cmdFlag = 2\n client.send(str(cmdFlag).encode('utf-8'))\n data = client.recv(1024)\n\n data = client.recv(1024)\n target.decode(data)\n print(target)\n client.send(\"COPY!\".encode('utf-8'))\n client.close()\n\n client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n client.connect(('192.168.125.1',1025))\n\n cmdFlag = 3\n client.send(str(cmdFlag).encode('utf-8'))\n data = client.recv(1024)\n\n x=input(\"请输入X:\")\n target.set_x(float(x))\n y=input(\"请输入Y:\")\n target.set_y(float(y))\n z=input(\"请输入Z:\")\n target.set_z(float(z))\n q1=input(\"请输入Q1:\")\n target.set_q1(float(q1))\n q2=input(\"请输入Q2:\")\n target.set_q2(float(q2))\n q3=input(\"请输入Q3:\")\n target.set_q3(float(q3))\n q4=input(\"请输入Q4:\")\n target.set_q4(float(q4))\n client.send(target.encode())\n data = client.recv(1024)\n client.close()","sub_path":"src/Python/basicControl.py","file_name":"basicControl.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"553464507","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom urllib.parse import urlencode\nimport json\nfrom jiaozhen.items import JiaozhenItem\n\nclass LiuyanSpider(scrapy.Spider):\n name = 'liuyan'\n base_url = 'https://vp.fact.qq.com/loadmore?'\n start_urls = []\n for i in range(1,15):\n params = { \n 'page': i \n } \n url = base_url + urlencode(params)\n start_urls.append(url)\n \n def parse(self, response):\n resp = json.loads(response.text)\n article_url = 'https://vp.fact.qq.com/article?'\n for item in resp[\"content\"]:\n params = { \n 'id': item[\"id\"] \n } \n yield scrapy.Request(url=article_url+ urlencode(params), callback=self.parse_detail,meta={'tags':item[\"tag\"],'cover':item[\"cover\"],'type':item[\"explain\"]})\n\n def parse_detail(self,response):\n item = JiaozhenItem()\n item['title'] = response.css('body > div.title > h1::text').extract_first()\n item['descrip'] = response.css('body > div.title > p').extract_first()\n item['LiuyanType'] = response.css('body > div.check_content.text > div.check_content_mark > span.mark_total > span.mark_title::text').extract_first()\n answer = response.css('body > div.check_content.text > div.check_content_points>ul>li::text').extract()\n item[\"answer\"] = '\\n'.join(answer)\n item['detail'] = response.css('body > div.question.text').extract_first()\n item['author'] = response.css('body > div.check_content.text > div.check_content_text.check_content_writer::text').extract_first()\n item['tags'] = response.meta['tags']\n item['pic_url'] = response.meta['cover']\n item['type'] = response.meta['type']\n yield 
item\n\n\n","sub_path":"后端/data/较真平台/jiaozhen/jiaozhen/spiders/liuyan.py","file_name":"liuyan.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"138128949","text":"# Import of the libraries needed for the structural calculations, plus a helper function for determining the absolute\n# maximum moment.\nfrom anastruct.material.profile import HEB, HEA\nfrom anastruct.material.units import to_kN, to_kNm2\nfrom anastruct.fem.system import SystemElements\nfrom helpers import maximaal_moment\n\n# Modulus of elasticity of steel.\nE = 210000\n\n# Portal-frame class: holds the properties of a portal frame and the functions to check it.\nclass portaal:\n\n    # Constructor for a portal-frame object. From the given properties and the profiles of the beam and\n    # columns, the properties needed for the calculation are derived. HEA sections are imported from Anastruct.\n    def __init__(self, hoogte, overspanning, hoh, ligger_teller, kolommen_teller):\n        self.hoogte = hoogte\n        self.overspanning = overspanning\n        self.hoh = hoh * 1.25\n\n        self.liggerprofiel = list(HEA)[ligger_teller]\n        self.kolomprofiel = list(HEA)[kolommen_teller]\n\n        self.A_ligger = HEA[list(HEA)[ligger_teller]][\"A\"]\n        self.I_ligger = HEA[list(HEA)[ligger_teller]][\"Iy\"]\n        self.W_ligger = HEA[list(HEA)[ligger_teller]][\"Wy\"]\n        self.It_ligger= HEA[list(HEA)[ligger_teller]][\"It\"] * 10\n        self.Iw_ligger = HEA[list(HEA)[ligger_teller]][\"Iw\"]\n        self.Iz_ligger = HEA[list(HEA)[ligger_teller]][\"Iz\"]\n\n        self.I_kolommen = HEA[list(HEA)[kolommen_teller]][\"Iy\"]\n        self.A_kolommen = HEA[list(HEA)[kolommen_teller]][\"A\"]\n        self.W_kolommen = HEA[list(HEA)[kolommen_teller]][\"Wy\"]\n        self.It_kolommen = HEA[list(HEA)[kolommen_teller]][\"It\"] * 10\n        self.Iw_kolommen = HEA[list(HEA)[kolommen_teller]][\"Iw\"]\n        self.Iz_kolommen = HEA[list(HEA)[kolommen_teller]][\"Iz\"]\n\n        self.A_profiel_ligger = HEA[list(HEA)[ligger_teller]][\"tw\"] * \\\n                                (HEA[list(HEA)[ligger_teller]][\"h\"] - 2 *HEA[list(HEA)[ligger_teller]][\"tf\"])\n\n        self.A_profiel_kolommen = HEA[list(HEA)[kolommen_teller]][\"tw\"] * \\\n                                  (HEA[list(HEA)[kolommen_teller]][\"h\"] - 2 * HEA[list(HEA)[kolommen_teller]][\"tf\"])\n\n        self.EG_profiel_ligger = HEA[list(HEA)[ligger_teller]][\"G\"] / 100\n        self.EG_profiel_kolommen = HEA[list(HEA)[kolommen_teller]][\"G\"] / 100\n\n\n    # Determines the forces on and displacements of the structure. Takes the properties of the portal frame and\n    # the applicable load combination. Returns either the element forces or the displacements.\n    def krachten_en_verplaatsingen_elementen(self, belastingcombi):\n\n        # The load combination is a dict; the applicable load factors are looked up from it.\n        factor_wind = belastingcombi.get('factor veranderlijk wind')\n        factor_permanent = belastingcombi.get('factor permanent')\n        factor_dak_ver = belastingcombi.get('factor veranderlijk dak')\n\n        # Start a structural analysis job: 
standard Anastruct setup.\n        ss = SystemElements()\n\n        # Create the elements for the beam and columns, with the corresponding EA and EI.\n        ss.add_element(location=[[0, 0], [0, self.hoogte]], EA=to_kN(E * self.A_kolommen),\n                       EI=to_kNm2(E * self.I_kolommen))\n        ss.add_element(location=[[0, self.hoogte], [self.overspanning, self.hoogte]], EA=to_kN(E * self.A_ligger),\n                       EI=to_kNm2(E * self.I_ligger))\n        ss.add_element(location=[[self.overspanning, self.hoogte], [self.overspanning, 0]],\n                       EA=to_kN(E * self.A_kolommen), EI=to_kNm2(E * self.I_kolommen))\n\n        # Add the supports.\n        ss.add_support_hinged(node_id=1)\n        ss.add_support_hinged(node_id=4)\n\n        # Variable wind load on the column.\n        ss.q_load(q=- factor_wind * self.hoh, element_id=1)\n\n        # Variable and permanent loads on the beam.\n        ss.q_load(q=-factor_permanent * 2 * self.hoh - factor_permanent * self.EG_profiel_ligger - factor_dak_ver * 1.5\n                    * self.hoh, element_id=2)\n\n        # Permanent loads on the columns (self-weight).\n        ss.point_load(node_id=2, Fz=- factor_permanent * self.EG_profiel_kolommen * self.hoogte)\n        ss.point_load(node_id=3, Fz=- factor_permanent * self.EG_profiel_kolommen * self.hoogte)\n\n        # Solve the structure. The results are stored under elements, which has the structure of a dict.\n        ss.solve()\n        elements = ss.get_element_results()\n\n        # If the function is called for the element forces, which can be seen from the load factors.\n        if factor_wind != 1 and factor_dak_ver != 1:\n\n            # Determine the governing forces per element, stored in an ordered list. The first force\n            # in the list corresponds to the first element of the portal frame.\n            moment = maximaal_moment(elements)\n            dwarskracht = ss.get_element_result_range(\"shear\")\n            normaalkracht = ss.get_element_result_range(\"axial\")\n\n            # The forces per element are now stored in a dict. 
It looks like this:\n            # {\"dwarskracht\": [25.0, 53.9, 7.8], ...}\n            krachten_elementen = {\"dwarskracht\": dwarskracht, \"normaalkracht\": normaalkracht, \"moment\": moment}\n            return krachten_elementen\n\n        # Otherwise the function is called for the displacements.\n        else:\n            # Obtain the relative deflection.\n            element_doorbuiging = ss.get_element_results()\n            element_doorbuiging = element_doorbuiging[1].get('wmin') / self.overspanning\n\n            # Obtain the relative node displacement.\n            knoop_verplaatsingen = ss.get_node_displacements()\n            knoop_verplaatsingen = knoop_verplaatsingen[1][3] / self.hoogte\n\n            return element_doorbuiging, knoop_verplaatsingen\n\n    # Function to check the displacements.\n    def verplaatsingen_controle(self, belastingcombi):\n        element_doorbuiging, knoop_verplaatsing = self.krachten_en_verplaatsingen_elementen(belastingcombi)\n\n        # If both the deflection and the node displacement fail, determine which one is relatively the most\n        # overloaded.\n        if element_doorbuiging > 1 / 250 and knoop_verplaatsing > 1 / 150:\n            if element_doorbuiging / (1 / 250) > knoop_verplaatsing / (1 / 150):\n                return 0\n            else:\n                return 1\n\n        # The deflection does not satisfy the limit.\n        if element_doorbuiging > 1 / 250:\n            return 0\n\n        # The node displacement does not satisfy the limit.\n        if knoop_verplaatsing > 1 / 150:\n            return 1\n\n        # Both satisfy their limits, return True.\n        return True","sub_path":"portaal.py","file_name":"portaal.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26045420","text":"# -*- Mode: Python; py-indent-offset: 4 -*-\n# vim: tabstop=4 shiftwidth=4 expandtab\n#\n# Copyright (C) 2005-2009 Johan Dahlin \n#\n# importer.py: dynamic importer for introspected libraries.\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301\n# USA\n\nfrom __future__ import absolute_import\n\nimport sys\nimport gobject\n\nfrom ._gi import Repository, RepositoryError\nfrom .module import DynamicModule, ModuleProxy\n\n\nrepository = Repository.get_default()\nmodules = {}\n\n\nclass DynamicImporter(object):\n\n # Note: see PEP302 for the Importer Protocol implemented below.\n\n def __init__(self, path):\n self.path = path\n\n def find_module(self, fullname, path=None):\n if not fullname.startswith(self.path):\n return\n\n path, namespace = fullname.rsplit('.', 1)\n if path != self.path:\n return\n try:\n repository.require(namespace)\n except RepositoryError:\n pass\n else:\n return self\n\n def load_module(self, fullname):\n if fullname in sys.modules:\n return sys.modules[fullname]\n\n path, namespace = fullname.rsplit('.', 1)\n\n # Workaround for GObject\n if namespace == 'GObject':\n sys.modules[fullname] = gobject\n return gobject\n\n dynamic_module = DynamicModule(namespace)\n modules[namespace] = dynamic_module\n\n overrides_modules = __import__('gi.overrides', fromlist=[namespace])\n overrides_module = getattr(overrides_modules, namespace, None)\n\n if overrides_module is not None:\n module = ModuleProxy(fullname, namespace, dynamic_module, overrides_module)\n else:\n module = dynamic_module\n\n module.__file__ = '<%s>' % fullname\n module.__loader__ = self\n\n sys.modules[fullname] = module\n\n return module\n\n","sub_path":"gi/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"429809838","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport ml.rl.types as rlt\nimport numpy as np\nimport torch\nfrom ml.rl.replay_memory.circular_replay_buffer import ReplayBuffer\n\n\nclass Sampler(ABC):\n \"\"\"Given scores, select the action.\"\"\"\n\n @abstractmethod\n def sample_action(\n self, scores: Any, possible_action_mask: Optional[Any]\n ) -> rlt.ActorOutput:\n raise NotImplementedError()\n\n @abstractmethod\n def log_prob(self, scores: Any, action: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError()\n\n def update(self) -> None:\n \"\"\" Call to update internal parameters (e.g. 
decay epsilon) \"\"\"\n pass\n\n\n# From preprocessed observation, produce scores for sampler to select action\nScorer = Callable[[Any], Any]\n\n# Transform ReplayBuffer's transition batch to trainer.train\nTrainerPreprocessor = Callable[[Any], rlt.PreprocessedTrainingBatch]\n\n# Transform gym.Env's observation to Scorer's input\nPolicyPreprocessor = Callable[[Any], Any]\n\n\n# Transform sampled action to input to gym.Env.step\nActionPreprocessor = Callable[[rlt.ActorOutput], np.array]\n\n\nObservationType = Any\nRewardType = float\nTerminalType = bool\nPossibleActionsMaskType = Optional[torch.Tensor]\nReplayBufferAddFn = Callable[\n [\n ReplayBuffer,\n ObservationType,\n rlt.ActorOutput,\n RewardType,\n TerminalType,\n PossibleActionsMaskType,\n ],\n None,\n]\n\n# Called in post_step of Agent to train on sampled batch from RB\nReplayBufferTrainFn = Callable[[ReplayBuffer], None]\n\n\n@dataclass\nclass GaussianSamplerScore(rlt.BaseDataClass):\n loc: torch.Tensor\n scale_log: torch.Tensor\n","sub_path":"ml/rl/gym/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"250658520","text":"import sys\nimport os\nimport pygame\n\npygame.init()\n\nsize = width, height = 500, 500\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption('Перемещение героя')\nscreen.fill((255, 255, 255))\nall_sprites = pygame.sprite.Group()\n\nclock = pygame.time.Clock()\n\nplayer = pygame.image.load(\"data/0_Reaper_Man_Throwing_011.png\")\n\nplayer = pygame.transform.scale(player, (150, 150))\n\nplayer = pygame.transform.flip(player, 90, 0)\n\n\n\nif __name__ == '__main__':\n running = True\n\n while running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n screen.fill((255, 255, 255))\n screen.blit(player, (80, 80))\n all_sprites.draw(screen)\n\n pygame.display.flip()\n pygame.quit()\n","sub_path":"testing testing.py","file_name":"testing testing.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"622955959","text":"import time\nfrom Scrabble import *\nimport os\n\nfilePrefix = os.getcwd() + \"/\"\nfileName = filePrefix + files[0]\nprint(fileName)\ndictFile = \"DictTree\"\ndictionary = filePrefix + dictFile\ndictionary = \"/Users/acolby/OneDrive - The Perse School/NEA/Code/Scrabble checker/DictTree\"\nprint(dictionary)\n\n'''\nlines = get_entries(fileName)\n\nprint(\"retrieving dictionary\")\ntimeR1 = time.time()\ndictionary = retrieve_dictionary(dictionary)\ntimeR2 = time.time()\nprint(\"done in {:.5f} seconds\\n\".format(timeR2-timeR1))\n\nrootNode = Trie()\n\nprint(\"converting dictionary into a Trie\")\ntimeT1 = time.time()\nrootNode.store_words(dictionary)\n# adds a self referential pointer to the root Tree object\n# this allows for words to be found at any point in a row\nrootNode.add_child(rootNode)\ntimeT2 = time.time()\nprint(\"done in {} seconds\\n\".format(timeT2-timeT1))\n\npattern = \"...............\"\nhand = \"\"\nhand = pick_tiles(hand)\n\nprint(\"\")\nprint(\"find_words checking for \" + pattern)\nprint(\"with a hand of \" + hand)\ntimeM1 = time.time()\nallMatches = rootNode.fit_Row(pattern,hand)\ntimeM2 = time.time()\nprint(\"{} matches found in {:.5f} seconds\\n\".format(len(allMatches),timeM2-timeM1))\n\nrow = pattern\n\nprint(\"finding the best possible play from matches\")\ntimeX1 = time.time()\nbestMatch = 
check_max_play(allMatches,row,pattern)\ntimeX2 = time.time()\nprint(\"The best match is {} with {} points scored after {:.5f} seconds\\n\".format(bestMatch[0],bestMatch[1],timeX2-timeX1))\n\nprint(\"checking split_pattern functionality\\n\")\nsplitPattern(pattern)\n\nprint(\"\")\nprint(\"creating a new user\")\nplayer1 = Player(\"Edward\")\npassword = \"this_is_my_password\" # of course wouldn't be physically included in the actual code\nprint(\"finding hash for {} with user {}\".format(password,player1.get_name()))\nplayer1.check_admin(password)\n'''\n\ndictionary = retrieve_dictionary(dictionary)\nrootNode = dictionary._rootNode\nrootNode = dictionary.find_child(rootNode,\"x\")[0]\nrootNode = dictionary.find_child(rootNode,\"u\")[0]\ndictionary.print_tree(CurNode=rootNode)\n\nprint(\"Done1\")\n\nsolutions = dictionary.fit_Row(Row = \"....e...r.q....\")\nprint(solutions)\n\nprint(\"Done2\")\n","sub_path":"Scrabble/Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"460061520","text":"from ROOT import TFile\nfrom DataCollection import DataCollection\nfrom Dataset import concatenateAndShuffleDatasets\nfrom trainEvalBDT import *\nimport json\nimport sys\n\nbranch_names = [\n 'lPt1', 'lPt2',\n 'lEta1', 'lEta2',\n 'lPhi1', 'lPhi2',\n 'DeltaR_1', 'DeltaR_b1', 'DeltaR_2', 'DeltaR_b2',\n 'njets', 'nbjets', 'jetDeepCsv_b1',\n 'jetPt1', 'jetEta1', 'jetPhi1',\n 'jetPt2', 'jetEta2', 'jetPhi2',\n 'jetPt3', 'jetEta3', 'jetPhi3',\n 'jetPt4', 'jetEta4', 'jetPhi4',\n 'jetPt5', 'jetEta5', 'jetPhi5',\n 'mW1', 'mtop1',\n 'MET', 'H_t',\n 'I_rel1', 'I_rel2'\n ]\n\n\ninput_file_path = sys.argv[1]\n\nparameter_dict = {}\nwith open( input_file_path ) as f:\n parameter_dict = json.load( f )\n\ninputFile = TFile(\"../newTrees/reducedTrees/goodTreesTotal/trees_total_2018_nominal.root\")\n\nsignalTree = inputFile.Get(\"tree_signal_2018\")\nbkgTree = inputFile.Get(\"tree_background_2018\")\n\n\n#validation and test fractions \nvalidation_fraction = 0.2\ntest_fraction = 0.2\n\n# ensure reproducibility\n\nnp.random.seed(42)\n\nsignal_collection = DataCollection(signalTree, branch_names, validation_fraction, test_fraction, True, 'weight', only_positive_weights = True)\nbackground_collection = DataCollection(bkgTree, branch_names, validation_fraction, test_fraction, False, 'weight', only_positive_weights = True)\n\ntraining_data = concatenateAndShuffleDatasets(signal_collection.training_set, background_collection.training_set)\nvalidation_data = concatenateAndShuffleDatasets(signal_collection.validation_set, background_collection.validation_set)\ntest_data = concatenateAndShuffleDatasets(signal_collection.test_set, background_collection.test_set)\n\nmodel_name = sys.argv[1].replace(\".json\", \"\").replace(\"jsons/\", \"\")\n\ntrainBDT(training_data.samples, training_data.labels, train_weights = training_data.weights, feature_names = branch_names, model_name = model_name, alpha= parameter_dict['alpha'], colsample_bytree= parameter_dict['colsample_bytree'], gamma= parameter_dict['gamma'], learning_rate= parameter_dict['learning_rate'], max_depth= parameter_dict['max_depth'], min_child_weight= parameter_dict['min_child_weight'], number_of_trees= parameter_dict['number_of_trees'], subsample= parameter_dict['subsample'], number_of_threads = 1)\n\n\n# model_name = 'BDT_gen1_9_602020'\n\n# trainBDT(training_data.samples, training_data.labels, train_weights = training_data.weights, feature_names = 
branch_names, model_name = model_name, alpha= 0.24381717415896353, colsample_bytree= 0.5942614980268284, gamma= 0.38452327994920754, learning_rate= 0.20379440652737038, max_depth= 3, min_child_weight= 3.5265307141035, number_of_trees= 3495, subsample= 0.376355753917916, number_of_threads = 1)\n\n\n# trainBDT( training_data.samples, training_data.labels, train_weights = training_data.weights, \n# feature_names = branch_names, model_name = model_name, number_of_trees = 3351, learning_rate = 0.00890837798958, max_depth = 3, min_child_weight = 10.6625443577, subsample = 0.532977100111, colsample_bytree = 0.872556671469, gamma = 0.44060113248, alpha = 0.700171854761, number_of_threads = 1)\n\n\nevalBDT(model_name, signal_collection, background_collection)\n\n#evalBDT_fullData(model_name, signal_collection, background_collection)\n","sub_path":"machineLearning/runBDT.py","file_name":"runBDT.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"80164389","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nfrom typing import Optional, Union, Tuple\nfrom cvnn.layers.core import ComplexLayer\nfrom cvnn.layers.core import DEFAULT_COMPLEX_TYPE\n\n\nclass ComplexUpSampling2D(Layer, ComplexLayer):\n\n def __init__(self, size: Union[int, Tuple[int, int]] = (2, 2),\n data_format: Optional[str] = None, interpolation: str = 'nearest',\n align_corners: bool = False, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):\n \"\"\"\n :param size: Int, or tuple of 2 integers. The upsampling factors for rows and columns.\n :param data_format: string, one of channels_last (default) or channels_first.\n The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape\n (batch_size, height, width, channels) while channels_first corresponds to inputs with shape\n (batch_size, channels, height, width).\n :param interpolation: A string, one of nearest or bilinear.\n :param align_corners: if True, the corner pixels of the input and output tensors are aligned,\n and thus preserving the values at those pixels.\n Example of align coreners: https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/9\n \"\"\"\n self.my_dtype = tf.dtypes.as_dtype(dtype)\n super(ComplexUpSampling2D, self).__init__(dtype=self.my_dtype.real_dtype, **kwargs)\n self.align_corners = align_corners\n if isinstance(size, int):\n self.factor_upsample = (size,) * 2\n else:\n self.factor_upsample = tuple(size) # Python will tell me if this is not possible\n # TODO: Check is tuple of ints and no negative values!\n self.interpolation = interpolation.lower()\n if self.interpolation not in {'nearest', 'bilinear'}:\n raise ValueError('`interpolation` argument should be one of `\"nearest\"` or `\"bilinear\"`.')\n if data_format is None:\n data_format = 'channels_last'\n self.data_format = data_format.lower()\n if self.data_format not in {'channels_first', 'channels_last'}:\n raise ValueError(f'The `data_format` argument must be one of \"channels_first\", \"channels_last\". '\n f'Received: {self.data_format}')\n\n def call(self, inputs, **kwargs):\n if self.data_format == 'channels_last':\n inputs = tf.transpose(inputs, perm=[1, 2, 0, 3])\n elif self.data_format == 'channels_first': # I checked it at init, shall I check again?\n inputs = tf.transpose(inputs, perm=[2, 3, 0, 1])\n else:\n raise ValueError(f'The `data_format` argument must be one of \"channels_first\", \"channels_last\". 
'\n f'Received: {self.data_format}')\n output = self.upsample(inputs=inputs)\n if self.data_format == 'channels_last':\n output = tf.transpose(output, perm=[2, 0, 1, 3])\n elif self.data_format == 'channels_first': # I checked it at init, shall I check again?\n output = tf.transpose(output, perm=[2, 3, 0, 1])\n else:\n raise ValueError(f'The `data_format` argument must be one of \"channels_first\", \"channels_last\". '\n f'Received: {self.data_format}')\n return output\n\n def upsample(self, inputs):\n if inputs.dtype.is_integer: # TODO: Check input is a tensor?\n inputs = tf.cast(inputs, dtype=tf.float32)\n desired_size = [i * o for i, o in zip(inputs.shape, self.factor_upsample)]\n assert len(desired_size) == 2 # The for will do only for the shortest so I should be Ok.\n i_output = tf.reshape(tf.constant([], dtype=inputs.dtype),\n (0, desired_size[1], tf.shape(inputs)[2], tf.shape(inputs)[3]))\n j_output = tf.reshape(tf.constant([], dtype=inputs.dtype), (1, 0, tf.shape(inputs)[2], tf.shape(inputs)[3]))\n for x in range(0, desired_size[0]):\n for y in range(0, desired_size[1]):\n if self.interpolation == 'bilinear':\n to_append = self.bilinear(inputs=inputs, x=x, y=y)\n elif self.interpolation == 'nearest':\n to_append = self.nearest_neighbor(inputs=inputs, x=x, y=y)\n else:\n raise ValueError(f\"Unknown interpolation method {self.interpolation}\")\n to_append = tf.expand_dims(tf.expand_dims(to_append, axis=0), axis=0)\n j_output = tf.concat([j_output, to_append], axis=1)\n i_output = tf.concat([i_output, j_output], axis=0)\n j_output = tf.reshape(tf.constant([], dtype=inputs.dtype), (1, 0, tf.shape(inputs)[2], tf.shape(inputs)[3]))\n return i_output\n\n def nearest_neighbor(self, inputs, x, y):\n # output = tf.repeat(input=tf.repeat(input=inputs, repeats=(self.factor_upsample[0],)*inputs.shape[0],\n # axis=0),\n # repeats=(self.factor_upsample[1],)*inputs.shape[1], axis=1)\n # i_new = tf.cast(tf.floor((inputs.shape[0] * x) / desired_output_shape[0]), dtype=tf.int32)\n # j_new = tf.cast(tf.floor((inputs.shape[1] * y) / desired_output_shape[1]), dtype=tf.int32)\n i_new, j_new = self._get_nearest_neighbour(x, y, inputs)\n to_append = inputs[i_new, j_new]\n # assert i_output.shape == (tuple(deisred_size) + (input.shape[2], input.shape[3]))\n return to_append\n\n def _get_nearest_neighbour(self, x, y, inputs):\n X_small = tf.linspace(0, tf.shape(inputs)[0] - 1, tf.shape(inputs)[0])\n Y_small = tf.linspace(0, tf.shape(inputs)[1] - 1, tf.shape(inputs)[1])\n X, Y = self._to_big(X_small, Y_small)\n i, j = self._get_closest_point(x, y, X, Y)\n return i, j\n\n @staticmethod\n def _get_closest_point(x, y, x_list, y_list):\n x_distance = tf.math.square(x_list - x)\n y_distance = tf.math.square(y_list - y)\n x_closest = tf.argmin(x_distance)\n y_closest = tf.argmin(y_distance)\n return x_closest, y_closest\n\n def bilinear(self, inputs, x, y):\n # Equations\n # https://www.ajdesigner.com/phpinterpolation/linear_interpolation_equation.php\n # Difference with align corners image:\n # https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/9\n # Examples:\n # https://www.omnicalculator.com/math/bilinear-interpolation\n # https://blogs.sas.com/content/iml/2020/05/18/what-is-bilinear-interpolation.html#:~:text=Bilinear%20interpolation%20is%20a%20weighted,the%20point%20and%20the%20corners.&text=The%20only%20important%20formula%20is,x%20%5B0%2C1%5D.\n # Implementations\n # https://stackoverflow.com/questions/8661537/how-to-perform-bilinear-interpolation-in-python\n (q11, q21, q12, q22), 
(x1, x2), (y1, y2) = self._get_q_points(x, y, inputs)\n # There are 3 cases:\n # 1. All 4 q's are different and surround the point\n # 2. There are basically 2 q's (get repeated)\n # 3. All 4 q's are equal\n x2_diff = tf.cast(x2, dtype=inputs.dtype) - tf.cast(x, dtype=inputs.dtype)\n x1_diff = tf.cast(x, dtype=inputs.dtype) - tf.cast(x1, dtype=inputs.dtype)\n y2_diff = tf.cast(y2, dtype=inputs.dtype) - tf.cast(y, dtype=inputs.dtype)\n y1_diff = tf.cast(y, dtype=inputs.dtype) - tf.cast(y1, dtype=inputs.dtype)\n delta_x = tf.cast(x2 - x1, dtype=inputs.dtype)\n delta_y = tf.cast(y2 - y1, dtype=inputs.dtype)\n # The next conditions happens in cases 2 or 3, only one for case 2 and both for case 3.\n # Using the following equations/conditions, the general equation stands for all 3 cases.\n if x1 == x2: # The index was exact, so just make both 1/2\n x2_diff = tf.cast(0.5, dtype=inputs.dtype)\n x1_diff = tf.cast(0.5, dtype=inputs.dtype)\n delta_x = tf.cast(1, dtype=inputs.dtype)\n if y1 == y2:\n y2_diff = tf.cast(0.5, dtype=inputs.dtype)\n y1_diff = tf.cast(0.5, dtype=inputs.dtype)\n delta_y = tf.cast(1, dtype=inputs.dtype)\n t11 = q11 * y2_diff * x2_diff\n t21 = q21 * x1_diff * y2_diff\n t12 = q12 * x2_diff * y1_diff\n t22 = q22 * x1_diff * y1_diff\n to_append = (t11 + t22 + t12 + t21) / (delta_y * delta_x)\n return to_append\n\n def _get_q_points(self, x, y, inputs):\n # 1. Get x and y coordinates of inputs (basically from 0 to the end)\n X_small = tf.linspace(0, tf.shape(inputs)[0] - 1, tf.shape(inputs)[0])\n Y_small = tf.linspace(0, tf.shape(inputs)[1] - 1, tf.shape(inputs)[1])\n # 2. Transform those coordinates into a index equivalent on the new bigger image.\n X, Y = self._to_big(X_small, Y_small)\n # 3. Get the closest points of X to x. x2 is the closest but bigger and x1 is closest but small\n # for example X = [0.5, 2.5 4.5] then\n # If x is 1, then x1 is 0.5 and x2 is 2.5.\n # If x is 2.5, both x1 and x2 are 2.5.\n # If x is 0, then x1 equals x2.\n # If x is 5 then x2 equals x1\n x2, x1, y2, y1 = self._get_4_closest_points(x, y, X, Y)\n # Get the points according to the coordinates obtained.\n q11 = inputs[tf.where(x1 == X)[0][0]][tf.where(y1 == Y)[0][0]]\n q21 = inputs[tf.where(x2 == X)[0][0]][tf.where(y1 == Y)[0][0]]\n q12 = inputs[tf.where(x1 == X)[0][0]][tf.where(y2 == Y)[0][0]]\n q22 = inputs[tf.where(x2 == X)[0][0]][tf.where(y2 == Y)[0][0]]\n return (q11, q21, q12, q22), (x1, x2), (y1, y2)\n\n def _to_big(self, x_index, y_index):\n # TODO: Check index dtype\n # This must use different equations according to align_corners\n # set_trace()\n if self.align_corners:\n x_index = tf.linspace(0, len(x_index) * self.factor_upsample[0] - 1, len(x_index))\n y_index = tf.linspace(0, len(y_index) * self.factor_upsample[1] - 1, len(y_index))\n else:\n x_index = (x_index + 0.5) * self.factor_upsample[0] - 0.5\n y_index = (y_index + 0.5) * self.factor_upsample[1] - 0.5\n # x_index = tf.linspace(self.factor_upsample[0]/2 + 0.5, len(x_index)*self.factor_upsample[0] - algo,\n # len(x_index))\n return x_index, y_index # Return the big index\n\n @staticmethod\n def _get_4_closest_points(x, y, x_list, y_list):\n # This function gets the 4 closests points\n x_distance = x_list - x\n y_distance = y_list - y\n if tf.math.reduce_any(x_distance >= 0):\n x_min = tf.math.reduce_min(tf.boolean_mask(x_distance, x_distance >= 0))\n x2 = tf.where(x_min == x_distance)[0][0]\n else:\n x2 = -1\n if tf.math.reduce_any(x_distance <= 0):\n x_min = tf.math.reduce_min(tf.math.abs(tf.boolean_mask(x_distance, x_distance <= 
0)))\n x1 = tf.where(-x_min == x_distance)[0][0]\n else:\n x1 = -1\n if tf.math.reduce_any(y_distance >= 0):\n y_min = tf.math.reduce_min(tf.boolean_mask(y_distance, y_distance >= 0))\n y2 = tf.where(y_min == y_distance)[0][0]\n else:\n y2 = -1\n if tf.math.reduce_any(y_distance <= 0):\n y_min = tf.math.reduce_min(tf.math.abs(tf.boolean_mask(y_distance, y_distance <= 0)))\n y1 = tf.where(-y_min == y_distance)[0][0]\n else:\n y1 = -1\n if x2 == -1:\n x2 = x1\n if x1 == -1:\n x1 = x2\n if y2 == -1:\n y2 = y1\n if y1 == -1:\n y1 = y2\n return x_list[x2], x_list[x1], y_list[y2], y_list[y1]\n\n def get_real_equivalent(self):\n return ComplexUpSampling2D(size=self.factor_upsample, data_format=self.data_format,\n interpolation=self.interpolation, dtype=self.my_dtype.real_dtype)\n\n\n","sub_path":"cvnn/layers/upsampling.py","file_name":"upsampling.py","file_ext":"py","file_size_in_byte":11926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"151448824","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nimport cv2\nfrom training import do_training\n\n\n\n\ndef get_predictor(dataset, cfg): \n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 #custom testing threshold for model\n cfg.DATASETS.TEST = dataset\n predictor = DefaultPredictor(cfg)\n return predictor\n\ndef do_prediction_and_visualization(dataset, cfg):\n dataset_dicts = DatasetCatalog.get(dataset)\n metadata=MetadataCatalog.get(dataset)\n # label=MetadataCatalog.get('cuboid_dataset_val').thing_classes \n predictor = get_predictor(dataset, cfg)\n # for d in random.sample(dataset_dicts, 50): \n for d in dataset_dicts: \n # if d['annotations']: #display +ve images only\n im = cv2.imread(d[\"file_name\"])\n outputs = predictor(im)\n # print(outputs)\n v = Visualizer(im[:, :, ::-1], \n metadata=metadata,\n scale=1, \n )\n output_instances = outputs['instances'].to('cpu')\n pred = output_instances.pred_classes\n # print(pred.tolist())\n classes=[] \n for i in range(len(pred.tolist())):\n classes.append('cuboid')\n scores = output_instances.scores\n labels = [\"{} {:.0f}%\".format(l, s * 100) for l, s in zip(classes, scores)]\n # gt = v.draw_dataset_dict(d) #display ground truth annotation\n out = v.overlay_instances(boxes=output_instances.pred_boxes, labels=labels, keypoints=output_instances.pred_keypoints)\n final_img = cv2.resize(out.get_image()[:, :, ::-1], (900,900))\n cv2.imshow('Predication: ' + d['image_id'] + '.jpg', final_img)\n k = cv2.waitKey(0)\n if k == 27: #esc key for stop\n cv2.destroyAllWindows()\n break\n cv2.destroyAllWindows() \n # print(output_instances.get_centers())\n \n \nif __name__=='__main__':\n _, cfg = do_training(train=False)\n dataset = \"cuboid_dataset_val\"\n do_prediction_and_visualization(dataset, cfg)\n\n\n\n\n\n\n","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"124239836","text":"# Define a function called is_even that accepts a number as an argument and returns a boolean (true/false) indicating whether that number is even or not (HINT: use the % operator).\n\n# Try calling it with different numbers.\n\ndef is_even(num):\n if (num % 2 == 0):\n return print(\"This number is even\")\n elif (num % 2 == 1):\n return print(\"This number is 
odd\")\n\nis_even(5)\nis_even(4)\n","sub_path":"excercise3.py","file_name":"excercise3.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"314907765","text":"'''\r\nPlotting Utility\r\n\r\n'''\r\nfrom matplotlib import pyplot as plt\r\nfrom pylab import rcParams\r\nrcParams['figure.figsize'] = 12,7\r\nimport numpy as np\r\n\r\n\r\ndef PlotAll(Acoustic,PreciseLog,PreciseEnvelopes,cutoff,macro_env):\r\n for i in np.arange(len(PreciseLog)):\r\n event = PreciseLog.iloc[i]\r\n mStart = event.Start\r\n mStop = event.Stop\r\n pStart1 = event['S_bot time']\r\n pStart2 = event['S_top time']\r\n story = event.Story\r\n subEventNo = event.SubEvents\r\n \r\n c = Acoustic.columns\r\n AcousticCut = Acoustic.iloc[mStart-1000:mStop+1000]\r\n MacroEnvCut = macro_env.iloc[int((mStart-1000)/1000):int((mStop+1000)/1000)]\r\n MacroEnvCut.index = MacroEnvCut.index*1000\r\n MacroEnvCut = MacroEnvCut*8\r\n \r\n ax = AcousticCut[c[0]].plot(c = 'r', alpha = 0.6)\r\n AcousticCut[c[1]].plot(c = 'b', alpha = 0.6, ax = ax)\r\n MacroEnvCut.plot(c='y',alpha=0.6,ax=ax)\r\n plt.plot(pStart1,0,marker='o',c='r',mec='black')\r\n plt.plot(pStart2,0,marker='o',c='blue',mec = 'black')\r\n \r\n pEnv = PreciseEnvelopes.iloc[i]\r\n A = pEnv[c[0]]\r\n B = pEnv[c[1]]\r\n \r\n A.plot(c = 'r')\r\n B.plot(c = 'blue')\r\n \r\n for i in range(len(story)):\r\n if i == subEventNo:\r\n plt.axvline(x=story[i],c='black',alpha=0.7,linewidth=4)\r\n else:\r\n plt.axvline(x=story[i],c='green',alpha=0.7,linewidth=4)\r\n \r\n plt.show()\r\n print('Event Start: ' + str(mStart) + ', Precise Starts: ' + str(pStart1) + ', ' + str(pStart2) + ', diff: ' + str(pStart1-pStart2))\r\n \r\n ","sub_path":"LBNL-November/Javellin_1/PlottingUtility0.py","file_name":"PlottingUtility0.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"439910263","text":"from django.db import models\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=255, unique=True)\n parent = models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = 'Category'\n verbose_name_plural = 'Categories'\n\n def __str__(self):\n return self.name\n\n @staticmethod\n def post_dump(category, parent=None):\n name = category.get('name')\n children = category.get('children')\n \n if parent:\n parent = Category.objects.create(name=name, parent=parent)\n else:\n parent, created = Category.objects.get_or_create(name=name)\n\n if children:\n for child in children:\n Category.post_dump(child, parent=parent)\n\n def get_elder_ids(self):\n if self.parent:\n return self.parent.get_elder_ids() + [self.parent.id]\n return []\n\n def get_children(self):\n return self.children.all()\n\n def get_parents(self):\n ids = self.get_elder_ids()\n return Category.objects.filter(id__in=ids)\n\n def get_siblings(self):\n if self.parent:\n return Category.objects.filter(parent_id=self.parent.id).exclude(id=self.id)\n return Category.objects.none()\n \n\n\n","sub_path":"main_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"483398090","text":"#imports\nimport sys\nsys.path.append('C:/Users/Owner/Documents/0Senior Uk/CE599/ce599-s17/18-Design and Classes/transport')\n\n#calling the cars import\nfrom Cars import 
Car\n\nif __name__==\"__main__\":\n\t\n\tprint('first car')\n\tcary= Car(color= 'yellow', location=[0,0],direction='F')\n\tcary.printcar()\n\tcary.go(2,'forward')\n\tcary.turn_left()\n\tcary.go(1,'forward')\n\tcary.printcar()\n\n\t\nfrom Cars import Car\n\n\nCarg=Car(color= 'green', location=[0,0],direction='F')\nCarg.printcar()\nCarg.left_turn()\nCarg.go(1,'forward')\nCarg.right_turn()\nCarg.go(2,'forward')\nCarg.printcar()\n\t\nprint('Are you a Packers fan for picking yellow and green cars?')","sub_path":"18-Design and Classes/move_a_car.py","file_name":"move_a_car.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346407749","text":"def check(List):\n    for i in range(0,len(List)-1):\n        if List[i]==List[i+1]and List[i]!=0:\n            return True\n    return False\n\nNeq=int(input())\nfor i in range(0,Neq):\n    l=int(input())\n    temp=input().split(\" \")\n    List=[]\n    for item in temp:\n        List.append(int(item))\n    x=0\n    Zero=0\n    while(check(List)):\n        while (x < len(List) - 1):\n            if List[x] == List[x + 1] and List[x] != 0:\n                List[x] = List[x] + List[x + 1]\n                List[x+1]=0\n            x = x + 1\n        x = 0\n        Zero=0\n\n    for item in List:\n        if item!=0:\n            print(item,end=\" \")\n        else:\n            Zero=Zero+1\n    for x in range(0,Zero-1):\n        print(0,end=\" \")\n    print(0)","sub_path":"Code/CodeRecords/2395/60708/253488.py","file_name":"253488.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347079803","text":"from model import db\n\n\"\"\"\nHelper table to define the many to many relationship between books and authors.\n\"\"\"\nauthorship_table = db.Table('authorship',\n                             db.Column('book_id', db.Integer, db.ForeignKey('book.book_id'), primary_key=True),\n                             db.Column('author_id', db.Integer, db.ForeignKey('author.author_id'), primary_key=True)\n                             )\n\n\nclass Book(db.Model):\n    book_id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String, nullable=False)\n    publish_date = db.Column(db.Date, nullable=False)\n    subject = db.Column(db.String, nullable=False)\n    genre = db.Column(db.String, nullable=False)\n    notes = db.relationship('Note', backref=db.backref('book'))\n    authors = db.relationship('Author', secondary=authorship_table, backref=db.backref('books', lazy='dynamic'))\n    copies = db.relationship('BookCopy', cascade=\"all,delete\", backref=db.backref('book'))\n\n    def __repr__(self): return \"<Book(book_id=%s, title=%s, publish_date=%s, subject=%s, genre=%s, notes=%s, authors=%s, copies=%s)>\" \\\n        % (self.book_id, self.title, self.publish_date, self.subject, self.genre, self.notes,\n           self.authors, self.copies)\n\n    def to_dict(self):\n        \"\"\"\n        Method to transform Book to a dictionary object.\n        :return:\n        \"\"\"\n        print('Book to_dict')\n        book_dict = {\n            'book_id': self.book_id,\n            'title': self.title,\n            'publish_date': self.publish_date,\n            'subject': self.subject,\n            'genre': self.genre,\n            'notes': self.notes,\n            'authors': self.authors,\n            'copies': self.copies\n        }\n        return book_dict\n\n    def update(self, **kwargs):\n        \"\"\"\n        Method to update a Book's attributes\n        :param kwargs: Given a dictionary of valid key value pairs\n        :return: None\n        \"\"\"\n        print('Book.update()')\n        for key, value in kwargs.items():\n            setattr(self, key, value)\n\n\n\n","sub_path":"model/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"47324357","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import
 migrations, models\nimport django.core.validators\nfrom decimal import Decimal\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('accounting', '0010_saleitem_sale_orig_cost'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='saleitem',\n            name='sale_orig_cost',\n            field=models.DecimalField(max_digits=10, decimal_places=4, blank=True, validators=[django.core.validators.MinValueValidator(Decimal('0.0001'))]),\n        ),\n    ]\n","sub_path":"server/accounting/migrations/0011_auto_20160801_1317.py","file_name":"0011_auto_20160801_1317.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"413200253","text":"glob_table = [0] * 10\r\ndef all_completed():\r\n    return sum(glob_table) == 10\r\n\r\ndef insert_num(number):\r\n    global glob_table\r\n    for digit in str(number):\r\n        glob_table[int(digit)] = 1\r\n\r\ndef count_to_sleep(starting_num):\r\n    global glob_table\r\n    glob_table = [0] * 10\r\n    if (starting_num == 0):\r\n        return \"INSOMNIA\"\r\n    for i in range(1, 9000000):\r\n        insert_num(starting_num * i)\r\n        if all_completed():\r\n            return i*starting_num\r\n    return \"INSOMNIA\"\r\n\r\n\r\ndef main():\r\n    cases = int(raw_input())\r\n    for i in range(1,cases+1):\r\n        print(\"CASE #\" + str(i) + \": \" + str(count_to_sleep(int(raw_input()))))\r\n\r\nmain()\r\n\r\n\r\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_Wmson_insomnia.py","file_name":"16_0_1_Wmson_insomnia.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"76631226","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\tBakedPotato — Inventory Management System\n\n\tBakedPotato IMS\n\tCopyright 2010-2012, John David Steele (john.david.steele@gmail.com)\n\n\tLicensed under The MIT License\n\tRedistributions of files must retain the above copyright notice.\n\n\t@copyright Copyright 2010-2012, John David Steele (john.david.steele@gmail.com)\n\t@license MIT License (http://www.opensource.org/licenses/mit-license.php)'cmp-\n\"\"\"\n#Pragma\nfrom __future__ import unicode_literals\n\n#Standard Library\nimport csv\nimport logging \nimport re\nfrom datetime import datetime\nfrom decimal import *\n\n#Extended Library\n\n#Application Library\n\n#This Package\nfrom plugin.base_supplier_catalog_plugin import BaseSupplierCatalogPlugin\n\nlogger = logging.getLogger(__name__)\n\nclass SupplierCatalogHeartlandPlugin(BaseSupplierCatalogPlugin):\n\n\tdefault_encoding = 'windows-1252'\n\t\n\tcolumn_names = ['SKU', 'Name', 'Retail']\n\t\n\t#Effective 2011-03-22\n\tdiscount = '44.3';\n\tdiscount_by_manufacturer = {\n\t\t'ACU':'44.3', 'ATL':'44.3', 'ARI':'30.9', 'BAC':'48.5',\n\t\t'BLM':'44.3', 'BOW':'40.0', 'BLI':'31.9', 'BFS':'40.2',\n\t\t'CAB':'38.1', 'CIR':'44.3', 'MWI':'44.3', 'FVM':'44.3',\n\t\t'GGT':'34.5', 'GCR':'40.0', 'IMX':'48.5', 'KAD':'44.3',\n\t\t'KAT':'44.3', 'MIE':'40.2', 'MNT':'38.1', 'MDP':'48.5',\n\t\t'MRC':'44.3', 'NCE':'40.2', 'PCO':'44.3', 'PCM':'31.9',\n\t\t'RAP':'44.3', 'RAT':'38.1', 'SWH':'44.3', 'TCS':'39.2',\n\t\t'WHT':'41.0', 'WIL':'38.1', 'AMB':'44.3', 'AAC':'44.3',\n\t\t'CHO':'44.3', 'DPM':'38.1', 'EVG':'44.3', 'EXL':'53.6',\n\t\t'FLO':'48.5', 'HLB':'44.3', 'K+S':'44.3', 'KAL':'44.3',\n\t\t'KMT':'38.1', 'LAB':'44.3', 'MMZ':'44.3', 'MID':'44.3',\n\t\t'MSE':'38.1', 'PAC':'48.5', 'PKS':'44.3', 'PIN':'44.3',\n\t\t'PLS':'44.3', 'RIX':'44.3', 'ROB':'50.5', 'SEX':'44.3',\n\t\t'SMA':'44.3', 'SII':'48.5', 'SQU':'44.3', 
'TAM':'48.5',\n\t\t'TNX':'48.5', 'TES':'48.5', 'XAC':'50', 'XUR':'44.3',\n\t\t'WOO':'44.3'\n\t}\n\t\t\n\tdiscount_by_sku = {\n\t\t#Atlas Bulk Track\t48.5\n\t\t'ATL1001049':'48.5',\n\t\t'ATL1001067':'48.5',\n\t\t'ATL155':'48.5',\n\t\t'ATL24':'48.5',\n\t\t'ATL2513':'48.5',\n\t\t'ATL2515':'48.5',\n\t\t'ATL2516':'48.5',\n\t\t'ATL2517':'48.5',\n\t\t'ATL2534':'48.5',\n\t\t'ATL410':'48.5',\n\t\t'ATL411':'48.5',\n\t\t'ATL412':'48.5',\n\t\t'ATL90150':'48.5',\n\t\t'ATL90151':'48.5',\n\t\t'ATL90152':'48.5',\n\t\t'ATL90153':'48.5',\n\t\t\n\t\t#Atlas O\t30.9\n\t\t#Atlas Trainman\t30.9\n\t\t#BLMA Brass\t26.3\n\t\t#BLI Brass, Hybrid\t21\n\t\t#BLI Blueline\t49.5\n\t\t#BLI Paragon 2\t41\n\t\t#Circuitron Tortoise Machines 41.6\n\t\t'CIR6000':'41.6',\n\t\t'CIR6006':'41.6',\n\t\t'CIR6012':'41.6',\n\t\t#Kadee '#' noted items\t25\n\t\t#Kato Special Items\t23.7\n\t\t#Model Power Tunnels\t40\n\t\t#Precision Craft Models Brass\t21\n\t}\n\t\n\tskipable = [\n\t\t'ATL0531', \n\t\t'CASH', \n\t\t'DASB', \n\t\t'DASR', \n\t\t'DROP SHIP',\n\t\t'FRGT',\n\t\t'FRGTADJ',\n\t\t'LIONEL COOP'\n\t]\n\t\n\tscales = [\n\t\tr'^(HO)\\s',\n\t\tr'\\s(HO)\\s',\n\t\tr'^HOn3\\s',\n\t\tr'^\\sHOn3\\s',\n\t\tr'^N\\s',\n\t\tr'\\sN\\s',\n\t\tr'^O\\s',\n\t\tr'\\sO\\s',\n\t]\n\n\tdef match_file_import(self, file_import):\n\t\tif re.search('lock', file_import.name):\n\t\t\treturn False\n\t\tif not re.search('hhwonhand-\\d{14}.csv', file_import.name):\n\t\t\treturn False\n\t\tmagic = file_import.magic()\n\t\tif magic['mime'] != 'text/plain':\n\t\t\treturn False\n\t\tif magic['magic'] != 'ISO-8859 text, with CRLF line terminators':\n\t\t\treturn False\n\t\treturn True\n\t\t\n\tdef get_items(self, supplier_catalog):\n\t\texpected_row_len = len(self.column_names)\n\t\tcontent = supplier_catalog.file_import.content\n\t\tlines = re.split(\"\\n\", content)\n\t\treader = csv.reader(lines, delimiter=bytes(','))\n\t\tfor row in reader:\n\t\t\t\n\t\t\tif row is None or row == []:\n\t\t\t\tyield None\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif len(row) != expected_row_len:\n\t\t\t\tlogger.warning(\"Row has incorrect length: expected %i, got %i '%s'\", expected_row_len, len(row), row)\n\t\t\t\tyield None\n\t\t\t\tcontinue\n\n\t\t\titem = dict()\n\t\t\ti = 0\n\t\t\tfor column_name in self.column_names:\n\t\t\t\tfield = row[i]\n\t\t\t\tfield = field.strip()\n\t\t\t\titem[column_name] = field\n\t\t\t\ti += 1\n\t\t\titem = self.recode(item)\n\t\t\tyield item\n\t\t\n\tdef issue_date(self, file_import):\n\t\tm = re.search('(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2}).csv$', file_import.name)\n\t\tif m:\n\t\t\treturn datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)))\n\n\t\tlogger.warning(\"Failed to convert issue_date for %s\", file_import.name)\n\t\treturn file_import.effective\n\n\tdef update_fields(self, fields):\n\t\t\"\"\"Update Field\"\"\"\n\n\t\tif fields is None:\n\t\t\tlogger.warning(\"Fields is empty\")\n\t\t\treturn None\n\n\t\tdata = dict()\n\t\t\n\t\tif 'SKU' in fields:\n\t\t\tif fields['SKU'] in self.skipable:\n\t\t\t\treturn None\n\t\t\tm = re.match(r'^(...)(.*)$', fields['SKU'])\n\t\t\tif m:\n\t\t\t\tdata['manufacturer_identifier'] = m.group(1)\n\t\t\t\tdata['product_identifier'] = m.group(2)\n\n\t\tdata['stock'] = True\n\t\t\n\t\tdata['scale'] = None\n\n\t\tif 'Name' in fields:\n\t\t\tdata['name'] = fields['Name']\n\t\t\t\n\t\t\tfor scale in self.scales:\n\t\t\t\tm = re.search(scale, data['name'])\n\t\t\t\tif m:\n\t\t\t\t\tdata['scale'] = m.group(0).strip()\n\t\t\t\n\t\t\t\n\t\tif 'Retail' in 
fields:\n\t\t\tdata['retail'] = Decimal(fields['Retail'])\n\t\t\tif fields['SKU'] in self.discount_by_sku:\n\t\t\t\tdiscount = self.discount_by_sku[fields['SKU']]\n\t\t\t\t\n\t\t\t\t\n\t\t\t##FIXME This shouldn't be hardcoded\n\t\t\telif data['manufacturer_identifier'] == 'KAD':\n\t\t\t\tm = re.match(r'^\\#', data['name'])\n\t\t\t\tif m:\n\t\t\t\t\tdiscount = '25'\n\t\t\t\telse:\n\t\t\t\t\tdiscount = '44.3'\n\t\t\telif data['manufacturer_identifier'] in self.discount_by_manufacturer:\n\t\t\t\tdiscount = self.discount_by_manufacturer[data['manufacturer_identifier']]\n\n\t\t\telse:\n\t\t\t\tdiscount = self.discount\n\n\t\t\tratio = (Decimal('100') - Decimal(discount)) / Decimal('100')\n\t\t\tdata['cost'] = data['retail'] * ratio\n\t\treturn data\n","sub_path":"turbogears/bakedpytato/plugin/supplier_catalog_heartland_plugin.py","file_name":"supplier_catalog_heartland_plugin.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"625779454","text":"apogee={\"Empolyee\":200, 'address':\"Kramer in\", 'phone':\"512-44-404\"}\nprint(apogee)\n\nnetwork={\"switch\":\"sw1\",\"router\":\"rt1\",\"vpnrtr\":\"vpn1\"}\nperson={\"firstname\":\"b\",\"name\":\"aaaa\",\"age\":100}\nsite={\"bldg\":{\"idf\":100,\"cs\":\"cs1\"},\"ap\":200}\nsite[\"addr\"]=\"Krmaer Ln\" # just added \n#num=float(input(\"Give a number :\"))\n\nprint(site)\nif \"router\" not in site:\n    site['router']=\"9500\"\nprint(site)\n\ndel site['ap'] # Delete the ap from site dictionary \nprint(site)","sub_path":"Week6/exo32.py","file_name":"exo32.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"216933671","text":"__author__ = 'qgw'\n'''\nGevent is a third-party library that makes it easy to write concurrent synchronous or asynchronous programs; the main pattern used in gevent is the Greenlet,\na lightweight coroutine that plugs into Python as a C extension module.\nGreenlets all run inside the operating-system process of the main program, but they are scheduled cooperatively.\n'''\nimport gevent\nimport time\n\ndef foo():\n    print('Running in foo')\n    gevent.sleep(2)\n    print('Explicit context switch to foo again')\n\ndef bar():\n    print('Explicit context to bar')\n    gevent.sleep(1)\n    print('Implicit context switch back to bar')\n\ndef func():\n    print('Running in func')\n    gevent.sleep(0)\n    print('Running func again')\n\nif __name__ == '__main__':\n    gevent.joinall([\n        gevent.spawn(foo),\n        gevent.spawn(bar),\n        gevent.spawn(func)\n    ])","sub_path":"OLDBOY/day10/gevent携程.py","file_name":"gevent携程.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"619756168","text":"'''\nhttps://www.geeksforgeeks.org/building-heap-from-array/\n\n'''\ndef heapify(arr, n, i): \n    \n    largest = i; # Initialize largest as root \n    l = 2 * i + 1; # left = 2*i + 1 \n    r = 2 * i + 2; # right = 2*i + 2 \n    \n    # If left child is larger than root \n    if l < n and arr[l] > arr[largest]: \n        largest = l; \n    \n    # If right child is larger than largest so far \n    if r < n and arr[r] > arr[largest]: \n        largest = r; \n    \n    # If largest is not root \n    if largest != i: \n        arr[i], arr[largest] = arr[largest], arr[i]; \n    \n        # Recursively heapify the affected sub-tree \n        heapify(arr, n, largest); \n    \n# Function to build a Max-Heap from the given array \ndef buildHeap(arr, n): \n    \n    # Index of last non-leaf node. It is like finding parent of leaf node i.e. 
parent of (size-1)th node parent = (n-1) -1 //2 = (n-2) // 2\n startIdx = (n-2) // 2; \n \n # Perform reverse level order traversal \n # from last non-leaf node and heapify \n # each node \n for i in range(startIdx, -1, -1): \n heapify(arr, n, i); \n \n# A utility function to print the array \n# representation of Heap \ndef printHeap(arr, n): \n print(\"Array representation of Heap is:\"); \n \n for i in range(n): \n print(arr[i], end = \" \"); \n print(); \n \n# Driver Code \nif __name__ == '__main__': \n \n # Binary Tree Representation \n # of input array \n # 1 \n # / \\ \n # 3 5 \n # / \\ / \\ \n # 4 6 13 10 \n # / \\ / \\ \n # 9 8 15 17 \n arr = [ 1, 3, 5, 4, 6, 13, \n 10, 9, 8, 15, 17 ]; \n \n n = len(arr); \n \n buildHeap(arr, n); \n \n printHeap(arr, n); ","sub_path":"geeksforgeeks/heap/build_heap_operation.py","file_name":"build_heap_operation.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"556215322","text":"\"\"\"\nthe edge of the period\n'beijing': {\n 'north': 41.055,\n 'south': 39.445,\n 'west': 115.422,\n 'east': 117.515\n }\nTime Reference:\n1467000000: Mon 12:00:00 2016-6-27\n\"\"\"\nfrom __future__ import print_function, division\n\nfrom math import sin, cos, sqrt, atan2, radians\nimport time\nimport math\nimport sys\nimport argparse\nimport multiprocessing\n\nfilelist = ['part-'+format(n, '05d') for n in range(4000)]\n\ninput_path = '/datahouse/yurl/TalkingData/data/BJ_cleaned_data/'\noutput_path = '/datahouse/tripflow/labelData-30-1200/'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--minute', type=int, dest='minute',\n help='(required) the time threshold, unit: minute, e.g. 15', required=True)\nparser.add_argument('--space', type=int, dest='space',\n help='(required) the space threshold, unit: meter, e.g. 
800', required=True)\nparser.add_argument('--write_mode', type=int, dest='write_mode', default=1,\n help='(optional) the output mode (default 1): 1 - write the records line by line, 1 - write all the records of one uid to one line and split the record with |')\nargs = parser.parse_args()\n\nminute = args.minute\nspace = args.space\nwrite_mode = args.write_mode\n\nMAX_SPACE_INTERVAL = space\nMIN_TIME_INTERVAL = minute * 60\nSPLIT = 0.001\n\nMAX_STAY_TRIP_SIZE = 10000;\n\nSTATE_ID_COUNT = -1\n\ndef convert_to_hour(seconds):\n hour = int((seconds - 1467000000) / 3600) % (7 * 24)\n return hour\n\ndef convert_longitude(data, split):\n return int((data - 115.422) / split)\n\ndef convert_latitude(data, split):\n return int((data - 39.445) / split)\n\ndef distance(lat1, lon1, lat2, lon2):\n \"\"\"\n compute distance given two points\n \"\"\"\n # radius of the earth by km\n RADIUS_EARTH = 6371\n DEGREE_TO_RADIAN = 2 * math.pi / 360\n COS_LATITUDE = 0.77\n\n lat1 = lat1 * DEGREE_TO_RADIAN\n lon1 = lon1 * DEGREE_TO_RADIAN\n lat2 = lat2 * DEGREE_TO_RADIAN\n lon2 = lon2 * DEGREE_TO_RADIAN\n x = (lon2 - lon1) * COS_LATITUDE\n y = lat2 - lat1\n return int(RADIUS_EARTH * sqrt(x * x + y * y) * 1000)\n\n\ndef sds_algorithm(segments):\n \"\"\"\n apply the sds algorithm on each segment from all the trajectories\n \"\"\"\n\n global STATE_ID_COUNT\n result = []\n stay_num, travel_num = 0, 0\n\n for seg in segments:\n\n STATE_ID_COUNT += 1\n # the segment with less than three records can not be labeled by our algorithm\n if len(seg) < 3:\n result.append(seg)\n continue\n\n # label STAY trips in the segment\n # the algorithm below refers to the Algorithm 2 in the paper in ShareLatex\n head = 0\n for cursor in xrange(1, len(seg)):\n\n # too-long stay trip, cut here\n if ((cursor - head) > MAX_STAY_TRIP_SIZE):\n print ('Cut too-long stay trip at segment offset: %d'%(cursor));\n\n if seg[cursor-1][1] - seg[head][1] >= MIN_TIME_INTERVAL:\n for k in xrange(head, cursor):\n # only label the record not labeled as stay any more\n if len(seg[k]) == 4:\n seg[k].append(0);\n stay_num = stay_num + 1;\n\n head = cursor;\n continue;\n\n for anchor in xrange(cursor - 1, head - 1, -1):\n space_interval = distance(\n seg[cursor][2], seg[cursor][3], seg[anchor][2], seg[anchor][3])\n\n if space_interval > MAX_SPACE_INTERVAL:\n if seg[cursor-1][1] - seg[head][1] >= MIN_TIME_INTERVAL:\n for k in xrange(head, cursor):\n # only label the record not labeled as stay\n if len(seg[k]) == 4:\n seg[k].append(0)\n stay_num += 1\n\n head = anchor + 1\n break\n\n # handle the remaining records in the segment\n if seg[len(seg)-1][1] - seg[head][1] >= MIN_TIME_INTERVAL:\n for k in xrange(head, len(seg)):\n # only label the record not labeled as stay any more\n if len(seg[k]) == 4:\n seg[k].append(0)\n stay_num += 1\n\n # label TRAVEL records in the segment\n # the algorithm below refers to the Algorithm 2 in the paper in ShareLatex\n for cursor in xrange(1, len(seg) - 1):\n # for all the unlabeled records till now\n if len(seg[cursor]) == 4:\n left, right = -1, -1\n\n # find the first out-of-range record on the left of cursor\n for l in reversed(xrange(cursor)):\n if distance(seg[cursor][2], seg[cursor][3], seg[l][2], seg[l][3]) > MAX_SPACE_INTERVAL:\n left = l\n break\n if seg[cursor][1] - seg[l][1] > MIN_TIME_INTERVAL:\n break\n\n # find the first out-of-range record on the right of cursor\n for r in xrange(cursor + 1, len(seg)):\n if distance(seg[cursor][2], seg[cursor][3], seg[r][2], seg[r][3]) > MAX_SPACE_INTERVAL:\n right = r\n break\n if 
seg[r][1] - seg[cursor][1] > MIN_TIME_INTERVAL:\n break\n\n if right != -1 and left != -1 and seg[right][1] - seg[left][1] <= MIN_TIME_INTERVAL:\n seg[cursor].append(1)\n #seg[cursor].append(STATE_ID_COUNT)\n travel_num += 1\n\n # if len(seg[right]) == 4:\n # seg[right].append(1)\n # seg[right].append(STATE_ID_COUNT)\n # travel_num += 1\n # elif len(seg[right]) == 5 and seg[right][4] != 1:\n # seg[right][4] = 1\n # seg[right].append(STATE_ID_COUNT)\n # travel_num += 1\n #\n #\n # if len(seg[left]) == 4:\n # seg[left].append(1)\n # seg[left].append(STATE_ID_COUNT)\n # travel_num += 1\n # elif len(seg[left]) == 5 and seg[left][4] != 1:\n # seg[left][4] = 1\n # seg[left].append(STATE_ID_COUNT)\n # travel_num += 1\n\n\n\n result.append(seg)\n\n return result, stay_num, travel_num\n\n\ndef label_and_compute_sparsity(filename):\n \"\"\"\n label the file given the filename\n append 0 after the stay record, append 1 after the travel record, do nothing for other records\n \"\"\"\n start_time = time.time()\n\n filename_r = input_path + 'P2-' + filename\n filename_w_tjt = output_path + filename + \\\n '-trajectory_' + str(minute) + \"-\" + str(space)\n filename_w_sparsity = output_path + filename + \\\n '-sparsity_' + str(minute) + \"-\" + str(space)\n\n with open(filename_r) as f:\n records = f.readlines()\n\n c_uid = -1\n segments, tjt = [], []\n labeled_segments, stats = [], []\n\n # divide the records into to segments\n for record in records:\n columns = record.split(',')\n\n if len(columns) < 4:\n print('An error line in line: ' + str(record))\n continue\n\n # set record columns\n uid = columns[0]\n time_second = int(columns[1][0:10])\n latitude, longtitue = float(columns[2]), float(columns[3])\n\n # check if it is the same trajectory\n if uid == c_uid:\n tjt.append([uid, time_second, latitude, longtitue])\n else:\n # new uid\n if c_uid != -1:\n # the current uid is valid, segment the trajectory of the current uid (c_uid)\n\t\t\t\t# sort the trajectory by time\n tjt.sort(key=lambda x: x[1])\n\n # truncate the trajectory into segments at every time interval larger than Delta_T, stored in segments\n\t\t # the first index of the current segment\n l = 0\n for r in xrange(1, len(tjt)):\n time_interval = tjt[r][1] - tjt[r-1][1]\n if time_interval > MIN_TIME_INTERVAL:\n segments.append(tjt[l:r])\n l = r\n\n if l < len(tjt):\n segments.append(tjt[l:])\n\n result, stay_num, travel_num = sds_algorithm(segments)\n\n\n # label the rest of records -1, stand for unknown\n for segments in result:\n for seg in segments:\n if len(seg) == 4:\n seg.append(-1)\n\n\n # compute global and local sparsity\n global_sparsity, local_sparsity = 0, 0\n local_sparsity_num = 0\n for i in xrange(1, len(tjt)):\n time_interval = tjt[i][1] - tjt[i-1][1]\n global_sparsity += time_interval\n if time_interval < MIN_TIME_INTERVAL:\n local_sparsity += time_interval\n local_sparsity_num += 1\n global_sparsity = global_sparsity / (len(tjt) - 1) if len(tjt) > 1 else 0\n local_sparsity = local_sparsity / (local_sparsity_num * MIN_TIME_INTERVAL) if local_sparsity_num > 0 else 0\n\n global_sparsity = format(global_sparsity, '.4f')\n local_sparsity = format(local_sparsity, '.4f')\n\n # store results\n labeled_segments.append(result)\n stats.append([uid, global_sparsity, local_sparsity, stay_num, travel_num, len(tjt)])\n\n # reset\n segments, tjt = [], []\n\n\n # refresh the arrays to only store the first record of the new trajectory (uid)\n tjt.append([uid, time_second, latitude, longtitue])\n c_uid = uid\n\n # output to file\n with 
open(filename_w_tjt, 'w') as f:\n for segments in labeled_segments:\n for seg in segments:\n seg = [','.join([str(x) for x in record]) for record in seg]\n if write_mode == 0:\n for record in seg:\n f.write(record + '\\n')\n if write_mode == 1:\n f.write('|'.join(seg) + '\\n')\n with open(filename_w_sparsity, 'w') as f:\n for stat in stats:\n f.write(','.join([str(x) for x in stat]) + '\\n')\n\n stay_num, travel_num, all_num = sum([x[3] for x in stats]), sum([x[4] for x in stats]), sum([x[5] for x in stats])\n print('[file %s] time %f, records num %d, stay num %d (%f%%), travel num %d (%f%%)'\n %(filename, time.time() - start_time, all_num, stay_num, stay_num / all_num * 100, travel_num, travel_num / all_num * 100))\n\n\nif __name__ == \"__main__\":\n pool = multiprocessing.Pool(processes=5)\n pool.map(label_and_compute_sparsity, filelist)\n\n","sub_path":"label_and_compute_stats.py","file_name":"label_and_compute_stats.py","file_ext":"py","file_size_in_byte":11030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255282580","text":"# -*- coding: latin-1 -*-\r\n\r\nimport sys, os\r\n\r\n\r\nsys.path.append( gBase + \"/library.zip\" )\r\n\r\nfrom jFusion import *\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\ndef controlCIF( cCIF ) :\r\n\r\n # 9 dígitos\r\n if len(cCIF) != 9 :\r\n return \"ha de contener nueve dígitos\"\r\n\r\n # El primero una letra\r\n cTipo = cCIF[0]\r\n cTipos = \"ABCDEFGHJPQRSUVNW\"\r\n if cTipo not in cTipos :\r\n return \"el primer carácter ha de ser uno de los siguientes : \" + cTipos\r\n\r\n # Los 8 siguientes han de se números\r\n cNum = cCIF[1:9]\r\n if not cNum.isdigit() :\r\n return \"los caracteres en posiciones 1 a 9, han de ser números\"\r\n\r\n # El último es el dígito de control\r\n # Se toman únicamente los números centrales (en python se empieza desde 0)\r\n # 1. Sumar los dígitos de la posiciones pares.\r\n nPar = int(cCIF[2])+int(cCIF[4])+int(cCIF[6])\r\n # 2. Para cada uno de los dígitos de la posiciones impares, multiplicarlo por 2 y sumar los dígitos del resultado.\r\n nImpar = 0\r\n for pos in [ 1, 3, 5, 7 ] :\r\n n = int(cCIF[pos])*2\r\n nImpar += int(n/10) + n%10\r\n # 3. Calcular la suma\r\n nTotal = nPar + nImpar\r\n # 4. Tomar sólo el dígito de las unidades de C y restárselo a 10. 
Esta resta nos da D.\r\n nDigito = (10 - nTotal%10)%10\r\n cDigito = cCIF[8]\r\n if cTipo in \"CKLMNPQRSVW\" :\r\n dcLetras = \"JABCDEFGHI\"\r\n cLetra = dcLetras[nDigito]\r\n if cDigito != cLetra :\r\n return \"no es correcto el dígito de control ha de ser %s\"%cLetra\r\n else :\r\n if cDigito != str(nDigito) :\r\n return \"no es correcto el dígito de control ha de ser %s\"%str(nDigito)\r\n\r\n return None\r\n# --------------------------------------------------------------------------------------------------------------------------\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\ndef letraNIF( cNIFsinLetra ) :\r\n nNIF = int(cNIFsinLetra)\r\n cLetras = \"TRWAGMYFPDXBNJZSQVHLCKE\"\r\n return cLetras[nNIF%23]\r\n# --------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\ndef controlCIF_NIF( cCIF_NIF ) :\r\n tamCIF_NIF = len(cCIF_NIF)\r\n if tamCIF_NIF < 5 :\r\n return \"Error NIF : formato erróneo.\"\r\n\r\n cPrimero = cCIF_NIF[0]\r\n if cPrimero in \"0123456789XYZ\" :\r\n if cPrimero in \"XYZ\" : # Extranjeros\r\n digito = \"0\" if cPrimero == \"X\" else ( \"1\" if cPrimero == \"Y\" else \"2\" )\r\n cCIF_NIF = digito + cCIF_NIF[1:]\r\n cLetra = \"-\"\r\n if cCIF_NIF[-1].isalpha() :\r\n cLetra = cCIF_NIF[-1]\r\n cCIF_NIF = cCIF_NIF[:-1]\r\n if not cCIF_NIF.isdigit() :\r\n return \"Error : NIF erróneo.\"\r\n cLetraDebe = letraNIF( cCIF_NIF )\r\n if cLetraDebe != cLetra :\r\n return \"Error NIF : la letra del NIF tiene que ser %s.\"%cLetraDebe\r\n\r\n return None\r\n\r\n else :\r\n error = controlCIF( cCIF_NIF )\r\n if error :\r\n error = \"Error CIF : %s.\"%error\r\n return error\r\n# --------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\ndef controlDCbanco( c20 ) :\r\n\r\n c20 = c20.strip()\r\n if not c20 :\r\n return None\r\n\r\n cError = None\r\n if len(c20) != 20 :\r\n cError = \"no tiene 20 dígitos\"\r\n elif not c20.isdigit() :\r\n cError = \"sólo puede contener números\"\r\n else :\r\n liNum = [ int(x) for x in c20 ]\r\n\r\n liH1 = [ 4, 8, 5, 10, 9, 7, 3, 6 ]\r\n H1 = 0\r\n for numero, peso in enumerate(liH1) :\r\n H1 += peso*liNum[numero]\r\n\r\n liI1 = [ 1, 2, 4, 8, 5, 10, 9, 7, 3, 6 ]\r\n I1 = 0\r\n for numero, peso in enumerate(liI1) :\r\n I1 += peso*liNum[numero+10]\r\n\r\n ph = H1%11\r\n if ph == 0 :\r\n dc = 0\r\n elif ph == 1 :\r\n dc = 10\r\n else :\r\n dc = 10*(11-ph)\r\n\r\n pi = I1%11\r\n if pi == 0 :\r\n dc += 0\r\n elif pi == 1 :\r\n dc += 1\r\n else :\r\n dc += 11-pi\r\n\r\n num = liNum[8]*10 + liNum[9]\r\n if num != dc :\r\n cError = \"el dígito de control ha de ser %02d (y tiene %02d)\"%( dc, num )\r\n\r\n if cError :\r\n return \"Error en los datos bancarios (20 dígitos) : %s.\" % cError\r\n\r\n return cError\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\ndef control() :\r\n\r\n FC = FusionMGD.clave # Forma abreviada de llamada a la función\r\n\r\n error = controlCIF_NIF( FC( \"NIF\" ) )\r\n\r\n errorDC = controlDCbanco( FC( \"20DIGITOS\" ) )\r\n\r\n fichResp = FC( \"FRESP\" )\r\n\r\n f = 
open( fichResp, \"wb\" )\r\n if error :\r\n f.write( error )\r\n if errorDC :\r\n if error :\r\n f.write( \"\\r\\n\" )\r\n f.write( errorDC )\r\n f.close()\r\n# --------------------------------------------------------------------------------------------------------------------------\r\n\r\n# --------------------------------------------------------------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n control()\r\n\r\n","sub_path":"internas/Varios/controlClPr.py","file_name":"controlClPr.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"94033828","text":"#-*- coding: utf-8 -*-\nt = int(input())\n\n# Runtime: O(n) where `n` is number of pancakes\nfor i in range(t):\n seq = input()\n\n if seq.count('-') == 0:\n print('Case #%d: %d' % (i + 1, 0))\n continue\n\n happy = 0\n blank = 0\n for c in seq:\n if c == '+':\n happy = min(happy, blank + 1)\n blank = min(blank + 2, happy + 1)\n else:\n happy = min(happy + 2, blank + 1)\n blank = min(blank, happy + 1)\n print('Case #%d: %d' % (i + 1, happy))\n","sub_path":"codes/CodeJamCrawler/16_0_2/Brosa/revenge_of_pancakes.py","file_name":"revenge_of_pancakes.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"476491279","text":"from flask import request, json, Response, Blueprint, g\nfrom ..models.question import Question, QuestionSchema\n\nquestion_api = Blueprint('question_api', __name__)\nquestion_schema = QuestionSchema()\n\n\n@question_api.route('/', methods=['POST'])\ndef create():\n \"\"\"\n Create Question Function\n \"\"\"\n\n req_data = request.get_json()\n data = question_schema.load(req_data)\n \n question = Question(data)\n\n \"\"\"\n Create Question Function\n \"question.process_question()\n \"\"\"\n\n question.save()\n\n return custom_response({'result': \"Success\"}, 201)\n\n\n@question_api.route('/', methods=['GET'])\ndef get_all():\n quuestions = Question.get_all_questions()\n ser_questions = question_schema.dump(quuestions, many=True)\n\n return custom_response(ser_questions, 200)\n\n\ndef custom_response(res, status_code):\n \"\"\"\n Custom Response Function\n \"\"\"\n return Response(\n mimetype=\"application/json\",\n response=json.dumps(res),\n status=status_code\n )\n","sub_path":"Project/Manage/src/views/QuestionView.py","file_name":"QuestionView.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"326314741","text":"from arc_solvers.processing.convert_to_entailment import *\nfrom arc_solvers.processing.query_formulator.formulator import Formulator\n\n\nclass ProgrammaticalQ2AReformer(Formulator):\n\n def formulate(self, sample, **kwargs):\n fitb = get_fitb_from_question(sample['question']['stem'])\n choices = sample['question']['choices']\n for idx in range(len(choices)):\n choices[idx]['new_query'] = create_hypothesis(fitb, choices[\n idx]['text'])\n return sample\n","sub_path":"arc_solvers/processing/query_formulator/prog_q2a_reformer.py","file_name":"prog_q2a_reformer.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"286126495","text":"cafe = bytes('café', encoding='utf-8')\nprint(cafe)\n\nprint(cafe[0])\n\nprint(cafe[:1])\n\ncafe_arr = 
bytearray(cafe)\nprint(cafe_arr)\n\nprint(cafe_arr[-1:])\n\nprint(bytes.fromhex('31 4B CE A9'))\n\nimport array\n\nnumbers = array.array('h', [-2, -1, 0, 1, 2])\noctest = bytes(numbers)\nprint(octest)\n\nimport struct\n\nfmt = '<3s3sHH'\nwith open('filter.gif', 'rb') as fp:\n    img = memoryview(fp.read())\nheader = img[:10]\nprint(bytes(header))\nstruct.unpack(fmt, header)\ndel header\ndel img\n","sub_path":"app/fluent_python_package/4/4.2.py","file_name":"4.2.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"609971656","text":"\r\nfrom flask_api import FlaskAPI\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom math import sqrt\r\n\r\nfrom flask import request, jsonify, abort\r\nfrom math import sqrt\r\n\r\n#local import\r\nfrom instance.config import app_config\r\n\r\ndb = SQLAlchemy()\r\n#using wolfram algorithm to compute value\r\ndef ComputeFib(number):\r\n    return int(((1+sqrt(5))**number-(1-sqrt(5))**number)/(2**number*sqrt(5)))\r\n\r\n#Compute values for sequence and add to list\r\ndef get_fib_sequence(num_arg):\r\n    results = []\r\n    \r\n    for x in range(0, num_arg):\r\n        \r\n        obj = ComputeFib(x) \r\n        results.append(obj)\r\n    return results\r\n\r\ndef config_app(config_type):\r\n\r\n    from app.models import FibonacciNumbersRequest\r\n\r\n    app = FlaskAPI(__name__, instance_relative_config=True)\r\n    app.config.from_object(app_config[config_type])\r\n    app.config.from_pyfile('config.py')\r\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n    db.init_app(app)\r\n    \r\n    @app.route('/FibNumberReqs/<int:id>', methods=['DELETE'])\r\n    def parse_delete_request(id, **kwargs):\r\n        if request.method == 'DELETE':\r\n            cur_rec = FibonacciNumbersRequest.query.get(id)\r\n            db.session.delete(cur_rec)\r\n            db.session.commit()\r\n            return ('', 200)\r\n    \r\n    @app.route('/FibNumberReqs/<int:number>', methods=['POST'])\r\n    def parse_post_request(number, **kwargs):\r\n        if request.method == 'POST':\r\n\r\n            if number is not None:\r\n                if number < 0 :\r\n                    #error the number can't be less than 0\r\n                    return('Bad Request number is less than 0', 400)\r\n                #the value is good\r\n                else: \r\n                    \r\n                    num_sq = get_fib_sequence(number)\r\n                    new_req = FibonacciNumbersRequest(fib_number_input = number)\r\n                    \r\n                    new_req.number_sequence = str(num_sq)\r\n                    db.session.add(new_req)\r\n                    db.session.commit()\r\n                    obj = {\r\n                        'id' : new_req.id,\r\n                        'fib_number_input' : new_req.fib_number_input,\r\n                        'date_requested' : new_req.date_requested,\r\n                        'date_modified' : new_req.date_modified,\r\n                        'number_sequence' : new_req.number_sequence\r\n                    }\r\n                    response = jsonify(obj)\r\n                    response.status_code = 201\r\n                    return response\r\n\r\n    @app.route('/FibNumberReqs/<int:id>', methods=[ 'PUT' ])\r\n    def parse_put_request(id, **kwargs):\r\n        if request.method == 'PUT':\r\n            return('', 405)\r\n        \r\n\r\n    #All get logic \r\n    @app.route('/FibNumberReqs')\r\n    def parse_noparam_request(**kwargs):\r\n\r\n        req_id = -1\r\n        verb = str(request.method)\r\n        if None is not request.args.get('id'):\r\n            req_id = request.args.get('id')\r\n        elif None is not request.data.get('id'):\r\n            req_id = request.data.get('id')\r\n        if -1 is not req_id:\r\n            try:\r\n                cur_rec = FibonacciNumbersRequest.query.get(req_id)\r\n                if None is not cur_rec:\r\n                    obj = {\r\n                        'id' : cur_rec.id,\r\n                        'fib_number_input' : cur_rec.fib_number_input,\r\n                        'date_requested' : cur_rec.date_requested,\r\n                        'date_modified' : cur_rec.date_modified,\r\n                        'number_sequence' : cur_rec.number_sequence\r\n                    }\r\n                    response = jsonify(obj)\r\n
                    response.status_code = 200\r\n                    return response\r\n\r\n                else:\r\n                    #No record for this id\r\n                    return ('', 204)\r\n            \r\n            except :\r\n                #error getting the record should be from a malformed id value. Doesn't get hit though because type makes mapping not match\r\n                return('', 400) \r\n            \r\n        else:\r\n            reqlist = FibonacciNumbersRequest.query.all()\r\n            if 0 == len(reqlist):\r\n                #No id passed in and no items found\r\n                return ('', 204)\r\n            else:\r\n                #No id so return the list\r\n                results = []\r\n                for item in reqlist:\r\n                    obj = {\r\n                        'id' : item.id,\r\n                        'fib_number_input' : item.fib_number_input,\r\n                        'date_requested' : item.date_requested,\r\n                        'date_modified' : item.date_modified,\r\n                        'number_sequence' : item.number_sequence\r\n                    }\r\n                    results.append(obj)\r\n                \r\n                response = jsonify(results)\r\n                response.status_code = 200\r\n                return response\r\n\r\n    return app","sub_path":"FibonacciREST/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119358356","text":"#forward backward proportional to speed %\r\n#left right proportional to left right motor balance\r\n\r\n\r\n\r\n\r\n\r\n#def joystickDrive(leftRight, speed);\r\n#   drivemotor left(leftRIght% * speed )\r\n#   drivemotor right(( 100 - leftRIght%) * speed )\r\n\r\n\"\"\"\r\npwmb = 40\r\nBI2 = 38\r\nBI1 = 37\r\nSTBY = 35\r\nAI1 = 33\r\nAI2 = 31\r\nPWMP(A?) = 29\r\n\r\n\r\nSTBY\tStandby\tAllows the H-bridges to work when high (has a pulldown resistor so it must actively pulled high)\r\nAIN1/BIN1\tInput 1 for channels A/B\tOne of the two inputs that determines the direction.\r\nAIN2/BIN2\tInput 2 for channels A/B\tOne of the two inputs that determines the direction.\r\nPWMA/PWMB\tPWM input for channels A/B\tPWM input that controls the speed\r\n\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n \r\nFile: skidsteer_two_pwm_test.py\r\n \r\nThis code will test Raspberry Pi GPIO PWM on four GPIO\r\npins. 
The code test ran with L298N H-Bridge driver module connected.\r\n \r\nWebsite:\twww.bluetin.io\r\nDate:\t\t27/11/2017\r\n\"\"\"\r\n \r\n__author__ = \"Mark Heywood\"\r\n__version__ = \"0.1.0\"\r\n__license__ = \"MIT\"\r\n \r\nfrom gpiozero import PWMOutputDevice\r\nfrom gpiozero import DigitalOutputDevice\r\nfrom time import sleep\r\n \r\n#///////////////// Define Motor Driver GPIO Pins /////////////////\r\n# Motor A, Left Side GPIO CONSTANTS\r\nPWM_DRIVE_LEFT = 21\t\t# ENA - H-Bridge enable pin\r\nFORWARD_LEFT_PIN = 26\t# IN1 - Forward Drive\r\nREVERSE_LEFT_PIN = 19\t# IN2 - Reverse Drive\r\n# Motor B, Right Side GPIO CONSTANTS\r\nPWM_DRIVE_RIGHT = 5\t\t# ENB - H-Bridge enable pin\r\nFORWARD_RIGHT_PIN = 13\t# IN1 - Forward Drive\r\nREVERSE_RIGHT_PIN = 6\t# IN2 - Reverse Drive\r\n \r\n# Initialise objects for H-Bridge GPIO PWM pins\r\n# Set initial duty cycle to 0 and frequency to 1000\r\ndriveLeft = PWMOutputDevice(PWM_DRIVE_LEFT, True, 0, 1000)\r\ndriveRight = PWMOutputDevice(PWM_DRIVE_RIGHT, True, 0, 1000)\r\n \r\n# Initialise objects for H-Bridge digital GPIO pins\r\nforwardLeft = DigitalOutputDevice(FORWARD_LEFT_PIN)\r\nreverseLeft = DigitalOutputDevice(REVERSE_LEFT_PIN)\r\nforwardRight = DigitalOutputDevice(FORWARD_RIGHT_PIN)\r\nreverseRight = DigitalOutputDevice(REVERSE_RIGHT_PIN)\r\n \r\ndef allStop():\r\n\tforwardLeft.value = False\r\n\treverseLeft.value = False\r\n\tforwardRight.value = False\r\n\treverseRight.value = False\r\n\tdriveLeft.value = 0\r\n\tdriveRight.value = 0\r\n \r\ndef forwardDrive():\r\n\tforwardLeft.value = True\r\n\treverseLeft.value = False\r\n\tforwardRight.value = True\r\n\treverseRight.value = False\r\n\tdriveLeft.value = 1.0\r\n\tdriveRight.value = 1.0\r\n \r\ndef reverseDrive():\r\n\tforwardLeft.value = False\r\n\treverseLeft.value = True\r\n\tforwardRight.value = False\r\n\treverseRight.value = True\r\n\tdriveLeft.value = 1.0\r\n\tdriveRight.value = 1.0\r\n \r\ndef spinLeft():\r\n\tforwardLeft.value = False\r\n\treverseLeft.value = True\r\n\tforwardRight.value = True\r\n\treverseRight.value = False\r\n\tdriveLeft.value = 1.0\r\n\tdriveRight.value = 1.0\r\n \r\ndef SpinRight():\r\n\tforwardLeft.value = True\r\n\treverseLeft.value = False\r\n\tforwardRight.value = False\r\n\treverseRight.value = True\r\n\tdriveLeft.value = 1.0\r\n\tdriveRight.value = 1.0\r\n \r\ndef forwardTurnLeft():\r\n\tforwardLeft.value = True\r\n\treverseLeft.value = False\r\n\tforwardRight.value = True\r\n\treverseRight.value = False\r\n\tdriveLeft.value = 0.2\r\n\tdriveRight.value = 0.8\r\n \r\ndef forwardTurnRight():\r\n\tforwardLeft.value = True\r\n\treverseLeft.value = False\r\n\tforwardRight.value = True\r\n\treverseRight.value = False\r\n\tdriveLeft.value = 0.8\r\n\tdriveRight.value = 0.2\r\n \r\ndef reverseTurnLeft():\r\n\tforwardLeft.value = False\r\n\treverseLeft.value = True\r\n\tforwardRight.value = False\r\n\treverseRight.value = True\r\n\tdriveLeft.value = 0.2\r\n\tdriveRight.value = 0.8\r\n \r\ndef reverseTurnRight():\r\n\tforwardLeft.value = False\r\n\treverseLeft.value = True\r\n\tforwardRight.value = False\r\n\treverseRight.value = True\r\n\tdriveLeft.value = 0.8\r\n\tdriveRight.value = 0.2\r\n \r\ndef main():\r\n\tallStop()\r\n\tforwardDrive()\r\n\tsleep(5)\r\n\treverseDrive()\r\n\tsleep(5)\r\n\tspinLeft()\r\n\tsleep(5)\r\n\tSpinRight()\r\n\tsleep(5)\r\n\tforwardTurnLeft()\r\n\tsleep(5)\r\n\tforwardTurnRight()\r\n\tsleep(5)\r\n\treverseTurnLeft()\r\n\tsleep(5)\r\n\treverseTurnRight()\r\n\tsleep(5)\r\n\tallStop()\r\n \r\n \r\nif __name__ == \"__main__\":\r\n \"\"\" 
This is executed when run from the command line \"\"\"\r\n main()\r\n","sub_path":"drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"318382625","text":"def san():\n L = [1]\n while True:\n yield L\n L = [1] + [L[i-1] + L[i] for i in range(len(L)) if i > 0] + [1]#len(L)代表的是L的元素数量\n\nn = 0\nfor t in san():\n print(t)\n n = n + 1\n if n == 10:\n break\n","sub_path":"杨辉三角(瞎几把打).py","file_name":"杨辉三角(瞎几把打).py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"176284072","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\n\nclass RGBHistogram:\n def __init__(self, bins):\n ##直方图初始化\n self.bins = bins\n\n def describe(self, image):\n ##计算RGB颜色空间的特征向量,并归一化\n hist = cv2.calcHist([image], [0, 1, 2],\n None, self.bins, [0, 256, 0, 256, 0, 256])\n hist = cv2.normalize(hist)\n\n #返回颜色直方图\n return hist.flatten()\n\n##利用掩模计算图像不同区域的特征直方图\nclass ColorDescriptor:\n def __init__(self, bins):\n #直方图初始化\n self.bins = bins\n \n def describe(self, image):\n ##将图片以HSV颜色空间的格式读入\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n features = []\n \n ##将图像分为四个区域\n (h, w) = image.shape[:2]\n (cX, cY) = (int(w * 0.5), int(h * 0.5))\n segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),\n (0, cX, cY, h)]\n \n ##在中心构建一个椭圆形的掩模\n (axesX, axesY) = (int(w * 0.75) / 2, int(h * 0.75) / 2)\n ellipMask = np.zeros(image.shape[:2], dtype = \"uint8\")\n cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)\n \n #去掉每个区域与掩模重合的部分,然后计算该区域颜色直方图\n for (startX, endX, startY, endY) in segments:\n cornerMask = np.zeros(image.shape[:2], dtype = \"uint8\")\n cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)\n cornerMask = cv2.subtract(cornerMask, ellipMask)\n hist = self.histogram(image, cornerMask)\n features.extend(hist)\n \n ##对椭圆掩模区域计算其颜色直方图\n hist = self.histogram(image, ellipMask)\n features.extend(hist)\n \n #返回图像特征向量\n return features\n","sub_path":"BookSearch/descriptor.py","file_name":"descriptor.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379647183","text":"#This file includes functions for returning an object based off\n#data from an athletes profile on Power of 10\nfrom bs4 import BeautifulSoup\nimport urllib\n\ndef scrape_athlete_po10(po10_athlete_id):\n output = {}\n #Required keys\n output[\"po10_athlete_id\"] = None\n output[\"name\"] = None\n output[\"po10_athlete_id\"] = po10_athlete_id\n \n # Initialize URL and BeautifulSoup\n url = \"http://www.powerof10.info/athletes/profile.aspx?athleteid={}\".format(po10_athlete_id)\n r = urllib.urlopen(url).read()\n soup = BeautifulSoup(r, \"html.parser\")\n\n # Check profile exists\n errors = soup.find_all(\"span\", {\"id\":\"cphBody_lblErrorMessage\"})\n if len(errors[0].text) > 0:\n return False\n\n #Get athlete name\n ath_name = soup.find_all(\"tr\", class_=\"athleteprofilesubheader\")\n ath_name = ath_name[0].find_all(\"td\")\n ath_name = ath_name[0].find_all(\"h2\")\n output[\"name\"] = ath_name[0].get_text().lstrip()\n\n #Get athlete information \n ath_info = soup.find_all(\"div\", id=\"cphBody_pnlAthleteDetails\")\n ath_info = ath_info[0].find_all(\"table\", cellpadding=\"2\")\n for element_i in ath_info:\n ath_info_block = element_i.find_all(\"tr\")\n for element_j in ath_info_block:\n split = 
element_j.get_text().split(\":\", 1)\n            output[split[0]] = split[1]\n\n    races = soup.find(\"div\", id=\"cphBody_pnlPerformances\")\n    races = races.find(\"table\", attrs={\"class\": \"alternatingrowspanel\"})\n    output[\"po10_races\"] = []\n    for section in races:\n        if section.get(\"style\") == \"background-color:WhiteSmoke;\" or section.get(\"style\") == \"background-color:Gainsboro;\":\n            i_race = {}\n            link = section.find(\"a\")[\"href\"].split(\"&\")\n            for idx, slot in enumerate(link):\n                if idx == 0:\n                    i_race[\"meeting_id\"] = slot.split(\"=\")[1]\n                elif idx == 1:\n                    i_race[\"event\"] = slot.split(\"=\")[1]\n                elif idx == 2:\n                    i_race[\"venue\"] = slot.split(\"=\")[1]\n                elif idx == 3:\n                    i_race[\"date\"] = slot.split(\"=\")[1]\n            #link = link.a[\"href\"]\n\n            output[\"po10_races\"].append(i_race)\n\n    return output\n","sub_path":"scrapers/po10/athlete.py","file_name":"athlete.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"54193695","text":"# This program takes a list and swaps the first and last value from the list..\r\n\r\n# Solution :\r\n\r\n# Take number of elements in the list ...\r\nn=int(input(\"Enter number of elements : \"))\r\na=[]\r\n# Accept values of list using a for loop...\r\nfor i in range(0,n):\r\n    a.append(int(input(\"Enter element : \")))\r\n\r\n# Using temporary variable swap first and last value from list.\r\ntemp=a[0]\r\na[0]=a[-1]\r\na[-1]=temp\r\n\r\n# Print newly formed list...\r\n\r\nprint(a)","sub_path":"list2_SwapFirstLast.py","file_name":"list2_SwapFirstLast.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"425902700","text":"#!/usr/bin/python\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Data generator\ndef get_sample():\n    # y = 0.2x + 0.3\n    x_gen = np.random.random()\n    y_gen = 0.2 * x_gen + 0.3\n    return np.reshape(x_gen, [1, 1]), np.reshape(y_gen, [1, 1])\n\n# Parameters\nlearning_rate = 0.01\nnum_samples = 10000\n\n# Input and output of the network\nx = tf.placeholder(tf.float32, [1, 1])\ny = tf.placeholder(tf.float32, [1, 1])\n\n\n# Network definition\nweight = tf.Variable(tf.random_normal([1, 1]))\nbias = tf.Variable(tf.random_normal([1]))\ny_pred = tf.add(tf.matmul(x, weight), bias)\n\n# Optimizer and cost function\ncost = tf.squared_difference(y_pred, y)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n# Launch the graph\nwith tf.Session() as sess:\n    sess.run(init)\n    sample = 0\n    costs = list()\n    while sample < num_samples:\n        sample += 1\n        train_x, train_y = get_sample()\n        _, c = sess.run([optimizer, cost], feed_dict={x: train_x, y: train_y})\n        costs.append(c.tolist()[0])\n        if sample % 1000 == 0:\n            print(\"Cost -\", c)\n    print(\"\\nFinal weight and bias (m and c)\")\n    print(\"W -\", weight.eval(), \", B -\", bias.eval())\n\nplt.plot(costs)\nplt.ylabel('Cost')\nplt.xlabel('Samples')\nplt.savefig(\"cost-sample.png\")\n","sub_path":"neural-nuts/1_linear_regression/1_linear_regression.py","file_name":"1_linear_regression.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"597601204","text":"import json, csv\nXHR_file = './h5.waimai.meituan.com.har'\ncsv_file = './h5.waimai.meituan.com.csv'\ntable = []\n
with open(XHR_file,encoding='utf-8') as har: # https://h5.waimai.meituan.com/waimai/mindex/menu?utm_source=5913&mtShopId=990717024543786\n    xhr_entries = json.loads(har.read())[\"log\"][\"entries\"]\n    xhr_entries = filter(lambda xhr : 'menuproducts' in xhr[\"request\"]['url'], xhr_entries)\n    res_entries = map(lambda xhr : xhr[\"response\"][\"content\"]['text'], xhr_entries)\n    for data in res_entries:\n        for i in json.loads(data)[\"data\"][\"spuList\"]:\n            table.append(tuple([i[\"spuName\"], i[\"spuId\"], i[\"tag\"], i[\"saleVolume\"], i[\"originPrice\"], i[\"currentPrice\"], i[\"sellStatus\"], i[\"skuList\"][0][\"skuId\"], i[\"skuList\"][0][\"spec\"], i[\"skuList\"][0][\"soldStatus\"], i[\"skuList\"][0][\"realStock\"], i[\"skuList\"][0][\"skuPromotionInfo\"]]))\n\nwith open(csv_file, \"w\", encoding='utf-8', newline='') as csvFile:\n    writer = csv.writer(csvFile)\n    fileHeader = (\"spuName\", \"spuId\", \"tag\", \"saleVolume\", \"originPrice\", \"currentPrice\", \"sellStatus\", \"skuId\", \"spec\", \"soldStatus\", \"realStock\", \"skuPromotionInfo\")\n    writer.writerow(fileHeader)\n    for i in table:\n        try:\n            writer.writerow(i)\n        except UnicodeEncodeError as e:\n            print(e)","sub_path":"py/request-demo/美团/get_menuproducts_from_meituan_xhr.py","file_name":"get_menuproducts_from_meituan_xhr.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"564311602","text":"from django.utils import timezone\nimport datetime\nfrom django.db import models\nfrom django.db.models import permalink\nfrom django.core.mail import send_mass_mail\nfrom django.template import loader, Context\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\n# from .forms import HOP_W_PM, COOL_KIDS_ALL, POM_WEDDING, SILK_W_S_AM,\n# MAUI_W_S_CHOICE, SING_S_AM, SUN_W_AM, ALEX, CHEESE_W_AM_PM, JULIA, ARI\n\nHOP_W_PM = [\n    'rachel insoft', 'phil masui', 'philip masui', 'mary awadallah',\n    'mary shehata', 'michael shehata', 'mike shehata', 'christina amendola',\n    'chrissy amendola', 'vince gaviria', 'vincent gaviria',\n    'amanda delshad', 'amanda mintzer', 'danny mintzer',\n    'celeste conrad', 'celeste holmes', 'jay holmes', 'ashley budasoff',\n    'larry budasoff', 'lawrence budasoff', 'adin insoft',\n    'leah kandel', 'jacob newman', 'kendrice newman',\n    'kendrice james', 'clay thibodeaux', 'tari tan', 'taralyn tan',\n    'ian mclachlan', 'milner', 'elliott milner',\n    'e. s. 
milner', 'e s milner', 'saul glasman', 'nicole neubarth',\n 'alan emanuel', 'kaori graybeal', 'matthias minderer',\n 'christina welsh', 'dan millman', 'daniel millman', 'rebecca yang',\n 'alex wiltschko', 'katherine gorman', 'ryan heisler', 'etta king',\n 'etta heisler']\n\nCOOL_KIDS_ALL = ['rebecca caine', 'john light', 'ben caine', 'anna caine']\n\nPOM_WEDDING = ['raphael koster', 'avi ruderman', 'priya ruderman',\n'francis song', 'sam ritter', 'samuel ritter', 'cotie long', 'kim stachenfeld',\n'kimberly stachenfeld', 'neil rabinowitz', 'jo rabinowitz', 'sonia rego',\n'matt kusner', 'matthew kusner']\n\nSILK_W_S_AM = ['claire caine', 'dan caine', 'daniel caine', 'jerome socolovsky',\n'petra glimaker', 'emanuel socolovsky', 'shoshi socolovsky', 'shosh socolovsky',\n'eviatar socolovsky', 'yaara socolovsky', 'caroline ertz', 'greg ertz',\n'maria socolovsky']\n\nMAUI_W_S_CHOICE = ['mendel socolovsky', 'nils socolovsky', 'eviatar socolovsky',\n'ronli socolovsky', 'ron li socolovsky', 'tomer socolovsky']\n\nSING_S_AM = ['janet tanzi', 'dena glasgow', 'jason glasgow', 'heather zacker',\n'david harlow', 'sheryl marcus', 'alan marcus', 'marcia leifer', 'alan leifer',\n'wes gardenswartz', 'wesley gardenswartz', 'shira gardenswartz', 'merle hass',\n'sylvain korzennik', 'sue bergman', 'barry bergman', 'susan bergman',\n'vicki isman', 'marshall isman', 'michael kane', 'sue kane', 'bob wake',\n'marcia wake', 'beth davis', 'maerton davis']\n\nSUN_W_AM = ['rob insoft', 'robert insoft', 'andie insoft', 'tova morcos',\n'samir morcos', 'jared kliger', 'philip freed', 'linda freed',\n'linda rich freed', 'linda rich', 'chris harvey', 'christopher harvey',\n'lauren orefice', 'shalva greenbaum']\n\nALEX = ['alex trott', 'alexander trott']\n\nCHEESE_W_AM_PM = ['dahlia greenbaum', 'daniel greenbaum',]\n\nJULIA = ['julia caine']\n\nARI = ['ari morcos']\n\nSHABBAT_ATTENDING_CHOICES = (\n ('yes', 'Don\\'t be meshuga, of course I\\'ll be there!'),\n ('no', 'Az och un vai! I can\\'t make it!')\n)\n\nWELCOME_ATTENDING_CHOICES = (\n ('yes', 'Definitely!'),\n ('no', 'I won\\'t be able to make it')\n)\n\nWEDDING_ATTENDING_CHOICES = (\n ('yes', 'Joyfully accept'),\n ('no', 'Regretfully decline'),\n)\n\nTUES_AM_ATTENDING_CHOICES = (\n ('yes', 'I love brunch! 
Count me in!'),\n ('no', 'I won\'t be able to make it.'),\n)\n\nTUES_PM_ATTENDING_CHOICES = (\n ('yes', 'Fo\' sho\''),\n ('no', 'I\'ve got other places to be')\n)\n\nWELCOME_CHOICES = (\n ('None', 'None'),\n ('Veggie', 'Vegetarian'),\n ('Vegan', 'Vegan'),\n ('GF', 'Gluten-free'),\n ('Kosh', 'No meat with milk'),\n ('Other', 'Other (please elaborate in comments section)'),\n)\n\nWEDDING_CHOICES = (\n ('malfatti', 'Ricotta and spinach malfatti with sage butter and parmesan crisps (vegetarian)',),\n ('curry', 'Red lentil coconut curry, grilled sweetcorn and courgette, and crisp rice balls (vegan)'),\n)\n\n\nclass RSVPFirstModel(models.Model):\n your_name = models.CharField(default=None, max_length=100)\n\n def __unicode__(self):\n return u\"%s\" % (self.your_name)\n\nclass GuestManager(models.Manager):\n def in_db(self):\n nm = self.filter(first_last='your_name').exists()\n return nm\n\n\nclass Guest(models.Model):\n \"\"\"\n A single guest\n \"\"\"\n app_label = 'weddingsite'\n first_last = models.CharField(verbose_name='Your name:', blank=True,\n default='%s', max_length=128)\n shabbat_dinner = models.NullBooleanField(\n choices=SHABBAT_ATTENDING_CHOICES,\n verbose_name='\\nWill you be able to attend Shabbat dinner on Friday, July 20th?\\n\\n',\n default='yes', blank=True)\n welcome_dinner = models.NullBooleanField(choices=WELCOME_ATTENDING_CHOICES, verbose_name=\n '\\nWill you be able to attend the welcome dinner on Sunday, July 22nd?\\n\\n',\n default='yes', blank=True)\n welcome_dietary_restrictions = models.CharField(verbose_name=\n '\\nDo you have any dietary restrictions?\\n\\n', max_length=6,\n choices=WELCOME_CHOICES, default='None', blank=True)\n wedding = models.NullBooleanField(choices = WEDDING_ATTENDING_CHOICES, verbose_name=\n '\\nWill you be able to attend the wedding on Monday, July 23rd?\\n\\n',\n default='yes', blank=True)\n wedding_meal = models.CharField(verbose_name=\n '\\nAt the wedding, I would like to eat:\\n\\n', max_length=8,\n choices=WEDDING_CHOICES, default='malfatti', blank=True)\n tues_am = models.NullBooleanField(choices = TUES_AM_ATTENDING_CHOICES,\n verbose_name='\\nWill you be able to come to brunch on Tuesday, July 24th?\\n\\n',\n default='yes', blank=True)\n tues_pm = models.NullBooleanField(choices = TUES_PM_ATTENDING_CHOICES,\n verbose_name='\\nWill you be able to come to the gathering on Tuesday, July 24th?\\n\\n',\n default='yes', blank=True, )\n song_request = models.TextField(\n verbose_name='I would dance if I heard this song:', default = 'YMCA by The Village People',\n blank=True, max_length=2048)\n comments = models.TextField(\n verbose_name='Comments for the couple!', default='I\'m so excited to come!',\n blank=True, max_length=2048, )\n\n @property\n def name(self):\n return u'{}'.format(self.first_last)\n\n def __unicode__(self):\n return 'Guest: {}'.format(self.first_last)\n\n # objects = GuestManager()\n\n\n\n# class Party(models.Model):\n# \"\"\"\n# A party consists of one or more guests.\n# \"\"\"\n# name = models.TextField()\n# category = models.CharField(max_length=20, null=True, blank=True)\n# shabbat_dinner = models.NullBooleanField(default=None)\n# welcome_dinner = models.BooleanField(default=False)\n# wedding = models.NullBooleanField(default=None)\n# tues_am = models.NullBooleanField(default=None)\n# tues_pm = models.NullBooleanField(default=None)\n# comments = models.TextField(null=True, blank=True)\n#\n# def __unicode__(self):\n# return 'Party: {}'.format(self.name)\n#\n# @classmethod\n# def in_default_order(cls):\n# return 
cls.objects.order_by('category', 'name')\n#\n# @property\n# def ordered_guests(self):\n# return self.guest_set.order_by('is_child', 'pk')\n#\n# @property\n# def any_guests_attending(self):\n# return any(self.guest_set.values_list('is_attending', flat=True))\n","sub_path":"weddingsite/models_old.py","file_name":"models_old.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"177926785","text":"class Pokemon:\r\n # A pokemon has a name, level, type,\r\n # maximum health points which are determined by its level,\r\n # current health points which are equal to the maximum at the beginning,\r\n # a knocked-out state which is False at the beginning,\r\n # experience points which are determined by its level, and\r\n # an evolution family list\r\n def __init__(self, name, level, type, fam):\r\n self.name = name\r\n self.level = level\r\n self.type = type\r\n self.hp = level * 3\r\n self.max_hp = level * 3\r\n self.knocked_out = False\r\n self.exp = (level-1) * 10**3\r\n self.fam = fam\r\n\r\n\r\n def __repr__(self):\r\n # When a pokemon is printed, its info and current state will be returned\r\n return '{name}! ({type} type, Lv.: {level}, HP: {hp}, Exp.Points: {exp})'.format(name=self.name, type=self.type, level=self.level, hp=self.hp, exp=self.exp)\r\n #'To Next Lv. {expleft}'.format())\r\n #isknockedout \r\n\r\n\r\n def lose_health(self, damage):\r\n # Pokemon loses health points\r\n self.hp -= damage\r\n # if pokemon's damage is greater or equal to its health points, then the pokemon faints\r\n # pokemon's health points are set to zero\r\n if self.hp <= 0:\r\n self.hp = 0\r\n self.knocked_out = True\r\n print('{} fainted!'.format(self.name))\r\n print('--------------------------------')\r\n else: \r\n print('{name} lost {amount} health point(s).'.format(name=self.name, amount=damage))\r\n print('--------------------------------')\r\n\r\n\r\n def gain_health(self, hp_gained):\r\n # if pokemon is fainted, it should be revived\r\n if self.knocked_out == True:\r\n print('{name} is knocked out. It should be revived first.\\n'.format(name=self.name)) \r\n # The sum of pokemon's health points and the gained amount cannot surpass its maximum health points\r\n elif (hp_gained + self.hp) > self.max_hp:\r\n print('{name}\\'s health points are now {maxhp}.'.format(name=self.name, maxhp=self.max_hp)) \r\n # The pokemon's health points is increased by the gained amount of hp\r\n else:\r\n self.hp += hp_gained\r\n print('{pok}\\'s health points are increased by {gain}.'.format(pok=self.name, gain=round(hp_gained)))\r\n\r\n\r\n def revive_pokemon(self):\r\n # Changes the pokemon's knocked-out status from True to False if a status healer is used\r\n if self.knocked_out == True:\r\n self.knocked_out = False\r\n print('{} recovered from fainting!'.format(self.name))\r\n print('--------------------------------')\r\n else: \r\n print('{name} is not fainted! (HP = {hp})'.format(name=self.name, hp=self.hp))\r\n print('--------------------------------') \r\n\r\n \r\n def attack(self, other_pokemon):\r\n # Checks if the pokemon is fainted\r\n if self.knocked_out == True:\r\n print(str(self.name) + ' is fainted! 
It cannot attack.')\r\n print('--------------------------------')\r\n elif other_pokemon.knocked_out == True:\r\n print('{pok} cannot attack a fainted pokemon!'.format(pok=self.name)) \r\n print('--------------------------------')\r\n # Depend on the pokemon's type, if it's advantageous over the opponent's type, \r\n # the damage to the opponent is twice the level of the pokemon that attacked\r\n elif (self.type == 'fire' and other_pokemon.type == 'grass') or (self.type == 'water' and other_pokemon.type == 'fire') or (self.type == 'grass' and other_pokemon.type == 'water'):\r\n print('{pok} attacks {otherpok}!'.format(pok=self.name, otherpok=other_pokemon.name))\r\n damage = 2 * self.level\r\n print('Very effective!')\r\n print('--------------------------------')\r\n other_pokemon.lose_health(damage)\r\n self.exp_gain(other_pokemon)\r\n # Depend on the pokemon's type, if it's disadvantageous over the opponent's type, \r\n # the damage to the opponent is half the level of the pokemon that attacked\r\n elif (self.type == 'fire' and other_pokemon.type == 'fire') or (self.type == 'grass' and other_pokemon.type == 'grass') or (self.type == 'water' and other_pokemon.type == 'water') or (self.type == 'fire' and other_pokemon.type == 'water') or (self.type == 'water' and other_pokemon.type == 'grass') or (self.type == 'grass' and other_pokemon.type == 'fire'):\r\n print('{pok} attacks {otherpok}!'.format(pok=self.name, otherpok=other_pokemon.name))\r\n damage = 0.5 * self.level\r\n print('Not very effective!')\r\n print('--------------------------------')\r\n other_pokemon.lose_health(round(damage))\r\n self.exp_gain(other_pokemon) \r\n \r\n\r\n def exp_gain(self, other_pokemon):\r\n # Experience points required to level up, are determined by pokemon's level\r\n levelup_exp = self.level * 10**3\r\n # if the opponent has fainted, then the winning pokemon gains health \r\n # and experience points based on the opponent's level \r\n if other_pokemon.knocked_out == True:\r\n self.exp += 100 * other_pokemon.level\r\n # Pokemon levels up when it reaches the required experience points,\r\n # maximum health is increased, and\r\n if self.exp >= levelup_exp:\r\n self.level += 1\r\n self.max_hp += 50 \r\n print(self.name + ' grew to Lv. ' + str(self.level) + '!')\r\n self.gain_health(round(1.5 * other_pokemon.level))\r\n self.evolution() \r\n print(str(self.name) + ': Exp. Points = ' + str(self.exp) + ', HP = ' + str(self.hp) + ', Max. 
HP = ' + str(self.max_hp)) \r\n else:\r\n print(self.name + ' won!')\r\n print(self.name + ' gained ' + str(self.exp) + ' Exp.Points!')\r\n print('--------------------------------')\r\n\r\n\r\n def evolution(self):\r\n # Pokemon evolves into the second pokemon in family list after it reaches a certain level\r\n if self.level >= 6:\r\n self.name = self.fam[1]\r\n print(self.fam[0] + ' evolves into ' + self.name + '!')\r\n # Pokemon evolves into the third pokemon in family list when it reaches level 20\r\n elif self.level >= 20:\r\n self.name = self.fam[2]\r\n print(self.fam[1] + ' evolves into ' + self.name + '!')\r\n\r\n\r\n# Subclasses of class Pokemon are defined with their names, type and evolution family\r\nclass Charmander(Pokemon):\r\n def __init__(self, level):\r\n super().__init__('Charmander', level, 'fire', ['Charmander', 'Charmeleon', 'Charizard']) \r\n\r\n\r\nclass Bulbasaur(Pokemon):\r\n def __init__(self, level):\r\n super().__init__('Bulbasaur', level, 'grass', ['Bulbasaur', 'Ivysaur', 'Venusaur'])\r\n\r\n\r\nclass Squirtle(Pokemon):\r\n def __init__(self, level):\r\n super().__init__('Squirtle', level, 'water', ['Squirtle', 'Wartortle', 'Blastoise']) \r\n\r\n\r\nclass Trainer:\r\n # A Trainer has a name, a list of pokemons, \r\n # a dictionary of different potions with each respective health points,\r\n # a list with status healers,\r\n # and an active pokemon which is the first pokemon of the list (represented with 0)\r\n def __init__(self, name, pokemons):\r\n self.name = name\r\n self.pokemons = pokemons\r\n self.potions = {'Potion': 20, 'Super Potion': 50, 'Hyper Potion': 200}\r\n self.status_healers = ['Revive', 'Max Revive']\r\n self.active_pok = 0\r\n\r\n \r\n def potion_use(self, potion):\r\n while True:\r\n # The user selects a pokemon to use the potion\r\n print('{} select a Pokemon to heal:'.format(self.name))\r\n pokemon_number = input(('Type 1 for ' + str(self.pokemons[0]) + '\\n'\r\n 'Type 2 for ' + str(self.pokemons[1]) + '\\n'\r\n 'Type 3 for ' + str(self.pokemons[2]) + '\\n'))\r\n\r\n if pokemon_number == '1':\r\n chosen_pokemon = self.pokemons[0] \r\n elif pokemon_number == '2':\r\n chosen_pokemon = self.pokemons[1] \r\n elif pokemon_number == '3':\r\n chosen_pokemon = self.pokemons[2]\r\n\r\n # Checks if there are potions in the dictionary\r\n if bool(self.potions) == False:\r\n print(self.name + ', you don\\'t have any potions\\n')\r\n print('--------------------------------')\r\n break\r\n # Checks if there is a specific potion in the dictionary\r\n elif potion not in self.potions:\r\n print(self.name + ', you don\\'t have ' + potion + ' at your disposal\\n')\r\n print('--------------------------------') \r\n continue\r\n # Uses the potion to restore the pokemon's health points,\r\n # removes the potion from the dictionary,\r\n # and prints which potions are left\r\n else: \r\n print('{pokemon}\\'s HP was restored by {num} point(s).'.format(pokemon=chosen_pokemon, num=self.potions[potion]))\r\n chosen_pokemon.gain_health(self.potions[potion])\r\n self.potions.pop(potion)\r\n print('Potions left: ' + str(self.potions))\r\n print('--------------------------------')\r\n break\r\n\r\n\r\n def healer_use(self, healer):\r\n while True:\r\n # The user selects a pokemon to use the healer\r\n print('{} select a Pokemon to recover:'.format(self.name))\r\n pokemon_number = input(('Type 1 for ' + str(self.pokemons[0]) + '\\n'\r\n 'Type 2 for ' + str(self.pokemons[1]) + '\\n'\r\n 'Type 3 for ' + str(self.pokemons[2]) + '\\n'))\r\n\r\n if pokemon_number == '1':\r\n 
chosen_pokemon = self.pokemons[0] \r\n elif pokemon_number == '2':\r\n chosen_pokemon = self.pokemons[1] \r\n elif pokemon_number == '3':\r\n chosen_pokemon = self.pokemons[2]\r\n\r\n # Checks if there are any status healers left in the list\r\n if bool(self.status_healers) == False:\r\n print(self.name + ', you don\\'t have any status healers left!')\r\n break\r\n # Checks if there is a specific healer in the list\r\n elif healer not in self.status_healers:\r\n print(self.name + ', you don\\'t have ' + healer + ' at your disposal')\r\n continue\r\n # Revives pokemon and removes the healer from the list if pokemon is fainted\r\n elif chosen_pokemon.knocked_out == True: \r\n print('{name} uses {healer}!'.format(name=self.name, healer=healer))\r\n print('--------------------------------')\r\n chosen_pokemon.revive_pokemon()\r\n if healer == self.status_healers[0]:\r\n chosen_pokemon.gain_health(chosen_pokemon.max_hp * 0.5)\r\n elif healer == self.status_healers[1]:\r\n chosen_pokemon.gain_health(chosen_pokemon.max_hp)\r\n self.status_healers.remove(healer)\r\n print('Remaining status healers: ' + str(self.status_healers))\r\n print('--------------------------------')\r\n break\r\n # If pokemon is not fainted, the pokemon cannot be revived \r\n else:\r\n chosen_pokemon.revive_pokemon()\r\n continue \r\n\r\n\r\n def attack_other_trainer(self, other_trainer):\r\n # The current trainer's active pokemon attacks the other trainer's active pokemon\r\n my_pokemon = self.pokemons[self.active_pok]\r\n other_pokemon = other_trainer.pokemons[other_trainer.active_pok] \r\n my_pokemon.attack(other_pokemon)\r\n \r\n\r\n def switch_pokemon(self):\r\n while True:\r\n # Switches the current active pokemon to another one that is in the pokemon list\r\n # Asks the user for input\r\n print('{} select Pokemon:'.format(self.name))\r\n pokemon_number = input(('Type 1 for ' + str(self.pokemons[0]) + '\\n'\r\n 'Type 2 for ' + str(self.pokemons[1]) + '\\n'\r\n 'Type 3 for ' + str(self.pokemons[2]) + '\\n'))\r\n\r\n if pokemon_number == '1':\r\n chosen_pokemon = self.pokemons[0] \r\n elif pokemon_number == '2':\r\n chosen_pokemon = self.pokemons[1] \r\n elif pokemon_number == '3':\r\n chosen_pokemon = self.pokemons[2]\r\n\r\n # Checks if the chosen pokemon is knocked out\r\n if chosen_pokemon.knocked_out == True:\r\n print(str(self.name) + ', the pokemon you want is fainted.\\nSelect another pokemon.')\r\n print('--------------------------------')\r\n continue\r\n # Checks if the pokemon selected is already the active one \r\n elif chosen_pokemon == self.pokemons[self.active_pok]:\r\n print(str(self.name) + ', your active pokemon is already ' + str(chosen_pokemon) + '\\nSelect another pokemon.')\r\n print('--------------------------------')\r\n continue\r\n # The selected pokemon becomes the active one\r\n elif chosen_pokemon != self.pokemons[self.active_pok]:\r\n print(str(self.name) + ' withdraw ' + str(self.pokemons[self.active_pok]))\r\n print('Go! 
{pok}'.format(pok=chosen_pokemon))\r\n print('--------------------------------')\r\n # Find the index of the selected pokemon in order to change it with the current active pokemon\r\n chosen_pokemon_index = self.pokemons.index(chosen_pokemon)\r\n self.pokemons[self.active_pok], self.pokemons[chosen_pokemon_index] = self.pokemons[chosen_pokemon_index], self.pokemons[self.active_pok]\r\n break\r\n\r\n\r\n def battle(self, other_trainer):\r\n # Start a battle\r\n my_pokemon = self.pokemons[self.active_pok]\r\n other_pokemon = other_trainer.pokemons[other_trainer.active_pok]\r\n print(str(other_trainer.name) + ' is challenged by ' + str(self.name) + '!')\r\n print('--------------------------------')\r\n print('{name} sent out {pok}'.format(name=self.name, pok=my_pokemon))\r\n print('--------------------------------')\r\n print('{othername} sent out {otherpok}'.format(othername=other_trainer.name, otherpok=other_pokemon))\r\n print('--------------------------------')\r\n while True:\r\n # Ask the user to select an action\r\n print('{} select action:'.format(self.name))\r\n selection = input(('Type 1 to attack \\n' \r\n 'Type 2 to switch Pokemon \\n' \r\n 'Type 3 to use a Potion \\n' \r\n 'Type 4 to use a Status Healer \\n'\r\n 'Type anything else to leave the battle\\n'))\r\n # Attack other trainer\r\n if selection == '1':\r\n # The first trainer attacks\r\n self.attack_other_trainer(other_trainer)\r\n # Checks if pokemons have fainted to end the battle\r\n if all([pok.knocked_out==True for pok in other_trainer.pokemons]):\r\n print('{name} has won the battle!'.format(name=self.name))\r\n break\r\n elif all([pok.knocked_out==True for pok in self.pokemons]):\r\n print('{name} has won the battle!'.format(name=other_trainer.name))\r\n break\r\n # If the opponent's active pokemon has fainted, the trainer switches pokemon\r\n elif other_pokemon.knocked_out == True:\r\n other_trainer.switch_pokemon() \r\n # The new pokemon is set as the current active\r\n other_pokemon = other_trainer.pokemons[other_trainer.active_pok]\r\n # The second trainer attacks\r\n elif other_pokemon.knocked_out == False: \r\n other_trainer.attack_other_trainer(self)\r\n # If the active pokemon has fainted, the trainer switches pokemon\r\n if my_pokemon.knocked_out == True:\r\n self.switch_pokemon()\r\n # The new pokemon is set as the current active\r\n my_pokemon = self.pokemons[self.active_pok]\r\n # The other trainer attacks again\r\n other_trainer.attack_other_trainer(self)\r\n \r\n continue\r\n\r\n # Switch Pokemon\r\n elif selection == '2':\r\n self.switch_pokemon()\r\n my_pokemon = self.pokemons[self.active_pok]\r\n # After the first trainer switches pokemon, the other trainer attacks\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n\r\n # Ask the user for input to select a potion \r\n elif selection == '3':\r\n potion_number = input(('Select Potion: \\n'\r\n 'Type 1 for ' + list(self.potions.keys())[0] + '\\n'\r\n 'Type 2 for ' + list(self.potions.keys())[1] + '\\n'\r\n 'Type 3 for ' + list(self.potions.keys())[2] + '\\n'))\r\n\r\n if potion_number == '1':\r\n chosen_potion = list(self.potions.keys())[0] \r\n self.potion_use(chosen_potion)\r\n # After the first trainer heals a pokemon, the other trainer attacks\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n elif potion_number == '2':\r\n chosen_potion = list(self.potions.keys())[1] \r\n self.potion_use(chosen_potion)\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n elif potion_number == '3':\r\n chosen_potion = 
list(self.potions.keys())[2] \r\n self.potion_use(chosen_potion)\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n\r\n # Ask the user for input to select a status healer \r\n elif selection == '4':\r\n healer_number = input(('Select a Status Healer: \\n'\r\n 'Type 1 for ' + self.status_healers[0] + '\\n'\r\n 'Type 2 for ' + self.status_healers[1] + '\\n'))\r\n \r\n if healer_number == '1':\r\n chosen_healer = self.status_healers[0] \r\n self.healer_use(chosen_healer)\r\n # After the first trainer revives a pokemon, the other trainer attacks\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n elif healer_number == '2':\r\n chosen_healer = self.status_healers[1] \r\n self.healer_use(chosen_healer)\r\n other_trainer.attack_other_trainer(self)\r\n continue\r\n else:\r\n break \r\n\r\n\r\n#Pokemon instances\r\nCharmander = Charmander(5)\r\nBulbasaur = Bulbasaur(5)\r\nSquirtle = Squirtle(5)\r\nTotodile = Pokemon('Totodile', 5, 'water', ['Totodile', 'Croconaw', 'Feraligatr'])\r\nCyndaquil = Pokemon('Cyndaquil', 5, 'fire', ['Cyndaquil', 'Quilava', 'Typhlosion'])\r\nChikorita = Pokemon('Chikorita', 5, 'grass', ['Chikorita', 'Bayleef', 'Meganium'])\r\n\r\n#Trainer instances\r\nCarol = Trainer('Carol', [Charmander, Bulbasaur, Squirtle])\r\nTim = Trainer('Tim', [Cyndaquil, Chikorita, Totodile])\r\n\r\n# Test\r\nCarol.battle(Tim)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"pokemon_master.py","file_name":"pokemon_master.py","file_ext":"py","file_size_in_byte":18674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"476724328","text":"#!/usr/local/bin/python\n__author__ = 'Christophe COMPAIN'\n\nimport sys\nimport requests\nimport json\nimport pprint\nfrom jinja2 import Template\nfrom jinja2 import Environment, PackageLoader\nimport logging\nimport yaml\n\n\n\ndef Renderer(Switches_list,CliConfigTemplate,logger1):\n pp = pprint.PrettyPrinter(indent=4)\n for i in range(0,len(Switches_list)):\n Switch = Switches_list[i]\n name = Switch['name']\n commands_to_push = CliConfigTemplate.render(Switch)\n filename = str(name)+\"-cli.cfg\"\n logger1.info('[RENDERER] Process equipment : %s',name)\n logger1.info('[RENDERER] Generate CLI in file : %s',filename)\n with open('./cli/%s' % filename,\"w\") as show_run:\n logger1.debug(commands_to_push)\n show_run.write(commands_to_push)\n\ndef CommandEngine(Switches_list,logger1):\n index=1\n payload=[]\n pp = pprint.PrettyPrinter(indent=4)\n myheaders={'content-type':'application/json-rpc'}\n for i in range(0,len(Switches_list)):\n index=0\n payload=[]\n payload=[{\n\t\t\"jsonrpc\": \"2.0\",\n\t\t\"method\": \"cli\",\n\t\t\"params\": {\n\t\t\t\"cmd\": \"conf t\",\n\t\t\t\"version\": 1\n\t\t },\n\t\t\"id\": 1\n }\n\t ]\n Switch=[]\n Switch = Switches_list[i]\n name = Switch['name']\n username = Switch['username']\n password = Switch['password']\n ip = Switch['ip']\n url = 'http://' + ip + '/ins'\n filename = str(name)+\"-cli.cfg\"\n logger1.info('[ENGINE] Process equipment : %s',name)\n with open('./cli/%s' % filename,\"r\") as show_run:\n for line in show_run:\n if line[0] != '\\n':\n index=index+1\n logger1.info('[ENGINE] Generate JSON/CLI Command # %d',index)\n logger1.debug('[ENGINE] ==> Raw CLI : %s',line.split('\\n')[0])\n logger1.debug('[ENGINE] ==> Create JSON element for command %d :',index)\n new_command={\"jsonrpc\": \"2.0\",\"method\": \"cli\",\"params\": {\"cmd\": line.split(\"\\n\")[0],\"version\": 1},\"id\":index}\n logger1.debug('[ENGINE] ==> JSON : %s',new_command)\n 
payload += [new_command]\n logger1.debug('[ENGINE] ==> Increment global list of commands')\n try:\n logger1.info('[ENGINE] Execute command for %s at %s',name,url)\n logger1.debug('[ENGINE] PAYLOAD=%s',payload)\n response = requests.post(url,data=json.dumps(payload), headers=myheaders,auth=(username,password)).json()\n except:\n logger1.error('[ENGINE] REST Call Failed')\n #if response:\n #print 'Success'\n\t\t #print '--------'\n\t #else:\n\t\t #print 'Error ', response[1]\n\t\t #print 'Remaining commands after this line were not parsed'\n\t\t #print '--------'\n\n \n","sub_path":"Library/CoreInfraLibrary.py","file_name":"CoreInfraLibrary.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223918249","text":"import jinja2\nimport ruamel.yaml as yaml\nimport sys\n\n\ndef add_project_resources(prefix, name, maintainers, deps_current_release):\n project_prefix = prefix\n project_name = name\n project_maintainers = maintainers.split('\\n')\n release = deps_current_release\n\n jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(\n ['/tmp/']))\n jinja_template = jinja_env.get_template(\"resource.j2\")\n content = jinja_template.render(project_maintainers=project_maintainers,\n project_prefix=project_prefix,\n project_name=project_name,\n release=release)\n with open(\"resources/%s-%s.yaml\" % (project_prefix, project_name),\n 'w') as fp:\n fp.write(content)\n\n # Add project to gerritbot configuration\n with open(\"gerritbot/channels.yaml\") as gerritbot:\n info = yaml.load(gerritbot, Loader=yaml.RoundTripLoader)\n\n if prefix != 'deps':\n newkey = \"%s/%s-distgit\" % (project_prefix, project_name)\n if newkey not in info['rdo']['projects']:\n info['rdo']['projects'].append(newkey)\n else:\n print(\"Key %s already in gerritbot\" % newkey)\n\n newkey = \"%s/%s\" % (project_prefix, project_name)\n if newkey not in info['rdo']['projects']:\n info['rdo']['projects'].append(newkey)\n else:\n print(\"Key %s already in gerritbot\" % newkey)\n\n info['rdo']['projects'].sort()\n with open('gerritbot/channels.yaml', 'w') as outfile:\n outfile.write(yaml.dump(info, Dumper=yaml.RoundTripDumper,\n indent=4, block_seq_indent=2))\n\n # And also to the Zuul configuration, but only for distgits\n # The specific -distgit repo is only created for RDO Trunk packages,\n # not deps\n if prefix != 'deps':\n yml = yaml.YAML()\n yml.indent(mapping=2, sequence=4, offset=2)\n\n with open(\"zuul.d/projects.yaml\") as infile:\n info = yml.load(infile)\n\n data = yaml.comments.CommentedMap(\n [('project', yaml.comments.CommentedMap(\n [('name', \"review.rdoproject.org/%s/%s-distgit\" % (project_prefix, project_name)),\n ('default-branch', 'rpm-master'),\n ('templates', yaml.comments.CommentedSeq(['package-distgit-check-jobs', 'system-required']))])\n )])\n\n data['project']['templates'].ca.items[1] = [\n yaml.tokens.CommentToken('\\n\\n', yaml.error.CommentMark(0), None), None, None, None]\n\n if data not in info:\n info.append(data)\n else:\n print(\"Zuul config already includes %s/%s-distgit\" %\n (project_prefix, project_name))\n\n data = yaml.comments.CommentedMap(\n [('project', yaml.comments.CommentedMap(\n [('name', \"review.rdoproject.org/%s/%s\" % (project_prefix, project_name)),\n ('templates', yaml.comments.CommentedSeq(['package-check-jobs', 'system-required']))])\n )])\n\n data['project']['templates'].ca.items[1] = [\n yaml.tokens.CommentToken('\\n\\n', yaml.error.CommentMark(0), None), None, None, 
None]\n\n if data not in info:\n info.append(data)\n else:\n print(\"Zuul config already includes %s/%s\" %\n (project_prefix, project_name))\n\n info = sorted(info, key=lambda i: i['project']['name'])\n yml.dump(info, open('zuul.d/projects.yaml', 'w'))\n\n # Strip the extra 2 spaces that ruamel.yaml appends because we told it\n # to indent an extra 2 spaces. Because the top level entry is a list it\n # applies that indentation at the top. It doesn't indent the comment lines\n # extra though, so don't do them.\n with open('zuul.d/projects.yaml', 'r') as fp:\n content = fp.readlines()\n with open('zuul.d/projects.yaml', 'w') as fp:\n for line in content:\n if '#' in line:\n fp.write(line)\n elif len(line) < 2:\n fp.write(line)\n else:\n fp.write(line[2:])\n\n\ndef add_project_package(prefix, name, deps_current_release):\n # Add the project to the rdo.yaml resource, so it can be indexed\n # by RepoXplorer\n\n release = deps_current_release\n\n with open('resources/rdo.yaml') as fp:\n resource = yaml.load(fp, Loader=yaml.RoundTripLoader, preserve_quotes=True)\n\n if prefix != 'deps':\n project = \"%s/%s-distgit\" % (prefix, name)\n data = yaml.comments.CommentedMap(\n [(project, yaml.comments.CommentedMap([\n ('zuul/include', []),\n ('repoxplorer/branches', [\"rpm-master\"]),\n ('default-branch', 'rpm-master'),\n ]))])\n else:\n project = \"%s/%s\" % (prefix, name)\n data = yaml.comments.CommentedMap(\n [(project, yaml.comments.CommentedMap([\n ('zuul/include', []),\n ('repoxplorer/branches', [\"c9s-%s-rdo\" % release]),\n ('default-branch', 'c9s-%s-rdo' % release),\n ]))])\n\n if data not in resource['resources']['projects']['RDO']['source-repositories']:\n resource['resources']['projects']['RDO']['source-repositories'].append(data)\n else:\n print(\"Key %s is already in\" % project)\n\n resource['resources']['projects']['RDO']['source-repositories'] = sorted(\n resource['resources']['projects']['RDO']['source-repositories'], key=lambda i: sorted(i.keys()))\n\n with open('resources/rdo.yaml', 'w') as fp:\n fp.write(yaml.dump(resource,\n Dumper=yaml.RoundTripDumper,\n indent=2,\n block_seq_indent=0))\n\n\nif __name__ == '__main__':\n add_project_resources(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n add_project_package(sys.argv[1], sys.argv[2], sys.argv[4])\n","sub_path":"playbooks/rdoinfo/create-project/files/add-project-from-rdoinfo.py","file_name":"add-project-from-rdoinfo.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"299054147","text":"# -*- coding: utf-8 -*-\nfrom os import path\nfrom sys import path as sys_path\nimport json\nimport time\nimport io\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nsys_path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))\nimport requests\nfrom elasticsearch import Elasticsearch\nimport certifi\n\n# Timestamp from two minutes ago, ending at second 59\nend_time = time.strftime(\"%Y-%m-%d %H:%M:59\", time.localtime(time.time() - 120))\n# Timestamp from two minutes ago, starting at second 00\nstart_time = time.strftime(\"%Y-%m-%d %H:%M:00\", time.localtime(time.time() - 120))\n# Data file\ndata_file = r\"./data.txt\"\nlog_file_path = r\"./run.log\"\nES = Elasticsearch(['https://vpc-rfinex-nxaxxpo3dm6fog2i7ltdsabkne.ap-northeast-2.es.amazonaws.com'], use_ssl=True, ca_certs=certifi.where())\n\n\ndef my_logger(log_file):\n \"\"\"\n Configure the logger output format.\n :return: a logger object that is ready to use\n \"\"\"\n logger = logging.getLogger()\n fh = logging.FileHandler(log_file, encoding='utf-8')\n formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s File:<%(filename)s line %(lineno)d>')\n logger.setLevel(logging.DEBUG) # Set the file log level\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n return logger\n\n\ndef get_metrics_from_es(index, doc_type, query_para, s_time, e_time, time_field):\n \"\"\"\n Fetch information from Elasticsearch.\n :param index:\n :param doc_type:\n :param query_para:\n :param s_time:\n :param e_time:\n :param time_field:\n :return:\n \"\"\"\n query_body = {\n \"from\": \"0\",\n \"size\": \"0\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"{0}\".format(time_field): {\n \"gte\": s_time,\n \"lte\": e_time,\n \"format\": \"yyyy-MM-dd HH:mm:ss\"\n }\n }\n },\n {\n \"query_string\": {\n \"query\": query_para\n }\n }\n ]\n }\n }\n }\n response = ES.search(\n index=\"{0}\".format(index),\n doc_type=\"{0}\".format(doc_type),\n body=query_body\n )\n\n res = response['hits']['total']\n print(res)\n return int(res)\n\n\ndef report_data(show_key, value, endpoint_host):\n \"\"\"\n Report the Elasticsearch result to 127.0.0.1:1988.\n :param show_key:\n :param value:\n :param endpoint_host:\n :return:\n \"\"\"\n ts = int(time.time())\n payload = [\n {\n \"endpoint\": endpoint_host,\n \"metric\": show_key,\n \"timestamp\": ts,\n \"step\": 60,\n \"value\": value,\n \"counterType\": \"GAUGE\",\n \"tags\": \"idc=lg,loc=beijing\",\n }\n ] \n\n r = requests.post(\"http://127.0.0.1:1988/v1/push\", data=json.dumps(payload)) \n\n\ndef get_argv():\n \"\"\"\n Turn the text file into arguments, one line at a time.\n :return:\n \"\"\"\n with io.open(data_file, encoding=\"utf-8\") as f:\n for i in f:\n i = i.strip()\n if i:\n ret = i.split(\"|\")\n yield ret\n\n\ndef run(endpoint_host, show_key, index, doc_type, query_para, time_field, s_time, e_time):\n \"\"\"\n Code executed by the worker threads.\n :param endpoint_host:\n :param show_key:\n :param index:\n :param doc_type:\n :param query_para:\n :param time_field:\n :param s_time:\n :param e_time:\n :return:\n \"\"\"\n metrics_1m = get_metrics_from_es(index, doc_type, query_para, s_time, e_time, time_field)\n report_data(show_key, metrics_1m, endpoint_host)\n\n\nif __name__ == '__main__':\n log = my_logger(log_file_path)\n t = ThreadPoolExecutor(50)\n try:\n g = get_argv()\n while True:\n try:\n a1, a2, a3, a4, a5, a6 = next(g)\n t.submit(run, a1, a2, a3, a4, a5, a6, start_time, end_time)\n # t.submit(run(*next(g), start_time, end_time))\n except StopIteration:\n break\n except Exception as e:\n log.debug(e)\n except Exception as e:\n log.debug(e)\n\n\n","sub_path":"小程序/zw/run_es.py","file_name":"run_es.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"314790092","text":"# Problem 216\n\nimport time\nimport gmpy2\n\ndef isPrime(n):\n a = gmpy2.is_prime(n)\n return a\n\ntStart = time.time()\nM = 5*10**7\ncnt = 0\nfor n in range(2, M):\n if isPrime(2*n**2-1):\n cnt += 1\nprint(cnt)\nprint(\"Run Time = \" + str(time.time() - tStart))","sub_path":"problem216.py","file_name":"problem216.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"445997803","text":"\"\"\"BOJ Q11497 - 통나무 건너뛰기 (https://www.acmicpc.net/problem/11497)\n\nGiven sorted list L1<=L2<=...<=Ln, the optimal solution is\n[L1, L3, ..., Ln, Ln-1, Ln-3, ..., L2].\n\nThen the maximum diff of adjacent numbers is max_i(|Li - Li-2|).\n\"\"\"\n\nt = int(input())\nfor test_no in range(t):\n n = int(input())\n l = sorted([int(x) for x in input().split()])\n print(max(abs(l[i] - l[i + 2]) for i in range(n - 2)))\n","sub_path":"q11497.py","file_name":"q11497.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"500852785","text":"#!/usr/local/bin/python3.8\n# -*-coding:Utf-8 -*\nimport time\nfrom src.classes.exceptions.ExceptionsJoueur import *\nclass Joueur :\n\tcompt = 0\n\n\tdef __init__(self,pseudo) :\n\t\tself.pseudo = pseudo\n\t\tself.niveau = 1\n\t\tself.xp = 0\n\t\tself.nbVictoiresEL = 0\n\t\tself.nbDefaitesEL = 0\n\t\tself.nbPartiesEL = 0\n\t\tself.nbVictoiresHL = 0\n\t\tself.nbDefaitesHL = 0\n\t\tself.nbPartiesHL = 0\n\t\tself.tempsDeJeuTotal = 0.0\n\t\tself.tempsDebutJeu = 0.0\n\t\tself.creation = time.time()\n\t\tself.enAttente = False\n\n\tdef debutPartie(self) :\n\t\tself.tempsDebutJeu = time.time()\n\n\tdef finPartie(self) :\n\t\tself.tempsDeJeuTotal += time.time() - self.tempsDebutJeu\n\t\tself.tempsDebutJeu = 0.0\n\n\tdef addXp(self,xp) :\n\t\tself.xp+=xp\n\t\tself.verifieLvl()\n\n\tdef partieHLGagnee(self) :\n\t\tself.nbVictoiresHL+=1\n\t\tself.nbPartiesHL+=1\n\t\tself.addXp(500)\n\n\tdef partieELGagnee(self) :\n\t\tself.nbVictoiresEL+=1\n\t\tself.nbPartiesEL+=1\n\t\tself.addXp(500)\n\n\tdef lancerChrono(self) :\n\t\tself.tempsDebutJeu = time.time()\n\n\tdef finChrono(self) :\n\t\ttempsFin = time.time()\n\t\tself.tempsDeJeuTotal+= tempsFin - self.tempsDebutJeu\n\t\tself.tempsDebutJeu = 0.0\n\n\tdef getRatioEL(self) :\n\t\tif(self.nbPartiesEL==0) :\n\t\t\treturn(0)\n\t\telse :\n\t\t\treturn(self.nbVictoiresEL/self.nbPartiesEL)\n\n\tdef getRatioHL(self) :\n\t\tif(self.nbPartiesHL==0) :\n\t\t\treturn(0)\n\t\telse :\n\t\t\treturn(self.nbVictoiresHL/self.nbPartiesHL)\n\n\tdef getDateCreation(self) :\n\t\tdate = time.localtime(self.creation)\n\t\tjourSem = date.tm_wday\n\t\tif(jourSem==0) :\n\t\t\tjourSem=\"Lundi\"\n\t\telif(jourSem==1) :\n\t\t\tjourSem=\"Mardi\"\n\t\telif(jourSem==2) :\n\t\t\tjourSem=\"Mercredi\"\n\t\telif(jourSem==3) :\n\t\t\tjourSem=\"Jeudi\"\n\t\telif(jourSem==4) :\n\t\t\tjourSem=\"Vendredi\"\n\t\telif(jourSem==5) :\n\t\t\tjourSem=\"Samedi\"\n\t\telif(jourSem==6) :\n\t\t\tjourSem=\"Dimanche\"\n\t\telse :\n\t\t\tjourSem=\"Unknown\"\n\n\t\tmois = date.tm_mon\n\t\tif(mois==1) :\n\t\t\tmois=\"Janvier\"\n\t\telif(mois==2) :\n\t\t\tmois=\"Février\"\n\t\telif(mois==3) :\n\t\t\tmois=\"Mars\"\n\t\telif(mois==4) :\n\t\t\tmois=\"Avril\"\n\t\telif(mois==5) :\n\t\t\tmois=\"Mai\"\n\t\telif(mois==6) :\n\t\t\tmois=\"Juin\"\n\t\telif(mois==7) :\n\t\t\tmois=\"Juillet\"\n\t\telif(mois==8) :\n\t\t\tmois=\"Août\"\n\t\telif(mois==9) :\n\t\t\tmois=\"Septembre\"\n\t\telif(mois==10) :\n\t\t\tmois=\"Octobre\"\n\t\telif(mois==11) :\n\t\t\tmois=\"Novembre\"\n\t\telif(mois==12) :\n\t\t\tmois=\"Décembre\"\n\t\telse :\n\t\t\tmois = \"Unknown\"\n\n\t\tminutes = date.tm_min\n\t\tif(minutes<10) :\n\t\t\tminutes = \"0\"+str(date.tm_min)\n\t\telse :\n\t\t\tminutes = str(date.tm_min)\n\n\t\trendu = \"Votre compte a été créé le \"+jourSem+\" \"+str(date.tm_mday)+\" \"+mois+\" \"+str(date.tm_year)+\" à \"+str(date.tm_hour)+\"h\"+minutes+\" et \"+str(date.tm_sec)+\" secondes\"\n\n\t\treturn(rendu)\n\n\tdef getTempsDeJeu(self) :\n\t\t_tmp = int(self.tempsDeJeuTotal)\n\t\theures = _tmp//3600\n\t\t_tmp = _tmp%3600\n\t\tminutes = _tmp//60\n\t\t_tmp = _tmp%60\n\t\tsecondes = _tmp\n\t\treturn(str(heures)+\" heure(s) \"+str(minutes)+\" minute(s) \"+str(secondes)+\" seconde(s)\")\n\n\tdef partieHLPerdue(self) 
:\n\t\tself.nbDefaitesHL+=1\n\t\tself.nbPartiesHL+=1\n\t\tself.addXp(0)\n\n\tdef partieELPerdue(self) :\n\t\tself.nbDefaitesEL+=1\n\t\tself.nbPartiesEL+=1\n\t\tself.addXp(0)\n\n\tdef matchNulEL(self) :\n\t\tself.nbPartiesEL+=1\n\t\tself.addXp(0)\n\n\tdef matchNulHL(self) :\n\t\tself.nbPartiesHL+=1\n\t\tself.addXp(0)\n\n\tdef verifieLvl(self) :\n\t\tif(self.niveau==1) :\n\t\t\tif(self.xp>=500):\n\t\t\t\tself.niveau+=1\n\t\t\t\tself.xp-=500\n\t\t\t\t#raise NewLevelException(\"Vous passez niveau 2\")\n\t\t\telse :\n\t\t\t\tpass\n\t\telif(self.niveau==2) :\n\t\t\tif(self.xp>=1500) :\n\t\t\t\tself.niveau+=1\n\t\t\t\tself.xp-=1500\n\t\t\t\t#raise NewLevelException(\"Vous passez niveau 3\")\n\t\t\telse :\n\t\t\t\tpass\n\t\telif(self.niveau==3) :\n\t\t\tif(self.xp>=5000) :\n\t\t\t\tself.niveau+=1\n\t\t\t\tself.xp-=5000\n\t\t\t\t#raise NewLevelException(\"Vous passez niveau 4\")\n\t\t\telse :\n\t\t\t\tpass\n\t\telif(self.niveau==4) :\n\t\t\tif(self.xp>=15000) :\n\t\t\t\tself.niveau+=1\n\t\t\t\tself.xp-=15000\n\t\t\t\t#raise NewLevelException(\"Vous passez niveau 5\")\n\t\t\telse :\n\t\t\t\tpass\n\t\telif(self.niveau==5) :\n\t\t\tif(self.xp>=20000) :\n\t\t\t\tself.niveau+=1\n\t\t\t\tself.xp-=20000\n\t\t\t\t#raise NewLevelException(\"Vous passez niveau 6\")\n\t\t\telse :\n\t\t\t\tpass\n\t\telse :\n\t\t\tpass\n\n\n\t\t","sub_path":"src/classes/Joueur.py","file_name":"Joueur.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406253092","text":"from loguru import logger\nimport torch\n\nclass ModelInformationNotFoundException(Exception):\n pass\n\nclass ModelToolbox:\n def __init__(self, model_class, optimizer_class):\n self.model_class = model_class\n self.optimizer_class = optimizer_class\n\n\n def save_model(self, miner_path, model_info):\n \"\"\"Saves the model locally. \n\n Args:\n model_info (:obj:`dict`, `required`): Dictionary containing the epoch we are saving at, the loss, and the PyTorch model object.\n\n Raises:\n :obj:`ModelInformationNotFoundException`: Raised whenever the loss, epoch, or PyTorch model object is missing from the input dictionary.\n \"\"\"\n try:\n if 'epoch' not in model_info.keys():\n raise ModelInformationNotFoundException(\"Missing 'epoch' in torch save dict\")\n\n if 'loss' not in model_info.keys():\n raise ModelInformationNotFoundException(\"Missing 'loss' in torch save dict\")\n \n if 'model_state_dict' not in model_info.keys():\n raise ModelInformationNotFoundException(\"Missing 'model' in torch save dict\")\n\n if 'optimizer_state_dict' not in model_info.keys():\n raise ModelInformationNotFoundException(\"Missing 'optimizer' in torch save dict\")\n \n logger.info( 'Saving/Serving model: epoch: {}, loss: {}, path: {}/model.torch'.format(model_info['epoch'], model_info['loss'], miner_path))\n torch.save(model_info,\"{}/model.torch\".format(miner_path))\n\n except ModelInformationNotFoundException as e:\n logger.error(\"Encountered exception trying to save model: {}\", e)\n \n def load_model(self, config):\n \"\"\" Loads a model saved by save_model() and returns it. \n\n Returns:\n model (:obj:`torch.nn.Module`) : Model that was saved earlier, loaded back up using the state dict and optimizer. 
\n optimizer (:obj:`torch.optim`) : Model optimizer that was saved with the model.\n \"\"\"\n model = self.model_class( config )\n optimizer = self.optimizer_class(model.parameters(), lr = config.miner.learning_rate, momentum=config.miner.momentum)\n \n try:\n checkpoint = torch.load(\"{}/model.torch\".format(config.miner.full_path))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n\n logger.info( 'Reloaded model: epoch: {}, loss: {}, path: {}/model.torch'.format(epoch, loss, config.miner.full_path))\n except Exception as e:\n logger.warning ( 'Exception {}. Could not find model in path: {}/model.torch', e, config.miner.full_path )\n\n\n return model, optimizer\n\n\n","sub_path":"bittensor/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"155501353","text":"class Solution:\n def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:\n t = [0] * 1001\n r = []\n for i in arr1: t[i] += 1\n for i in arr2:\n for j in range(0, t[i]):\n r.append(i)\n t[i] = 0\n for k in range(0, len(t)):\n for j in range(0, t[k]):\n r.append(k)\n return r","sub_path":"Week_08/relative-sort-array.py","file_name":"relative-sort-array.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324687565","text":"import math\nimport matplotlib.pyplot as plt\n\n# Asks the user for a boolean, and will block until a valid boolean is entered.\ndef inputbool(name):\n while True:\n value = input(name+'? (Y/N): ')\n if value == 'Y':\n return True\n elif value == 'N':\n return False\n else:\n print('Enter \"Y\" or \"N\" pls')\n\n# Asks the user for a float, and will block until a valid float is entered.\ndef inputfloat(name):\n while True:\n value = input('Enter ' + name + ': ')\n try:\n return float(value)\n except:\n print('Enter a valid number pls')\n\ndef getDiscountCost(cost, numberofitems):\n discountcost = 0\n if numberofitems <= 150: discountcost = cost\n if numberofitems >= 151 and numberofitems <= 300: discountcost = 0.98*cost\n if numberofitems >= 301 and numberofitems <= 800: discountcost = 0.95*cost\n if numberofitems >= 801: discountcost = 0.9*cost\n return discountcost\n\nsetupcost = inputfloat('setup cost')\nannualdemand = int(inputfloat('annual demand'))\nbasecost = inputfloat('item cost')\ninvtpercentage = inputfloat('inventory percentage')\nleadtime = inputfloat('lead time')\n\nisDiscount = inputbool('Enable discounts')\nprint()\n#Calculate replenishment level:\nreplenishedlevel = (annualdemand/365)*leadtime #lead (lead time) is the number of days waiting for the delivery arrival\nprint('Replenishment level: ', replenishedlevel)\n\nsmallestInvtCost = None\noptimalOrderQuantity = 0\noptimalHoldingCost = 0\noptimalEoq = 0\noptimalOpy = 0\noptimalDbo = 0\noptimalAPC = 0\noptimalAOC = 0\noptimalAHC = 0\nquantitylst = list()\ntotalinvtcostlst = list()\nholdingcostlst = list()\nordercostlst = list()\nfor quantity in range(1,annualdemand):\n if isDiscount:\n productcost = getDiscountCost(basecost, quantity)\n else:\n productcost = basecost\n holdingcost = productcost*invtpercentage\n #Calculate economic order quantity:\n EOQ = math.sqrt((2*setupcost*annualdemand)/holdingcost)\n #Calculate orders per year:\n opy = annualdemand/EOQ\n dbo = 
365/opy\n #Determine increased cost of constrained purchases:\n ordercost = (annualdemand/quantity)*setupcost\n constrainedholdingcost = (0.5*quantity*holdingcost) #Assumption: 0.5 is the leftover inventory in the previous order\n Totalinvtcost = ordercost + constrainedholdingcost\n #print(quantity, Totalinvtcost)\n #Calculate annual indexes:\n annualproductcost = annualdemand*productcost\n annualordercost = annualproductcost/EOQ\n annualholdingcost = 0.5*EOQ*holdingcost #Assumption: 0.5 is the leftover inventory in the previous order\n total3costs = annualproductcost + annualordercost + annualholdingcost\n if smallestInvtCost is None or smallestInvtCost > Totalinvtcost:\n smallestInvtCost = Totalinvtcost\n optimalOrderQuantity = quantity\n optimalHoldingCost = holdingcost\n optimalEoq = EOQ\n optimalOpy = opy\n optimalDbo = dbo\n optimalAPC = annualproductcost\n optimalAOC = annualordercost\n optimalAHC = annualholdingcost\n if quantity%30 == 0:\n quantitylst.append(quantity)\n totalinvtcostlst.append(Totalinvtcost)\n holdingcostlst.append(constrainedholdingcost)\n ordercostlst.append(ordercost)\n#print(collectlst) #tuple list (quantity,Totalinvtcost)\n\nprint(\"Holding cost: \", optimalHoldingCost)\nprint('Economic order quantity: ', optimalEoq)\nprint('orders per year: ', optimalOpy)\nprint('days between orders: ', optimalDbo)\nprint('Annual product cost: ', optimalAPC)\nprint('Annual ordering cost: ', optimalAOC)\nprint('Annual holding cost: ', optimalAHC)\nprint('Optimal total inventory cost: ', smallestInvtCost)\nprint('Optimal quantity: ', optimalOrderQuantity)\nplt.plot(quantitylst, totalinvtcostlst, label = 'Ordering + C.Holding Cost')\nplt.plot(quantitylst, holdingcostlst, label = 'Constrained Holding Cost')\nplt.plot(quantitylst, ordercostlst, label = 'Ordering Cost')\nplt.xlabel('Quantity')\nplt.ylabel('Total Cost')\nplt.title('Order Optimisation')\nplt.legend()\nplt.show()\n","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346410371","text":"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Runs an Image Classification model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport pprint\nfrom typing import Any, Tuple, Text, Optional, Mapping\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.modeling import performance\nfrom official.modeling.hyperparams import params_dict\nfrom official.utils import hyperparams_flags\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import keras_utils\nfrom official.vision.image_classification import callbacks as custom_callbacks\nfrom official.vision.image_classification import dataset_factory\nfrom official.vision.image_classification import optimizer_factory\nfrom official.vision.image_classification.configs import base_configs\nfrom official.vision.image_classification.configs import configs\nfrom official.vision.image_classification.efficientnet import efficientnet_model\nfrom official.vision.image_classification.resnet import common\nfrom official.vision.image_classification.resnet import resnet_model\nfrom official.vision.image_classification.mobilenet_v1 import mobilenet_v1_model\nfrom official.vision.image_classification.pruning import cprune_from_config\nfrom official.vision.image_classification.pruning import pruning_base_configs\n#from official.vision.image_classification.pruning.efficientnet import efficientnet_pruning_config\nfrom official.vision.image_classification.pruning.mobilenet_v1 import mobilenet_v1_pruning_config\nfrom official.vision.image_classification.pruning.resnet_imagenet import resnet_imagenet_pruning_config\nfrom tensorflow_model_optimization.python.core.sparsity.keras import cpruning_callbacks\nfrom tensorflow_model_optimization.python.core.sparsity.keras import cprune\nfrom tensorflow_model_optimization.python.core.sparsity.keras import cprune_registry\n\n\npp = pprint.PrettyPrinter()\n\n\ndef get_models() -> Mapping[str, tf.keras.Model]:\n \"\"\"Returns the mapping from model type name to Keras model.\"\"\"\n return {\n 'efficientnet': efficientnet_model.EfficientNet.from_name,\n 'resnet': resnet_model.resnet50,\n 'mobilenet_v1': mobilenet_v1_model.mobilenet_v1\n }\n\n\ndef get_pruning() -> Mapping[str, pruning_base_configs.ModelPruningConfig]:\n \"\"\"Returns the mapping from model type name to model pruning config.\"\"\"\n return {\n #'efficientnet': efficientnet_pruning_config.EfficientNetPruningConfig(),\n 'resnet': resnet_imagenet_pruning_config.ResNet50PruningConfig(),\n 'mobilenet_v1': mobilenet_v1_pruning_config.MobileNetV1PruningConfig(),\n }\n\ndef get_dtype_map() -> Mapping[str, tf.dtypes.DType]:\n \"\"\"Returns the mapping from dtype string representations to TF dtypes.\"\"\"\n return {\n 'float32': tf.float32,\n 
'bfloat16': tf.bfloat16,\n 'float16': tf.float16,\n 'fp32': tf.float32,\n 'bf16': tf.bfloat16,\n }\n\n\ndef _get_metrics(one_hot: bool) -> Mapping[Text, Any]:\n \"\"\"Get a dict of available metrics to track.\"\"\"\n if one_hot:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),\n 'accuracy': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),\n 'top_1': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),\n 'top_5': tf.keras.metrics.TopKCategoricalAccuracy(\n k=5,\n name='top_5_accuracy'),\n }\n else:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),\n 'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),\n 'top_1': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),\n 'top_5': tf.keras.metrics.SparseTopKCategoricalAccuracy(\n k=5,\n name='top_5_accuracy'),\n }\n\n\ndef get_image_size_from_model(\n params: base_configs.ExperimentConfig) -> Optional[int]:\n \"\"\"If the given model has a preferred image size, return it.\"\"\"\n if params.model_name == 'efficientnet':\n efficientnet_name = params.model.model_params.model_name\n if efficientnet_name in efficientnet_model.MODEL_CONFIGS:\n return efficientnet_model.MODEL_CONFIGS[efficientnet_name].resolution\n return None\n\n\ndef _get_dataset_builders(params: base_configs.ExperimentConfig,\n strategy: tf.distribute.Strategy,\n one_hot: bool\n ) -> Tuple[Any, Any]:\n \"\"\"Create and return train and validation dataset builders.\"\"\"\n if one_hot:\n logging.warning('label_smoothing > 0, so datasets will be one hot encoded.')\n else:\n logging.warning('label_smoothing not applied, so datasets will not be one '\n 'hot encoded.')\n\n num_devices = strategy.num_replicas_in_sync if strategy else 1\n\n image_size = get_image_size_from_model(params)\n\n dataset_configs = [\n params.train_dataset, params.validation_dataset\n ]\n builders = []\n\n for config in dataset_configs:\n if config is not None and config.has_data:\n builder = dataset_factory.DatasetBuilder(\n config,\n image_size=image_size or config.image_size,\n num_devices=num_devices,\n one_hot=one_hot)\n else:\n builder = None\n builders.append(builder)\n\n return builders\n\n\ndef get_loss_scale(params: base_configs.ExperimentConfig,\n fp16_default: float = 128.) 
-> float:\n \"\"\"Returns the loss scale for initializations.\"\"\"\n loss_scale = params.runtime.loss_scale\n if loss_scale == 'dynamic':\n return loss_scale\n elif loss_scale is not None:\n return float(loss_scale)\n elif params.train_dataset.dtype == 'float32':\n return 1.\n else:\n assert params.train_dataset.dtype == 'float16'\n return fp16_default\n\n\ndef _get_params_from_flags(flags_obj: flags.FlagValues):\n \"\"\"Get ParamsDict from flags.\"\"\"\n model = flags_obj.model_type.lower()\n dataset = flags_obj.dataset.lower()\n params = configs.get_config(model=model, dataset=dataset)\n\n flags_overrides = {\n 'model_dir': flags_obj.model_dir,\n 'mode': flags_obj.mode,\n 'model': {\n 'name': model,\n },\n 'runtime': {\n 'run_eagerly': flags_obj.run_eagerly,\n 'tpu': flags_obj.tpu,\n },\n 'train_dataset': {\n 'data_dir': flags_obj.data_dir,\n },\n 'validation_dataset': {\n 'data_dir': flags_obj.data_dir,\n },\n 'train': {\n 'time_history': {\n 'log_steps': flags_obj.log_steps,\n },\n },\n }\n\n overriding_configs = (flags_obj.config_file,\n flags_obj.params_override,\n flags_overrides)\n\n logging.info('Base params: %s', pp.pformat(params.as_dict()))\n\n for param in overriding_configs:\n logging.info('Overriding params: %s', param)\n # Set is_strict to false because we can have dynamic dict parameters.\n params = params_dict.override_params_dict(params, param, is_strict=False)\n\n params.validate()\n params.lock()\n\n logging.info('Final model parameters: %s', pp.pformat(params.as_dict()))\n return params\n\n\ndef resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n \"\"\"Resumes from the latest checkpoint, if possible.\n\n Loads the model weights and optimizer settings from a checkpoint.\n This function should be used in case of preemption recovery.\n\n Args:\n model: The model whose weights should be restored.\n model_dir: The directory where model weights were saved.\n train_steps: The number of steps to train.\n\n Returns:\n The epoch of the latest checkpoint, or 0 if not restoring.\n\n \"\"\"\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)\n\n\ndef initialize(params: base_configs.ExperimentConfig,\n dataset_builder: dataset_factory.DatasetBuilder):\n \"\"\"Initializes backend related initializations.\"\"\"\n keras_utils.set_session_config(\n enable_eager=params.runtime.run_eagerly,\n enable_xla=params.runtime.enable_xla)\n if params.runtime.gpu_threads_enabled:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=params.runtime.per_gpu_thread_count,\n gpu_thread_mode=params.runtime.gpu_thread_mode,\n num_gpus=params.runtime.num_gpus,\n datasets_num_private_threads=params.runtime.dataset_num_private_threads)\n\n performance.set_mixed_precision_policy(dataset_builder.dtype)\n if tf.config.list_physical_devices('GPU'):\n data_format = 'channels_first'\n else:\n data_format = 'channels_last'\n tf.keras.backend.set_image_data_format(data_format)\n distribution_utils.configure_cluster(\n 
params.runtime.worker_hosts,\n params.runtime.task_index)\n if params.runtime.run_eagerly:\n # Enable eager execution to allow step-by-step debugging\n tf.config.experimental_run_functions_eagerly(True)\n\n\ndef define_classifier_flags():\n \"\"\"Defines common flags for image classification.\"\"\"\n hyperparams_flags.initialize_common_flags()\n flags.DEFINE_string(\n 'data_dir',\n default=None,\n help='The location of the input data.')\n flags.DEFINE_string(\n 'mode',\n default=None,\n help='Mode to run: `train`, `eval`, `train_and_eval`, `export`, '\n '`sensitivity_analysis`, or `prune_physically`.')\n flags.DEFINE_bool(\n 'run_eagerly',\n default=None,\n help='Use eager execution and disable autograph for debugging.')\n flags.DEFINE_string(\n 'model_type',\n default=None,\n help='The type of the model, e.g. EfficientNet, etc.')\n flags.DEFINE_string(\n 'dataset',\n default=None,\n help='The name of the dataset, e.g. ImageNet, etc.')\n flags.DEFINE_integer(\n 'log_steps',\n default=100,\n help='The interval of steps between logging of batch level stats.')\n flags.DEFINE_integer(\n 'verbose',\n default=1,\n help='0, 1, or 2. Verbosity mode.'\n '0 = silent, 1 = progress bar, 2 = one line per epoch.'\n 'Note that the progress bar is not particularly useful when logged to a file, '\n 'so verbose=2 is recommended when not running interactively '\n '(eg, in a production environment).')\n flags.DEFINE_string('pruning_config_file', None,\n 'Path to a yaml file of model pruning configuration.')\n flags.DEFINE_integer(\n 'sensitivity_layer_count',\n default=0,\n help='The ordinal number representing a layer whose pruning sensitivity '\n 'is to be analyzed. 0 for the first layer, 27 (MobileNet V1) and 53 '\n '(ResNet-50) for the last layer, etc. Valid only if '\n '`mode=sensitivity_analysis`.')\n flags.DEFINE_string(\n 'sensitivity_granularity',\n default='BlockSparsity',\n help='The granularity for analyzing pruning sensitivity. Valid only if '\n '`mode=sensitivity_analysis`.')\n flags.DEFINE_integer(\n 'sensitivity_gamma',\n default=2,\n help='The gamma parameter for ArayaMag or QuasiCyclic granularity.'\n ' for analyzing pruning sensitivity. Valid only if '\n '`mode=sensitivity_analysis`.')\n flags.DEFINE_bool(\n 'sensitivity_respect_submatrix',\n default=False,\n help='Whether or not to apply pruning masks submatrix-wise. Valid only '\n 'for ArayaMag, QuasiCyclic, and TwoOutOfFour granularity.')\n flags.DEFINE_bool(\n 'sensitivity_two_over_four_chin',\n default=False,\n help='Whether or not to realize two-out-of-four sparsity pattern along '\n 'input channels. 
Defaults to `False`, in which case the sparsity '\n 'pattern is achieved along the output channels.')\n\n\ndef serialize_config(params: base_configs.ExperimentConfig,\n model_dir: str):\n \"\"\"Serializes and saves the experiment config.\"\"\"\n params_save_path = os.path.join(model_dir, 'params.yaml')\n logging.info('Saving experiment configuration to %s', params_save_path)\n tf.io.gfile.makedirs(model_dir)\n params_dict.save_params_dict_to_yaml(params, params_save_path)\n\n\ndef train_and_eval(\n params: base_configs.ExperimentConfig,\n strategy_override: tf.distribute.Strategy) -> Mapping[str, Any]:\n \"\"\"Runs the train and eval path using compile/fit.\"\"\"\n logging.info('Running train and eval.')\n\n # Note: for TPUs, strategy and scope should be created before the dataset\n strategy = strategy_override or distribution_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n strategy_scope = distribution_utils.get_strategy_scope(strategy)\n\n logging.info('Detected %d devices.',\n strategy.num_replicas_in_sync if strategy else 1)\n\n label_smoothing = params.model.loss.label_smoothing\n one_hot = label_smoothing and label_smoothing > 0\n\n builders = _get_dataset_builders(params, strategy, one_hot)\n datasets = [builder.build() if builder else None for builder in builders]\n\n # Unpack datasets and builders based on train/val/test splits\n train_builder, validation_builder = builders # pylint: disable=unbalanced-tuple-unpacking\n train_dataset, validation_dataset = datasets\n\n train_epochs = params.train.epochs\n train_steps = params.train.steps or train_builder.num_steps\n validation_steps = params.evaluation.steps or validation_builder.num_steps\n\n initialize(params, train_builder)\n\n logging.info('Global batch size: %d', train_builder.global_batch_size)\n\n with strategy_scope:\n model_params = params.model.model_params.as_dict()\n model = get_models()[params.model.name](**model_params)\n\n if params.model.model_weights_path:\n if os.path.isdir(params.model.model_weights_path):\n checkpoint = tf.train.latest_checkpoint(params.model.model_weights_path)\n else:\n checkpoint = params.model.model_weights_path\n logging.info('Load weights from %s', checkpoint)\n model.load_weights(checkpoint)\n\n if flags.FLAGS.mode == 'sensitivity_analysis' or flags.FLAGS.pruning_config_file:\n if flags.FLAGS.mode == 'sensitivity_analysis':\n if flags.FLAGS.pruning_config_file:\n raise ValueError\n\n layer = [\n layer for layer in model.layers\n if hasattr(layer, 'kernel') or hasattr(layer, 'depthwise_kernel')\n ][flags.FLAGS.sensitivity_layer_count]\n layer_name = layer.name\n weight_name = 'kernel' if hasattr(layer, 'kernel') else 'depthwise_kernel'\n\n pruning_params = cprune_from_config.generate_sensitivity_config(\n model_name=model.name,\n layer_name=layer_name,\n weight_name=weight_name,\n granularity=flags.FLAGS.sensitivity_granularity,\n gamma=flags.FLAGS.sensitivity_gamma,\n respect_submatrix=flags.FLAGS.sensitivity_respect_submatrix,\n two_over_four_chin=flags.FLAGS.sensitivity_two_over_four_chin)\n else:\n pruning_params = get_pruning()[params.model.name]\n\n params_dict.override_params_dict(\n pruning_params, flags.FLAGS.pruning_config_file, is_strict=False)\n logging.info('Specified pruning params: %s', pp.pformat(pruning_params.as_dict()))\n\n _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)\n 
logging.info('Understood pruning params: %s', pp.pformat(_pruning_params))\n\n model = cprune_from_config.cprune_from_config(model, pruning_params)\n\n else:\n weights_list = model.get_weights()\n model = tf.keras.models.clone_model(model)\n model.set_weights(weights_list)\n\n models = [model]\n\n if flags.FLAGS.mode == 'prune_physically':\n smaller_model = cprune_from_config.prune_physically(model)\n models.append(smaller_model)\n\n for _model in models:\n learning_rate = optimizer_factory.build_learning_rate(\n params=params.model.learning_rate,\n batch_size=train_builder.global_batch_size,\n train_steps=train_steps)\n optimizer = optimizer_factory.build_optimizer(\n optimizer_name=params.model.optimizer.name,\n base_learning_rate=learning_rate,\n params=params.model.optimizer.as_dict())\n\n metrics_map = _get_metrics(one_hot)\n metrics = [metrics_map[metric] for metric in params.train.metrics]\n\n if one_hot:\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=params.model.loss.label_smoothing)\n else:\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()\n\n _model.compile(optimizer=optimizer,\n loss=loss_obj,\n metrics=metrics)\n\n initial_epoch = 0\n if params.train.resume_checkpoint:\n initial_epoch = resume_from_checkpoint(model=model,\n model_dir=params.model_dir,\n train_steps=train_steps)\n\n callbacks = None\n if params.mode == 'train_and_eval':\n serialize_config(params=params, model_dir=params.model_dir)\n # TODO(dankondratyuk): callbacks significantly slow down training\n model_pruning_config = None\n if flags.FLAGS.pruning_config_file:\n model_pruning_config = cprune_from_config._expand_model_pruning_config(\n model, pruning_params\n )\n callbacks = custom_callbacks.get_callbacks(\n model_checkpoint=params.train.callbacks.enable_checkpoint_and_export,\n include_tensorboard=params.train.callbacks.enable_tensorboard,\n time_history=params.train.callbacks.enable_time_history,\n track_lr=params.train.tensorboard.track_lr,\n model_pruning_config=model_pruning_config,\n write_model_weights=params.train.tensorboard.write_model_weights,\n batch_size=train_builder.global_batch_size,\n log_steps=params.train.time_history.log_steps,\n model_dir=params.model_dir)\n if flags.FLAGS.pruning_config_file:\n callbacks += [\n cpruning_callbacks.UpdateCPruningStep(),\n # cpruning_callbacks.CPruningSummaries(log_dir=params.model_dir),\n ]\n\n if params.evaluation.skip_eval:\n validation_kwargs = {}\n else:\n validation_kwargs = {\n 'validation_data': validation_dataset,\n 'validation_steps': validation_steps,\n 'validation_freq': params.evaluation.epochs_between_evals,\n }\n\n history = None\n if params.mode == 'train_and_eval':\n history = model.fit(\n train_dataset,\n epochs=train_epochs,\n steps_per_epoch=train_steps,\n initial_epoch=initial_epoch,\n callbacks=callbacks,\n verbose=flags.FLAGS.verbose,\n **validation_kwargs)\n elif params.mode == 'eval':\n cprune.apply_cpruning_masks(model)\n\n if flags.FLAGS.pruning_config_file:\n _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)\n logging.info('Pruning result: %s', pp.pformat(_pruning_params))\n\n validation_output = None\n if params.evaluation.eval_data == 'train':\n eval_dataset = train_dataset\n eval_steps = train_steps\n elif params.evaluation.eval_data == 'validation':\n eval_dataset = validation_dataset\n eval_steps = validation_steps\n\n if params.mode == 'sensitivity_analysis':\n file_writer = tf.summary.create_file_writer(flags.FLAGS.model_dir + '/metrics')\n 
file_writer.set_as_default()\n cprune_registry.ConstraintRegistry.add_weight_constraint_pair(\n 'depthwise_kernel', 'depthwise_constraint')\n\n for sparsity_x_16 in range(16):\n cprune.apply_cpruning_masks(model, step=sparsity_x_16)\n _validation_output = model.evaluate(\n eval_dataset, steps=eval_steps, verbose=2, return_dict=True)\n _validation_output = [_validation_output['loss'],\n _validation_output['accuracy'],\n _validation_output['top_5_accuracy']]\n _stats = common.build_stats(history, _validation_output, callbacks)\n prefix = 'pruning_sensitivity/' + layer_name + '/' + weight_name + '/'\n for key, value in _stats.items():\n tf.summary.scalar(prefix + key, data=value, step=sparsity_x_16)\n _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)\n sparsity = _pruning_params['pruning'][0]['pruning'][0]['current_sparsity']\n tf.summary.scalar(prefix + 'sparsity', data=sparsity, step=sparsity_x_16)\n\n elif flags.FLAGS.mode == 'prune_physically':\n logging.info('Number of filters before and after physical pruning:')\n for layer, new_layer in zip(model.layers, smaller_model.layers):\n if type(layer) is tf.keras.layers.Conv2D:\n logging.info(' {}, {}, {}'.format(layer.name, layer.filters, new_layer.filters))\n if type(layer) is tf.keras.layers.Dense:\n logging.info(' {}, {}, {}'.format(layer.name, layer.units, new_layer.units))\n for i, _model in enumerate(models):\n situation = 'before' if i == 0 else 'after'\n logging.info('Model summary {} physical pruning:'.format(situation))\n _model.summary(print_fn=logging.info)\n _validation_output = _model.evaluate(\n eval_dataset, steps=eval_steps, verbose=2, return_dict=True)\n _validation_output = [_validation_output['loss'],\n _validation_output['accuracy'],\n _validation_output['top_5_accuracy']]\n _stats = common.build_stats(history, _validation_output, callbacks)\n logging.info('Evaluation {} physical pruning: {}'.format(situation, _stats))\n\n postfix = '' if i == 0 else '_small'\n export_path = os.path.join(flags.FLAGS.model_dir, 'saved_model' + postfix)\n _model.save(export_path, include_optimizer=False)\n\n elif not params.evaluation.skip_eval or params.mode == 'eval':\n logging.info('Evaluate %s data', params.evaluation.eval_data)\n validation_output = model.evaluate(\n eval_dataset, steps=eval_steps, verbose=2, return_dict=True)\n\n if validation_output:\n validation_output = [validation_output['loss'],\n validation_output['accuracy'],\n validation_output['top_5_accuracy']]\n\n # TODO(dankondratyuk): eval and save final test accuracy\n stats = common.build_stats(history, validation_output, callbacks)\n return stats\n\n\ndef export(params: base_configs.ExperimentConfig):\n \"\"\"Runs the model export functionality.\"\"\"\n if flags.FLAGS.pruning_config_file:\n raise ValueError\n logging.info('Exporting model.')\n model_params = params.model.model_params.as_dict()\n model = get_models()[params.model.name](**model_params)\n checkpoint = params.export.checkpoint\n if checkpoint is None:\n logging.info('No export checkpoint was provided. 
Using the latest '\n 'checkpoint from model_dir.')\n checkpoint = tf.train.latest_checkpoint(params.model_dir)\n\n model.load_weights(checkpoint)\n model.save(params.export.destination)\n\n\ndef run(flags_obj: flags.FlagValues,\n strategy_override: tf.distribute.Strategy = None) -> Mapping[str, Any]:\n \"\"\"Runs Image Classification model using native Keras APIs.\n\n Args:\n flags_obj: An object containing parsed flag values.\n strategy_override: A `tf.distribute.Strategy` object to use for model.\n\n Returns:\n Dictionary of training/eval stats\n \"\"\"\n params = _get_params_from_flags(flags_obj)\n if params.mode in ['train_and_eval', 'eval', 'sensitivity_analysis', 'prune_physically']:\n return train_and_eval(params, strategy_override)\n elif params.mode == 'export_only':\n export(params)\n else:\n raise ValueError('{} is not a valid mode.'.format(params.mode))\n\n\ndef main(_):\n with logger.benchmark_context(flags.FLAGS):\n stats = run(flags.FLAGS)\n if stats:\n logging.info('Run stats:\\n%s', stats)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n define_classifier_flags()\n flags.mark_flag_as_required('data_dir')\n flags.mark_flag_as_required('mode')\n flags.mark_flag_as_required('model_type')\n flags.mark_flag_as_required('dataset')\n\n app.run(main)\n","sub_path":"official/vision/image_classification/classifier_trainer.py","file_name":"classifier_trainer.py","file_ext":"py","file_size_in_byte":25244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"639843939","text":"#!/usr/bin/env python\n\n# An appeal to any one reading this code: Yes, I know my code is absolutely horrible\n# (at least I think it is). To be fair, this is pretty much the most hacked-together thing I've made.\n\nimport datetime\nimport os\nimport sys\nimport shutil\nimport markdown\nfrom mdx_gfm import GithubFlavoredMarkdownExtension\nfrom bs4 import BeautifulSoup\n\n# The markdown conversion object, included is the py-gfm GithubFlavoredMarkdownExtension\nmd = markdown.Markdown(extensions=[GithubFlavoredMarkdownExtension()])\n\n# Today's date, for putting blog entries in a folder\ntoday = datetime.date.today()\n\n# Get conversion of markdown to html, and put it where required into an html file\ndef compile():\n\tpath = '{}/{}'.format(os.getcwd(), \"blog.md\")\n\ttry:\n\t\tmd_input = open(path)\n\t\tsource = md_input.readlines()\n\t\tmd_input.close()\n\t\tsource = \"\\n\".join(source)\n\t\tcompiled_markdown = md.convert(source)\n\texcept FileNotFoundError:\n\t\tprint(\"No markdown file was found in the root directory of your site.\")\n\t\tprint(\"Please create a markdown file and try again.\")\n\t\tsys.exit()\n\n\t#try:\n\tpath = '{}/{}'.format(os.getcwd(), \"template.html\")\n\tshutil.copyfile(path, path + '_copy')\n\tpath = '{}/{}'.format(os.getcwd(), \"template.html_copy\")\n\ttemplate = BeautifulSoup(open(path), \"lxml\")\n\t# Parse copied template for divs with class 'blog-content'\n\tfor div in template.find_all('div'):\n\t\tif div[\"class\"] == \"blog-content\":\n\t\t\tcontent_div = div\n\t\t\tcontent_div.string.replace_with(compiled_markdown)\n\t\t\tprint(\"breaking\")\n\t\t\tbreak\n\t\n\t# Check if root blog directory exists\n\tif os.path.isdir(os.getcwd() + '/blog'):\n\t\tif os.path.isdir(os.getcwd() + '/blog/' + '{}'.format(today)):\n\t\t\tshutil.move(path, \"blog/{}/blog.html\".format(today))\n\t\telse:\n\t\t\tos.mkdir('blog/{}'.format(str(today)))\n\t\t\tshutil.move(path, 
\"blog/{}/blog.html\".format(today))\n\telse:\n\t\tprint(\"The directory 'blog' was not found in your site root.\")\n\t\tprint(\"Please create this directory and try again.\")\n\t\tsys.exit()\n\t#except:\n\t\t#print(\"Something went wrong. Check to make sure that all required directories and files exist, and try again.\")\n\ncompile()","sub_path":"pressdown.py","file_name":"pressdown.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"131498353","text":"\r\nclass Solution(object):\r\n def pushDominoes(self, dominoes):\r\n forces = [0] * len(dominoes)\r\n max_force = len(dominoes)\r\n\r\n force = 0\r\n for i, d in enumerate(dominoes):\r\n if d == 'R':\r\n force = max_force\r\n if d == 'L':\r\n force = 0\r\n else:\r\n force = max(0, force - 1)\r\n forces[i] += force\r\n\r\n for i in range(len(dominoes) - 1, -1, -1):\r\n # print('i', i)\r\n d = dominoes[i]\r\n if d == 'L':\r\n force = max_force\r\n if d == 'R':\r\n force = 0\r\n else:\r\n force = max(0, force - 1)\r\n forces[i] -= force\r\n\r\n result = ''\r\n for f in forces:\r\n if f == 0:\r\n result += '.'\r\n elif f > 0:\r\n result += 'R'\r\n else:\r\n result += 'L'\r\n return result\r\n\r\nprint(Solution().pushDominoes('..R...L..R.'))\r\n# ..RR.LL..RR","sub_path":"coderpro/push_dominos_pro.py","file_name":"push_dominos_pro.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"328693468","text":"from queue import Queue\nfrom kiteconnect import KiteConnect, KiteTicker\n#from orders import *\nimport time\n\n\nclass Stream:\n '''class for the websocket in which all the callback functions are present'''\n\n def __init__(self, kite, zerodha_access_token, tracker_token, instruments, df1, df2):\n # kite ticker initialization\n self.kws = KiteTicker(df2.iloc[0,0], zerodha_access_token)\n self.kite = kite\n # self.kite.set_access_token(self.access_token)\n self.zerodha_access_token = zerodha_access_token\n self.tracker_token = tracker_token\n self.df1 = df1\n self.df2 = df2\n self.instruments = instruments\n self.ticks_queue = Queue()\n self.exit = 0\n\n def on_ticks(self, ws, ticks):\n # print(ticks)\n self.ticks_queue.put(ticks)\n\n def on_connect(self, ws, response):\n # Callback on successful connect.\n print(\"connected\")\n ws.subscribe(self.tracker_token)\n #ws.set_mode(ws.MODE_FULL, tracker_token)\n\n def on_close(self, ws, code, reason):\n # On connection close stop the main loop\n # Reconnection will not happen after executing `ws.stop()`\n # ws.stop()\n print('socket closed')\n\n def computation(self):\n t=[]\n for k in range(len(self.tracker_token)):\n t.append(k)\n while not self.exit:\n # print(self.ticks_queue.get())\n # self.exit=1\n\n c = self.ticks_queue.get()\n #print(c)\n d = []\n e = []\n \n f = len(c)\n \n \n for i in range(f):\n d.append(c[i]['last_price'])\n e.append(c[i]['instrument_token'])\n #print(d, e)\n #print(t)\n for j in range(f):\n if j in t:\n try:\n\n if(self.df1.iloc[j, 1] == \"Greater then or equal to\"):\n if d[e.index(self.tracker_token[j])] >= self.df1.iloc[j, 2]:\n print(self.df1.iloc[j, 0],\n \"condition is fulfiled\")\n t.remove(j)\n elif(self.df1.iloc[j, 1] == \"Greater then\"):\n if d[e.index(self.tracker_token[j])] > self.df1.iloc[j, 2]:\n print(self.df1.iloc[j, 0],\n \"condition is fulfiled\")\n t.remove(j)\n elif(self.df1.iloc[j, 1] == \"Less then or equal to\"):\n if d[e.index(self.tracker_token[j])] <= 
self.df1.iloc[j, 2]:\n print(self.df1.iloc[j, 0],\n \"condition is fulfiled\")\n t.remove(j)\n elif(self.df1.iloc[j, 1] == \"Less then\"):\n if d[e.index(self.tracker_token[j])] < self.df1.iloc[j, 2]:\n print(self.df1.iloc[j, 0],\n \"condition is fulfiled\")\n t.remove(j)\n elif(self.df1.iloc[j, 1] == \"Equal to\"):\n if d[e.index(self.tracker_token[j])] == self.df1.iloc[j, 2]:\n print(self.df1.iloc[j, 0],\n \"condition is fulfiled\")\n t.remove(j)\n print(t)\n except:\n print(self.df1.iloc[j, 0],\n \"tracker token doesnt exist\")\n if len(t) == 0:\n self.exit=1\n # print(c,d,e)\n\n # def buy_call_option():\n\n # self.exit = 1 '''\n","sub_path":"Zerotha_scanner/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239710024","text":"\n\nimport os\nfrom collections import defaultdict\n\npackages_to_submit = [\n ('scikit-xray', ['dev']),\n ('metadatastore', ['v0.1.0']),\n ('filestore', ['v0.1.0']),\n ('dataportal', ['v0.1.0']),\n]\n\nif __name__ == \"__main__\":\n import subprocess\n recipes_path = os.path.abspath('../')\n for package, versions in packages_to_submit:\n for version in versions:\n recipe_path = os.path.join(recipes_path, 'recipes', package, version)\n # subprocess.check_output(['anaconda-build', 'submit', recipe_path],\n # shell=True)\n output = subprocess.check_output('anaconda-build submit %s' %\n recipe_path, shell=True)\n print(output)\n","sub_path":"scripts/submit_binstar.py","file_name":"submit_binstar.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276300746","text":"from functools import partial\nfrom itertools import product\nimport networkx as nx\nfrom .edge import Edge\n\n\nclass MNetwork:\n\n def __init__(self, networks: [nx.Graph]):\n\n # Give a name to every layer network that is still unnamed\n idx = 1\n for net in networks:\n if net.name == '':\n net.name = 't%s' % idx\n idx += 1\n\n # Initialize the multi-network data\n self.networksList = networks\n self.networksDict = {x.name: x for x in networks}\n self.networksName = set((x.name for x in networks))\n\n # Initialize the project networks\n self.projectNetwork = nx.Graph(name=\"fusion net\")\n self.projectLinkpair = nx.Graph(name=\"fusion linkpair\")\n\n for graph in self.networksList:\n self.projectNetwork.add_nodes_from(graph.nodes())\n self.projectNetwork.add_edges_from(graph.edges())\n\n # Algorithm-related\n # Algorithm for computing link-pair similarity\n self.linkpair_simi_func = None\n # Objective function for the optimal community partition\n self.object_func = None\n\n def linkpairs(self):\n \"\"\"Extract all edge pairs that share a common neighbor from the fused single network\n \"\"\"\n\n # _linkpair = set() # {(a,b,c)...}\n # for src, dst in self.project_nets.edges():\n #\n # for src_neighbors in self.project_nets.neighbors(src):\n # if src_neighbors < dst:\n # _linkpair.add((src_neighbors, src, dst,))\n # elif src_neighbors > dst:\n # _linkpair.add((dst, src, src_neighbors,))\n # else:\n # pass\n #\n # for dst_neighbors in self.project_nets.neighbors(dst):\n # if dst_neighbors < src:\n # _linkpair.add((dst_neighbors, dst, src,))\n # elif dst_neighbors > src:\n # _linkpair.add((src, dst, dst_neighbors,))\n # else:\n # pass\n\n _linkpair = []\n\n for mid in self.project_nets.nodes_iter():\n neighbors = self.project_nets.neighbors(mid)\n for x in range(len(neighbors)):\n for y in range(x+1,len(neighbors)):\n _linkpair.append((neighbors[x], mid, neighbors[y]))\n return _linkpair\n\n def link_similarity_table(self):\n\n linkpairs = self.linkpairs()\n linkpair_similarity = []\n\n linkpair_num = 
len(linkpairs)\n\n cnt = 0\n for src, mid, dst in linkpairs:\n cnt += 1\n if ( cnt%50000 == 0):\n print('computing similarity %5d/%5d'%(cnt, linkpair_num))\n # compute the similarity\n simi = self.linkpair_simi_func(src=src, mid=mid, dst=dst)\n\n # store the link pair and its similarity in a custom data structure\n link1 = Edge(src, mid)\n link2 = Edge(mid, dst)\n linkpair_similarity.append((link1, link2, simi))\n\n # sort by similarity and return\n linkpair_similarity.sort(key=lambda x: x[2], reverse=True)\n\n return linkpair_similarity\n\n def nodes(self):\n\n return list(self.projectNetwork.nodes())\n\n def links(self):\n _links = list()\n\n for n1, n2 in self.projectNetwork.edges():\n _links.append(Edge(n1, n2))\n\n return _links\n\n def objectfunc(self, link_coms, node_coms):\n\n return self.object_func(link_coms=link_coms, node_coms=node_coms)\n\n def set_linkpair_simi_algo(self, algo_func):\n\n self.linkpair_simi_func = \\\n partial(algo_func, networks=self.networksList)\n\n def set_objectfunc_algo(self, algo_func):\n self.object_func = \\\n partial(algo_func, networks=self.networksList)\n\n @property\n def project_nets(self):\n return self.projectNetwork\n\n @property\n def project_linkpairs(self):\n return self.projectLinkpair\n\n\n\n\n\n\n\n","sub_path":"mlcd/mnetwork.py","file_name":"mnetwork.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197160215","text":"#!/usr/bin/env python\n'''\n Merges histograms from the output of\n callHistoFiller.cc into one file\n Author: T.M.Perry UW-Madison\n'''\nimport ROOT\nfrom ROOT import THStack,TH1F,TFile\nfrom ROOT import TLegend,TCanvas,TPad,TLatex,TLine\nfrom ROOT import gROOT,gStyle\n#import fnmatch as fnm\nimport cmsPrelim as cpr\n\nversion=\"Giuseppe\"\nrunname=\"Giuseppe\"\n#version=\"CestPiVV\"\n#runname=\"vvQCDb_mT0\"\n#version=\"CestPi\"\n#runname=\"stmMVmT0\" \n#runname=\"stdMVmT0\" \n#runname= \"vvWbbQCDnoMT\" #\"vvWbbQCDnoMT\" #\"stdMVmT0\" #\"typical\" # \"vvWbbQCD\" # \"diboson\" \n#runname=\"vvQCDprime\"\n\npath = '/Users/rhombus/Documents/Madison/CMS/WbbAnalysis/UWAnalysis/CRAB/MuNu/SampleInfo/%s/Plots_%s/'%(version,runname)\n#path = '/Users/rhombus/Documents/Madison/CMS/WbbAnalysis/UWAnalysis/CRAB/MuNu/SampleInfo/%s/Plots_%s/NoMT0bin'%(version,runname)\n\nleps = ['mu','ele']\n#cuts = ['wbb','ttme','ttjjj']\ncuts = ['wbb', 'ttme', 'ttjjj',]# 'wjj', 'stt', 'dyjj', 'dybb']\n#cuts = ['wjj',]\n\nvariables = [ \n \"mt\",\n# \"met\",\n# \"goodLep_pt\",\n# \"goodJ1_pt\",\n# \"goodJ2_pt\",\n# \"goodJ3_pt\",\n# \"goodJ1J2_dR\",\n# \"goodJ1J2_mass\",\n# \"goodJ1_CSV\",\n# #\"goodJ1_mass_SV_unweighted\",\n# \"goodJ1_mass_SV_corrected\",\n# #\"goodJ1_mass_SV_weighted\",\n# \"goodJ2_CSV\",\n# #\"goodJ3J4_mass\" \n# \"goodL1L2_pt\",\n# \"goodL1L2_eta\",\n# \"goodL1L2_phi\",\n #\"goodL1L2_mass\",\n]\n\nqcdSF_mu = [0.803,0.00001,0.541]\nqcdSF_ele = [0.827,0.00001,0.317]\nfor lep in leps:\n if lep=='mu': \n qcdSFs = qcdSF_mu\n else:\n qcdSFs = qcdSF_ele\n for variable in variables: \n for cut,qcdsf in zip(cuts,qcdSFs):\n\n #canvas attributes\n canx = 900 \n cany = 1200\n \n c = TCanvas('c','Canvas Named c',canx,cany)\n \n c.cd()\n p1 = TPad('p1','p1',0,0.25,1,1)\n p1.SetBottomMargin(0.04)\n p1.Draw()\n p1.SetLogy(0)\n p1.SetFrameLineWidth(2)\n c.cd()\n p2 = TPad('p2','p2',0,0,1,0.25)\n p2.SetTopMargin(0.02)\n p2.SetBottomMargin(0.3)\n p2.SetFrameLineWidth(2)\n p2.Draw()\n\n c.cd()\n p1.cd()\n \n theFilename = \"%sGiuseppe_%s_%s_%s\"%(path,cut,variable,lep)\n #theFilename = \"%sHistograms_%s_%s_%s\"%(path,cut,variable,lep)\n theFile = 
TFile(theFilename+\".root\")\n \n #\n #title = \"Transverse Mass\"\n #xlabel = \"Transverse Mass [GeV]\"\n \n # rebin factor\n rebin = 1\n \n # scale factors \n sf_qcd = qcdsf \n sf_z = 1. \n sf_vv = 1. \n sf_t = 1. \n sf_tb = 1. \n sf_ttb = 1. \n sf_wl = 1. \n sf_wc = 1. \n sf_wcc = 1. \n sf_wbb = 1.\n \n ratioRange = 0.3\n errorBand = True\n \n #color scheme\n c_data = 1\n c_qcd = ROOT.EColor.kRed+1\n c_z = ROOT.EColor.kOrange-3\n c_vv = ROOT.EColor.kYellow-3\n c_t = ROOT.EColor.kGreen+1\n c_tb = ROOT.EColor.kGreen-5\n c_ttb = ROOT.EColor.kGreen-9\n c_wl = ROOT.EColor.kAzure+10\n c_wc = ROOT.EColor.kBlue+0\n c_wcc = ROOT.EColor.kBlue-9\n c_wbb = 51\n \n fillStyle = 1\n fillStyleTTs = 1\n fillStyleTTf = 1\n \n tex = ROOT.TLatex()\n tex.SetTextSize(0.07)\n tex.SetTextAlign(13)\n tex.SetNDC(True)\n gStyle.SetOptStat('')\n gStyle.SetLineWidth(3)\n gStyle.SetPadTickY(1)\n\n\n data_obs = theFile.Get(\"Data\")\n data_obs.Rebin( rebin )\n data_obs.SetMarkerStyle(22)\n data_obs.SetMarkerSize(1.2)\n data_obs.SetMarkerSize(2)\n data_obs.Draw()\n max_data = data_obs.GetMaximum()\n \n h_qcd = theFile.Get(\"QCD\")\n h_qcd.SetFillColor( c_qcd )\n h_qcd.SetFillStyle( fillStyle )\n h_qcd.Rebin( rebin )\n h_qcd.Scale( sf_qcd )\n h_qcd.Draw()\n \n h_Drell = theFile.Get(\"DY\")\n h_Drell.SetFillColor( c_z )\n h_Drell.Rebin( rebin )\n h_Drell.Scale( sf_z )\n h_Drell.Draw()\n \n h_WW = theFile.Get(\"WW\")\n h_WW.SetFillColor( c_vv )\n h_WW.SetFillStyle( fillStyle )\n h_WW.Rebin( rebin )\n h_WW.Scale( sf_vv )\n h_WW.Draw()\n \n h_WZ = theFile.Get(\"WZ\")\n h_WZ.SetFillColor( c_vv )\n h_WZ.SetFillStyle( fillStyle )\n h_WZ.Rebin( rebin )\n h_WZ.Scale( sf_vv )\n h_WZ.Draw()\n \n h_ZZ = theFile.Get(\"ZZ\")\n h_ZZ.SetFillColor( c_vv )\n h_ZZ.SetFillStyle( fillStyle )\n h_ZZ.Rebin( rebin )\n h_ZZ.Scale( sf_vv )\n h_ZZ.Draw()\n \n h_T_s = theFile.Get(\"T\")\n h_T_s.SetFillColor( c_t )\n h_T_s.SetFillStyle( fillStyle )\n h_T_s.Rebin( rebin )\n h_T_s.Scale( sf_t )\n h_T_s.Draw()\n \n h_TTbar = theFile.Get(\"TTbar\")\n h_TTbar.SetFillColor( c_ttb )\n h_TTbar.SetFillStyle( fillStyleTTf )\n h_TTbar.Rebin( rebin )\n h_TTbar.Scale( sf_ttb )\n h_TTbar.Draw()\n \n h_Wl = theFile.Get(\"W+l\")\n h_Wl.SetFillColor( c_wl )\n h_Wl.SetFillStyle( fillStyle )\n h_Wl.Rebin( rebin )\n h_Wl.Scale( sf_wl )\n h_Wl.Draw()\n \n h_Wc = theFile.Get(\"W+c\")\n h_Wc.SetFillColor( c_wc )\n h_Wc.SetFillStyle( fillStyle )\n h_Wc.Rebin( rebin )\n h_Wc.Scale( sf_wc )\n h_Wc.Draw()\n \n h_Wcc = theFile.Get(\"W+tau\")\n h_Wcc.SetFillColor( c_wcc )\n h_Wcc.SetFillStyle( fillStyle )\n h_Wcc.Rebin( rebin )\n h_Wcc.Scale( sf_wcc )\n h_Wcc.Draw()\n \n h_Wbb5F = theFile.Get(\"W+b\")\n h_Wbb5F.SetFillColor( c_wbb )\n h_Wbb5F.SetFillStyle( fillStyle )\n h_Wbb5F.Rebin( rebin )\n h_Wbb5F.Scale( sf_wbb )\n h_Wbb5F.Draw()\n \n # make a stack to draw with\n s_mc = THStack('hs','')\n s_mc.SetTitle('')\n s_mc.Add(h_qcd)\n s_mc.Add(h_Drell)\n s_mc.Add(h_WW)\n s_mc.Add(h_WZ)\n s_mc.Add(h_ZZ)\n s_mc.Add(h_T_s)\n s_mc.Add(h_TTbar)\n s_mc.Add(h_Wl)\n s_mc.Add(h_Wc)\n s_mc.Add(h_Wcc)\n s_mc.Add(h_Wbb5F)\n s_mc.Draw()\n max_mc = s_mc.GetMaximum()\n \n # add all MC for ratio plot\n #h_mc = h_qcd.Clone()\n h_mc = h_Drell.Clone()\n h_mc.SetName(\"h_mc\")\n h_mc.Add(h_qcd)\n h_mc.Add(h_WW)\n h_mc.Add(h_WZ)\n h_mc.Add(h_ZZ)\n h_mc.Add(h_T_s)\n h_mc.Add(h_TTbar)\n h_mc.Add(h_Wl)\n h_mc.Add(h_Wc)\n h_mc.Add(h_Wcc)\n h_mc.Add(h_Wbb5F)\n if errorBand:\n h_mc_err = h_mc.Clone()\n h_mc_err.SetName('h_mc_err')\n h_mc_err.SetFillColor(ROOT.EColor.kBlue-6)\n 
h_mc_err.SetFillStyle(3001)\n # make ratio plot\n h_r = data_obs.Clone()\n h_r.SetName(\"h_r\")\n h_r.Divide(h_mc)\n if errorBand:\n h_r_err = h_mc.Clone()\n h_r_err.SetName(\"h_r_err\")\n h_r_err.SetFillColor(ROOT.EColor.kBlue-6)\n h_r_err.SetFillStyle(3001)\n h_r_errDivisor = h_mc.Clone()\n for i in range( h_r_err.GetNbinsX() + 1 ):\n h_r_errDivisor.SetBinError( i, 0 )\n h_r_err.Divide(h_r_errDivisor)\n \n # set p1 title and axis labels\n s_mc.GetXaxis().SetLabelSize(0.03)\n s_mc.GetYaxis().SetLabelSize(0.03)\n s_mc.GetYaxis().SetTitleOffset(1.5)\n s_mc.GetYaxis().SetTitle( \"Events / %s GeV\"%(h_mc.GetBinWidth(1)) )\n \n \n # set p2 y ranges\n the_max = max( max_mc,max_data )\n s_mc.SetMaximum( 1.2*the_max )\n h_r.GetYaxis().SetRangeUser(1.-ratioRange,1+ratioRange)\n h_r.GetYaxis().SetLabelSize(0.10)\n h_r.GetXaxis().SetLabelSize(0.10)\n h_r.GetYaxis().SetTitleSize(0.10)\n h_r.GetXaxis().SetTitleSize(0.10)\n h_r.GetYaxis().SetTitle(\"Data / MC\")\n h_r.GetYaxis().SetTitleOffset(0.5)\n xlabel = h_r.GetTitle()\n title = xlabel\n #title = cut+\" \"+lep+\" \"+xlabel\n h_r.GetXaxis().SetTitle( xlabel )\n h_r.SetTitle(\"\")\n \n # fill legend\n leg=TLegend(0.58,0.3,0.78,0.88)\n leg.AddEntry(data_obs,\"Data\")\n leg.AddEntry(h_Wbb5F,\"W+b#bar{b}\",\"f\")\n leg.AddEntry(h_Wcc,\"W+c#bar{c}\",\"f\")\n leg.AddEntry(h_Wc,\"W+c\",\"f\")\n leg.AddEntry(h_Wl,\"W+udsg\",\"f\")\n leg.AddEntry(h_TTbar,\"t#bar{t}\",\"f\")\n leg.AddEntry(h_T_s,\"t\",\"f\")\n leg.AddEntry(h_WW,\"WW,WZ,ZZ\",\"f\")\n leg.AddEntry(h_Drell,\"Drell-Yan\",\"f\")\n leg.AddEntry(h_qcd,\"QCD\",\"f\")\n leg.SetFillColor(0)\n leg.SetBorderSize(0)\n \n # and draw\n #c.cd()\n p1.cd()\n s_mc.Draw('hist')\n if errorBand: h_mc_err.Draw('sames,E2')\n data_obs.Draw('sames,E1')\n leg.Draw('sames')\n \n cpr.wip(19700,0.05)\n #cpr.prelim_alt(19700,0.05)\n tex.SetTextAlign(11) #left, bottom\n tex.DrawLatex(0.1,0.9,title)\n \n #c.cd()\n p2.cd() \n h_r.Draw(\"ep\")\n if errorBand: h_r_err.Draw(\"sames,E2\")\n \n l = TLine(h_r.GetXaxis().GetXmin(),1,h_r.GetXaxis().GetXmax(),1)\n l.SetLineStyle(3)\n l.Draw()\n c.Update()\n \n c.Print(theFilename+\".png\")\n print(\"you just finished with %s.png\"%theFilename)\n print(\"\")\n c.Close()\n #c.Clear()\n \n","sub_path":"Wbb8TeV/scripts/plotSystG.py","file_name":"plotSystG.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30584503","text":"import numpy as np\nimport os\nimport glob\nimport shutil\nimport sys\n\n\ndef split_on_chunks(datadir, src_folder):\n files = glob.glob(os.path.join(src_folder, '*.JPEG'))\n mode = src_folder.split('/')[-2]\n for i, src in enumerate(files):\n file_name = os.path.basename(src)\n label_folder = os.path.dirname(src).split('/')[-1]\n p = '{0}/chunk_{1:05}/'.format(mode, i)\n dst = os.path.join(p, label_folder, file_name)\n if not os.path.exists(p):\n shutil.copytree(datadir, p, ignore=shutil.ignore_patterns('*.JPEG'))\n if not os.path.exists(os.path.join(p, label_folder)):\n os.makedirs(os.path.join(p, label_folder))\n if not os.path.exists(dst):\n shutil.copy2(src, dst)\n print('=> {}'.format(dst))\n\n\nif __name__ == '__main__':\n source_dir = sys.argv[1]\n target_dir = sys.argv[2]\n print(\"Source dir: {}\".format(source_dir))\n print(\"Target dir: {}\".format(target_dir))\n\n # pool = multiprocessing.Pool(12)\n # pool.map(partial(split_on_chunks, datadir=traindir), subdirs)\n\n for subdir in sorted(glob.glob(os.path.join(source_dir, '*'))):\n print(\"processing dir: 
{}\".format(subdir))\n split_on_chunks(target_dir, subdir)\n\n","sub_path":"pytorch_imagenet/dataset_split.py","file_name":"dataset_split.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"515982556","text":"# -*- coding: utf-8 -*-\n# snapshottest: v1 - https://goo.gl/zC4yUc\nfrom __future__ import unicode_literals\n\nfrom snapshottest import GenericRepr, Snapshot\n\n\nsnapshots = Snapshot()\n\nsnapshots['test_get[uvloop-None-True] format_analysis'] = {\n '_id': 'foobar',\n 'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),\n 'ready': True,\n 'results': {\n },\n 'sample': {\n 'id': 'baz'\n },\n 'subtraction': {\n 'id': 'plum',\n 'name': 'Plum'\n },\n 'workflow': 'pathoscope_bowtie'\n}\n\nsnapshots['test_get[uvloop-None-True] 1'] = {\n 'created_at': '2015-10-06T20:00:00Z',\n 'formatted': True,\n 'id': 'foo'\n}\n\nsnapshots['test_get[uvloop-None-False] 1'] = {\n 'created_at': '2015-10-06T20:00:00Z',\n 'id': 'foobar',\n 'ready': False,\n 'results': {\n },\n 'sample': {\n 'id': 'baz'\n },\n 'subtraction': {\n 'id': 'plum',\n 'name': 'Plum'\n },\n 'workflow': 'pathoscope_bowtie'\n}\n","sub_path":"tests/analyses/snapshots/snap_test_api.py","file_name":"snap_test_api.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"158035343","text":"from PyQt5 import QtWidgets, uic\nimport sys\n\n# from PIL import Image,ImageTk\n# import tkinter as tk\n\nclass Ui(QtWidgets.QMainWindow):\n\n # def Open_Img(self):\n # global img_png\n # Img = Image.open('diaochan.jpg')\n # img_png = ImageTk.PhotoImage(Img)\n #\n # def Show_Img(self):\n # global img_png\n # label_Img = tk.Label(window,image=img_png)\n # label_Img.pack()\n\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('tt.ui', self)\n\n self.button1 = self.findChild(QtWidgets.QPushButton,'hellobutton')\n self.button1.clicked.connect(self.printButton1Pressed)\n\n self.button2 = self.findChild(QtWidgets.QPushButton, 'byebutton')\n self.button2.clicked.connect(self.printButton2Pressed)\n self.show()\n\n\n def printButton1Pressed(self):\n print('hello')\n\n def printButton2Pressed(self):\n print('bye')\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui()\napp.exec_()\n#\n#\n# Form, Window = uic.loadUiType(\"dtk.ui\")\n#\n# app = QApplication([])\n# window = Window()\n# form = Form()\n# form.setupUi(window)\n# window.show()\n# app.exec_()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379122288","text":"from typing import Dict\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, DocumentArray\nfrom jina.drivers.craft import CraftDriver\nfrom jina.executors.decorators import single\nfrom jina.executors.crafters import BaseCrafter\nfrom jina.types.ndarray.generic import NdArray\n\n\nclass MockCrafter(BaseCrafter):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @single\n def craft(self, text: str, *args, **kwargs) -> Dict:\n if text == 'valid':\n return {'blob': np.array([0.0, 0.0, 0.0]), 'weight': 10}\n else:\n return {'non_existing_key': 1}\n\n\nclass MockImageCrafter(BaseCrafter):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @single\n def craft(self, blob: np.ndarray, *args, **kwargs) -> Dict:\n assert 
len(blob.shape) == 3\n assert blob.shape[0] == 1\n return {'blob': blob}\n\n\nclass SimpleCraftDriver(CraftDriver):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n def exec_fn(self):\n return self._exec_fn\n\n\n@pytest.fixture()\ndef text_craft_executor():\n return MockCrafter()\n\n\n@pytest.fixture()\ndef image_craft_executor():\n return MockImageCrafter()\n\n\n@pytest.fixture()\ndef craft_driver():\n driver = SimpleCraftDriver()\n executor = MockCrafter()\n driver.attach(executor=executor, runtime=None)\n return driver\n\n\ndef test_valid_document(craft_driver, text_craft_executor):\n craft_driver.attach(executor=text_craft_executor, runtime=None)\n valid_document = Document(content='valid')\n docs = DocumentArray([valid_document])\n craft_driver._apply_all(docs)\n np.testing.assert_equal(\n NdArray(valid_document.blob).value, np.array([0.0, 0.0, 0.0])\n )\n assert valid_document.weight == 10\n\n\ndef test_invalid_document(craft_driver, text_craft_executor):\n craft_driver.attach(executor=text_craft_executor, runtime=None)\n invalid_document = Document(content='invalid')\n docs = DocumentArray([invalid_document])\n with pytest.raises(AttributeError) as error:\n craft_driver._apply_all(docs)\n assert error.value.__str__() == '\\'non_existing_key\\' is not recognized'\n\n\ndef test_image_crafting(craft_driver, image_craft_executor):\n craft_driver.attach(executor=image_craft_executor, runtime=None)\n blob1 = np.random.random((1, 32, 64))\n blob2 = np.random.random((1, 64, 32))\n docs = DocumentArray([Document(blob=blob1), Document(blob=blob2)])\n craft_driver._apply_all(docs)\n np.testing.assert_equal(docs[0].blob, blob1)\n np.testing.assert_equal(docs[1].blob, blob2)\n","sub_path":"tests/unit/drivers/test_craft_driver.py","file_name":"test_craft_driver.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"579285668","text":"import numpy as np\nimport warnings\nfrom xskillscore import (\n crps_ensemble,\n crps_gaussian,\n mae,\n mse,\n pearson_r,\n pearson_r_p_value,\n rmse,\n)\n\n\ndef _get_norm_factor(comparison):\n \"\"\"Get normalization factor for PPP, NMSE, NRMSE, MSSS.\n\n Used in compute_perfect_model. Comparison 'm2e' gets smaller rmse's than\n 'm2m' by design, see Seferian et al. 2018. 'm2m', 'm2c' ensemble variance\n is divided by 2 to get control variance.\n\n Args:\n comparison (function): comparison function.\n\n Returns:\n fac (int): normalization factor.\n\n Raises:\n KeyError: if comparison is not matching.\n\n \"\"\"\n comparison_name = comparison.__name__\n if comparison_name in ['_m2e', '_e2c', '_e2r']:\n fac = 1\n elif comparison_name in ['_m2c', '_m2m', '_m2r']:\n fac = 2\n else:\n raise KeyError('specify comparison to get normalization factor.')\n return fac\n\n\ndef _pearson_r(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate the Anomaly Correlation Coefficient (ACC).\n\n .. math::\n ACC = \\\\frac{cov(f, o)}{\\\\sigma_{f}\\\\cdot\\\\sigma_{o}}\n\n .. 
note::\n Use metric ``pearson_r_p_value`` to get the corresponding pvalue.\n\n Range:\n * perfect: 1\n * min: -1\n\n See also:\n * xskillscore.pearson_r\n * xskillscore.pearson_r_p_value\n \"\"\"\n return pearson_r(forecast, reference, dim=dim)\n\n\ndef _pearson_r_p_value(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate the probability associated with the ACC not being random.\n \"\"\"\n # p-value returns a runtime error when working with NaNs, such as on a climate\n # model grid. We can avoid this annoying output by specifically suppressing\n # warning here.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n pval = pearson_r_p_value(forecast, reference, dim=dim)\n return pval\n\n\ndef _mse(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate the Mean Sqaure Error (MSE).\n\n .. math::\n MSE = \\\\overline{(f - o)^{2}}\n\n Range:\n * perfect: 0\n * min: 0\n * max: ∞\n\n See also:\n * xskillscore.mse\n \"\"\"\n return mse(forecast, reference, dim=dim)\n\n\ndef _rmse(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate the Root Mean Sqaure Error (RMSE).\n\n .. math::\n RMSE = \\\\sqrt{\\\\overline{(f - o)^{2}}}\n\n Range:\n * perfect: 0\n * min: 0\n * max: ∞\n\n See also:\n * xskillscore.rmse\n \"\"\"\n return rmse(forecast, reference, dim=dim)\n\n\ndef _mae(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate the Mean Absolute Error (MAE).\n\n .. math::\n MSE = \\\\overline{(f - o)^{2}}\n\n Range:\n * perfect: 0\n * min: 0\n * max: ∞\n\n See also:\n * xskillscore.mae\n \"\"\"\n return mae(forecast, reference, dim=dim)\n\n\ndef _crps(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Continuous Ranked Probability Score (CRPS) is the probabilistic MSE.\n\n Range:\n * perfect: 0\n * max: 0\n * else: negative\n\n References:\n * Matheson, James E., and Robert L. Winkler. “Scoring Rules for\n Continuous Probability Distributions.” Management Science 22, no. 10\n (June 1, 1976): 1087–96. https://doi.org/10/cwwt4g.\n\n See also:\n * properscoring.crps_ensemble\n \"\"\"\n return crps_ensemble(forecast, reference).mean(dim)\n\n\ndef _crps_gaussian(forecast, mu, sig, dim='svd', comparison=None):\n return crps_gaussian(forecast, mu, sig).mean(dim)\n\n\ndef _crpss(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Continuous Ranked Probability Skill Score is strictly proper.\n\n .. math::\n CRPSS = \\\\frac{CRPS_{clim}-CRPS_{init}}{CRPS_{clim}}\n\n Range:\n * perfect: 1\n * pos: better than climatology forecast\n * neg: worse than climatology forecast\n\n References:\n * Matheson, James E., and Robert L. Winkler. “Scoring Rules for\n Continuous Probability Distributions.” Management Science 22, no. 10\n (June 1, 1976): 1087–96. https://doi.org/10/cwwt4g.\n * Gneiting, Tilmann, and Adrian E Raftery. “Strictly Proper Scoring\n Rules, Prediction, and Estimation.” Journal of the American\n Statistical Association 102, no. 477 (March 1, 2007): 359–78.\n https://doi.org/10/c6758w.\n\n See also:\n * properscoring.crps_ensemble\n \"\"\"\n mu = reference.mean(dim)\n sig = reference.std(dim)\n ref_skill = _crps_gaussian(forecast, mu, sig, dim=dim)\n forecast_skill = _crps(forecast, reference, dim=dim)\n skill_score = (ref_skill - forecast_skill) / ref_skill\n return skill_score\n\n\ndef _less(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Logarithmic Ensemble Spread Score.\n\n .. 
math:: LESS = ln(\\\\frac{\\\\sigma^2_f}{\\\\sigma^2_o})\n\n References:\n * Kadow, Christopher, Sebastian Illing, Oliver Kunst, Henning W. Rust,\n Holger Pohlmann, Wolfgang A. Müller, and Ulrich Cubasch. “Evaluation\n of Forecasts by Accuracy and Spread in the MiKlip Decadal Climate\n Prediction System.” Meteorologische Zeitschrift, December 21, 2016,\n 631–43. https://doi.org/10/f9jrhw.\n\n Range:\n * pos: under-disperive\n * neg: over-disperive\n * perfect: 0\n \"\"\"\n if comparison.__name__ != '_m2r':\n raise KeyError(\n 'LESS requires member dimension and therefore '\n \"compute_hindcast(comparison='m2r')\"\n )\n numerator = _mse(forecast, reference, dim='member').mean(dim)\n # not corrected for conditional bias yet\n denominator = _mse(forecast.mean('member'), reference.mean('member'), dim=dim)\n less = np.log(numerator / denominator)\n return less\n\n\ndef _bias(forecast, reference, dim='svd', comparison=None):\n \"\"\"Calculate unconditional bias.\n\n .. math::\n bias = f - o\n\n Range:\n * pos: positive bias\n * neg: negative bias\n * perfect: 0\n\n References:\n * https://www.cawcr.gov.au/projects/verification/\n * https://www-miklip.dkrz.de/about/murcss/\n \"\"\"\n bias = (forecast - reference).mean(dim)\n return bias\n\n\ndef _msss_murphy(forecast, reference, dim='svd', comparison=None):\n \"\"\"Calculate Murphy's Mean Square Skill Score (MSSS).\n\n .. math::\n MSSS_{Murphy} = r_{fo}^2 - [\\\\text{conditional bias}]^2 -\\\n [\\\\frac{\\\\text{(unconditional) bias}}{\\\\sigma_o}]^2\n\n References:\n * https://www-miklip.dkrz.de/about/murcss/\n * Murphy, Allan H. “Skill Scores Based on the Mean Square Error and\n Their Relationships to the Correlation Coefficient.” Monthly Weather\n Review 116, no. 12 (December 1, 1988): 2417–24.\n https://doi.org/10/fc7mxd.\n \"\"\"\n acc = _pearson_r(forecast, reference, dim=dim)\n conditional_bias = _conditional_bias(forecast, reference, dim=dim)\n uncond_bias = _bias(forecast, reference, dim=dim) / reference.std(dim)\n skill = acc ** 2 - conditional_bias ** 2 - uncond_bias ** 2\n return skill\n\n\ndef _conditional_bias(forecast, reference, dim='svd', comparison=None):\n \"\"\"Calculate the conditional bias between forecast and reference.\n\n .. math:: \\\\text{conditional bias} = r_{fo} - \\\\frac{\\\\sigma_f}{\\\\sigma_o}\n\n References:\n * https://www-miklip.dkrz.de/about/murcss/\n \"\"\"\n acc = _pearson_r(forecast, reference, dim=dim)\n conditional_bias = acc - _std_ratio(forecast, reference, dim=dim) ** -1\n return conditional_bias\n\n\ndef _std_ratio(forecast, reference, dim='svd', comparison=None):\n \"\"\"Calculate the ratio of standard deviations of reference over forecast.\n\n .. math:: \\\\text{std ratio} = \\\\frac{\\\\sigma_o}{\\\\sigma_f}\n\n References:\n * https://www-miklip.dkrz.de/about/murcss/\n \"\"\"\n ratio = reference.std(dim) / forecast.std(dim)\n return ratio\n\n\ndef _bias_slope(forecast, reference, dim='svd', comparison=None):\n \"\"\"Calculate bias slope between reference and forecast standard deviations.\n\n .. math:: \\\\text{bias slope}= r_{fo} \\\\cdot \\\\text{std ratio}\n\n References:\n * https://www-miklip.dkrz.de/about/murcss/\n \"\"\"\n std_ratio = _std_ratio(forecast, reference, dim=dim)\n acc = _pearson_r(forecast, reference, dim=dim)\n b_s = std_ratio * acc\n return b_s\n\n\ndef _ppp(forecast, reference, dim='svd', comparison=None):\n \"\"\"Prognostic Potential Predictability (PPP) metric.\n\n .. 
math:: PPP = 1 - \\\\frac{MSE}{ \\\\sigma_{ref} \\\\cdot fac}\n\n Range:\n * 1: perfect forecast\n * positive: better than climatology forecast\n * negative: worse than climatology forecast\n\n References:\n * Griffies, S. M., and K. Bryan. “A Predictability Study of Simulated\n North Atlantic Multidecadal Variability.” Climate Dynamics 13, no. 7–8\n (August 1, 1997): 459–87. https://doi.org/10/ch4kc4.\n * Pohlmann, Holger, Michael Botzet, Mojib Latif, Andreas Roesch, Martin\n Wild, and Peter Tschuck. “Estimating the Decadal Predictability of a\n Coupled AOGCM.” Journal of Climate 17, no. 22 (November 1, 2004):\n 4463–72. https://doi.org/10/d2qf62.\n * Bushuk, Mitchell, Rym Msadek, Michael Winton, Gabriel Vecchi, Xiaosong\n Yang, Anthony Rosati, and Rich Gudgel. “Regional Arctic Sea–Ice\n Prediction: Potential versus Operational Seasonal Forecast Skill.\n Climate Dynamics, June 9, 2018. https://doi.org/10/gd7hfq.\n \"\"\"\n mse_skill = _mse(forecast, reference, dim=dim)\n var = reference.std(dim)\n fac = _get_norm_factor(comparison)\n ppp_skill = 1 - mse_skill / var / fac\n return ppp_skill\n\n\ndef _nrmse(forecast, reference, dim='svd', comparison=None):\n \"\"\"Normalized Root Mean Square Error (NRMSE) metric.\n\n .. math:: NRMSE = \\\\frac{RMSE}{\\\\sigma_{o} \\\\cdot \\\\sqrt{fac} }\n = \\\\sqrt{ \\\\frac{MSE}{ \\\\sigma^2_{o} \\\\cdot fac} }\n\n Range:\n * 0: perfect forecast\n * 0 - 1: better than climatology forecast\n * > 1: worse than climatology forecast\n\n References:\n * Bushuk, Mitchell, Rym Msadek, Michael Winton, Gabriel Vecchi, Xiaosong\n Yang, Anthony Rosati, and Rich Gudgel. “Regional Arctic Sea–Ice\n Prediction: Potential versus Operational Seasonal Forecast Skill.”\n Climate Dynamics, June 9, 2018. https://doi.org/10/gd7hfq.\n * Hawkins, Ed, Steffen Tietsche, Jonathan J. Day, Nathanael Melia, Keith\n Haines, and Sarah Keeley. “Aspects of Designing and Evaluating\n Seasonal-to-Interannual Arctic Sea-Ice Prediction Systems.” Quarterly\n Journal of the Royal Meteorological Society 142, no. 695\n (January 1, 2016): 672–83. https://doi.org/10/gfb3pn.\n\n \"\"\"\n rmse_skill = _rmse(forecast, reference, dim=dim)\n var = reference.std(dim)\n fac = _get_norm_factor(comparison)\n nrmse_skill = rmse_skill / np.sqrt(var) / np.sqrt(fac)\n return nrmse_skill\n\n\ndef _nmse(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate Normalized MSE (NMSE) = Normalized Ensemble Variance (NEV).\n\n .. math:: NMSE = NEV = \\\\frac{MSE}{\\\\sigma^2_{o} \\\\cdot fac}\n\n Range:\n * 0: perfect forecast: 0\n * 0 - 1: better than climatology forecast\n * > 1: worse than climatology forecast\n\n References:\n * Griffies, S. M., and K. Bryan. “A Predictability Study of Simulated\n North Atlantic Multidecadal Variability.” Climate Dynamics 13,\n no. 7–8 (August 1, 1997): 459–87. https://doi.org/10/ch4kc4.\n \"\"\"\n mse_skill = _mse(forecast, reference, dim=dim)\n var = reference.std(dim)\n fac = _get_norm_factor(comparison)\n nmse_skill = mse_skill / var / fac\n return nmse_skill\n\n\ndef _nmae(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Normalized Ensemble Mean Absolute Error metric.\n\n .. math:: NMAE = \\\\frac{MAE}{\\\\sigma^2_{o} \\\\cdot fac}\n\n Range:\n * 0: perfect forecast: 0\n * 0 - 1: better than climatology forecast\n * > 1: worse than climatology forecast\n\n References:\n * Griffies, S. M., and K. Bryan. “A Predictability Study of Simulated\n North Atlantic Multidecadal Variability.” Climate Dynamics 13, no.\n 7–8 (August 1, 1997): 459–87. 
https://doi.org/10/ch4kc4.\n\n \"\"\"\n mae_skill = _mae(forecast, reference, dim=dim)\n # TODO: check if this is the expected normalization\n var = reference.std(dim)\n fac = _get_norm_factor(comparison)\n nmse_skill = mae_skill / var / fac\n return nmse_skill\n\n\ndef _uacc(forecast, reference, dim='svd', comparison=None):\n \"\"\"\n Calculate Bushuk's unbiased ACC (uACC).\n\n .. math:: uACC = \\\\sqrt{PPP} = \\\\sqrt{MSSS}\n\n Range:\n * 1: perfect\n * 0 - 1: better than climatology\n\n References:\n * Bushuk, Mitchell, Rym Msadek, Michael Winton, Gabriel\n Vecchi, Xiaosong Yang, Anthony Rosati, and Rich Gudgel. “Regional\n Arctic Sea–Ice Prediction: Potential versus Operational Seasonal\n Forecast Skill. Climate Dynamics, June 9, 2018.\n https://doi.org/10/gd7hfq.\n \"\"\"\n return _ppp(forecast, reference, dim=dim, comparison=comparison) ** 0.5\n","sub_path":"climpred/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":13292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"102704623","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport math\n\nfrom typing import Optional\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.model_selection import learning_curve, cross_validate\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nclass MyKNN:\n def __init__(self, random_state: int, num_features, cat_features) -> None:\n self.random_state = random_state\n\n self.clf = KNeighborsClassifier(n_jobs=-1)\n\n self.num_features = num_features\n self.cat_features = cat_features\n\n self.missing_level = \"Missing\"\n self.unknown_level = -1\n\n def _create_pipeline(self, X: pd.DataFrame, y: Optional[pd.Series],\n training_or_scoring: str,\n imputation_strategy: str = 'median') -> [pd.DataFrame, pd.Series]:\n\n num_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy=imputation_strategy, add_indicator=True)),\n ('scaler', StandardScaler()) # Required for RBF kernel\n ])\n\n cat_pipeline = Pipeline([\n ('missing', SimpleImputer(strategy=\"constant\", fill_value=self.missing_level)),\n ('imputer', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n full_pipeline = ColumnTransformer([\n (\"numeric\", num_pipeline, self.num_features),\n (\"cat\", cat_pipeline, self.cat_features)\n ])\n\n if training_or_scoring == 'training':\n self.pipeline = full_pipeline\n X = self.pipeline.fit_transform(X)\n\n elif training_or_scoring == 'scoring':\n X = self.pipeline.transform(X)\n\n else:\n raise ValueError(\"Please specify either 'training' or 'scoring\")\n\n return X, y\n\n def tune_parameters(self, X: pd.DataFrame, y: pd.Series) -> dict:\n \"\"\"Runs k-fold validation to find the best parameters\n\n Note: In general you run tune_parameters on the training set\n and leave the validation set as an out of sample check on\n performance\n\n Note: if the spec were up to me I'd include parameters for num of folds,\n which hyperparams to tune, etc.\n\n :param X: pandas dataframe to be used for training\n :param y: pandas series that contains the targets\n :return: a dictionary containing the best params and the average scores\n \"\"\"\n\n X, _ = self._create_pipeline(X, y, \"training\")\n\n parameters = { 
\"n_neighbors\": [25],\n \"weights\": ['uniform', 'distance'],\n # \"metric\": ['euclidean', 'manhattan', 'mahalanobis']\n }\n\n self.clf = GridSearchCV(self.clf, parameters,\n scoring='neg_log_loss', n_jobs=-1, verbose=3)\n\n self.clf.fit(X, y)\n\n cv_results = self.clf.cv_results_\n results_df = pd.DataFrame({\"params\": cv_results['params'],\n \"mean_fit_time\": cv_results['mean_fit_time'],\n \"mean_score_time\": cv_results['mean_score_time'],\n \"logloss_rank\": cv_results['rank_test_score'],\n \"logloss_results\": cv_results['mean_test_score'],\n })\n\n\n return self.clf, results_df\n\n def run_learning_curve(self, X, y, parameters):\n\n clf = KNeighborsClassifier(**parameters)\n\n X, _ = self._create_pipeline(X, y, \"training\")\n clf.fit(X,y)\n\n train_sizes, train_scores, valid_scores, \\\n fit_times, score_times = learning_curve(clf, X, y,\n n_jobs=-1, verbose=3, shuffle=True,\n scoring='neg_log_loss',\n random_state=self.random_state,\n return_times=True)\n\n return train_sizes, np.mean(train_scores, axis=1), np.mean(valid_scores, axis=1), \\\n np.mean(fit_times, axis=1), np.mean(score_times, axis=1)\n\n def run_cv(self, X, y, parameters, k):\n\n clf = KNeighborsClassifier(**parameters)\n\n scores = cross_validate(clf, X, y,\n n_jobs=-1, verbose=3,\n scoring='neg_mean_absolute_error',\n cv=k, return_train_score=True)\n\n\n return {k: np.mean(v) for k, v in scores.items()}\n\n","sub_path":"project_1/src/lemons/my_knn.py","file_name":"my_knn.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"304495115","text":"# Mendoza Morelos Martin Mathier\n# Programa para auto-generar código máquina a partir de instrucciones en ensamblador para MIPS\n# 1 instruccion por línea\n# Comentarios usando # al inicio de la linea\n\n# Espaciado entre elementos\nespaciado = \"\"\n# Dividir cada linea en N caracteres\ncorteLinea = 8 #37 #8\n# Cantidad de lineas minimas\nlineasMinimas = 128 #32 #128\n\nclass Main:\n def __init__(self):\n self.leerArchivos()\n pass\n\n def leerArchivos(self):\n ensamblador = open(\"instruccionesEnsamblador.asm\", \"r\")\n codigoMaquina = open(\"instruccionesBinario.bin\",\"w+\")\n i = 0\n for line in ensamblador:\n print(line.rstrip())\n if line.rstrip() != \"\":\n if line.rstrip()[0] != \"#\":\n #Cortar la primer palabra de instrucción y leerla\n binarioInstr = self.leerInstruccion(line)\n print(binarioInstr)\n # Dividir cada linea en N caracteres\n for j in range(0, len(binarioInstr), corteLinea):\n lineaNueva = binarioInstr[j:j+corteLinea]\n codigoMaquina.write(lineaNueva)\n codigoMaquina.write(\"\\n\")\n i = i + 1\n # Completar la memoria de instrucciones con 0\n for k in range(i,lineasMinimas,1):\n lineaVacia = \"0\" * corteLinea\n codigoMaquina.write(lineaVacia)\n #Evitar escribir más de las lineas minimas\n if k != lineasMinimas-1:\n codigoMaquina.write(\"\\n\")\n ensamblador.close()\n codigoMaquina.close()\n\n def leerInstruccion(self,linea):\n instruccion = linea.split()[0]\n op = \"\"\n instCode = \"\"\n shamt = \"\"\n if instruccion.upper() == \"ADD\":\n op = \"000000\"\n instCode = \"100000\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"SUB\":\n op = \"000000\"\n instCode = \"100010\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == 
\"MUL\":\n op = \"000000\"\n instCode = \"000010\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"DIV\":\n op = \"000000\"\n instCode = \"011010\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"AND\":\n op = \"000000\"\n instCode = \"100100\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"OR\":\n op = \"000000\"\n instCode = \"100101\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"NOR\":\n op = \"000000\"\n instCode = \"100111\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"SLL\":\n op = \"000000\"\n instCode = \"000000\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + shamt + espaciado + parametros + instCode\n elif instruccion.upper() == \"SRL\":\n op = \"000000\"\n instCode = \"000011\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + shamt + espaciado + parametros + instCode\n elif instruccion.upper() == \"SLT\":\n op = \"000000\"\n instCode = \"101010\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n elif instruccion.upper() == \"XOR\":\n op = \"000000\"\n instCode = \"100110\"\n shamt = \"00000\"\n parametros = self.leerParametros(op, linea)\n return op + espaciado + parametros + shamt + espaciado + instCode\n\n def leerParametros(self, op, linea):\n parametros = \"\"\n if op == \"000000\":\n # Cortar parametros de la instruccion\n for palabra in linea.split():\n if palabra[0] == \"$\":\n palabra = palabra[1:] # Cortar el signo $\n if palabra[-1] == \",\":\n palabra = palabra[:-1] # Cortar el signo ,\n parametros = parametros + f\"{int(palabra):05b}\" + espaciado\n return parametros\n\n# Primera función que se ejecuta\n# Aqui se hacen todas las llamadas a las clases\nAplicacion = Main()\n","sub_path":"compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458642951","text":"import nose.tools as ntools\nimport unittest\n\nfrom smqtk.representation.descriptor_element.local_elements import DescriptorFileElement\n\n\n__author__ = \"paul.tunison@kitware.com\"\n\n\nclass TestDescriptorFileElement (unittest.TestCase):\n\n def test_configuration(self):\n default_config = DescriptorFileElement.get_default_config()\n ntools.assert_equal(default_config,\n {\n 'save_dir': None,\n 'subdir_split': None,\n })\n\n default_config['save_dir'] = '/some/path/somewhere'\n default_config['subdir_split'] = 4\n\n inst1 = DescriptorFileElement.from_config(default_config,\n 'test', 'abcd')\n ntools.assert_equal(default_config, inst1.get_config())\n ntools.assert_equal(inst1._save_dir, '/some/path/somewhere')\n ntools.assert_equal(inst1._subdir_split, 4)\n\n # vector-based equality\n inst2 = DescriptorFileElement.from_config(inst1.get_config(),\n 'test', 'abcd')\n ntools.assert_equal(inst1, 
inst2)\n","sub_path":"python/smqtk/tests/representation/DescriptorElement/test_DescriptorFileElement.py","file_name":"test_DescriptorFileElement.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"557795978","text":"from behave import given, when, then\r\n\r\nfrom algoritimo import recebe\r\n\r\n@given('tres numeros (4, 3, 5)')\r\ndef step_recebe_numeros(context):\r\n context.A = 4\r\n context.B = 3\r\n context.C = 5\r\n\r\n\r\n@when('quero saber qual e o do meio')\r\ndef step_organiza_numeros(context):\r\n A = context.A\r\n B = context.B\r\n C = context.C\r\n context.result = recebe(A, B, C)\r\n\r\n\r\n@then('o resultado e 4.')\r\ndef step_retorna_numero_meio(context):\r\n assert context.result == 4\r\n","sub_path":"aula_3/numero_meio/features/steps/meio.py","file_name":"meio.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"424170974","text":"import RNN_arch as r\nimport base as b\nimport dynet as dy\nimport numpy as np\n\n# dynet Params\n# dyparams = dy.DynetParams()\n# dyparams.set_mem(2056)\n# dyparams.set_requested_gpus(1)\n# dyparams.init()\n\npath_to_features = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_features-rs1-train.csv\"\npath_to_tags = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_tags-rs1-train.csv\"\npath_to_labels = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_labels-rs1-train.csv\"\n\npath_to_test_features = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_features-rs1-dev.csv\"\npath_to_test_tags = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_tags-rs1-dev.csv\"\npath_to_test_labels = \"/media/mcapizzi/data/Datasets/Segmentation/dataset_swbd_with_concat/swbd_with_concat_labels-rs1-dev.csv\"\n\n## STEP 1: build class\n\nrnn = r.RNN(\n num_labels=2,\n # lookup_names=[\"words\", \"tags\", \"whatever\"],\n lookup_names=[\"words\", \"tags\"],\n # lookup_names=[\"words\"],\n # update_embeddings=[True, True, True],\n update_embeddings=[True, True],\n # update_embeddings=[True],\n # embedding_sizes=[300, 10, 5],\n # embedding_sizes=[5, 3, 2],\n # embedding_sizes=[300],\n # embedding_sizes=[5, 3],\n embedding_sizes=[300, 20],\n # embedding_sizes=[5],\n dropout_rate=.4,\n # word_dropout_rate=.4,\n bi_directional=False,\n RNN_type=\"LSTM\",\n hidden_size=250\n)\n\n## STEP 2: import data\n### this imports data *and* builds vocabulary (if second argument is `True`)\nfeatures, features_voc = rnn.process_raw_data(path_to_features, True)\ntags, tags_voc = rnn.process_raw_data(path_to_tags, True)\nlabels, _ = rnn.process_raw_data(path_to_labels, False)\n\ntest_features, _ = rnn.process_raw_data(path_to_test_features, False)\ntest_tags, _ = rnn.process_raw_data(path_to_test_tags, False)\ntest_labels, _ = rnn.process_raw_data(path_to_test_labels, False)\n\n## STEP 3: build lookups\n### for each element in rnn.lookup_names\n\n# IF LOADING PRETRAINED\nrnn.lookup_info[\"words\"][\"w2i\"], rnn.lookup_info[\"words\"][\"i2pw\"] = \\\n rnn.build_pretrained_lookup(\n # vector_file_path=\"/Users/mcapizzi/Desktop/vectors_Goldberg_sample.txt\",\n # vector_file_path=\"/media/mcapizzi/data/Datasets/Segmentation/vectors_Goldberg_sample.txt\",\n 
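# NOTE: the absolute paths here are machine-specific examples; any word2vec-style text file of pretrained embeddings can be substituted.\n            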
vector_file_path=\"/media/mcapizzi/data/Datasets/Segmentation/vectors_Goldberg.txt\",\n vocabulary=features_voc\n )\n\n# IF JUST INITIALIZING\n# # build for \"words\"\n# rnn.lookup_info[\"words\"][\"w2i\"] = rnn.build_vocabulary_lookup(vocabulary=features_voc)\n# # build for \"tags\"\nrnn.lookup_info[\"tags\"][\"w2i\"] = rnn.build_vocabulary_lookup(vocabulary=tags_voc)\n\n# # build for \"whatever\"\n# rnn.lookup_info[\"whatever\"][\"w2i\"] = rnn.build_vocabulary_lookup(\n# vocabulary=[\"99\", \"88\", \"77\", \"55\"],\n# keep=2\n# )\n\n## STEP 3: initialize parameters\n\nrnn.initialize_parameters()\n\n\n## STEP 4: set up model\nm = dy.Model()\n# trainer_ = dy.RMSPropTrainer(m)\ntrainer_ = dy.SimpleSGDTrainer(m)\n\n\n## STEP 5: train\nEPOCHS = 30\nprint(\"train\")\n# for every epoch\nfor e in range(EPOCHS):\n e_loss = b.train_seq_one_epoch(\n train_X={\"words\": features, \"tags\": tags},\n train_y=labels,\n forward_graph=rnn.forward,\n trainer=trainer_,\n loss_method=b.calculate_batch_RNN_loss,\n reporting_method=b.training_reporting,\n reporting_increment=1000,\n total_loss=0,\n shuffle=True,\n minibatch_size=200\n )\n print(\"epoch {} complete: {} loss\".format(e + 1, e_loss))\n\n## STEP 6: predict\nprint(\"predict\")\n### single prediction\npredictions = rnn.forward(\n X=zip([features[3][0]], [tags[3][0]]),\n predict=True\\\n )\nprint([p for p in predictions])\n\n### over test set\nprint(\"test\")\n\n# build a features dict for input\nfeatures_dict = {\"words\": test_features, \"tags\": test_tags}\n\n# run through\nall_gold, all_preds = b.test_seq(\n test_X=features_dict,\n test_y=test_labels,\n forward_graph=rnn.forward\n)\n\n","sub_path":"src/estimator_dynet/sample_RNN_script.py","file_name":"sample_RNN_script.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"637800512","text":"# Standard\nfrom contextvars import Context\nimport typing as T\n\n# Project\nfrom ._types import Listeners, ListenerCb, ListenerOpts\nfrom ._context import CONTEXT, context as emitter_context\nfrom ._helpers import retrieve_listeners_from_namespace\n\nK = T.TypeVar(\"K\")\n\n\ndef _remove_context_listener(\n context: emitter_context,\n listener: ListenerCb[K],\n listeners: T.MutableMapping[ListenerCb[T.Any], T.Tuple[ListenerOpts, Context]],\n) -> bool:\n if listener not in listeners:\n return False\n\n _, ctx = listeners[listener]\n if ctx[CONTEXT] not in context:\n return False\n\n del listeners[listener]\n return True\n\n\ndef _remove_context_listeners(\n context: emitter_context,\n listeners: T.MutableMapping[ListenerCb[T.Any], T.Tuple[ListenerOpts, Context]],\n) -> bool:\n removed = False\n\n for (listener, (_, ctx)) in tuple(listeners.items()):\n if ctx[CONTEXT] in context:\n removed = True\n del listeners[listener]\n\n return removed\n\n\ndef _remove_all_context_listeners(context: emitter_context, listeners: Listeners) -> bool:\n removed = False\n for _, event_listeners in listeners.types.items():\n removed = _remove_context_listeners(context, event_listeners) or removed\n\n for _, scoped_listeners in listeners.scope.items():\n removed = _remove_context_listeners(context, scoped_listeners) or removed\n\n return removed\n\n\ndef _remove_scoped_context_listener(\n scope: T.Tuple[str, ...],\n context: emitter_context,\n listener: ListenerCb[K],\n listeners: T.MutableMapping[\n T.Tuple[str, ...], T.MutableMapping[ListenerCb[T.Any], T.Tuple[ListenerOpts, Context]],\n ],\n) -> bool:\n removed = False\n for step 
in range(len(scope), 0, -1):\n removed = (\n _remove_context_listener(context, listener, listeners[scope[: (step + 1)]]) or removed\n )\n\n return removed\n\n\ndef _remove_all_scoped_context_listener(\n scope: T.Tuple[str, ...],\n context: emitter_context,\n listeners: T.MutableMapping[\n T.Tuple[str, ...], T.MutableMapping[ListenerCb[T.Any], T.Tuple[ListenerOpts, Context]],\n ],\n) -> bool:\n removed = False\n for listener_scope, scoped_listeners in listeners.items():\n if scope > listener_scope:\n continue\n\n removed = _remove_context_listeners(context, scoped_listeners) or removed\n\n return removed\n\n\ndef remove(\n event: T.Union[str, None, T.Type[K]],\n namespace: object,\n listener: T.Optional[ListenerCb[K]] = None,\n context: T.Optional[emitter_context] = None,\n) -> bool:\n \"\"\"Remove listeners, limited by scope, from given event type.\n\n When no context is provided assumes current context.\n\n When no event_type and no listener are passed removes all listeners from the given namespace\n and context.\n\n When no event_type is specified but a listener is given removes all references to the listener,\n whetever scoped or typed, from the given namespace and context.\n\n When both event and listener are specified, remove only the correspondent match from the given\n namespace and context.\n\n Raises:\n\n ValueError: event_type is None, but scope or listener are not.\n\n Args:\n\n event: Define from which event types the listeners will be removed.\n\n listener: Define the listener to be removed.\n\n namespace: Define from which namespace to remove the listener\n\n context: Define context to restrict listener removal\n\n Returns:\n\n Boolean indicating whether any listener removal occurred.\n\n \"\"\"\n listeners = retrieve_listeners_from_namespace(namespace)\n\n if context is None:\n context = listeners.context or CONTEXT.get()\n\n if event is None:\n if listener is None:\n return _remove_all_context_listeners(context, listeners)\n\n removed = False\n for scoped_listeners in listeners.scope.values():\n removed = _remove_context_listener(context, listener, scoped_listeners) or removed\n\n for typed_listeners in listeners.types.values():\n removed = _remove_context_listener(context, listener, typed_listeners) or removed\n\n return removed\n elif isinstance(event, str):\n if event == \"\":\n raise ValueError(\"Event scope must be a valid string\")\n\n scope = tuple(event.split(\".\"))\n return (\n _remove_all_scoped_context_listener(scope, context, listeners.scope)\n if listener is None\n else _remove_scoped_context_listener(scope, context, listener, listeners.scope)\n )\n elif event in listeners.types:\n typed_listeners = listeners.types[event]\n return (\n _remove_context_listeners(context, typed_listeners)\n if listener is None\n else _remove_context_listener(context, listener, typed_listeners)\n )\n return False\n","sub_path":"emitter/_remove.py","file_name":"_remove.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576985893","text":"from apicem import * # APIC-EM IP is assigned in apicem_config.py\n\ntry:\n resp = get(api=\"policy/tag/count\")\n response_json = resp.json()\n count = response_json[\"response\"] # policy tags\n if count == 0 :\n print (\"No policy tag found\")\n sys.exit()\nexcept:\n print (\"Something wrong with getting policy tag count!\")\n sys.exit()\n\ntry:\n resp = get(api=\"policy/tag/association\")\n response_json = resp.json()\n tag = 
response_json[\"response\"] # policy tag association\nexcept:\n print (\"Something wrong with GET policy/tag/association!\")\n sys.exit()\n\ntag_list = []\n\ni=0\nfor item in tag:\n if \"policyTag\" in item:\n if item[\"networkDevices\"] == []:\n i+=1\n tag_list.append([i,item[\"policyTag\"],\"\",\"\"])\n else:\n for item1 in item[\"networkDevices\"]:\n i+=1\n tag_list.append([i,item[\"policyTag\"],item1[\"deviceName\"],item1[\"deviceIp\"]])\n\n \nprint (\"*** If policy tag is associated with network device, it cannot be deleted ***\\n\")\nprint (\"---------------- Select one with no network device attached -----------------\\n\") \nprint (tabulate(tag_list, headers=['Number','Policy Tag associated with','Device Name','Device IP'],tablefmt=\"rst\"),'\\n')\n\n# Ask user's input \n# In the loop until tag is selected or user select 'exit'\ntag_to_delet=\"\"\ntag_idx = 1\nwhile True:\n tag_num = input('=> Enter a number from above to delete policy tag: ')\n tag_num = tag_num.replace(\" \",\"\") # ignore space\n if tag_num.lower() == 'exit': \n sys.exit()\n if tag_num.isdigit():\n if int(tag_num) in range(1,len(tag_list)+1):\n tag_to_delet=tag_list[int(tag_num)-1][tag_idx] # 1 is the position of policy tag\n break\n else:\n print (\"Oops! number is out of range, please try again or enter 'exit'\")\n else:\n print (\"Oops! input is not a digit, please try again or enter 'exit'\")\n# End of while loop\n\nif tag_to_delet==\"\":\n print (\"For some reason, tag name is NULL!\")\n sys.exit()\n\n#### Delete ####\n\nparams={'policyTag':tag_to_delet}\ntry:\n resp= delete(api=\"policy/tag/\",params=params)\n print (\"status: \",resp.status_code)\n print (\"Response:\",json.dumps(resp.json(),indent=4))\nexcept:\n print (\"Something wrong with deleting policy/tag\")\n sys.exit() \n \n","sub_path":"lab5-4-delete-policy-tag.py","file_name":"lab5-4-delete-policy-tag.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"216535718","text":"import os\nimport sys\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtGui import QGuiApplication\nfrom PySide2.QtWidgets import QAction, QApplication, QGridLayout, QMainWindow, QMenu, QSplitter, QWidget\nfrom CheckUpdateDialog import CheckUpdateDialog\nfrom CourseTreeWidget import CourseTreeWidget\nfrom DownloadDirTreeWidget import DownloadDirTreeWidget\nfrom ExceptionHook import UncaughtHook\nfrom FileDownloaderWidget import FileDownloaderWidget\nfrom FileListWidget import FileListWidget\nfrom Settings import Settings\nfrom UserLoginDialog import UserLoginDialog\n\n\nclass ABookDownloaderMainWindow(QMainWindow):\n\n def __init__(self, path, settings, session):\n QMainWindow.__init__(self)\n self.settings = settings\n self.course_tree_widget = CourseTreeWidget(path, settings, session)\n self.file_list_widget = FileListWidget()\n self.download_dir_tree_widget = DownloadDirTreeWidget(settings['download_path'])\n self.file_downloader = FileDownloaderWidget()\n self.vSplitter = QSplitter(Qt.Vertical)\n self.hSplitter1 = QSplitter(Qt.Horizontal)\n self.hSplitter2 = QSplitter(Qt.Horizontal)\n self.hSplitter1.addWidget(self.course_tree_widget)\n self.hSplitter1.addWidget(self.file_list_widget)\n self.hSplitter2.addWidget(self.download_dir_tree_widget)\n self.hSplitter2.addWidget(self.file_downloader)\n self.vSplitter.addWidget(self.hSplitter1)\n self.vSplitter.addWidget(self.hSplitter2)\n self.maxWidth = QGuiApplication.primaryScreen().size().width()\n self.maxHeight = 
QGuiApplication.primaryScreen().size().height()\n self.hSplitter1.setSizes([self.maxWidth, self.maxWidth])\n self.hSplitter2.setSizes([self.maxWidth, self.maxWidth])\n self.vSplitter.setSizes([self.maxHeight, self.maxHeight])\n mainWidget = QWidget(self)\n mainLayout = QGridLayout()\n mainWidget.setLayout(mainLayout)\n mainLayout.addWidget(self.vSplitter)\n self.course_tree_widget.signal.clearFileListWidget.connect(self.file_list_widget.clear)\n self.course_tree_widget.signal.appendRowFileListWidget.connect(self.file_list_widget.appendRow)\n self.course_tree_widget.signal.addDownloadTask.connect(self.file_downloader.addDownloadTask)\n self.setCentralWidget(mainWidget)\n self.init_menubar()\n self.setWindowTitle(\"ABookDownloader Dev\")\n self.showMaximized()\n\n def init_menubar(self):\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Alt+F4')\n exitAction.setStatusTip('Quit')\n exitAction.triggered.connect(self.close)\n\n aboutAction = QAction('About', self)\n aboutAction.setStatusTip('About')\n\n updateAction = QAction('Check Updates', self)\n updateAction.setStatusTip('Check Update')\n updateAction.triggered.connect(self.checkUpdate)\n\n aboutQtAction = QAction(\"About Qt\", self)\n aboutQtAction.triggered.connect(QApplication.aboutQt)\n\n debugAction = QAction('Debug', self)\n debugAction.triggered.connect(self.debug)\n\n maximizeCourseWindow = QAction('Maximize Course Window', self)\n maximizeCourseWindow.triggered.connect(self.maximizeCourse)\n maximizeCourseWindow.setShortcut('Alt+D')\n maximizeResourceWindow = QAction('Maximize Resource Window', self)\n maximizeResourceWindow.triggered.connect(self.maximizeResource)\n maximizeResourceWindow.setShortcut('Alt+F')\n maximizeLocalFilesWindow = QAction('Maximize Local Files Window', self)\n maximizeLocalFilesWindow.triggered.connect(self.maximizeLocalFiles)\n maximizeLocalFilesWindow.setShortcut('Alt+C')\n maximizeDownloaderWindow = QAction('Maximize Downloader Window', self)\n maximizeDownloaderWindow.triggered.connect(self.maximizeDownloader)\n maximizeDownloaderWindow.setShortcut('Alt+V')\n resetWindow = QAction('Reset Window Layout', self)\n resetWindow.triggered.connect(self.resetWindow)\n resetWindow.setShortcut('Alt+R')\n\n self.menuBar().setNativeMenuBar(True)\n fileMenu = QMenu('About')\n fileMenu.addAction(exitAction)\n fileMenu.addAction(aboutQtAction)\n fileMenu.addAction(updateAction)\n fileMenu.addAction(debugAction)\n\n windowMenu = QMenu('Window')\n windowMenu.addAction(maximizeCourseWindow)\n windowMenu.addAction(maximizeResourceWindow)\n windowMenu.addAction(maximizeLocalFilesWindow)\n windowMenu.addAction(maximizeDownloaderWindow)\n windowMenu.addAction(resetWindow)\n\n self.menuBar().addMenu(fileMenu)\n self.menuBar().addMenu(windowMenu)\n\n def maximizeCourse(self):\n self.hSplitter1.setSizes([self.maxWidth, 0])\n self.vSplitter.setSizes([self.maxHeight, 0])\n\n def maximizeResource(self):\n self.hSplitter1.setSizes([0, self.maxWidth])\n self.vSplitter.setSizes([self.maxHeight, 0])\n\n def maximizeLocalFiles(self):\n self.hSplitter2.setSizes([self.maxWidth, 0])\n self.vSplitter.setSizes([0, self.maxHeight])\n\n def maximizeDownloader(self):\n self.hSplitter2.setSizes([0, self.maxWidth])\n self.vSplitter.setSizes([0, self.maxHeight])\n\n def resetWindow(self):\n self.hSplitter1.setSizes([self.maxWidth, self.maxWidth])\n self.hSplitter2.setSizes([self.maxWidth, self.maxWidth])\n self.vSplitter.setSizes([self.maxHeight, self.maxHeight])\n\n def checkUpdate(self):\n checkUpdateDialog = 
CheckUpdateDialog(self.settings)\n checkUpdateDialog.exec_()\n\n def debug(self):\n raise SystemError('An debug error')\n\n\ndef init():\n dirList = ['./Downloads', './temp', './temp/jsonCache', './temp/picCache']\n for dir in dirList:\n os.makedirs(dir, exist_ok=True)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n # Basic init\n init()\n\n exceptionHook = UncaughtHook()\n\n # Load settings\n settings = Settings('./temp/settings.json')\n\n # Set QSS\n if settings['stylesheet_path'] is not None:\n path = settings['stylesheet_path']\n with open(path, 'r', encoding='utf-8') as file:\n app.setStyleSheet(file.read())\n\n # User login\n user = UserLoginDialog(settings)\n if settings['debug'] is False:\n user.exec_()\n if user.loginStatus is False:\n exit(0)\n\n # Main window\n abook = ABookDownloaderMainWindow('./temp', settings, user)\n abook.show()\n if settings['debug'] is True:\n abook.course_tree_widget.importCourseButton.setDisabled(True)\n\n sys.exit(app.exec_())\n","sub_path":"src/ABookDownloader.py","file_name":"ABookDownloader.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"374135169","text":"from struct import *\nfrom logger import Logger\nfrom prefs import Prefs\nfrom web import Webpage\nfrom stats import Stats\nimport socket\nimport binascii\nimport time\nimport sys\n\n\nclass LegionsClient:\n\n\tdef __init__(self):\n\t\tself.ip_list = []\n\t\tself.server_info = {}\n\n\t\t#Structs\n\t\tself.reply_struct \t\t= Struct(' 0:\n\t\t\t\tfor i in range(packet_count-1):\n\t\t\t\t\tdata.append(req_sock.recv(4096))\n\t\t\t\t\tprint(\"MSG: \", binascii.hexlify(data[-1]))\n\t\t\treq_sock.close()\n\t\t\treturn data\n\n\t\texcept socket.timeout:\n\t\t\tmessage = \"ERR: Master server timed out, will retry...\"\n\t\t\tprint(message)\n\t\t\ttime.sleep(1)\n\t\t\treturn False\n\t\texcept socket.gaierror:\n\t\t\tmessage = \"ERR: Master server DNS fail, will retry...\"\n\t\t\tprint(message)\n\t\t\ttime.sleep(1)\n\t\t\treturn False\n\t\texcept ConnectionRefusedError:\n\t\t\tmessage = \"ERR: Master server refused the connection, will retry...\"\n\t\t\tprint(message)\n\t\t\ttime.sleep(1)\n\n\tdef parse_master(self, data):\n\t\toffset = 10\n\t\tserver_data = []\n\n\t\tfor packet in data:\n\t\t\tservers_in_packet = self.reply_struct.unpack_from(packet)[-1]\n\t\t\tfor servers in range(servers_in_packet):\n\t\t\t\tserver_data.append(self.server_struct.unpack_from(packet, offset = offset))\n\t\t\t\toffset += self.server_struct.size\n\n\t\tfor server in server_data:\n\t\t\tself.ip_list.append((\".\".join(str(i) for i in server[0]),server[1]))\n\n\tdef query_master(self):\n\t\tpacket = self.pack_master()\n\t\tmaster_data = self.send_master(packet)\n\t\tif master_data:\n\t\t\tself.parse_master(master_data)\n\t\telse:\n\t\t\tself.retry = True\n\n\n\tdef pack_single(self, header):\n\t\tflags = 2\n\t\tsession = 0\n\t\treturn self.info_struct_req.pack(header, flags, session)\n\n\tdef send_single(self, host, packet):\n\t\tdata = None\n\t\ttry:\n\t\t\treq_sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\t\t\treq_sock.connect(host)\n\t\t\treq_sock.send(packet)\n\t\t\treq_sock.settimeout(4)\n\t\t\tdata = req_sock.recv(4096)\n\t\texcept socket.timeout:\n\t\t\tprint(\"MSG: {0} timed out...\".format(host))\n\t\texcept socket.gaierror:\n\t\t\tprint(\"MSG: {0} DNS error...\".format(host))\n\t\texcept ConnectionRefusedError:\n\t\t\tprint(\"MSG: {0} refused the 
connection...\".format(host))\n\t\treq_sock.close()\n\t\treturn data\n\n\tdef parse_single(self, data):\n\t\toffset = 1\n\t\tif self.byte_struct.unpack_from(data)[0] == self.game_info_resp:\n\t\t\tflags, key = Struct(\" int:\n # base condition\n if len(height) < 2:\n return []\n left = 0\n right = len(height) - 1\n maxi = 0\n # 2 pointer approcah\n while left < right:\n mini = min(height[left], height[right])\n maxi = max(maxi, mini * (right - left))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\n return maxi\n\n # time - O(n^2)\n # space - o(1)\n\n# ans = 0\n# for i in range(len(height)):\n# for j in range(1,len(height)):\n# mini = min(height[i], height[j])\n# ans = max(ans, mini * (j-i))\n# return ans\n","sub_path":"Container_with_most_water.py","file_name":"Container_with_most_water.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30602607","text":"from time import time\n\ndef select_sort(origin_items, comp=lambda x, y: x < y):\n \"\"\"简单选择排序\"\"\"\n items = origin_items[:]\n for i in range(len(items) - 1):\n min_index = 1\n for j in range(i+1 , len(items)):\n if comp(items[j], items[min_index]):\n min_index = j\n items[i] , items[min_index] = items[min_index], items[i]\n return items\n\ndef main():\n start = time()\n list = [1,20,5,2342,61,301,486,184,283,12,593,21,1234,1234,4576,4567,456,245,7,3456,2,1345,34,124,124]\n comp_list = select_sort(list)\n end = time()\n print(comp_list)\n print('处理时间:%.5f' %(end - start))\n\nif __name__ == '__main__':\n main()","sub_path":"Day16-20/select_sort.py","file_name":"select_sort.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"300613994","text":"import collections\nimport itertools\n\n\nclass Solution(object):\n def largestTimeFromDigits(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: str\n \"\"\"\n largest = None\n for h1, h2, m1, m2 in itertools.permutations(A):\n h = h1 * 10 + h2\n m = m1 * 10 + m2\n if h > 23 or m > 59:\n continue\n time = h1 * 1000 + h2 * 100 + m1 * 10 + m2\n largest = largest if largest and largest > time else time\n if largest is None:\n return \"\"\n res = str(largest)\n while len(res) < 4:\n res = \"0\" + res\n return res[0:2] + \":\" + res[2:]\n","sub_path":"src2/largest-time-for-given-digits/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90234731","text":"# =============================================================================\n# Casanova Enricher\n# =============================================================================\n#\n# A CSV reader/writer combo that can be used to read an input CSV file and\n# easily ouput a similar CSV file while editing, adding and filtering cell_count.\n#\nimport os\nimport csv\nfrom threading import Lock\n\nfrom casanova.contiguous_range_set import ContiguousRangeSet\nfrom casanova.exceptions import (\n NotResumableError,\n ResumeError,\n MissingColumnError,\n CorruptedIndexColumn\n)\nfrom casanova.reader import (\n CasanovaReader,\n get_column_index,\n collect_column_indices\n)\nfrom casanova.utils import (\n is_resumable_buffer,\n is_empty_buffer,\n is_mute_buffer\n)\n\n\ndef make_enricher(name, namespace, Reader):\n\n class AbstractCasanovaEnricher(Reader):\n __name__ = name\n\n def __init__(self, input_file, output_file, 
no_headers=False,\n resumable=False, auto_resume=True, keep=None, add=None,\n listener=None, prepend=None):\n\n # Inheritance\n super().__init__(\n input_file,\n no_headers=no_headers\n )\n\n # Sanity tests\n if resumable and not is_resumable_buffer(output_file):\n raise NotResumableError('%s: expecting an \"a+\" or \"a+b\" buffer.' % namespace)\n\n self.output_file = output_file\n self.writer = csv.writer(output_file)\n self.keep_indices = None\n self.output_fieldnames = self.fieldnames\n self.added_count = 0\n self.padding = None\n\n self.resumable = resumable\n self.should_resume = False\n self.already_done_count = 0\n\n self.listener = listener\n\n if keep is not None:\n self.keep_indices = collect_column_indices(self.pos, keep)\n self.output_fieldnames = self.filterrow(self.output_fieldnames)\n\n if add is not None:\n self.output_fieldnames += add\n self.added_count = len(add)\n self.padding = [''] * self.added_count\n\n if prepend is not None:\n self.output_fieldnames = prepend + self.output_fieldnames\n\n # Need to write headers?\n output_buffer_is_empty = is_mute_buffer(output_file) or is_empty_buffer(output_file)\n\n if not no_headers:\n\n if not resumable or output_buffer_is_empty:\n self.writeheader()\n\n # Resuming\n if resumable and not output_buffer_is_empty:\n self.should_resume = True\n\n if auto_resume:\n self.resume()\n\n def __repr__(self):\n columns_info = ' '.join('%s=%s' % t for t in zip(self.pos._fields, self.pos))\n\n return '<%s%s%s %s>' % (\n namespace,\n ' resumable' if self.resumable else '',\n ' unordered' if getattr(self, 'unordered', False) else '',\n columns_info\n )\n\n def resume(self):\n\n if not self.should_resume:\n return\n\n self.should_resume = False\n\n # Rolling back to beginning of file\n output_file = self.output_file\n\n if self.binary:\n output_file = open(output_file.name, 'rb')\n else:\n output_file.seek(0, os.SEEK_SET)\n\n reader = Reader(output_file, no_headers=self.fieldnames is None)\n\n should_emit = callable(self.listener)\n\n if should_emit:\n self.listener('resume.start', None)\n\n for row in reader:\n self.already_done_count += 1\n\n if should_emit:\n self.listener('resume.output', row)\n\n if self.binary:\n output_file.close()\n\n i = 0\n\n while i < self.already_done_count:\n try:\n row = next(self.reader)\n\n if should_emit:\n self.listener('resume.input', row)\n\n i += 1\n except StopIteration:\n raise ResumeError('%s.resume: output has more lines than input.' % namespace)\n\n def filterrow(self, row):\n if self.keep_indices is not None:\n row = [row[i] for i in self.keep_indices]\n\n return row\n\n def formatrow(self, row, add=None, index=None):\n\n # Additions\n if self.added_count > 0:\n if add is None:\n add = self.padding\n else:\n assert len(add) == self.added_count, '%s.writerow: expected %i additional cells but got %i.' % (namespace, self.added_count, len(add))\n\n row = self.filterrow(row) + add\n\n # No additions\n else:\n assert add is None, '%s.writerow: expected no additions.' 
% namespace\n\n row = self.filterrow(row)\n\n if index is not None:\n row = [index] + row\n\n return row\n\n def writeheader(self):\n self.writer.writerow(self.output_fieldnames)\n\n def writerow(self, row, add=None):\n self.writer.writerow(self.formatrow(row, add))\n\n class AbstractThreadsafeCasanovaEnricher(AbstractCasanovaEnricher):\n __name__ = 'Threadsafe' + name\n\n def __init__(self, input_file, output_file, no_headers=False,\n resumable=False, auto_resume=True, keep=None, add=None,\n listener=None, index_column='index'):\n\n self.index_column = index_column\n self.event_lock = Lock()\n self.already_done = ContiguousRangeSet()\n\n # Inheritance\n super().__init__(\n input_file,\n output_file,\n no_headers=no_headers,\n resumable=resumable,\n keep=keep,\n add=add,\n listener=listener,\n prepend=[index_column]\n )\n\n def __iter__(self):\n iterator = enumerate(super().__iter__())\n should_emit = callable(self.listener)\n\n for index, row in iterator:\n if self.already_done.stateful_contains(index):\n if should_emit:\n with self.event_lock:\n self.listener('resume.input', row)\n\n continue\n\n yield index, row\n\n def resume(self):\n\n # Rolling back to beginning of file\n output_file = self.output_file\n\n if self.binary:\n output_file = open(output_file.name, 'rb')\n else:\n output_file.seek(0, os.SEEK_SET)\n\n reader = Reader(output_file, no_headers=self.fieldnames is None)\n\n should_emit = callable(self.listener)\n\n i = get_column_index(reader.pos, self.index_column)\n\n if i is None:\n raise MissingColumnError(self.index_column)\n\n for row in reader:\n try:\n current_index = int(row[i])\n except ValueError:\n raise CorruptedIndexColumn\n\n self.already_done.add(current_index)\n\n if should_emit:\n with self.event_lock:\n self.listener('resume.output', row)\n\n self.already_done_count = len(self.already_done)\n\n if self.binary:\n output_file.close()\n\n def cells(self, column, with_rows=False):\n if with_rows:\n index = 0\n for row, value in super().cells(column, with_rows=True):\n yield index, row, value\n index += 1\n else:\n yield from enumerate(super().cells(column))\n\n def writerow(self, index, row, add=None):\n self.writer.writerow(self.formatrow(row, add, index=index))\n\n return AbstractThreadsafeCasanovaEnricher, AbstractCasanovaEnricher\n\n\nThreadsafeCasanovaEnricher, CasanovaEnricher = make_enricher(\n 'CasanovaEnricher',\n 'casanova.enricher',\n CasanovaReader\n)\n","sub_path":"casanova/enricher.py","file_name":"enricher.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197481826","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\nLEN=1500\nTEST_SIZE=0.5\nF_LEN=12328\nLEN_TRAIN =int( LEN * (1- TEST_SIZE))\nLEN_TEST = int(LEN * TEST_SIZE)\ndef readTable(name):\n table = pd.read_table(name,sep=',',index_col=0)\n #table = table.sample(frac=1.0)\n table = table.values\n return table\ndef saveTable(data,name):\n table=pd.DataFrame(data)\n table.to_csv(name,header=None,index=None)\n\nlistname=os.listdir('data2')\nprint(listname)\ntables=[]\ntrainTable=np.zeros([LEN_TRAIN,12329],np.float32)\nvalidationTable=np.zeros([LEN_TEST,12329],np.float32)\nfor i in range(15):\n csv='data2/'+ listname[i]\n tables.append(readTable(csv))\n\ntable = np.concatenate(tables,axis=0)\nprint(table.shape)\ninds = np.arange(LEN)\nprint(inds)\nprint(inds.shape)\ninds_train, inds_test = train_test_split(inds, 
test_size=TEST_SIZE)\nnp.save('inds_train_9.npy',inds_train)\nnp.save('inds_test_9.npy',inds_test)\n\n\nfor i in range(inds_train.shape[0]):\n trainTable[i][:F_LEN]=table[inds_train[i]]\n trainTable[i][F_LEN]= int(inds_train[i]/100)\n\nfor i in range(inds_test.shape[0]):\n validationTable[i][:F_LEN]=table[inds_test[i]]\n validationTable[i][F_LEN] = int(inds_test[i]/100)\n\n\nsaveTable(trainTable,'trainsample_9.csv')\nsaveTable(validationTable,'validationsample_9.csv')","sub_path":"code/new_sample.py","file_name":"new_sample.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"269852403","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# import seaborn as sns\n# import math\n# import time\n\nnp.random.seed(100)\n\nfont = {'family': 'sans-serif',\n 'weight': 'normal',\n 'size': 12}\n\nmatplotlib.rc('font', **font)\n\ndef import_data():\n \"\"\"\n import simulation data from csv file\n :return: data: Dataframe\n \"\"\"\n sim_data = pd.read_csv('simulation_results.csv')\n # remove results from first dummy simulation\n sim_data = sim_data.iloc[sim_data[sim_data.t == 0].index[1]:, :]\n sim_data.reset_index(drop=True, inplace=True)\n print(sim_data.shape)\n print(sim_data.head())\n\n return sim_data\n\n\ndef construct_outed_line():\n start = 0\n current_sample_no = no_samples_per_line\n for line in out_line_loc:\n ix = current_sample_no\n if ix == len(sim_index):\n ix -= 1\n outed_line[start:] = line\n break\n outed_line[start:sim_index[ix]] = line\n start = sim_index[ix]\n current_sample_no += no_samples_per_line\n # print(ix, current_sample_no)\n\n\ndef construct_simu_no():\n sim_no = []\n for i in range(len(out_line_loc)):\n sim_no.append(np.arange(1, no_samples_per_line + 1))\n start = 0\n for ix, no in zip(sim_index.insert(len(sim_index), len(pmu_results) + 1).drop(0), np.ravel(sim_no)):\n simulation_no[start:ix] = no\n start = ix\n\n\ndef plot_lines(line_no, simulation_no):\n # line_no, i = 0, 1, ..., L\n # simulation_no, j = 1, 2, ..., S\n data_to_plot = pmu_results.loc[(pmu_results.outed_line == out_line_loc[line_no])\n & (pmu_results.simulation_no == simulation_no)]\n data_to_plot_2 = pmu_results.loc[(pmu_results.outed_line == out_line_loc[line_no + 1])\n & (pmu_results.simulation_no == simulation_no)]\n plt.figure(figsize=[12, 6])\n plt.xlabel('Time (s)')\n plt.ylabel('Theta (p.u.)')\n plt.plot(data_to_plot.t, data_to_plot.iloc[:, 1:(len(pmu_loc) + 1)], '--')\n plt.legend(['PMU_' + str(i) for i in pmu_loc])\n plt.plot(data_to_plot_2.t, data_to_plot_2.iloc[:, 1:4])\n plt.tight_layout()\n\n\n# Visualize PMU data for line #line_no\ndef plot_data_by_line(line_no):\n \"\"\"\n plots all instances of simulation of the PMU data by outage line\n line_no: int, the index for outed line, i = 0, 1, ..., L\n \"\"\"\n # simulation_no = 20 # j = 1, 2, ..., S\n plt.figure(figsize=[10, 5])\n plt.xlabel('Time (s)')\n plt.ylabel('Theta (p.u.)')\n plt.legend(['PMU_' + str(i) for i in pmu_loc])\n plt.title('Outed: Line %i' % int(out_line_loc[line_no]))\n plt.tight_layout()\n\n for i in range(no_samples_per_line):\n data_to_plot = pmu_results.loc[(pmu_results.outed_line == out_line_loc[line_no])\n & (pmu_results.simulation_no == i)]\n plt.plot(data_to_plot.t, data_to_plot.iloc[:, 1:(len(pmu_loc) + 1)])\n\n\ndef find_data_to_plot(num):\n return pmu_results.loc[(pmu_results.outed_line == out_line_loc[1])\n & (pmu_results.simulation_no == num)]\n\n\nif __name__ == 
'__main__':\n data = import_data()\n\n # Get simulation options such as\n # location of buses with PMUs\n # location of outage lines\n # number of samples simulated per line\n pmu_loc = [23, 26, 39]\n # out_line_loc = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,\n # 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34]\n out_line_loc = [0, 5, 10]\n no_samples_per_line = 5\n\n # Keep only data from buses with PMUs\n data_index = pmu_loc.copy()\n data_index.insert(0, 0) # to include column \"t\"\n pmu_results = data.iloc[:, data_index]\n\n # Separate data by lines which are outed\n sim_index = pmu_results[pmu_results.t == 0].index\n\n # Create columns for outed line and simulation no\n outed_line = np.zeros(len(pmu_results))\n simulation_no = np.zeros(len(pmu_results))\n\n construct_outed_line()\n construct_simu_no()\n plt.plot(outed_line)\n plt.plot(simulation_no)\n plt.show()\n\n pmu_results = pmu_results.assign(outed_line=outed_line)\n pmu_results = pmu_results.assign(simulation_no=simulation_no)\n print(pmu_results.head())\n\n plot_lines(1, 2)\n plt.show()\n\n plt.figure(figsize=[15, 10])\n plt.plot(find_data_to_plot(1).t, find_data_to_plot(1).iloc[:, 3], '-.')\n plt.plot(find_data_to_plot(2).t, find_data_to_plot(2).iloc[:, 3], '-')\n plt.plot(find_data_to_plot(3).t, find_data_to_plot(3).iloc[:, 3], '--')\n plt.plot(find_data_to_plot(4).t, find_data_to_plot(4).iloc[:, 3], '-*')\n plt.plot(find_data_to_plot(5).t, find_data_to_plot(5).iloc[:, 3], '-o')\n plt.legend('12345')\n plt.tight_layout()\n plt.show()\n","sub_path":".ipynb_checkpoints/main_script-checkpoint.py","file_name":"main_script-checkpoint.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113838609","text":"from itertools import izip\nimport json\nfrom datetime import datetime\n\nfrom scipy.weave.ext_tools import indent\n\nn = 0.0\navg_description = 0\navg_title = 0\navg_version = 0\navg_license = 0\navg_no_of_fields = 0\n\ndef no_of_fields_score(json_line):\n count = 0\n score = 0.0\n for key in json_line:\n count += 1\n if count<20:\n score = count/20.0\n if count >= 20:\n score =1\n return count # changed from score\n\ndef get_metadata_score(json_line):\n global n, avg_description, avg_title, avg_version, avg_license, avg_no_of_fields\n metadata_score = 0\n if 'description' in json_line:\n avg_description += 1\n if 'title' in json_line:\n avg_title += 1\n if 'version' in json_line:\n avg_version += 1\n if 'license' in json_line:\n avg_license += 1\n #calculates scores based on the keys in the json file\n avg_no_of_fields += no_of_fields_score(json_line)\n #adds DOI score\n metadata_score+=1\n #for long term management constant score 1\n metadata_score+=1\n #normalizing the score\n metadata_score = '%.3f'%(metadata_score/6) \n n += 1\n\n\n \n \ndef main():\n global n, avg_description, avg_title, avg_version, avg_license, avg_no_of_fields\n\n merged_file_path = 'merged_features.json'\n\n with open(merged_file_path) as merged_data:\n for line in merged_data:\n json_line = json.loads(line.strip())\n get_metadata_score(json_line)\n \n avg_description /= n\n avg_title /= n\n avg_version /= n\n avg_license /= n\n avg_no_of_fields /= n\n print(\"\"\"Percentage of files with description: {}%\nPercentage of files with title: {}%\nPercentage of files with version: {}%\nPercentage of files with license: {}%\nAverage number of fields: {}\nPercentage of files with DOI: 100%\nPercentage of files that are indexed: 100%\"\"\"\n 
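# avg_* were already divided by the file count n above, so multiplying by 100 below reports percentages\n          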
.format(avg_description * 100, avg_title * 100, avg_version * 100, avg_license * 100, avg_no_of_fields))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"metadata-quality/metadata_quality_average.py","file_name":"metadata_quality_average.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272875852","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('shoppingcart.views',\n url(r'^$', 'shoppingcart', name='shoppingcart'),\n url(r'^add_to_cart/(?P.*)', 'add_to_cart', name=\"add_to_cart\"),\n url(r'^remove_from_cart/(?P.*)', 'remove_from_cart', name=\"remove_from_cart\"),\n url(r'^apply_coupon', 'apply_coupon', name=\"apply_coupon\"),\n url(r'^thankyou', 'thankyou', name=\"thankyou\"),\n url(r'^paypal_notification/(?P.*)', 'paypal_notification', name=\"paypal_notification\"),\n)\n","sub_path":"shoppingcart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"154219033","text":"W = 4\r\nn = 2\r\ncs = [10, 12]\r\nws = [2, 3]\r\n\r\nitems = []\r\n\r\nfor i in range(n):\r\n items.append((cs[i], ws[i]))\r\n\r\nitems.sort(key=lambda item: item[0] / item[1], reverse=True)\r\n\r\ncur_w = 0\r\ncur_c = 0\r\ncount = 0\r\n\r\nfor c, w in items:\r\n new_w = cur_w + w\r\n if new_w <= W:\r\n count += 1\r\n cur_w = new_w\r\n cur_c += c\r\n else:\r\n break\r\n\r\nprint('Items: ', count)\r\nprint('Weight: ', cur_w, '/', W)\r\nprint('Total cost: ', cur_c)\r\n","sub_path":"discrete_bag.py","file_name":"discrete_bag.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197656825","text":"\"\"\"\nCopyright 2014 Paul Montgomery\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nSystem Resource Demo\n--------------------\nInstallation:\n* pip install psutil\n* cp resource_linegraph.cfg linegraph.cfg\nExecute: python resouce_demo.py\n\nThis demo reads the system CPU, memory and disk usage percentages and\ndisplays the results as a percentage on a line graph. 
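A new reading is taken every five seconds (via the time.sleep(5) at the end of the loop in run()). 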
The library\nused should work on Linux, Mac and Windows platforms.\n\"\"\"\n\nimport datetime\nimport os\nimport shutil\nimport time\n\nimport psutil\n\n\ndef run():\n    psutil.cpu_percent()  # needs an initial call before readings are meaningful\n    print(\"Cleaning up old linegraph data...\")\n    try:\n        os.remove(\"linegraph.tsv\")\n    except Exception:\n        pass\n    time.sleep(2)\n    shutil.copy2(\"linegraph.cfg\", \"linegraph.del\")\n    print(\"Starting resource monitoring...\")\n    while True:\n        timestamp = \"{}\".format(datetime.datetime.utcnow())[:-7]\n        cpu_percent = psutil.cpu_percent()\n        virt_mem_data = psutil.virtual_memory()\n        virt_mem_percent = virt_mem_data.percent\n        disk_data = psutil.disk_usage('/')\n        disk_percent = disk_data.percent\n        print(\"{} - CPU: {}%, Memory: {}%, Disk Used: {}%\".format(\n            timestamp, cpu_percent, virt_mem_percent, disk_percent))\n        out_str = \"date\\tfloat0\\tfloat1\\tfloat2\\n\"\n        out_str += \"{}\\t{}\\t{}\\t{}\\n\".format(\n            timestamp, cpu_percent, virt_mem_percent, disk_percent)\n        fptr = open(\"linegraph.tsv\", \"w\")\n        fptr.write(out_str)\n        fptr.close()\n        time.sleep(5)\n\n\nif __name__ == \"__main__\":\n    print(\"Resource utilization demo beginning...\")\n    run()\n    print(\"Resource utilization demo finished.\")","sub_path":"gui_widgets/linegraph/resource_demo.py","file_name":"resource_demo.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537809730","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom detectors_upload.FastSaliency_lts import FastSaliency\nfrom evaluation.uploaded.rocauc_ljh import Roc\nfrom utils.datasets import *\n\nif __name__ == '__main__':\n    images = MDFA(base_dir=\"C:/image/MDFA/\")\n    # images = SIRST()\n    detector = FastSaliency(\"facet\")\n\n    tp = np.zeros(len(images))\n    fp = np.zeros(len(images))\n    for i in range(2):\n        imgin, mask = images[i]\n        detector.process(imgin)\n        imgout = detector.result\n        evaluator = Roc()\n        evaluator.update(imgout, mask)\n        X, Y, auc = evaluator.get_all()\n        # print(fp[i], tp[i])\n        plt.axis([0, 1, 0, 1])\n        plt.xlabel(\"FAR\")\n        plt.ylabel(\"TPR\")\n        plt.title(\"ROC\")\n        plt.plot(X, Y)\n        print(\"AUC for image %d:\" % (i + 1), auc)\n        plt.legend()\n        plt.show()\n\n    # plt.plot(np.sort(fp), np.sort(tp), marker='o', mec='r', mfc='w', label=u'roc')\n    # plt.legend()\n    # plt.show()\n\n","sub_path":"evaluation/uploaded/examplerocauc_;jh.py","file_name":"examplerocauc_;jh.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90353999","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import validation_curve\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\n# Reading the training and testing data\r\nX_train = np.loadtxt('X_train.txt')\r\nY_train = np.loadtxt('y_train.txt')\r\nX_test = np.loadtxt('X_test.txt')\r\nY_test = np.loadtxt('y_test.txt')\r\nprint('x_train shape:', X_train.shape)\r\nprint('y_train shape:', Y_train.shape)\r\nprint('x_test shape:', X_test.shape)\r\nprint('y_test shape:', Y_test.shape)\r\n\r\n# Normalizing the data\r\nscaler = StandardScaler()\r\nscaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nprint(X_train)\r\nprint(X_train.shape)\r\n\r\n# PCA (optional dimensionality reduction, currently disabled)\r\n# pca = PCA(n_components=5)\r\n# pca.fit(X_train)\r\n# X_train = pca.transform(X_train)\r\n# print(X_train.shape)\r\n\r\ndef mlpPlot():\r\n    param_range_gamma = [1, 0.1, 0.01, 0.001, 0.0001]\r\n    train_scores, valid_scores = validation_curve(SVC(random_state=101), X_train, Y_train, 'gamma', param_range_gamma,\r\n                                                  cv=3, verbose=True, n_jobs=-1)\r\n    train_scores_mean = np.mean(train_scores, axis=1)\r\n    train_scores_std = np.std(train_scores, axis=1)\r\n    test_scores_mean = np.mean(valid_scores, axis=1)\r\n    test_scores_std = np.std(valid_scores, axis=1)\r\n    plt.title(\"Validation Curve with SVM PCA(5)\")\r\n    plt.xlabel(\"gamma\")\r\n    plt.ylabel(\"Score\")\r\n    plt.ylim(0.0, 1.1)\r\n    lw = 2\r\n    plt.semilogx(param_range_gamma, train_scores_mean, label=\"Training score\",\r\n                 color=\"darkorange\", lw=lw)\r\n    plt.fill_between(param_range_gamma, train_scores_mean - train_scores_std,\r\n                     train_scores_mean + train_scores_std, alpha=0.2,\r\n                     color=\"darkorange\", lw=lw)\r\n    plt.semilogx(param_range_gamma, test_scores_mean, label=\"Cross-validation score\",\r\n                 color=\"navy\", lw=lw)\r\n    plt.fill_between(param_range_gamma, test_scores_mean - test_scores_std,\r\n                     test_scores_mean + test_scores_std, alpha=0.2,\r\n                     color=\"navy\", lw=lw)\r\n    plt.legend(loc=\"best\")\r\n    plt.show()\r\n\r\nif __name__ == 
'__main__':\r\n    mlpPlot()","sub_path":"SVMgamma.py","file_name":"SVMgamma.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"595339307","text":"\"\"\"\nThis module can be used to download data from the observatory server.\nTypically you first track your model data with :func:`start_run `.\n\nOnce you have collected data you can download the model data using the :func:`download_model `\nfunction. \n\nPlease refer to the individual function specs for more information on how to use these functions.\n\"\"\"\nimport re\nimport tempfile\n\nimport requests\nfrom observatory import archive, settings\nfrom observatory.constants import LABEL_PATTERN\n\n\ndef download_model(**kwargs):\n    \"\"\"\n    Downloads a model from the server and stores it in a local folder.\n\n    This method will download a tarball from the server and extract it in a folder specified with the path argument.\n    The model folder will contain all outputs you stored for the model.\n\n    Additionally a settings.json file is included, which contains the settings that you stored earlier.\n    Finally, a metadata.json file is included, which contains all the necessary metadata for the model:\n    the name, version, experiment ID and run ID.\n\n    Parameters\n    ----------\n    model : str\n        The name of the model\n    version : int\n        The version number of the model\n    experiment : str, optional\n        The name of the experiment\n    run_id : str\n        The ID of the run\n    path : str, optional\n        The path to store the model, defaults to the current working folder.\n\n    Returns\n    -------\n    The path to the model folder. You can access all the files in this folder.\n    \"\"\"\n\n    model = kwargs.get('model', None)\n    version = kwargs.get('version', None)\n    run_id = kwargs.get('run_id', None)\n    experiment = kwargs.get('experiment', 'default')\n    path = kwargs.get('path', '.')\n\n    if model is None:\n        raise AssertionError('Please provide a model to download')\n\n    if version is None:\n        raise AssertionError('Please provide a version to download')\n\n    if run_id is None:\n        raise AssertionError('Please provide the ID of the run to download')\n\n    if version <= 0:\n        raise AssertionError('Version must be greater than zero')\n\n    if not re.match(LABEL_PATTERN, model):\n        raise AssertionError('name is invalid. It can contain ' +\n                             'lower-case alpha-numeric characters and dashes only.')\n\n    if experiment != 'default' and not re.match(LABEL_PATTERN, experiment):\n        raise AssertionError('experiment is invalid. 
It can contain ' +\n 'lower-case alpha-numeric characters and dashes only.')\n\n handler_url = f'{settings.server_url}/api/models/{model}/versions/{version}/experiments/{experiment}/runs/{run_id}/archive'\n response = requests.get(handler_url)\n\n if response.status_code == 200:\n _, temp_filename = tempfile.mkstemp('.tar.gz')\n\n with open(temp_filename, 'wb') as archive_file:\n for chunk in response.iter_content(1024):\n archive_file.write(chunk)\n\n archive.extract(temp_filename, path)\n else:\n raise RuntimeError(f'Failed to download model, the server returned an error with status code {response.status_code}: {response.json()[\"message\"]}')\n","sub_path":"observatory/serving.py","file_name":"serving.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"606958103","text":"from api import utils\nfrom api import strings\nfrom api.models import CallDetail\nfrom api.models import PriceRule\nfrom api.models import PriceRuleDetail\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nimport datetime\n\n\nclass TelephoneBillViewSet(viewsets.ViewSet):\n \"\"\"\n API endpoint to TelephoneBill\n \"\"\"\n\n def __get_call_bill(self, call):\n \"\"\"\n Method to get the bill for a call.\n\n Parameters:\n call (dictionary): dictionary containing the call informations\n used to calculate the bill.\n\n return:\n response (dictionary): if the bill was correctly calculated, this\n method will return the bill inside a dictionary with a success\n message. Otherwise, a error message informing the error cause will\n be sent.\n \"\"\"\n response = None\n call_id = list(call.keys())[0]\n call = call.get(call_id)\n start = utils.standardize_date(call.get(strings.START_KEY),\n strings.COMPLETE_DATE_PATTERN)\n end = utils.standardize_date(call.get(strings.END_KEY),\n strings.COMPLETE_DATE_PATTERN)\n destination = call.get(strings.DESTINATION_KEY)\n total = end - start\n total_minutes = ((total.days * 24 * 60) + (total.seconds // 60))\n hours = (total.days * 24) + (total.seconds / (60 * 60))\n minutes = (total.seconds % (60 * 60)) / 60\n seconds = total.seconds % 60\n duration = (strings.HOUR_MINUTE_SECOND_PATTERN %\n (hours, minutes, seconds))\n call_bill = {strings.DESTINATION_KEY: destination,\n strings.START_DATE_KEY: start.date(),\n strings.START_TIME_KEY: start.time(),\n strings.DURATION_KEY: duration}\n price_rules = PriceRule.objects.filter(\n created_date__lt=end).order_by('-id')\n price_rule = None\n if price_rules:\n price_rule = price_rules[0]\n price_rule_details = PriceRuleDetail.objects.filter(\n price_id=price_rule.id)\n final_price = 0\n left_time = total_minutes\n while left_time > 0:\n selected_rule = self.__select_price_rule(start,\n price_rule_details)\n if selected_rule:\n rule_period = utils.get_period_between_time(\n selected_rule.start,\n selected_rule.end)\n left_time = total_minutes - rule_period\n if left_time >= 0:\n start += datetime.timedelta(seconds=rule_period * 60)\n total_minutes -= rule_period\n final_price += (selected_rule.standing_charge +\n rule_period *\n selected_rule.call_charge)\n else:\n final_price += (selected_rule.standing_charge +\n total_minutes *\n selected_rule.call_charge)\n call_bill[strings.PRICE_KEY] = round(final_price, 2)\n response = call_bill\n else:\n left_time = 0\n response = {strings.ERROR_KEY: strings.NO_PRICE_RULE_ERROR}\n else:\n response = {strings.ERROR_KEY: 
strings.NO_PRICE_RULE_ERROR}\n return response\n\n def __get_period_calls(self, source, period):\n \"\"\"\n Method to get all the calls of the same source for a given period.\n\n Parameters:\n source (string): the source number of the calls\n period (string): the period in the YYYY-MM format informing the\n month and the year that we want to get the calls.\n\n Return\n period_calls (list): a list of calls if it exists, or an empty\n list.\n \"\"\"\n call_details = CallDetail.objects.filter(call_id__source=source)\n period_calls = []\n if call_details:\n call_data = {}\n end_calls = []\n for call_detail in call_details:\n if not call_detail.start:\n check_same_year = call_detail.timestamp.year == period.year\n check_same_month = (call_detail.timestamp.month ==\n period.month)\n if check_same_year and check_same_month:\n end_calls.append(call_detail)\n complete_calls = []\n for end_call in end_calls:\n for call_detail in call_details:\n if call_detail.call_id == end_call.call_id:\n if call_detail.start:\n complete_calls.append(call_detail)\n complete_calls.append(end_call)\n for call_detail in complete_calls:\n call_id = call_detail.call_id.id\n destination = call_detail.call_id.destination\n if call_id not in call_data.keys():\n call_data[call_id] = {}\n if call_detail.start:\n call_data[call_id][strings.START_KEY] =\\\n call_detail.timestamp\n else:\n call_data[call_id][strings.END_KEY] =\\\n call_detail.timestamp\n call_data[call_id][strings.DESTINATION_KEY] = destination\n check_start = call_data[call_id].get(strings.START_KEY)\n check_end = call_data[call_id].get(strings.END_KEY)\n if check_start and check_end:\n period_calls.append(call_data)\n call_data = {}\n return period_calls\n\n def __select_price_rule(self, start, price_rule_details):\n \"\"\"\n Method to select a price rule from a list of price rules, according to\n the call start.\n\n Parameters:\n start (datetime.time) - when the call started\n price_rule_details - list of price rules\n\n Return:\n selected_rule (PriceRuleDetail) - the selected rule or None.\n \"\"\"\n selected_rule = None\n for rule_detail in price_rule_details:\n if rule_detail.start > rule_detail.end:\n if not (rule_detail.end < start.time() <\n rule_detail.start):\n selected_rule = rule_detail\n break\n else:\n if rule_detail.start <= start.time() < rule_detail.end:\n selected_rule = rule_detail\n break\n return selected_rule\n\n def list(self, request):\n \"\"\"\n GET /api/telephonebill?source=&period=\n \"\"\"\n response = None\n source = request.query_params.get(strings.SOURCE_KEY)\n if source:\n period = request.query_params.get(strings.PERIOD_KEY)\n if not period:\n period = utils.get_last_month_period()\n period = utils.standardize_date(period, strings.YEAR_MONTH_PATTERN)\n if not period:\n content = {strings.INPUT_ERROR_KEY:\n strings.PERIOD_FORMAT_ERROR}\n response = Response(content, status.HTTP_400_BAD_REQUEST)\n else:\n period_calls = self.__get_period_calls(source, period)\n bill_list = []\n complete_bill = {}\n final_price = 0\n for call in period_calls:\n print(\"****\")\n print(call)\n bill = self.__get_call_bill(call)\n if strings.ERROR_KEY not in bill.keys():\n final_price += float(bill.get(strings.PRICE_KEY))\n bill_list.append(bill)\n if bill_list:\n complete_bill[strings.FINAL_PRICE_KEY] = round(final_price,\n 2)\n complete_bill[strings.CALLS_KEY] = bill_list\n response = Response(complete_bill, status.HTTP_200_OK)\n else:\n content = {strings.ERROR_KEY:\n strings.BILLS_NOT_FOUND_ERROR}\n response = Response(content, 
status.HTTP_404_NOT_FOUND)\n else:\n content = {strings.INPUT_ERROR_KEY: strings.SOURCE_MISSED_ERROR}\n response = Response(content, status.HTTP_400_BAD_REQUEST)\n return response\n","sub_path":"api/views/telephone_bill_viewset.py","file_name":"telephone_bill_viewset.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"418456045","text":"from flask import render_template, Blueprint, jsonify, request\n\nfrom catalog.models import Book\nfrom catalog.users.forms import LoginForm\n\nbooks = Blueprint('books', __name__)\n\n\n@books.route('/get_book_list', methods=['POST'])\ndef get_book_list():\n page = request.json['page']\n # author filter\n book_list = Book.query.paginate(page=page, per_page=10)\n data = {\n 'num_pages': [idx for idx in range(book_list.pages)],\n }\n book_data = []\n for book in book_list.items:\n book_data.append({\n 'id': book.id,\n 'title': book.title,\n 'author': [author.name for author in book.author],\n 'isbn': book.isbn,\n 'rating': book.average_rating,\n 'availability': book.availability()\n })\n data['book_data'] = book_data\n data['page_num'] = page\n return jsonify(data)\n\n\n@books.route('/book/')\ndef display_book_details(book_id):\n book = Book.query.get(book_id)\n login_form = LoginForm()\n return render_template('book.html', book=book, login_form=login_form)\n","sub_path":"catalog/books/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647044653","text":"def path_finder(a):\n matrix = list(map(list, a.splitlines()))\n length = len(matrix)\n s = (0,0)\n t = (length - 1,length - 1)\n level = {s: 0}\n parent = {s: 0}\n i = 1\n frontier = [s]\n while frontier:\n next = []\n for u in frontier:\n x,y = u\n for x, y in (x, y-1), (x, y+1), (x-1, y), (x+1, y):\n if 0 <= x < length and 0 <= y < length:\n if (x,y) not in level and matrix[x][y] != 'W':\n level[(x,y)] = i\n parent[(x,y)] = u\n next.append((x,y))\n if (x,y) == t:\n return level[(x,y)]\n\n frontier = next\n i += 1\n return False\n","sub_path":"4kyu/path_finder_2.py","file_name":"path_finder_2.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241508134","text":"# Licensed with the MIT License, see LICENSE for details\n\n__all__ = [\n 'SkyMapper'\n]\n\nimport io\nimport requests\nimport numpy as np\nfrom astropy.io import votable\nfrom astroquery.utils.tap.core import Tap\nfrom .catalog import Catalog, TableDefinition\n\n# column names and SQLite type\nCOLUMN_DEFS = (\n ('object_id', 'INTEGER PRIMARY KEY'),\n ('raj2000', 'FLOAT'),\n ('dej2000', 'FLOAT'),\n ('e_raj2000', 'FLOAT'),\n ('e_dej2000', 'FLOAT'),\n ('smss_j', 'TEXT'),\n ('flags', 'INTEGER'),\n ('ngood', 'INTEGER'),\n ('ngood_min', 'INTEGER'),\n ('u_flags', 'INTEGER'),\n ('u_ngood', 'INTEGER'),\n ('v_flags', 'INTEGER'),\n ('v_ngood', 'INTEGER'),\n ('g_flags', 'INTEGER'),\n ('g_ngood', 'INTEGER'),\n ('r_flags', 'INTEGER'),\n ('r_ngood', 'INTEGER'),\n ('i_flags', 'INTEGER'),\n ('i_ngood', 'INTEGER'),\n ('z_flags', 'INTEGER'),\n ('z_ngood', 'INTEGER'),\n ('class_star', 'FLOAT'),\n ('a', 'FLOAT'),\n ('e_a', 'FLOAT'),\n ('b', 'FLOAT'),\n ('e_b', 'FLOAT'),\n ('u_psf', 'FLOAT'),\n ('e_u_psf', 'FLOAT'),\n ('v_psf', 'FLOAT'),\n ('e_v_psf', 'FLOAT'),\n ('g_psf', 'FLOAT'),\n ('e_g_psf', 'FLOAT'),\n ('r_psf', 'FLOAT'),\n ('e_r_psf', 
'FLOAT'),\n ('i_psf', 'FLOAT'),\n ('e_i_psf', 'FLOAT'),\n ('z_psf', 'FLOAT'),\n ('e_z_psf', 'FLOAT'),\n ('prox', 'FLOAT'),\n ('prox_id', 'INTEGER')\n)\n\n\nclass SkyMapper(Catalog):\n def __init__(self, dbfile, max_records=2000, **kwargs):\n filter2col = {}\n for f in 'uvgriz':\n filter2col[f] = {\n 'mag': f + '_psf',\n 'err': 'e_' + f + '_psf'\n }\n skym = TableDefinition('skymapper', COLUMN_DEFS, 'object_id',\n 'raj2000', 'dej2000', filter2col)\n super().__init__(dbfile, skym, max_records=max_records, **kwargs)\n\n def fetch_field(self, sources, scale=1.25):\n \"\"\"Fetch catalog sources for this field and save to database.\n\n Search radius and center are derived from the source list.\n\n Parameters\n ----------\n sources : SkyCoord\n Sources to be matched.\n\n scale : float, optional\n Search radius scale factor.\n\n \"\"\"\n sr = max((sources.separation(c).max() for c in sources)) * scale / 2\n\n self.logger.debug(\n ('Fetching SkyMapper catalog from ASVO over {:.2g}'\n ' field-of-view.').format(sr))\n\n q = '''\n SELECT TOP {max}\n {columns}\n FROM dr1.master\n WHERE 1=CONTAINS(POINT('ICRS', raj2000, dej2000),\n CIRCLE('ICRS', {ra}, {dec}, {sr}))\n ORDER BY ngood DESC\n '''.format(\n max=self.max_records,\n columns=','.join(self.table.columns),\n ra=np.mean(sources.ra.deg),\n dec=np.mean(sources.dec.deg),\n sr=sr.deg\n )\n # self.logger.debug(q)\n\n skym = Tap(url='http://skymappertap.asvo.nci.org.au/ncitap/tap/')\n job = skym.launch_job(q)\n tab = job.get_results()\n\n self.logger.debug('Updating {} with {} sources.'.format(\n self.table.name, len(tab)))\n\n self.db.executemany('''\n INSERT OR IGNORE INTO {}\n VALUES({})\n '''.format(self.table.name, ','.join('?' * len(self.table.columns))),\n tab)\n self.db.commit()\n","sub_path":"calviacat/skymapper.py","file_name":"skymapper.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537809730","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom detectors_upload.FastSaliency_lts import FastSaliency\nfrom evaluation.uploaded.rocauc_ljh import Roc\nfrom utils.datasets import *\n\nif __name__ == '__main__':\n images = MDFA(base_dir=\"C:/image/MDFA/\")\n # images = SIRST()\n detector = FastSaliency(\"facet\")\n\n\n tp=np.zeros(len(images))\n fp=np.zeros(len(images))\n for i in range(2):\n imgin, mask = images[i]\n detector.process(imgin)\n imgout = detector.result\n Evalution=Roc()\n Evalution.update(imgout,mask)\n X,Y,auc=Evalution.get_all()\n # print(fp[i],tp[i])\n plt.axis([0, 1, 0, 1])\n plt.xlabel(\"FAR\")\n plt.ylabel(\"TPR\")\n plt.title(\"ROC\")\n plt.plot(X, Y)\n print(\"第%d张图的AUC值为\"%(i+1),auc)\n plt.legend()\n plt.show()\n\n\n # plt.plot(np.sort(fp), np.sort(tp), marker='o', mec='r', mfc='w', label=u'roc')\n # plt.legend()\n # plt.show()\n\n","sub_path":"evaluation/uploaded/examplerocauc_;jh.py","file_name":"examplerocauc_;jh.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"497841254","text":"\"\"\"\nInterconversion between arrays, lists, dictionaries\n\"\"\"\n\nimport numpy as np\n\ndef dict_from_list(arglist, dictionary):\n \"\"\"\n Returns a new dictionary with the same keys as `dictionary` where values\n are set from arglist. \n \n Assumes that `arglist` has the same length as `dictionary`, and that the\n list items are ordered in the same order that `dictionary.keys()` were \n defined. 
Alternatively, `dictionary` can be a dictionary.keys() instance.\n \n Parameters\n ----------\n arglist : array-like\n Must have the same number of elements as `dictionary`.\n dictionary : dictionary, or dictionary.keys()\n The output of this function will be a new dictionary with the same \n set of keys as `dictionary`.\n \n Raises\n ------\n TypeEerror : If `dictionary` is not of the correct type.\n \n Returns\n -------\n new_dict : dictionary\n A new dictionary with same set of keys as `dictionary` and corresponding\n values set to values of `arglist`.\n \"\"\"\n import collections\n new_dict = {}\n \n if isinstance(dictionary, dict):\n for i, key in enumerate(list(dictionary.keys())):\n new_dict[key] = arglist[i]\n elif isinstance(dictionary,collections.abc.KeysView):\n for i, key in enumerate(list(dictionary)):\n new_dict[key] = arglist[i]\n else:\n raise TypeError('dictionary must be of type dict or dict_keys.')\n #issue_error\n \n return new_dict\n\ndef list_to_array(listorarray):\n if isinstance(listorarray, list):\n return np.array(listorarray)\n elif isinstance(listorarray, np.ndarray):\n return listorarray\n #issue_error\n\n \ndef make_2d(array):\n array = list_to_array(array)\n if len(array.shape) ==1:\n return(np.reshape(array, (1,len(array))))\n elif len(array.shape) > 2:\n return np.concatenate(array)\n else:\n return array\n\n\n","sub_path":"KinetiKit/kit/_ald.py","file_name":"_ald.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"100785727","text":"#rotation using python\n\n#CODE FROM GEEKS FOR GEEKS\n\"\"\"# Python3 program to rotate an array by \n# d elements \n# Function to left rotate arr[] of size n by d*/ \ndef leftRotate(arr, d, n): \n\tfor i in range(d): \n\t\tleftRotatebyOne(arr, n) \n\n# Function to left Rotate arr[] of size n by 1*/ \ndef leftRotatebyOne(arr, n): \n\ttemp = arr[0] \n\tfor i in range(n-1): \n\t\tarr[i] = arr[i + 1] \n\tarr[n-1] = temp \n\t\t\n\n# utility function to print an array */ \ndef printArray(arr, size): \n\tfor i in range(size): \n\t\tprint (\"% d\"% arr[i], end =\" \") \n\n\n# Driver program to test above functions */ \narr = [1, 2, 3, 4, 5, 6, 7] \nleftRotate(arr, 2, 7) \nprintArray(arr, 7) \n\n# This code is contributed by Shreyanshi Arun \n\"\"\"\n\n#rotation using a temp array\ndef rotation_1(lst, d):\n lst1 = []\n lst1 = lst1+(lst[d:len(lst)])\n lst1 = lst1+(lst[:d])\n return lst1\n\n\n#rotation using one-by-one\ndef rotation_2(lst,d):\n n = len(lst)\n #for rotatin d no of times\n for i in range(d):\n j = 0\n temp = lst[0]\n #for shifting of arrays\n while(j
Kernel\"))\r\n        self.gamma.setText(_translate(\"SVM\", \"Gamma\"))\r\n        self.decision_function_shape.setText(_translate(\"SVM\", \"Dec_Func_Shape\"))\r\n        self.kernellist.setItemText(0, _translate(\"SVM\", \"rbf\"))\r\n        self.kernellist.setItemText(1, _translate(\"SVM\", \"linear\"))\r\n        self.kernellist.setItemText(2, _translate(\"SVM\", \"poly\"))\r\n        self.kernellist.setItemText(3, _translate(\"SVM\", \"sigmoid\"))\r\n        self.kernellist.setItemText(4, _translate(\"SVM\", \"precomputed\"))\r\n        self.gammalist.setItemText(0, _translate(\"SVM\", \"scale\"))\r\n        self.gammalist.setItemText(1, _translate(\"SVM\", \"auto\"))\r\n        self.decision_function_shapelist.setItemText(0, _translate(\"SVM\", \"ovr\"))\r\n        self.decision_function_shapelist.setItemText(1, _translate(\"SVM\", \"ovo\"))\r\n        self.verbose.setText(_translate(\"SVM\", \"Verbose\"))\r\n        self.C.setText(_translate(\"SVM\", \"C\"))\r\n        self.probability.setText(_translate(\"SVM\", \"Probability\"))\r\n        self.max_iter.setText(_translate(\"SVM\", \"Max_iter\"))\r\n        self.Shrinking.setText(_translate(\"SVM\", \"Shrinking\"))\r\n        self.degree.setText(_translate(\"SVM\", \"Degree
\"))\r\nimport Images_rc\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n app.setStyle('Fusion')\r\n SVM = QtWidgets.QDialog()\r\n ui = Ui_SVM()\r\n ui.setupUi(SVM)\r\n SVM.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":14843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"132956889","text":"\"\"\"\nMySQl连接类\n\"\"\"\nfrom scrapy.utils.project import get_project_settings\nimport pymysql\nimport logging\nfrom XiaomanCustoms.settings import MYSQL_LOCALHOST\n\nsettings = get_project_settings()\n\n\nclass MySQLConnection(object):\n \"\"\"MySQL连接类\"\"\"\n\n def __init__(self, settings):\n self.conn = pymysql.connect(**MYSQL_LOCALHOST['mysql'])\n self.cursor = self.conn.cursor()\n\n def _check(self):\n \"\"\"检查连接\"\"\"\n try:\n self.conn.ping()\n except:\n # log.msg(\"MySQL断开连接,重新连接。\", level=log.WARNING)\n logging.WARNING(\"MySQL断开连接,重新连接。\")\n self.conn.close()\n self.conn = pymysql.connect(**MYSQL_LOCALHOST['mysql'])\n self.cursor = self.conn.cursor()\n\n def __getattribute__(self, attr):\n try:\n # conn这个名字是在__init__中固定的,请勿乱修改\n # conn这个判断一定不能省略,否则会造成无限递归!\n if 'conn' != attr:\n super(MySQLConnection, self).__getattribute__('_check')()\n return super(MySQLConnection, self).__getattribute__(attr)\n except KeyError:\n return '不正确的MySQL属性!'","sub_path":"XiaomanCustoms_/MySQL.py","file_name":"MySQL.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"510674460","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n'''\n Solution uses DFS and pre-order traversal to iterate through the binary tree.\n \n Time Complexity: O(n) where n is the number of nodes in the binary tree.\n \n Notes:\n The solution appends the right child node first then the left child node of the current node. This is because we are utilizing a stack data structure which operates\n on a LIFO basis. Since the question asks us to iterate through the tree in a pre-order traversal manner, we want to first visit the left child node then the right\n child node.\n'''\nclass Solution:\n def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n stack = [root] if root else []\n res = []\n \n while(len(stack) > 0):\n currNode = stack.pop(-1)\n res.append(currNode.val) \n if (currNode.right != None):\n stack.append(currNode.right)\n if (currNode.left != None):\n stack.append(currNode.left)\n \n return res\n \n \n \n \n \n \n","sub_path":"Binary Tree Preorder Traversal/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"207554112","text":"\"\"\"\n# 41. 
First Missing Positive\n\n# Given an unsorted integer array, find the first missing positive integer.\n\n# For example,\n# Given [1,2,0] return 3,\n# and [3,4,-1,1] return 2.\n\n#Your algorithm should run in O(n) time and uses constant space.\n\n\n# The basic idea is for any k positive numbers (duplicates allowed), the first missing positive number must be within [1,k+1].\n# The reason is like you put k balls into k+1 bins, there must be a bin empty, the empty bin can be viewed as the missing number.\n\nFollow up:\n\nYour algorithm should run in O(n) time and uses constant extra space.\n\"\"\"\n\n\nclass FirstMissingPositive:\n\n def doit(self, nums):\n # space requirement\n buf = set(nums)\n for i in range(1, len(nums) + 1):\n if i not in buf:\n return i\n else:\n return len(nums) + 1\n\n\n def doit_array(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n i, length = 0, len(nums)\n\n while i < length:\n while 0 < nums[i] <= length and nums[i] != nums[nums[i]-1]:\n nums[nums[i]-1], nums[i] = nums[i], nums[nums[i]-1]\n # nums[i], nums[nums[i] - 1] = nums[nums[i] - 1], nums[i]\n # incorrect, Here is the tricky sting. the first assignment, nums[i] = nums[nums[i] - 1] already changed the nums[i]\n # and then second assignment will be affect, nums[nums[i] - 1], using the new value.\n # it seems it get all right value first, then assign to the left one by one.\n i += 1\n\n i = 0\n while i < length and nums[i] == i+1:\n i += 1\n\n return i + 1\n\n# 1. Unfortunately, there are 0 and negative numbers in the array, so firstly I think of using partition technique (used in quick sort) \n# to put all positive numbers together in one side. This can be finished in O(n) time, O(1) space.\n\n# 2. After partition step, you get all the positive numbers lying within A[0,k-1]. \n# Now, According to the basic idea, I infer the first missing number must be within [1,k+1]. \n# I decide to use A[i] (0<=i<=k-1) to indicate whether the number (i+1) exists. But here I still have to main the original information A[i] holds. \n# Fortunately, A[i] are all positive numbers, so I can set them to negative to indicate the existence of (i+1) and I can still use abs(A[i]) to get the original information A[i] holds.\n\n# 3. 
After step 2, I can again scan all elements between A[0,k-1] to find the first positive element A[i], that means (i+1) doesn't exist, which is what I want.\n\n def doit1(self, A):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n def swap(A, i, j):\n if i != j:\n A[i] ^= A[j]\n A[j] ^= A[i]\n A[i] ^= A[j]\n\n def partition(A):\n n, q = len(A), -1\n i = 0\n while i < n:\n if A[i] > 0:\n q += 1\n swap(A, q, i)\n i += 1\n\n return q\n\n if not A:\n return 1\n\n k = partition(A) + 1\n \n first_missing_Index = k\n i, temp = 0, 0\n\n while i < k:\n temp = math.fabs(A[i])\n if temp <= k:\n A[temp-1] = A[temp-1] if A[temp-1] < 0 else -A[temp-1]\n i += 1\n\n for i in range(k):\n if A[i] > 0:\n first_missing_Index = i\n break\n\n return first_missing_Index + 1\n \n \n\n\nif __name__ == \"__main__\":\n\n res = FirstMissingPositive().doit_array([3, 4, -1, -1])\n\n res = FirstMissingPositive().doit([1, 1])\n\n res = FirstMissingPositive().doit([1, 2, 0])\n\n res = FirstMissingPositive().doit([3, 4, -1, 1])\n\n res = FirstMissingPositive().doit([-3,9,16,4,5,16,-4,9,26,2,1,19,-1,25,7,22,2,-7,14,2,5,-6,1,17,3,24,-4,17,15])\n\n\n\n\n pass\n\n\n\n\n\n","sub_path":"PythonLeetcode/Leetcode/41_FirstMissingPositive.py","file_name":"41_FirstMissingPositive.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"618714250","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 23 12:05:59 2018\r\n\r\n@author: cheny\r\n\"\"\"\r\n# 默认参数\r\ndef stu(name, age, sex = \"男\", hobby = \"Python\"):\r\n print(\"我的名字是{0},我的年龄是{1}, 我是{2}生, 我的爱好是{3}\"\\\r\n .format(name, age, sex, hobby))\r\n print(\"*\" * 20)\r\n\r\n#默认参数调用\r\nstu(\"张三\", 20) # 常规调用,正确。后面两个参数为默认参数\r\nstu(\"李四\", 45, \"女\", \"C++\") #常规调用,正确,第三个参数为默认参数重新赋值,第四个默认参数不赋值\r\nstu(\"李四\", 45, sex = \"女\") #正确,第三个参数为默认参数,第四个参数为默认参数\r\n#stu(\"李四\", 45, sex = \"女\", \"Java\") #调用错误,它会将第三个参数当成关键字参数,则第四个参数必为关键字参数,而不会将第四个参数当成默认参数,因为关键字参数肯定是在最后面的\r\nstu(\"李四\", 45, hobby = \"Java\") #正确,表示第三个参数是默认参数\r\nstu(\"李四\", 45, \"女\", hobby = \"Java\") #正确,第三个为默认参数,第四个为关键字参数\r\nstu(\"李四\", 45, sex = \"女\", hobby = \"Java\") #正确,第三个参数和第四个参数都会当成关键字参数\r\nstu(\"李四\", 45, hobby = \"Java\", sex = \"女\") #正确,进一步证实了第三个和第四个参数是关键字参数\r\n\r\n\r\n\r\n#收集参数\r\ndef stu(*args):\r\n print(type(args)) #打印看一下args 是什么数据类型\r\n for item in args:\r\n print(item)\r\n\r\n#调用\r\nstu(\"zhangsan\", \"lisi\", \"wangwu\", \"zhaoliu\")\r\nstu(\"zhangsan\")\r\nstu()\r\n\r\n\r\n\r\n# 收集参数关键字参数用法\r\ndef stu(**kwargs):\r\n print(type(kwargs))\r\n for k,v in kwargs.items():\r\n print(k, \"--->\", v)\r\n \r\nstu(name=\"zhangsan\", age=18, hobby=\"Python\")\r\nstu(name=\"lisi\")\r\nstu()\r\n\r\n\r\n#参数混用\r\ndef stu(name, age, *args, hobby=\"Python\", **kwargs):\r\n print(\"My name is {0}, and I am {1} years old, My hobby is {2}\".format(name,age, hobby))\r\n for item in args:\r\n print(item)\r\n for k, v in kwargs.items():\r\n print(k, \"--->\", v)\r\n print(\"*\" * 20)\r\n\r\n#调用\r\nstu(\"zhangsan\", 25) #正确,args和 kwargs都可以没有,hobby为默认参数\r\nstu(\"zhangsan\", 25, hobby=\"Java\") #正确,args和kwargs都可以没有,hobby为关键字参数\r\nstu(\"zhangsan\", 25, \"aaaa\",\"bbbb\") #正确,此时有args参数,hobby为默认参数,所以hobby和kwargs都可以没有\r\nstu(\"zhangsan\", 25, \"aaaa\", \"bbbb\", \"cccc\", hobby=\"C++\") #正确,此时没有kwargs肯定没有问题 \r\nstu(\"zhangsan\", 25, sex=\"nv\", hight=175) #正确,此时args和hobby都没有,可知hobby为默认参数,但是kwargs是有值的\r\nstu(\"zhangsan\", 25, \"aaaa\", \"bbbb\", \"cccc\", hobby=\"C++\",sex=\"nv\", hight=175) #所有参数都有值,肯定是可以的\r\n\r\n\r\n#函数文档\r\ndef 
stu(name, age, *args, hobby=\"Python\", **kwargs):\r\n \"\"\"\r\n 函数功能:打印学生信息\r\n :param name: 学生姓名\r\n :param age: 学生年龄\r\n :param args: 收集参数列表\r\n :param hobby: 学生爱好\r\n :param kwargs: 关键字收集参数列表\r\n :return: None\r\n \"\"\"\r\n print(\"My name is {0}, and I am {1} years old, My hobby is {2}\".format(name, age, hobby))\r\n for item in args:\r\n print(item)\r\n for k, v in kwargs.items():\r\n print(k, \"--->\", v)\r\n print(\"*\" * 20)\r\n\r\nhelp(stu)\r\nprint(stu.__doc__)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"foundation/src/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"37546166","text":"from os.path import join, isdir\nfrom os import makedirs\nfrom multiprocessing import cpu_count\nimport matplotlib.pyplot as plt\nimport pytest\nfrom Tests import save_validation_path as save_path\n\nfrom numpy import exp, sqrt, pi, max as np_max\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyleecan.Classes.OPdq import OPdq\nfrom pyleecan.Classes.Simu1 import Simu1\nfrom pyleecan.Classes.InputCurrent import InputCurrent\nfrom pyleecan.Classes.LamHoleNS import LamHoleNS\nfrom pyleecan.Classes.NotchEvenDist import NotchEvenDist\nfrom pyleecan.Classes.MagFEMM import MagFEMM\nfrom pyleecan.Classes.ForceMT import ForceMT\n\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Plot import dict_2D\n\nfrom pyleecan.definitions import DATA_DIR\n\n\n@pytest.mark.long_5s\n@pytest.mark.long_1m\n@pytest.mark.MagFEMM\n@pytest.mark.IPMSM\n@pytest.mark.periodicity\n@pytest.mark.SingleOP\ndef test_FEMM_LamHoleNS():\n \"\"\"Validation of LamHoleNS in FEMM\"\"\"\n res_path = join(save_path, \"LamHoleNS\")\n if not isdir(res_path):\n makedirs(res_path)\n TP = load(join(DATA_DIR, \"Machine\", \"Toyota_Prius.json\"))\n TPNS = TP.copy()\n # Change rotor type to have different North/South Pole\n TPNS.rotor = LamHoleNS(init_dict=TP.rotor.as_dict())\n # North pole is unchange\n TPNS.rotor.hole_north = TP.rotor.hole\n # Change magnet dimensions on south pole\n TPNS.rotor.hole_south = [TP.rotor.hole[0].copy()]\n TPNS.rotor.hole_south[0].H3 *= 2\n TPNS.rotor.hole_south[0].H2 *= 8\n # First magnet of south pole have different material\n TPNS.rotor.hole_south[0].magnet_0.mat_type.name += \"_2\"\n TPNS.rotor.hole_south[0].magnet_0.mat_type.mag.mur_lin = 2\n\n # Check plot machine\n fig, ax = TPNS.plot(\n sym=4,\n is_clean_plot=True,\n is_show_fig=False,\n )\n fig.savefig(join(res_path, \"machine_sym.png\"))\n fig.savefig(join(res_path, \"machine_sym.svg\"), format=\"svg\")\n\n fig, ax = TPNS.plot(\n save_path=join(res_path, \"machine_full.png\"),\n is_clean_plot=True,\n is_show_fig=False,\n )\n fig.savefig(join(res_path, \"machine_full.png\"))\n fig.savefig(join(res_path, \"machine_full.svg\"), format=\"svg\")\n\n fig, ax = TPNS.rotor.plot(is_add_arrow=True, is_clean_plot=True, is_show_fig=False)\n fig.savefig(join(res_path, \"rotor.png\"))\n fig.savefig(join(res_path, \"rotor.svg\"), format=\"svg\")\n\n # Check periodicity\n assert TPNS.comp_periodicity_spatial() == (4, False)\n\n # Check machine in FEMM with sym\n simu = Simu1(name=\"test_FEMM_LamHoleNS\", machine=TPNS)\n simu.path_result = join(save_path, simu.name)\n simu.input = InputCurrent(\n OP=OPdq(N0=1000, Id_ref=0, Iq_ref=0),\n Na_tot=2048,\n Nt_tot=1,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n 
type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=True,\n is_periodicity_t=False,\n nb_worker=cpu_count(),\n # Kmesh_fineness=2,\n )\n simu.path_result = join(res_path, simu.name)\n\n # Same simu without symetry\n simu2 = simu.copy()\n simu2.name = simu.name + \"_Full\"\n simu2.path_result = join(res_path, simu2.name)\n simu2.mag.is_periodicity_a = False\n\n # Run simulations\n out = simu.run()\n out2 = simu2.run()\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[0]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n # Compare both simu\n Bflux = out.mag.B\n arg_list = [\"angle\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n angle = result[\"angle\"]\n\n Bflux2 = out2.mag.B\n arg_list = [\"angle\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n\n assert_array_almost_equal(Brad, Brad2, decimal=1)\n return out\n\n\n# To run it without pytest\nif __name__ == \"__main__\":\n\n out = test_FEMM_LamHoleNS()\n print(\"Done\")\n","sub_path":"Tests/Validation/Magnetics/test_FEMM_LamHoleNS.py","file_name":"test_FEMM_LamHoleNS.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"617104490","text":"#!/usr/env/bin python32\n\n\"\"\"\nDeclaration: Copyright (c), by i_dovelemon, 2017. All right reserved.\nAuthor: i_dovelemon[1322600812@qq.com]\nDate: 2017/05/26\nBrief: Test level\n\"\"\"\n\nfrom config import *\nfrom host_api import *\n\ndef main():\n DebugPrint(\"-----------------testlevel Start-----------------\\n\")\n\n # Create camera entity\n cam = EntityCreate()\n EntityAddCameraCom(cam, 0.0, 11.0, 0.1, 0.0, 0.0, 0.0)\n DebugPrint(\"Create Camera Entity: OK\\n\")\n\n # Create ground block entity\n # TEST: This is only for test, in the final game, you can not use too many entities just for ground\n sx = -1.0 * GAME_WORLD_WIDTH / 2 + 0.5\n sy = 0.0\n sz = 1.0 * GAME_WORLD_DEPTH / 2 - 0.5\n white_block = \"res\\model\\TD_L1\\Ground\\TD_L1_Ground_Block_White.obj\"\n gray_block = \"res\\model\\TD_L1\\Ground\\TD_L1_Ground_Block_Gray.obj\"\n for i in range(GAME_WORLD_DEPTH):\n for j in range(GAME_WORLD_WIDTH):\n block = EntityCreate()\n EntityAddTransformCom(block, sx + j * 1.0, sy, sz - i * 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n if ((i + j) % 2) == 0:\n EntityAddRenderCom(block, white_block, sx + j * 1.0, sy, sz - i * 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n else:\n EntityAddRenderCom(block, gray_block, sx + j * 1.0, sy, sz - i * 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n DebugPrint(\"Create Ground Blocks Entity: OK\\n\")\n\n # Create player\n player = EntityCreate()\n EntityAddTransformCom(player, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n EntityAddRenderCom(player, \"res\\model\\Player\\TD_Player.obj\", 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n EntityAddRoleCom(player, RMT_PLAYER, RST_MAINTOWER)\n EntityAddScriptCom(player, \"player\")\n EntityAddDataCom(player)\n EntityAddIntData(player, \"coin\", 0)\n EntityAddFloatData(player, \"speed\", MAIN_TOWER_MOVE_SPEED)\n EntityAddFloatData(player, \"press_delta\", MAIN_TOWER_PRESS_DELTA)\n EntityAddFloatData(player, \"cur_press_delta\", 0.0)\n EntityAddFloatData(player, \"hp\", MAIN_TOWER_HP)\n EntityAddArsenalCom(player)\n EntityAddWeapon(player, WT_LASER, \"wplaser\")\n EntityActiveWeapon(player, WT_LASER)\n EntityAddCollisionCom(player)\n EntityUpdateCollision(player)\n 
DebugPrint(\"Create Player: OK\\n\")\n\n # Create crystal\n crystal = EntityCreate()\n EntityAddTransformCom(crystal, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n EntityAddRoleCom(crystal, RMT_PLAYER, RST_CRYSTAL)\n EntityAddRenderCom(crystal, \"res\\model\\Crystal\\TD_Crystal.obj\", 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)\n EntityAddScriptCom(crystal, \"crystal\")\n EntityAddDataCom(crystal)\n EntityAddFloatData(crystal, \"delta\", 0.0)\n EntityAddCollisionCom(crystal)\n EntityUpdateCollision(crystal)\n DebugPrint(\"Create Crystal: OK\\n\") \n\n DebugPrint(\"-----------------testlevel End-----------------\\n\")","sub_path":"glbcodebase/graphicslab/glb_td/res/script/testlevel.py","file_name":"testlevel.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192585998","text":"def solution(A):\n # write your code in Python 3.6\n #counter=0\n numberofopendiscs=0\n Open=[]\n Closed=[]\n counter=0\n if A==[] or len(A)==1:\n return 0\n for i in range(0,len(A)):\n a = (i) - A[i]\n b = (i) + A[i]\n Open.append(a)\n Closed.append(b)\n Open.sort()\n Closed.sort()\n #print(\"open=\",Open)\n #print(\"close=\",Closed)\n i=0\n j=0\n while i1:\n counter+=(numberofopendiscs-1)\n i+=1\n else:\n numberofopendiscs-=1\n j+=1\n if counter > 10000000:\n return -1\n else:\n return counter\n\n\nprint(solution([1, 5, 2, 1, 4, 0]))","sub_path":"NumberOfDiscIntersections.py","file_name":"NumberOfDiscIntersections.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"43760145","text":"import os\nimport bpy\n\ndef fetch():\n s = bpy.data.filepath.split(os.path.sep)\n name = s.pop()\n name = name.split(\".\")\n name = name[0]\n fp = os.path.sep.join(s)\n\n # Update scripts\n os.chdir(fp + \"/Libraries/zblend/blender\")\n os.system(\"git pull\")\n\n # Clone kha\n #self.report({'INFO'}, \"Fetching Kha...\")\n os.chdir(fp)\n if not os.path.exists('Kha'):\n os.system(\"git clone --depth=1 --recursive https://github.com/ktxsoftware/Kha\")\n\n os.chdir(fp + \"/Kha\")\n os.system(\"git pull && git submodule foreach --recursive git checkout master && git submodule foreach --recursive git pull origin master\")\n \n # Create sources directories\n os.chdir(fp)\n if not os.path.exists('Sources/Shaders'):\n os.makedirs('Sources/Shaders')\n if not os.path.exists('Libraries/zblend/Sources'):\n os.makedirs('Libraries/zblend/Sources')\n if not os.path.exists('Libraries/dependencies'):\n os.makedirs('Libraries/dependencies')\n \n # Clone dependencies\n #self.report({'INFO'}, \"Fetching dependencies...\")\n os.chdir(fp + \"/Libraries/dependencies\")\n if not os.path.exists('Sources'):\n os.system(\"git clone --depth=1 https://github.com/luboslenco/zblend_dependencies Sources\")\n \n os.chdir(fp + \"/Libraries/dependencies/Sources\")\n os.system(\"git pull\")\n \n # Clone shaders\n #self.report({'INFO'}, \"Fetching shaders...\")\n os.chdir(fp + \"/Libraries/zblend/Sources\")\n if not os.path.exists('Shaders'):\n os.system(\"git clone --depth=1 https://github.com/luboslenco/zblend_shaders Shaders\")\n \n os.chdir(fp + \"/Libraries/zblend/Sources/Shaders\")\n os.system(\"git pull\")\n\n # Clone oimo \n os.chdir(fp + \"/Libraries\")\n if not os.path.exists('oimo'):\n os.system(\"git clone --depth=1 https://github.com/luboslenco/oimo oimo\")\n \n os.chdir(fp + \"/Libraries/oimo\")\n os.system(\"git pull\")\n\n # Clone haxebullet\n 
#self.report({'INFO'}, \"Fetching physics...\")\n os.chdir(fp + \"/Libraries\")\n if not os.path.exists('haxebullet'):\n os.system(\"git clone --depth=1 https://github.com/luboslenco/haxebullet haxebullet\")\n\n os.chdir(fp + \"/Libraries/haxebullet\")\n os.system(\"git pull\")\n\n # Clone zblend\n #self.report({'INFO'}, \"Fetching zblend...\")\n os.chdir(fp + \"/Libraries/zblend/Sources\")\n if not os.path.exists('zblend'):\n os.system(\"git clone --depth=1 https://github.com/luboslenco/zblend\")\n \n os.chdir(fp + \"/Libraries/zblend/Sources/zblend\")\n os.system(\"git pull\")\n \n # Create assets dir\n os.chdir(fp)\n if not os.path.exists('Assets'):\n os.makedirs('Assets')\n os.makedirs('Assets/raw')\n\n print(\"Fetch complete!\")\n\nfetch()\n","sub_path":"zblend_fetch.py","file_name":"zblend_fetch.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"177132813","text":"import gym, yumi_gym\nimport pybullet as p\nimport numpy as np\nimport h5py\nimport time\nfrom math import pi\n\ndef linear_map(x_, min_, max_, min_hat, max_hat):\n \n x_hat = 1.0 * (x_ - min_) / (max_ - min_) * (max_hat - min_hat) + min_hat\n print(x_, x_hat, min_, max_, min_hat, max_hat)\n return x_hat\n\ndef map_glove_to_inspire_hand(glove_angles):\n\n ### This function linearly maps the Wiseglove angle measurement to Inspire hand's joint angles.\n\n ## preparation, specify the range for linear scaling\n hand_start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.4, 0.0, 0.0]) # radius already\n hand_final = np.array([-1.6, -1.7, -1.6, -1.7, -1.6, -1.7, -1.6, -1.7, -1.0, 0.0, -0.4, -1.0])\n glove_start = np.array([0, 0, 53, 0, 0, 22, 0, 0, 22, 0, 0, 35, 0, 0])# * pi / 180.0 # degree to radius\n glove_final = np.array([45, 100, 0, 90, 120, 0, 90, 120, 0, 90, 120, 0, 90, 120])# * pi / 180.0\n length = glove_angles.shape[0]\n hand_angles = np.zeros((length, 12)) # 12 joints\n\n ## Iterate to map angles\n for i in range(length):\n # four fingers' extension/flexion (abduction/adduction are dumped)\n hand_angles[i, 0] = linear_map(glove_angles[i, 3], glove_start[3], glove_final[3], hand_start[0], hand_final[0]) # Link1 (joint name)\n hand_angles[i, 1] = linear_map(glove_angles[i, 4], glove_start[4], glove_final[4], hand_start[1], hand_final[1]) # Link11\n hand_angles[i, 2] = linear_map(glove_angles[i, 6], glove_start[6], glove_final[6], hand_start[2], hand_final[2]) # Link2\n hand_angles[i, 3] = linear_map(glove_angles[i, 7], glove_start[7], glove_final[7], hand_start[3], hand_final[3]) # Link22\n hand_angles[i, 4] = linear_map(glove_angles[i, 9], glove_start[9], glove_final[9], hand_start[4], hand_final[4]) # Link3\n hand_angles[i, 5] = linear_map(glove_angles[i, 10], glove_start[10], glove_final[10], hand_start[5], hand_final[5]) # Link33\n hand_angles[i, 6] = linear_map(glove_angles[i, 12], glove_start[12], glove_final[12], hand_start[6], hand_final[6]) # Link4\n hand_angles[i, 7] = linear_map(glove_angles[i, 13], glove_start[13], glove_final[13], hand_start[7], hand_final[7]) # Link44\n\n # thumb\n hand_angles[i, 8] = (hand_start[8] + hand_final[8]) / 2.0 # Link5 (rotation about z axis), fixed!\n hand_angles[i, 9] = linear_map(glove_angles[i, 2], glove_start[2], glove_final[2], hand_start[9], hand_final[9]) # Link 51\n hand_angles[i, 10] = linear_map(glove_angles[i, 0], glove_start[0], glove_final[0], hand_start[10], hand_final[10]) # Link 52\n hand_angles[i, 11] = linear_map(glove_angles[i, 1], glove_start[1], 
glove_final[1], hand_start[11], hand_final[11]) # Link 53\n\n return hand_angles\n\n\nhf = h5py.File('yumi_intro_YuMi.h5', 'r')\nkey = '会-hui'\nl_glove_angle = hf[key + '/l_glove_angle'][:]\nr_glove_angle = hf[key + '/r_glove_angle'][:]\nl_hand_angle = map_glove_to_inspire_hand(l_glove_angle)\nr_hand_angle = map_glove_to_inspire_hand(r_glove_angle)\nl_joint_default = [-1.341905951499939, -1.7764934301376343, 0.5122540473937988, -0.315954327583313, 1.8956027030944824, 0.47532641887664795, -0.7984092235565186]\nr_joint_default = [1.2869668006896973, -1.7874925136566162, -0.581124496459961, -0.5961062908172607, -0.1543283462524414, 0.47532641887664795, -0.7984092235565186]\ntotal_frames = l_hand_angle.shape[0]\n\nenv = gym.make('yumi-v0')\nenv.render()\nobservation = env.reset()\n\nwhile True:\n env.render()\n for t in range(total_frames):\n # t = 100\n # if t < 30 or t > 180:\n # continue\n print(t, l_hand_angle.shape)\n\n # action = [0 for i in range(14)] + l_hand_angle[t].tolist() + r_hand_angle[t].tolist()\n # action[5], action[12], action[6], action[13] = 0, 0, 0, 0\n action = l_joint_default + r_joint_default + l_hand_angle[t].tolist() + r_hand_angle[t].tolist()\n # print(action)\n print(l_hand_angle[t], r_glove_angle[t])\n observation, reward, done, info = env.step(action)\n time.sleep(0.1)","sub_path":"h5_hand_control.py","file_name":"h5_hand_control.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10793341","text":"'''\n二分查找,针对有序数列\n'''\ndef binary_search(nums, num):\n\tn = len(nums)\n\tif n > 0:\n\t\tmid = n // 2\n\t\tif nums[mid] == num:\n\t\t\treturn True\n\t\telif num < nums[mid]:\n\t\t\treturn binary_search(nums[:mid], num)\n\t\telse:\n\t\t\treturn binary_search(nums[mid+1:], num)\n\treturn False\n\n\ndef binary_search2(nums, num):\n\tn = len(nums)\n\tstart = 0\n\tstop = n-1\n\twhile start <= stop:\n\t\tmid = (start + stop)//2\n\t\tif nums[mid] == num:\n\t\t\treturn True\n\t\telif num < nums[mid]:\n\t\t\tstop = mid -1\n\t\telse:\n\t\t\tstart = mid + 1\n\treturn False\n\nnums = [1,2,3,5]\nprint(binary_search2(nums, 4))\n","sub_path":"sort/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323621681","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom college_management_app.EmailBackend import EmailBackend\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef showDemoPage(request):\n return render(request,\"demo.html\")\n\ndef ShowLoginPage(request):\n return render(request,\"login_page.html\")\n\ndef doLogin(request):\n if request.method!=\"POST\":\n return HttpResponse(\"
Method Not Allowed
\")\n else:\n user=EmailBackend.authenticate(request,username=request.POST.get(\"email\"),password=request.POST.get(\"password\"))\n if user!=None:\n login(request,user)\n if user.user_type==\"1\":\n return HttpResponseRedirect('/admin_home')\n elif user.user_type==\"2\":\n return HttpResponseRedirect(reverse(\"teacher_home\"))\n else:\n return HttpResponseRedirect(reverse(\"student_home\"))\n\n else:\n messages.error(request,\"Invalid Login Details\")\n return HttpResponseRedirect(\"/\")\n\ndef GetUserDetails(request):\n if request.user!=None:\n return HttpResponse(\"User : \"+request.user.email+\" usertype : \"+request.user.user_type)\n else:\n return HttpResponse(\"Please Login First\")\n\ndef logout_user(request):\n logout(request)\n return HttpResponseRedirect(\"/\")\n\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\nimport pickle\nimport numpy as np\n\nfrom keras.models import load_model\nmodel = load_model('chatbotmodel.h5')\nimport json\nimport random\nfrom django.shortcuts import render\n\nDATA_PATH = 'intents.json'\nintents = json.loads(open(DATA_PATH,encoding=\"utf8\").read())\nwords = pickle.load(open('words.pkl', 'rb'))\nclasses = pickle.load(open('classes.pkl', 'rb'))\n\n\ndef clean_up_sentence(sentence):\n sentence_words = nltk.word_tokenize(sentence)\n sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]\n return sentence_words\n\n\ndef bow(sentence, words, show_details=False):\n\n sentence_words = clean_up_sentence(sentence)\n bag = [0]*len(words)\n for s in sentence_words:\n for i,w in enumerate(words):\n if w == s:\n # assign 1 if current word is in the vocabulary position\n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n return(np.array(bag))\n\ndef predict_class(sentence):\n res = model.predict(np.array([bow(sentence, words)]))[0]\n ERROR_THRESHOLD = 0.50\n\n\n\n results = [[i,r] for i,r in enumerate(res) if r > ERROR_THRESHOLD]\n results.sort(key=lambda x: x[1], reverse=True)\n\n\n\n return_list = list()\n for r in results:\n rr = [classes[r[0]]],[r[1]]\n return_list.extend(rr)\n return return_list\n\ndef chat(request):\n msg = request.POST.get(\"msg\")\n results = predict_class(sentence=msg)\n\n results_index = np.array(results)\n confidence = results_index[1]\n co = (confidence.astype('float64'))\n val = np.float32(co)\n pyval = val.item()\n\n if pyval > 0.6:\n tag = results_index[0]\n\n list_of_intents = intents['intents']\n for i in list_of_intents:\n if (i['tag'] == tag):\n result = random.choice(i['responses'])\n break\n return HttpResponse(result)\n else:\n return HttpResponse('Sorry, I did not understand that')\n \n\n","sub_path":"college_management_system/college_management_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"233709425","text":"\"\"\"\nCombines vehicle input geomtry for export as a skeletal mesh.\n\"\"\"\n\nimport maya.cmds as MCmds\nimport U4_ObjectSetUtils\nimport U4_DisplayLayerUtils\nimport U4_RigidSkinBuilder\nimport U4_SkinUtils\nimport U4_Naming\nimport U4_ResultJointsSet\nimport U4_FileUtils\nimport U4_Unreal.U4_UnrealSocketsToJoints as U4_UnrealSocketsToJoints\nfrom U4_ScopedCleanJointHierarchy import U4_ScopedCleanJointHierarchy\nfrom U4_Const import *\n\nimport U4_Unreal.U4_UnrealExportUtils as U4_UnrealExportUtils\nfrom U4_Unreal.U4_UnrealConst import *\n\ndef build():\n\t\"\"\"\n\tEntry point. 
Build vehicle in current scene.\n\t\"\"\"\n\t\n\t# Clear selection to prevent it from potentially interfering with our commands.\n\tMCmds.select(clear = True)\n\n\t# Prepare object sets, display layers, etc if they don't exist yet.\n\t_setupScene()\n\n\t# Delete objects from previous call to build().\n\t_cleanupScene()\n\n\tinputSet = _getInputSet()\n\tinputSetMembers = U4_ObjectSetUtils.getMembers(inputSet)\n\tif len(inputSetMembers) < 1:\n\t\tprint(\"No input geometry to merge yet\")\n\t\treturn\n\n\t# Merge input geometry into output skeletal mesh.\n\t_mergeGeo(inputSetMembers)\n\n\t# Save a copy of the scene specialized for animation.\n\t_exportAnimRef()\n\ndef _getInputSet():\n\t\"\"\"\n\t@returns: Objectset of geometry to merge into skeletal mesh.\n\t\"\"\"\n\treturn \"VehicleInput\" + U4_Naming.suffix(set = True)\n\ndef _getOutputGeo():\n\t\"\"\"\n\t@returns: Merged skeletal mesh geometry.\n\t\"\"\"\n\treturn \"VehicleOutput\" + U4_Naming.suffix(geo = True)\n\ndef _getOutputDisplayLayer():\n\t\"\"\"\n\t@returns: Display layer for export skeletal mesh geometry. Makes it easy to hide generated mesh.\n\t\"\"\"\n\treturn \"VehicleSkin\" + U4_Naming.suffix(lyr = True) \n\ndef _getResultJointsDisplayLayer():\n\t\"\"\"\n\t@returns: Display layer for export skeleton.\n\t\"\"\"\n\treturn \"ResultJoints\" + U4_Naming.suffix(lyr = True)\n\ndef _setupScene():\n\t\"\"\"\n\tPrepare scene for vehicle modeling.\n\t\"\"\"\n\n\t# Create skeletal mesh set if it does not exist yet.\n\tU4_ObjectSetUtils.create(_getInputSet())\n\n\t# Create display layer for merged geo if it does not exist yet. \n\tU4_DisplayLayerUtils.createDisplayLayer(_getOutputDisplayLayer(), color = colorOrange, isVisible = False, displayType = U4_DisplayLayerUtils.displayType_Reference)\n\tU4_DisplayLayerUtils.createDisplayLayer(_getResultJointsDisplayLayer(), color = colorOrange, isVisible = True, displayType = U4_DisplayLayerUtils.displayType_Normal)\n\ndef _cleanupScene():\n\t\"\"\"\n\tCleanup nodes generated by previous run.\n\t\"\"\"\n\tgeo = _getOutputGeo()\n\tif MCmds.objExists(geo):\n\t\tMCmds.delete(geo)\n\ndef _mergeGeo(inputSetMembers):\n\t\"\"\"\n\tDuplicate input geometry and merge it into a skeletal mesh.\n\t\"\"\"\n\toutputGeo = U4_RigidSkinBuilder.build(inputSetMembers)\n\tif not outputGeo:\n\t\tprint(\"Unable to merge any inputs\")\n\t\treturn\n\t\n\toutputGeo = MCmds.rename(outputGeo, _getOutputGeo())\n\n\texport = U4_UnrealExportUtils.getOrCreateExport(\"Vehicle\", type = unrealExportType_SkeletalMesh)\n\tU4_UnrealExportUtils.addMembers(export, outputGeo)\n\n\t# Output will be in input set still by default because we duplicated the geometry.\n\tU4_ObjectSetUtils.removeMembers(_getInputSet(), outputGeo)\n\tU4_DisplayLayerUtils.addMembers(_getOutputDisplayLayer(), outputGeo)\n\n\t# Non-deformer history must be baked before export.\n\tMCmds.bakePartialHistory(outputGeo, prePostDeformers = True)\n\n\tskinCluster = U4_SkinUtils.findSkinCluster(outputGeo)\n\tresultJoints = U4_SkinUtils.getRelatedJoints(skinCluster)\n\n\tresultJointsDisplayLayer = _getResultJointsDisplayLayer()\n\tU4_DisplayLayerUtils.clearMembers(resultJointsDisplayLayer)\n\tU4_DisplayLayerUtils.addMembers(resultJointsDisplayLayer, resultJoints)\n\n\t# ResultJoints set\n\tjointsSet = U4_ResultJointsSet.createOrEmpty()\n\tU4_ResultJointsSet.addJoints(jointsSet, resultJoints)\n\ndef _exportAnimRef():\n\t\"\"\"\n\tExport *_Ref.mb version of scene primed for animation.\n\tStrips input geometry from animation scene.\n\t\"\"\"\n\n\toutputGeo = 
_getOutputGeo()\n\tif not MCmds.objExists(outputGeo):\n\t\t# We don't error for this because chances are the geo was not able to be generated\n\t\treturn\n\n\trootJoint = \"Root\"\n\n\tif not MCmds.objExists(rootJoint):\n\t\traise RuntimeError(\"Unable to export anim ref without 'Root' joint\")\n\n\t# Sanity-check that Root has only one bind pose. This proved to be a confusing issue in one case. \n\tbindPoses = MCmds.dagPose(rootJoint, query = True, bindPose = True)\n\tif not bindPoses or len(bindPoses) != 1:\n\t\traise RuntimeError(\"Root joint should only have one bind pose\")\n\n\t# We convert sockets to joints for the animation scene so that Denizen root motion can be attached to seats.\n\twith U4_UnrealSocketsToJoints.fromRootJoint(rootJoint):\n\t\twith U4_ScopedCleanJointHierarchy(rootJoint):\n\t\t\toutputDisplayLayer = _getOutputDisplayLayer()\n\t\t\tjointsDisplayLayer = _getResultJointsDisplayLayer()\n\n\t\t\t# It's confusing to import animref with mesh invisible, so we temporarily make it visible.\n\t\t\trestoreMeshVisible = U4_DisplayLayerUtils.getDisplayLayerIsVisible(outputDisplayLayer)\n\t\t\trestoreJointsDisplayType = U4_DisplayLayerUtils.getDisplayLayerDisplayType(jointsDisplayLayer)\n\t\t\tU4_DisplayLayerUtils.setDisplayLayerIsVisible(outputDisplayLayer, True)\n\t\t\tU4_DisplayLayerUtils.setDisplayLayerDisplayType(jointsDisplayLayer, U4_DisplayLayerUtils.displayType_Reference)\n\n\t\t\tjointsSet = U4_ResultJointsSet.getResultJointsSet()\n\n\t\t\ttry:\n\t\t\t\tU4_FileUtils.exportRef( [ outputGeo, jointsSet ] )\n\t\t\tfinally:\n\t\t\t\tU4_DisplayLayerUtils.setDisplayLayerIsVisible(outputDisplayLayer, restoreMeshVisible)\n\t\t\t\tU4_DisplayLayerUtils.setDisplayLayerDisplayType(jointsDisplayLayer, restoreJointsDisplayType)\n","sub_path":"scripts/U4_VehicleBuilder.py","file_name":"U4_VehicleBuilder.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119157032","text":"import pytest\nfrom tasks_hw3.task2 import mproc_sum, runtime_timer, slow_calculate\n\n\n@pytest.mark.parametrize(\n [\"func\", \"value\", \"expected_result\"],\n [\n (slow_calculate, 500, 1024259),\n (slow_calculate, 10, 21846),\n ],\n)\ndef test_slow_calc(func, value, expected_result):\n mproc_sum_w_timer = runtime_timer(mproc_sum)\n actual_result, runtime = mproc_sum_w_timer(func, value)\n assert actual_result == expected_result\n assert float(runtime) < 60\n","sub_path":"homework3/tests/test_task2.py","file_name":"test_task2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"507441767","text":"\"\"\"\nThis file is part of Linspector (https://linspector.org/)\nCopyright (c) 2022 Johannes Findeisen . All Rights Reserved.\nSee LICENSE (MIT license).\n\"\"\"\n\n\n# This class can maybe be used for a general data model for Linspector data processing. 
currently\n# the is no use for it and no place known where it could be used with sense.\nclass Model:\n\n def __init__(self, configuration, environment, log):\n self.__configuration = configuration\n self.__environment = environment\n self.__log = log\n","sub_path":"linspector/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192042709","text":"# msmb AtomPairsFeaturizer --out mainnode-pair_indices_stride20-2 --pair_indices AtomIndices.txt --top A2.prmtop --trjs 'MD*/*.mdcrd' --stride 20\nimport numpy as np\nfrom msmbuilder.utils import io\nimport msmbuilder.cluster\nimport glob\nimport pickle\n\ndataset = []\ninf = {}\n\nfor i in sorted(glob.glob('featurizes_RMSD+drugDist/*.npy')):\n dataset.append(np.load(i))\n inf[i] = len(dataset)\n print(i)\n print(len(dataset))\n\n\nwith open('maping_2OIQ.txt', 'wb') as handle:\n pickle.dump(inf, handle)\n \n\"\"\"\nwith open('file.txt', 'rb') as handle:\n b = pickle.loads(handle.read())\n\"\"\"\n\nstates = msmbuilder.cluster.KMeans(n_clusters=500)\nstates.fit(dataset)\n\nio.dump(states,'clustering_2OIQ_db.pkl')\n","sub_path":"mkClusterFeatures.py","file_name":"mkClusterFeatures.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"287220692","text":"from typing import List\nfrom usps_lib.error import Error\nfrom karrio.core.utils import Element, XP\nfrom karrio.core.models import Message\nfrom karrio.providers.usps_international.utils import Settings\n\n\ndef parse_error_response(response: Element, settings: Settings) -> List[Message]:\n error_nodes = (\n [response] if response.tag == 'Error' else\n response.xpath(\".//*[local-name() = $name]\", name=\"Error\")\n )\n errors = [XP.to_object(Error, node) for node in error_nodes]\n\n return [\n Message(\n carrier_name=settings.carrier_name,\n carrier_id=settings.carrier_id,\n code=str(error.Number),\n message=error.Description,\n )\n for error in errors\n ]\n","sub_path":"sdk/extensions/usps_international/karrio/providers/usps_international/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"489601436","text":"from PIL import Image, ImageDraw\nimport random\n\nprint(\"Dog\")\n\nvisual_map = [['W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W'],\n['W', ' ', 'W', 'W', ' ', 'W', 'W', 'S', 'W', 'W'],\n['W', ' ', ' ', ' ', ' ', ' ', ' ', 'C', ' ', 'W'],\n['W', ' ', ' ', ' ', ' ', ' ', 'R', 'C', ' ', 'W'],\n['W', ' ', ' ', 'C', 'R', 'R', 'R', ' ', ' ', 'W'],\n['W', ' ', 'C', 'C', 'R', 'R', 'R', ' ', ' ', 'W'],\n['W', 'S', 'R', 'R', 'R', 'C', 'C', ' ', ' ', 'W'],\n['W', ' ', ' ', ' ', ' ', ' ', 'S', ' ', ' ', 'W'],\n['W', 'W', ' ', ' ', ' ', 'W', ' ', 'W', 'W', 'W'],\n['W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W']]\n\nclass Image_Maker:\n def __init__(self, our_list, our_name):\n self.image_list_iterator(our_list, our_name)\n\n def image_list_iterator(self, input_list, pic_name):\n size = (len(input_list[0])*10, len(input_list[0]*10))\n im = Image.new(\"RGB\", size)\n draw = ImageDraw.Draw(im)\n count = 0\n\n def position_fiddler(pos):\n pos = list(pos)\n pos[0] += 10\n pos = tuple(pos)\n return pos\n\n for i in input_list:\n print(i, input_list.index(i))\n print(count)\n our_index = input_list.index(i)\n position = (0, count*10)\n position2 = (10, (count*10) + 
10)\n for j in i:\n if j == \"W\":\n draw.rectangle((position, position2), fill=\"blue\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n elif j == \"S\":\n draw.rectangle((position, position2), fill=\"red\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n elif j == \"C\":\n draw.rectangle((position, position2), fill=\"green\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n elif j == \"R\":\n draw.rectangle((position, position2), fill=\"purple\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n elif j == \"X\":\n draw.rectangle((position, position2), fill=\"orange\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n else:\n draw.rectangle((position, position2), fill=\"yellow\")\n position = position_fiddler(position)\n position2 = position_fiddler(position2)\n count += 1\n im.save(\"C:/Users/iamja_000/Documents/GitHub/worldgen2/images/lemons_experiment_\" + str(pic_name) + \".jpg\")\n\n\n#image_list_iterator(visual_map, 20)\n\ndef pil_image():\n size = (100, 100)\n im = Image.new(\"RGB\", size)\n draw = ImageDraw.Draw(im)\n #red = (255,210,10)\n position = (0, 0)\n position2 = (10, 10)\n draw.rectangle((position, position2), fill=\"red\")\n position = (10, 0)\n position2 = (20, 10)\n draw.rectangle((position, position2), fill=\"blue\")\n position = (20, 0)\n position2 = (30, 10)\n draw.rectangle((position, position2), fill=\"yellow\")\n print(im)\n im.save(\"C:/Users/iamja_000/Documents/GitHub/worldgen2/lemonsyyy.jpeg\")\n\n#print(visual_map)\n#pil_image()\n\ndef pil_image_iterative():\n size = (100, 100)\n im = Image.new(\"RGB\", size)\n draw = ImageDraw.Draw(im)\n position = (0, 0)\n position2 = (10, 10)\n colors = [\"red\", \"blue\", \"yellow\"]\n print(\"Iterating\")\n while position2[0] <= 100:\n color_choice = random.choice(colors)\n draw.rectangle((position, position2), fill=color_choice)\n position = list(position)\n position[0] += 10\n position = tuple(position)\n position2 = list(position2)\n position2[0] += 10\n position2 = tuple(position2)\n print(\"Position1 {} position2 {}\".format(position, position2))\n im.save(\"C:/Users/iamja_000/Documents/GitHub/worldgen2/lemonsxx.jpeg\")\n\n\n#pil_image_iterative()\n","sub_path":"temp_scrap.py","file_name":"temp_scrap.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"271947830","text":"from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry import roles\nfrom sentry.models import AuditLogEntryEvent, Organization\nfrom sentry.signals import data_scrubber_enabled\nfrom sentry.web.frontend.base import OrganizationView\n\n\nclass OrganizationSettingsForm(forms.ModelForm):\n name = forms.CharField(help_text=_('The name of your organization. i.e. 
My Company'))\n slug = forms.SlugField(\n label=_('Short name'),\n help_text=_('A unique ID used to identify this organization.'),\n )\n allow_joinleave = forms.BooleanField(\n label=_('Open Membership'),\n help_text=_('Allow organization members to freely join or leave any team.'),\n required=False,\n )\n default_role = forms.ChoiceField(\n label=_('Default Role'),\n choices=roles.get_choices(),\n help_text=_('The default role new members will receive.'),\n )\n enhanced_privacy = forms.BooleanField(\n label=_('Enhanced Privacy'),\n help_text=_(\n 'Enable enhanced privacy controls to limit personally identifiable information (PII) as well as source code in things like notifications.'\n ),\n required=False,\n )\n allow_shared_issues = forms.BooleanField(\n label=_('Allow Shared Issues'),\n help_text=_('Enable sharing of limited details on issues to anonymous users.'),\n required=False,\n )\n require_scrub_data = forms.BooleanField(\n label=_('Require Data Scrubber'),\n help_text=_('Require server-side data scrubbing be enabled for all projects.'),\n required=False\n )\n require_scrub_defaults = forms.BooleanField(\n label=_('Require Using Default Scrubbers'),\n help_text=_(\n 'Require the default scrubbers be applied to prevent things like passwords and credit cards from being stored for all projects.'\n ),\n required=False\n )\n sensitive_fields = forms.CharField(\n label=_('Global additional sensitive fields'),\n help_text=_(\n 'Additional field names to match against when scrubbing data for all projects. '\n 'Separate multiple entries with a newline.
Note: These fields will be used in addition to project specific fields.'\n ),\n widget=forms.Textarea(\n attrs={\n 'placeholder': mark_safe(_('e.g. email')),\n 'class': 'span8',\n 'rows': '3',\n }\n ),\n required=False,\n )\n safe_fields = forms.CharField(\n label=_('Global safe fields'),\n help_text=_(\n 'Field names which data scrubbers should ignore. '\n 'Separate multiple entries with a newline.
Note: These fields will be used in addition to project specific fields.'\n ),\n widget=forms.Textarea(\n attrs={\n 'placeholder': mark_safe(_('e.g. email')),\n 'class': 'span8',\n 'rows': '3',\n }\n ),\n required=False,\n )\n require_scrub_ip_address = forms.BooleanField(\n label=_('Prevent Storing of IP Addresses'),\n help_text=_('Preventing IP addresses from being stored for new events on all projects.'),\n required=False\n )\n early_adopter = forms.BooleanField(\n label=_('Early Adopter'),\n help_text=_('Opt-in to new features before they\\'re released to the public.'),\n required=False\n )\n\n class Meta:\n fields = ('name', 'slug', 'default_role')\n model = Organization\n\n def __init__(self, has_delete, *args, **kwargs):\n super(OrganizationSettingsForm, self).__init__(*args, **kwargs)\n if not has_delete:\n del self.fields['default_role']\n\n def clean_sensitive_fields(self):\n value = self.cleaned_data.get('sensitive_fields')\n if not value:\n return\n\n return filter(bool, (v.lower().strip() for v in value.split('\\n')))\n\n def clean_safe_fields(self):\n value = self.cleaned_data.get('safe_fields')\n if not value:\n return\n\n return filter(bool, (v.lower().strip() for v in value.split('\\n')))\n\n\nclass OrganizationSettingsView(OrganizationView):\n required_scope = 'org:write'\n\n def get_form(self, request, organization):\n has_delete = request.access.has_scope('org:admin')\n\n return OrganizationSettingsForm(\n has_delete=has_delete,\n data=request.POST or None,\n instance=organization,\n initial={\n 'default_role':\n organization.default_role,\n 'allow_joinleave':\n bool(organization.flags.allow_joinleave),\n 'enhanced_privacy':\n bool(organization.flags.enhanced_privacy),\n 'allow_shared_issues':\n bool(not organization.flags.disable_shared_issues),\n 'require_scrub_data':\n bool(organization.get_option('sentry:require_scrub_data', False)),\n 'require_scrub_defaults':\n bool(organization.get_option('sentry:require_scrub_defaults', False)),\n 'sensitive_fields':\n '\\n'.join(organization.get_option('sentry:sensitive_fields', None) or []),\n 'safe_fields':\n '\\n'.join(organization.get_option('sentry:safe_fields', None) or []),\n 'require_scrub_ip_address':\n bool(organization.get_option('sentry:require_scrub_ip_address', False)),\n 'early_adopter':\n bool(organization.flags.early_adopter),\n }\n )\n\n def handle(self, request, organization):\n form = self.get_form(request, organization)\n if form.is_valid():\n organization = form.save(commit=False)\n organization.flags.allow_joinleave = form.cleaned_data['allow_joinleave']\n organization.flags.enhanced_privacy = form.cleaned_data['enhanced_privacy']\n organization.flags.disable_shared_issues = not form.cleaned_data['allow_shared_issues']\n organization.flags.early_adopter = form.cleaned_data['early_adopter']\n organization.save()\n\n data_scrubbing_options = (\n 'require_scrub_data', 'require_scrub_defaults', 'sensitive_fields', 'safe_fields',\n 'require_scrub_ip_address'\n )\n\n for opt in data_scrubbing_options:\n value = form.cleaned_data.get(opt)\n if value is None:\n organization.delete_option('sentry:%s' % (opt, ))\n else:\n organization.update_option('sentry:%s' % (opt, ), value)\n\n self.create_audit_entry(\n request,\n organization=organization,\n target_object=organization.id,\n event=AuditLogEntryEvent.ORG_EDIT,\n data=organization.get_audit_log_data(),\n )\n\n messages.add_message(\n request, messages.SUCCESS, _('Changes to your organization were saved.')\n )\n\n if any(\n (\n scrubbing_field in 
form.cleaned_data\n for scrubbing_field in data_scrubbing_options\n )\n ):\n data_scrubber_enabled.send(organization=organization, sender=request.user)\n\n return HttpResponseRedirect(\n reverse('sentry-organization-settings', args=[organization.slug])\n )\n\n context = {\n 'form': form,\n }\n\n return self.respond('sentry/organization-settings.html', context)\n","sub_path":"src/sentry/web/frontend/organization_settings.py","file_name":"organization_settings.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"292166089","text":"s = input()\ncount = 0\nflag = 0\nfor i in range(len(s)):\n if s[i] != '.' and flag == 0:\n count += 1\n flag = 1\n else:\n if s[i] == '.':\n flag = 0\nprint(count)","sub_path":"lesson5/sentences.py","file_name":"sentences.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307768623","text":"from Source.Model.main.params.t_default import TDefaultParams\n\nTParams = dict()\n\nTParams['WDC_SCALE_ID'] = TDefaultParams.WDC_SCALE_ID\n\nTParams['ALPHA_MM_WINDOW'] = TDefaultParams.ALPHA_MM_WINDOW\nTParams['ALPHA_BEGIN_SHIFT'] = TDefaultParams.ALPHA_BEGIN_SHIFT\nTParams['ALPHA_QRS_BTW'] = TDefaultParams.ALPHA_QRS_BTW\nTParams['ALPHA_QRS_GAP'] = TDefaultParams.ALPHA_QRS_GAP\nTParams['ALPHA_MM_SHARP'] = TDefaultParams.ALPHA_MM_SHARP\nTParams['ALPHA_PEAK_ZC_AMPL_DEC_LEFT'] = TDefaultParams.ALPHA_PEAK_ZC_AMPL_DEC_LEFT\nTParams['ALPHA_PEAK_ZC_AMPL_DEC_RIGHT'] = TDefaultParams.ALPHA_PEAK_ZC_AMPL_DEC_RIGHT\nTParams['ALPHA_PEAK_ZC_AMPL_DEC_BEGIN'] = TDefaultParams.ALPHA_PEAK_ZC_AMPL_DEC_BEGIN\nTParams['ALPHA_PEAK_ZC_AMPL_DEC_POW'] = TDefaultParams.ALPHA_PEAK_ZC_AMPL_DEC_POW\nTParams['ALPHA_PEAK_ZC_AMPL_DEC_COEFF'] = TDefaultParams.ALPHA_PEAK_ZC_AMPL_DEC_COEFF\nTParams['ALPHA_FLEX_SHIFT'] = TDefaultParams.ALPHA_FLEX_SHIFT\nTParams['ALPHA_FLEX_AMPL_NGBR'] = TDefaultParams.ALPHA_FLEX_AMPL_NGBR\nTParams['ALPHA_FLEX_AMPL_OLD_ZC'] = TDefaultParams.ALPHA_FLEX_AMPL_OLD_ZC\nTParams['ALPHA_FLEX_BEGIN_PART'] = TDefaultParams.ALPHA_FLEX_BEGIN_PART\nTParams['ALPHA_FLEX_END_PART'] = TDefaultParams.ALPHA_FLEX_END_PART\nTParams['ALPHA_BIPHASE_AMPL_LEFT'] = TDefaultParams.ALPHA_BIPHASE_AMPL_LEFT\nTParams['ALPHA_BIPHASE_AMPL_RIGHT'] = TDefaultParams.ALPHA_BIPHASE_AMPL_RIGHT\nTParams['ALPHA_BIPHASE_AMPL_SHIFT'] = TDefaultParams.ALPHA_BIPHASE_AMPL_SHIFT\nTParams['ALPHA_ONSET_OFFSET_MM'] = TDefaultParams.ALPHA_ONSET_OFFSET_MM\n\nTParams['BETA_SCALE'] = TDefaultParams.BETA_SCALE\nTParams['BETA_PEAK_ZC_AMPL'] = TDefaultParams.BETA_PEAK_ZC_AMPL\n\n\n\n\n\n\n\n\n\n","sub_path":"Source/Model/main/params/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"626494409","text":"# csapat; versenyzo; eletkor; palya; korido; kor\r\n#Versenylovak; Fürge Ferenc; 29; Gran Prix Circuit; 00:01:11; 1\r\n# 0 1 2 3 4 5\r\n\r\n#Függvény\r\ndef köridő(sor):\r\n '''Bedobjuk a köridőt string-ként (00:00:00), és visszatér másodpercekkel integer-ként.'''\r\n perc = int(sor[4][3:5])\r\n sec = int(sor[4][6:])\r\n return perc*60 + sec\r\n\r\n#Előkészületek\r\nwith open('autoverseny.csv', 'r', encoding='UTF-8-sig') as f:\r\n fejlec = f.readline()\r\n matrix = [sor.strip().split(';') for sor in f]\r\n \r\n#3. feladat\r\nprint(f'3. feladat: {len(matrix)}')\r\n\r\n#4. 
feladat\r\nfor sor in matrix:\r\n if sor[1] == 'Fürge Ferenc' and sor[5] == '3' and sor[3] == 'Gran Prix Circuit':\r\n print(f'4. feladat: {köridő(sor)} másodperc')\r\n \r\n#5. feladat\r\nprint(f'5. feladat:')\r\nnev = input(f'Kérem egy versenyző nevét:\\n')\r\n\r\n#6. feladat\r\nmini = '99:99:99'\r\npalya =''\r\nfor sor in matrix:\r\n if sor[1] == nev:\r\n if sor[4] < mini:\r\n mini = sor[4]\r\n palya = sor[3]\r\nif palya == '':\r\n print(f'Nincs ilyen versenyző az állományban!')\r\nelse:\r\n print(f'6. feladat: {palya}, {mini}')","sub_path":"korido.py","file_name":"korido.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"535974297","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author Abner.C\n\nimport os\n\nimport numpy\nimport theano\nimport json\nimport random\n\n\ndef prepare_data(seqs, labels):\n \"\"\"Create the matrices from the datasets.\n\n This pad each sequence to the same length: the length of the\n longest sequence or maxlen.\n\n if maxlen is set, we will cut all sequence to this maximum\n length.\n\n This swap the axis!\n\n Parameters\n ----------\n seqs: x\n\n labels: y\n \"\"\"\n lengths = [len(s) for s in seqs]\n\n n_samples = len(seqs)\n maxlen = numpy.max(lengths)\n dim = len(seqs[0][0])\n\n x = numpy.zeros((maxlen, n_samples, dim)).astype('int64')\n x_mask = numpy.zeros((maxlen, n_samples)).astype(theano.config.floatX)\n for idx, s in enumerate(seqs):\n x[:lengths[idx], idx] = s\n x_mask[:lengths[idx], idx] = 1.\n\n return x, x_mask, numpy.asarray(labels)\n\n\ndef load_data():\n '''Loads the dataset\n '''\n\n #############\n # LOAD DATA #\n #############\n\n # Load the dataset\n dir = \"/Users/Abner/WorkStation/MedicalProject/code/python/lstmforpredictpatientdensity/data/\"\n file_list = os.listdir(dir)\n\n data_set = []\n for file_name in file_list:\n if os.path.isfile(dir + file_name):\n f = open(dir + file_name)\n for line in f:\n tmp_data = json.loads(line)\n data_set.append(tmp_data)\n f.close()\n #\n data_set_x = []\n data_set_y = []\n week_n = 0\n for i in range(len(data_set) - 24):\n if i % 168 == 0:\n y_duration = numpy.asarray(json.loads(data_set[i + 167]['duration']))\n if i + 168 < len(data_set):\n y_this = numpy.asarray(json.loads(data_set[i]['duration'])[6])\n y_next = numpy.asarray(json.loads(data_set[i + 168]['duration'])[6])\n for j in range(24):\n for k in range(96):\n if y_this[j][k] < 0:\n y_this[j][k] = 0\n if y_next[j][k] < 0:\n y_next[j][k] = 0\n y_duration[6] = y_duration[6] - y_this + y_next\n\n data_x = data_set[i]\n new_x = []\n new_y = []\n w_d = data_x['w_d']\n h = data_x['h']\n duration_y = y_duration[w_d][h]\n new_x.append(w_d)\n new_x.append(h)\n new_x.append(data_x['num'])\n duration_x = numpy.asarray(json.loads(data_x['duration']))\n\n # build x\n for j in range(15):\n\n def x_sum_h(t1):\n sum = 0;\n for i in duration_x[w_d][h][t1 * 4:(t1 + 1) * 4]:\n if i > 0:\n sum += i\n return sum\n\n for k in range(5):\n # 计算一个小时的人数和(duration本来是15分钟为一个时段的)\n time = (j // 5) * 5 + k\n new_x.append(x_sum_h(time))\n h -= 1\n if h < 0:\n h = 23\n w_d -= 1\n if w_d < 0:\n w_d = 6\n\n data_set_x.append(new_x)\n\n # initial y\n def y_sum_h(t1, t2=len(duration_y) / 4):\n sum = 0;\n for i in duration_y[t1 * 4:t2 * 4]:\n if i > 0:\n sum += i\n return sum\n\n new_y.append(data_x['num'])\n\n for i in range(6):\n new_y.append(y_sum_h(i, i + 1))\n new_y.append(y_sum_h(6, 8))\n new_y.append(y_sum_h(8, 11))\n new_y.append(y_sum_h(11, 14))\n 
new_y.append(y_sum_h(14, 17))\n new_y.append(y_sum_h(17))\n\n data_set_y.append(new_y)\n\n # build y\n for i in range(len(data_set_y) - 28):\n new_y = []\n for j in range(28):\n new_y += data_set_y[i + j]\n data_set_y[i] = new_y\n data_set_y = data_set_y[:-28]\n\n n_data = min(len(data_set_x), len(data_set_y))\n\n data_set_x = data_set_x[:n_data]\n data_set_y = data_set_y[:n_data]\n\n # normalization\n e = 1e-8\n data_set_x = numpy.asarray(data_set_x).astype(theano.config.floatX)\n x_max = data_set_x[:, :3].max(axis=0)\n x_min = data_set_x[:, :3].min(axis=0)\n x_deno = (x_max - x_min) + e\n data_set_x[:, :3] = (data_set_x[:, :3] - x_min) / x_deno\n data_set_x[:, :2] = data_set_x[:, :2]*600\n data_set_x[:, 2:3] = numpy.tanh(data_set_x[:, 2:3]) * 500\n data_set_y = numpy.asarray(data_set_y).astype(theano.config.floatX)\n y_max = data_set_y.max(axis=0)\n y_min = data_set_y.min(axis=0)\n y_deno = (y_max - y_min) + e\n data_set_y = ((data_set_y - y_min) / y_deno) - 0.5\n data_set_y = numpy.tanh(data_set_y) * 1000\n normalization_data = dict()\n normalization_data['y_min'] = y_min.tolist()\n normalization_data['y_deno'] = y_deno.tolist()\n\n def rand_data(l, r, n):\n new_set_x = []\n new_set_y = []\n\n for I in range(n):\n length = int(l + random.random() * (r - l))\n begin = int(random.random() * (n_data - length))\n new_set_x.append(data_set_x[begin:begin + length])\n new_set_y.append(data_set_y[begin + length - 1])\n\n return new_set_x, new_set_y\n\n train_set_x = []\n train_set_y = []\n test_set_x = []\n test_set_y = []\n for I in range(1000):\n set_x, set_y = rand_data(80, 100, 10)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(50, 80, 15)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(20, 50, 30)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(10, 20, 25)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(5, 10, 15)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(1, 5, 5)\n train_set_x += set_x\n train_set_y += set_y\n set_x, set_y = rand_data(50, 100, 10)\n test_set_x += set_x\n test_set_y += set_y\n set_x, set_y = rand_data(20, 50, 10)\n test_set_x += set_x\n test_set_y += set_y\n set_x, set_y = rand_data(1, 5, 10)\n test_set_x += set_x\n test_set_y += set_y\n\n return (train_set_x, train_set_y), (test_set_x, test_set_y), normalization_data\n\n\n","sub_path":"traindata.py","file_name":"traindata.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"589813592","text":"\"\"\"Extends the pyaudio.Stream class for further info\"\"\"\n\nimport pyaudio\nfrom typing import Optional\n\nclass Device(pyaudio.Stream):\n \"\"\"\n Extends the pyaudio.Stream class and represents an Audio I/O device.\n\n ...\n\n Attributes\n ----------\n indi : Optional[int]\n The input device's index.\n indo : Optional[int]\n The output device's index.\n \"\"\"\n\n def __init__(self, pa: pyaudio.PyAudio, rate: int, channels: int, format: int, *args, input_device_index: Optional[int] = None, output_device_index: Optional[int] = None, **kwargs):\n self.indi: Optional[int] = input_device_index\n self.indo: Optional[int] = output_device_index\n # note: the original passed *args a second time after the keyword arguments, which would feed the\n # positional arguments to pyaudio.Stream.__init__ twice; the duplicate unpacking is removed here\n super(Device, self).__init__(pa, *args, rate=rate, channels=channels, format=format, input_device_index=input_device_index, output_device_index=output_device_index, **kwargs)\n
pa._streams.add(self)","sub_path":"lib/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"476249855","text":"# P3.py\n#\n# prompts user for two floating-point values, print the result of the \n# first number divided by the second number, with exactly six decimal \n# places scientific notation.\n#\n# date: 08/28/2016\n# author: Chiayo Lin\n# license: GPL 3.0\n#\n\ndividend = int(input(\"Enter the first number: \"))\ndivisor = int(input(\"Enter the second number: \"))\n\nprint(format((dividend / divisor), '.6e'))\n","sub_path":"Chapter 2 Scripts/P3.py","file_name":"P3.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"425729172","text":"# Copyright 2017 The Sunset Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Sequential Module for TensorFlow snt.\n\nA Module that wraps a list of other modules and ops, connecting the output of\neach to the input of the next.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom sunset.python.modules import base\nimport tensorflow as tf\n\n\nclass Sequential(base.AbstractModule):\n \"\"\"Builds a module out of a sequence of callables.\n\n Note that `Sequential` is limited in the range of possible architectures\n it can handle. This is a deliberate design decision; `Sequential` is only\n meant to be used for the simple case of fusing together modules/ops where\n the input of a particular module/op is the output of the previous one. Another\n restriction is that it is not possible to have extra arguments in the `_build`\n method that are passed to the constituents of the module - for example,\n if there is a `BatchNorm` module in `Sequential` and the user wishes to switch\n the `is_training` flag. If this is the desired use case, the recommended\n solution is to use `snt.Module` to wrap a custom function, as shown in the\n following example:\n\n https://github.com/synthai/sunset/examples/module_with_build_args.py\n \"\"\"\n\n def __init__(self, layers, name=\"sequential\"):\n \"\"\"Constructs a Sequential module.\n\n This feeds the output of each layer into the next and returns the output\n of the final layer.\n\n If a layer returns a tuple, it is assumed that this must be unpacked into\n the argument list of the next layer. 
If it is not a tuple, it is simply\n passed through to the next layer unchanged.\n\n Args:\n layers: Iterable of callables to stack together, which can be modules\n or ops.\n name: Name of the module.\n\n Raises:\n TypeError: If `layers` is None or contains any non-callable items.\n \"\"\"\n super(Sequential, self).__init__(name=name)\n\n # Store a copy of the iterable in a tuple to ensure users cannot modify the\n # iterable later, and protect against iterables which can only be read once.\n self._layers = tuple(layers)\n\n is_not_callable = [(i, mod) for i, mod in enumerate(self._layers)\n if not callable(mod)]\n\n if is_not_callable:\n raise TypeError(\"Items {} not callable with types: {}\".format(\n \", \".join(str(i) for i, _ in is_not_callable),\n \", \".join(type(layer).__name__ for _, layer in is_not_callable)))\n\n def _build(self, *args):\n \"\"\"Connects the Sequential module into the graph.\n\n Args:\n *args: A tuple of inputs, to be unpacked as the arguments to the first\n layer.\n\n Returns:\n The output value of the last layer.\n \"\"\"\n net = args\n\n if not self._layers:\n # If the sequential is passed a single arg, this will end up being\n # wrapped in an extra layer of tuple by *args. Normally we internally\n # handle this in the loop below, but if there are no layers we unpack here\n # in order to make Sequential([]) act like an identity, which seems right.\n if len(args) == 1:\n return args[0]\n else:\n return args\n\n for layer in self._layers:\n if isinstance(net, tuple):\n net = layer(*net)\n else:\n net = layer(net)\n\n return net\n\n @property\n def layers(self):\n return self._layers\n\n def get_variables(self, *args, **kwargs):\n \"\"\"Provide a warning that get_variables on Sequential always returns ().\"\"\"\n tf.logging.warning(\n \"Calling Sequential.get_variables, which will always return an empty \"\n \"tuple. get_variables() can only return variables created directly by \"\n \"a Module, or created by submodules directly created inside the \"\n \"Module. Sequential is constructed from already constructed submodules \"\n \"and so this will always be empty. See the documentation for more \"\n \"details, but tl;dr if you need to connect some modules sequentially \"\n \"and call get_variables on the result, writing a simple custom module \"\n \"is the simplest way. 
Another option is to call get_all_variables().\")\n return super(Sequential, self).get_variables(*args, **kwargs)\n","sub_path":"sunset/sunset/python/modules/sequential.py","file_name":"sequential.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"552462228","text":"import os\n\npc='duycuongAI'\npc='300'\n\n\nif(pc=='duycuongAI'):\n    dataset_dir='/media/duycuong/Data/Dataset/ZaloAIChallenge2018/landmark'\n    train_dir = os.path.join(dataset_dir, 'TrainVal/train')\n    val_dir = os.path.join(dataset_dir, 'TrainVal/val')  # no leading slash: an absolute second argument would make os.path.join discard dataset_dir\n    test_dir = os.path.join(dataset_dir, 'Test_Public')\nelse:\n    dataset_dir='/media/atsg/Data/datasets/ZaloAIChallenge2018/landmark'\n    train_dir = os.path.join(dataset_dir, 'TrainVal1_fixed_class2/train')\n    val_dir = os.path.join(dataset_dir, 'TrainVal1_fixed_class2/val')\n    test_dir = os.path.join(dataset_dir, 'landmark/Test_Public')\n\n    # dataset_dir = '/media/atsg/Data/datasets/ImageNet/imagenet'\n    # train_dir = os.path.join(dataset_dir, 'train')\n    # val_dir = os.path.join(dataset_dir, 'val')\n    # test_dir = os.path.join(dataset_dir, 'test')\n\nclasses = 103\nmodel_name= 'resnext50_32x4d' #'resnet18_v2' #'resnext50_32x4d'\ninput_sz=224\n\n#hyper parameters\n\ndataset='ZaloAILandmark'\nbatch_size=32\nepochs=200\nlog_interval=200\nnum_workers=6\nbase_lr=0.01\nlr_decay=0.75\nlr_mode='step'\nlr_decay_epoch='10,20,30,50,80,110,150,200,450,900,1500'\nsave_frequency=5\n\n#training\nresume_param='densenet161_224/2019-05-28_21.24/ZaloAILandmark-densenet161-best-21.params'\nresume_state='densenet161_224/2019-05-28_21.24/ZaloAILandmark-densenet161-best-21.states'\nresume_epoch=22\n\n#testing\npretrained_param='resnext50_32x4d_224/2019-05-24_16.33/ZaloAILandmark-resnext50_32x4d-best-59.params'\nsubmission_prefix='38'\n\n#data analyze\ndata_analyze_dir='data_analyze'\n","sub_path":"scripts/classification/general/config_classification.py","file_name":"config_classification.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"116003127","text":"#!/usr/bin/env python\n\n\nfrom multiprocessing import Process, Pipe, Event\nimport logging\nimport time\n\nimport matplotlib\nmatplotlib.use('GTKAgg')\nfrom gi.repository import Gtk, GObject\nfrom matplotlib.figure import Figure\nfrom backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas\n\nGObject.threads_init()\n\nimport numpy as np\n\n\n\nfrom amps import gtec\nfrom amps import emotiv\nfrom amps.randomamp import RandomAmp\n\nlogging.basicConfig(format='%(asctime)s %(name)-10s %(levelname)8s %(message)s', level=logging.NOTSET)\nlogger = logging.getLogger(__name__)\nlogger.info('Logger started')\n\n\n#amp = gtec.GTecAmp()\n#amp = RandomAmp()\namp = emotiv.Epoc()\n\nclass Gui(object):\n\n\n    def __init__(self, q):\n        self.q = q\n        self.builder = Gtk.Builder()\n        self.builder.add_from_file('gusbamptool.glade')\n        handler = {\n            'onDeleteWindow' : self.onDeleteWindow,\n            'onConnectButtonClicked' : self.onConnectButtonClicked,\n            'onDisconnectButtonClicked' : self.onDisconnectButtonClicked,\n            'onStartButtonClicked' : self.onStartButtonClicked,\n            'onStopButtonClicked' : self.onStopButtonClicked,\n            'onSetFilterButtonClicked' : self.onSetFilterButtonClicked,\n            'onComboBoxChanged' : self.onComboBoxChanged,\n            'onComboBox2Changed' : self.onComboBox2Changed,\n            'onSamplingFrequencyComboBoxChanged' : self.onSamplingFrequencyComboBoxChanged\n        }\n
self.builder.connect_signals(handler)\n window = self.builder.get_object('window1')\n window.show_all()\n\n # set up the figure\n fig = Figure()\n self.canvas = FigureCanvas(fig)\n self.canvas.show()\n self.canvas.set_size_request(800, 600)\n self.axis = fig.add_subplot(111)\n place = self.builder.get_object('box1')\n place.pack_start(self.canvas, True, True, 0)\n place.reorder_child(self.canvas, 1)\n\n self.CHANNELS = 18\n self.PAST_POINTS = 256\n self.SCALE = 30000\n\n self.init_plot()\n GObject.idle_add(self.visualizer)\n\n\n def onDeleteWindow(self, *args):\n Gtk.main_quit(*args)\n\n def onConnectButtonClicked(self, button):\n logger.debug('Connect.')\n\n def onDisconnectButtonClicked(self, button):\n logger.debug('Disconnect.')\n\n def onStartButtonClicked(self, button):\n logger.debug('Start.')\n amp.start()\n\n def onStopButtonClicked(self, button):\n logger.debug('Stop.')\n amp.stop()\n\n def onSetFilterButtonClicked(self, button):\n channels = [True for i in range(16)]\n\n combo = self.builder.get_object('comboboxtext_fs')\n tree_iter = combo.get_active_iter()\n if tree_iter != None:\n model = combo.get_model()\n row_id, name = model[tree_iter][:2]\n fs = int(row_id)\n\n\n if self.builder.get_object('checkbutton_notch').get_active():\n notch_order = self.builder.get_object('spin_order_notch').get_value_as_int()\n notch_hp = self.builder.get_object('spin_hp_notch').get_value()\n notch_lp = self.builder.get_object('spin_lp_notch').get_value()\n notchfilter = (notch_hp, notch_lp, fs, notch_order)\n else:\n notchfilter = None\n\n if self.builder.get_object('checkbutton_band').get_active():\n band_order = self.builder.get_object('spin_order_band').get_value_as_int()\n band_hp = self.builder.get_object('spin_hp_band').get_value()\n band_lp = self.builder.get_object('spin_lp_band').get_value()\n bpfilter = (band_hp, band_lp, fs, band_order)\n else:\n bpfilter = None\n\n amp.set_sampling_ferquency(fs, channels, bpfilter, notchfilter)\n pass\n\n\n def onComboBoxChanged(self, combo):\n logger.debug('ComboBox changed.')\n tree_iter = combo.get_active_iter()\n if tree_iter != None:\n model = combo.get_model()\n row_id, name = model[tree_iter][:2]\n logger.debug(\"Selected: ID=%s, name=%s\" % (row_id, name))\n if row_id == 'Data':\n amp.set_mode('data')\n elif row_id == 'Impedance':\n amp.set_mode('impedance')\n elif row_id == 'Calibration':\n amp.set_mode('calibrate')\n\n\n def onComboBox2Changed(self, combo):\n tree_iter = combo.get_active_iter()\n if tree_iter != None:\n model = combo.get_model()\n row_id, name = model[tree_iter][:2]\n if row_id == 'Sine':\n amp.set_calibration_mode('sine')\n elif row_id == 'Sawtooth':\n amp.set_calibration_mode('sawtooth')\n elif row_id == 'White Noise':\n amp.set_calibration_mode('whitenoise')\n elif row_id == 'Square':\n amp.set_calibration_mode('square')\n elif row_id == 'DLR':\n amp.set_calibration_mode('dlr')\n else:\n logger.error('Unknown row_id: %s' % row_id)\n\n\n def onSamplingFrequencyComboBoxChanged(self, combo):\n tree_iter = combo.get_active_iter()\n if tree_iter != None:\n model = combo.get_model()\n row_id, name = model[tree_iter][:2]\n fs = int(row_id)\n amp.set_sampling_ferquency(fs, [False for i in range(16)], None, None)\n\n\n def init_plot(self):\n for i in range(self.CHANNELS):\n self.axis.plot(0)\n self.canvas.draw()\n self.data = np.array([]).reshape(-1, self.CHANNELS)\n self.data_buffer = []\n self.t2 = time.time()\n self.k = 0\n self.nsamples = 0\n\n\n def visualizer(self):\n t = time.time()\n tmp = []\n 
tmp.append(self.q.recv())\n while self.q.poll():\n i = self.q.recv()\n if i == 'quit':\n return False\n if i is None:\n return True\n tmp.append(i)\n # display #samples / second\n if tmp != None:\n self.nsamples += sum([i.shape[0] for i in tmp])\n self.k += 1\n if self.k == 100:\n sps = self.nsamples / (time.time() - self.t2)\n logger.debug('%.2f samples / second\\r' % sps)\n self.t2 = time.time()\n self.nsamples = 0\n self.k = 0\n # append the new data\n new_data = np.concatenate(tmp)\n self.data = np.concatenate([self.data, new_data])\n self.data = self.data[-self.PAST_POINTS:]\n # plot the data\n dmin = self.data.min()\n dmax = self.data.max()\n dr = (dmax - dmin) * 0.7\n SCALE = dr\n x = [i for i in range(len(self.data))]\n for j, line in enumerate(self.axis.lines):\n line.set_xdata(x)\n line.set_ydata(self.data[:, j] + j * SCALE)\n self.axis.set_ylim(-SCALE, (1 + self.CHANNELS) * SCALE)\n self.axis.set_xlim(i - self.PAST_POINTS, i)\n self.canvas.draw()\n #logger.debug('%.2f FPS' % (1 / (time.time() - t)))\n return True\n\n\ndef data_fetcher(amp, q, e):\n while not e.is_set():\n try:\n data_buffer = amp.get_data()\n except:\n data_buffer = None\n q.send(data_buffer)\n logger.debug('Sending visualizer process the stop marker.')\n q.send('quit')\n logger.debug('Terminating data fetcher thread.')\n\n\nif __name__ == '__main__':\n # setup the visualizer process\n parent_conn, child_conn = Pipe()\n # setup the gtk gui\n gui = Gui(child_conn)\n # setup the data fetcher\n e = Event()\n p = Process(target=data_fetcher, args=(amp, parent_conn, e))\n p.daemon = True\n logger.debug(p.daemon)\n p.start()\n Gtk.main()\n logger.debug('Waiting for thread and process to stop...')\n e.set()\n p.join()\n\n","sub_path":"mushu.py","file_name":"mushu.py","file_ext":"py","file_size_in_byte":7849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"134145276","text":"import os\r\nimport shutil\r\nimport numpy as np\r\n\r\nfrom fmpy import read_model_description, extract\r\nfrom fmpy.fmi1 import FMU1Slave\r\nfrom fmpy.fmi2 import FMU2Slave\r\nfrom fmpy.ssp.ssd import System, read_ssd, get_connections, find_connectors, find_components, build_path\r\n\r\n\r\ndef get_value(component, name):\r\n \"\"\" Get a Real variable from a component \"\"\"\r\n\r\n # vr = component.vrs[name]\r\n variable = component.variables[name]\r\n\r\n if variable.type == 'Real':\r\n return component.fmu.getReal([variable.valueReference])[0]\r\n elif variable.type in ['Integer', 'Enumeration']:\r\n return component.fmu.getInteger([variable.valueReference])[0]\r\n elif variable.type == 'Boolean':\r\n value = component.fmu.getBoolean([variable.valueReference])[0]\r\n # return 0.0 if value == 0 else 1.0\r\n return value != 0\r\n else:\r\n raise Exception(\"Unsupported type: \" + variable.type)\r\n\r\n\r\ndef set_value(component, name, value):\r\n \"\"\" Set a Real variable to a component \"\"\"\r\n\r\n variable = component.variables[name]\r\n\r\n if variable.type == 'Real':\r\n component.fmu.setReal([variable.valueReference], [value])\r\n elif variable.type in ['Integer', 'Enumeration']:\r\n component.fmu.setInteger([variable.valueReference], [int(value)])[0]\r\n elif variable.type == 'Boolean':\r\n component.fmu.setBoolean([variable.valueReference], [value != 0.0])\r\n else:\r\n raise Exception(\"Unsupported type: \" + variable.type)\r\n\r\n\r\ndef add_path(element, path=''):\r\n\r\n if isinstance(element, System):\r\n for child in element.elements:\r\n add_path(child, path + child.name + 
'.')\r\n\r\n for connector in element.connectors:\r\n connector.path = path + connector.name\r\n\r\n\r\ndef set_parameters(component, parameters):\r\n \"\"\" Apply the parameters (start values) to a component \"\"\"\r\n\r\n path = component.name\r\n\r\n parent = component.parent\r\n\r\n while parent.parent is not None:\r\n path = parent.name + '.' + path\r\n parent = parent.parent\r\n\r\n for name, value in parameters.items():\r\n if name.startswith(path):\r\n variable_name = name[len(path) + 1:]\r\n set_value(component, variable_name, value)\r\n\r\n\r\ndef instantiate_fmu(component, ssp_unzipdir, start_time, parameters={}):\r\n\r\n fmu_filename = os.path.join(ssp_unzipdir, component.source)\r\n\r\n component.unzipdir = extract(fmu_filename)\r\n\r\n # read the model description\r\n model_description = read_model_description(fmu_filename, validate=False)\r\n\r\n # collect the value references\r\n component.variables = {}\r\n for variable in model_description.modelVariables:\r\n # component.vrs[variable.name] = variable.valueReference\r\n component.variables[variable.name] = variable\r\n\r\n fmu_kwargs = {'guid': model_description.guid,\r\n 'unzipDirectory': component.unzipdir,\r\n 'modelIdentifier': model_description.coSimulation.modelIdentifier,\r\n 'instanceName': component.name}\r\n\r\n if model_description.fmiVersion == '1.0':\r\n component.fmu = FMU1Slave(**fmu_kwargs)\r\n component.fmu.instantiate()\r\n set_parameters(component, parameters)\r\n component.fmu.initialize()\r\n else:\r\n component.fmu = FMU2Slave(**fmu_kwargs)\r\n component.fmu.instantiate()\r\n component.fmu.setupExperiment(startTime=start_time)\r\n set_parameters(component, parameters)\r\n component.fmu.enterInitializationMode()\r\n component.fmu.exitInitializationMode()\r\n\r\n\r\ndef free_fmu(component):\r\n\r\n component.fmu.terminate()\r\n component.fmu.freeInstance()\r\n try:\r\n shutil.rmtree(component.unzipdir)\r\n except Exception as e:\r\n print(\"Failed to remove unzip directory. 
\" + str(e))\r\n\r\n\r\ndef do_step(component, time, step_size):\r\n\r\n # set inputs\r\n for connector in component.connectors:\r\n if connector.kind == 'input':\r\n set_value(component, connector.name, connector.value)\r\n\r\n # do step\r\n component.fmu.doStep(currentCommunicationPoint=time, communicationStepSize=step_size)\r\n\r\n # get outputs\r\n for connector in component.connectors:\r\n if connector.kind == 'output':\r\n connector.value = get_value(component, connector.name)\r\n\r\n\r\ndef simulate_ssp(ssp_filename, start_time=0.0, stop_time=None, step_size=None, parameters={}, input={}):\r\n \"\"\" Simulate a system of FMUs \"\"\"\r\n\r\n if stop_time is None:\r\n stop_time = 1.0\r\n\r\n if step_size is None:\r\n step_size = stop_time * 1e-2\r\n\r\n ssd = read_ssd(ssp_filename)\r\n\r\n add_path(ssd.system)\r\n\r\n components = find_components(ssd.system)\r\n connectors = find_connectors(ssd.system)\r\n connections = get_connections(ssd.system)\r\n\r\n # resolve connections\r\n connections_reversed = {}\r\n\r\n for a, b in connections:\r\n connections_reversed[b] = a\r\n\r\n new_connections = []\r\n\r\n # trace connections back to the actual start connector\r\n for a, b in connections:\r\n\r\n # if isinstance(b.parent, System):\r\n # continue\r\n\r\n while isinstance(a.parent, System) and a.parent.parent is not None:\r\n a = connections_reversed[a]\r\n\r\n new_connections.append((a, b))\r\n\r\n # for a, b in new_connections:\r\n # #print(type(a.parent), a.kind, '->', type(b.parent), b.kind)\r\n # print(a.path, '->', b.path)\r\n\r\n connections = new_connections\r\n\r\n # extract the SSP\r\n ssp_unzipdir = extract(ssp_filename)\r\n\r\n # initialize the connectors\r\n for connector in connectors:\r\n connector.value = 0.0\r\n\r\n # instantiate the FMUs\r\n for component in components:\r\n instantiate_fmu(component, ssp_unzipdir, start_time, parameters)\r\n\r\n time = start_time\r\n\r\n rows = [] # list to record the results\r\n\r\n # simulation loop\r\n while time < stop_time:\r\n\r\n # apply input\r\n for connector in ssd.system.connectors:\r\n if connector.kind == 'input' and connector.name in input:\r\n connector.value = input[connector.name](time)\r\n\r\n # perform one step\r\n for component in components:\r\n do_step(component, time, step_size)\r\n\r\n # apply connections\r\n for start_connector, end_connector in connections:\r\n end_connector.value = start_connector.value\r\n\r\n # get the results\r\n row = [time]\r\n\r\n for connector in connectors:\r\n row.append(connector.value)\r\n\r\n # append the results\r\n rows.append(tuple(row))\r\n\r\n # advance the time\r\n time += step_size\r\n\r\n # free the FMUs\r\n for component in components:\r\n free_fmu(component)\r\n\r\n # clean up\r\n shutil.rmtree(ssp_unzipdir)\r\n\r\n dtype = [('time', np.float64)]\r\n\r\n for connector, value in zip(connectors, rows[0][1:]):\r\n if type(value) == bool:\r\n dtype.append((connector.path, np.bool_))\r\n elif type(value) == int:\r\n dtype.append((connector.path, np.int32))\r\n else:\r\n dtype.append((connector.path, np.float64))\r\n\r\n # convert the results to a structured NumPy array\r\n return np.array(rows, dtype=np.dtype(dtype))\r\n","sub_path":"fmpy/ssp/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576080751","text":"from abstract.AbstractSolution import AbstractSolution\nfrom helpers.math.naturals import Naturals\n\nclass 
Euler14(AbstractSolution):\n def getSolution(self): return 837799\n\n def solve(self):\n nat = Naturals()\n\n high_count = 0\n candidate = 0\n\n for i in range(2, 10**6):\n count = nat.collatz_count(i)\n\n if count > high_count:\n high_count = count\n candidate = i\n\n return candidate\n","sub_path":"batch2/euler14.py","file_name":"euler14.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496691245","text":"from rest_framework import serializers\nfrom .models import (\n Event,\n FamilyTree,\n Mariage,\n Media,\n Person,\n)\n\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\nclass BasicFamilyTreeSerializer(serializers.ModelSerializer):\n class Meta:\n model = FamilyTree\n fields = [\n 'id',\n 'name',\n 'description',\n ]\n\n\nclass MariageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Mariage\n fields = [\n 'id',\n 'person_1',\n 'person_2',\n 'mariage_date',\n 'divorce_date'\n ]\n\n def to_internal_value(self, data):\n if(data['mariage_date'] == ''):\n data['mariage_date'] = None\n if(data['divorce_date'] == ''):\n data['divorce_date'] = None\n return super().to_internal_value(data)\n\n\nclass BasicPersonSerializer(serializers.ModelSerializer):\n mariages = MariageSerializer(many=True)\n\n class Meta:\n model = Person\n fields = [\n 'id',\n 'name',\n 'surname',\n 'x',\n 'y',\n 'sex',\n 'mariages'\n ]\n\n\nclass EventSerializer(serializers.ModelSerializer):\n class Meta:\n model = Event\n fields = [\n 'id',\n 'person',\n 'title',\n 'description',\n 'date',\n 'icon'\n ]\n\n def to_internal_value(self, data):\n if(data['date'] == ''):\n data['date'] = None\n return super().to_internal_value(data)\n\n\nclass FamilyTreeSerializer(serializers.ModelSerializer):\n persons = BasicPersonSerializer(many=True)\n\n class Meta:\n model = FamilyTree\n fields = [\n 'id',\n 'name',\n 'description',\n 'persons',\n ]\n\n\nclass MediaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Media\n fields = [\n 'id',\n 'person',\n 'name',\n 'file'\n ]\n\n\nclass PersonSerializer(serializers.ModelSerializer):\n medias = MediaSerializer(many=True, read_only=True)\n events = EventSerializer(many=True, read_only=True)\n mariages = MariageSerializer(many=True, read_only=True)\n\n class Meta:\n model = Person\n fields = [\n 'id',\n 'family_tree',\n 'father',\n 'mother',\n 'name',\n 'surname',\n 'x',\n 'y',\n 'birth_date',\n 'nationality',\n 'sex',\n 'birth_place',\n 'death_date',\n 'death_cause',\n 'medias',\n 'events',\n 'mariages'\n ]\n\n def to_internal_value(self, data):\n if(data['birth_date'] == ''):\n data['birth_date'] = None\n if(data['death_date'] == ''):\n data['death_date'] = None\n return super().to_internal_value(data)\n\n\nclass UserSerializer(serializers.ModelSerializer):\n password = serializers.CharField()\n\n class Meta:\n model = User\n fields = [\n 'username',\n 'password'\n ]\n\n def create(self, validated_data):\n user = User.objects.create_user(\n username=validated_data['username'],\n password=validated_data['password']\n )\n\n return user\n","sub_path":"kronikarz/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19081366","text":"#Author: Eric Gann\n#Date: 5.2.16\n#Name: Methylated_Sites \n#Description: This program takes fasta sequences, and determines the number of each methylated motif, \n#the 
number of individual bases methylated, and the percentage of bases methylated. \n\n#import division\n\nfrom __future__ import division\n\n#All user input\nuser1 = raw_input('What is the file that is being searched in? ')\nuser2 = raw_input('What is the motif/important position file? ')\nuser3 = str(raw_input('What is the output file name (Do not need to include file type [.txt e.g])? '))\nuser4 = raw_input('Do you want a visual output of the data? [YES or NO] ')\n\n#Append user3 to have the output attached to it \noutfile_name = user3 + '.txt'\noutfile_visual_name = user3 + '_visual.txt'\noutfile_special = user3 + \"_overlaps.txt\"\n\n#import file to search within\n\nfrom collections import OrderedDict\nID = ''\ndict_fasta = OrderedDict() # Make an empty dictionary of genes\n\n# Open the file\nfasta_file = open(user1, 'r')\n\n# Read the first lines into list and remove the new line\nfasta_format = fasta_file.readlines()\n\nfor line in fasta_format:\n # Clear out leading/trailing whitespace (e.g. '\\n')\n line = line.strip()\n # Check to see if the line is a fasta header\n if line.startswith(\">\") : \n # Make the ID the fasta headers\n ID = line\n # Add the GenBank_ID to the dictionary\n dict_fasta[ID] = \"\"\n else :\n # Add the sequence as values to the GenBank ID\n dict_fasta[ID] += line\n\nfasta_headers = []\nsequences_of_fasta = []\n\nlen_seq = []\n\nfor a in dict_fasta:\n\tfasta_headers.append(a) \n\tsequences_of_fasta.append(dict_fasta[a])\n\t\nfor x in sequences_of_fasta:\n\ty = len(x)\n\tlen_seq.append(y)\n\n#import file with motif of interest and important position in motif \nf = open(user2,'r') \ninfile = f.readlines() \nf.close()\n\n#Remove headers \ndel infile[0]\n\n#Separate by columns\nper_row = []\nfor x in infile:\n\tper_row.append(x.strip().split('\\t'))\n\t\nper_column = zip(*per_row) \n\n#Motifs being searched\nmotifs_to_search = per_column[0]\nimportant_pos_in_motif = per_column[1]\n\nMotif_and_Imp_pos = OrderedDict(zip(motifs_to_search,important_pos_in_motif))\n\n#import regular expressions \nimport re\n\n#Use re.compile/p.finditer to get location of each motif \n#searched, and the position of the special character based \n# on where it is located in the motif\ncoordinates = []\nnumber_of_motifs = []\nnames_of_motifs = []\nimportant_pos_location = []\nlocation_and_motif = []\n\nfor seq in dict_fasta:\n\tstring = dict_fasta[seq]\n\tc = []\n\td = []\n\te = []\n\tf = []\n\t\n\th = []\n\tfor a in Motif_and_Imp_pos: \n\t\tg = []\n\t\tf.append(a)\n\t\tg.append(a)\n\t\tpos_in_mot = int(Motif_and_Imp_pos[a])\n\t\tp = re.compile(a)\n\t\tmotif_start = []\n\t\tmotif_end = []\n\t\tpositon_of_special_pos = []\n\t\tfor m in p.finditer(string):\n\t\t\tmotif_start.append(m.start())\n\t\t\tmotif_end.append(m.end())\n\t\tfor b in motif_start:\n\t\t\tpos = b + pos_in_mot\n\t\t\tpositon_of_special_pos.append(pos)\n\t\tcoordinate_pairs = zip(motif_start,motif_end)\n\t\te.append(len(coordinate_pairs))\n\t\tc.append(coordinate_pairs)\n\t\td.append(positon_of_special_pos)\n\t\tg.append(positon_of_special_pos)\n\t\th.append(g)\n\tcoordinates.append(c)\n\timportant_pos_location.append(d)\n\tnumber_of_motifs.append(e)\n\tnames_of_motifs.append(f)\n\t\n\tlocation_and_motif.append(h)\n#This combines all duplicate locations \nAll_important_positions = []\n\nfor x in important_pos_location:\n\tpositions = []\n\tfor y in x:\n\t\tfor z in y:\n\t\t\tpositions.append(z)\n\tAll_important_positions.append(positions)\n\t\n#This gets rid of all duplicates to give a count of the \n#total number of potential 
methylated bases \nImportant_pos_no_duplicates = []\nImportant_pos_plus_duplicates = []\nImportant_pos_no_dup_counts = []\nfor x in All_important_positions:\n\tfixed = list(set(x))\n\tImportant_pos_no_duplicates.append(fixed) \n\tImportant_pos_no_dup_counts.append(len(fixed))\n\tfor y in x:\n\t\tImportant_pos_plus_duplicates.append(y)\n\n#This gives just the duplicates in each string searched\nImportant_position_duplicates = []\n\nfor x in All_important_positions:\n\tz = []\n\tfor y in x:\n\t\tif x.count(y) > 1:\n\t\t\tz.append(y) \n\tImportant_position_duplicates.append(list(set(z)))\n\t\ndictionary_list = [] \n\nfrom collections import defaultdict\n\nfor a in location_and_motif:\n\tposition_dict = defaultdict(list)\n\tfor b in a:\n\t\tmotif_name = b[0]\n\t\tmotif_positions = b[1]\n\t\tfor c in motif_positions:\n\t\t\tposition_dict[motif_name].append(c)\n\tdictionary_list.append(position_dict)\n\t\t \noutput_special = zip(Important_position_duplicates,dictionary_list)\n\noutput_special_corrected = []\n\nfor x in output_special:\n\ta = []\n\tb = []\n\timportant_pos_list = x[0]\n\tmotif_and_pos = x[1]\n\tfor motif in motif_and_pos:\n\t\tfor y in motif_and_pos[motif]:\n\t\t\tif y in important_pos_list:\n\t\t\t\ta.append(y)\n\t\t\t\tb.append(motif)\n\tdict_by_position = defaultdict(list)\n\tfor k,v in zip(a,b):\n\t\tdict_by_position[k].append(v)\n\toutput_special_corrected.append(dict_by_position)\n\n#Last step to write to a file\nend = zip(fasta_headers, output_special_corrected)\n\nw = open(outfile_special,'w')\nw.write('Position Methylated\\tMotifs which overlap at that position\\n')\nfor x in end:\n\tw.write('\\n')\n\tw.write(x[0])\n\tw.write('\\n')\n\tdict1 = x[1]\n\tfor y,z in sorted(dict1.iteritems()):\n\t\tw.write(str(y))\n\t\tw.write('\\t')\n\t\tw.write(', '.join(z))\n\t\tw.write('\\n')\n\n\nw.close()\n#Determine the percent of each sequence that has an important site \npercent_of_unique_bases_zip = zip(len_seq, Important_pos_no_dup_counts)\n\npercent_of_unique_bases = []\nfor x in percent_of_unique_bases_zip:\n\tcounts = x[1]\n\tlenseq = x[0]\n\tpercent_count = counts/lenseq\n\tpercent_of_unique_bases.append(percent_count)\n\t\n#write out file \nout_file_headers = ('fasta header', 'length of sequence', motifs_to_search, 'unique methylated bases', 'Percent of unique bases')\n\nfinal_out_file_headers = []\n\nfinal_out_file_headers.append(out_file_headers[0])\nfinal_out_file_headers.append(out_file_headers[1])\nfor y in out_file_headers[2]:\n\tfinal_out_file_headers.append(y)\n\nfinal_out_file_headers.append(out_file_headers[3])\nfinal_out_file_headers.append(out_file_headers[4])\n\nout_file_data = zip(fasta_headers, len_seq, number_of_motifs, Important_pos_no_dup_counts, percent_of_unique_bases)\n\nfinal_out_file_data = []\nfor x in out_file_data:\n\ty = []\n\ty.append(x[0])\n\ty.append(x[1])\n\tfor z in (x[2]):\n\t\ty.append(z)\n\ty.append(x[3])\n\ty.append(x[4])\n\tfinal_out_file_data.append(y)\n\nimport csv\n\nwith open(outfile_name, 'w') as j:\n\twriter = csv.writer(j,delimiter='\\t')\n\twriter.writerow(final_out_file_headers)\n\tfor x in final_out_file_data:\n\t\twriter.writerow(x)\n\n#Ends program if no visual output is wanted\nif user4 == \"NO\":\n\tquit()\n\t\n#Visualize the important positions in a sequences in string \t\n\nset_including_position_of_duplicates = zip(fasta_headers, sequences_of_fasta, Important_pos_no_duplicates)\n\n#Using the locations of the important characters, this builds strings to identify \n#important characters by an asterisk in the outfile 
\n\nvisualizing_important_pos_list = []\n\nfor x in set_including_position_of_duplicates:\n\tvisualizing_important_pos_list_entry = []\n\tcompare_to_important = x[2]\n\t\n\tvisualizing_important_pos = ''\n\t\n\tcount = 0\n\twhile count < len(x[1]):\n\t\tif count in compare_to_important: \n\t\t\tvisualizing_important_pos += '*'\n\t\telse:\n\t\t\tvisualizing_important_pos += ' '\n\t\tcount = count + 1 \n\tvisualizing_important_pos_list.append(visualizing_important_pos)\n\n#Break the strings into a list of the strings chunked to 60 characters\nlist_fasta_split = []\n\nfor x in sequences_of_fasta:\n\ty = []\n\tfor i in range(0, len(x), 60): \n\t\ty.append(x[i:i+60])\n\tlist_fasta_split.append(y) \n\nlist_visual_split = []\n\nfor x in visualizing_important_pos_list:\n\ty = []\n\tfor i in range(0, len(x), 60): \n\t\ty.append(x[i:i+60])\n\tlist_visual_split.append(y) \n\n#Get the strings into one list\nvisualization_output = zip(list_visual_split, list_fasta_split)\n\nvis_out_a = []\nfor a in visualization_output:\n\tvis = a[0]\n\tseq = a[1]\n\tb = zip(vis, seq) \n\tvis_out_a.append(b)\n\n\nvis_out_final = []\nfor a in vis_out_a:\n\tc = []\n\tfor b in a:\n\t\tc.append(b[0])\n\t\tc.append(b[1])\n\tvis_out_final.append(c)\n\nVis_out_with_headers = zip(fasta_headers, vis_out_final) \n\nVis_out_final_with_headers = []\n\nfor x in Vis_out_with_headers:\n\tadd = []\n\tadd.append(x[0])\n\tseq = x[1]\n\tfor a in seq:\n\t\tadd.append(a)\n\tVis_out_final_with_headers.append(add)\n\n#write to a file\n\nimport csv \n\nwith open(outfile_visual_name, 'w') as h:\n\twriter = csv.writer(h,delimiter='\\n')\n\tfor row in Vis_out_final_with_headers:\n\t\twriter.writerow(row)\n\nquit()\n","sub_path":"Insilico-Motifs.py","file_name":"Insilico-Motifs.py","file_ext":"py","file_size_in_byte":8446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"579641565","text":"# Stealth-Skill by Caffeine\n#\n# Version 1.0\n\n\n# Imports\n\n#Python-Imports\nimport math\n\n# ES-Imports\nimport es\nimport playerlib\nimport gamethread\n\n# RPG-Imports\nfrom infinityrpg import playerlist\n\n\n# Script\nskillname = 'Poisonnade'\n\n\nnades = []\n\n\ndef unload():\n gamethread.cancelDelayed('rpg_%s' %(skillname)) \n\n\ndef round_start(ev):\n gamethread.delayedname(1.0, 'rpg_%s' %(skillname), rpg_poison, ())\n \n \ndef round_end(ev):\n gamethread.cancelDelayed('rpg_%s' %(skillname)) \n del nades[:]\n\n \ndef smokegrenade_detonate(ev): \n userid = ev['userid']\n level = playerlist[userid].GetSkillLevel(skillname)\n if level > 0:\n team = int(ev['es_userteam'])\n save = (float(ev['x']), float(ev['y']), float(ev['z']), team, level) \n nades.append(save)\n gamethread.delayed(19.0, rpg_remove_poison, (save,))\n \n # Make the smoke colourfull\n index = es.createentity(\"light_dynamic\",\"mylight%s\" % userid)\n es.entitysetvalue(index, \"angles\", \"-90 0 0\")\n if team == 3:\n es.entitysetvalue(index,\"_light\", \"0 0 255\")\n elif team == 2:\n es.entitysetvalue(index,\"_light\", \"255 0 0\")\n es.entitysetvalue(index, \"_inner_cone\",\"-89\")\n es.entitysetvalue(index, \"_cone\",\"-89\")\n es.entitysetvalue(index, \"pitch\",\"-90\")\n es.entitysetvalue(index, \"distance\",\"256\")\n es.entitysetvalue(index, \"spotlight_radius\",\"96\")\n es.entitysetvalue(index, \"origin\",\"%s %s %s\"% (ev['x'], ev['y'], ev['z']))\n es.entitysetvalue(index, \"brightness\",\"5\")\n es.entitysetvalue(index, \"style\",\"6\")\n es.entitysetvalue(index, \"spawnflags\",\"1\")\n es.spawnentity(index)\n 
gamethread.delayed(20.0, es.remove, index)\n es.server.queuecmd('es_xfire %s mylight%s DisableShadow' % (userid,userid))\n es.server.queuecmd('es_xfire %s mylight%s addoutput \"OnUser1 !self,kill,-1,24\"' % (userid,userid))\n es.server.queuecmd('es_xfire %s mylight%s addoutput \"OnUser2 !self,Toggle,-1,21\"' % (userid,userid))\n es.server.queuecmd('es_xfire %s mylight%s addoutput \"OnUser3 !self,TurnOff,-1,23\"' % (userid,userid))\n es.server.queuecmd('es_xfire %s mylight%s addoutput \"OnUser4 !self,spawnflags,3,19\"' % (userid,userid)) \n \n \ndef rpg_poison():\n gamethread.delayedname(1.0, 'rpg_%s' %(skillname), rpg_poison, ())\n for i in nades:\n x = i[0]\n y = i[1]\n z = i[2]\n team = i[3]\n damage = i[4]\n if team == 2:\n for j in playerlib.getUseridList('#ct, #alive'):\n xp, yp, zp = es.getplayerlocation(j)\n zp += 68 #the head is 68 units above a players feet\n # Inside the smoke\n if math.sqrt((xp-x)**2 + (yp-y)**2 + (zp-z)**2) <= 170:\n es.server.queuecmd('damage %s %s' %(j, damage))\n elif team == 3:\n for j in playerlib.getUseridList('#t, #alive'):\n xp, yp, zp = es.getplayerlocation(j)\n # Inside the smoke\n if math.sqrt((xp-x)**2 + (yp-y)**2 + (zp-z)**2) <= 170:\n es.server.queuecmd('damage %s %s' %(j, damage))\n \n \n \ndef rpg_remove_poison(save):\n try:\n nades.remove(save)\n except:\n pass ","sub_path":"Skills/Poisonnade/Poisonnade.py","file_name":"Poisonnade.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"502879562","text":"#!usr/bin/python\n# -*- coding: utf-8 -*-\n\n# 加载模块\nfrom ctypes import *\nfrom threading import Thread\nfrom Package.LogView import *\n\nImageAcqDLL = cdll.LoadLibrary(r\"ImageAcq.dll\")\nThresholdDLL = cdll.LoadLibrary(r\"C:\\Users\\SUNRISE\\Documents\\Visual Studio 2013\\Projects\\Threshold\\x64\\Debug\\Threshold.dll\")\n\nclass ReadImage_Threshold_T1:\n\n\n imageAcqHandle = c_longlong(0)\n image = c_longlong(0)\n lRet = 0\n\n @staticmethod\n def OnCreate_Event():\n LogView.normal(b\"*** ReadImage_Threshold::Open Camera ***\")\n ImageAcqDLL.Init(byref(ReadImage_Threshold_T1.imageAcqHandle))\n ThresholdDLL.InstallLogger(GlobalSetting.logViewHandle)\n ThresholdDLL.Initialize()\n\n @staticmethod\n def OnRunning_Event():\n ImageAcqDLL.ReadImage(ReadImage_Threshold_T1.imageAcqHandle, \\\n r'D:\\straight_edge.bmp', byref(ReadImage_Threshold_T1.image))\n LogView.normal(b\"*** ReadImage 123***\")\n UtilDLL.Vision3000_Halcon_ImageFitWindow(ReadImage_Threshold_T1.image, GlobalSetting.windowHandle2)\n UtilDLL.Vision3000_Halcon_DisplayObj(GlobalSetting.windowHandle2, ReadImage_Threshold_T1.image)\n ThresholdDLL.SetParameter(b'winHandle', GlobalSetting.windowHandle1)\n ThresholdDLL.SetParameter(b'image', ReadImage_Threshold_T1.image)\n ThresholdDLL.SetParameter(b'lowValue', 50)\n ThresholdDLL.SetParameter(b'highValue', 90)\n ThresholdDLL.Action()\n \n @staticmethod\n def OnDestory_Event():\n LogView.normal(b\"*** ReadImage_Threshold::OnDestory_Event ***\")\n ThresholdDLL.Release()\n ImageAcqDLL.Release(ReadImage_Threshold_T1.imageAcqHandle)\n\n\n#!\nif __name__ == \"__main__\":\n pass\n#!\n\n","sub_path":"script/ReadImage_Threshold_T1.py","file_name":"ReadImage_Threshold_T1.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"512823146","text":"#!/usr/bin/env python\nfrom resource_management.libraries.functions.version import format_hdp_stack_version, 
compare_versions\nfrom resource_management import *\n\n# server configurations\nconfig = Script.get_config()\n\nr_libs = filter(lambda x: len(x.strip()) > 0, config['configurations']['r-config']['r.libs'].split(','))\n\npackage_dir = '/var/lib/ambari-agent/cache/stacks/HDP/2.2/services/r-stack'\nresources_dir = package_dir + '/package/resources/'\nscripts_dir = package_dir + '/package/scripts/'\n","sub_path":"package/scripts/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"170811475","text":"# Exercise 1: A Circle of Squares\n'''Write and run a function that draws 40 squares, turning right 10\ndegrees after each square.'''\n\nfrom turtle import *\n\ndef square():\n for i in range(4):\n forward(100)\n right(90)\n\nfor i in range(40):\n square()\n right(10)\n \n","sub_path":"turtleEx1.py","file_name":"turtleEx1.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30889585","text":"import argparse\nimport sys\nfrom datetime import datetime\nimport cv2\nimport imageio\nimport torch\nsys.path.insert(0, '../')\nsys.dont_write_bytecode = True\n\nfrom torch.optim import lr_scheduler\nfrom tensorboardX import SummaryWriter\n\nimport torch\nfrom torch.utils.data import Dataset,DataLoader\nfrom torch import nn, optim\nimport torchvision\nfrom PIL import Image\n\nfrom util import *\nfrom net import *\n\n\ndef train(train_loader, model, optimizer, args):\n # multi resolution\n size_rates = [0.5, 0.75, 1, 1.25, 1.5]\n\n args.log_path = '../log/' + args.model\n save_path = '../model/' + args.model\n sw = SummaryWriter(args.log_path)\n\n loss_sal_record, loss_edge_record = AvgMeter(), AvgMeter()\n # loss_dice_record, loss_record = AvgMeter(), AvgMeter()\n total_step = len(train_loader)\n\n scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.2)\n\n global_step = 0\n test(model, -1, args)\n for epoch in range(0, args.epoch):\n model.train()\n\n for step, data in enumerate(train_loader, start=1):\n for rate in size_rates:\n optimizer.zero_grad()\n ims, gts, names = data\n # Load data\n ims = ims.cuda()\n gts = gts.cuda()\n #data argument\n apply(ims, torchvision.transforms.RandomHorizontalFlip())\n apply(ims, torchvision.transforms.RandomVerticalFlip())\n # Forward\n trainsize = int(round(args.train_size * rate / 32) * 32)\n if rate != 1:\n ims = F.upsample(ims, size=(trainsize, trainsize), mode='bilinear', align_corners=True)\n gts = F.upsample(gts, size=(trainsize, trainsize), mode='bilinear', align_corners=True)\n pred_sal = model(ims)\n\n loss_sal = nn.BCEWithLogitsLoss()(pred_sal, gts)\n # loss_dice=Loss.dice_loss(pred_sal,gts)\n # loss = Loss.structure_loss(pred_sal,gts)\n loss = loss_sal\n loss.backward()\n\n optimizer.step()\n # log = 'Iteration: {:d} SalLoss: {:.4f} DiceLoss:{:.4f} Loss:{:.4f}'.format(global_step,\n # loss_sal.data.cpu().numpy(),loss_dice.data.cpu().numpy(),loss.data.cpu().numpy())\n # open(args.log_path + '.log', 'a').write(log + '\\n')\n if rate == 1:\n loss_sal_record.update(loss_sal.data, args.batch_size)\n # loss_dice_record.update(loss_dice.data,args.batch_size)\n #loss_record.update(loss.data,args.batch_size)\n\n sw.add_scalar('lr', scheduler.get_lr()[0], global_step=global_step)\n sw.add_scalars('SalLoss', {'SalLoss': loss_sal_record.show()},\n global_step=global_step)\n # sw.add_scalars('DiceLoss',{'DiceLoss':loss_dice_record.show()},\n # 
global_step=global_step)\n # sw.add_scalars('Loss', {'Loss':loss_record.show()},\n # global_step=global_step)\n # log = 'Iteration: {:d} SalLoss: {:.4f} DiceLoss:{:.4f} Loss:{:.4f}'.format(global_step,loss_sal_record.show(),\n # loss_dice_record.show(),loss_record.show())\n log = 'Iteration: {:d} SalLoss: {:.4f}'.format(global_step,loss_sal_record.show())\n open(args.log_path + '.log','a').write(log + '\\n')\n if step % 10 == 0 or step == total_step:\n # print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], LR: {:.6f}, SalLoss: {:.4f}, DiceLoss:{:.4f}, Loss:{:.4f}'.\n # format(datetime.now(), epoch, args.epoch, step, total_step, scheduler.get_lr()[0],\n # loss_sal_record.show(),loss_dice_record.show(),loss_record.show()), flush=True)\n print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], LR: {:.6f}, SalLoss: {:.4f}'.format(datetime.now(),epoch,args.epoch,\n step,total_step,scheduler.get_lr()[0],loss_sal_record.show()),flush=True)\n global_step += 1\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if epoch >= 20:\n torch.save(model.state_dict(), save_path + args.model + '_' + '.%d' % epoch + '.pth')\n\n scheduler.step()\n\n test(model, -1, args)\n\n\ndef test(model, epoch, args):\n model.eval()\n for dataset in args.valset:\n save_path = './out/' + args.model + '/' + dataset + '/sal/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n image_root = args.data_path + dataset + '/'\n gt_root = args.data_path + '/gt/'\n test_loader = SKDataset(image_root, gt_root, args.train_size)\n for i in range(test_loader.size):\n image, name = test_loader.load_data()\n image = image.cuda()\n attention = model(image)\n attention = F.upsample(attention, size=(256, 256), mode='bilinear', align_corners=True)\n res = attention.sigmoid().data.cpu().numpy().squeeze()\n # res = (res - res.min()) / (res.max() - res.min() + 1e-8)\n #ret,res_bi=cv2.threshold(res,7,255,cv2.THRESH_BINARY)\n imageio.imsave('../data/result/' + name + '.png', res)\n\n\ndef main():\n # init parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('--epoch', type=int, default=50, help='epoch number')\n parser.add_argument('--lr', type=float, default=0.005, help='learning rate')\n parser.add_argument('--batch_size', type=int, default=8, help='training batch size')\n parser.add_argument('--train_size', type=int, default=256, help='training dataset size')\n parser.add_argument('--trainset', type=str, default='DUTS_TRAIN', help='training dataset')\n parser.add_argument('--channel', type=int, default=30, help='channel number of convolutional layers in decoder')\n parser.add_argument('--is_resnet', type=bool, default=True, help='VGG or ResNet backbone')\n parser.add_argument('--model', type=str, default='baseline', help='VGG or ResNet backbone')\n args = parser.parse_args()\n\n np.random.seed(2020)\n torch.manual_seed(2020)\n torch.cuda.manual_seed(2020)\n\n print('Learning Rate: {} ResNet: {} Trainset: {}'.format(args.lr, args.is_resnet, args.trainset))\n\n # build model\n model = globals()[args.model]()\n model.cuda()\n\n params = model.parameters()\n optimizer = torch.optim.SGD(params, args.lr, momentum=0.9, weight_decay=5e-4)\n # optimizer = torch.optim.Adam(params, args.lr, weight_decay=5e-4)\n\n # dataset\n args.data_path = '../data/'\n image_root = args.data_path + 'train/'\n gt_root = args.data_path + 'gt/'\n train_loader = sk_loader(image_root, gt_root, args.batch_size, args.train_size)\n args.valset = ['test']\n\n # begin training\n print(\"Time to witness the mirracle!\")\n 
train(train_loader, model, optimizer, args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"373064846","text":"import os, sys\nfrom os.path import join, exists\nimport argparse\nimport json\nimport pickle\nimport tensorflow as tf\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\ndef add_stopwords(sentences: list):\n def adding(text):\n return \" \" + text + \" \"\n\n with Pool() as p:\n sentences = p.map(adding, sentences)\n\n return sentences\n\n\ndef tokenization(tokenizer, trans, num_words=10000):\n images, captions = map(list, zip(*trans))\n\n if tokenizer is None:\n tokenizer = tf.keras.preprocessing.text.Tokenizer(\n num_words=num_words, oov_token=\"\", filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~ '\n )\n tokenizer.fit_on_texts(add_stopwords(captions))\n tokenizer.word_index[\"\"] = 0\n tokenizer.index_word[0] = \"\"\n\n # Create the tokenized vectors for train set\n # Pad each vector to the max_length of the captions\n # If you do not provide a max_length value, pad_sequences calculates it automatically\n seqs = tokenizer.texts_to_sequences(add_stopwords(captions))\n seqs = tf.keras.preprocessing.sequence.pad_sequences(seqs, padding=\"post\")\n seqs = list(zip(images, seqs))\n\n return tokenizer, seqs\n\n\ndef load_image_list(image_dir, list_file):\n image_list = []\n with open(list_file, \"r\") as fh:\n for line in fh:\n image_list.append(join(image_dir, line.strip()))\n return image_list\n\n\ndef load_trans(image_list, trans_files):\n trans_all = []\n\n for file in trans_files:\n trans = []\n with open(file, \"r\") as fh:\n for line in fh:\n text = line.rstrip()\n trans.append(text)\n\n trans_all = trans_all + list(zip(image_list, trans))\n\n return trans_all\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Processing Multi30K dataset.\")\n parser.add_argument(\"--image_list\")\n parser.add_argument(\"--tokenizer\")\n parser.add_argument(\"--en_trans\", nargs=\"+\")\n parser.add_argument(\"--de_trans\", nargs=\"+\")\n parser.add_argument(\"--image_dir\")\n parser.add_argument(\"--output_prefix\")\n args = parser.parse_args()\n\n # images\n image_list = load_image_list(args.image_dir, args.image_list)\n\n # raw trans\n en_trans = load_trans(image_list, args.en_trans)\n de_trans = load_trans(image_list, args.de_trans)\n\n # tokenized trans\n if exists(args.tokenizer):\n with open(args.tokenizer, \"rb\") as fh:\n tokenizer = pickle.load(fh)\n en_tokenizer = tokenizer[0]\n de_tokenizer = tokenizer[1]\n else:\n en_tokenizer = None\n de_tokenizer = None\n\n en_tokenizer, en_seqs = tokenization(en_tokenizer, en_trans, 5000)\n de_tokenizer, de_seqs = tokenization(de_tokenizer, de_trans, 10000)\n\n # save files\n with open(args.output_prefix + \".tokenizer.pkl\", \"wb\") as fh:\n pickle.dump([en_tokenizer, de_tokenizer], fh, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(args.output_prefix + \".trans.pkl\", \"wb\") as fh:\n pickle.dump(\n [en_trans, en_seqs, de_trans, de_seqs], fh, protocol=pickle.HIGHEST_PROTOCOL\n )\n\n with open(args.output_prefix + \".image.list.pkl\", \"wb\") as fh:\n pickle.dump(image_list, fh, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n 
sys.exit(main())\n","sub_path":"prepare_multi30k-data/scps/load_multi30k_data.py","file_name":"load_multi30k_data.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"479680535","text":"#!/usr/bin/env python\n\nimport test\n\nb = test.Base()\nd = test.Derived()\n\ntest.fb(b)\ntest.fb(d)\n\n# not possible, fd is only for Derived objects\n# test.fd(b)\ntest.fd(d)\n\nx = test.factory()\ntest.fb(x)\n","sub_path":"boostpython/inheritance/test/inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"219208605","text":"import scipy.stats\nimport math\n\nalpha = 0.01\nreward = 0.01\npunish = 0.01\nbotWeight = 10\n\n\n# 2 tail 1-prop z test\ndef calc_p_value(p, n):\n    std = math.sqrt(0.25 / n)\n    z = (p - 0.5) / std\n    if z > 0:\n        z *= -1\n    p_value = 2 * scipy.stats.norm(0, 1).cdf(z)\n    if p_value >= alpha:\n        return 0\n    return 1\n\n\n# power of the test\n# probability of correctly rejecting a false null hypothesis\ndef calc_power(p, n):\n    std = math.sqrt(.25 / n)\n    z = (p - 0.5) / std\n    zAlpha = scipy.stats.norm(0, 1).ppf(1 - alpha / 2)\n    return scipy.stats.norm(0, 1).cdf(z - zAlpha) + scipy.stats.norm(0, 1).cdf(-z - zAlpha)\n\n#userWeight: list of weights from .1 to 1 representing how important the user is\n#user answer: list of user answers for a specific image\n#map, id, category: input from deep learning network\n#returns updated list of user weights\ndef user_weights(userWeights, userAnswers, map, id, category):\n    yesWeights = 0\n    totalWeights = 0\n    for i in range(len(userWeights)):\n        totalWeights += userWeights[i]\n        yesWeights += userAnswers[i] * userWeights[i]\n    yesWeights += handle_bot(map, id, category)\n    totalWeights += botWeight\n    accept = calc_p_value(yesWeights / totalWeights, totalWeights)\n    newWeights = []\n    ans = yesWeights / totalWeights\n    if (ans > 0.5):\n        ans = 1\n    else:\n        ans = 0\n    if (accept == 1):\n        for i in range(len(userWeights)):\n            if userAnswers[i] == ans:\n                newWeights.append(min(userWeights[i] + reward, 1))\n            else:\n                newWeights.append(max(userWeights[i] - punish, 0.1))\n    return newWeights\n\n\ndef handle_bot(map, id, category):\n    return map[id][category] * botWeight\n\n\n#Testing the z-test\nfor i in range(10, 31):\n    for k in range(math.floor(i / 5) + 1):\n        print(\"With sample size \" + str(i) + \" and \" + str(k) + \" trolls: \" + str(calc_p_value((i - k) / i, i)))\n        print(calc_power((i - k) / i, i))\n","sub_path":"Bitwise/zTest.py","file_name":"zTest.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"467860794","text":"import os\nimport string\nimport re\nimport time\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef get_corpus(): # Tokenizes corpus and returns it\n    corpus_path = '../'\n    transform = str.maketrans('', '', string.punctuation)\n    with open(corpus_path + 'corpus.txt', 'r', ) as f:\n        corpus = f.read().casefold().split()\n    corpus = [i.translate(transform) for i in corpus] # removes ' and - as well which can exist in correct spelling\n\n    return corpus\n\n\ndef get_spelling_errors():\n    spell_path = '../'\n    errors = []\n    with open(spell_path + 'spell-errors.txt', 'r', ) as f:\n        spell = re.split(r'[\\n]', f.read())\n    for i in range(len(spell)):\n        temp = []\n        errors.append([])\n        
temp.extend(re.split(r'[:/,]', spell[i]))\n errors[i].append(temp)\n return errors\n\n\ndef process_spelling_errors(): # duplicates multiple instances of error\n errors = get_spelling_errors()\n for i in errors:\n for j in i:\n for k in j:\n if '*' in k:\n index = k.find('*')\n num = int(k[index+1:])\n j[j.index(k)] = k[:index]\n for t in range(num-1):\n j.append(k[:index])\n return errors\n\n\ndef corpus_probability(): # calculates probability of each word in the corpus and saves it\n corpus = get_corpus()\n words = []\n prob = []\n for i in corpus:\n if i not in words:\n words.append(i)\n prob.append((corpus.count(i))/len(corpus))\n with open('../corpus_word_list.txt', 'w') as f:\n for i in words:\n f.write('%s\\n' % i)\n with open('../corpus_prob_list.txt', 'w') as f:\n for i in prob:\n f.write('%s\\n' % i)\n return words, prob\n\n\ndef load_corpus_prob(): # since it takes long time to compute\n path = '../'\n if not os.path.exists(path + 'corpus_word_list.txt') or not os.path.exists(path + 'corpus_prob_list.txt'):\n words, prob = corpus_probability()\n else:\n with open(path + 'corpus_word_list.txt', 'r') as f:\n words = f.read().split()\n with open(path + 'corpus_prob_list.txt', 'r') as f:\n prob = f.read().split()\n prob = np.asarray(prob, dtype=float)\n return words, prob\n\n\ndef compute_posterior(possible, word): # This method is used to implement an alternative method which is not a correct implementation of noisy channel model\n errors = process_spelling_errors()\n correct = []\n posterior = []\n word = ' ' + word\n for i in possible:\n for j in errors:\n for k in j:\n correct.append(k[0])\n if k[0] == i:\n num = k.count(word)\n prob = num/(len(k)-1)\n posterior.append(prob)\n else:\n posterior.append(0)\n return posterior, correct\n\n\ndef produce_correction(possible, word): # This method is used to implement an alternative method which is not a correct implementation of noisy channel model\n posterior, correct = compute_posterior(possible, word)\n corpus_words, prob = load_corpus_prob()\n prior = []\n num = 0\n for i in range(len(posterior)):\n if posterior[i] != 0:\n num += 1\n true_spell = correct[i]\n if corpus_words.__contains__(true_spell):\n prior_index = corpus_words.index(true_spell)\n prior.append(prob[prior_index])\n else:\n prior.append(0)\n else:\n prior.append(0)\n prior = np.asarray(prior, dtype=float)\n posterior = np.asarray(posterior, dtype=float)\n total = np.multiply(prior, posterior)\n if np.max(total) == 0:\n corrected_spelling = ''\n else:\n true_index = np.argmax(total)\n corrected_spelling = correct[true_index]\n return corrected_spelling\n\n\ndef create_predictions(word): # This function creates all 1 Damerau-Levenshtein edit distance words for the given word.\n possible = []\n possible = insertion(word, possible)\n possible = deletion(word, possible)\n possible = substitution(word, possible)\n possible = transpose(word, possible)\n return possible\n\n\ndef get_alphabet(): # returns alphabet\n list = [i for i in 'abcçdefgğhıijklmnoöprsştuüvyz']\n return list\n\n\ndef insertion(word, possible): # (N+1)*alphabet insertion\n n = len(word)\n alph = get_alphabet()\n for i in range(n+1):\n for j in alph:\n temp = word[0:i] + j + word[i:]\n possible.append(temp)\n return possible\n\n\ndef deletion(word, possible): # N deletion\n n = len(word)\n for i in range(n):\n temp = word[0:i] + word[i+1:]\n possible.append(temp)\n return possible\n\n\ndef substitution(word, possible): # N*alphabet substitution\n n = len(word)\n alph = get_alphabet()\n for i in 
range(n):\n for j in alph:\n temp = word[0:i] + j + word[i+1:]\n possible.append(temp)\n return possible\n\n\ndef transpose(word, possible): # N-1 transpose\n n = len(word)\n for i in range(n-1):\n p1 = word[i]\n p2 = word[i+1]\n temp = word[0:i] + p2 + p1 + word[i+2:]\n possible.append(temp)\n return possible\n\n\ndef confusion_insertion(correct, errors):\n alph = get_alphabet()\n c_insertion = np.zeros([28, 28])\n for i in range(len(correct)):\n true_temp = correct[i]\n for j in errors[i]:\n for k in range(len(j)):\n temp = j[0:k] + j[k+1:]\n if temp == true_temp:\n if k+1 == len(j):\n try:\n fc = alph.index(j[k])\n tc = alph.index(j[k - 1])\n c_insertion[fc][tc] += 1\n except ValueError:\n break\n break\n else:\n if j[k] != j[k+1]:\n try:\n fc = alph.index(j[k])\n tc = alph.index(j[k - 1])\n c_insertion[fc][tc] += 1\n except ValueError:\n break\n break\n return c_insertion\n\n\ndef confusion_deletion(correct, errors):\n alph = get_alphabet()\n c_deletion = np.zeros([28, 28])\n for i in range(len(correct)):\n true_temp = correct[i]\n for j in errors[i]:\n for k in range(len(j)):\n for l in range(len(alph)):\n temp = j[0:k+1] + alph[l] + j[k+1:]\n if temp == true_temp:\n try:\n tc = alph.index(j[k])\n fc = l\n c_deletion[fc][tc] += 1\n except ValueError:\n break\n break\n return c_deletion\n\n\ndef confusion_substitution(correct, errors):\n alph = get_alphabet()\n c_subs = np.zeros([28, 28])\n for i in range(len(correct)):\n true_temp = correct[i]\n for j in errors[i]:\n for k in range(len(j)):\n for l in range(len(alph)):\n temp = j[0:k] + alph[l] + j[k+1:]\n if true_temp == temp:\n try:\n tc = l\n fc = alph.index(j[k])\n c_subs[fc][tc] += 1\n except ValueError:\n break\n break\n return c_subs\n\n\ndef confusion_transposition(correct, errors):\n alph = get_alphabet()\n t_subs = np.zeros([28, 28])\n for i in range(len(correct)):\n true_temp = correct[i]\n for j in errors[i]:\n for k in range(len(j)-1):\n p1 = j[k]\n p2 = j[k+1]\n temp = j[0:k] + p2 + p1 + j[k+2:]\n if true_temp == temp:\n try:\n tc = alph.index(p2)\n fc = alph.index(p1)\n t_subs[fc][tc] += 1\n except ValueError:\n break\n break\n return t_subs\n\n\ndef get_correct_words(errors):\n correct = []\n for i in errors:\n for j in i:\n correct.append(j[0])\n j.remove(j[0])\n errors = remove_spaces(errors)\n return correct, errors\n\n\ndef remove_spaces(errors):\n e = []\n for i in range(len(errors)):\n e.append([])\n for j in (errors[i]):\n for k in j:\n e[i].append(k[1:].casefold())\n return e\n\n\ndef count_correct(correct): # counts usage of single and double letters and saves it\n if os.path.exists('../characters.npy') and os.path.exists('../bigram.npy'):\n characters = np.load('../characters.npy')\n bigram = np.load('../bigram.npy')\n else:\n alph = get_alphabet()\n characters = np.zeros([28, 1])\n bigram = np.zeros([28, 28])\n for i in range(len(alph)):\n for j in correct:\n num1 = j.count(alph[i])\n characters[i] += num1\n for i in range(len(alph)):\n for j in range(len(alph)):\n for k in correct:\n num2 = k.count(alph[i]+alph[j])\n bigram[i][j] += num2\n np.save(file='../characters.npy', arr=characters)\n np.save(file='../bigram.npy', arr=bigram)\n return characters, bigram\n\n\ndef correction(word, correct, errors, smoothing=False):\n alph = get_alphabet()\n alph_size = len(alph)\n corpus_words, corpus_prob = load_corpus_prob()\n letters, d_letters = count_correct(correct)\n c_deletion = confusion_deletion(correct, errors)\n c_insertion = confusion_insertion(correct, errors)\n c_subs = confusion_substitution(correct, 
errors)\n c_trans = confusion_transposition(correct, errors)\n if smoothing:\n c_deletion += 1\n c_insertion += 1\n c_subs += 1\n c_trans += 1\n letters += alph_size\n d_letters += alph_size\n corrected_words = []\n for k in word:\n possible = create_predictions(k)\n max_len = max(len(i) for i in possible)\n correct_prob = np.zeros([4, max_len])\n for i in range(len(possible)):\n for j in range(len(possible[i])):\n possible_word = possible[i][j]\n\n if possible_word in corpus_words and possible_word in correct: # possible_word in corpus_words and correct vocabulary:\n apriori_prob = corpus_prob[corpus_words.index(possible_word)]\n if i == 0: # insertion corrected the error\n word_index = j//alph_size\n prev_letter = alph.index(k[word_index-1]) # position of the addition\n inserted_letter = j - word_index*alph_size # index of the inserted letter in the alphabet\n no_confusion = c_deletion[inserted_letter, prev_letter]\n posterior_prob = no_confusion/d_letters[prev_letter, inserted_letter]\n prob = apriori_prob*posterior_prob\n correct_prob[i, j] = prob\n elif i == 1: # deletion corrected the error\n try:\n prev_letter = alph.index(k[j-1]) # position of the deletion\n inserted_letter = alph.index(possible_word[j]) # following letter\n no_confusion = c_insertion[prev_letter, inserted_letter]\n posterior_prob = no_confusion / letters[alph.index(possible_word[j-1])]\n prob = apriori_prob*posterior_prob\n except IndexError:\n prob = 0\n correct_prob[i, j] = prob\n elif i == 2: # substitution corrected the error\n word_index = j // alph_size\n prev_letter = alph.index(k[word_index])\n subs_letter = j - word_index*alph_size\n no_confusion = c_subs[prev_letter, subs_letter]\n posterior_prob = no_confusion / letters[subs_letter]\n if not math.isinf(posterior_prob):\n prob = apriori_prob*posterior_prob\n else:\n prob = 0\n correct_prob[i, j] = prob\n\n else: # transposition corrected the error\n current_letter = alph.index(possible_word[j])\n next_letter = alph.index(possible_word[j + 1])\n no_confusion = c_trans[next_letter, current_letter]\n posterior_prob = no_confusion/d_letters[current_letter, next_letter]\n prob = apriori_prob * posterior_prob\n correct_prob[i, j] = prob\n else:\n correct_prob[i, j] = 0\n\n col = np.argmax(np.max(correct_prob, axis=0))\n row = np.argmax(np.max(correct_prob, axis=1))\n if correct_prob[row, col] == 0:\n corrected_word = ''\n else:\n corrected_word = possible[row][col]\n corrected_words.append(corrected_word)\n return corrected_words, c_deletion, c_insertion, c_subs, c_trans\n\n\nif __name__ == '__main__':\n start = time.time()\n if len(sys.argv) > 2:\n input_name = sys.argv[1]\n correct_name = sys.argv[2]\n else:\n input_name = \"test-words-misspelled.txt\"\n correct_name = \"test-words-correct.txt\"\n errors = process_spelling_errors()\n correct, errors = get_correct_words(errors)\n smoothing = [False, True]\n save_imgs = False\n for k in smoothing:\n txt = \"without_smoothing\" if not k else \"with_smoothing\"\n corrected = []\n with open('../' + input_name, 'r') as f:\n word = f.read().split()\n corrected, c_deletion, c_insertion, c_subs, c_trans = correction(word, correct, errors, smoothing=k)\n with open('../' + txt + '_output.txt', 'w') as f:\n for j in corrected:\n f.write('%s\\n' % j)\n with open('../' + correct_name, 'r') as f:\n true_correction = f.read().split()\n if save_imgs:\n plt.imshow(c_deletion)\n plt.savefig('deletion' + txt + '.png')\n plt.imshow(c_insertion)\n plt.savefig('insertion' + txt + '.png')\n plt.imshow(c_subs)\n plt.savefig('subs' + 
txt + '.png')\n plt.imshow(c_trans)\n plt.savefig('trans' + txt + '.png')\n common = 0\n if len(true_correction) == len(corrected):\n for i in range(len(true_correction)):\n if true_correction[i] == corrected[i]:\n common += 1\n success_percentage = (common/len(corrected))*100\n print(\"Accuracy \" + txt + \" : %\" + str(success_percentage))\n print(\"%s minutes\" % ((time.time() - start)/60))\n","sub_path":"Text Preprocesser Project/src/spelling_correction.py","file_name":"spelling_correction.py","file_ext":"py","file_size_in_byte":15169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"83526948","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n # dotabox start\n url(r'dotabox/(?P[0-9]+)/$', views.dotabox, name='dotabox'),\n url(r'dotabox/dotabox_action/', views.dotabox_action, name='dotabox_action'),\n # dotabox end\n\n # briefcases start\n url(r'briefcases/rdcase_action/', views.rdcase_action, name='rdcase_action'),\n url(r'briefcases/(?P[0-9]+)/$', views.briefcases_full, name='briefcases_full'),\n url(r'briefcases/', views.briefcases, name='briefcases'),\n # briefcases end\n\n # magick_ball start\n url(r'magick_ball/', views.magick_ball, name='magick_ball'),\n # magick_ball end\n\n # techies start\n url(r'techies/', views.techies, name='techies'),\n # techies end\n\n url(r'lottery/sell_stuff', views.sell_stuff, name='sell_stuff'),\n]\n","sub_path":"lottery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"115224442","text":"\"\"\"\n先得到行列最大值列表,然后累加行、列最小值减去当前值的值\n\"\"\"\n\n\nclass Solution:\n def maxIncreaseKeepingSkyline(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n added = 0\n top = [] # type:list\n left = [] # type:list\n for i in range(len(grid)):\n left.append(max(grid[i]))\n top.append(max([x[i] for x in grid]))\n for i, row in enumerate(top):\n for j, col in enumerate(left):\n added += min(row, col) - grid[i][j]\n return added\n\n\nif __name__ == \"__main__\":\n grid = [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]]\n solution = Solution()\n print(solution.maxIncreaseKeepingSkyline(grid))\n","sub_path":"medium/807-max-increase-to-keep-city-skyline.py","file_name":"807-max-increase-to-keep-city-skyline.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"388770543","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import odeint\r\n\r\ndef edo(y, t, k, m): \r\n u, omega = y # y é o vetor [u, omega]\r\n dydt = [omega, -k/m*u] # dydt é o vetor contendo as EDOs\r\n return dydt\r\n\r\n# Constantes do sistema\r\nm = c1\r\nk = c2\r\n# Condições iniciais\r\ny0 = [u0, omega0] # u(0)=u0 omega(0)=omega0 (lembre-se, omega é a derivada primeira de u)\r\n\r\nt = np.linspace(Início, Fim, QntPontos) # Estes são os pontos usados para gerar o gráfico\r\n\r\nsol = odeint(edo, y0, t, args=(k, m)) # A função odeint gera a solução\r\n\r\nprint(sol)\r\n# A solução é uma matriz(QntPontos,2). 
A primeira coluna é u(t), e a segunda é omega(t)\r\n\r\n#Apesar da solução nos fornecer valores para u e para omega, vamos apenas visualizar o gráfico de u(t)\r\n#plt.plot(t, sol[:, 1], 'g', label='omega(t)') Para visualizar omega(t)\r\nplt.plot(t, sol[:, 0], 'b', label='u(t)') \r\nplt.legend(loc='best')\r\nplt.xlabel('t')\r\nplt.grid()\r\nplt.show()","sub_path":"Sistema massa-mola/NaoAmortecido.py","file_name":"NaoAmortecido.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28741002","text":"#! /usr/bin/env python\n\nimport sys\nimport os\nimport argparse\nfrom array import *\nimport numpy as mp\nimport ROOT\nimport yaml\nimport math\nROOT.gROOT.SetBatch(True)\nimport utils\n\nfrom pyjetty.mputils import treereader\n\nparser = argparse.ArgumentParser(description='D0 inv mass')\nparser.add_argument('-f', '--ifile', help='input rootfile', type=str, default=None, required=True)\nparser.add_argument('-e', '--energy', help='system energy', type=str, default='5TeV', required=False)\nparser.add_argument('-s', '--system', help='collision system', type=str, default='pp', required=False)\nparser.add_argument('-t', '--trailer', help='special trailer', type=str, default='', required=False)\nparser.add_argument('-r', '--runevents', help='run over tree', type=int, default=1, required=False)\nargs = parser.parse_args()\n\nglobal main_dir, plot_dir, root_dir, ofile, png\n\nmain_dir = \"/home/software/users/napadula/\"\nplot_dir = main_dir + \"plots/\"\nroot_dir = main_dir + \"rootfiles/\"\nofile = \"HistsFromTree_\" + args.energy + \"_\" + args.system + \"_\" + args.trailer + \".root\"\npng = args.system + args.energy + args.trailer + \".png\"\n\ndef run_over_tree(hnamelist=[]):\n\troot_filename = root_dir + ofile\n\trootfile = ROOT.TFile(root_filename, 'RECREATE')\n\trootfile.Close()\n\n\th_invmass_2D = []\n\n\tfor h in range(len(hnamelist)):\n\t\tname = \"InvMass_vs_pt_\" + hnamelist[h]\n\n\t\th_invmass_2D.insert(h, utils.make_hist(name, 55, 1.6, 2.15, 40, 0, 40))\n\n\tutils.fill_hist_from_tree('d0', utils.branch_names, args.ifile, h_invmass_2D, utils.cut_names, utils.cut_type, utils.cut_value_13TeV_Note, hnamelist)\n\n\trootfile = ROOT.TFile(root_filename, 'UPDATE')\n\tfor h in range(len(hnamelist)):\n\t\th_invmass_2D[h].Write()\n\trootfile.Close()\n\ndef open_hist(histname):\n\troot_filename = root_dir + ofile\n\trootfile = ROOT.TFile.Open(root_filename, \"READ\")\t\n\n\tname = \"InvMass_vs_pt_\" + histname\n\t#histname = \"InvMass_vs_pt_reflection\"\n\thist = utils.get_hist_from_file(rootfile, name)\n\n\trootfile.Close()\n\treturn hist\n\ndef fit_and_plot(histname, fittype, fitrange=[], fitparam=[], fitparlim=[]):\n\th_invmass = []\n\tc = []\n\n\thist = open_hist(histname)\n\n\tprint(utils.nbins)\n\n\tfor p in range(utils.nbins):\n\t\tname = \"InvMass\" + histname + str(utils.pt_low[p]) + \"_to_\" + str(utils.pt_high[p])\n\t\n\t\tthefit = utils.set_fit_function(fittype, \"thefit\", fitrange, fitparam, fitparlim)\n\n\t\tptlow = utils.get_bin(hist, utils.pt_low[p], False)\n\t\tpthigh = utils.get_bin(hist, utils.pt_high[p], False)\n\t\t\n\t\th_invmass.insert(p, hist.ProjectionX(name, ptlow, pthigh-1))\n\t\th_invmass[p].Fit(\"thefit\",\"R\")\n\t\n\t\tc.insert(p, ROOT.TCanvas(\"c\"+str(p), \"c\"+str(p), 700, 700))\n\t\tROOT.gStyle.SetOptStat(0)\n\t\ttitle_name = \"Inv Mass \" + histname + \" {} < pt < {}\".format(utils.pt_low[p], utils.pt_high[p])\n\t\tutils.setup_hist_to_draw(h_invmass[p], title_name, 
\"inv mass\", \"\", [1.7, 2.05])\n\t\th_invmass[p].Draw(\"E0\")\n\t\ttitle_name = \"Inv Mass {} {} < pt < {}\".format(\"Reflections\",utils.pt_low[p],utils.pt_high[p])\n\t\tc[p].SaveAs(plot_dir + histname + \"_InvMass_pt_\" + str(utils.pt_low[p]) + \"_to_\" + str(utils.pt_high[p]) + png)\n\n\ndef integrate_hist(hist, xvalues=[], yvalues=[]):\n\tif xvalues:\n\t\tif yvalues:\n\t\t\tintegral_val = hist.Integral(utils.get_bin(hist, xvalues[0]), utils.get_bin(hist, xvalues[1]), utils.get_bin(hist, yvalues[0], False), utils.get_bin(hist, yvalues[1], False))\n\t\telse:\n\t\t\tintegral_val = hist.Integral(utils.get_bin(hist, xvalues[0]), utils.get_bin(hist, xvalues[1]))\n\telse:\n\t\tintegral_val = hist.Integral()\n\n\treturn integral_val\n\ndef main():\n\t#print(\"WTF\")\n\t#run_over_tree([\"signal\",\"reflection\"])\n\thists = [\"signal\",\"reflection\"]\n\thinvmass=[]\n\tfor i, hist in enumerate(hists):\n\t\thinvmass.insert(i, open_hist(hist))\n\t#h_refoversig = make_var_hist(\"ReflectionPercentage\", utils.nbins, utils.pt_edge)\n\t#xrange = [utils.get_bin(hinvmass[0], 1.7), utils.get_bin(hinvmass[0], 2.05)]\n\tfor p in range(utils.nbins):\n\t\tsig_int = integrate_hist(hinvmass[0], [1.7, 2.05], [utils.pt_low[p], utils.pt_high[p]])\n\t\tref_int = integrate_hist(hinvmass[1], [1.7, 2.05], [utils.pt_low[p], utils.pt_high[p]])\n\t\tref_per = ref_int/sig_int\n\t\tref_err = (ref_int/sig_int)*((math.sqrt(ref_int)/ref_int) + (math.sqrt(sig_int)/sig_int))\n\t\tprint(\"{} +/- {}\".format(ref_per, ref_err))\n\t\t\t\n\n\t#fit_and_plot(\"reflection\", \"dgaus\", [1.7, 2.05], [0, 1.87, 0.05, 0, 1.87, 0.1])\n\tfit_and_plot(\"reflection\", \"dgaus\", [1.7, 2.05], [20, 1.8, 0.5, 20, 1.8, 0.1])\t\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pyjetty/alihfjets/makeinvmass.py","file_name":"makeinvmass.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566861927","text":"import random\nfrom urllib import request\n\n\nurl='https://httpbin.org/get'\n#发请求,获取响应对象\n# res=request.urlopen('https://www.sina.com.cn/')\n\n# print(res.getcode())\n\nreq=request.Request(url=url,headers={'User-Agent':\"Mozilla/5.0 (X11; Linux x86_64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/73.0.3683.75 Safari/537.36\"})\nres=request.urlopen(req)\nprint(res.read().decode('utf-8'))\n\n","sub_path":"untitled/month05/sprider/day01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"608687414","text":"'''\n被反爬压制\n'''\n\nimport requests\nimport re\nimport os\nimport time\n\ndef get_folder_url_and_name(url, headers):\n html = requests.get(url, headers = headers)\n html = html.text\n url_and_name = re.findall(r'.*?(.*?).*?', html, re.S)\n for url_name in url_and_name:\n yield{\n 'folder_url' : url_name[0],\n 'folder_name': url_name[1].strip()\n }\n\ndef get_img_url_page(url, headers):\n html = requests.get(url, headers = headers)\n html = html.text\n page = re.findall(r'.*?(\\d{2}).*?', html, re.S)\n url = re.findall(r'.*?', html, re.S)\n yield {\n 'url' : url[0],\n 'page': int(page[0])\n }\n\n\ndef download(url, headers, folder_name): \n pages = url.get('page')\n img_url = url.get('url')[:-6] # https://i.meizitu.net/2019/06/26c\n os.chdir(folder_name)\n \n for page in range(pages):\n image_url = img_url + '%02d.jpg' % (page + 1) # https://i.meizitu.net/2019/06/26c23.jpg\n image = requests.get(image_url, 
headers = headers)\n image = image.text\n with open(image_url[-6:], 'a') as f:\n f.write(image)\n time.sleep(1)\n \ndef main():\n os.mkdir('mm_img')\n os.chdir('mm_img')\n\n url = 'https://www.mzitu.com/xinggan'\n global headers\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3722.400 QQBrowser/10.5.3738.400'\n }\n folder_url_and_name = list(get_folder_url_and_name(url, headers))\n # [{'folder_url': 'https://www.mzitu.com/199385', 'folder_name': 'xxx'}, {}, {}]\n for folder in folder_url_and_name:\n folder_url = folder.get('folder_url')\n folder_name = folder.get('folder_name')\n os.mkdir(folder_name)\n\n img_url_page = get_img_url_page(folder_url, headers)\n for url_page in img_url_page:\n # {'url': 'https://i.meizitu.net/2019/08/14a01.jpg', 'page': 60}\n download(url_page, headers, folder_name)\n\n \nif __name__ == main():\n main()","sub_path":"下载小猫照片(自写完结)/download_mm.py","file_name":"download_mm.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"463193064","text":"#!/usr/bin/env python\nimport re\nimport sys\nfrom time import sleep\n\nimport serial\n\nimport pionUploader_esptool as esptool\nimport json\n\nfrom datetime import datetime\n\nfrom PyQt5.QtCore import QUrl, Qt, QThread, QObject, pyqtSignal, pyqtSlot, QSettings, QTimer, QSize, QIODevice\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtNetwork import QNetworkRequest, QNetworkAccessManager, QNetworkReply\nfrom PyQt5.QtSerialPort import QSerialPortInfo, QSerialPort\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QComboBox, QWidget, QCheckBox, QRadioButton, \\\n QButtonGroup, QFileDialog, QProgressBar, QLabel, QMessageBox, QDialogButtonBox, QGroupBox, QFormLayout, QStatusBar\n\nimport banner\nimport firmwareURL\n\nfrom gui import HLayout, VLayout, GroupBoxH, GroupBoxV, SpinBox, dark_palette\nfrom utils import NoBinFile, NetworkError\n\n__version__ = '1.0.0'\n\nclass ESPWorker(QObject):\n error = pyqtSignal(Exception)\n waiting = pyqtSignal()\n done = pyqtSignal()\n\n def __init__(self, port, actions, **params):\n super().__init__()\n self.command = [\n '--chip', 'esp32',\n '--port', port,\n '--baud', '921600'\n ]\n\n self._actions = actions\n self._params = params\n self._continue = False\n\n @pyqtSlot()\n def run(self):\n esptool.sw.setContinueFlag(True)\n\n try:\n if esptool.sw.continueFlag() and 'write' in self._actions:\n file_path = self._params['file_path']\n file_pathBoot = self._params['file_pathBoot']\n file_pathBootloader = self._params['file_pathBootloader']\n file_pathPart = self._params['file_pathPart']\n command_write = ['--before','default_reset','--after','hard_reset','write_flash','-z','--flash_mode','dio','--flash_freq','40m','--flash_size','detect','0xe000',file_pathBoot,'0x1000',file_pathBootloader,'0x10000',file_path ,'0x8000', file_pathPart]\n if 'erase' in self._actions:\n command_write.append('--erase-all')\n esptool.main(self.command + command_write)\n\n except (esptool.FatalError) as e:\n self.error.emit(e)\n self.done.emit()\n\n def wait_for_user(self):\n self._continue = False\n self.waiting.emit()\n while not self._continue:\n sleep(.1)\n\n def continue_ok(self):\n self._continue = True\n\n def abort(self):\n esptool.sw.setContinueFlag(False)\n\nclass ProcessDialog(QDialog):\n def __init__(self, port, **kwargs):\n super().__init__()\n\n self.setWindowTitle('Preparando seu 
Satélite Educacional...')\n self.setFixedWidth(600)\n\n self.exception = None\n\n esptool.sw.progress.connect(self.update_progress)\n\n self.nam = QNetworkAccessManager()\n \n self.nrDownloads = 0\n\n self.nrBinFile1 = QNetworkRequest()\n self.bin_data1 = b''\n \n self.nrBinFile2 = QNetworkRequest()\n self.bin_data2 = b''\n\n self.nrBinFile3 = QNetworkRequest()\n self.bin_data3 = b''\n\n self.nrBinFile4 = QNetworkRequest()\n self.bin_data4 = b''\n\n self.setLayout(VLayout(5, 5))\n self.actions_layout = QFormLayout()\n self.actions_layout.setSpacing(5)\n\n self.layout().addLayout(self.actions_layout)\n\n self._actions = []\n self._action_widgets = {}\n\n self.port = port\n\n self.file_path = kwargs.get('file_url')\n self.file_pathBoot = kwargs.get('file_urlBoot')\n self.file_pathBootloader = kwargs.get('file_urlBootloader')\n self.file_pathPart = kwargs.get('file_urlPart')\n \n self._actions.append('download')\n \n self.erase = kwargs.get('erase')\n if self.erase:\n self._actions.append('erase')\n\n if self.file_path:\n self._actions.append('write')\n\n self.create_ui()\n self.start_process()\n\n def create_ui(self):\n for action in self._actions:\n pb = QProgressBar()\n pb.setFixedHeight(35)\n self._action_widgets[action] = pb\n self.actions_layout.addRow(action.capitalize(), pb)\n\n self.btns = QDialogButtonBox(QDialogButtonBox.Abort)\n self.btns.rejected.connect(self.abort)\n self.layout().addWidget(self.btns)\n\n self.sb = QStatusBar()\n self.layout().addWidget(self.sb)\n\n def appendBinFile1(self):\n self.bin_data1 += self.bin_reply1.readAll()\n \n def appendBinFile2(self):\n self.bin_data2 += self.bin_reply2.readAll()\n \n def appendBinFile3(self):\n self.bin_data3 += self.bin_reply3.readAll()\n \n def appendBinFile4(self):\n self.bin_data4 += self.bin_reply4.readAll()\n \n def downloadsFinished(self):\n if self.nrDownloads == 4:\n self.run_esp()\n\n def saveBinFile1(self):\n if self.bin_reply1.error() == QNetworkReply.NoError:\n self.file_path = self.file_path.split('/')[-1]\n with open(self.file_path, 'wb') as f:\n f.write(self.bin_data1)\n self.nrDownloads += 1\n self.downloadsFinished()\n else:\n raise NetworkError\n\n def saveBinFile2(self):\n if self.bin_reply2.error() == QNetworkReply.NoError:\n self.file_pathBoot = self.file_pathBoot.split('/')[-1]\n with open(self.file_pathBoot, 'wb') as f:\n f.write(self.bin_data2)\n self.nrDownloads += 1\n self.downloadsFinished()\n else:\n raise NetworkError\n\n def saveBinFile3(self):\n if self.bin_reply3.error() == QNetworkReply.NoError:\n self.file_pathBootloader = self.file_pathBootloader.split('/')[-1]\n with open(self.file_pathBootloader, 'wb') as f:\n f.write(self.bin_data3)\n self.nrDownloads += 1\n self.downloadsFinished()\n else:\n raise NetworkError\n\n def saveBinFile4(self):\n if self.bin_reply4.error() == QNetworkReply.NoError:\n self.file_pathPart = self.file_pathPart.split('/')[-1]\n with open(self.file_pathPart, 'wb') as f:\n f.write(self.bin_data4)\n self.nrDownloads += 1\n self.downloadsFinished()\n else:\n raise NetworkError\n\n def updateBinProgress(self, recv, total):\n self._action_widgets['download'].setValue(recv//total*100)\n\n def download_bin(self):\n self.nrBinFile1.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)\n self.nrBinFile1.setUrl(QUrl(self.file_path))\n self.bin_reply1 = self.nam.get(self.nrBinFile1)\n self.bin_reply1.readyRead.connect(self.appendBinFile1)\n self.bin_reply1.downloadProgress.connect(self.updateBinProgress)\n self.bin_reply1.finished.connect(self.saveBinFile1)\n\n 
self.nrBinFile2.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)\n self.nrBinFile2.setUrl(QUrl(self.file_pathBoot))\n self.bin_reply2 = self.nam.get(self.nrBinFile2)\n self.bin_reply2.readyRead.connect(self.appendBinFile2)\n self.bin_reply2.finished.connect(self.saveBinFile2)\n \n self.nrBinFile3.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)\n self.nrBinFile3.setUrl(QUrl(self.file_pathBootloader))\n self.bin_reply3 = self.nam.get(self.nrBinFile3)\n self.bin_reply3.readyRead.connect(self.appendBinFile3)\n self.bin_reply3.finished.connect(self.saveBinFile3)\n\n self.nrBinFile4.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)\n self.nrBinFile4.setUrl(QUrl(self.file_pathPart))\n self.bin_reply4 = self.nam.get(self.nrBinFile4)\n self.bin_reply4.readyRead.connect(self.appendBinFile4)\n self.bin_reply4.finished.connect(self.saveBinFile4)\n\n def show_connection_state(self, state):\n self.sb.showMessage(state, 0)\n\n def run_esp(self):\n params = {\n 'file_path': self.file_path,\n 'file_pathBoot': self.file_pathBoot,\n 'file_pathBootloader': self.file_pathBootloader,\n 'file_pathPart': self.file_pathPart,\n 'erase': self.erase\n }\n\n self.esp_thread = QThread()\n self.esp = ESPWorker(\n self.port,\n self._actions,\n **params\n )\n esptool.sw.connection_state.connect(self.show_connection_state)\n self.esp.done.connect(self.accept)\n self.esp.error.connect(self.error)\n self.esp.moveToThread(self.esp_thread)\n self.esp_thread.started.connect(self.esp.run)\n self.esp_thread.start()\n\n def start_process(self):\n if 'download' in self._actions:\n self.download_bin()\n self._actions = self._actions[1:]\n else:\n self.run_esp()\n\n def update_progress(self, action, value):\n self._action_widgets[action].setValue(value)\n\n @pyqtSlot()\n\n def stop_thread(self):\n self.esp_thread.wait(2000)\n self.esp_thread.exit()\n\n def accept(self):\n self.stop_thread()\n self.done(QDialog.Accepted)\n\n def abort(self):\n self.sb.showMessage('Aborting...', 0)\n QApplication.processEvents()\n self.esp.abort()\n self.stop_thread()\n self.reject()\n\n def error(self, e):\n self.exception = e\n self.abort()\n\n def closeEvent(self, e):\n self.stop_thread()\n\nclass Tasmotizer(QDialog):\n\n def __init__(self):\n super().__init__()\n self.settings = QSettings('tasmotizer.cfg', QSettings.IniFormat)\n\n self.port = ''\n\n self.nam = QNetworkAccessManager()\n\n self.esp_thread = None\n\n self.setWindowTitle(f'PION Kits Educacionais {__version__}')\n self.setMinimumWidth(480)\n\n self.mode = 0 # BIN file\n self.file_path = ''\n\n self.create_ui()\n\n self.refreshPorts()\n\n def create_ui(self):\n vl = VLayout(3)\n self.setLayout(vl)\n\n # Banner\n banner = QLabel()\n banner.setPixmap(QPixmap(':/banner.png'))\n vl.addWidget(banner)\n\n # Port groupbox\n gbPort = GroupBoxH('Selecionar porta', 3)\n self.cbxPort = QComboBox()\n pbRefreshPorts = QPushButton('Atualizar')\n gbPort.addWidget(self.cbxPort)\n gbPort.addWidget(pbRefreshPorts)\n gbPort.layout().setStretch(0, 4)\n gbPort.layout().setStretch(1, 1)\n\n # Buttons\n self.flash = QPushButton('Gravar firmware!')\n self.flash.setFixedHeight(60)\n self.flash.setStyleSheet('background-color: #0D2556;')\n\n hl_btns = HLayout([50, 3, 50, 3])\n hl_btns.addWidgets([self.flash])\n\n vl.addWidgets([gbPort])\n vl.addLayout(hl_btns)\n\n pbRefreshPorts.clicked.connect(self.refreshPorts)\n self.flash.clicked.connect(self.start_process)\n\n def refreshPorts(self):\n self.cbxPort.clear()\n ports = reversed(sorted(port.portName() for port in 
QSerialPortInfo.availablePorts()))\n for p in ports:\n port = QSerialPortInfo(p)\n self.cbxPort.addItem(port.portName(), port.systemLocation())\n\n def start_process(self):\n try:\n if self.mode == 0:\n self.file_url = firmwareURL.URL+firmwareURL.FIRMWARE\n self.file_urlBoot = firmwareURL.URL+firmwareURL.BOOT\n self.file_urlBootloader = firmwareURL.URL+firmwareURL.BOOTLOADER\n self.file_urlPart = firmwareURL.URL+firmwareURL.PARTITIONS\n\n process_dlg = ProcessDialog(\n self.cbxPort.currentData(),\n file_url=self.file_url,\n file_urlBoot=self.file_urlBoot,\n file_urlBootloader=self.file_urlBootloader,\n file_urlPart=self.file_urlPart,\n backup=False,\n backup_size=0,\n erase=True,\n auto_reset=True\n )\n result = process_dlg.exec_()\n if result == QDialog.Accepted:\n message = 'Programado com Sucesso! \\n\\nSeu kit está sendo reiniciado, isso pode levar algum tempo.'\n QMessageBox.information(self, 'OK', message)\n elif result == QDialog.Rejected:\n if process_dlg.exception:\n QMessageBox.critical(self, 'Error', str(process_dlg.exception))\n else:\n QMessageBox.critical(self, 'Processo Cancelado', 'O processo foi cancelado pelo usuário')\n \n except NetworkError as e:\n QMessageBox.critical(self, 'Erro de rede', e.message)\n\n\ndef main():\n app = QApplication(sys.argv)\n app.setAttribute(Qt.AA_DisableWindowContextHelpButton)\n app.setQuitOnLastWindowClosed(True)\n app.setStyle('Fusion')\n app.setPalette(dark_palette)\n app.setStyleSheet('QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }')\n\n mw = Tasmotizer()\n mw.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pionUploader.py","file_name":"pionUploader.py","file_ext":"py","file_size_in_byte":13076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"121533715","text":"from __future__ import print_function\n\nfrom nose.tools import eq_, ok_, raises\nimport unittest\nimport os\nimport datetime\nimport random\nimport logging\n\nfrom flask import Flask\nfrom flask_appbuilder import AppBuilder, SQLA, Model, has_access, expose\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.views import ModelView, BaseView\n\nfrom sqlalchemy import Column, Integer, String, Date, Float\n\nfrom airflow_webserver.security import init_role\n\nlogging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')\nlogging.getLogger().setLevel(logging.DEBUG)\nlog = logging.getLogger(__name__)\n\n\nclass SomeModel(Model):\n id = Column(Integer, primary_key=True)\n field_string = Column(String(50), unique=True, nullable=False)\n field_integer = Column(Integer())\n field_float = Column(Float())\n field_date = Column(Date())\n\n def __repr__(self):\n return str(self.field_string)\n\n\nclass SomeModelView(ModelView):\n datamodel = SQLAInterface(SomeModel)\n base_permissions = ['can_list', 'can_show', 'can_add', 'can_edit', 'can_delete']\n list_columns = ['field_string', 'field_integer', 'field_float', 'field_date']\n\n\nclass SomeBaseView(BaseView):\n route_base = ''\n\n @expose('/some_action')\n @has_access\n def some_action(self):\n return \"action!\"\n\n\nclass TestSecurity(unittest.TestCase):\n def setUp(self):\n self.app = Flask(__name__)\n self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'\n self.app.config['SECRET_KEY'] = 'secret_key'\n self.app.config['CSRF_ENABLED'] = False\n self.app.config['WTF_CSRF_ENABLED'] = False\n self.db = SQLA(self.app)\n self.appbuilder = AppBuilder(self.app, 
self.db.session)\n self.appbuilder.add_view(SomeBaseView, \"SomeBaseView\", category=\"BaseViews\")\n self.appbuilder.add_view(SomeModelView, \"SomeModelView\", category=\"ModelViews\")\n\n role_admin = self.appbuilder.sm.find_role('Admin')\n self.user = self.appbuilder.sm.add_user('admin', 'admin', 'user', 'admin@fab.org', role_admin, 'general')\n log.debug(\"Complete setup!\")\n\n def tearDown(self):\n self.appbuilder = None\n self.app = None\n self.db = None\n log.debug(\"Complete teardown!\")\n\n def test_init_role_baseview(self):\n role_name = 'MyRole1'\n role_perms = ['can_some_action']\n role_vms = ['SomeBaseView']\n init_role(self.appbuilder.sm, role_name, role_vms, role_perms)\n role = self.appbuilder.sm.find_role(role_name)\n self.assertIsNotNone(role)\n self.assertEqual(len(role_perms), len(role.permissions))\n\n def test_init_role_modelview(self):\n role_name = 'MyRole2'\n role_perms = ['can_list', 'can_show', 'can_add', 'can_edit', 'can_delete']\n role_vms = ['SomeModelView']\n init_role(self.appbuilder.sm, role_name, role_vms, role_perms)\n role = self.appbuilder.sm.find_role(role_name)\n self.assertIsNotNone(role)\n self.assertEqual(len(role_perms), len(role.permissions))\n\n def test_invalid_perms(self):\n role_name = 'MyRole3'\n role_perms = ['can_foo']\n role_vms = ['SomeBaseView']\n with self.assertRaises(Exception) as context:\n init_role(self.appbuilder.sm, role_name, role_vms, role_perms)\n self.assertEqual(\"The following permissions are not valid: ['can_foo']\", str(context.exception))\n\n def test_invalid_vms(self):\n role_name = 'MyRole4'\n role_perms = ['can_some_action']\n role_vms = ['NonExistentBaseView']\n with self.assertRaises(Exception) as context:\n init_role(self.appbuilder.sm, role_name, role_vms, role_perms)\n self.assertEqual(\"The following view menus are not valid: ['NonExistentBaseView']\", str(context.exception))\n","sub_path":"tests/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"421590981","text":"\"\"\"Adds monitoring info so that logging server can parse.\"\"\"\nimport logging\nfrom common import experiment_framework_cfg\nimport random\nimport json\n# List of monitoring Ids.\n\n\n# Total unique requests received.\nTOTAL_REQUESTS = 'TOTAL_REQUESTS'\n\n# Total mentions received, it is different from total request since a equest can contains\n# multiple mentions.\nTOTAL_MENTIONS = 'TOTAL_MENTIONS'\n\n# Latency per one request\nREQUEST_LATENCY = 'REQUEST_LATENCY'\n\n# Since we are doing batch processing, we can only measure average latency\n# per batch.\nAVERAGE_LATENCY_PER_MENTION = 'AVERAGE_LATENCY_PER_MENTION'\n\n# Information about rated mentions\nSPAM_PREDICTION_LATENCY = 'SPAM_PREDICTION_LATENCY'\nPOSITIVE_PREDICTION_LATENCY = 'POSITIVE_PREDICTION_LATENCY'\nRULE_BASED_PREDICTION_LATENCY = 'RULE_BASED_PREDICTION_LATENCY'\nPOSITIVE_BASED_ON_RECOMMEND_LATENCY = 'POSITIVE_BASED_ON_RECOMMEND_LATENCY'\n\n# To keep track of statistics about returned results.\nSENTIMENT_RESULT = 'SENTIMENT_RESULT'\nMENTION_LENGTH = 'MENTION_LENGTH'\n\nMENTION_TYPE = 'MENTION_TYPE'\nTOPIC_ID = 'TOPIC_ID'\n\n# errors from v1 interface\nV1_ERRORS = 'V1_ERRORS'\nV1_ERRORS_TRACE = 'V1_ERRORS_TRACE'\n# errors from v2 interface\nV2_ERRORS = 'V2_ERRORS'\nV2_ERRORS_TRACE = 'V2_ERRORS_TRACE'\n# errors from sending data for extraction\nDATAFLOW_EXTRACTION_ERRORS = 'DATAFLOW_EXTRACTION_ERRORS'\n\n# experiment name\ncurrent_experiment_name = 
''\n\n\ndef add(monitor_id, int_value=0, float_value=0, message='', severity=\"INFO\"):\n \"\"\"\n Args:\n monitor_id -- unique id to identify the monitoring metric, pattern: [a-zA-Z0-9._-]+.\n int_value -- integer value of the metric\n message -- a string containing other information for this message\"\"\"\n logging.info(\n _get_log_string(\n monitor_id,\n current_experiment_name,\n int_value,\n float_value,\n message,\n severity=\"INFO\"))\n\n\ndef _get_log_string(\n monitor_id,\n experiment_name='',\n int_value=0,\n float_value=0,\n message='',\n severity=\"INFO\"):\n kwargs = {\"monitor_id\": monitor_id, \"experiment_name\": experiment_name,\n \"int_value\": int_value, \"float_value\": float_value,\n \"message\": message, \"severity\": severity}\n return '%s' % (json.dumps(kwargs))","sub_path":"common/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"372215018","text":"\"\"\"\nTheExecuSearch spider created on the top of ATSSpider\n\nscrapy crawl the_execu_search -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.execu-search.com/job-seekers/search-results\"\n\nSample URL:\n http://www.execu-search.com/job-seekers/search-results\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\npattern = {\n 'count': compile(r'of\\s*(\\d+)\\s*$'),\n 'ref_id': compile(r'job-seekers\\/([^\\/]*)'),\n}\n\n\nclass TheExecuSearch(ATSSpider):\n\n name = 'the_execu_search'\n\n def parse(self, response):\n sel = Selector(response)\n\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//div[@class=\"result-count\"]/div/text()'\n ).re(pattern['count'])\n if expected_count:\n self.expected_job_count = expected_count[0]\n\n for href in sel.xpath(\n '//table/tbody[@id=\"ResultsTable\"]/tr/td[1]/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n next_page = sel.xpath(\n '//ul[contains(@class, \"pagination\")]/li[@class=\"active\"]/following-sibling::li[1]/a/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[contains(@class, \"job-title\")]/div/h2/span/text()'\n )\n loader.add_xpath(\n 'location',\n '//ul/li/h11[contains(text(), \"LOCATION\")]/following-sibling::p[1]/text()'\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_id']\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//div/div[@class=\"job-description\"]'\n )\n loader.add_xpath(\n 'jobtype',\n '//ul/li/h11[contains(text(), \"JOB TYPE\")]/following-sibling::p[1]/text()'\n )\n loader.add_xpath(\n 'baseSalary',\n '//ul/li/h11[contains(text(), \"SALARY\")]/following-sibling::p[1]/text()'\n )\n loader.add_xpath(\n 'experiencerequirements',\n '//ul/li/h11[contains(text(), \"EXPERIENCE\")]/following-sibling::p[1]/text()'\n )\n loader.add_value('apply_url', response.url)\n\n yield 
loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/the_execu_search.py","file_name":"the_execu_search.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"457638818","text":"from .utils import (\n create_folder,\n set_name,\n unique_everseen,\n is_valid_url,\n get_html,\n get_elements,\n get_page_links,\n get_image_links,\n get_imagebam_htmlcode_links\n)\nimport os\nimport re\nimport sys\nimport urllib\n\n\ndef download_file(url, name, dest=\".\", number=1):\n print(\" {0}) In: {1}\".format(number, url))\n filepath = os.path.join(create_folder(dest), name)\n try:\n res = urllib.request.urlopen(url)\n pic = res.read()\n with open(filepath, 'wb') as f:\n f.write(pic)\n except:\n print(\" !!!! FAIL:\", url)\n print(\" Out: {}\\n\".format(filepath))\n\n\ndef download_album(host, url, name, dest=\".\", delim=\" - \", digits=3, number=1):\n if not is_valid_url(url):\n sys.exit(1)\n\n host = host.lower()\n name = name.lower()\n\n if host == \"imagebam\":\n imagebam(url, name, dest, delim, digits, number)\n elif host == \"imagevenue\":\n imagevenue(url, name, dest, delim, digits, number)\n elif host == \"imgbox\":\n imgbox(url, name, dest, delim, digits, number)\n elif host == \"imgur\":\n imgur(url, name, dest, delim, digits, number)\n else:\n print(\"ERROR: Unsupported image host '{}'\".format(host))\n\n\ndef imagebam(url, name, dest, delim, digits, number):\n print(\"Downloading images from [imagebam]...\\n\")\n\n # gallery page numbers (ascending)\n pages = [int(el.contents[0])\n for el in get_elements(url, \"a.pagination_link\")]\n\n if len(pages) > 0:\n # multi-page gallery\n links = get_imagebam_htmlcode_links(url, pages[-1])\n else:\n # single-page gallery\n links = get_page_links(url, lambda x: \"imagebam.com\" in x)\n\n # remove any duplicate links\n links = list(unique_everseen(links))\n\n # duplicate first link to deal with \"Continue to your image\"\n if len(links) > 0:\n links.insert(0, links[0])\n\n indexes = list(range(len(links)))\n filenumbers = list(range(number, number + len(links)))\n\n regex = re.compile(r'\\.[a-zA-Z]*$', re.IGNORECASE)\n\n from robobrowser import RoboBrowser\n browser = RoboBrowser(parser=\"html.parser\")\n\n from concurrent.futures import ThreadPoolExecutor\n\n def download_helper(index, filenumber):\n browser.open(links[index])\n tags = browser.select('meta')\n image_url = [\n el['content']\n for el in tags if 'property' in el.attrs and el.attrs[\n 'property'] == 'og:image'\n ][0]\n match = regex.search(image_url)\n ext = \".jpg\" if match is None else match.group(0)\n filename = set_name(name, ext, delim, filenumber, digits)\n download_file(image_url, filename, dest, filenumber)\n\n with ThreadPoolExecutor(max_workers=20) as executor:\n executor.map(download_helper, indexes, filenumbers)\n\n\ndef imgbox(url, name, dest, delim, digits, number):\n print(\"Downloading images from [imgbox]...\\n\")\n\n links = ['https://imgbox.com/' + el['href']\n for el in get_elements(url, '#gallery-view-content a')]\n\n indexes = list(range(len(links)))\n filenumbers = list(range(number, number + len(links)))\n\n regex = re.compile(r'(\\.[a-zA-Z]*)$', re.IGNORECASE)\n\n from concurrent.futures import ThreadPoolExecutor\n\n def download_helper(index, filenumber):\n image_url = [el['src'] for el in get_elements(links[index], '#img')][0]\n ext = regex.search(image_url).group(1)\n filename = set_name(name, ext, delim, filenumber, digits)\n download_file(image_url, 
filename, dest, filenumber)\n\n with ThreadPoolExecutor(max_workers=20) as executor:\n executor.map(download_helper, indexes, filenumbers)\n\n\ndef imagevenue(url, name, dest, delim, digits, number):\n print(\"Downloading images from [imagevenue]...\\n\")\n\n links = get_page_links(url, lambda x: \"imagevenue.com\" in x)\n\n regex_base_url = re.compile(r'.*imagevenue.com', re.IGNORECASE)\n regex_ext = re.compile(r'\\.[a-zA-Z]*$', re.IGNORECASE)\n\n for link in links:\n try:\n # source image (i.e. \"Open image in a new tab\")\n img = get_elements(link, \"img#thepic\")\n\n base_url_match = regex_base_url.search(link)\n if base_url_match and img is not []:\n src_url = img[0]['src']\n ext = regex_ext.search(src_url).group(0)\n new_name = set_name(name, ext, delim, number, digits)\n image_url = \"{0}/{1}\".format(base_url_match.group(0), src_url)\n download_file(image_url, new_name, dest, number)\n number += 1\n except:\n pass\n\n\ndef imgur(url, name, dest, delim, digits, number):\n print(\"Downloading images from [imgur]...\\n\")\n\n links = ['https:' + el['src']\n for el in get_elements(url, '.post-image-placeholder, .post-image img')]\n\n regex = re.compile(r'\\.com/\\w*(\\.[a-zA-Z]*)$', re.IGNORECASE)\n\n for image_url in links:\n try:\n ext = regex.search(image_url).group(1)\n new_name = set_name(name, ext, delim, number, digits)\n download_file(image_url, new_name, dest, number)\n number += 1\n except:\n pass\n","sub_path":"image_dl/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"155571763","text":"#! python3\n# flightScraper.py - loops through tailored json inputs for google flights api\n\n# COMMAND LINE INPUTS\n# 1: From Airport\n# 2: To Airport\n# 3: Number of Days from Today to start looking from\n# 4: Number of Days to search\n# 5: Number of Flights to return with each email\n# 5: Email Password\n\nimport requests\nimport datetime\nimport json\nimport sys\nimport pprint\nimport smtplib\nimport pymysql\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n# init smtplib and smtpserver\nto = 'mackey.nichols@gmail.com'\ngmail_user = 'mackey.nichols@gmail.com'\ngmail_pwd = sys.argv[6]\nsmtpserver = smtplib.SMTP(\"smtp.gmail.com\",587)\nsmtpserver.ehlo()\nsmtpserver.starttls()\nsmtpserver.ehlo\nsmtpserver.login(gmail_user, gmail_pwd)\n\n# init url and api key\napiKey = \"AIzaSyColmjm796njJk4eoDp24ygH64xkeK0q0E\"\nurl = \"https://www.googleapis.com/qpxExpress/v1/trips/search?key=\"+apiKey\n\n# init vars for json\ntodayDate = datetime.date.today()\nnumTickets = 1\norigin = sys.argv[1]\ndestination = sys.argv[2]\ndaysFromToday = int(sys.argv[3])\ndaysToSearch = int(sys.argv[4])\nflightsToReturn = int(sys.argv[5])\n\nnumResponses = 50\n\nresponses = []\n\n# For each day from today + daysFromToday to today + daysFromToday + daysToSearch\nfor searchDate in [ datetime.date.today() + datetime.timedelta(days=daysFromToday+i) for i in range(daysToSearch) ]:\n # create input json\n inputJson = {\n \"request\": {\n \"passengers\": {\n \"kind\": \"qpxexpress#passengerCounts\",\n \"adultCount\": numTickets,\n \"infantInLapCount\": 0,\n \"infantInSeatCount\": 0,\n \"childCount\": 0,\n \"seniorCount\": 0\n },\n \"slice\": [\n {\n \"kind\": \"qpxexpress#sliceInput\",\n \"origin\": origin,\n \"destination\": destination,\n \"date\": searchDate.strftime(\"%Y-%m-%d\")\n }\n ],\n \n #\"maxPrice\": maxPrice,\n \"refundable\": \"false\",\n 
\"solutions\": numResponses\n }\n }\n\n # send json to google flights, parse response json\n r = json.loads(requests.post(url, json = inputJson).text)\n\n # for each response in this request, summarize the response and add it to responses array\n for i in range(len(r['trips']['tripOption'])):\n dateObject = datetime.datetime.strptime(r['trips']['tripOption'][i]['slice'][0]['segment'][0]['leg'][0]['departureTime'][:-6], \"%Y-%m-%dT%H:%M\")\n\n date = dateObject.strftime('%A, %B %d, %G at %H:%M')\n dayOfWeek = dateObject.strftime('%A')\n time = dateObject.strftime('%H:%M')\n \n thisFlight = {\n \"price\": r['trips']['tripOption'][i]['saleTotal'],\n \"flightNo\": r['trips']['tripOption'][i]['slice'][0]['segment'][0]['flight']['carrier']+r['trips']['tripOption'][i]['slice'][0]['segment'][0]['flight']['number'],\n \"departureFromDate\": date,\n \"departureFromDayofweek\": dayOfWeek,\n \"departureFromTime\": time\n }\n \n responses.append(thisFlight)\n\n \n# Sort reponses by price\ngoodResponses = sorted(responses, key = lambda k: float(k['price'][3:]) )\nbestResponses = [goodResponses[i] for i in range(flightsToReturn)]\n\n\n# Email top responses\nmsg = MIMEMultipart('alternative') \n\nsubject = origin+' to '+destination+' Report - Min price: '+bestResponses[0]['price']\nmsg['Subject'] = subject\nmsg['From'] = gmail_user\nmsg['To'] = to\n\nbody = '
Last Night\\'s '+origin+' -> '+destination+' Flight Report\\n'\n\nfor response in bestResponses:\n    body += response['flightNo']+': '+ response['departureFromDate'] + ' costs '+response['price']+'
' \n\n#msg.attach( MIMEText(header, 'plain') )\nmsg.attach( MIMEText(body, 'html') )\n\n\nsmtpserver.sendmail(gmail_user, to, msg.as_string()) \nsmtpserver.close()\n\n# 3: Store all of the responses into some DB\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='r2d2-C3PO',\n db='scrapes'\n )\n\nwith connection.cursor() as cursor:\n for response in bestResponses:\n sql = \"INSERT INTO `flights` (`price`, `flightNo`, `origin`, `destination`, `departureDatetime`) VALUES (%s, %s, %s, %s, %s)\"\n cursor.execute(sql, (response['price'], response['flightNo'], origin, destination, response['departureFromDate']) )\n connection.commit()\n\nconnection.close()\n","sub_path":"flightScraperLoop.py","file_name":"flightScraperLoop.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"619428489","text":"#! /usr/bin/python3\r\n\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nimport sys\r\n\r\napp = QApplication([])\r\nsize = app.primaryScreen().size()\r\n\r\nwidth = 400\r\nheight = 400\r\n\r\nw1 = 0\r\nh1 = height-((height-(1/3*(height)))/4)\r\nw2 = width/4\r\nh2 = height-(2*(height-(1/3*(height)))/4)\r\nw3 = (2*(width/4))\r\nh3 = height-(3*(height-(1/3*(height)))/4)\r\nw4 = (3*(width/4))\r\nh4 = height-(4*(height-(1/3*(height)))/4)\r\nw = 0\r\nh = 0\r\ncon = True\r\nscreen = ''\r\n\r\nclass main(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n #global w\r\n #global h\r\n \r\n self.setGeometry(500, 250, width, height)\r\n self.setWindowTitle('calculator')\r\n\r\n self.text = QLineEdit(self)\r\n self.text.setReadOnly(True)\r\n self.text.setAlignment(Qt.AlignRight)\r\n self.text.resize(width, (1/3*(height)))\r\n self.text.move(0, 0)\r\n\r\n btn = ['0','1','2','3','4','5','6','7','8','9','.','=','+','/','x','-']\r\n for n in btn:\r\n if n=='0':\r\n w = w1\r\n h = h1\r\n command = self.num\r\n if n=='.':\r\n w = w2\r\n h = h1\r\n command = self.point\r\n if n=='=':\r\n w = w3\r\n h = h1\r\n command = self.Equal\r\n if n=='+':\r\n w = w4\r\n h = h1\r\n command = self.sym\r\n if n=='1':\r\n w = w1\r\n h = h2\r\n command = self.num\r\n if n=='2':\r\n w = w2\r\n h = h2\r\n command = self.num\r\n if n=='3':\r\n w = w3\r\n h = h2\r\n command = self.num\r\n if n=='-':\r\n w = w4\r\n h = h2\r\n command = self.sym\r\n if n=='4':\r\n w =w1\r\n h = h3\r\n command = self.num\r\n if n=='5':\r\n w = w2\r\n h = h3\r\n command = self.num\r\n if n=='6':\r\n w = w3\r\n h = h3\r\n command = self.num\r\n if n=='x':\r\n w = w4\r\n h = h3\r\n command = self.sym\r\n if n=='7':\r\n w = w1\r\n h = h4\r\n command = self.num\r\n if n =='8':\r\n w = w2\r\n h = h4\r\n command = self.num\r\n if n=='9':\r\n w = w3\r\n h = h4\r\n command = self.num\r\n if n=='/':\r\n w = w4\r\n h = h4\r\n command = self.sym\r\n \r\n button = QPushButton(n, self)\r\n button.resize(width/4, (height-(1/3*(height)))/4)\r\n button.move(w, h)\r\n button.clicked.connect(command)\r\n\r\n action1 = QAction('clear', self)\r\n action1.triggered.connect(self.clear)\r\n\r\n action2 = QAction('del', self)\r\n action2.triggered.connect(self.delete)\r\n \r\n cleared = self.addToolBar('clear')\r\n cleared.addAction(action1)\r\n\r\n deleted = self.addToolBar('del')\r\n deleted.addAction(action2)\r\n \r\n self.show()\r\n \r\n def num(self):\r\n global con\r\n global screen\r\n\r\n sender = self.sender()\r\n number = str(sender.text())\r\n\r\n if con==False:\r\n self.text.setText(number)\r\n 
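            # con == False marks a finished (or already full) expression, so the digit above replaced the display; re-arm the flag here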
con = True\r\n \r\n else:\r\n self.text.setText(self.text.text() + number)\r\n\r\n def sym(self):\r\n global con\r\n global screen\r\n\r\n sender = self.sender()\r\n symbol = str(sender.text())\r\n\r\n screen = (self.text.text())\r\n\r\n if len(screen.split())>=3:\r\n con = False\r\n\r\n elif con == True:\r\n self.text.setText(self.text.text() + ' ' + symbol + ' ')\r\n\r\n def point(self):\r\n global con\r\n if con==True:\r\n self.text.setText(self.text.text() + '.')\r\n\r\n def clear(self):\r\n self.text.clear()\r\n \r\n def delete(self):\r\n self.text.backspace()\r\n\r\n def Equal(self):\r\n global con\r\n global screen\r\n\r\n equal = (self.text.text()).split()\r\n\r\n if len(equal) >= 3:\r\n con = False\r\n\r\n if con == False:\r\n if equal[1] == '+':\r\n result = float(equal[0]) + float(equal[2])\r\n self.text.setText(str(result))\r\n\r\n elif equal[1] == '-':\r\n result = float(equal[0]) - float(equal[2])\r\n self.text.setText(str(result))\r\n\r\n elif equal[1] == 'x':\r\n result = float(equal[0]) * float(equal[2])\r\n self.text.setText(str(result))\r\n\r\n elif equal[1] == '/':\r\n result = float(equal[0]) / float(equal[2])\r\n self.text.setText(str(result))\r\n\r\n con = True\r\n\r\n \r\nif __name__ == '__main__':\r\n \r\n app.setStyleSheet('QPushButton {background-color:black; color:white; font-size: 30px} QPushButton:hover {background-color:#ddd} QLineEdit {font-size:30px;}')\r\n ex = main()\r\n app.exec_()\r\n","sub_path":"src/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"531127501","text":"from PIL import Image\nfrom keras.applications.vgg16 import preprocess_input\nimport base64\nfrom io import BytesIO\nimport json\nimport random\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\nfrom keras.preprocessing import image\n\nmodel = load_model(\n \"C:/Summer Training/Face Recognition/facefeatures_new_model_5classes.h5\"\n)\n\nimg = cv2.imread(\"C:/Summer Training/Face Recognition/daddario.jpg\")\nface_cascade = cv2.CascadeClassifier(\n \"C:/Summer Training/Face Recognition/haarcascade_frontalface_default.xml\"\n)\ncv2.imshow(\"image\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\ndef face_extractor(img):\n # Function detects faces and returns the cropped face\n # If no face detected, it returns the input image\n\n # gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n\n if faces is ():\n return None\n\n # Crop all faces found\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)\n cropped_face = img[y : y + h, x : x + w]\n\n return cropped_face\n\n\n# Without bounding box\ndef face_extractor2(img):\n faces = face_cascade.detectMultiScale(img, 1.1, 5)\n\n if faces is ():\n return None\n for (x, y, w, h) in faces:\n x = x - 10\n y = y - 10\n cropped_face = img[y : y + h + 50, x : x + w + 50]\n\n return cropped_face\n\n\nimport os\n\nc = 0\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n _, frame = video_capture.read()\n # canvas = detect(gray, frame)\n # image, face =face_detector(frame)\n\n face = face_extractor(frame)\n if type(face) is np.ndarray:\n face = cv2.resize(face, (224, 224))\n im = Image.fromarray(face, \"RGB\")\n\n img_array = np.array(im)\n # Our keras model used a 4D tensor, (images x height x width x channel)\n\n img_array = np.expand_dims(img_array, axis=0)\n pred = model.predict(img_array)\n print(pred)\n name = 
\"None matching\"\n\n if name == \"None matching\":\n c += 1\n\n if pred[0][0] > 0.7:\n name = \"Kshitij\"\n elif pred[0][1] > 0.7:\n name = \"Adriana\"\n elif pred[0][2] > 0.7:\n name = \"Alex Lawther\"\n elif pred[0][3] > 0.7:\n name = \"Alexandra Daddario\"\n elif pred[0][4] > 0.7:\n name = \"Alvaro Morte\"\n cv2.putText(frame, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)\n else:\n cv2.putText(\n frame,\n \"No face found\",\n (50, 50),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n (0, 255, 0),\n 2,\n )\n cv2.imshow(\"Video\", frame)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n\nif c > 2:\n print(\"Do you want to add this to dataset??\")\n print(\"y/n\")\n x = input()\n if x == \"y\":\n print(\"enter a name\")\n folder_name = input()\n print(type(folder_name))\n path_train = os.path.join(\n \"C:/Summer Training/Face Recognition/Datasets/Train/\", folder_name\n )\n path_test = os.path.join(\n \"C:/Summer Training/Face Recognition//Datasets/Test/\", folder_name\n )\n if not os.path.exists(path_train):\n os.mkdir(path_train)\n if not os.path.exists(path_test):\n os.mkdir(path_test)\n else:\n print(\"Folder already exists\")\n\n vid = cv2.VideoCapture(0)\n count = 0\n while True:\n ret, frame = vid.read()\n if face_extractor2(frame) is not None:\n count += 1\n face = cv2.resize(face_extractor2(frame), (400, 400))\n file_name_path = path_train + \"/\" + folder_name + str(count) + \".jpg\"\n if count > 70:\n file_name_path = path_test + \"/\" + folder_name + str(count) + \".jpg\"\n cv2.imwrite(file_name_path, face)\n cv2.putText(\n face,\n str(count),\n (50, 50),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 0, 0),\n 2,\n )\n cv2.imshow(\"Face Cropper\", face)\n\n else:\n print(\"Face not found\")\n pass\n if cv2.waitKey(1) == 13 or count == 100: # 13 is the Enter Key\n break\n vid.release()\n cv2.destroyAllWindows()\n print(\"DONE but you still gotta train the model again!\")\n\n else:\n print(\"Thank You\")\nelse:\n print(\"Try running again\")\n\n\n\"\"\"\nname = \"None Matching\"\n\nface = face_extractor(img)\ncv2.imshow(\"face\", face)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nif type(face) is np.ndarray:\n face = cv2.resize(face, (224, 224))\n im = Image.fromarray(face, \"RGB\")\n img_array = np.array(im)\n\n img_array = np.expand_dims(img_array, axis=0)\n\n pred = model.predict(img_array)\n print(\"Pred\", pred)\n\n if pred[0][2] > 0.5:\n name = \"Daddario\"\n print(name)\n\n\n\n\n vid = cv2.VideoCapture(0)\n count = 0\n\n while True:\n ret, frame = cap.read()\n \n if cv2.waitKey(1) == 13 or count == 100: # 13 is the Enter Key\n break\n cap.release()\n cv2.destroyAllWindows()\n print(\"DONE\")\n\n else:\n print(\"Thank You\")\n\"\"\"\n","sub_path":"face_frontend.py","file_name":"face_frontend.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"400090196","text":"from backend.helpers.flask_helper import convert\nfrom backend.models import quiz\nfrom backend.models.user import User\nfrom backend.serializers import quiz_serializer\n\ndef create_quiz_services(request_data):\n params = convert(request_data)\n quiz_obj = quiz.Quiz(params)\n if 'questions' in params:\n questions = [quiz.Question(param) for param in params]\n else:\n questions = quiz.Question.query.filter_by(quiz_id=quiz_obj.id).all()\n if questions: quiz_obj.questions = questions\n if 'admin' in params:\n admin = User(params['admin'])\n else:\n admin = 
User.query.filter_by(id=quiz_obj.created_by).first()\n if admin: quiz_obj.admin = admin\n response = quiz_obj.create_or_update()\n if response: return response.__dict__\n\ndef get_all():\n data = quiz.Quiz.query.all()\n return quiz_serializer.quiz_schema.dump(data)","sub_path":"backend/services/quiz_services.py","file_name":"quiz_services.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"302349993","text":"\"\"\"\nAuthor: Vy Thai\nDefault Final Project for CS224N.\n\nDescription: This file is to perform back-translation for a given json file. There are multiple options: different translation models, and different generation methods such as greddy search or top-k or beam. The output will be similar to input but the new asnwer key is not found yet. Instead, there is additional field for each datapoint to contains the sentence that should contain the answer key in the translated context. Please run post_process.py to clean up and finalize the BT dataset.\n\nContact me at vythai @ stanford . edu for more information.\n\"\"\"\n\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nfrom transformers import MarianMTModel, MarianTokenizer\nfrom google_trans_new import google_translator\nimport requests\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\n\nimport Levenshtein\nimport nlpaug.augmenter.char as nac\nimport nlpaug.augmenter.word as naw\nimport nlpaug.augmenter.sentence as nas\nimport nlpaug.flow as nafc\nimport numpy as np\nfrom nlpaug.util import Action\naug_swap = naw.RandomWordAug(action=\"swap\", aug_p = 0.1)\naug_delete = naw.RandomWordAug(action=\"delete\", aug_p = 0.1)\n\n\nfrom itertools import zip_longest\n\nfrom random import randrange\n\nimport spacy\nfrom itertools import zip_longest\n\n#nlp = spacy.load(\"en_core_web_sm\")\nfrom spacy.lang.en import English\n\nnlp = English()\nnlp.add_pipe('sentencizer')\n\nimport torch\nimport os\nimport json\nimport argparse\nimport sys\nimport math\nimport random\n\nimport uuid\n\nnum_sentences = 0\ndistance = 0\n\n#TODO\napi_key = \"Add your API Key for Google API here\"\n\n\ntorch_device = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(torch_device)\n\n\n\"\"\"\ntarget_model_name = 'Helsinki-NLP/opus-mt-en-ru'\ntarget_tokenizer = MarianTokenizer.from_pretrained(target_model_name)\ntarget_model = MarianMTModel.from_pretrained(target_model_name)\ntarget_model.to(torch_device)\n\n\nen_model_name = 'Helsinki-NLP/opus-mt-ru-en'\nen_tokenizer = MarianTokenizer.from_pretrained(en_model_name)\nen_model = MarianMTModel.from_pretrained(en_model_name)\nen_model.to(torch_device)\n\"\"\"\n\nRU_model_name = \"facebook/wmt19-en-ru\"\nRU_tokenizer = FSMTTokenizer.from_pretrained(RU_model_name)\nRU_model = FSMTForConditionalGeneration.from_pretrained(RU_model_name)\nRU_model.to(torch_device)\n\nRUEN_model_name = \"facebook/wmt19-ru-en\"\nRUEN_tokenizer = FSMTTokenizer.from_pretrained(RUEN_model_name)\nRUEN_model = FSMTForConditionalGeneration.from_pretrained(RUEN_model_name)\nRUEN_model.to(torch_device)\n\nDE_model_name = \"facebook/wmt19-en-de\"\nDE_tokenizer = FSMTTokenizer.from_pretrained(DE_model_name)\nDE_model = FSMTForConditionalGeneration.from_pretrained(DE_model_name)\nDE_model.to(torch_device)\n\nDEEN_model_name = \"facebook/wmt19-de-en\"\nDEEN_tokenizer = FSMTTokenizer.from_pretrained(DEEN_model_name)\nDEEN_model = 
FSMTForConditionalGeneration.from_pretrained(DEEN_model_name)\nDEEN_model.to(torch_device)\n\n#CHAIN MODEL FOR DOUBLE BACK TRANSLATION\n#chain_model_name = 'Helsinki-NLP/opus-mt-de-nl'\n#chain_tokenizer = MarianTokenizer.from_pretrained(chain_model_name)\n#chain_model = MarianMTModel.from_pretrained(chain_model_name)\n#chain_model.to(torch_device)\n\ntranslator = google_translator()\n\n#This function is to perform translation for a given language.\ndef translate(src_texts, model, tokenizer):\n\n\n input_ids = [ tokenizer.encode(input, return_tensors=\"pt\").to(torch_device) for input in src_texts ]\n\n #UNCOMMENT THE FOLLOWING LINE FOR TOP_K SAMPLING\n #outputs = [model.generate(\n # inp, do_sample=True, max_length=70, top_k=50, temperature=1.7) for inp in input_ids]\n\n # This is beam search\n outputs = [model.generate(\n inp,\n max_length=70,\n num_beams=5,\n early_stopping=True) for inp in input_ids]\n\n\n decoded = [tokenizer.decode(output[0], skip_special_tokens=True) for output in outputs]\n\n # If you are using Helsinki model, you can perform batch for a faster implementation, uncomment these following lines.\n\n #encoded = tokenizer.prepare_seq2seq_batch(src_texts,return_tensors='pt').to(torch_device)\n # Generate translation using model\n #translated = model.generate(**encoded)\n # Convert the generated tokens indices back into text\n #translated_texts = tokenizer.batch_decode(translated, skip_special_tokens=True)\n\n return decoded\n\n#This function is to perform backtranslation\ndef back_translate_fairseq(texts, check_list):\n global distance\n global num_sentences\n\n #data augmentation, turn this off to prevent adding random swapping an deletion\n texts = [ aug_swap.augment(text) if np.random.choice(np.arange(0, 2), p=[0.5, 0.5]) else text for text in texts]\n texts = [ aug_delete.augment(text) if np.random.choice(np.arange(0, 2), p=[0.5, 0.5]) else text for text in texts]\n\n #Change the model name accordingly to the model you want to perform BT\n forward_texts = translate(texts, target_model, target_tokenizer)\n\n #This line is for chained BT\n #chain_texts = translate(forward_texts, chain_model, chain_tokenizer)\n\n # Translate from target language back to source language\n back_translated_texts = translate(forward_texts, en_model, en_tokenizer)\n dist = sum([ Levenshtein.distance(texts[i], back_translated_texts[i])/max(len(texts[i]), len(back_translated_texts[i])) for i in range(len(texts)) if check_list[i]])\n\n distance = distance + dist\n num_sentences = num_sentences + sum(check_list)\n\n #Levenshtein distance calculation\n print(distance/num_sentences)\n\n return back_translated_texts\n #return chain_texts\n\ndef google_translate(texts, check_list):\n\n imme_lang = random.choice(['fr'])\n\n global distance\n global num_sentences\n\n\n r = requests.get(\n \"https://translation.googleapis.com/language/translate/v2\",\n params = {\n \"key\": api_key,\n \"q\": texts,\n \"target\": imme_lang,\n \"alt\":\"json\",\n \"source\":\"en\",\n \"format\": \"text\"\n }\n )\n r = r.json()\n new_text = [ sentence['translatedText'] for sentence in r['data']['translations']]\n\n new_r = requests.get(\n \"https://translation.googleapis.com/language/translate/v2\",\n params = {\n \"key\": api_key,\n \"q\": new_text,\n \"target\": \"en\",\n \"alt\":\"json\",\n \"source\": imme_lang,\n \"format\": \"text\"\n }\n )\n\n new_r = new_r.json()\n\n back_translated_texts = [ sentence['translatedText'] for sentence in new_r['data']['translations']]\n dist = sum([ Levenshtein.distance(texts[i], 
back_translated_texts[i])/max(len(texts[i]), len(back_translated_texts[i])) for i in range(len(texts)) if check_list[i]])\n\n distance = distance + dist\n num_sentences = num_sentences + sum(check_list)\n\n print(distance/num_sentences)\n\n return back_translated_texts\n\ndef partition(list_in, n):\n random.shuffle(list_in)\n return [(list_in[i::n]) for i in range(n)]\n\ndef backtranslation(texts):\n tup = [(i,texts[i]) for i in range(len(texts))]\n\n list_dict = partition(tup, 2)\n dict_1 = list_dict[0]\n dict_2 = list_dict[1]\n\n key1 = [ i[0] for i in dict_1]\n value1 = [ i[1] for i in dict_1]\n\n key2 = [ i[0] for i in dict_2]\n value2 = [ i[1] for i in dict_2]\n\n tran_1 = back_translate_fairseq(value1)\n tran_2 = google_translate(value2)\n\n dict_1 = { key1[i]:tran_1[i] for i in range(len(key1))}\n\n dict_2 = { key2[i]:tran_2[i] for i in range(len(key2))}\n\n mydict = {**dict_1, **dict_2}\n\n translated = [ mydict[key] for key in sorted(mydict.keys())]\n return translated\n\ndef calculate_percentage(in_file, length):\n in_file = os.path.join(in_file + '.json')\n\n with open(in_file) as f:\n data = json.load(f)['data']\n data = data[:length]\n # output = []\n num = 0\n for index, article in enumerate(data):\n for paragraph in article['paragraphs']:\n for qa in paragraph[\"qas\"]:\n num+=1\n\n print(\"This dataset has \", num)\n print(\"This dataset has % \", num/50000)\n\ndef check_size(in_file):\n in_file = os.path.join(in_file + '.json')\n\n with open(in_file) as f:\n data = json.load(f)['data']\n\n # output = []\n num = 0\n total = 0\n for index, article in enumerate(data):\n for paragraph in article['paragraphs']:\n total = total + len(paragraph['qas'])\n if (paragraph[\"qas\"] == []):\n num+=1\n\n print(\"This dataset has \", num, \" and total is: \", total)\n\ndef split_dataset(dir, start, length):\n in_path = os.path.join( dir + '.json')\n out_path = os.path.join( dir + '_split_' + str(start) + '_' + str(start+ length) + '.json')\n\n with open(in_path) as f:\n data = json.load(f)['data']\n\n print(\"This dataset has: \", len(data), \" titles.\")\n output = data[start:start+length]\n\n output_json = {\n \"data\": output\n }\n with open(out_path, 'w') as fp:\n json.dump(output_json, fp)\n\ndef sentence_separate(text):\n #doc = doc.rstrip()\n \"\"\"\n doc = nlp(doc)\n doc1 = [sent.text[:512] for sent in doc.sents]\n test_list = list(filter(None, doc1))\n return test_list\n \"\"\"\n doc = nlp(text)\n return [str(sent).strip() for sent in doc.sents]\n\n\ndef grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef grouping_multiple(tran_qa, grouping, answers_positions):\n\n test = [ tran_qa[x[0]:x[0]+x[1]] for x in grouping]\n group_by_2 = grouper(test, 2)\n new_group_qas = [ [ x[0][0], x[1], answers_positions[i] ] for i,x in enumerate(group_by_2)]\n\n return new_group_qas\n\n#This function is to find the sentence that contains the original answer\ndef find_sent_position(context, answer_pos, test, test_file):\n orig_answer_pos = answer_pos\n\n for index, sen in enumerate(context):\n answer_pos -= len(sen) + 1\n if test == True:\n test_file.write( str(answer_pos) + \" | \" + str(orig_answer_pos) + \"\\n\")\n if(answer_pos < 0):\n return index\n test_file.write(\"Erroe!\\n\")\n return 0\n\n#Check two json files if they have the same number of datapoints or not\ndef check_dataset_clean(dir_orig, dir_tran):\n \"\"\"Load 
json file and store fields separately.\"\"\"\n orig_file = os.path.join(dir_orig + '.json')\n tran_file = os.path.join(dir_tran + '.json')\n\n \"\"\"\n out_file = os.path.join( dir_orig + '_BT' + '.json')\n\n if os.path.exists(out_file):\n append_write = 'a' # append if already exists\n else:\n append_write = 'w' # make a new file if not\n out_file = open(out_file, append_write)\n \"\"\"\n with open(orig_file) as f1:\n data_orig = json.load(f1)['data']\n\n with open(tran_file) as f2:\n data_tran = json.load(f2)['data']\n\n count = 0\n # output = []\n print(\"Size of orig: \", len(data_orig))\n print(\"Size of tran: \", len(data_tran))\n\n if(len(data_tran)!= len(data_orig)):\n print(\"they have different size!\")\n for index_ar, article_tran in enumerate(data_tran):\n article_orig = data_orig[index_ar]\n\n #cheeck if title is the same\n if(article_orig[\"title\"]!=article_tran[\"title\"]):\n print(\"Title number \", index_ar, \" not the same!Pls check!\")\n\n if(len(article_orig[\"paragraphs\"]) != len(article_tran[\"paragraphs\"])):\n print(\"Title number \", index_ar, \" does not have the same # of prams\")\n\n for index_pa, paragraph_tran in enumerate(article_tran['paragraphs']):\n paragraph_orig = article_orig['paragraphs'][index_pa]\n # random sampling for questions\n #check if they have the same number of paragraphs\n if(len(paragraph_orig[\"qas\"]) != len(paragraph_tran[\"qas\"])):\n print(\"Title number \", index_ar, \" does not have the same # of prams\")\n\n # check if they have the same number of paragraphs\n for index_qa, qa_orig in enumerate(paragraph_orig['qas']):\n\n qa_tran = paragraph_tran['qas'][index_qa]\n\n #if(qa_orig['answers'][0]['sentence'] != qa_tran['answers'][0]['sentence']):\n # count+= 1\n\n print(\"Everything should be fine now!\")\n print(\"Different sentences is: \", count)\n\n#This function is to remove all the empty datapoint\ndef remove_empty(dir_in):\n in_file = os.path.join(dir_in + '.json')\n out_file = os.path.join(dir_in + '_cleaned' + '.json')\n\n if os.path.exists(out_file):\n append_write = 'a' # append if already exists\n else:\n append_write = 'w' # make a new file if not\n\n out_file = open(out_file, append_write)\n\n with open(in_file) as f:\n data = json.load(f)['data']\n\n total = 0\n data_copy = {\"data\": [] }\n num = 0\n for index, article in enumerate(data):\n paragraph_json = []\n\n for paragraph in article['paragraphs']:\n if paragraph['qas'] == []:\n continue\n paragraph_json.append(paragraph)\n for qa in paragraph[\"qas\"]:\n total += 1\n\n if paragraph_json ==[]:\n continue\n new_article_json = {\"title\" : article[\"title\"], \"paragraphs\": paragraph_json}\n data_copy[\"data\"].append(new_article_json)\n\n\n new_data = json.dumps(data_copy)\n out_file.write(new_data)\n print(total)\n\n\n\"\"\"\nMAIN FUNCTION TO PERFORM BT\n\n\"\"\"\ndef load_dataset(dir):\n \"\"\"Load json file and store fields separately.\"\"\"\n test_file = os.path.join( 'test.txt')\n test_file = open(test_file, 'a')\n\n in_file = os.path.join(dir + '.json')\n out_file = os.path.join( dir + '_helsinki_beam' + '.json')\n\n if os.path.exists(out_file):\n append_write = 'a' # append if already exists\n else:\n append_write = 'w' # make a new file if not\n out_file = open(out_file, append_write)\n\n with open(in_file) as f:\n data = json.load(f)['data']\n\n #output = []\n total = 0\n for index, article in enumerate(data):\n paragraph_json = []\n\n for paragraph in article['paragraphs']:\n if paragraph['qas'] == []:\n continue\n sent = 
sentence_separate(paragraph['context'])\n request = sent.copy()\n len_context = len(sent)\n\n answers_positions = []\n #output['contexts'].append(paragraph['context'])\n grouping = []\n check_list = [ True for req in request]\n temp = 0\n for qa in paragraph['qas']:\n\n\n total += 1\n request.append(qa['question'][:512])\n grouping.append((temp, 1))\n temp = temp + 1\n\n check_list.append(True)\n request.extend([ ans['text'][:512] for i,ans in enumerate(qa['answers']) ])\n check_list.extend([ False for i in range(len(qa['answers']))])\n grouping.append((temp, len(qa['answers'])))\n temp = temp + len(qa['answers'])\n #grouping.append(len(qa['answers']))\n #request.append(qa['answers'][0]['text'][:512])\n test = False\n if(article['title'] == \"Brink (Sir Cedric Hardwicke) has recently taken Pu\"):\n test = True\n answers_positions.append([find_sent_position(sent, ans['answer_start'],test, test_file ) for i,ans in enumerate(qa['answers']) ])\n #answer_pos = qa['answers'][0]['answer_start']\n #answer_positions.append(find_sent_position(sent, answer_pos))\n len_req = len(request)\n\n if(article['title'] == \"Brink (Sir Cedric Hardwicke) has recently taken Pu\"):\n doc = nlp(paragraph['context'])\n l = [str(sent).strip() for sent in doc.sents]\n print([ [i, len(i)] for i in l])\n print(answers_positions)\n\n #lower the batch for GPU\n if len_req > 30:\n num_lem = math.ceil(len_req/(math.ceil(len_req/30)))\n request_batch = [request[i:i+num_lem] for i in range(0, len_req, num_lem)]\n translate_batch = [back_translate_fairseq(req, check_list) for req in request_batch]\n translate_result = [j for i in translate_batch for j in i]\n #output['qids'].append(qa['id'])\n #output['questions'].append(qa['question'])\n #output['qid2cid'].append(len(output['contexts']) - 1)\n else:\n translate_result = back_translate_fairseq(request, check_list)\n\n\n tran_context_orig = translate_result[:len_context]\n tran_qa = translate_result[len_context:]\n tran_context = ' '.join(tran_context_orig)\n\n qas = grouper(tran_qa, 2, fillvalue=None)\n\n new_group_qas = grouping_multiple(tran_qa, grouping, answers_positions)\n\n qas_json = [{\"question\": x[0], \"id\": str(uuid.uuid4().hex), \"answers\": [{\"answer_start\": 0, \"text\": y, \"sentence\": tran_context_orig[x[2][i]]} for i,y in enumerate(x[1])]} for x in new_group_qas]\n\n para_json = {\n \"context\": tran_context,\n \"qas\": qas_json\n }\n paragraph_json.append(para_json)\n\n if (paragraph_json == []):\n continue\n new_article_json = {\n \"title\": article['title'],\n \"paragraphs\": paragraph_json,\n }\n new_article = json.dumps(new_article_json)\n out_file.write(',' + new_article)\n print(\"finished writing \", index)\n\n print(\"Translated totally of \", total, \" pairs.\")\n out_file.close()\n global num_sentences\n global distance\n print(\"Average distance is: \" + str(distance/num_sentences))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('mode', type=str, help='Path to SQuAD data directory')\nparser.add_argument('--BT_dir_in', type=str, help='Path to SQuAD data directory')\nparser.add_argument('--NMT', type=str, help='Path to SQuAD data directory')\n\nparser.add_argument('--split_in', type=str, help='Path to SQuAD data directory')\n\nparser.add_argument('--start', type=int, help='Path to SQuAD data directory')\nparser.add_argument('--length', type=int, help='Path to SQuAD data directory')\n\nargs = parser.parse_args()\n\nif(args.mode == \"split\"):\n split_dataset(args.split_in, args.start, args.length)\nelif(args.mode == \"BT\"):\n 
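    # "BT" back-translates the whole split in place; a hypothetical invocation: python process.py BT --BT_dir_in train_split_0_100 (load_dataset appends ".json" itself)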
print('Loading dataset %s', file=sys.stderr)\n dataset = load_dataset(args.BT_dir_in)\nelif(args.mode == \"check_size\"):\n print('Loading dataset %s', file=sys.stderr)\n dataset = check_size(args.split_in)\nelif(args.mode == \"percentage\"):\n calculate_percentage(args.split_in, args.length)\nelif(args.mode == \"check_position\"):\n check_wrong_position(args.split_in, args.BT_dir_in)\nelif(args.mode == \"remove_empty\"):\n remove_empty(args.split_in)\nelif(args.mode == \"check_consistent\"):\n check_dataset_clean(args.split_in, args.BT_dir_in)\n","sub_path":"back_translation/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":18997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118618843","text":"import cv2 as cv\nimport numpy as np\n\ndef main():\n img = cv.imread('./image/shape.png')\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n gray = np.float32(gray)\n\n dst = cv.cornerHarris(gray, 20, 3, 0.05)\n dst = cv.dilate(dst, None)\n\n img[dst>0.01*dst.max()]=[255,0,255]\n\n cv.imshow('img', img)\n cv.waitKey(0)\n cv.destroyWindow('img')\n\nif __name__ == \"__main__\":\n main()","sub_path":"openCV/chapter07/Harris角点检测算法.py","file_name":"Harris角点检测算法.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"21435975","text":"from ..db.channel.base import Channel\nfrom ..db.channel_stats import ChannelStats, get_levels\nfrom ..db.channel_hot_topics import ChannelHotTopics\nimport base\n\n\nclass PostCase(base.MainCase):\n 'Test multi-channel scenarios'\n\n def setUp(self):\n super(PostCase, self).setUp()\n\n self.permissive_channel = Channel.objects.create_by_user(\n self.user, title='Permissive',\n type='twitter', intention_types=base.SA_TYPES)\n\n self.restrictive_channel = Channel.objects.create_by_user(\n self.user, title='Restrictive',\n type='twitter', intention_types=[\"Asks for Something\"])\n\n self.channel = Channel.objects.create_by_user(\n self.user, title='TestChannel',\n type='twitter', intention_types=base.SA_TYPES)\n\n self.channels = [self.channel,\n self.permissive_channel,\n self.restrictive_channel]\n\n self.channel.add_perm(self.user)\n\n def test_simple_post_creation(self):\n content = 'I need a bike. 
I like honda.'\n post = self._create_db_post(\n channels=self.channels,\n content=content)\n\n for channel in [self.channel,\n self.permissive_channel]:\n for stats in get_levels(self.channel):\n stats.reload()\n self.assertEqual(\n stats.number_of_posts, 1)\n self.assertEqual(\n stats.feature_counts['2'], 1)\n self.assertEqual(\n stats.feature_counts['4'], 1)\n\n self.assertEqual(ChannelHotTopics.objects(\n hashed_parents=[],\n topic='bike',\n channel_num=channel.counter)[0].filter(intention=0, is_leaf=True)[0].topic_count, #\"all intentions\".\"leaf\"\n 1)\n\n def test_permissive(self):\n 'ALL Channels Fit'\n content = 'Where can I find a good bike?'\n post = self._create_db_post(\n channels=self.channels,\n content=content)\n\n self.assertEqual(ChannelStats.objects.count(), 3*3)\n self.assertEqual(ChannelHotTopics.objects(hashed_parents=[],\n topic='bike').count(), 3*2)\n\n","sub_path":"tests/test_multi_channel.py","file_name":"test_multi_channel.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"345188331","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nEfetua a operacao de potenciacao, considerando\nexpoente positivo.\n\n@author: Prof. Diogo SM\n\"\"\"\n\ndef potencia(b, e):\n p = 1\n \n for i in range(e):\n p = p * b\n \n return p\n\na = potencia(2, 3)\nb = potencia(4, a)\nc = potencia(4, potencia(potencia(2,1), 3))\n\nprint(f\"2**3 = {a}\")\nprint(f\"4**{a} = {b}\")\nprint(f\"4**{a} = {c}\")\n","sub_path":"aula13-funcoes-i/potencia.py","file_name":"potencia.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"482123687","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport random\nimport pdb\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\nfrom transform_nets import input_transform_net, feature_transform_net\n\ndef placeholder_inputs(batch_size, num_point):\n pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n labels_pl = tf.placeholder(tf.int32, shape=(batch_size))\n return pointclouds_pl, labels_pl\n\n\ndef get_model(point_cloud, is_training, bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n end_points = {}\n netStructure=[]\n numberOfTransforms = 0\n convNumber=1\n maxpoolNumber=0\n fcNumber=0\n dropoutNumber=0\n numberOfTransforms = 0\n\n net = point_cloud\n\n if random.randint(1,2) == 1:\n layer=[\"expand\",0,0]\n netStructure.append(layer)\n while True:\n i=random.randint(1,3)\n if i == 1:\n convNumber +=1\n layer=[\"conv2d\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n elif i == 2:\n maxpoolNumber += 1\n layer=[\"maxpool\",0,maxpoolNumber]\n netStructure.append(layer)\n elif i == 3:\n layer=[\"transform\",0,1]\n netStructure.append(layer)\n break\n else:\n pass\n else:\n layer=[\"transform\",0,1]\n netStructure.append(layer)\n if random.randint(1,2) == 1:\n convNumber +=1\n layer=[\"conv2d_trans1\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n while True:\n i=random.randint(1,3)\n if i == 1:\n convNumber +=1\n layer=[\"conv2d\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n elif i 
== 2:\n maxpoolNumber += 1\n layer=[\"maxpool\",0,maxpoolNumber]\n netStructure.append(layer)\n elif i == 3:\n convNumber +=1\n layer=[\"conv2d\",64,convNumber]\n netStructure.append(layer)\n layer=[\"transform\",0,2]\n netStructure.append(layer)\n convNumber +=1\n layer=[\"conv2d\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n break\n else:\n pass\n else:\n convNumber +=1\n layer=[\"conv2d\",64,convNumber]\n netStructure.append(layer)\n layer=[\"transform\",0,2]\n netStructure.append(layer)\n convNumber +=1\n layer=[\"conv2d_trans2\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n\n while True:\n i=random.randint(1,3)\n if i == 1:\n convNumber +=1\n layer=[\"conv2d\",math.pow(2,random.randint(4,10)),convNumber]\n netStructure.append(layer)\n elif i == 2:\n maxpoolNumber += 1\n layer=[\"maxpool\",0,maxpoolNumber]\n netStructure.append(layer)\n elif i == 3:\n fcNumber += 1\n layer=[\"fc\",math.pow(2,random.randint(4,10)),fcNumber]\n netStructure.append(layer)\n break\n else:\n pass\n while True:\n i=random.randint(1,3)\n if i==1:\n fcNumber += 1\n layer=[\"fc\",math.pow(2,random.randint(4,10)),fcNumber]\n netStructure.append(layer)\n elif i==2:\n dropoutNumber += 1\n layer=[\"dropout\",0,dropoutNumber]\n netStructure.append(layer)\n fcNumber += 1\n layer=[\"fc\",math.pow(2,random.randint(4,10)),fcNumber]\n netStructure.append(layer)\n elif i==3:\n fcNumber += 1\n layer=[\"fc\",4,fcNumber]\n netStructure.append(layer)\n break\n else:\n pass\n print(netStructure)\n netStructure = [[\"transform\",0,1],[\"conv2d\",64,1],[\"transform\",0,2],[\"conv2d\",32,2],[\"maxpool\",0,1],[\"fc\",64,1],[\"dropout\",0,1],[\"fc\",4,2]]\n for layer in netStructure:\n\n print(layer)\n if layer[0] == \"conv2d\":\n print(\"conv\")\n if layer[2] == 1:\n net = tf_util.conv2d(input_image,layer[1],[1,3],padding='VALID',stride=[1,1],\n bn=True, is_training=is_training, scope='conv%d'%(layer[2]), bn_decay=bn_decay)\n else:\n net = tf_util.conv2d(net, layer[1], [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv%d'%(layer[2]), bn_decay=bn_decay)\n print(layer[:])\n elif layer[0] == \"maxpool\":\n net = tf_util.max_pool2d(net, [num_point,1],padding='VALID', scope='maxpool%d'%(layer[2]))\n print(layer[:])\n elif layer[0] == \"transform\":\n if layer[2]==1:\n with tf.variable_scope('transform_net%d' %(layer[2])) as sc:\n transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)\n point_cloud_transformed = tf.matmul(point_cloud, transform)\n input_image = tf.expand_dims(point_cloud_transformed, -1)\n pdb.set_trace()\n else:\n with tf.variable_scope('transform_net%d' %(layer[2])) as sc:\n transform = feature_transform_net(net, is_training, bn_decay, K=64)\n end_points['transform'] = transform\n net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)\n net_transformed = tf.expand_dims(net_transformed, [2])\n pdb.set_trace()\n\n\n elif layer[0] == \"fc\":\n if layer [2] == 1:\n pdb.set_trace()\n net = tf.reshape(net, [batch_size, -1])\n net = tf_util.fully_connected(net, layer[1], bn=True, is_training=is_training, scope='fc%d'%(layer[2]), bn_decay = bn_decay)\n\n elif layer[1] == 4:\n net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc%d'%(layer[2]))\n else:\n net = tf_util.fully_connected(net, layer[1], bn=True, is_training=is_training,scope='fc%d'%(layer[2]), bn_decay=bn_decay)\n print(layer[:])\n elif layer[0] == \"dropout\":\n net = tf_util.dropout(net, keep_prob=0.7, 
is_training=is_training,scope='dp%d'%(layer[2]))\n print(layer[:])\n elif layer[0] == \"expand\":\n print(\"expand\")\n net = tf.expand_dims(net, -1)\n elif layer[0] == \"conv2d_trans1\":\n net = tf_util.conv2d(input_image, layer[1], [1,3],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv%d'%(layer[2]), bn_decay=bn_decay)\n elif layer[0] == \"conv2d_trans2\":\n net = tf_util.conv2d(net_transformed, layer[1], [1, 1],\n padding = 'VALID', stride = [1, 1],\n bn = True, is_training = is_training,\n scope = 'conv%d'%(layer[2]), bn_decay = bn_decay)\n else:\n pass\n\n return netStructure, net, end_points\n\n\ndef get_loss(pred, label, end_points, reg_weight=0.001):\n \"\"\" pred: B*NUM_CLASSES,\n label: B, \"\"\"\n pdb.set_trace()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)\n classify_loss = tf.reduce_mean(loss)\n tf.summary.scalar('classify loss', classify_loss)\n\n # Enforce the transformation as orthogonal matrix\n transform = end_points['transform'] # BxKxK\n K = transform.get_shape()[1].value\n mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))\n mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)\n mat_diff_loss = tf.nn.l2_loss(mat_diff)\n tf.summary.scalar('mat loss', mat_diff_loss)\n\n return classify_loss + mat_diff_loss * reg_weight\n\n\nif __name__=='__main__':\n with tf.Graph().as_default():\n inputs = tf.zeros((32,1024,3))\n outputs = get_model(inputs, tf.constant(True))\n print(outputs)\n","sub_path":"log/pointnet_cls1.py","file_name":"pointnet_cls1.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"156476561","text":"import hashlib\nfrom typing import List\n\nfrom bs4 import BeautifulSoup\n\nfrom dedoc.readers.docx_reader.data_structures.run import Run\n\n\nclass DocxTable:\n def __init__(self,\n xml: BeautifulSoup,\n styles_extractor: \"StylesExtractor\") -> None:\n \"\"\"\n contains information about table properties\n :param xml: BeautifulSoup tree with table properties\n \"\"\"\n self.xml = xml\n self._uid = hashlib.md5(xml.encode()).hexdigest()\n self.styles_extractor = styles_extractor\n\n @property\n def uid(self) -> str:\n return self._uid\n\n def get_cells(self) -> List[List[str]]:\n \"\"\"\n returns list of lists with table cells\n merged cells are split and duplicated in result table\n \"\"\"\n # tbl tag defines table\n # tr tag defines table row\n # tc tag defines table cell\n result_cells = []\n\n # delete tables inside tables\n for tbl in self.xml.find_all(\"w:tbl\"):\n tbl.extract()\n\n rows = self.xml.find_all(\"w:tr\")\n prev_row = []\n for row in rows:\n cells = row.find_all(\"w:tc\")\n cells_text = []\n\n cell_ind = 0\n for cell in cells:\n # gridSpan tag describes number of horizontally merged cells\n if cell.gridSpan:\n grid_span = int(cell.gridSpan[\"w:val\"])\n else:\n grid_span = 1\n # get text of the cell\n cell_text = self.__get_cell_text(cell)\n # vmerge tag for vertically merged set of cells (or horizontally split cells)\n # attribute val may be \"restart\" or \"continue\" (\"continue\" if omitted)\n if cell.vMerge:\n value = cell.vMerge.get(\"w:val\", \"continue\")\n if value == \"continue\":\n cell_text += prev_row[cell_ind]\n # split merged cells\n for span in range(grid_span):\n cell_ind += 1\n cells_text.append(cell_text)\n\n result_cells.append(cells_text)\n prev_row = cells_text\n\n return result_cells\n\n def __get_cell_text(self, cell: BeautifulSoup) -> 
str:\n cell_text = \"\"\n paragraphs = cell.find_all(\"w:p\")\n for paragraph in paragraphs:\n for run_bs in paragraph.find_all(\"w:r\"):\n run = Run(None, self.styles_extractor)\n run.get_text(run_bs)\n cell_text += run.text\n cell_text += '\\n'\n if cell_text:\n cell_text = cell_text[:-1] # remove \\n in the end\n return cell_text\n","sub_path":"dedoc/readers/docx_reader/data_structures/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"416012593","text":"#!/usr/bin/env python3\n##############################################################################\n# Function: Evaluation on SemEval 2017 Spatial Roles Labeling\n##############################################################################\nfrom fuzzy_match import *\nimport re\nfrom lxml import etree\nimport nltk\n#from nltk import sent_tokenize, word_tokenize\n\ndef generate_relation_list(relation_trajector_landmark_list):\n for item in relation_trajector_landmark_list:\n if len(item) == 0:\n return []\n\n relation_list = list(itertools.product(*relation_trajector_landmark_list))\n return relation_list\n\ndef insert_relation_into_node(node, relation_list, relation_index):\n for relation, trajector, landmark in relation_list:\n rel_node = etree.SubElement(node, 'RELATION')\n rel_node.set('id', 'SR'+str(relation_index) )\n rel_node.set('trajector_id', trajector)\n rel_node.set('landmark_id', landmark)\n rel_node.set('spatial_indicator_id', relation)\n rel_node.set('general_type', '')\n rel_node.set('specific_type', '')\n rel_node.set('RCC8_value', '')\n rel_node.set('FoR', '')\n relation_index = relation_index +1\n return node, relation_index\n\n\n\n\ndef get_indicator_trajactor_landmark(number, relation_index, sentence, node):\n parse = get_dependency_analysis_of_sentence(sentence)\n semantic_unit_list = get_semantic_unit_list(sentence, parse)\n semantic_unit_list = add_buddy_to_unit(semantic_unit_list)\n word_list = get_word_list(semantic_unit_list)\n semantic_unit_list = construction_recognition(word_list, semantic_unit_list, all_prep_list, 'RP')\n semantic_unit_list = remove_repeating_semantic_unit(semantic_unit_list)\n father_children_dict = get_father_children_list(parse)\n root_list = get_all_root_list(parse)\n root_children_dict = get_root_children_dict(root_list, father_children_dict)\n event_dict = get_event_dict(sentence, root_children_dict, semantic_unit_list)\n\n for event, children in event_dict.items():\n print('---------------------semantic_unit_list---------------------')\n print_semantic_unit_list(children)\n print('--------------------- end line ---------------------')\n\n locative_repr_list, motion_repr_list = get_final_spatial_representation(children, final_pattern_dict)\n for locative_repr in locative_repr_list:\n relation_trajector_landmark_list = []\n number = number+1\n relation_index = relation_index +1\n print('----------- locative construction -----------')\n print_locative_expr(locative_repr)\n\n relation = locative_repr.get_relation()\n spatial_indicator = etree.SubElement(node, 'SPATIALINDICATOR')\n spatial_indicator.set('id', 'S'+str(number))\n spatial_indicator.set('start', str(relation.get_start_index()))\n spatial_indicator.set('end', str(relation.get_end_index()))\n spatial_indicator.set('text', relation.get_word())\n\n figure_list = list(set(locative_repr.get_figure()))\n ground_list = list(set(locative_repr.get_ground()))\n prep_id_list = ['S'+str(number)]\n 
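            # gather the candidate ids for each role; generate_relation_list() later takes their Cartesian product to emit RELATION nodes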
trajector_id_list = []\n landmark_id_list = []\n for figure in figure_list:\n if figure.get_start_index() != None:\n print(figure.get_word(), figure.get_start_index(), figure.get_end_index())\n trajector = etree.SubElement(node, 'TRAJECTOR')\n trajector.set('id', 'T'+str(number))\n trajector.set('start', str(figure.get_start_index()))\n trajector.set('end', str(figure.get_end_index()))\n trajector.set('text', figure.get_word())\n trajector_id_list.append('T'+str(number))\n if len(figure_list) > 1:\n number = number+1\n\n if len(ground_list) == 0:\n landmark = etree.SubElement(node, 'LANDMARK')\n landmark.set('id', 'L'+str(number))\n landmark.set('start', '-1')\n landmark.set('end', '-1')\n landmark_id_list.append('L'+str(number))\n else:\n for ground in ground_list:\n print(ground.get_word(), ground.get_start_index(), ground.get_end_index())\n if ground.get_start_index() != None:\n landmark = etree.SubElement(node, 'LANDMARK')\n landmark.set('id', 'L'+str(number))\n landmark.set('start', str(ground.get_start_index()))\n landmark.set('end', str(ground.get_end_index()))\n landmark.set('text', str(ground.get_word()))\n landmark_id_list.append('L'+str(number))\n if len(ground_list) > 1:\n number = number+1\n\n\n print(prep_id_list, trajector_id_list, landmark_id_list)\n relation_trajector_landmark_list.append(prep_id_list)\n relation_trajector_landmark_list.append(trajector_id_list)\n relation_trajector_landmark_list.append(landmark_id_list)\n relation_list = generate_relation_list(relation_trajector_landmark_list)\n print(relation_list)\n node, relation_index = insert_relation_into_node(node, relation_list, relation_index)\n if len(motion_repr_list) > 1:\n relation_index = relation_index +1\n print('----------- locative construction -----------')\n\n for motion_repr in motion_repr_list:\n relation_trajector_landmark_list = []\n number = number+1\n relation_index = relation_index +1\n print('----------- motion construction -----------')\n print_motion_event(motion_repr)\n\n trajector_id_list = []\n landmark_id_list = []\n prep_id_list = []\n figure_list = motion_repr.get_figure()\n\n for relation, ground in motion_repr.get_ground().items():\n if ground.get_start_index() != None:\n landmark = etree.SubElement(node, 'LANDMARK')\n landmark.set('id', 'L'+str(number))\n landmark.set('start', str(ground.get_start_index()))\n landmark.set('end', str(ground.get_end_index()))\n landmark.set('text', str(ground.get_word()))\n landmark_id_list.append('L'+str(number))\n\n if relation.get_start_index() != None:\n spatial_indicator = etree.SubElement(node, 'SPATIALINDICATOR')\n spatial_indicator.set('id', 'S'+str(number))\n spatial_indicator.set('start', str(relation.get_start_index()))\n spatial_indicator.set('end', str(relation.get_end_index()))\n spatial_indicator.set('text', relation.get_word())\n prep_id_list.append('S'+str(number))\n if len(motion_repr.get_ground()) > 1:\n number = number+1\n\n for figure in figure_list:\n if figure.get_start_index() != None:\n print(figure.get_word(), figure.get_start_index(), figure.get_end_index())\n trajector = etree.SubElement(node, 'TRAJECTOR')\n trajector.set('id', 'T'+str(number))\n trajector.set('start', str(figure.get_start_index()))\n trajector.set('end', str(figure.get_end_index()))\n trajector.set('text', figure.get_word())\n trajector_id_list.append('T'+str(number))\n if len(figure_list) > 1:\n number = number+1\n print(prep_id_list, trajector_id_list, landmark_id_list)\n relation_trajector_landmark_list.append(prep_id_list)\n 
relation_trajector_landmark_list.append(trajector_id_list)\n relation_trajector_landmark_list.append(landmark_id_list)\n relation_list = generate_relation_list(relation_trajector_landmark_list)\n print(relation_list)\n node, relation_index = insert_relation_into_node(node, relation_list, relation_index)\n relation_index = relation_index +1\n\n print('----------- motion construction -----------')\n return node, number, relation_index\n\n\nfinal_pattern_dict = get_final_pattern_dict(regular_pattern_dict)\n#file_name = 'semeval2017/train.xml'\nfile_name = 'param/SpRL_test.xml'\nxml_file = open(file_name, 'rb')\nxml_str = xml_file.read()\nxml_file.close()\n\nroot = etree.fromstring(xml_str)\nscene_list = root.findall('SCENE')\nnumber = 0\nrelation_index = 0\n#\n# sentence_number = 0\n# word_number = 0\n# for scene in scene_list:\n# sentence_list = scene.findall('SENTENCE')\n# number = number + len(sentence_list)\n# #print(type(sentence_list), len(sentence_list), sentence_list)\n# for sentence in sentence_list:\n#\n# sentence_number = sentence_number + 1\n# text_str = sentence.find('TEXT').text\n# word_list = nltk.word_tokenize(str(text_str))\n# word_number = word_number + len(word_list)\n# print('sentence_number', sentence_number)\n# print('word_number', word_number)\n# print('average length', str(word_number/sentence_number))\n\n\n\nfor scene in scene_list:\n sentence_list = scene.findall('SENTENCE')\n number = number + len(sentence_list)\n #print(type(sentence_list), len(sentence_list), sentence_list)\n for sentence in sentence_list:\n\n text_str = sentence.find('TEXT').text\n id = sentence.get('id')\n start = sentence.get('start')\n end = sentence.get('end')\n sentence.clear()\n sentence.set('id', id)\n sentence.set('start', start)\n sentence.set('end', end)\n id = sentence.get('id')\n\n text = etree.SubElement(sentence, 'TEXT')\n text_str = text_str.replace('?', ' ')\n text.text = text_str\n sentence,number, relation_index = get_indicator_trajactor_landmark(number, relation_index, text_str, sentence)\n number = number+1\n relation_index = relation_index +1\n\n\ntree = etree.ElementTree(root)\n# if file_name == 'semeval2017/train.xml':\n# tree.write('output/train-output.xml', pretty_print=True, xml_declaration=True, encoding='utf-8')\nif file_name == 'param/SpRL_test.xml':\n tree.write('output/test-output.xml', pretty_print=True, xml_declaration=True, encoding='utf-8')\n","sub_path":"semeval2017.py","file_name":"semeval2017.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"457653546","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nIRSA Query Tool\n===============\n\nThis module contains various methods for querying the\nIRSA Catalog Query Service(CatQuery) and the Moving\nObject Search Tool (MOST).\n\"\"\"\nfrom astropy import config as _config\n\n\nclass Conf(_config.ConfigNamespace):\n \"\"\"\n Configuration parameters for `astroquery.ipac.irsa`.\n \"\"\"\n\n irsa_server = _config.ConfigItem(\n 'https://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query',\n 'Name of the IRSA mirror to use.')\n gator_list_catalogs = _config.ConfigItem(\n 'https://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-scan',\n 'URL from which to list all the public catalogs in IRSA.')\n most_server = _config.ConfigItem(\n 'https://irsa.ipac.caltech.edu/cgi-bin/MOST/nph-most',\n 'URL address of the MOST service.')\n most_interface_url = _config.ConfigItem(\n 
'https://irsa.ipac.caltech.edu/applications/MOST/',\n 'URL address of the MOST application interface.'\n )\n row_limit = _config.ConfigItem(\n 500,\n 'Maximum number of rows to retrieve in result')\n timeout = _config.ConfigItem(\n 60,\n 'Time limit for connecting to the IRSA server.')\n\n\nconf = Conf()\n\n\nfrom .core import Irsa, IrsaClass\nfrom .most import Most, MostClass\n\n__all__ = ['Irsa', 'IrsaClass', 'Most', 'MostClass', 'Conf', 'conf', ]\n","sub_path":"astroquery/ipac/irsa/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28232768","text":"from Buttons import*\nfrom Platformer import*\nfrom Menu import*\n\n\nbig_button_width = 320\nbig_button_height = 120\n\n\nclass Settings():\n def __init__(self, fs):\n self.win_width = 640\n self.win_height = 640\n self.timer = pygame.time.Clock()\n self.back_button = Button(160, 240, big_button_width, big_button_height, \"buttons/back.png\", \"buttons/dback.png\")\n self.mp = True\n self.fs = fs\n self.fs0 = False\n pygame.init()\n if self.fs:\n self.screen = pygame.display.set_mode((self.win_width, self.win_height), pygame.FULLSCREEN)\n else:\n self.screen = pygame.display.set_mode((self.win_width, self.win_height))\n pygame.display.set_caption('Settings')\n self.bg = Surface((self.win_width, self.win_height))\n self.bg.fill(Color(\"#FFFFFF\"))\n self.run()\n\n def run(self):\n run = True\n while run:\n if self.fs0:\n if self.fs:\n self.screen = pygame.display.set_mode((self.win_width, self.win_height), pygame.FULLSCREEN)\n else:\n self.screen = pygame.display.set_mode((self.win_width, self.win_height))\n self.fs0 = False\n self.timer.tick(60)\n run = self.happenings(event.get(), run)\n self.screen.blit(self.bg, (0, 0))\n self.back_button.draw(self.screen)\n display.update()\n\n\n def happenings(self, events, run):\n for e in events:\n if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):\n raise SystemExit(\"QUIT\")\n if e.type == KEYDOWN and e.key == K_f:\n if self.fs:\n self.fs = False\n else:\n self.fs = True\n self.fs0 = True\n if self.back_button.pressed(mouse, mouse.get_pos()) and self.mp:\n self.mp = False\n run = False\n return run","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"334557851","text":"import numpy as np\n\nimport hmsolver.geometry as geometry\nfrom hmsolver.app import PdSimulation2d\nfrom hmsolver.basis import Quad4Node\nfrom hmsolver.meshgrid import Zone2d, HybridMesh2d\nfrom hmsolver.material import PdMaterial2d\nfrom hmsolver.femcore import point_criteria, segment_criteria\nfrom hmsolver.femcore import boundary_cond2d, BoundaryConds2d\n\nimport sys\nimport argparse\n\narg = argparse.ArgumentParser(f\"python {sys.argv[0]}\")\narg.add_argument(\"-p\",\n \"--parallelized\",\n metavar=\"str\",\n default=\"True\",\n type=str,\n help=\"open parallelized\")\narg.add_argument(\"-s\",\n \"--gridsize\",\n metavar=\"float\",\n default=0.1,\n type=float,\n help=\"grid size\")\n\nif __name__ == '__main__':\n args = arg.parse_args()\n args.parallelized = args.parallelized == \"True\"\n zone_xl, zone_xr = 0, 1\n zone_yl, zone_yr = 0, 1\n zone_xmid = 0.5 * (zone_xl + zone_xr)\n zone_ymid = 0.5 * (zone_yl + zone_yr)\n grid_size = args.gridsize\n zone = Zone2d(zone_xl, zone_xr, zone_yl, zone_yr)\n mesh2d = zone.meshgrid_zone(HybridMesh2d, 
grid_size)\n\n    material2d = PdMaterial2d(3e11, 1.0 / 3)\n\n    stretch = 0.1\n    boundary_0 = point_criteria(zone_xmid, zone_yl)\n    boundary_1 = segment_criteria(zone_xl, zone_yl, zone_xr, zone_yl)\n    boundary_2 = segment_criteria(zone_xl, zone_yr, zone_xr, zone_yr)\n    _bc_ = boundary_cond2d # abbreviate the word for type & read\n    boundarys = BoundaryConds2d(\n        _bc_(\"point\", boundary_0, \"fixed\", None, None),\n        _bc_(\"segment\", boundary_1, \"set_uy\", \"constant\", 0),\n        _bc_(\"segment\", boundary_2, \"set_uy\", \"constant\", +stretch))\n    del _bc_ # delete the abbreviation\n    boundarys.manually_verify()\n\n    app = PdSimulation2d(mesh2d, material2d, boundarys)\n    app.app_name = \"plate\"\n    app.parallelized = args.parallelized\n    horizon_radius, inst_len = 3 * grid_size, 0.015\n    app.material.setPeridynamic(horizon_radius, grid_size, inst_len)\n    app.mesh.peridynamic_construct(horizon_radius, 2 * horizon_radius,\n                                   4 * horizon_radius)\n    app.apply_basis(Quad4Node())\n    app.check_engine()\n    app.export_to_tecplot(\"peridynamic\", *app.provied_solutions)\n","sub_path":"Examples/example-05-PdSimulation2d-Benchmark.py","file_name":"example-05-PdSimulation2d-Benchmark.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"602559016","text":"def solution(s):\n    min_zip_length = len(s) # minimum length (it can never exceed the length of the given string)\n\n    # for a string of length N, cutting into chunks longer than N/2 cannot shrink it (searching up to half is enough)\n    # so try every chunk length from 1 to N/2 and pick the shortest compressed result.\n    for index in range(1, len(s) // 2 + 1):\n        zip_str = '' # compressed string\n        compare_str = s[0:index] # comparison chunk\n        zip_count = 1 # repeat count\n\n        # compare the string index characters at a time\n        for j in range(index, len(s) + 1, index):\n            if compare_str == s[j:j + index]: # if the next chunk matches, bump the repeat count (an out-of-range slice is just '')\n                zip_count += 1\n            else: # otherwise, when the chunks differ,\n                zip_str += f\"{zip_count if zip_count > 1 else ''}{compare_str}\" # write the count first if the chunk repeated, then the chunk itself\n                compare_str = s[j:j + index] # move on to the new comparison chunk\n                zip_count = 1\n\n        # append the final comparison chunk 🤩\n        zip_str += compare_str\n\n        # update the minimum length\n        if min_zip_length > len(zip_str):\n            min_zip_length = len(zip_str)\n\n    return min_zip_length","sub_path":"programmers/level2/gyu/4.문자열압축.py","file_name":"4.문자열압축.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"339144647","text":"import socket as Socket\nfrom threading import Thread\n\nfrom clientHandler import ClientHandler\nfrom logger import Logger\nfrom models import ClientInfo, ChangesEvent, Response\n\nclass Server(Thread):\n    def __init__(self, address, port):\n        super().__init__()\n        self.socket = Socket.socket()\n        self.socket.bind((address, port))\n\n        self.__isWorking = False\n        self.clients = {}\n        self.clientIndex = 0\n\n    @property\n    def isWorking(self):\n        return self.__isWorking\n\n    def run(self):\n        self.__isWorking = True\n        self.listenClients()\n\n    def listenClients(self):\n        while self.__isWorking:\n            self.socket.listen()\n            clientInfo = self.waitClientConnection()\n            if not self.__isWorking:\n                return\n            self.serveClient(clientInfo)\n\n    def serveClient(self, clientInfo: ClientInfo):\n        clientHandler = ClientHandler(clientInfo, self.clientIndex, self.sendChangesUpdateEvent)\n        self.clients[self.clientIndex] = clientHandler\n        self.clientIndex += 1\n        clientHandler.onClientDisconnected = self.onClientDisconnected\n        clientHandler.start()\n\n        Logger.log(f\"Client #{clientHandler.index} 
{clientHandler.address} has connected\")\n\n def sendChangesUpdateEvent(self, updateEvent: ChangesEvent):\n client: ClientHandler\n for client in self.clients.values():\n condition = not client.role in updateEvent.roles\n condition = condition and client.userId != updateEvent.includeClientId\n condition = condition or client.index == updateEvent.exceptClientId\n if condition:\n continue\n response = Response(True, \"\", updateEvent.tables, True)\n client.respond(response.toJson())\n\n def waitClientConnection(self):\n try:\n clientInfo = self.socket.accept()\n return ClientInfo(clientInfo)\n except OSError:\n if self.__isWorking:\n raise\n\n def onClientDisconnected(self, client: ClientHandler):\n try:\n self.clients.pop(client.index)\n except KeyError as e:\n Logger.log(f\"Error {e} clients={self.clients}\")\n Logger.log(f\"Client #{client.index} {client.address} has disconnected\")\n\n def stop(self):\n self.__isWorking = False\n self.socket.close()\n client: ClientHandler\n for client in self.clients.values():\n client.pendedToDisconnect = True\n client.disconnect()\n Logger.command(\"Server has stopped\")\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"194308534","text":"#!/usr/bin/env python\n\n# Script info at the bottom\n\nimport os, time, glob, subprocess\nfrom datetime import datetime\n\nprotocol = 'afp' # set your connection protocol, afp by default\ntm_share = 'afp://tm:pass@10.1.1.1/TimeMachine' # user:pass @ ip address /share\nmount_path = '/Volumes/TimeMachine' # Set your mount path\nfiles = glob.glob(mount_path + '/*') # Change to fit the path to your tm backups\nthreshold = 30 # how many days until reporting no backup\n\ndef mutt(backup_list):\n mutt_email = ''\n echo_cmd = ['echo',\\\n 'The following TimeMachine backups are older than %s days: \\n%s'\\\n % (threshold, backup_list)]\n send_cmd = [\"/usr/local/bin/mutt\", \"-s\",\\\n \"'TimeMachine backups older than %s days'\" % threshold, mutt_email]\n \n echo = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)\n output = subprocess.Popen(send_cmd, stdin=echo.stdout,\\\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stuff = output.stdout.read()\n errors = output.stderr.read()\n \n print(\"stuff to know: \" + stuff)\n print(\"errors: \" + errors)\n exit(0)\n \ndef backup_check(files):\n file_dates = []\n old_backups = []\n \n time_today = time.strftime(\"%Y-%m-%d\")\n today = datetime.strptime(time_today, '%Y-%m-%d')\n \n for file in files:\n meta = os.stat(file)\n file_date = time.gmtime(meta[-2])\n clean_date = time.strftime('%Y-%m-%d', file_date)\n file_date_append = file, clean_date\n file_dates.append(file_date_append)\n for _ in file_dates:\n file_path, raw_date = _\n last_backup_date = datetime.strptime(raw_date, '%Y-%m-%d')\n raw_backup_date = str(last_backup_date).split(' ')[0]\n backup_date = datetime.strptime(raw_backup_date, '%Y-%m-%d')\n last_backup = str(abs((today - backup_date).days))\n if int(last_backup) > threshold:\n old_backups.append(\"Last backup:\\t\" + last_backup\n + \"\\tdays ago, file: \" + file_path)\n \n if old_backups:\n backup_list = '\\n'.join(old_backups)\n print(backup_list)\n # mutt(backup_list)\n \ndef tm_volume(protocol, tm_share, mount_path, files):\n mounted = os.path.isdir(mount_path)\n while mounted != True:\n mount_cmd = ['mount', '-t', protocol, tm_share, mount_path]\n os.mkdir(mount_path)\n subprocess.call(mount_cmd)\n mounted = 
os.path.isdir(mount_path)\n backup_check(files)\n\ntm_volume(protocol, tm_share, mount_path, files)\n\n\n# Script overview: This script checks the last modified date on the TimeMachine\n# .sparsebundle and reports which files haven't been modified within the last\n# 30 (default) days.\n#\n# The script first checks that the specified TimeMachine volume is mounted, if\n# it isn't it mounts the volume.\n# The script then gets a list of all of the backups, checks their last modified\n# date, and compares it to the current date. It then makes a report of all of\n# the backups that haven't run in the last 30 days.\n# It passes this data to the mutt function which sends an email report to the\n# specified email address.\n# \n# I created a user LaunchAgent on my server to run this script once a week to \n# get weekly TimeMachine reports.\n# This script isn't useful for users who let the backup start running but\n# cancel midway through since the file's then been modified.\n# \n# \n# Variables:\n# Roughly lines 8-11 are the only variables you'll need to change to get\n# working in your enviroment.\n# I've disabled emailing and instead the script will just print out a report.\n# To enable the mutt emailing un-comment (roughly) line 58 which calls the mutt\n# function ( mutt(backup_list) )\n# \n#\n# Issues:\n# If you find any bugs or anything please just create a new issue and I'll\n# take a look.\n# \n# Known bugs:\n# When TimeMachine isn't mounted and the script mounts the volume it exits\n# without properly checking and reporting the TimeMachine backup info.","sub_path":"python/utilities/tm_check.py","file_name":"tm_check.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"105752722","text":"from openmdao.lib.datatypes.api import Float, Dict, Array, List, Int\nfrom openmdao.main.api import Component, Assembly\nimport numpy as np\nimport cv2\n\n\"\"\"\nImage frame analysis components written to operate only on inputted regions\n(slices of numpy arrays) which are inputted. \n\nTypically these recieve, as input, the output of some object detection components\n\"\"\"\n\nclass processRect(Component):\n \"\"\"\n Process inputted rectangles, using specification \n [ [x pos, y pos, width, height], ... 
]\n    into an inputted frame.\n    \n    (Used as a prototype for most of the region-specific image analysis \n    components)\n    \"\"\"\n    \n    def __init__(self, channels=(0, 1, 2), zerochannels=()):  # immutable defaults avoid shared mutable state\n        super(processRect,self).__init__()\n        self.add(\"frame_in\", Array(iotype=\"in\"))\n        self.add(\"rects_in\", Array(iotype=\"in\"))\n        self.add(\"frame_out\", Array(iotype=\"out\"))\n        self.channels = channels\n        self.zerochannels = zerochannels\n    \n    def execute(self):\n        temp = np.array(self.frame_in) # bugfix for strange cv2 error\n        if self.rects_in.size > 0:\n            for rect in self.rects_in:\n                if len(self.frame_in.shape) == 3:\n                    for chan in self.channels:\n                        temp[:,:,chan] = self.process(rect, temp[:,:,chan])\n                    x,y,w,h = rect\n                    for chan in self.zerochannels:\n                        temp[y:y+h,x:x+w,chan]= 0*temp[y:y+h,x:x+w,chan]\n                else:\n                    temp = self.process(rect, temp)\n        self.frame_out = temp\n    \n    def process(self, rect, frame):\n        # no-op base implementation; subclasses override this with the real per-region processing\n        return frame\n\nclass drawRectangles(processRect):\n    \"\"\"\n    Draws rectangle outlines in a specific region within the inputted frame\n    \"\"\"\n    \n    def process(self, rect, frame):\n        x,y,w,h = rect\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0,255,0), 3)\n        return frame\n\nclass VariableEqualizerBlock(processRect):\n    \"\"\"\n    Equalizes the contrast in a specific region within the inputted frame\n    \n    Balance between fully equalized contrast and the un-altered frame can be\n    varied by setting the 'alpha' and 'beta' inputs.\n    \"\"\"\n    beta = Float(0., iotype=\"in\")\n    alpha = Float(1., iotype=\"in\")\n    def process(self, rect, frame):\n        x,y,w,h = rect\n        subimg = np.array(frame[y:y+h,x:x+w]) \n        subimg = self.beta*subimg + self.alpha*cv2.equalizeHist(subimg) \n        frame[y:y+h,x:x+w] = subimg\n        return frame\n    \n\nclass frameSlices(Component):\n    \"\"\"\n    Collect slices of inputted frame using rectangle specifications. \n    \n    This component is typically used to grab regions of interest of an image for\n    GUI display.\n    \"\"\"\n    def __init__(self, channels=(0, 1, 2)):\n        super(frameSlices,self).__init__() \n        self.add(\"frame_in\", Array(iotype=\"in\"))\n        self.add(\"rects_in\", Array(iotype=\"in\"))\n        self.add(\"slices\", List([ np.array([0,0]) ],iotype=\"out\"))\n        self.add(\"combined\", Array(iotype=\"out\"))\n\n        self.add(\"zero_mean\", Float(0., iotype=\"out\"))\n        \n        self.channels = channels\n        \n    def combine(self,left, right):\n        \"\"\"Stack images horizontally.\n        \"\"\"\n        h = max(left.shape[0], right.shape[0])\n        w = left.shape[1] + right.shape[1]\n        hoff = left.shape[0]\n        \n        shape = list(left.shape)\n        shape[0] = h\n        shape[1] = w\n        \n        comb = np.zeros(tuple(shape),left.dtype)\n        \n        # left will be on left, aligned top, with right on right\n        comb[:left.shape[0],:left.shape[1]] = left\n        comb[:right.shape[0],left.shape[1]:] = right\n        \n        return comb \n    \n    def execute(self):\n        comb = 150*np.ones((2,2))\n        if self.rects_in.size > 0:\n            self.slices = []\n            for x,y,w,h in self.rects_in:\n                output = self.frame_in[y:y+h,x:x+w]\n                self.slices.append(output)\n                \n                comb = self.combine(output, comb)\n        self.combined = comb\n        self.zero_mean = self.slices[0].mean()\n    \n\n    ","sub_path":"lib/sliceops.py","file_name":"sliceops.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241583717","text":"# Maximum contiguous subarray\n\n# Divide and conquer approach takes O(n log n)\n#\n# 1. Divide array into two halves\n# 2. return the maximum of:\n#    a. maximum subarray in left half\n#    b. maximum subarray in right half\n#    c. 
maximum subarray that crosses the midpoint\n\ndef max_crossing_sum(a, lo, mid, hi):\n    # best sum of a subarray ending at mid plus one starting at mid+1\n    total = 0\n    left_sum = float('-inf')  # Python 3: sys.maxint no longer exists\n    \n    for i in range(mid, lo-1, -1):\n        total += a[i]\n        if total > left_sum:\n            left_sum = total\n    \n    total = 0\n    right_sum = float('-inf')\n    for i in range(mid+1, hi+1):\n        total += a[i]\n        if total > right_sum:\n            right_sum = total\n    \n    return left_sum + right_sum\n\n\ndef max_subarray(a, lo, hi):\n    \n    if lo == hi:\n        return a[lo]\n    \n    mid = (lo + hi) // 2\n    \n    return max(\n        max_subarray(a, lo, mid),\n        max_subarray(a, mid+1, hi),\n        max_crossing_sum(a, lo, mid, hi))\n\n\n\n# Kadane's algorithm solves the largest contiguous subarray in O(n)\n\ndef max_subarray_kadane(a):\n\n    max_so_far = 0\n    max_ending_here = 0\n    \n    for n in a:\n        max_ending_here = max(0, max_ending_here + n)\n        max_so_far = max(max_so_far, max_ending_here)\n    \n    return max_so_far\n\n\n# Kadane's algorithm does not work for arrays with all negative numbers\n# here is a generalisation\ndef generalised_kadane(a):\n    \n    max_so_far = max_ending_here = a[0]\n    \n    for x in a[1:]:\n        max_ending_here = max(x, max_ending_here + x)\n        max_so_far = max(max_so_far, max_ending_here)\n    \n    return max_so_far","sub_path":"algorithms and data structures/MaxSubarray.py","file_name":"MaxSubarray.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"35880641","text":"import printFile\n\ndef gauss(x, mu, sigma):\n    import numpy as np\n    return 1.0/np.sqrt(2*np.pi*sigma**2)*np.exp(-(x-mu)**2/(2*sigma**2))\n\ndef trapezoid(f, lower, upper):\n    s = 0.0\n    N = 1000\n    dx = (upper - lower)/float(N)\n    for i in range(N):\n        xa = lower + i*dx\n        xb = lower + (i+1)*dx\n        s = s + 0.5*(f(xa) + f(xb))*dx\n    return s\n\ndef D(omega):\n    return gauss(omega, 0, 1)\n\ndef A(omega): \n    return 0.5*gauss(omega, -1.2, 0.5) + 0.5*gauss(omega, 1.2, 0.4)\n\ndef defaultG(omega_n):\n    import numpy as np\n    \n    lower = -20\n    upper = -lower\n    N = 600\n    domega = (upper - lower)/float(N)\n\n    def integrand(omega_n, omega):\n        return 0.5/np.pi*(A(omega))/(-omega + omega_n*1j)\n\n    s = 0.0 + 0.0j\n    for i in range(N):\n        omega_a = lower + i*domega\n        omega_b = lower + (i+1)*domega\n        s = s + 0.5*domega*(integrand(omega_n, omega_a) + integrand(omega_n, omega_b))\n    return s\n\ndef main():\n    import os\n    import sys\n    import numpy as np\n\n    beta = 10.0\n    Niom = 100\n    omega_n = []\n    for nw in range(Niom):\n        omega_n.append((2*nw+1)*np.pi/beta)\n\n    G_real = []\n    G_imag = []\n    for nw in range(Niom):\n        temp = defaultG(omega_n[nw])\n        G_real.append(temp.real)\n        G_imag.append(temp.imag)\n\n    randomNumbers = np.random.normal(0, 0.0001, Niom)\n    for nw in range(len(G_real)):\n        G_real[nw] = G_real[nw] + randomNumbers[nw]\n    randomNumbers = np.random.normal(0, 0.00015, Niom)\n    for nw in range(len(G_imag)):\n        G_imag[nw] = G_imag[nw] + randomNumbers[nw]\n    printFile.printFile(omega_n, G_real, \"G_real.txt\")\n    printFile.printFile(omega_n, G_imag, \"G_imag.txt\")\n    ofile = open(\"G.txt\", \"w\")\n    for i in range(len(omega_n)):\n        ofile.write(str(omega_n[i]) + \" \" + str(G_real[i]) + \" \" + str(G_imag[i]) + \"\\n\")\n    ofile.close()\n    ofile = open(\"G_error.txt\", \"w\")\n    for i in range(len(omega_n)):\n        ofile.write(str(omega_n[i]) + \" \" + str(0.0001) + \" \" + str(0.00015) + \"\\n\")\n    ofile.close()\n\n    omega = []\n    spectral = []\n    omega_lower = -5\n    omega_upper = 5\n    Nomega = 60\n    delta = (omega_upper - omega_lower)/float(Nomega)\n    for i in range(Nomega+1):\n        omega.append(omega_lower + i*delta)\n    for i 
in range(len(omega)):\n spectral.append(A(omega[i]))\n printFile.printFile(omega, spectral, \"spectral.txt\")\n return 0\n\nif __name__ == \"__main__\":\n import sys\n sys.exit(main())\n","sub_path":"default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"531722613","text":"import sys\nimport heapq\nfrom bisect import *\nfrom collections import *\n\ndef solve(s, n):\n ans = sum([c == 'a' for c in s]) * (n//len(s))\n ans += sum([c == 'a' for c in s[:n%len(s)]])\n return ans\n\n\ndef solve2(s, n):\n ans = s.count('a') * (n//len(s))\n ans += s[:n%len(s)].count('a')\n return ans\n\nif __name__ == '__main__':\n s = sys.stdin.readline().strip()\n n = int(input())\n\n res = solve2(s, n)\n print(res)\n","sub_path":"hackerrank/repeated-string.py","file_name":"repeated-string.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"410148190","text":"\n#\n#-*- coding:utf-8 -*-\n# ocr_card_filter.py\nimport os\nfrom PIL import Image\nimport pyocr\nimport pyocr.builders\nimport argparse\nimport numpy as np\n\ndef doparse():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-t\", '--threshold', type=int, help=\"threshold\")\n parser.add_argument(\"--prop\", action='store_true', help=\"prop\")\n parser.add_argument(\"--prop2\", action='store_true', help=\"prop2 \")\n parser.add_argument(\"--eval\", action='store_true', help=\"eval\")\n \n parser.add_argument(\"-r\",\"--ratio\", type=float, help=\"eval\")\n args = parser.parse_args()\n return args\n\nclass ocr:\n tool = None\n # path_tesseract = \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\"\n path_tesseract = \"C:\\\\Program Files\\\\Tesseract-OCR\"\n imgsrc = \"./pics/Screenshot_20200508-122913.png\"\n c_max = 200\n ratio = 0.28\n\n def __init__(self, args, imgsrc = None):\n self.args = args\n if imgsrc == None:\n self.imgsrc = ocr.imgsrc\n else:\n self.imgsrc = imgsrc\n # self.c_max = 169\n self.c_max = ocr.c_max\n if args.threshold:\n self.c_max = args.threshold\n if ocr.tool != None:\n return\n # インストール済みのTesseractのパスを通す\n if ocr.path_tesseract not in os.environ[\"PATH\"].split(os.pathsep):\n os.environ[\"PATH\"] += os.pathsep + ocr.path_tesseract\n # OCRエンジンの取得\n tools = pyocr.get_available_tools()\n # print(path_tesseract)\n # print(tools)\n ocr.tool = tools[0]\n\n def evaluate(self):\n ratio = ocr.ratio\n if self.args.ratio:\n ratio = args.ratio\n res = self.prop2()\n center = res[0]\n best = 0\n mtext = 0\n dic = {}\n for th in (0.8, 0.9, 1.0, 1.1, 1.2):\n ith = int( th * center )\n text = self.scan( ith )\n text2 = text.replace(' ','')\n ntext = len(text2)\n print( ith, ntext )\n dic[ith] = text\n if ntext > mtext :\n best = ith\n mtext = ntext\n\n print( '- BEGIN --' )\n print( dic[best] )\n print( '- END --' )\n \n def edit_image1(self, img_rgb, cmax):\n # Use only fullblack or fullwhite basd on threshold of the sum of RGB.\n pixels = img_rgb.load()\n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n v3 = sum( pixels[i,j] )\n color = (0,0,0)\n if v3 > cmax * 3:\n color = (255, 255, 255)\n pixels[i, j] = color\n \n return img_rgb\n\n\n def edit_image_orig(self, img_rgb):\n pixels = img_rgb.load()\n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n if (pixels[i, j][0] > self.c_max or pixels[i, j][1] > self.c_max or\n pixels[i, j][2] > self.c_max):\n pixels[i, j] = (255, 255, 
255)\n pass\n return img_rgb\n \n def doit(self):\n result = self.scan( self.c_max )\n print(result)\n\n def scan(self, cmax):\n img_org = Image.open( self.imgsrc )\n img_rgb = img_org.convert(\"RGB\")\n\n img = self.edit_image1(img_rgb, cmax)\n\n # OCR実行\n builder = pyocr.builders.TextBuilder()\n result = ocr.tool.image_to_string(img, lang=\"jpn\", builder=builder)\n\n return result\n\n def prop(self):\n # 原稿画像の読み込み\n # img_org = Image.open(\"./card_image/zairyucard_omote.jpg\")\n img_org = Image.open(\"./pics/Screenshot_20200508-122913.png\")\n img_rgb = img_org.convert(\"RGB\")\n pixels = img_rgb.load()\n\n # 原稿画像加工(黒っぽい色以外は白=255,255,255にする)\n hist = [0,0,0]\n for c in range(3):\n hist[c] = np.zeros(30)\n \n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n for c in range(3):\n v = pixels[i,j][c]/10\n hist[c][int(v)] += 1.0\n\n for n in range(30):\n mark = 0\n for c in range(3):\n v = hist[c][n]\n if v > 100:\n mark = 1\n if mark == 1:\n print( n, hist[0][n], hist[1][n], hist[2][n] )\n \n def prop2(self, pos=0.28, div=16):\n # 原稿画像の読み込み\n # img_org = Image.open(\"./card_image/zairyucard_omote.jpg\")\n img_org = Image.open(\"./pics/Screenshot_20200508-122913.png\")\n img_rgb = img_org.convert(\"RGB\")\n pixels = img_rgb.load()\n\n # 原稿画像加工(黒っぽい色以外は白=255,255,255にする)\n hist = [0,0,0,0]\n ndiv = int( 256 / div + 2 )\n for c in range(4):\n hist[c] = np.zeros( ndiv )\n\n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n for c in range(3):\n v = pixels[i,j][c]/ div \n hist[c][int(v)] += 1.0\n s = sum(pixels[i,j]) / div / 3\n hist[3][ int(s) ] += 1\n\n acc = 0\n total = sum(hist[3])\n th = 0\n for n in range( ndiv ):\n mark = 0\n acc += hist[3][n]\n for c in range(4):\n v = hist[c][n]\n if v > 100:\n mark = 1\n ratio = 1.0 * acc / total\n if mark == 1:\n print( \"{} {} {} {} {} {:.1f}\".format( n, hist[0][n], hist[1][n], hist[2][n], hist[3][n], ratio * 100 ))\n pass\n if ratio < pos:\n th = n * div\n return (th, hist[3])\n \n def doit_orig(self):\n # Use only fullblack or fullwhite basd on threshold of the sum of RGB.\n # 原稿画像の読み込み\n # img_org = Image.open(\"./card_image/zairyucard_omote.jpg\")\n img_org = Image.open(\"./pics/Screenshot_20200508-122913.png\")\n img_rgb = img_org.convert(\"RGB\")\n pixels = img_rgb.load()\n\n # 原稿画像加工(黒っぽい色以外は白=255,255,255にする)\n # c_max = 169\n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n if (pixels[i, j][0] > self.c_max or pixels[i, j][1] > self.c_max or\n pixels[i, j][2] > self.c_max):\n pixels[i, j] = (255, 255, 255)\n\n # OCR実行\n builder = pyocr.builders.TextBuilder()\n result = ocr.tool.image_to_string(img_rgb, lang=\"jpn\", builder=builder)\n\n print(result)\n\n def doit5(self):\n # decrease the pixel of less than threshold to 1/10.\n # 原稿画像の読み込み\n # img_org = Image.open(\"./card_image/zairyucard_omote.jpg\")\n img_org = Image.open(\"./pics/Screenshot_20200508-122913.png\")\n img_rgb = img_org.convert(\"RGB\")\n pixels = img_rgb.load()\n\n # 原稿画像加工(黒っぽい色以外は白=255,255,255にする)\n for j in range(img_rgb.size[1]):\n for i in range(img_rgb.size[0]):\n v3 = 0\n for c in range(3):\n v3 += pixels[i, j][c]\n if v3 > self.c_max:\n pixels[i, j] = (255, 255, 255)\n else:\n v = [0,0,0]\n for c in range(3):\n v[c] = int( pixels[i, j][c]/10 )\n pixels[i, j] = tuple(v)\n\n # OCR実行\n builder = pyocr.builders.TextBuilder()\n result = ocr.tool.image_to_string(img_rgb, lang=\"jpn\", builder=builder)\n\n print(result)\n\nif __name__ == '__main__':\n args = doparse()\n\n oi = ocr(args)\n if args.prop:\n oi.prop()\n 
elif args.eval:\n oi.evaluate()\n elif args.prop2:\n res = oi.prop2()\n th = res[0]\n print( th )\n else:\n oi.doit()\n\n#\n# EOF\n#\n","sub_path":"ocr/ocr6.py","file_name":"ocr6.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"342778948","text":"from typing import List\n\nfrom . import ThrottlingBucket, ThrottlingRule, ThrottlingOptions\nfrom datetime import timedelta\n\n\nRuleList = List[ThrottlingRule]\nBucketList = List[ThrottlingBucket]\n\n\ndef get_buckets(rules: RuleList, arguments_bundle: dict, options: ThrottlingOptions=None) -> BucketList:\n \"\"\"\n Возвращает вёдра, созданные путём комбинации списка правил с аргументами запроса.\n \"\"\"\n ret = []\n if not rules:\n return ret\n for rule in rules:\n ret.append(ThrottlingBucket(rule, arguments_bundle, options))\n return ret\n\n\ndef check_throttle(buckets: BucketList) -> timedelta:\n \"\"\"Возвращает интервал, который осталось подождать до истечения таймаута вёдер\"\"\"\n ret = timedelta()\n for b in buckets:\n t = b.check_throttle()\n if t and t > ret:\n ret = t\n return ret\n\n\ndef commit_request(buckets: BucketList):\n \"\"\"Уведомляет каждое ведро о том, что запрос исполнен\"\"\"\n [b.commit_request() for b in buckets]\n","sub_path":"bucket_throttling/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"609581788","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# -*- coding : utf-8 -*-\n\"\"\"\n@projectname : Epass\n@author : zhangaihua\n@Time : 2019/8/27\n@File : test_saveChildUser.py\n@describe : 该模块用于测试鉴权1.0 bim融合-企业用户管理-新建应用子账号接口(新增用户在一个应用下的子账号)\n\n\"\"\"\nimport time\nimport unittest\nfrom ep_common.ConfigServer import conf_url\nfrom ep_common.ConfigServer import conf_appid\nfrom ep_common.ConfigPerson import ConfigPerson\nfrom ep_common.JsonToStrUtil import jsonTostr\nfrom ep_common.SceneDecide import sceneDecide\nfrom ep_common.MysqldbUtil import MySqlUtil\nfrom ep_common.child_user_sql import ChildUserSqlUtil\nfrom ep_common.GenerateRandomStr import generate_random_str\n\n\nclass SaveChildUser(unittest.TestCase):\n def setUp(self):\n api_url = \"coding/outUserAndApp/saveChildUser\"\n self.url = conf_url() + api_url\n self.mysql = MySqlUtil()\n self.appId = conf_appid()\n self.login_name = ConfigPerson().conf_loginName()\n # 获取测试用户id:使用配置文件配置的数据\n self.mysql.cursor.execute(\"select id from ep_users where login_name='%s'\" % self.login_name)\n self.user_id = self.mysql.cursor.fetchall()[0][0]\n # 获取测试应用id(使用配置文件配置的数据), 添加应用授权\n self.mysql.cursor.execute(\"select id from ep_company_app where app_id='%s'\" % self.appId)\n self.company_app_id = self.mysql.cursor.fetchall()[0][0]\n self.auth_id = ChildUserSqlUtil.insert_auth(self.user_id, self.company_app_id)\n\n\n def test_save_child_user_01(self):\n u'新增子账号(appId和loginName存在且用户已有该应用的权限),请求成功'\n data = {\n \"appId\": self.appId,\n \"description\": \"新建子账号接口测试用例1\",\n \"loginName\": self.login_name,\n \"name\": \"child1_apiTest\"\n }\n new_child_user=data['name']\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"操作成功\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n # 断言响应数据\n expect_data = new_child_user\n actual_data = ChildUserSqlUtil.find_child_user(self.user_id, 
self.company_app_id)\n print(actual_data)\n self.assertIn(expect_data, actual_data)\n\n def test_save_child_user_02(self):\n u'子账号名称长度超过45,请求失败'\n data = {\n \"appId\": self.appId,\n \"description\": \"新建子账号测试用例2\",\n \"loginName\": self.login_name,\n \"name\": generate_random_str(46)\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"子账号名称长度不超过45;\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_03(self):\n u'子账号描述长度超过300,请求失败'\n data = {\n \"appId\": self.appId,\n \"description\": generate_random_str(301),\n \"loginName\": self.login_name,\n \"name\": \"child3_apiTest\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"子账号描述长度不超过300;\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_04(self):\n u'应用appId为空,请求失败'\n data = {\n \"appId\": \"\",\n \"description\": \"\",\n \"loginName\": self.login_name,\n \"name\": \"child4_apiTest\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"应用id不能为空;\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_05(self):\n u'主账号loginName为空,请求失败'\n data = {\n \"appId\": self.appId,\n \"description\": \"\",\n \"loginName\": \"\",\n \"name\": \"child5_apiTest\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"参数错误;\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_06(self):\n u'子账号name为空,请求失败'\n data = {\n \"appId\": self.appId,\n \"description\": \"\",\n \"loginName\": self.login_name,\n \"name\": \"\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"参数错误;\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_07(self):\n u'主账号不存在,请求失败'\n data = {\n \"appId\": self.appId,\n \"description\": \"\",\n \"loginName\": \"notfounduser\",\n \"name\": \"child7_apiTest\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"主账号不存在\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_08(self):\n u'用户不具有此应用的权限,请求失败'\n data = {\n \"appId\": \"elogin\",\n \"description\": \"\",\n \"loginName\": self.login_name,\n \"name\": \"child8_apiTest\"\n }\n data = jsonTostr(data)\n re = sceneDecide(self.url, data)\n print('新建应用子账号接口入参:=================>\\n', data)\n print('新建应用子账号接口响应:=================>\\n', re)\n # 断言响应消息\n expect_message=\"用户不具有此应用的权限\"\n actual_message = re['message']\n self.assertEqual(expect_message, actual_message)\n\n def test_save_child_user_09(self):\n u'子账号已存在,请求失败'\n # 添加子账号\n child_id_list, child_name_list = 
ChildUserSqlUtil.insert_child_users(1, self.user_id,\n                                                                             self.company_app_id)\n        data = {\n            \"appId\": self.appId,\n            \"description\": \"\",\n            \"loginName\": self.login_name,\n            \"name\": child_name_list[0]\n        }\n        data = jsonTostr(data)\n        re = sceneDecide(self.url, data)\n        print('新建应用子账号接口入参:=================>\\n', data)\n        print('新建应用子账号接口响应:=================>\\n', re)\n        # 断言响应消息\n        expect_message=\"子账号已存在\"\n        actual_message = re['message']\n        self.assertEqual(expect_message, actual_message)\n\n    def tearDown(self):\n        # 删除应用授权\n        self.mysql.sql_delete(\"delete from ep_user_company_app where id='%s'\" % self.auth_id)\n        # 删除子账号\n        self.mysql.sql_delete(\n            \"delete from ep_child_user where user_id='%s' and company_app_id=%s\" % (\n                self.user_id, self.company_app_id))\n        self.mysql.db_sql.close()\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"testCases(v1.0)/ProductIntegrationCases(产品融合相关接口)/test_saveChildUser.py","file_name":"test_saveChildUser.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"325817686","text":"from PeopleCounter.TrackingObject.centroidtracker import CentroidTracker\nfrom PeopleCounter.TrackingObject.TrackableObject import TrackableObject\n\nimport cv2\nimport imutils\nimport numpy as np\nimport streamlit as st\nimport tempfile\nimport dlib\ndef hide_streamlit_widgets():\n    \"\"\"\n    hides widgets that are displayed by streamlit when running\n    \"\"\"\n    hide_streamlit_style = \"\"\"\n    \n    \"\"\"\n    st.markdown(hide_streamlit_style, unsafe_allow_html=True)\ndef app_count_people():\n\n    st.title(\"People Counter: \")\n    st.write(\"In this tutorial, I apply an object tracking technique to implement a people counter model. The model uses a simple \"\n             \"technique called centroid tracking, which re-assigns each object ID based on the centroid distances in each frame. \"\n             \"That is followed by the correlation tracker algorithm, which updates the centroids from frame to frame. \"\n             )\n    st.subheader(\"1. Centroid Tracking: \")\n    st.write(\"- __Step 1__: Accept bounding box coordinates and compute centroids:\"\n             \"\\n + For every frame of the video we need to provide the bounding boxes for the objects in the current frame \"\n             \"(the object detector can be any model you like, HOGs, Faster R-CNN, ...); based on these bounding boxes, we compute a centroid for each.\"\n             \"\\n + At the initial frame, besides feeding the bounding boxes to the tracker, we need to assign each of them a unique identifier to perform tracking later.\"\n             \"\\n- __Step2__: Re-identification:\"\n             \"\\n + In the current frame, we still need to provide bounding boxes and the corresponding new centroids to \"\n             \"the tracker. The difference is that we now need to re-assign the ID of each object \"\n             \"(which is actually the main purpose of any tracking algorithm). \"\n             \"\\n + The idea is to compute the Euclidean distances between the current centroids and \"\n             \"those in the previous frame. Each ID is assigned to the closest centroid.\"\n             \"\\n + Finally, we update the (x,y) coordinates of existing objects; however, we may face a problem when \"\n             \"a new object appears, as in fig2. It may be a false positive, but that depends on your detector. \"\n             \"In this case, the tracker will assign a new ID to this new centroid (representing the newly appeared object).\"\n             \"\\n- __Step3__: What about when an object disappears? We need to deregister old objects.\"\n             \"\\n + We deregister an old object when it cannot be matched to any existing object for N consecutive frames.\")\n    st.subheader(\"2. Correlation Tracker: \")\n    st.write(\"- The algorithm is from the paper (http://www.bmva.org/bmvc/2014/files/paper038.pdf) of Danelljan et al., \"\n             \"an approach for robust scale estimation in a tracking-by-detection framework. \"\n             \"The proposed approach works by learning discriminative correlation filters based on scale pyramids.\"\n             \"\\n- __Performance__: \"\n             \"\\n + The tracker achieves a fairly high speed of about 40 fps. \"\n             \"\\n + The tracker cannot report when it drifts to the wrong object. \"\n             \"\\n + The tracker depends entirely on the performance of your object detector. \"\n             \"\\n + It keeps working well under overlap.\"\n             \"\\n - However, when the tracker loses the object, we can run the detector again to re-identify it.\")\n\n    MODEL_URL = \"/home/tuyen/ObjectTracking-Tutorials/PeopleCounter/mobilenet_ssd/MobileNetSSD_deploy.caffemodel\"\n    PROTXT_URL = \"/home/tuyen/ObjectTracking-Tutorials/PeopleCounter/mobilenet_ssd/MobileNetSSD_deploy.prototxt\"\n\n    net = cv2.dnn.readNetFromCaffe(PROTXT_URL, MODEL_URL)\n\n    CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n               \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n               \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n               \"sofa\", \"train\", \"tvmonitor\"]\n    hide_streamlit_widgets()\n    conf = st.sidebar.slider(\"Confidence\", min_value=0.0, max_value=1.0, step=0.1)\n    skipframe = st.sidebar.slider(\"Number of frames between detector runs\", min_value=1, max_value=30, step=5)  # min 1 avoids modulo-by-zero below\n    maxDistance = st.sidebar.slider(\"Max distance to match an object in subsequent frames\", min_value=30, max_value=70, step=10)\n    maxDisappear = st.sidebar.slider(\"Max frames an object may disappear before removal\", min_value=30, max_value=70, step=10)\n    totalFrame = 0\n    totalUp = 0\n    totalDown = 0\n    trackers = []\n    status = \"\"\n    ct = CentroidTracker(maxDistance, maxDisappear)\n    trackable_object_dict = {}\n    start_button = st.empty()\n    stop_button = st.empty()\n    total_Ups = st.empty()\n    total_Downs = st.empty()\n    status_space = st.empty()\n    stframe = st.empty()\n    f = st.file_uploader(\"Upload your video\", type=['mp4', \"avi\"])\n    if f is not None:\n        tfile = tempfile.NamedTemporaryFile(delete=True)\n        tfile.write(f.read())\n        vs = cv2.VideoCapture(tfile.name)\n\n        _start = start_button.button(\"Start\")\n\n        if _start:\n            _stop = stop_button.button(\"Stop\")\n            while True:\n                grabbed, frame = vs.read()\n                if frame is None:\n                    st.write(\"No more frames detected. 
Exiting.\")\n                    break\n                if _stop:\n                    break\n                frame = imutils.resize(frame, width=500)\n                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n                H, W = frame.shape[:2]\n                rects = []\n                if totalFrame % skipframe == 0:\n                    status = \"Detecting\"\n                    trackers = []\n                    blob = cv2.dnn.blobFromImage(frame, 1/127.5, (W, H), 127.5)\n                    net.setInput(blob)\n                    detections = net.forward()\n\n                    for i in range(detections.shape[2]):\n                        confidence = detections[0,0,i,2]\n                        if confidence >= conf:\n                            class_idx = int(detections[0,0,i,1])\n\n                            if CLASSES[class_idx] == \"person\":\n                                box = detections[0,0,i,3:7] * np.array([W, H, W, H])\n                                (x0, y0, x1, y1) = box.astype(\"int\")\n\n                                tracker = dlib.correlation_tracker()\n                                rect = dlib.rectangle(x0,y0, x1, y1)\n                                tracker.start_track(rgb, rect)\n                                trackers.append(tracker)\n\n\n                else:\n                    status = \"Waiting\"\n\n                for tracker in trackers:\n                    status = \"Tracking\"\n                    tracker.update(rgb)\n                    pos = tracker.get_position()\n\n                    startX = int(pos.left())\n                    startY = int(pos.top())\n                    endX = int(pos.right())\n                    endY = int(pos.bottom())\n\n                    rects.append([startX, startY, endX, endY])\n\n                objects = ct.update(rects)\n\n                for objectID, centroid in objects.items():\n                    to = trackable_object_dict.get(objectID, None)\n                    if to is None:\n                        to = TrackableObject(objectID, centroid)\n                    else:\n                        if not to.counted:\n                            if len(to.centroids) >= 5:\n                                y = [c[1] for c in to.centroids]\n\n                                direction = centroid[1] - np.mean(y)\n\n                                if direction > 0 and centroid[1] > H//2:\n                                    totalDown += 1\n                                    to.counted = True\n                                if direction < 0 and centroid[1] < H//2:\n                                    totalUp += 1\n                                    to.counted = True\n                    to.centroids.append(centroid)\n                    trackable_object_dict[objectID] = to\n                    cv2.circle(frame, (centroid[0], centroid[1]), radius=3, color=(0,255,0), thickness=-1)\n                    cv2.putText(frame, \"ID {}\".format(objectID), (centroid[0], centroid[1]-10), cv2.FONT_HERSHEY_SIMPLEX,\n                                0.5, (0,255,0),2, True)\n                frm = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n                stframe.image(frm)\n                total_Ups.markdown(\"Total Ups: {}\".format(totalUp))\n                total_Downs.markdown(\"Total Downs: {}\".format(totalDown))\n                status_space.markdown(\"Status: {}\".format(status))\n                totalFrame += 1\n            vs.release()\n\n\n\n\n\n\n","sub_path":"PeopleCounter/app_count_people.py","file_name":"app_count_people.py","file_ext":"py","file_size_in_byte":8929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"77290894","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('aristotle_dse', '0010_datasource_specification'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='datasource',\n            name='frequency',\n            field=models.CharField(max_length=512, null=True, blank=True),\n        ),\n        migrations.AlterField(\n            model_name='datasource',\n            name='specification',\n            field=models.ForeignKey(blank=True, to='aristotle_dse.DataSetSpecification', help_text='The dataset specification to which this data source conforms', null=True),\n        ),\n    ]\n","sub_path":"aristotle_dse/migrations/0011_auto_20160726_2015.py","file_name":"0011_auto_20160726_2015.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"563044082","text":"#!/bin/python3\n\nimport os\n\n# Complete the caesarCipher function below.\ndef caesarCipher(s, k):\n    l = []\n    for x in s:\n        num = ord(x)\n        if 97 <= num <= 122:\n            l.append(chr(((num-97+k)%26)+97))\n        elif 65 <= num <= 90:\n            l.append(chr(((num-65+k)%26)+65))\n        
else:\n l.append(x)\n return ''.join(l)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n s = input()\n k = int(input())\n result = caesarCipher(s, k)\n fptr.write(result + '\\n')\n fptr.close()","sub_path":"HackerRank/ProblemSolving/Algorithms/Strings/PythonSolutions/CaesarCipher.py","file_name":"CaesarCipher.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"567970269","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nsyslog2irc.processor\n~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2007-2015 Jochen Kupperschmidt\n:License: MIT, see LICENSE for details.\n\"\"\"\n\nfrom .runner import Runner\nfrom .signals import irc_channel_joined, message_approved, message_received, \\\n shutdown_requested, syslog_message_received\nfrom .syslog import format_message as format_syslog_message\nfrom .util import log\n\n\nclass Processor(Runner):\n\n def __init__(self, router):\n super(Processor, self).__init__()\n self.router = router\n\n def connect_to_signals(self):\n irc_channel_joined.connect(self.router.enable_channel)\n shutdown_requested.connect(self.request_shutdown)\n syslog_message_received.connect(self.handle_syslog_message)\n message_received.connect(self.handle_message)\n\n def handle_syslog_message(self, port, source_address=None,\n message=None):\n \"\"\"Process an incoming syslog message.\"\"\"\n channel_names = self.router.get_channel_names_for_port(port)\n\n formatted_source = '{0[0]}:{0[1]:d}'.format(source_address)\n formatted_message = format_syslog_message(message)\n text = '{} {}'.format(formatted_source, formatted_message)\n\n message_received.send(channel_names=channel_names,\n text=text,\n source_address=source_address)\n\n def handle_message(self, sender, channel_names=None, text=None,\n source_address=None):\n \"\"\"Process an incoming message.\"\"\"\n for channel_name in channel_names:\n if self.router.is_channel_enabled(channel_name):\n message_approved.send(channel_name=channel_name,\n text=text)\n","sub_path":"syslog2irc/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"425691994","text":"#imported opencv cause they have the facial REC feature.\r\nimport cv2\r\n\r\ndef face_rec():\r\n cap = cv2.VideoCapture(0)\r\n\r\n # Create the haar cascade\r\n #######################################################################################################\r\n # This came from Github!!!\r\n ########################################################################################################\r\n #This file has data based on the facial rec stuff.\r\n faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n prev_num = 0\r\n while(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n\r\n # Our operations on the frame come here\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n # Detect faces in the image\r\n faces = faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor=1.1,\r\n minNeighbors=5,\r\n minSize=(30, 30)\r\n # flags = cv2.CV_HAAR_SCALE_IMAGE\r\n )\r\n\r\n num_faces = len(faces)\r\n print(\"Found {0} faces!\".format(num_faces))\r\n\r\n if num_faces != prev_num:\r\n print(\"New Face!\")\r\n\r\n # Draw a rectangle around the faces\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n\r\n prev_num = num_faces\r\n # Display 
the resulting frame\r\n cv2.imshow('frame', frame)\r\n # Display quit\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n # When everything done, release the capture\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n# Tiding it together with the others\r\nif __name__ == '__main__':\r\n face_rec()\r\n","sub_path":"Facial_Rec.py","file_name":"Facial_Rec.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613341731","text":"from unittest import mock\nfrom collections import namedtuple, OrderedDict, Counter\nfrom datetime import datetime\n\n\nimport pandas\nfrom pandas import Timestamp\n\n\nimport schedule\nfrom schedule import make_week\nfrom chore import Room, FREE\n\nclass BadHistory:\n as_dict = {'Hannes':\n {Timestamp('2019-01-07 00:00:00'): Room(name='Kitchen', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-14 00:00:00'): Room(name='Cellar', priority=3, deleted=False, fake=False),\n Timestamp('2019-01-21 00:00:00'): Room(name='Porch', priority=2, deleted=False, fake=False),\n Timestamp('2019-01-28 00:00:00'): Room(name='Living', priority=1, deleted=False, fake=False)},\n 'Jannes':\n {Timestamp('2019-01-07 00:00:00'): Room(name='Living', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-14 00:00:00'): Room(name='Kitchen', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-21 00:00:00'): Room(name='Cellar', priority=3, deleted=False, fake=False),\n Timestamp('2019-01-28 00:00:00'): Room(name='Porch', priority=2, deleted=False, fake=False)},\n 'Sanne':\n {Timestamp('2019-01-07 00:00:00'): Room(name='Porch', priority=2, deleted=False, fake=False),\n Timestamp('2019-01-14 00:00:00'): Room(name='Living', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-21 00:00:00'): Room(name='Kitchen', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-28 00:00:00'): Room(name='Cellar', priority=3, deleted=False, fake=False)},\n 'Wannes':\n {Timestamp('2019-01-07 00:00:00'): Room(name='Cellar', priority=3, deleted=False, fake=False),\n Timestamp('2019-01-14 00:00:00'): Room(name='Porch', priority=2, deleted=False, fake=False),\n Timestamp('2019-01-21 00:00:00'): Room(name='Living', priority=1, deleted=False, fake=False),\n Timestamp('2019-01-28 00:00:00'): Room(name='Kitchen', priority=1, deleted=False, fake=False)}}\n\n\n history = pandas.DataFrame.from_dict(as_dict)\n\n rooms = [Room(name='Porch', priority=2, deleted=False, fake=False),\n Room(name='Kitchen', priority=1, deleted=False, fake=False),\n Room(name='Living', priority=1, deleted=False, fake=False),\n Room(name='Cellar', priority=3, deleted=False, fake=False)]\n names = ['Hannes', 'Jannes', 'Sanne', 'Wannes']\n\n\nrooms = [Room(name='Keuken', priority=1, deleted=False, fake=False),\n Room(name='Kelder', priority=3, deleted=False, fake=False),\n Room(name='Kleedkamer', priority=2, deleted=False, fake=False),\n Room(name='Kamertje', priority=2, deleted=False, fake=False)]\nnames = ['Hannes', 'Jannes', 'Sanne', 'Wannes']\nhistory = pandas.DataFrame.from_records(\n [(rooms[0], rooms[1], rooms[2], rooms[3])], columns=names)\ndate = datetime(1990, 4, 20, 0, 0)\ndate_interval = 'W-MON'\nempty_genesis = pandas.DataFrame()\n\n\n@mock.patch('random.shuffle', sorted)\ndef test_make_week_simple():\n # Set up test:\n names = ['Han', 'Jan', 'San']\n Chore = namedtuple('Chore', 'name priority fake')\n A = Chore(name='Attic', priority=3, fake=False)\n P = Chore(name='Porch', priority=2, fake=False)\n S 
= Chore(name='Study', priority=1, fake=False)\n history = pandas.DataFrame.from_records([(P, FREE, S), (FREE, S, FREE)], columns=names)\n # Run test:\n run_make_week = make_week(names, [P, A, S], history).items()\n expected = [('Han', Chore(name='Study', priority=1, fake=False)),\n ('Jan', Chore(name='Porch', priority=2, fake=False)),\n ('San', Chore(name='Attic', priority=3, fake=False))]\n assert sorted(run_make_week) == expected\n\n\n@mock.patch('random.shuffle', sorted)\ndef test_make_week_fair_distribution_empty_genesis():\n count = 12\n history = schedule.make_schedule(names,\n rooms,\n count,\n date,\n date_interval,\n pandas.DataFrame(columns=names))\n for name in names:\n counted = Counter(history[name])\n for room in rooms:\n if room.priority == 1:\n assert counted[room] == 3\n elif room.priority == 2:\n assert counted[room] in [1, 2]\n elif room.priority == 3:\n assert counted[room] in [1, 2]\n elif room.name == FREE.name:\n assert counted[room] in [4, 5]\n\n\n@mock.patch('random.shuffle', sorted)\ndef test_make_week_fair_distribution_bad_history():\n history = schedule.make_schedule(BadHistory.names,\n BadHistory.rooms,\n 8,\n date,\n date_interval,\n BadHistory.history)\n for name in BadHistory.names:\n counted = Counter(history[name])\n for room in BadHistory.rooms:\n if room.priority == 1:\n assert counted[room] in [2, 3, 4]\n elif room.priority == 2:\n assert counted[room] in [1, 2, 3]\n elif room.priority == 3:\n assert counted[room] in [1, 2]\n elif room.name == FREE.name:\n assert counted[room] in [4, 5]\n\n\ndef test_make_week_random_shuffle_works():\n run_test_list = [make_week(names, rooms, history) for _ in range(20)]\n assert any(True if run_test_list[0] != other else False\n for other in run_test_list[1:])\n","sub_path":"tests/test_make_week.py","file_name":"test_make_week.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"219222329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: mcxiaoke\n# @Date: 2017-05-26 20:30:00\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport shutil\nimport re\n\nPY2 = sys.version_info.major == 2\nPY3 = sys.version_info.major == 3\n\n# compat 2.x and 3.x\n\ntry:\n input = raw_input\nexcept Exception:\n pass\n\nos_encoding = sys.getfilesystemencoding()\nos_win = sys.platform.startswith('win')\n\nprint(sys.version_info)\nprint(sys.platform, sys.stdin.encoding, sys.stdout.encoding, os_encoding)\n\n# reload(sys)\n# if os_win:\n# sys.setdefaultencoding('utf-8')\n\n\ndef log(s):\n if os_win and PY2 and type(s) == str:\n print(s.decode('utf-8'))\n else:\n print(s)\n\n'''\nmatch all these names:\n\nDSC_20170516_213125.jpg\nIMG_20170526_213125.jpg\n20170526_213125.jpg\n20160301_114104303_iOS.jpg\n2015-11-23 001126.jpg\n20170428_132844_005.jpg\n20160301_013717000_iOS.png\n'''\n\nIMG_NAME_PATTERN = r'(?:[a-zA-Z]{1,4})?_?(20\\d{2})[-_/]?(\\d{2})[-_/]?(\\d{2}).*\\.(\\w{3})'\n# 4(year)+2(month)+2(day)+4(ext) = 12\nIMG_NAME_MIN_LEN = 12\nIMG_FILE_MIN_SIZE = 10*1024\n\n\ndef bad_filename(s):\n return repr(s)[1:-1]\n\n\ndef fix_print(s):\n try:\n print(s)\n except UnicodeEncodeError:\n print(bad_filename(s))\n\n\ndef backup(source, destination, dry_run=False):\n ip = re.compile(IMG_NAME_PATTERN, re.I)\n log(u'Process: {}'.format(source))\n if not os.path.isdir(source):\n log(u'Not Directory: {}'.format(source))\n return\n for name in os.listdir(source):\n current = os.path.join(source, name)\n if name[0] in '._~':\n 
log(u'Invalid: {}'.format(current))\n            continue\n        if os.path.isfile(current):\n            # log(u'Process file: {}'.format(current))\n            if len(name) < IMG_NAME_MIN_LEN:\n                log(u'Invalid: {}'.format(current))  # log() takes a single string argument\n                continue\n            pic_size = os.path.getsize(current)\n            if pic_size < IMG_FILE_MIN_SIZE:\n                log(u'Invalid: {}'.format(current))\n                # os.remove(current)\n                continue\n            m = ip.match(name)\n            if m:\n                # year, month, day, ext\n                # print(m.group(1), m.group(2), m.group(3))\n                src = current\n                output = os.path.join(destination, m.group(1), m.group(2))\n                if not os.path.exists(output) and not dry_run:\n                    os.makedirs(output)\n                dst = os.path.join(output, name)\n                if not os.path.exists(dst):\n                    log(u'Copy: {} -> {}'.format(src, dst))\n                    if not dry_run:\n                        shutil.copy2(src, dst)\n                else:\n                    log(u'Exist: {}'.format(dst))\n            else:\n                log(u'Not Matched: {}'.format(name))\n        elif os.path.isdir(current):\n            backup(current, destination, dry_run)\n        else:\n            log(u'Invalid: {}'.format(current))\n\nif __name__ == '__main__':\n    print(sys.argv)\n    if len(sys.argv) < 3:\n        print(u'Usage: {} source_dir destination_dir -n'.format(sys.argv[0]))\n        sys.exit(1)\n    src = os.path.abspath(sys.argv[1])\n    dst = os.path.abspath(sys.argv[2])\n    print(os.path.isdir(sys.argv[1]), os.path.isdir(sys.argv[2]))\n    if PY2:\n        src = src.decode(os_encoding)\n        dst = dst.decode(os_encoding)\n    log(u'SRC: {}'.format(src))\n    log(u'DST: {}'.format(dst))\n    dry_run = False\n    if len(sys.argv) == 4 and sys.argv[3] == '-n':\n        dry_run = True\n        log(u\"Mode: dry run mode, no files will be copied.\")\n    # else:\n    #     msg = \"Are you sure to process files [y/n]? \"\n    #     if input(msg).lower() not in ('y', 'yes'):\n    #         log('Cancelled.')\n    #         sys.exit(2)\n    backup(src, dst, dry_run)\n","sub_path":"labs/backup_photos_by_date.py","file_name":"backup_photos_by_date.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"86823983","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport datetime\nimport math\n\n\nclass Texture(object):\n\n    def __init__(self, enable=False):\n        self.__pixel_number = 800\n        self.__divider = self.__pixel_number / 16\n        self.__color_1 = [1, 0, 0]\n        self.__color_2 = [0, 0, 1]\n        self.__i_buffer = [None] * self.__pixel_number\n        for i in range(0, len(self.__i_buffer)):\n            if 1 <= i / self.__divider <= 3 or 5 <= i / self.__divider <= 7 \\\n                    or 9 <= i / self.__divider <= 11 or 13 <= i / self.__divider <= 15:\n                self.__i_buffer[i] = [self.__color_2] * self.__pixel_number\n            else:\n                self.__i_buffer[i] = [self.__color_1] * self.__pixel_number\n        self.enable = enable\n\n    def __draw(self):\n        print('Rendering ...')\n        start = datetime.datetime.now()\n        glClear(GL_COLOR_BUFFER_BIT)\n        glBegin(GL_POINTS)\n        for i in range(0, len(self.__i_buffer)):\n            for j in range(0, len(self.__i_buffer[i])):\n                glColor3fv(self.__i_buffer[i][j])\n                glVertex2i(j, i)\n        glEnd()\n        glFlush()\n        render_cost = datetime.datetime.now() - start\n        print('Finish. 
(cost = ' + str(render_cost) + ')\\n')\n\n def show(self):\n glutInit()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)\n glutInitWindowSize(self.__pixel_number, self.__pixel_number)\n glutCreateWindow('')\n gluOrtho2D(0, self.__pixel_number, 0, self.__pixel_number)\n glutDisplayFunc(self.__draw)\n glutMainLoop()\n\n def calculate(self, mr4c1):\n point = mr4c1\n x = float(point[0][0])\n y = float(point[1][0])\n z = float(point[2][0])\n if x > 1:\n x = 1\n elif x < -1:\n x = -1\n if y > 1:\n y = 1\n elif y < -1:\n y = -1\n try:\n if z >= 0:\n z = math.sqrt(1 - x ** 2 - y ** 2)\n else:\n z = -math.sqrt(1 - x ** 2 - y ** 2)\n except ValueError:\n return self.__color_1\n theta = math.acos(z)\n v = 16 * (theta / math.pi)\n if 1 <= v <= 3 or 5 <= v <= 7 or 9 <= v <= 11 or 13 <= v <= 15:\n return self.__color_2\n return self.__color_1\n\n\nif __name__ == '__main__':\n texture = Texture()\n texture.show()\n","sub_path":"lab4/texture.py","file_name":"texture.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"467567827","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport Artus.Utility.logger as logger\nlog = logging.getLogger(__name__)\n\nimport array\nimport collections\nimport copy\nimport numpy\nimport pprint\nimport ROOT\n\nimport Artus.HarryPlotter.plot1d as plot1d\nimport Artus.HarryPlotter.tools.utils as utils\nimport Artus.HarryPlotter.tools.labels as labels\n\n\nclass CutEfficiency(object):\n\tdef __init__(self, rootHistogram):\n\t\t\"\"\" Constructor taking a 1D ROOT histogram. Caches the efficiencies. \"\"\"\n\t\t\n\t\t# prepare histogram\n\t\tself._histogram = copy.deepcopy(rootHistogram)\n\t\tif self._histogram.Integral() > 0.0:\n\t\t\tself._histogram.Scale(1.0 / self._histogram.Integral())\n\t\tself._histogram.SetBinContent(0, 0.0)\n\t\tself._histogram.SetBinContent(self._histogram.GetNbinsX()+1, 0.0)\n\t\t\n\t\t# calculate efficiencies\n\t\tself._cut_efficiencies = self.get_cut_efficiencies(update=True)\n\t\n\tdef get_cut_efficiencies(self, invertCut=False, scaleFactor=1.0, update=False):\n\t\t\"\"\"\n\t\tCalculate efficiencies for sliding cut thresholds (bin edges)\n\t\t\n\t\tinvertCut = True: Select events above cut\n\t\tinvertCut = False: Select events below cut\n\t\t\n\t\tscaleFactor for scaling the efficiencies\n\t\t\n\t\tupdate = True: calculate the efficiencies\n\t\tupdate = False: use cached efficiencies\n\t\t\"\"\"\n\t\t\n\t\tcut_efficiencies = None\n\t\t\n\t\t# use cache if requested\n\t\tif not update:\n\t\t\tcut_efficiencies = self._cut_efficiencies\n\t\t\n\t\t# determine efficiencies\n\t\tbinContents = numpy.array([self._histogram.GetBinContent(xBin) for xBin in xrange(0, self._histogram.GetNbinsX()+1)])\n\t\tcut_efficiencies = numpy.array([efficiency if efficiency < 1.0 else 1.0 for efficiency in binContents.cumsum()])\n\t\t\n\t\t# invert direction of cut if requested\n\t\tif invertCut:\n\t\t\tcut_efficiencies = (1.0 - cut_efficiencies)\n\t\t\n\t\tif scaleFactor != 1.0:\n\t\t\tcut_efficiencies = (scaleFactor * cut_efficiencies)\n\t\t\n\t\treturn cut_efficiencies\n\t\n\tdef get_cut_values(self):\n\t\t\"\"\" Return cut thresholds (bin edges). \"\"\"\n\t\treturn [self._histogram.GetBinLowEdge(xBin) for xBin in xrange(1, self._histogram.GetNbinsX()+2)]\n\n\ndef _perpareUserargsForScaleFactors(plotdict):\n\t\"\"\" Prepare userargs to be treated as scale factors. 
\"\"\"\n\tif plotdict[\"userargs\"] == None:\n\t\tplotdict[\"userargs\"] = 1.0\n\tif isinstance(plotdict[\"userargs\"], basestring) or not isinstance(plotdict[\"userargs\"], collections.Iterable):\n\t\tplotdict[\"userargs\"] = [plotdict[\"userargs\"]]\n\tif len(plotdict[\"userargs\"]) == 1:\n\t\tplotdict[\"userargs\"] = len(plotdict[\"roothistos\"]) * plotdict[\"userargs\"]\n\tplotdict[\"userargs\"] = [float(arg) for arg in plotdict[\"userargs\"]]\n\n\ndef cutEffPlot(plotdict, invertCut=False, yName=\"Efficiency\"):\n\t\"\"\" Plot cut efficiencies for all 1D histograms.\n\t --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\t\n\tplot1d.get_root_histos(plotdict)\n\t_perpareUserargsForScaleFactors(plotdict)\n\t\n\t# replace ROOT histograms by efficiency graphs\n\tfor index, (roothisto, scaleFactor) in enumerate(zip(plotdict[\"roothistos\"], plotdict[\"userargs\"])):\n\t\tcutEfficiency = CutEfficiency(roothisto)\n\t\tefficiencyGraph = ROOT.TGraph(roothisto.GetNbinsX()+1,\n\t\t array.array(\"d\", cutEfficiency.get_cut_values()),\n\t\t array.array(\"d\", cutEfficiency.get_cut_efficiencies(invertCut=invertCut,\n\t\t scaleFactor=scaleFactor)))\n\t\tplotdict[\"roothistos\"][index] = efficiencyGraph\n\t\n\tplotdict[\"yname\"] = yName\n\tplot1d.get_mpl_histos(plotdict)\n\tplot1d.plot1d_mpl(plotdict)\n\tutils.setaxislimits(plotdict)\n\tlabels.add_labels(plotdict)\n\tutils.save(plotdict)\n\ndef invCutEffPlot(plotdict):\n\t\"\"\" Plot inverted cut efficiencies for all 1D histograms.\n\t --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\tcutEffPlot(plotdict, invertCut=True, yName=\"1 - Efficiency\")\n\n\ndef bkgRejVsSigEffPlot(plotdict, invertCutX=False, invertCutY=True,\n xName=\"Signal Efficiency\", yName=\"Background Rejection\"):\n\t\"\"\" Plot background rejection vs. signal efficiency for 1D histograms.\n\t Pairs of signal and background histograms are specified by --groups (keywords are sig and bkg)\n\t i.e. 
--groups sig_1 bkg_1 sig_2 bkg_2 provides 2 graphs in case four histograms are read in\n\t    --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\t\n\tplot1d.get_root_histos(plotdict)\n\t_perpareUserargsForScaleFactors(plotdict)\n\t\n\t# determine indices for multiple plots\n\tplots = {}\n\tfor index, group in enumerate(plotdict[\"groups\"]):\n\t\tplotLabel = group.lower().replace(\"sig\", \"\").replace(\"bkg\", \"\")\n\t\tif \"sig\" in group.lower():\n\t\t\tplots.setdefault(plotLabel, {})[\"sig\"] = index\n\t\telif \"bkg\" in group.lower():\n\t\t\tplots.setdefault(plotLabel, {})[\"bkg\"] = index\n\t\n\t# create efficiency graph\n\tfor plotLabel, plotIndices in plots.items():\n\t\tsigCutEfficiency = CutEfficiency(plotdict[\"roothistos\"][plotIndices[\"sig\"]])\n\t\tbkgCutEfficiency = CutEfficiency(plotdict[\"roothistos\"][plotIndices[\"bkg\"]])\n\t\t\n\t\tscaleFactorX = plotdict[\"userargs\"][plotIndices[\"sig\"]]\n\t\tscaleFactorY = plotdict[\"userargs\"][plotIndices[\"bkg\"]]\n\t\t\n\t\tefficiencyGraph = ROOT.TGraph(plotdict[\"roothistos\"][plotIndices[\"sig\"]].GetNbinsX()+1,\n\t\t                              array.array(\"d\", sigCutEfficiency.get_cut_efficiencies(invertCut=invertCutX,\n\t\t                                                                                     scaleFactor=scaleFactorX)),\n\t\t                              array.array(\"d\", bkgCutEfficiency.get_cut_efficiencies(invertCut=invertCutY,\n\t\t                                                                                     scaleFactor=scaleFactorY)))\n\t\t\n\t\tplotdict[\"roothistos\"][plotIndices[\"sig\"]] = efficiencyGraph\n\t\n\t# modify plotdict\n\tutils.removeplots(plotdict, [plots[plotLabel][\"bkg\"] for plotLabel in plots])\n\t\n\tplotdict[\"xname\"] = xName\n\tplotdict[\"yname\"] = yName\n\tplotdict[\"xlims\"] = [0.0, 1.0] # TODO\n\tplot1d.get_mpl_histos(plotdict)\n\tplot1d.plot1d_mpl(plotdict)\n\tutils.setaxislimits(plotdict)\n\tlabels.add_labels(plotdict)\n\tutils.save(plotdict)\n\ndef bkgRejVsSigRejPlot(plotdict, invertCutX=True, invertCutY=True,\n                       xName=\"Signal Rejection\", yName=\"Background Rejection\"):\n\t\"\"\" Plot background rejection vs. signal rejection for 1D histograms.\n\t    Pairs of signal and background histograms are specified by --groups (keywords are sig and bkg)\n\t    i.e. --groups sig_1 bkg_1 sig_2 bkg_2 provides 2 graphs in case four histograms are read in\n\t    --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\tbkgRejVsSigEffPlot(plotdict, invertCutX=invertCutX, invertCutY=invertCutY,\n                   xName=xName, yName=yName)\n\ndef fakeRateVsSigRejPlot(plotdict, invertCutX=True, invertCutY=False,\n                         xName=\"Signal Rejection\", yName=\"Fake Rate\"):\n\t\"\"\" Plot fake rate vs. signal rejection for 1D histograms.\n\t    Pairs of signal and background histograms are specified by --groups (keywords are sig and bkg)\n\t    i.e. --groups sig_1 bkg_1 sig_2 bkg_2 provides 2 graphs in case four histograms are read in\n\t    --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\tbkgRejVsSigEffPlot(plotdict, invertCutX=invertCutX, invertCutY=invertCutY,\n                   xName=xName, yName=yName)\n\ndef fakeRateVsSigEffPlot(plotdict, invertCutX=False, invertCutY=False,\n                         xName=\"Signal Efficiency\", yName=\"Fake Rate\"):\n\t\"\"\" Plot fake rate vs. signal efficiency for 1D histograms.\n\t    Pairs of signal and background histograms are specified by --groups (keywords are sig and bkg)\n\t    i.e. 
\ndef fakeRateVsSigEffPlot(plotdict, invertCutX=False, invertCutY=False,\n                         xName=\"Signal Efficiency\", yName=\"Fake Rate\"):\n\t\"\"\" Plot fake rate vs. signal efficiency for 1D histograms.\n\t    Pairs of signal and background histograms are specified by --groups (keywords are sig and bkg)\n\t    i.e. --groups sig_1 bkg_1 sig_2 bkg_2 provides 2 graphs in case four histograms are read in\n\t    --userargs can be used for scaling the efficiencies\n\t\"\"\"\n\tbkgRejVsSigEffPlot(plotdict, invertCutX=invertCutX, invertCutY=invertCutY,\n                         xName=xName, yName=yName)\n\n","sub_path":"HarryPlotter/python/analysis_modules/cutEfficiencyPlots.py","file_name":"cutEfficiencyPlots.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"215761575","text":"import os\nfrom collections import OrderedDict\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport torchvision\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nimport cv2\nfrom torch.utils.data import Dataset, DataLoader, random_split, Subset\nimport glob\nimport torch.backends.cudnn as cudnn\nfrom torchvision.utils import save_image, make_grid\nimport torch.nn as nn\nimport torch.optim as optim\nimport time\nimport argparse\nimport torch.nn.functional as F\n\nfrom mvtecad_pytorch.dataset import MVTecADDataset\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom datetime import datetime\nimport random\n\nfrom autoenc.ae_model import tmpTransGan, facebook_vit\nfrom trans_discrim.trans_discrim import ViT_Discrim\nfrom trans_discrim.trans_discrim_seperate import ViT_Discrim as ViT_Discrim_Seperate\nfrom trans_discrim.trans_enc import VisionTransformer\nfrom sklearn.metrics import roc_auc_score\n\n\ndef main(args):\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n    # device = 'cpu'\n    print(\"DEVICE:\", device)\n    if args.train == 'yes':\n        args.world_size = args.gpus * args.nodes\n        args.port = random.randint(49152, 65535)\n        print(args.master_addr)\n        os.environ['MASTER_ADDR'] = args.master_addr\n        os.environ['MASTER_PORT'] = str(args.port)\n        mp.spawn(train, nprocs=args.gpus, args=(args,))\n\n\ndef get_discrim(d_net: torch.nn.Module, x_input: torch.Tensor):\n    anom_pred, patch_pred = d_net(x_input)\n    print(\"SIGMOID ANOM_PRED\", torch.sigmoid(anom_pred))\n    # print(\"PATCH_PRED\", patch_pred)\n    anom_pred = torch.round(torch.sigmoid(anom_pred))\n    #print(\"ROUNDED SIGMOID ANOM_PRED\", anom_pred)\n    patch_pred = torch.round(patch_pred)\n    # anom_pred = pred.sum(dim=1, keepdim=True).squeeze()\n    # #anom_pred[anom_pred > 1] = 1\n    return anom_pred, patch_pred\n\n\ndef adv_loss(d_net: torch.nn.Module, x: torch.Tensor, isFake) -> torch.Tensor:\n    if isFake:\n        pred_fake, _ = d_net(x)\n        y_fake = torch.ones_like(pred_fake)\n        fake_loss = F.binary_cross_entropy_with_logits(pred_fake, y_fake)\n        return fake_loss\n    else:\n        pred_real, _ = d_net(x)\n        y_real = torch.zeros_like(pred_real)\n        real_loss = F.binary_cross_entropy_with_logits(pred_real, y_real)\n        return real_loss\n\n\n# from ALOC- modified\ndef discrim_loss(d_net: torch.nn.Module, x_real: torch.Tensor, x_fake: torch.Tensor) -> torch.Tensor:\n    pred_real, _ = d_net(x_real)\n    pred_fake, _ = d_net(x_fake.detach()) # new leaf in graph because these values are\n    # calculated by the other model.\n    # print(\"real prediction\", torch.sigmoid(pred_real))\n    # print(\"fake prediction\", torch.sigmoid(pred_fake))\n\n    # print(\"PRED REAL OUTPUT: \", pred_real)\n    # print(\"PRED FAKE OUTPUT: \", pred_fake)\n\n    # pred_real = pred_real.sum(dim=1, keepdim=True).squeeze()\n    # 
pred_real[pred_real > 1] = 1\n # pred_fake = pred_fake.sum(dim=1, keepdim=True).squeeze()\n # pred_fake[pred_fake > 1] = 1\n\n y_real = torch.zeros_like(pred_real)\n y_fake = torch.ones_like(pred_fake) # these are ones because anom = 1, normal = 0\n # print(\"REAL PRED\", pred_real)\n # print(\"FAKE PRED\", pred_fake)\n real_loss = F.binary_cross_entropy_with_logits(pred_real, y_real)\n fake_loss = F.binary_cross_entropy_with_logits(pred_fake, y_fake)\n\n # print(\"REAL LOSS\", real_loss)\n # print(\"FAKE LOSS\", fake_loss)\n\n return real_loss + fake_loss\n\ndef r_loss(d_net, x_real, x_fake, lamb):\n anom_pred, patch_pred = d_net(x_fake)\n y = torch.ones_like(anom_pred)\n\n rec_loss = F.mse_loss(x_fake, x_real)\n gen_loss = F.binary_cross_entropy_with_logits(anom_pred, y) # Generator loss\n\n L_r = gen_loss + lamb * rec_loss\n\n return rec_loss, gen_loss, L_r\n\n\ndef train(gpu, args):\n trainset = torchvision.datasets.MNIST('./mnist/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n transforms.Resize((32, 32)),\n torchvision.transforms.ToTensor()\n ]))\n\n testset = torchvision.datasets.MNIST('./mnist/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n transforms.Resize((32, 32)),\n torchvision.transforms.ToTensor()\n ]))\n\n allSampleSet = trainset + testset\n\n classes = ('0-zero', '1-one', '2-two', '3-three', '4-four', '5-five', '6-six', '7-seven', '8-eight', '9-nine')\n mnist_targets = list(trainset.class_to_idx.values())\n print(\"TARGETS\", mnist_targets)\n train_inliers = [np.where(np.array(trainset.targets) == class_idx)[0]\n for class_idx in trainset.class_to_idx.values()]\n train_outliers = [np.where(np.array(trainset.targets) != class_idx)[0]\n for class_idx in trainset.class_to_idx.values()]\n test_inliers = [np.where(np.array(testset.targets) == class_idx)[0]\n for class_idx in testset.class_to_idx.values()]\n test_outliers = [np.where(np.array(testset.targets) != class_idx)[0]\n for class_idx in testset.class_to_idx.values()]\n\n for i in range(len(classes)):\n test_inliers[i] += len(trainset)\n test_outliers[i] += len(trainset)\n\n # Drop elements\n train_outliers[i] = np.random.choice(train_outliers[i], size=500, replace=False)\n test_outliers[i] = np.random.choice(test_outliers[i], size=500, replace=False)\n\n inliers_zip = zip(train_inliers, test_inliers)\n inliers = [np.concatenate((i, j), dtype=np.int64) for i, j in inliers_zip]\n\n for i in inliers:\n print(\"Inlier size: \", len(i))\n\n outliers_zip = zip(train_outliers, test_outliers)\n outliers = [np.concatenate((i, j), dtype=np.int64) for i, j in outliers_zip]\n\n for i in outliers:\n print(\"Outlier size: \", len(i))\n\n trainloader = [\n DataLoader(\n dataset=Subset(allSampleSet, inds),\n batch_size=20,\n shuffle=True,\n num_workers=2\n ) for inds in inliers]\n\n testloader = [\n DataLoader(\n dataset=Subset(allSampleSet, inds),\n batch_size=50,\n shuffle=True,\n num_workers=2\n ) for inds in outliers]\n\n unified_loaders = list(zip(trainloader, testloader))\n\n for idx, loaders in enumerate(unified_loaders):\n\n # Model\n print('==> Building models..')\n encoder = VisionTransformer(image_size=32, patch_size=4, dim=128, depth=3, heads=4, mlp_dim=64,\n channels=1)\n\n generator = tmpTransGan(depth1=5, depth2=2, depth3=2, initial_size=8, dim=128,\n heads=4, mlp_ratio=4, drop_rate=0.5).get_TransGan()\n discriminator = ViT_Discrim_Seperate(image_size=32, patch_size=4, dim=128,\n depth=3, heads=4, mlp_dim=64, channels=1,\n dim_disc_head=64)\n encoder = 
encoder.cuda()\n generator = generator.cuda()\n discriminator = discriminator.cuda()\n\n ae_criterion = nn.MSELoss()\n\n enc_optimizer = optim.Adam(encoder.parameters(), lr=0.0002, betas=(0.5, 0.999))\n gen_optimizer = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))\n disc_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))\n\n '''\n if args.resume_enc:\n # Load checkpoint.\n print('==> Resuming encoder from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint_encoder = torch.load('./checkpoint/ckpt_enc.pth')\n encoder.load_state_dict(checkpoint_encoder['encoder'])\n decoder.load_state_dict(checkpoint_encoder['decoder'])\n start_epoch = checkpoint_encoder['epoch']\n\n if args.resume_dec:\n # Load checkpoint.\n print('==> Resuming decoder from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint_decoder = torch.load('./checkpoint/ckpt_dec.pth')\n encoder.load_state_dict(checkpoint_encoder['encoder'])\n decoder.load_state_dict(checkpoint_encoder['decoder'])\n start_epoch = checkpoint_decoder['epoch']\n '''\n\n print(\"Training inlier class \", classes[idx])\n for epoch in range(args.start_epoch, args.start_epoch + args.epochs):\n print('\\nEpoch: %d' % epoch)\n # net.train()\n running_recon_loss = 0\n running_generator_loss = 0.0\n running_disc_loss = 0.0\n for batch_idx, (inputs, targets) in enumerate(loaders[0]):\n inputs, targets = inputs.cuda(), targets.cuda()\n disc_optimizer.zero_grad()\n #forward pass real examples through network D and calculate loss on real batch\n d_real_loss = adv_loss(discriminator, inputs, False)\n d_real_loss.backward()\n\n # train with all fake batch\n encodings = encoder(inputs)\n recons = generator(encodings[:, 0]) # first element of each sequence of patches is cls embedding\n # Generator ability to trick discriminator\n d_fake_loss = adv_loss(discriminator, recons.detach(), True)\n d_fake_loss.backward()\n disc_optimizer.step()\n\n # -----------------------\n # AE network update\n enc_optimizer.zero_grad()\n gen_optimizer.zero_grad()\n g_fake_loss = adv_loss(discriminator, recons, True)\n g_fake_loss.backward()\n\n enc_optimizer.step()\n gen_optimizer.step()\n\n running_generator_loss = g_fake_loss.mean().item()\n running_disc_loss = d_fake_loss.mean().item() + d_real_loss.mean().item()\n #running_recon_loss += recon_loss.item()\n # _, predicted = outputs.max(1)\n # total += targets.size(0)\n # correct += predicted.eq(targets).sum().item()\n if gpu == 0:\n print(\"Class:\", classes[idx], \"Epoch No. 
\", epoch, \"Batch Index.\", batch_idx,\n \"_gen_loss_\", running_generator_loss, \"_disc loss_\",\n running_disc_loss)\n if epoch % 2 == 0:\n save_image(make_grid(inputs, nrows=10),\n \"./cifar_imgs/ae_recons/train_input_Inlier: \" + classes[idx] + \"_epoch_\" + str(\n epoch) + \"_\" + str(batch_idx) + \".jpg\")\n save_image(make_grid(recons, nrows=10),\n \"./cifar_imgs/ae_recons/train_recon_Inlier:\" + classes[idx] + \"_epoch_\" + str(\n epoch) + \"_\" + str(batch_idx) + \".jpg\")\n\n if epoch % 2 == 0:\n test(gpu, epoch, loaders[1], idx, encoder, generator, discriminator, ae_criterion, classes,\n mnist_targets)\n\n if gpu == 0:\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n print(\"saving model...\")\n torch.save({\n 'model_state_dict': encoder.state_dict(),\n 'optimizer_state_dict': enc_optimizer.state_dict(),\n }, \"./checkpoint/ckpt_enc_\" + classes[idx] + \".pth\")\n torch.save({\n 'model_state_dict': generator.state_dict(),\n 'optimizer_state_dict': gen_optimizer.state_dict(),\n }, \"./checkpoint/ckpt_gen_\" + classes[idx] + \".pth\")\n torch.save({\n 'model_state_dict': discriminator.state_dict(),\n 'optimizer_state_dict': disc_optimizer.state_dict(),\n }, \"./checkpoint/ckpt_disc_\" + classes[idx] + \".pth\")\n print(\"Save complete.\")\n\n\ndef test(gpu, epoch, testloader, loader_idx, encoder, generator, discriminator, ae_criterion, classes, mnist_targets):\n with torch.no_grad():\n print(\"TESTING\")\n global best_acc\n # net.eval()\n test_loss = 0\n correct = 0\n total = 0\n # with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.cuda(), targets.cuda()\n targets[targets != mnist_targets[loader_idx]] = 1\n encodings = encoder(inputs)\n recons = generator(encodings[:,\n 0]) # first element of each sequence of patches is cls embedding. 
Only use this to generate reconstruction\n normal_pred, anom_patch_pred = get_discrim(discriminator, inputs)\n print(\"TEST IMAGE PREDICTIONS\", normal_pred.squeeze())\n\n recon_pred, recon_patch_pred = get_discrim(discriminator, recons)\n print(\"TEST RECON IMAGE PREDICTIONS\", recon_pred.squeeze())\n print(\"TARGETS\", targets)\n\n #auc = roc_auc_score(y_true=targets.cpu(), y_score=normal_pred.cpu())\n\n #print(\"TEST AUC\", auc)\n\n\n cpu_inp = inputs.cpu()\n cpu_recons = recons.cpu()\n cpu_errors = cpu_inp - cpu_recons\n if gpu == 0:\n save_image(make_grid(cpu_inp, nrows=10),\n \"./cifar_imgs/ae_recons/test_input_Inlier: \" + classes[loader_idx] + \"_epoch_\" + str(\n epoch) + \"_\" + str(batch_idx) + \".jpg\")\n save_image(make_grid(cpu_recons, nrows=10),\n \"./cifar_imgs/ae_recons/test_recon_Inlier:\" + classes[loader_idx] + \"_epoch_\" + str(\n epoch) + \"_\" + str(batch_idx) + \".jpg\")\n save_image(make_grid(cpu_errors, nrows=10),\n \"./cifar_imgs/ae_recons/test_err_Inlier:\" + classes[loader_idx] + \"_epoch_\" + str(\n epoch) + \"_\" + str(\n batch_idx) + \".jpg\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-ma', '--master_addr', default='', type=str,\n help='Master Address of node')\n parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')\n parser.add_argument('-g', '--gpus', default=2, type=int,\n help='number of gpus per node')\n parser.add_argument('-nr', '--nr', default=0, type=int,\n help='ranking within the nodes')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--epochs', default=100, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--resume_enc', '-re', action='store_true',\n help='resume encoder from checkpoint')\n parser.add_argument('--resume_dec', '-rd', action='store_true',\n help='resume decoder from checkpoint')\n parser.add_argument('--train', default='yes', type=str,\n help='Whether to train or to test')\n parser.add_argument('--checkpoint_directory', default='./checkpoint/ae/', type=str,\n help='Which directory the checkpoints are stored in')\n parser.add_argument('--test_cls', default='', type=str,\n help='What class to test')\n parser.add_argument('--train_cls', nargs=\"*\", default=[],\n help='List of classes to test')\n args = parser.parse_args()\n main(args)\n","sub_path":"ddp_nloss_mnist.py","file_name":"ddp_nloss_mnist.py","file_ext":"py","file_size_in_byte":16099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10040976","text":"import cv2 as cv\nimport numpy as np\n\ncap = cv.VideoCapture('/home/pugdel/Vídeos/aaaa.mp4')\n_, frame1 = cap.read()\nframe1 = np.float_(frame1)\nframe1 = cv.normalize(frame1, 0, 1, norm_type=cv.NORM_MINMAX)\n\n\n_, frame2 = cap.read()\nframe2 = cv.normalize(frame2, 0, 1, norm_type=cv.NORM_MINMAX)\nframe2 = np.float_(frame2)\n_, frame3 = cap.read()\nframe_rgb = frame3\nframe3 = np.float_(frame3)\nframe3 = cv.normalize(frame3, 0, 1, norm_type=cv.NORM_MINMAX)\nBg = frame3\nThr = np.zeros((frame1.shape[0], frame1.shape[1], 3))\nThr = np.float_(Thr)\nThr = Thr + 0.7\nalpha = 0.01\n\nkernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (10, 10))\nkernel1 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))\nkernel2 = cv.getStructuringElement(cv.MORPH_CROSS, (5, 10))\nkernel3 = cv.getStructuringElement(cv.MORPH_CROSS, (5, 1))\ncont = 0\n\nwhile True:\n D1 = np.abs(frame3 - frame2)\n D2 = 
np.abs(frame3 - frame1)\n\n    D1_B = D1 > Thr\n    D1_T = D1_B * frame3\n    D2_B = D2 > Thr\n    D2_T = D2_B * frame3\n    moving = cv.bitwise_and(D1_T, D2_T)\n    moving_B = D1_B & D2_B\n    moving_B_N = 1 - moving_B\n\n    # blob = np.abs(frame3 - Bg)\n    # blob_T = blob > Thr\n    # blob = blob * blob_T\n    # blob = cv.GaussianBlur(blob, (3, 3), 0)\n    # blob = cv.normalize(blob, 0, 255, norm_type=cv.NORM_MINMAX)\n    # blob = np.int16(blob)\n    # blob = cv.cvtColor(blob, cv.COLOR_BGR2GRAY)\n    # _, blob = cv.threshold(blob, 0.5, 1, cv.THRESH_BINARY)\n\n    DB = np.abs(frame3 - Bg)\n    _, DB = cv.threshold(DB, 0.1, 1, cv.THRESH_BINARY)\n    MO = cv.bitwise_and(DB, Bg)\n    a, b, c = cv.split(MO)\n    MO = a + b + c\n    _, MO = cv.threshold(MO, 0.5, 1, cv.NORM_MINMAX)\n    MO = cv.morphologyEx(MO, cv.MORPH_CLOSE, kernel)\n    MO = cv.morphologyEx(MO, cv.MORPH_OPEN, kernel1)\n\n    # MO = cv.erode(MO, kernel3, iterations=1)\n\n    cv.imshow('MO', MO)\n\n    Thr1 = Thr * moving_B\n    Thr2 = alpha * Thr * moving_B_N + (1 - alpha) * (5 * np.abs(frame3 - Bg) * moving_B_N)\n    Thr = Thr1 + Thr2\n\n    Bg1 = Bg * moving_B\n    Bg2 = alpha * Bg * moving_B_N + (1 - alpha) * frame3 * moving_B_N\n    Bg = Bg1 + Bg2\n    MO = cv.dilate(MO, kernel2)\n    MO = cv.normalize(MO, 0, 255, norm_type=cv.NORM_MINMAX)\n    MO = np.uint8(MO)\n    _, contours, _ = cv.findContours(MO, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n    hsv = cv.cvtColor(frame_rgb, cv.COLOR_BGR2HSV)\n    hsv = np.float_(hsv)\n    hsv = cv.normalize(hsv, 0, 1, norm_type=cv.NORM_MINMAX)\n    frame_aux = np.float_(frame_rgb)\n    frame_aux = cv.normalize(frame_aux, 0, 1, norm_type=cv.NORM_MINMAX)\n    B, G, R = cv.split(frame_aux)\n    I_ = (B + G + R) / 3\n    hsv[:, :, 2] = I_\n    H_mean = np.mean(hsv[:, :, 0])\n    S_mean = np.mean(hsv[:, :, 1])\n    I_mean = np.mean(hsv[:, :, 2])\n    bottom = np.array([((H_mean - 0.15)/10) - 0.012, ((S_mean + 0.1)/10) - 0.07, (I_mean*1.12) - 0.06])\n    upper = np.array([((H_mean - 0.15)/10) + 0.012, ((S_mean + 0.1)/10) + 0.07, (I_mean*1.12) + 0.06])\n    teste = cv.inRange(hsv, bottom, upper)\n    cv.imshow('teste', teste)\n    for count in contours:\n        x, y, w, h = cv.boundingRect(count)\n\n        if 0.231 <= (w / h) <= 0.9:\n            cv.rectangle(frame_rgb, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n    print(cont)\n    cont += 1\n\n    cv.imshow('aaa', frame_rgb)\n    frame3 = np.float_(frame3)\n    frame3 = cv.normalize(frame3, 0, 1, norm_type=cv.NORM_MINMAX)\n\n    cv.imshow('Bg', Bg)\n    cv.waitKey()\n    frame1 = frame2\n    frame2 = frame3\n    _, frame3 = cap.read()\n    frame_rgb = frame3\n    frame3 = np.float_(frame3)\n    frame3 = cv.normalize(frame3, 0, 1, norm_type=cv.NORM_MINMAX)\n\n\n\n\n    \n","sub_path":"aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613221662","text":"# ========================================================\n# File Name : practice.py\n# Description : Stack Overflow Survey study\n#\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# Date          Who    Description\n# 10/18/2019    Kevin Zhang     initial creation\n# ========================================================\n\n# LanguageWorkedWith\n\nimport csv\nfrom collections import defaultdict, Counter\n\nwith open('survey_results_public.csv', encoding='utf-8') as f:\n    # csv_reader is a generator\n    csv_reader = csv.DictReader(f)\n\n    dev_type_info = {}\n\n    for line in csv_reader:\n        dev_types = line['DevType'].split(';')\n\n        for dev_type in dev_types:\n            dev_type_info.setdefault(dev_type, {\n                'total': 0,\n                'language_counter': Counter()\n            })
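\n\n            # Illustrative shape of one accumulated record after a few rows\n            # (made-up values, not taken from the survey data):\n            #   dev_type_info['Data scientist'] == {\n            #       'total': 2,\n            #       'language_counter': Counter({'Python': 2, 'SQL': 1}),\n            #   }\n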
\n            languages = line['LanguageWorkedWith'].split(';')\n            dev_type_info[dev_type]['language_counter'].update(languages)\n            dev_type_info[dev_type]['total'] += 1\n\nfor dev_type, info in dev_type_info.items():\n    print(dev_type)\n\n    for language, value in info['language_counter'].most_common(5):\n        language_pct = round((value / info['total']) * 100, 2)\n\n        print(f'\\t{language}: {language_pct}%')\n","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"61763796","text":"import torch\nfrom torchvision import transforms, models\n\n\nPATH = \"weights.pth\"\n\n\ndef evaluate(image):\n    \"\"\"\n    :param image: PIL image to evaluate\n    :return: 1 if it's a hot dog, else 0\n    \"\"\"\n    transform = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n    ])\n    image_tensor = transform(image).float()\n    image_tensor = image_tensor.unsqueeze(0)\n\n    model = models.resnet18(pretrained=True)\n    n_features = model.fc.in_features\n    model.fc = torch.nn.Linear(n_features, 2)\n\n    model.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))\n    model.eval()\n\n    _, indices = torch.max(model(image_tensor), 1)\n    return int(indices.data)\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"195632438","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport rdflib\nfrom itertools import count\n\nfrom .misc import *\n\n\n\n\nclass RDFResource(object):\n    \"\"\"\n    Pythonic abstraction for any RDF Resource object.\n\n    Allows to store an RDF 'minigraph' containing all metadata associated with a resource,\n    and to query the minigraph e.g. 
for descriptions, types, class info, etc.\n    \"\"\"\n\n    DEFAULT_NAMESPACES = []\n\n    # http://stackoverflow.com/questions/8628123/counting-instances-of-a-class\n    _ids = count(0)\n\n    def __init__(self, uri, minigraph, namespaces={}, format=\"n3\"):\n        super(RDFResource, self).__init__()\n        self.id = next(self._ids)\n        self.uri = rdflib.URIRef(uri)\n        self.graph = rdflib.Graph()\n        self.graph.parse(data=minigraph, format=format)\n        self.namespaces = namespaces\n        self.namespaces_sorted = sorted([ [k,v] for k, v in namespaces.items() ])\n        for k,v in self.namespaces.items():\n            self.graph.bind(k, rdflib.Namespace(v))\n\n        self.qname = qname(self.uri, self.namespaces)\n\n\n    def getValuesForProperty(self, aPropURIRef):\n        \"\"\"\n        helper method: a generic way to extract the values of some property, e.g.\n        In [11]: c.getValuesForProperty(rdflib.RDF.type)\n        Out[11]:\n        [rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#Class'),\n        rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#Class')]\n        NOTE: the subject is always implicit\n\n        \"\"\"\n        return list(self.graph.objects(self.uri, aPropURIRef))\n\n\n\n    def is_class(self):\n        test = self.getValuesForProperty(rdflib.RDF.type)\n        if rdflib.OWL.Class in test or rdflib.RDFS.Class in test:\n            return True\n        else:\n            return False\n\n    def is_property(self):\n        test = self.getValuesForProperty(rdflib.RDF.type)\n        if rdflib.OWL.AnnotationProperty in test or rdflib.OWL.DatatypeProperty in test or rdflib.OWL.ObjectProperty in test or rdflib.RDF.Property in test:\n            return True\n        else:\n            return False\n\n\n    def all_types(self):\n        test = self.getValuesForProperty(rdflib.RDF.type)\n        return [str(x) for x in test]\n\n\n\n    def bestLabel(self, prefLanguage=\"en\"):\n        \"\"\"\n        facility for extracting the best available label for an entity\n\n        ..This checks RDFS.label, SKOS.prefLabel and finally the qname local component\n        \"\"\"\n\n        test = self.getValuesForProperty(rdflib.RDFS.label)\n        out = \"\"\n\n        if test:\n            out = firstStringInList(test)\n        else:\n            test = self.getValuesForProperty(rdflib.namespace.SKOS.prefLabel)\n            if test:\n                out = firstStringInList(test)\n            else:\n                test = self.getValuesForProperty(rdflib.URIRef(\"http://www.springernature.com/scigraph/ontologies/core/title\"))\n                if test:\n                    out = firstStringInList(test)\n\n        if not out:\n            out = self.qname # default\n        return out\n\n\n\n    def bestDescription(self, prefLanguage=\"en\"):\n        \"\"\"\n        facility for extracting the best available description for an entity\n\n        ..This checks RDFS.comment, DCTERMS.description, DC.description and SKOS.definition, in that order\n        \"\"\"\n\n        test_preds = [rdflib.RDFS.comment, rdflib.namespace.DCTERMS.description, rdflib.namespace.DC.description, rdflib.namespace.SKOS.definition ]\n\n        for pred in test_preds:\n            test = self.getValuesForProperty(pred)\n            if test:\n                return firstStringInList(test)\n        return \"\"\n
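\n\n    # Hedged usage sketch for the label/description helpers (URI, namespaces and\n    # minigraph contents below are made up for illustration):\n    #\n    #   r = RDFResource(\"http://example.org/Person\", minigraph=some_n3_string,\n    #                   namespaces={\"ex\": \"http://example.org/\"})\n    #   r.bestLabel()        # rdfs:label if present, else skos:prefLabel, else the qname\n    #   r.bestDescription()  # first hit among rdfs:comment / dcterms:description / ...\n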
\n    def triples(self, unique_nodes=False, unique_predicates=False, unique_subjobj=False):\n        \"\"\"\n        return all triples in the minigraph having the primary instance URI as subject\n        (note: there may be other triples in the minigraph but we're not returning them)\n\n        unique_nodes => if True, return a list of the unique nodes for these triples (including literals)\n        unique_predicates => returns a list of only the predicates\n        unique_subjobj => returns a list of only the subj/obj\n        \"\"\"\n        if unique_nodes:\n            exit = [self.uri]\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                exit += [y, z] # NOTE: x is already in the exit list (=> self.uri)\n            return list(set(exit))\n        elif unique_predicates:\n            exit = []\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                exit += [y]\n            return list(set(exit))\n        elif unique_subjobj:\n            exit = [self.uri]\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                exit += [z] # NOTE: x is already in the exit list (=> self.uri)\n            return list(set(exit))\n        else:\n            return sorted(list(self.graph.triples((self.uri, None, None))))\n\n    def triples_no_literals(self, unique_nodes=False, unique_predicates=False, unique_subjobj=False):\n        \"\"\"\n        return all triples in the minigraph that\n        a) have the primary instance URI as subject\n        b) only connect two URI references (RDF resources, not Literals)\n\n        unique_nodes => if True, return a list of the unique nodes for these triples (excluding literals)\n        unique_predicates => returns a list of only the predicates\n        unique_subjobj => returns a list of only the subj/obj\n        \"\"\"\n        if unique_nodes:\n            exit = [self.uri]\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                if type(z) == rdflib.term.URIRef:\n                    exit += [y, z]\n            return list(set(exit))\n        elif unique_predicates:\n            exit = []\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                if type(z) == rdflib.term.URIRef:\n                    exit += [y]\n            return list(set(exit))\n        elif unique_subjobj:\n            exit = [self.uri]\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                if type(z) == rdflib.term.URIRef:\n                    exit += [z]\n            return list(set(exit))\n        else:\n            exit = []\n            for x, y, z in self.graph.triples((self.uri, None, None)):\n                if type(z) == rdflib.term.URIRef:\n                    exit += [(x, y, z)]\n            return sorted(exit)\n\n\n    def list_unique_nodes(self):\n        \"\"\"\n        wrapper for .triples(unique_nodes=True) so that it can be called from templates\n        \"\"\"\n        return self.triples(unique_nodes=True)\n\n\n    def list_unique_uris(self):\n        \"\"\"\n        wrapper for .triples_no_literals(unique_nodes=True) so that it can be called from templates\n        \"\"\"\n        return self.triples_no_literals(unique_nodes=True)\n\n\n    def list_unique_predicates_uris(self):\n        \"\"\"\n        wrapper for .triples_no_literals(unique_predicates=True) so that it can be called from templates\n        \"\"\"\n        return self.triples_no_literals(unique_predicates=True)\n\n    def list_unique_subobj_uris(self):\n        \"\"\"\n        wrapper for .triples_no_literals(unique_subjobj=True) so that it can be called from templates\n        \"\"\"\n        return self.triples_no_literals(unique_subjobj=True)\n","sub_path":"src/libs/rdf_utils/rdf_resource.py","file_name":"rdf_resource.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384529910","text":"from flask import Flask, render_template, request\nimport sqlite3\nimport time\n\napp = Flask(__name__)\n\nsqlFilename = \"../db/main.db\"\ncmdLogfile = \"history.log\"\n\ndef logCommand(command):\n    if cmdLogfile is not None:\n        with open(cmdLogfile, \"a\") as f:\n            timestamp = time.ctime()\n            f.write(\"[{}] {}\\n\".format(timestamp, command))\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n    if request.method == \"GET\":\n        return render_template(\"index.html\")\n    else:\n        stmt = request.form.get(\"stmt\")\n        logCommand(stmt)\n        conn = sqlite3.connect(sqlFilename)\n        c = conn.cursor()\n        try:\n            if stmt.strip().lower().startswith(\"select\"):\n                c.execute(\"SELECT * FROM (\" + stmt.replace(\";\", \"\") + \") WHERE 1=0;\")\n                columns = [col[0] for col in c.description]\n                c.execute(stmt)\n                rows = c.fetchall()\n                conn.close()\n                return render_template(\"index.html\", columns=columns, rows=rows, status=\"Command executed successfully\", command=stmt)\n            else:\n                
c.execute(stmt)\n conn.commit()\n conn.close()\n return render_template(\"index.html\", status=\"Command executed successfully\", command=stmt)\n except Exception as e:\n return render_template(\"index.html\", status=e.args[0], command=stmt)\n","sub_path":"src/server/manageDB/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"372150108","text":"__author__ = 'josip'\n\nfrom django.core.management.base import BaseCommand, make_option, CommandError\nfrom django.utils import timezone\n\n\nclass Command(BaseCommand):\n URL_BASE = 'http://www.hnb.hr/tecajn/f{:0>2}{:0>2}{:0>4}.dat'\n\n help = 'Takes an URL that leads to raw data and tries to parse it.'\n option_list = BaseCommand.option_list + (\n make_option('--today', action='store_true', default=False,\n help='Takes the current date and injects it into the URL. Using'\n ' this option ignores any provided URL argument'),\n )\n\n def handle(self, *args, **options):\n url = ''\n opt_today = options['today']\n if opt_today:\n date = timezone.now()\n url = self.URL_BASE.format(date.day, date.month, date.year)\n\n try:\n if not url:\n url = args[0]\n if get_raw_data(url):\n print(\"Success\")\n else:\n raise CommandError(\"Failed to finish the request\")\n except IndexError:\n raise CommandError(\"URL must be supplied\")\n\n\nfrom urllib import request\nfrom urllib.error import HTTPError\nfrom data_receiver import data_parser\n\n\ndef get_raw_data(url):\n try:\n s = request.urlopen(url).read()\n return data_parser.parse(s.decode())\n except HTTPError:\n return False\n","sub_path":"data_receiver/management/commands/easy_get.py","file_name":"easy_get.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"383837185","text":"# pylint: disable=missing-docstring,redefined-outer-name,protected-access\nimport pytest\nfrom ray.rllib import RolloutWorker\nfrom ray.rllib import SampleBatch\n\n\n@pytest.fixture\ndef worker(envs, env_name, policy_cls):\n return RolloutWorker(\n env_creator=envs[env_name],\n policy=policy_cls,\n policy_config={\"env\": env_name},\n rollout_fragment_length=1,\n batch_mode=\"complete_episodes\",\n )\n\n\ndef test_collect_traj(worker):\n traj = worker.sample()\n assert isinstance(traj, SampleBatch)\n","sub_path":"tests/general/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405647600","text":"#Do Thanh Trung - MaSV: 10020382\n#triangle.py\n\n#import sys\nimport math\ndef detect_triangle(x,y,z):\n if ((type(x) not in [float, int, long])\n or (type(y) not in [float, int, long])\n or (type(z) not in [float, int, long])\n or x<0 or y<0 or z<0\n or (x>2**32-1) or (y>2**32-1) or (z>2**32-1)):\n return \"gia tri khong hop le\"\n else:\n e = 1e-10\n if x==y==z:\n return \"tam giac deu\"\n elif (x+y)>z and (x+z)>y and (y+z)>x:\n if ((x==y and math.fabs(z**2-x**2-y**2) \")\n # sys.exit()\n # Step 1: put input text to the test_data_dir.story\n input_text = sys.argv[1]\n write_test_file(input_text)\n # Step 2: Preprocess test_data_dir.story; output test_data_tokenized + test_finished_files\n os.system(\"python make_data_test.py %s\" % test_data_dir)\n\n # Step 3: Call NLP model\n os.system(\"python run_summarization.py --mode=decode \\\n --data_path=%s --vocab_path=%s --log_root=%s 
--exp_name=pretrained_model_tf1.2.1 \\\n --max_enc_steps=400 --max_dec_steps=120 \\\n --coverage=1 --single_pass=1\" % (data_path_dir,vocab_path_dir,pretrained_model_dir))\n # Step 4: return .txt, then delete the output folder\n #os.remove(\"mapping.txt\")\n","sub_path":"NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"579592203","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LinearRegression \n\n\n# In[157]:\n\n\ndf = pd.read_csv('machine-learning-ex1/ex1/ex1data2.txt', sep=\",\", header=None, names=[\"a\", \"b\",\"c\"])\nx_trainfirst=df[['a','b']]\nx_trainfirst = np.array(x_trainfirst)\ny_train=df['c']\nsize=len(x_trainfirst)\ny_train = np.array(y_train).reshape(-1,1)\nx_trainfirst = np.array(x_trainfirst)\nx_trainfirst=(x_trainfirst-np.mean(x_trainfirst, axis=0))/np.std(x_trainfirst, axis=0)\nx_train=np.hstack((x_trainfirst[:,:0], np.ones((size,1)), x_trainfirst[:,0:]))\n\n\n# In[158]:\n\n\ndef calCostFunction(theta):\n return np.sum((np.sum(x_train*theta,axis=1).reshape(-1,1)-y_train)**2)/(size*2)\n\n\n# In[198]:\n\n\nalpha=0.001\ndef graDescent(theta):\n return theta-alpha*np.sum((np.sum(x_train*theta,axis=1).reshape(-1,1)-y_train)*x_train,axis=0)/size\n\n\n# In[200]:\n\n\ntheta=np.array([0,0,0])\nitter=[]\ncostfunction=[]\nfor i in range(0,500000):\n theta=graDescent(theta)\n itter.append(i)\n costfunction.append(calCostFunction(theta))\n\n\n# In[201]:\n\n\nplt.ylabel('Cost Function')\nplt.xlabel('Iterater')\nplt.plot(np.array(itter),np.array(costfunction),'-')\nplt.rcParams[\"figure.figsize\"] = [16,9]\nplt.show()\n\n\n# In[202]:\n\n\ntheta[0]+theta[1]*1650+theta[2]*3\n\n\n# In[203]:\n\n\nx_train\n\n","sub_path":"Linear_Regression/MultiLinearRegression.py","file_name":"MultiLinearRegression.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200942093","text":"## Copyright (c) 2017 Robert Bosch GmbH\n## All rights reserved.\n##\n## This source code is licensed under the MIT license found in the\n## LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nimport numpy as np\nfrom torch.nn.utils import weight_norm\nimport pickle\nimport sys\nfrom termcolor import colored\n\nfrom modules.hierarchical_embedding import HierarchicalEmbedding\nfrom modules.embeddings import LearnableEmbedding, SineEmbedding\n\n\ndef sqdist(A,B):\n return (A**2).sum(dim=2)[:,:,None] + (B**2).sum(dim=2)[:,None,:] - 2*torch.bmm(A,B.transpose(1,2))\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, d_in, d_out, groups=1, dropout=0.0):\n super().__init__()\n assert d_in % groups == 0, \"Input dimension must be a multiple of groups\"\n assert d_out % groups == 0, \"Output dimension must be a multiple of groups\"\n self.d_in = d_in\n self.d_out = d_out\n self.proj = nn.Sequential(nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups),\n nn.ReLU(inplace=True),\n nn.Dropout(dropout),\n nn.Conv1d(d_out, d_out, kernel_size=1, groups=groups),\n nn.Dropout(dropout))\n if d_in != d_out:\n self.downsample = nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups)\n \n def forward(self, x):\n assert x.size(1) == self.d_in, \"x dimension does not agree with 
d_in\"\n return x + self.proj(x) if self.d_in == self.d_out else self.downsample(x) + self.proj(x)\n\n \nclass GraphLayer(nn.Module):\n def __init__(self, d_model, d_inner, n_head, d_head, dropout=0.0, attn_dropout=0.0, wnorm=False, use_quad=False, lev=0):\n super().__init__()\n self.d_model = d_model\n self.d_inner = d_inner\n self.n_head = n_head\n self.d_head = d_head\n self.dropout = nn.Dropout(dropout)\n self.attn_dropout = nn.Dropout(attn_dropout)\n self.lev = lev\n self.use_quad = use_quad\n \n # To produce the query-key-value for the self-attention computation\n self.qkv_net = nn.Linear(d_model, 3*d_model)\n self.o_net = nn.Linear(n_head*d_head, d_model, bias=False)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n \n self.proj1 = nn.Linear(d_model, d_inner)\n self.proj2 = nn.Linear(d_inner, d_model)\n self.gamma = nn.Parameter(torch.ones(4,4)) # For different sub-matrices of D\n self.sqrtd = np.sqrt(d_head)\n \n if wnorm:\n self.wnorm()\n \n def wnorm(self):\n self.qkv_net = weight_norm(self.qkv_net, name=\"weight\")\n self.o_net = weight_norm(self.o_net, name=\"weight\")\n self.proj1 = weight_norm(self.proj1, name=\"weight\")\n self.proj2 = weight_norm(self.proj2, name=\"weight\")\n \n def forward(self, Z, D, new_mask, mask, RA, RB, RT, RQ, store=False):\n # RA = slice(0,N), RB = slice(N,N+M), RT = slice(N+M, N+M+P)\n bsz, n_elem, nhid = Z.size()\n n_head, d_head, d_model = self.n_head, self.d_head, self.d_model\n assert nhid == d_model, \"Hidden dimension of Z does not agree with d_model\"\n \n # Create gamma mask\n gamma_mask = torch.ones_like(D)\n all_slices = [RA, RB, RT, RQ] if self.use_quad else [RA, RB, RT]\n for i, slice_i in enumerate(all_slices):\n for j, slice_j in enumerate(all_slices):\n gamma_mask[:, slice_i, slice_j] = self.gamma[i, j]\n \n # Self-attention\n inp = Z\n Z = self.norm1(Z)\n Z2, Z3, Z4 = self.qkv_net(Z).view(bsz, n_elem, n_head, 3*d_head).chunk(3, dim=3) # \"V, Q, K\"\n W = -(gamma_mask*D)[:,None] + torch.einsum('bnij, bmij->binm', Z3, Z4).type(D.dtype) / self.sqrtd + new_mask[:,None]\n W = self.attn_dropout(F.softmax(W, dim=3).type(mask.dtype) * mask[:,None]) # softmax(-gamma*D + Q^TK)\n if store:\n pickle.dump(W.cpu().detach().numpy(), open(f'analysis/layer_{self.lev}_W.pkl', 'wb'))\n attn_out = torch.einsum('binm,bmij->bnij', W, Z2.type(W.dtype)).contiguous().view(bsz, n_elem, d_model)\n attn_out = self.dropout(self.o_net(F.leaky_relu(attn_out)))\n Z = attn_out + inp\n \n # Position-wise feed-forward\n inp = Z\n Z = self.norm2(Z)\n return self.proj2(self.dropout(F.relu(self.proj1(Z)))) + inp\n\n \nclass GraphTransformer(nn.Module):\n def __init__(self, dim, n_layers, final_dim, d_inner, \n fdim=30,\n dropout=0.0, \n dropatt=0.0,\n final_dropout=0.0, \n n_head=10,\n num_atom_types=[5,13,27], \n num_bond_types=[28,53,69],\n num_triplet_types=[29,118],\n num_quad_types=[62],\n min_bond_dist=0.9586,\n max_bond_dist=3.9244,\n dist_embedding=\"sine\",\n atom_angle_embedding=\"learnable\",\n trip_angle_embedding=\"learnable\",\n quad_angle_embedding=\"learnable\",\n wnorm=False,\n use_quad=False\n ):\n super().__init__()\n self.fdim = fdim\n num_atom_types = np.array(num_atom_types)\n num_bond_types = np.array(num_bond_types)\n num_triplet_types = np.array(num_triplet_types)\n num_quad_types = np.array(num_quad_types)\n self.atom_embedding = LearnableEmbedding(len(num_atom_types), num_atom_types+1, \n d_embeds=dim-self.fdim, d_feature=self.fdim, n_feature=2) \\\n if atom_angle_embedding == \"learnable\" else 
SineEmbedding(len(num_atom_types), num_atom_types+1, dim, n_feature=2)\n self.bond_embedding = LearnableEmbedding(len(num_bond_types), num_bond_types+1, \n d_embeds=dim-self.fdim, d_feature=self.fdim, n_feature=1) \\\n if dist_embedding == \"learnable\" else SineEmbedding(len(num_bond_types), num_bond_types+1, dim, n_feature=1)\n self.triplet_embedding = LearnableEmbedding(len(num_triplet_types), num_triplet_types+1, \n d_embeds=dim-self.fdim, d_feature=self.fdim, n_feature=1) \\\n if trip_angle_embedding == \"learnable\" else SineEmbedding(len(num_triplet_types), num_triplet_types+1, dim)\n \n if use_quad:\n self.quad_embedding = LearnableEmbedding(len(num_quad_types), num_quad_types+1, \n d_embeds=dim-self.fdim, d_feature=self.fdim, n_feature=1) \\\n if quad_angle_embedding == \"learnable\" else SineEmbedding(len(num_quad_types), num_quad_types+1, dim)\n\n self.dim = dim\n self.min_bond_dist = min_bond_dist\n self.max_bond_dist = max_bond_dist\n self.wnorm = wnorm\n self.use_quad = use_quad\n print(f\"{'' if use_quad else colored('Not ', 'cyan')}Using Quadruplet Features\")\n\n self.n_head = n_head\n assert dim % n_head == 0, \"dim must be a multiple of n_head\"\n self.layers = nn.ModuleList([GraphLayer(d_model=dim, d_inner=d_inner, n_head=n_head, d_head=dim//n_head, dropout=dropout,\n attn_dropout=dropatt, wnorm=wnorm, use_quad=use_quad, lev=i+1) for i in range(n_layers)])\n\n self.final_norm = nn.LayerNorm(dim)\n \n # TODO: Warning: we are predicting with the second-hierarchy bond (sub)types!!!!!\n self.final_dropout = final_dropout\n self.final_dim = num_bond_types[1]*final_dim\n self.final_lin1 = nn.Conv1d(dim, self.final_dim, kernel_size=1)\n self.final_res = nn.Sequential(\n # ResidualBlock(self.final_dim, self.final_dim, groups=int(num_bond_types[1]), dropout=final_dropout),\n ResidualBlock(self.final_dim, self.final_dim, groups=int(num_bond_types[1]), dropout=final_dropout),\n nn.Conv1d(self.final_dim, num_bond_types[1], kernel_size=1, groups=int(num_bond_types[1]))\n )\n self.apply(self.weights_init)\n \n def forward(self,x_atom,x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle, x_quad, x_quad_angle):\n # PART I: Form the embeddings and the distance matrix\n bsz = x_atom.shape[0]\n N = x_atom.shape[1]\n M = x_bond.shape[1]\n P = x_triplet.shape[1]\n Q = x_quad.shape[1] if self.use_quad else 0\n\n D = torch.zeros(x_atom.shape[0], N+M+P+Q, N+M+P+Q, device=x_atom.device)\n RA = slice(0,N)\n RB = slice(N,N+M)\n RT = slice(N+M, N+M+P)\n RQ = slice(N+M+P, N+M+P+Q)\n\n D[:,RA,RA] = sqdist(x_atom_pos[:,:,:3], x_atom_pos[:,:,:3]) # Only the x,y,z information, not charge/angle\n\n for i in range(D.shape[0]):\n # bonds\n a1,a2 = x_bond[i,:,3], x_bond[i,:,4]\n D[i, RA, RB] = torch.min(D[i, RA, a1], D[i, RA, a2])\n D[i, RB, RA] = D[i, RA, RB].transpose(0,1)\n D[i, RB, RB] = (D[i,a1,RB] + D[i,a2,RB])/2\n D[i, RB ,RB] = (D[i,RB,RB] + D[i,RB,RB].transpose(0,1))/2\n\n # triplets\n a1,a2,a3 = x_triplet[i,:,1], x_triplet[i,:,2], x_triplet[i,:,3]\n b1,b2 = x_triplet[i,:,4], x_triplet[i,:,5]\n D[i,RA,RT] = torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]) + D[i,RA,a1]\n D[i,RT,RA] = D[i,RA,RT].transpose(0,1)\n D[i,RB,RT] = torch.min(D[i,RB,b1], D[i,RB,b2])\n D[i,RT,RB] = D[i,RB,RT].transpose(0,1)\n D[i,RT,RT] = (D[i,b1,RT] + D[i,b2,RT]) / 2\n D[i,RT,RT] = (D[i,RT,RT] + D[i,RT,RT].transpose(0,1)) / 2\n \n if self.use_quad:\n # quad\n a1,a2,a3,a4 = x_quad[i,:,1], x_quad[i,:,2], x_quad[i,:,3], x_quad[i,:,4]\n b1,b2,b3 = x_quad[i,:,5], x_quad[i,:,6], x_quad[i,:,7]\n t1,t2 = 
x_quad[i,:,8], x_quad[i,:,9]\n D[i,RA,RQ] = torch.min(torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]), D[i,RA,a4]) + \\\n torch.min(D[i,RA,a1], D[i,RA,a2])\n D[i,RQ,RA] = D[i,RA,RQ].transpose(0,1)\n D[i,RB,RQ] = torch.min(torch.min(D[i,RB,b1], D[i,RB,b2]), D[i,RB, b3]) + D[i,RB,b1]\n D[i,RQ,RB] = D[i,RB,RQ].transpose(0,1)\n D[i,RT,RQ] = torch.min(D[i,RT,t1], D[i,RT,t2])\n D[i,RQ,RT] = D[i,RT,RQ].transpose(0,1)\n D[i,RQ,RQ] = (D[i,t1,RQ] + D[i,t2,RQ]) / 2\n D[i,RQ,RQ] = (D[i,RQ,RQ] + D[i,RQ,RQ].transpose(0,1))/2\n \n # No interaction (as in attention = 0) if query or key is the zero padding...\n if self.use_quad:\n mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0, x_quad[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)\n else:\n mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)\n mask = torch.einsum('bi, bj->bij', mask, mask)\n new_mask = -1e20 * torch.ones_like(mask).to(mask.device)\n new_mask[mask > 0] = 0\n if self.use_quad:\n Z = torch.cat([\n self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]), \n self.bond_embedding(x_bond[:,:,:3], x_bond_dist), \n self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle), \n self.quad_embedding(x_quad[:,:,:1], x_quad_angle), \n ], dim=1)\n else:\n Z = torch.cat([\n self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]), \n self.bond_embedding(x_bond[:,:,:3], x_bond_dist), \n self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle), \n ], dim=1)\n \n # PART II: Pass through a bunch of self-attention and position-wise feed-forward blocks\n seed = np.random.uniform(0,1)\n for i in range(len(self.layers)):\n Z = self.layers[i](Z, D, new_mask, mask, RA, RB, RT, RQ, store=False)\n \n # PART III: Coupling type based (grouped) transformations\n Z = self.final_norm(Z)\n Z_group = self.final_lin1(Z.transpose(1,2)[:,:,RB])\n return self.final_res(Z_group), Z\n\n @staticmethod\n def init_weight(weight):\n nn.init.uniform_(weight, -0.1, 0.1)\n\n @staticmethod\n def init_bias(bias):\n nn.init.constant_(bias, 0.0)\n\n @staticmethod\n def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1 or classname.find('Conv1d') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n GraphTransformer.init_weight(m.weight)\n if hasattr(m, 'bias') and m.bias is not None:\n GraphTransformer.init_bias(m.bias)\n \n","sub_path":"src/graph_transformer.py","file_name":"graph_transformer.py","file_ext":"py","file_size_in_byte":13065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"43227627","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nfrom ast import parse\nfrom inspect import getmembers, isfunction\nfrom unittest.mock import MagicMock\n\nfrom sphinx.ext.napoleon.docstring import GoogleDocstring\n\nconfpath = os.path.dirname(__file__)\nsys.path.append(confpath)\nfrom docutil import insert_inheritance_diagram, package_classes\n\n\n## See\n## https://github.com/sphinx-doc/sphinx/issues/2115\n## https://michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html\n##\n# first, we define new methods for any new sections and add them to the class\ndef parse_keys_section(self, section):\n return self._format_fields(\"Keys\", self._consume_fields())\n\n\nGoogleDocstring._parse_keys_section = parse_keys_section\n\n\ndef parse_attributes_section(self, section):\n return self._format_fields(\"Attributes\", 
self._consume_fields())\n\n\nGoogleDocstring._parse_attributes_section = parse_attributes_section\n\n\ndef parse_class_attributes_section(self, section):\n    return self._format_fields(\"Class Attributes\", self._consume_fields())\n\n\nGoogleDocstring._parse_class_attributes_section = parse_class_attributes_section\n\n# we now patch the parse method to guarantee that the above methods are\n# assigned to the _section dict\ndef patched_parse(self):\n    self._sections[\"keys\"] = self._parse_keys_section\n    self._sections[\"class attributes\"] = self._parse_class_attributes_section\n    self._unpatched_parse()\n\n\nGoogleDocstring._unpatched_parse = GoogleDocstring._parse\nGoogleDocstring._parse = patched_parse\n\n\non_rtd = os.environ.get(\"READTHEDOCS\") == \"True\"\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nrootpath = os.path.abspath(\"../..\")\nsys.path.insert(0, rootpath)\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"3.3\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"sphinx.ext.napoleon\",\n    \"sphinx.ext.autodoc\",\n    \"sphinx_autodoc_typehints\",\n    \"sphinx.ext.autosummary\",\n    \"sphinx.ext.doctest\",\n    \"sphinx.ext.intersphinx\",\n    \"sphinx.ext.viewcode\",\n    \"sphinxcontrib.bibtex\",\n    \"sphinx.ext.inheritance_diagram\",\n    \"sphinx.ext.mathjax\",\n    \"sphinx.ext.todo\",\n    \"nbsphinx\",\n]\n\nbibtex_bibfiles = [\"references.bib\"]\n\nnbsphinx_execute = \"never\"\nnbsphinx_prolog = \"\"\"\n.. 
raw:: html\n\n \n\"\"\"\n\n\n# See\n# https://stackoverflow.com/questions/2701998#62613202\n# https://github.com/JamesALeedham/Sphinx-Autosummary-Recursion\nautosummary_generate = True\n\n# Copied from scikit-learn sphinx configuration\nif os.environ.get(\"NO_MATHJAX\"):\n extensions.append(\"sphinx.ext.imgmath\")\n imgmath_image_format = \"svg\"\nelse:\n extensions.append(\"sphinx.ext.mathjax\")\n mathjax_path = \"https://cdn.mathjax.org/mathjax/latest/\" \"MathJax.js?config=TeX-AMS_HTML\"\n\nmathjax_config = {\n \"TeX\": {\n \"Macros\": {\n \"mb\": [r\"\\mathbf{#1}\", 1],\n \"mbs\": [r\"\\boldsymbol{#1}\", 1],\n \"mbb\": [r\"\\mathbb{#1}\", 1],\n \"norm\": [r\"\\lVert #1 \\rVert\", 1],\n \"abs\": [r\"\\left| #1 \\right|\", 1],\n \"argmin\": [r\"\\mathop{\\mathrm{argmin}}\"],\n \"sign\": [r\"\\mathop{\\mathrm{sign}}\"],\n \"prox\": [r\"\\mathop{\\mathrm{prox}}\"],\n \"loss\": [r\"\\mathop{\\mathrm{loss}}\"],\n \"kp\": [r\"k_{\\|}\"],\n \"rp\": [r\"r_{\\|}\"],\n }\n }\n}\n\n\n# See https://stackoverflow.com/questions/5599254\nautoclass_content = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"SCICO\"\ncopyright = \"2020-2021, SCICO Developers\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nwith open(os.path.join(\"../../scico\", \"__init__.py\")) as f:\n version = parse(next(filter(lambda line: line.startswith(\"__version__\"), f))).body[0].value.s\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"tmp\", \"*.tmp.*\", \"*.tmp\"]\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n# html_theme = \"sphinx_rtd_theme\"\nhtml_theme = \"faculty-sphinx-theme\"\n\nhtml_theme_options = {\n \"includehidden\": False,\n \"logo_only\": True,\n}\n\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\nhtml_logo = \"_static/logo.svg\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\nhtml_favicon = \"_static/scico.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nif on_rtd:\n    html_static_path = []\nelse:\n    html_static_path = [\"_static\"]\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"SCICOdoc\"\n\n# Include TODOs\ntodo_include_todos = True\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    #'papersize': 'letterpaper',\n    # The font size ('10pt', '11pt' or '12pt').\n    #'pointsize': '10pt',\n    # Additional stuff for the LaTeX preamble.\n    #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (\"index\", \"scico.tex\", \"SCICO Documentation\", \"The SCICO Developers\", \"manual\"),\n]\n\n\nlatex_engine = \"xelatex\"\n\n# latex_use_xindy = False\n\nlatex_macros = []\nfor k, v in mathjax_config[\"TeX\"][\"Macros\"].items():\n    if len(v) == 1:\n        latex_macros.append(r\"\\newcommand{\\%s}{%s}\" % (k, v[0]))\n    else:\n        latex_macros.append(r\"\\newcommand{\\%s}[1]{%s}\" % (k, v[0]))\n\nlatex_elements = {\"preamble\": \"\\n\".join(latex_macros)}\n\n\n# Intersphinx mapping\nintersphinx_mapping = {\n    \"python\": (\"https://docs.python.org/3/\", None),\n    \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n    \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n    \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n    \"jax\": (\"https://jax.readthedocs.io/en/latest/\", None),\n    \"objax\": (\"https://objax.readthedocs.io/en/latest/\", None),\n}\n# Added timeout due to periodic scipy.org down time\n# intersphinx_timeout = 30\n\n# napoleon_include_init_with_doc = True\nnapoleon_use_ivar = True\nnapoleon_use_rtype = False\n\ngraphviz_output_format = \"svg\"\ninheritance_graph_attrs = dict(rankdir=\"LR\", fontsize=9, ratio=\"compress\", bgcolor=\"transparent\")\ninheritance_node_attrs = dict(\n    shape=\"box\",\n    fontsize=9,\n    height=0.4,\n    margin='\"0.08, 0.03\"',\n    style='\"rounded,filled\"',\n    fillcolor='\"#f4f4ffff\"',\n)\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"scico\", \"SCICO Documentation\", [\"SCICO Developers\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n \"index\",\n \"SCICO\",\n \"SCICO Documentation\",\n \"SCICO Developers\",\n \"SCICO\",\n \"Scientific Computational Imaging COde (SCICO)\",\n \"Miscellaneous\",\n ),\n]\n\n\nif on_rtd:\n print(\"Building on ReadTheDocs\")\n print\n print(\"Current working directory: {}\".format(os.path.abspath(os.curdir)))\n import numpy as np\n\n print(\"NumPy version: %s\" % np.__version__)\n import matplotlib\n\n matplotlib.use(\"agg\")\n\n\nMOCK_MODULES = [\"astra\", \"svmbir\"]\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return MagicMock()\n\n\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n\nprint(\"rootpath: %s\" % rootpath)\nprint(\"confpath: %s\" % confpath)\n\n# Sort members by type\nautodoc_default_options = {\n \"member-order\": \"bysource\",\n \"inherited-members\": True,\n \"ignore-module-all\": False,\n \"show-inheritance\": True,\n \"special-members\": \"__call__\",\n}\nautodoc_docstring_signature = True\nautoclass_content = \"both\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"**tests**\", \"**spi**\"]\n\n\n# Rewrite module names for certain functions imported into scico.numpy so that they are\n# included in the docs for that module. While a bit messy to do so here rather than in a\n# function run via app.connect, it is necessary (for some yet to be identified reason)\n# to do it here to ensure that the relevant API docs include a table of functions.\nimport scico.numpy\n\nsnp_func = getmembers(scico.numpy, isfunction)\nfor _, f in snp_func:\n if f.__module__[0:14] == \"jax._src.numpy\" or f.__module__ == \"scico.numpy._create\":\n # Rewrite module name so that function is included in docs\n f.__module__ = \"scico.numpy\"\n # Attempt to fix incorrect cross-reference\n if f.__name__ == \"compare_chararrays\":\n modname = \"numpy.char\"\n else:\n modname = \"numpy\"\n f.__doc__ = re.sub(\n r\"^LAX-backend implementation of :func:`([\\w_]+)`.\",\n r\"LAX-backend implementation of :obj:`%s.\\1`.\" % modname,\n str(f.__doc__),\n flags=re.M,\n )\n # Improve formatting of jax.numpy warning\n f.__doc__ = re.sub(\n r\"^\\*\\*\\* This function is not yet implemented by jax.numpy, and will \"\n \"raise NotImplementedError \\*\\*\\*\",\n \"**WARNING**: This function is not yet implemented by jax.numpy, \"\n \" and will raise :exc:`NotImplementedError`.\",\n f.__doc__,\n flags=re.M,\n )\n # Remove cross-reference to numpydoc style references section\n f.__doc__ = re.sub(r\" \\[(\\d+)\\]_\", \"\", f.__doc__, flags=re.M)\n # Remove entire numpydoc references section\n f.__doc__ = re.sub(r\"References\\n----------\\n.*\\n\", \"\", f.__doc__, flags=re.DOTALL)\n\n# Remove spurious two-space indentation of entire docstring\nscico.numpy.vectorize.__doc__ = re.sub(\"^ \", \"\", scico.numpy.vectorize.__doc__, flags=re.M)\n\n\ndef class_inherit_diagrams(_):\n # Insert inheritance diagrams for classes that have base classes\n import scico\n\n clslst = package_classes(scico)\n for cls in clslst:\n insert_inheritance_diagram(cls)\n\n\ndef setup(app):\n\n app.add_css_file(\"scico.css\")\n app.add_css_file(\n \"http://netdna.bootstrapcdn.com/font-awesome/4.7.0/\" \"css/font-awesome.min.css\"\n )\n app.connect(\"builder-inited\", 
class_inherit_diagrams)\n","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":12214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"635252828","text":"import entitymanager\nfrom math import sqrt, pow\n\nclass gamemanager:\n def __init__(self, terrain, entitymanager):\n self.terrain=terrain\n self.entitymanager=entitymanager\n entitymanager.gamemanager = self\n \n def stepGame(self, timedelta):\n self.timedelta=timedelta\n for e in self.entitymanager.entities:\n self.stepEntity(self.entitymanager.entities[e], self.timedelta)\n \n def stepEntity(self, entity, timedelta):\n self.moveEntity(entity, timedelta)\n \n def moveEntity(self, ce, timedelta):\n if(ce.position!=ce.destination):\n dist = self._getDistance(ce.position, ce.destination)\n if(dist<=timedelta*ce.movespeed):\n ce.position=ce.destination\n else:\n ratio=timedelta*ce.movespeed/dist\n ce.position=(\n ce.position[0]+ratio*(ce.destination[0]-ce.position[0]),\n ce.position[1]+ratio*(ce.destination[1]-ce.position[1])\n )\n \n def _getDistance(self, a,b):\n return sqrt( (a[0]-b[0])**2 + (a[1]-b[1])**2 )\n \n","sub_path":"src/common/gamemanager.py","file_name":"gamemanager.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"432382739","text":"\"\"\"\r\nrottenprob.problems.urls\r\n\"\"\"\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom django.conf.urls import url, patterns\r\n\r\nfrom . import views\r\nfrom myauth.forms import LoginForm\r\n\r\nurlpatterns = [\r\n url(r'^$', views.lists, name='lists'), \r\n #url(r'^(?P\\w+)/(?P\\d{0,10})/$', views.lists, name='lists'), \r\n url(r'^lists/$', views.lists, name='lists'), \r\n url(r'^lists/(?P\\w+)/$', views.lists, name='lists'), \r\n url(r'^lists/(?P\\w+)/(?P\\d{0,10})/$', views.lists, name='lists'), \r\n url(r'^category$', views.category, name='category'), \r\n url(r'^delete$', views.delete, name='delete'), \r\n url(r'^try_solve$', views.try_solve, name='try_solve'), \r\n url(r'^solved_now$', views.solved_now, name='solved_now'), \r\n url(r'^cannot_solve_now$', views.cannot_solve_now, name='cannot_solve_now'), \r\n url(r'^ajax_post_test$', views.ajax_post_test, name='ajax_post_test'), \r\n]\r\n\r\n# if settings.DEBUG:\r\n # urlpatterns += patterns('',\r\n # url(r'^media/(?P.*)$', 'django.views.static.serve', {\r\n # 'document_root': settings.MEDIA_ROOT,\r\n # }),\r\n# )","sub_path":"problems/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113591194","text":"import urllib3\nimport os\nimport xml.etree.ElementTree as ET\nimport random\nimport pandas as pd\nimport numpy as np\n\nhttp = urllib3.PoolManager()\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nbook_df = pd.read_pickle('book_df.pickle')\n\nseries_ids = np.load('series_ids_arr.npy').tolist()\n\nseries_arr = []\n\nbook_series_arr = []\n\nfor book_id in book_df['book_id']:\n\n print(float(book_df['book_id'][book_df['book_id'] == book_id].index[0]) / float(len(book_df['book_id'])))\n\n book_url = 'https://www.goodreads.com/book/show/' + str(book_id) + '.xml?key=KcoXxTmT5V02yZxpy87HgA'\n\n r = http.request('GET', book_url)\n data = r.data\n if r.status == 404:\n continue\n\n e = ET.fromstring(data)\n\n book = e.find('./book')\n\n series_works = 
book.findall('./series_works/series_work')\n\n for series_work in series_works:\n series = series_work.find('./series')\n series_id = series.find('./id').text\n book_series_position = series_work.find('./user_position').text\n\n if series_id not in series_ids:\n series_title = series.find('./title').text\n series_description = series.find('./description').text\n series_notes = series.find('./note').text\n series_works_count = series.find('./series_works_count').text\n series_primary_work_count = series.find('./primary_work_count').text\n series_arr.append([series_id, series_title, series_description, series_notes,\n series_works_count, series_primary_work_count])\n series_ids.append(series_id)\n\n book_series_arr.append([book_id, series_id, book_series_position])\n\n\nseries_df = pd.DataFrame(np.array(series_arr), columns=['series_id', 'series_title', 'series_description', 'series_notes',\n 'series_works_count', 'series_primary_work_count'])\nif os.path.exists('series_df.pickle'):\n series_df = pd.read_pickle('series_df.pickle').append(series_df, ignore_index=True)\n\nbook_series_df = pd.DataFrame(np.array(book_series_arr), columns=['book_id', 'series_id', 'book_series_position'])\n\nbook_series_df.to_pickle('book_series_df.pickle')\nseries_df.to_pickle('series_df.pickle')\nnp.save('series_ids_arr', np.array(series_ids))\n","sub_path":"Archive/book-series-work.py","file_name":"book-series-work.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"492981029","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom io import (\n BytesIO,\n StringIO,\n )\nfrom json.decoder import JSONDecodeError\nfrom os import (\n path,\n makedirs,\n)\nimport time\n\ntry:\n import httpx as requests\nexcept ImportError:\n import requests\nfrom PIL import Image\nimport urllib3\n\nPATH = path.join('..', '.assets', 'smite')\nAPI_URL = 'https://cms.smitegame.com/wp-json/smite-api'\nCDN_URL = 'https://webcdn.hirezstudios.com/smite'\nCARDS_URL = f'{CDN_URL}/god-cards'\nICONS_URL = f'{CDN_URL}/god-icons'\n\nfix_name = lambda name: str(name).lower().replace(' ', '-').replace(\"'\", '').replace(',', '').replace('/', '-').replace('!', '')\nget_path = lambda d: path.join(PATH, d)\n\ndef http_requests(url, **kw):\n buffer = BytesIO()\n max_retries = kw.pop('max_tries', 5)\n for n in range(max_retries):\n try:\n r = requests.get(url)\n return r.content\n except:\n time.sleep(n)\n\ndef download_img(url):\n try:\n return Image.open(BytesIO(http_requests(url)))\n except:\n pass\n\ndef make_dir(p):\n if not path.exists(p):\n try:\n makedirs(p)\n except OSError as e:\n pass\n\ndef save_img(image, folder, name, ext='png'):\n folder = get_path(folder)\n make_dir(folder)\n img_path = path.join(folder, f'{str(name).lower()}.{ext}')\n if image and not path.isfile(img_path):\n try:\n try:\n image.save(img_path, image.mode)\n except:\n image.save(img_path, 'PNG')\n except Exception as e:\n print(f'Could not save {img_path} as an image. 
{e}')\n else:\n print(f'Saving {img_path} - {image.format}, {image.mode}, {image.size}')\n\ndef god_card(name, god_id, ext='jpg'):\n save_img(download_img(f'{CARDS_URL}/{name}.{ext}'), 'cards', god_id, ext)\n\ndef god_icon(name, god_id, ext='jpg'):\n save_img(download_img(f'{ICONS_URL}/{name}.{ext}'), 'characters', god_id, ext)\n\ndef fetch_all(lang=(1,)):\n for l in lang:\n for c in requests.get(f'{API_URL}/all-gods/{l}').json() or {}:\n god_name = fix_name(c.get('god_name_EN') or '')\n god_id = c.get('id') or 0\n if god_id and god_name:\n god_card(god_name, god_id)\n god_icon(god_name, god_id)\n\nfetch_all()\n","sub_path":".scripts/smite_gods.py","file_name":"smite_gods.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"510695306","text":"import argparse\nimport numpy as np\nfrom evomini.ga import SimpleGA\nfrom evomini.nn import Module, Linear, LSTM\nfrom evomini.eval import Evaluator\nfrom cartpole_swingup import CartPoleSwingUpEnv\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--seed\", type=int, default=0)\nparser.add_argument(\"--num-workers\", type=int, default=16)\nparser.add_argument(\"--models-per-worker\", type=int, default=16)\nparser.add_argument(\"--num-gen\", type=int, default=1000)\nparser.add_argument(\"--num-evals\", type=int, default=1)\nparser.add_argument(\"--num-topk-evals\", type=int, default=5)\nparser.add_argument(\"--precision\", type=int, default=4)\nparser.add_argument(\"--sigma\", type=float, default=0.1)\nparser.add_argument(\"--mut_sigma\", type=float, default=0.01)\nargs = parser.parse_args()\n\nnp.random.seed(args.seed)\n\nclass Model(Module):\n # small world model agent\n def __init__(self, obs_size, action_size, hidden_size):\n super().__init__()\n self.obs_size = obs_size\n self.action_size = action_size\n self.hidden_size = hidden_size\n self.register_module(\"C\", Linear(obs_size + hidden_size, action_size))\n self.register_module(\"M\", LSTM(obs_size + action_size, hidden_size))\n\n def __call__(self, *args, module=\"C\"):\n if module == \"C\":\n obs, h = args\n x = np.concatenate([obs, h])\n action = self.C(x)\n return action\n if module == \"M\":\n obs, action = args\n x = np.concatenate([obs, action])\n h = self.M(x)\n return h\n\nclass CartPoleSwingUpEvaluator(Evaluator):\n def _build_env(self):\n return CartPoleSwingUpEnv()\n \n def _build_model(self):\n return Model(5, 1, 16)\n\n def _evaluate_once(self, env, model):\n obs = env.reset()\n h = model.M.reset()\n rewards = 0\n done = False\n while not done:\n action = model(obs, h, module=\"C\")\n obs, reward, done, _ = env.step(action)\n h = model(obs, action, module=\"M\")\n rewards += reward\n return rewards\n\nenv = CartPoleSwingUpEnv()\nnum_params = len(Model(5, 1, 16))\nga = SimpleGA(num_params, sigma=args.sigma, mut_sigma=args.mut_sigma)\nglobal_best_fitness = -np.inf\n\nwith CartPoleSwingUpEvaluator(args.num_workers,\n args.models_per_worker,\n args.precision) as evaluator:\n popsize = len(evaluator)\n seeds, solutions = ga.sample(popsize)\n\n for gen in range(args.num_gen):\n fitness, success = evaluator.evaluate(seeds, solutions, args.num_evals)\n assert success, f\"evaluation failed at generation {gen}\"\n\n topk_seeds, topk_solutions = ga.set_elite_candidates(fitness)\n topk_fitness, success = evaluator.evaluate(topk_seeds, topk_solutions, args.num_topk_evals)\n assert success, f\"topk evaluation failed at generation {gen}\"\n elite_solution = ga.set_elite(topk_fitness)\n\n 
seeds, solutions = ga.step()\n\n elite_fitness = np.max(topk_fitness)\n if elite_fitness > global_best_fitness:\n print(f\"improvement detected: {global_best_fitness} -> {elite_fitness}\")\n np.save(\"model_final.npy\", elite_solution)\n global_best_fitness = elite_fitness\n\n stats = {\n \"gen\": gen,\n \"pop_fitness_mean\": np.mean(fitness),\n \"pop_fitness_std\": np.std(fitness),\n \"pop_fitness_max\": np.max(fitness),\n \"pop_fitness_min\": np.min(fitness),\n \"topk_fitness_mean\": np.mean(topk_fitness),\n \"topk_fitness_std\": np.std(topk_fitness),\n \"elite_fitness\": elite_fitness,\n }\n print(stats)\n","sub_path":"examples/cartpole_swingup/ga_main.py","file_name":"ga_main.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10582372","text":"import pytesseract as pt\n#import tensorflow as ts\nimport PyPDF2\nimport numpy as np\nimport pdf2image\nimport cv2\nfrom imutils.object_detection import non_max_suppression\nimport tempfile\n\nimport sklearn as sklearn\nfrom matplotlib import pyplot as plt\nimport os\n\nfrom skimage import filters, morphology, measure, transform\nfrom scipy.ndimage.morphology import binary_fill_holes\nfrom sklearn import cluster\nfrom scipy.fftpack import dct, idct\n\n\"\"\"\"\nsample = pdf2image.convert_from_path('C:\\\\Users\\\\bisib\\\\PycharmProjects\\\\SampleOCR\\\\sample_data\\\\Scanned_20200928-1633.pdf')\nfor page in sample:\n page.save('out.jpg', 'JPEG')\npt.pytesseract.tesseract_cmd =\"C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe\"\nimg = cv2.imread('sample_data/20200928_115921.jpg')\n#imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ntreated=cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)\ntreated= cv2.fastNlMeansDenoisingColored(treated,None,10,10,7,21)\nplt.subplot(121),plt.imshow(img)\nplt.subplot(122),plt.imshow(treated)\nplt.show()\n\n\nprint(pt.image_to_string(imgray))\n\"\"\"\n\"\"\"\nfhandle = open(r'sample_data/Scanned_20200928-1633.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(fhandle)\npagehandle = pdfReader.getPage(0)\nprint(pagehandle.extractText())\n\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n\"\"\"\n#cv2.imwrite('C:/Temp/person-masked.jpg', masked) # Save\n\nfrom PIL import ImageFilter, Image, ImageEnhance, ImageOps\n\npt.pytesseract.tesseract_cmd =\"C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe\"\nfiles = os.listdir('testingdata')\npath = os.getcwd()\nfor file in files:\n\n filepath = os.path.join(path, 'testingdata', file)\n if str(file).endswith('.pdf'):\n sample = pdf2image.convert_from_path(filepath)\n for page in sample:\n page.save('out.jpg', 'JPEG')\n filepath = os.path.join(path, 'out.jpg')\n\n # first of many steps of image preprocessing\n im = Image.open(filepath)\n orig = im.copy()\n orig = np.array(orig)\n im = ImageEnhance.Contrast(im).enhance(1.2)\n imm = ImageEnhance.Contrast(im).enhance(1.2)\n imm = ImageOps.grayscale(imm)\n\n frequencies = dct(dct(imm, axis=0), axis=1)\n frequencies[:2, :2] = 0\n gray = idct(idct(frequencies, axis=1), axis=0)\n\n gray = (gray - gray.min()) / (gray.max() - gray.min()) # renormalize to range [0:1]\n plt.subplot(121), plt.imshow(im)\n plt.subplot(122), plt.imshow(gray)\n plt.show()\n # blur\n im1 = imm.filter(ImageFilter.BLUR)\n\n # filter\n im2 = imm.filter(ImageFilter.MinFilter(3))\n im3 = 
imm.filter(ImageFilter.MinFilter)\n plt.subplot(121),plt.imshow(im)\n plt.subplot(122),plt.imshow(im3)\n plt.show()\n #open cv\n openCVim = np.array(im3)\n treated = cv2.fastNlMeansDenoising(openCVim, None, 10, 10, 7)\n\n thresh = cv2.threshold(treated, 150, 255, cv2.THRESH_BINARY)[1]\n plt.subplot(121), plt.imshow(treated)\n plt.subplot(122), plt.imshow(thresh)\n plt.show()\n #treated = cv2.fastNlMeansDenoising(treated, None, 10, 10, 7)\n #treated = cv2.cvtColor(treated, cv2.COLOR_RGB2GRAY)\n kernel = np.ones((5, 5), np.uint8)\n treated = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n\n\n\n treated = cv2.bitwise_not(treated)\n plt.subplot(121), plt.imshow(openCVim)\n plt.subplot(122), plt.imshow(treated)\n plt.show()\n treated2 = cv2.morphologyEx(treated, cv2.MORPH_CLOSE, kernel)\n plt.subplot(121), plt.imshow(treated)\n plt.subplot(122), plt.imshow(treated2)\n plt.show()\n\n # find contours\n\n\n mask = np.ones((treated2.shape[0],treated2.shape[1]), np.uint8)\n masked = cv2.bitwise_or(treated2, treated2, mask=mask)\n transposed = cv2.cvtColor(masked,cv2.COLOR_GRAY2RGB)\n transposed = cv2.bitwise_not(transposed)\n plt.subplot(121), plt.imshow(masked)\n plt.subplot(122), plt.imshow(transposed)\n plt.show()\n # read text\n image_data = pt.image_to_data(treated)\n txt1 = pt.image_to_string(treated)\n #txt_original = pt.image_to_string(im)\n filename = str(file)+'.txt'\n textpath = os.path.join(path, 'testingoutput', filename)\n with open(textpath, 'w') as txtfile:\n txtfile.write(txt1)\n\n\n\n\n\n","sub_path":"sampleOCR.py","file_name":"sampleOCR.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"547524612","text":"import time\nfrom telethon import *\nfrom os import system, name\nfrom pytz import timezone\nimport os\nfrom InstagramAPI import InstagramAPI\nfrom getpass import getpass\nimport re\nimport argparse\nimport sys\nimport pytz\nfrom datetime import datetime, timezone\nimport pickle\nfrom Spinner import Spinner\nimport json\nfrom random import randint\nimport random\nimport schedule\nfrom bs4 import BeautifulSoup\nimport requests\nfrom telethon.tl.functions.messages import GetHistoryRequest\nfrom telethon.sync import TelegramClient\nimport logging as logger\nimport logging.config\n\n\ncommegram = \"\"\" \n ___ ___ _ __ ___ _ __ ___ ___ __ _ _ __ __ _ _ __ ___ \n / __/ _ \\| '_ ` _ \\| '_ ` _ \\ / _ \\/ _` | '__/ _` | '_ ` _ \\ \n | (_| (_) | | | | | | | | | | | __/ (_| | | | (_| | | | | | |\n \\___\\___/|_| |_| |_|_| |_| |_|\\___|\\__, |_| \\__,_|_| |_| |_|\n |___/ \n\"\"\"\n\nlogging.basicConfig(\n filename='logs.txt',\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n\ncredentials = dict()\napi_id=17349\napi_hash='344583e45741c457fe1862106095a5eb'\nurl_list = list()\nsubstitue=2\nwaiting_minutes_between_two_messages = 60\nwaiting_time_to_links = 600 # second\nDEVICE_SETTINTS = {'manufacturer': 'Xiaomi',\n 'model': 'HM 1SW',\n 'android_version': 18,\n 'android_release': '4.3'}\nUSER_AGENT = 'Instagram 10.26.0 Android ({android_version}/{android_release}; 320dpi; 720x1280; {manufacturer}; {model}; armani; qcom; en_US)'.format(\n **DEVICE_SETTINTS)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--filename', help='Configuration filename to using this application')\nargs = parser.parse_args()\nspinner = Spinner()\n\ndef main():\n clear()\n print(commegram)\n if args.filename:\n with 
open(args.filename, 'r') as file:\n jsonFromUser = json.load(file)\n set_credentials(jsonFromUser)\n else:\n get_credentials()\n\n client = TelegramClient(credentials['telegram_username'], api_id, api_hash)\n instagram_api = InstagramAPI(credentials['instagram_username'],credentials['instagram_password'])\n client.start(credentials['phone'], credentials['telegram_password'])\n \n if not is_blank(credentials['proxy']):\n instagram_api.setProxy(credentials['proxy'])\n\n if(instagram_api.login()):\n print(time.asctime(), '-', ' Login succes! started...')\n else:\n logger.info(\"Can't login!\")\n sys.exit('stopped '+datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n print('Here we go!')\n check_last_message_and_wait_if_needed(client)\n run(client, instagram_api)\n client.disconnect()\n sys.exit('See Ya! '+datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n print(time.asctime(), '-', 'Stopped!')\n\ndef check_message_pattern(message):\n patterns = [\n '\\D(\\s*)(x|X)(\\s*)[\\d]([\\d]|\\s*)(\\s*)(@[\\S]+|\\s*)(\\s*)(\\s(\\S+?)\\s*|\\s*)+(?:(?:http|https):\\/\\/)?(?:www.)?(?:instagram.com|instagr.am)\\/p/([A-Za-z0-9-_\\.]+)',\n 'Admin\\s*post:\\s*(?:(?:http|https):\\/\\/)?(?:www.)?(?:instagram.com|instagr.am)\\/p/([A-Za-z0-9-_\\.]+)'\n ]\n for pattern in patterns:\n if re.findall(pattern, message):\n return True\n return False\n \ndef get_url_from_message(message):\n return re.search('(?:(?:http|https):\\/\\/)?(?:www.)?(?:instagram.com|instagr.am)\\/p/([A-Za-z0-9-_\\.]+)',message)[0]\n\n\ndef get_media_id(url):\n req = requests.get('https://api.instagram.com/oembed/?url={}'.format(url))\n media_id = req.json()['media_id']\n return media_id\n\ndef send_action(instagram_api):\n counter = 0\n for url in url_list:\n wait_second = randint(10, 25)\n liked = instagram_api.like(get_media_id(url))\n commented = instagram_api.comment(get_media_id(\n url), random_line('comments.txt'))\n if commented and liked:\n counter = counter+1\n print('\\n['+str(counter)+'] '+'Liked and commented : ' +\n url + ' ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')) \n if counter == int(credentials['comment_size']):\n break \n print('Sleeping ' + str(wait_second) + ' second(s)\\n')\n spinner.start()\n time.sleep(wait_second)\n spinner.stop()\n\ndef prepare_url_list(client):\n for message in client.iter_messages(entity=client.get_entity(credentials['group_username'])):\n if check_message_pattern(str(message.text)):\n url = get_url_from_message(message.text)\n if not check_message_is_mine(str(message.text)):\n url_list.append(url)\n else:\n waitForLinks()\n if len(url_list) == int(credentials['comment_size'])+substitue:\n break\n\ndef send_message(client,instagram_api):\n client.send_message(entity=client.get_entity(credentials['group_username']), message='Dx'+str(credentials['comment_size']) +\n ' '+'@'+credentials['instagram_username']+' ' + get_last_media_url(instagram_api)+'/',link_preview=False)\n print('Message sent '+datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\ndef random_line(fname):\n lines = open(fname).read().splitlines()\n return random.choice(lines)\n\ndef get_last_media_url(instagram_api):\n session = instagram_api.s\n session.headers.update({'Connection': 'close',\n 'Accept': '*/*',\n 'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Cookie2': '$Version=1',\n 'Accept-Language': 'en-US',\n 'User-Agent': USER_AGENT})\n\n data = session.get('https://instagram.com/'+credentials['instagram_username']+'/?__a=1', verify=False)\n feed = json.loads(data.text)\n return 
'https://instagram.com/p/'+feed[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][0][\"node\"][\"shortcode\"]\n\ndef clear():\n # for windows\n if name == 'nt':\n _ = system('cls')\n # for mac and linux(here, os.name is 'posix')\n else:\n _ = system('clear')\n\ndef get_credentials():\n global credentials\n credentials['phone'] = input(\"Please enter your phone with country code (Telegram): \")\n credentials['telegram_username'] = input(\"Please enter your username without '@' (Telegram): \")\n credentials['telegram_password'] = getpass(\"Please enter your password (Telegram): \")\n credentials['group_username'] = input(\"Please enter your Engagement Group's username (Telegram): \")\n credentials['comment_size'] = input(\"Please enter comment size (Default = 10): \")\n if not credentials['comment_size']:\n credentials['comment_size'] = '10'\n credentials['instagram_username'] = input(\n \"Please enter your username without '@' (Instagram): \")\n credentials['instagram_password'] = getpass(\"Please enter your password (Instagram): \")\n credentials['proxy'] = input(\"Set proxy (If you haven't, leave it empty) (Format = user:password@ip:port): \")\n data = {\n 'instagram_username': credentials['instagram_username'],\n 'instagram_password': credentials['instagram_password'],\n 'phone'\t: credentials['phone'],\n 'telegram_username': credentials['telegram_username'],\n 'telegram_password': credentials['telegram_password'],\n 'group_username': credentials['group_username'],\n 'comment_size': credentials['comment_size'],\n 'proxy': credentials['proxy']\n }\n with open('credentials_'+credentials['instagram_username']+'.json', 'w') as file:\n file = json.dump(data,file) \n print('Successfully saved!\\n If you wanna change, check credentials'+credentials['instagram_username']+'.json file.')\n \ndef set_credentials(data):\n for (attr, value) in data.items():\n credentials[attr] = value\n\ndef is_blank(s):\n return not bool(s and s.strip())\n\ndef run(client,instagram_api):\n prepare_url_list(client)\n\n send_action(instagram_api)\n\n get_last_media_url(instagram_api)\n\n send_message(client,instagram_api)\n \ndef check_message_is_mine(message):\n if credentials['instagram_username'] in message:\n return True\n else:\n return False\n\ndef check_last_message_and_wait_if_needed(client):\n for message in client.iter_messages(entity=client.get_entity(credentials['group_username']), from_user='me'):\n diff = datetime.now(timezone.utc)-message.date\n minutes = diff.seconds/60\n if waiting_minutes_between_two_messages > int(minutes) and int(diff.days) < 1:\n waiting_time = waiting_minutes_between_two_messages-int(minutes)\n print('Waiting '+str(waiting_time)+' min...')\n time.sleep(waiting_time * 60)\n break\n\n# def printLog(*args, **kwargs):\n# print(*args, **kwargs)\n# with open('output.out', 'a') as file:\n# print(*args, **kwargs, file=file)\n\ndef waitForLinks():\n url_list.clear()\n print('10 Links not found.Waiting 10 min...')\n time.sleep(waiting_time_to_links)\n main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612836264","text":"#!/usr/bin/python3.7\nimport requests\nfrom bs4 import BeautifulSoup\nimport itertools\nimport time\ndef get_html(url):\n r = s.get(url)\n return r.text\ndef get_token(html):\n soup=BeautifulSoup(html,'lxml')\n token=soup.find_all('script', 
type=\"text/javascript\")[1].text\n return token[token.find('_csrfToken'):].split(',')[0][14:-1]\ndef gen(start_len,stop_len,token):\n ru = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя\"\n digits = \"1234567890 \"\n abc=ru\n l=0\n for i in range(start_len,stop_len):\n res = itertools.permutations(abc ,i)\n for i in res:\n k=''.join(i)\n get_promo(token,k)\n l=l+1\n if l % 1000==0:\n print(l)\ndef get_promo(token,code):\n data={'_token': token,\n 'coupon': code }\n response=s.post('https://spb.pizzahut.ru/cart/coupon/activate',data=data).json()[\"success\"]\n if response==True:\n print(code)\n send_mess(chat_id, code)\ndef send_mess(chat, text):\n session = requests.session()\n params = {'chat_id': chat, 'text': text}\n response = session.post(url + 'sendMessage', data=params)\n return response\nurl='https://api.telegram.org/bot626390266:AAEiOnfvAsaj20sMLwRJOFO82Gu56TWsPu4/'\nchat_id=-1001308921194\nprint('Плехали')\nurl='https://spb.pizzahut.ru'\ns=requests.Session()\ntoken1=get_token(get_html(url))\ngen(5,6,token1)\n","sub_path":"promo.py","file_name":"promo.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"653617425","text":"# Python\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\nimport random\r\nimport pickle\r\n\r\n# Torch\r\nimport torch\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\nimport torch.optim.lr_scheduler as lr_scheduler\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\n\r\n# Torchvison\r\nimport torchvision\r\nimport torchvision.transforms as T\r\n\r\n# Utils\r\nfrom tqdm import tqdm\r\n\r\n# Custom\r\nimport models.resnet224_bbox as resnet\r\nfrom config import *\r\nfrom util.utility import *\r\nfrom util.load_Imgnet224 import Imgnet224\r\nfrom util.load_CAR import CarsDataset\r\nfrom util.transformer import Rescale, CenterCrop, RandomCrop, ToTensor, Normalize\r\n\r\n# Seed\r\nrandom.seed(\"Anonymized\")\r\ntorch.manual_seed(0)\r\ntorch.backends.cudnn.deterministic = True\r\n\r\n# Load Data\r\n_IMAGE_MEAN_VALUE = [0.5, 0.5, 0.5] # [0.485, 0.456, 0.406]\r\n_IMAGE_STD_VALUE = [0.5, 0.5, 0.5] # [0.229, 0.224, 0.225]\r\n\r\n# transform image, labe, bbox altogether\r\ndataset_transforms = dict(\r\n train=T.Compose([Rescale((256, 256)),RandomCrop(224),ToTensor(),Normalize()]),\r\n test=T.Compose([Rescale((224, 224)),ToTensor(),Normalize()])\r\n)\r\nImgnet_normalize = T.Normalize(_IMAGE_MEAN_VALUE,_IMAGE_STD_VALUE)\r\n\r\nimgnet224_transform = T.Compose([T.ToPILImage(),T.RandomResizedCrop(size=224),T.RandomHorizontalFlip(),T.ToTensor(),Imgnet_normalize])\r\nplace365_transform = T.Compose([T.RandomResizedCrop(size=224, padding=4),T.RandomHorizontalFlip(),T.ToTensor(),Imgnet_normalize])\r\n\r\ndata_dir = '/data/anonymized/car/'\r\nmetadata_root = '/data/anonymized/car/devkit/'\r\n\r\n# in_distribution\r\nCar_train_in = CarsDataset(mode='train',data_dir=data_dir,metas=os.path.join(metadata_root, 'cars_train_annos.mat'),transform=dataset_transforms['train'],limit=NUM_TRAIN)\r\nprint(\"len(Car_train_in): \", len(Car_train_in))\r\n\r\nCar_test = CarsDataset(mode='test',data_dir=data_dir,metas=os.path.join(metadata_root, 'cars_test_annos.mat'),transform=dataset_transforms['test'],limit=None)\r\n\r\n# ood\r\nimgnet224_train_ood = Imgnet224(ood = True, train=True, transform=imgnet224_transform, in_class=[])\r\n\r\nfolder = '/data/anonymized/places365/places365_standard/'\r\n#traindir = 
os.path.join(folder, 'train')\r\nvaldir = os.path.join(folder, 'val')\r\nplace365_train_ood = torchvision.datasets.ImageFolder(valdir, transform=place365_transform)\r\n#\r\ndef IoULoss(pred_box, gt_box):\r\n xymin = torch.max(pred_box, gt_box) # [:,[0,1]]\r\n xymax = torch.min(pred_box, gt_box) # [:,[2,3]]\r\n\r\n ixmin = xymin[:, 0]\r\n iymin = xymin[:, 1]\r\n ixmax = xymax[:, 2]\r\n iymax = xymax[:, 3]\r\n\r\n iw = torch.max(ixmax - ixmin + 1., torch.tensor(0.).cuda())\r\n ih = torch.max(iymax - iymin + 1., torch.tensor(0.).cuda())\r\n\r\n # 2. calculate the area of inters\r\n inters = iw * ih\r\n\r\n # 3. calculate the area of union\r\n unis = ((pred_box[:, 2] - pred_box[:, 0] + 1.) * (pred_box[:, 3] - pred_box[:, 1] + 1.) +\r\n (gt_box[:, 2] - gt_box[:, 0] + 1.) * (gt_box[:, 3] - gt_box[:, 1] + 1.) -\r\n inters)\r\n\r\n ious = inters/unis\r\n return 1-torch.mean(ious)\r\n\r\ndef DIoULoss(pred_box, gt_box):\r\n pred_center_x = (pred_box[:, 2] + pred_box[:, 0]) / 2\r\n pred_center_y = (pred_box[:, 3] + pred_box[:, 1]) / 2\r\n\r\n gt_center_x = (gt_box[:, 2] + gt_box[:, 0]) / 2\r\n gt_center_y = (gt_box[:, 3] + gt_box[:, 1]) / 2\r\n\r\n dist_center = (pred_center_x-gt_center_x)**2+(pred_center_y-gt_center_y)**2\r\n\r\n a = (pred_box[:, 0] - gt_box[:, 2]) ** 2 + (pred_box[:, 1] - gt_box[:, 3]) ** 2\r\n b = (pred_box[:, 0] - gt_box[:, 2]) ** 2 + (pred_box[:, 3] - gt_box[:, 1]) ** 2\r\n c = (pred_box[:, 2] - gt_box[:, 0]) ** 2 + (pred_box[:, 1] - gt_box[:, 3]) ** 2\r\n d = (pred_box[:, 2] - gt_box[:, 0]) ** 2 + (pred_box[:, 3] - gt_box[:, 1]) ** 2\r\n\r\n a = a.reshape((-1, a.size(0)))\r\n b = b.reshape((-1, b.size(0)))\r\n c = c.reshape((-1, c.size(0)))\r\n d = d.reshape((-1, d.size(0)))\r\n\r\n dist_diag = torch.cat((a,b,c,d),1)\r\n\r\n dist_diag_min = torch.max(dist_diag, 1).values\r\n\r\n return torch.mean(dist_center/dist_diag_min)\r\n\r\ndef train_epoch(models, criterion, optimizers, dataloaders, epoch, weight):\r\n models['backbone'].train()\r\n\r\n for data in tqdm(dataloaders['train_in'], leave=False, total=len(dataloaders['train_in'])):\r\n inputs = data[0].type(torch.FloatTensor).cuda()\r\n bboxs = data[1].type(torch.FloatTensor).cuda()\r\n\r\n optimizers['backbone'].zero_grad()\r\n\r\n out_c, out_r, features = models['backbone'](inputs)\r\n\r\n coord_loss_r = criterion[\"regression\"](out_r, bboxs)\r\n iou_loss = IoULoss(out_r, bboxs)\r\n #diou_loss = DIoULoss(out_r, bboxs)\r\n\r\n loss = coord_loss_r + 0.4*iou_loss #+ 0.4*diou_loss # + weight * Blackhole_loss\r\n #print(coord_loss_r) #, iou_loss, diou_loss)\r\n\r\n loss.backward()\r\n optimizers['backbone'].step()\r\n\r\n del data\r\n\r\n#\r\ndef train(models, criterion, optimizers, schedulers, dataloaders, num_epochs, weight):\r\n print('>> Train a Model.')\r\n logs = []\r\n for epoch in range(num_epochs):\r\n schedulers['backbone'].step()\r\n train_epoch(models, criterion, optimizers, dataloaders, epoch, weight)\r\n\r\n if epoch % 5 == 4:\r\n iou = test(models, dataloaders)\r\n print('epoch: {}, iou: {}'.format(epoch, iou))\r\n\r\n # save logs\r\n logs.append([epoch, iou])\r\n #np.savetxt('../logs_sup/Car_localization_Standard_n2000_per_epoch_v1.txt', logs, delimiter=',', fmt=\"%.4f\")\r\n print('>> Finished.')\r\n\r\ndef get_iou(pred_box, gt_box):\r\n \"\"\"\r\n pred_box : the coordinate for predict bounding box\r\n gt_box : the coordinate for ground truth bounding box\r\n return : the iou score\r\n the left-down coordinate of pred_box:(pred_box[0], pred_box[1])\r\n the right-up coordinate of pred_box:(pred_box[2], 
pred_box[3])\r\n \"\"\"\r\n # 1.get the coordinate of inters\r\n\r\n xymin = torch.max(pred_box, gt_box)#[:,[0,1]]\r\n ixmin = xymin[:,0]\r\n iymin = xymin[:,1]\r\n xymax = torch.min(pred_box, gt_box)#[:,[2,3]]\r\n ixmax = xymax[:,2]\r\n iymax = xymax[:,3]\r\n\r\n #print(xymin)\r\n #print(xymax)\r\n\r\n iw = torch.max(ixmax-ixmin+1., torch.tensor(0.).cuda())\r\n ih = torch.max(iymax-iymin+1., torch.tensor(0.).cuda())\r\n\r\n # 2. calculate the area of inters\r\n inters = iw*ih\r\n\r\n # 3. calculate the area of union\r\n unis = ((pred_box[:, 2] - pred_box[:, 0] + 1.) * (pred_box[:, 3] - pred_box[:, 1] + 1.) +\r\n (gt_box[:, 2] - gt_box[:, 0] + 1.) * (gt_box[:, 3] - gt_box[:, 1] + 1.) -\r\n inters)\r\n\r\n #print(gt_box, pred_box)\r\n #print(inters, unis)\r\n\r\n for i, u in enumerate(unis.cpu()):\r\n if u.data < 0:\r\n print(i)\r\n\r\n # 4. calculate the overlaps between pred_box and gt_box\r\n ious = inters / unis\r\n\r\n #print(ious)\r\n\r\n return torch.mean(ious)\r\n\r\n#\r\ndef test(models, dataloaders):\r\n models['backbone'].eval()\r\n\r\n total_iou = 0\r\n\r\n f = nn.Softmax(dim=1)\r\n to_np = lambda x: x.data.cpu().tolist()\r\n with torch.no_grad():\r\n for (inputs, bbox) in dataloaders['test_in']:\r\n\r\n inputs = inputs.type(torch.FloatTensor).cuda()\r\n bbox = bbox.type(torch.FloatTensor).cuda()\r\n\r\n out_c, out_r, features = models['backbone'](inputs)\r\n\r\n smax, preds = torch.max(f(out_c.data), 1)\r\n pred_bbox = out_r.data\r\n\r\n batch_iou = get_iou(pred_bbox, bbox)\r\n total_iou += batch_iou.item()\r\n\r\n #acc = 100 * correct / total\r\n total_iou = total_iou/len(dataloaders['test_in'])\r\n\r\n return total_iou\r\n\r\n\r\ndef get_stratified_indices(labels, n):\r\n C = max(labels) + 1 # 10\r\n sampled_idx = []\r\n for c in range(C):\r\n idx = []\r\n for i, l in enumerate(labels):\r\n if l == c:\r\n idx += [i]\r\n random.shuffle(idx)\r\n sampled_idx += idx[:n]\r\n print(\"========Stratified Sampling========\")\r\n print(\"Total: {}, Class: {}, per class: {}\".format(len(sampled_idx), C, n))\r\n return sampled_idx\r\n\r\n\r\n'''\r\n#save as pickle\r\nwith open('../data/Imgnet224/imgnet224_train_in.txt', 'wb') as f:\r\n pickle.dump(imgnet224_train_in, f)\r\nwith open('../data/Imgnet224/imgnet224_train_ood.txt', 'wb') as f:\r\n pickle.dump(imgnet224_train_ood, f)\r\nwith open('../data/Imgnet224/imgnet224_test.txt', 'wb') as f:\r\n pickle.dump(imgnet224_test, f)\r\n\r\n\r\n#read from pickle\r\nwith open('../data/Imgnet224/imgnet224_train_in.txt', 'rb') as f:\r\n imgnet224_train_in = pickle.load(f)\r\nwith open('../data/Imgnet224/imgnet224_train_ood.txt', 'rb') as f:\r\n imgnet224_train_ood = pickle.load(f)\r\nwith open('../data/Imgnet224/imgnet224_test.txt', 'rb') as f:\r\n imgnet224_test = pickle.load(f)\r\n'''\r\n\r\ntrain_in = Car_train_in\r\ntest_in = Car_test\r\n\r\n# AL Main\r\nif __name__ == '__main__':\r\n indices_in = list(range(len(train_in)))\r\n random.shuffle(indices_in)\r\n print(\"# of train_in samples\", len(train_in))\r\n\r\n train_in_loader = DataLoader(train_in, batch_size=BATCH,\r\n sampler=SubsetRandomSampler(indices_in),\r\n pin_memory=True)\r\n\r\n test_in_loader = DataLoader(test_in, batch_size=BATCH)\r\n\r\n dataloaders = {'train_in': train_in_loader, 'test_in': test_in_loader}\r\n\r\n # Model\r\n resnet34 = resnet.ResNet34(num_classes=NUM_CLASS).cuda()\r\n # If use a Pre-train Model\r\n '''\r\n pretrained_model = torchvision.models.resnet34(pretrained=True)\r\n\r\n for name, param in resnet34.named_parameters():\r\n for n, p in 
pretrained_model.named_parameters():\r\n if name not in ['linear_classify.weight', 'linear_classify.weight', 'linear_reg.bias']:\r\n if n not in ['linear.weight', 'linear.bias']:\r\n param = p\r\n '''\r\n models = {'backbone': resnet34}\r\n torch.backends.cudnn.benchmark = False\r\n\r\n # Loss, criterion and scheduler (re)initialization\r\n criterion_c = nn.CrossEntropyLoss(reduction='mean')\r\n #criterion_r = nn.MSELoss(reduction='mean')\r\n criterion_r = nn.SmoothL1Loss(reduction='mean').cuda()\r\n criterion = {\"classification\": criterion_c, \"regression\": criterion_r}\r\n optim_backbone = optim.SGD(models['backbone'].parameters(), lr=LR,momentum=MOMENTUM, weight_decay=WDECAY)\r\n sched_backbone = lr_scheduler.MultiStepLR(optim_backbone, milestones=MILESTONES)\r\n\r\n optimizers = {'backbone': optim_backbone}\r\n schedulers = {'backbone': sched_backbone}\r\n\r\n # Training and test\r\n train(models, criterion, optimizers, schedulers, dataloaders, EPOCH, WEIGHT)\r\n iou = test(models, dataloaders)\r\n print('Test iou: ', iou)","sub_path":"model/bbox_Standard_CAR.py","file_name":"bbox_Standard_CAR.py","file_ext":"py","file_size_in_byte":10627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161704860","text":"import argparse\nimport os\nimport pickle\nimport time\n\nimport dgl\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom dataset import LanderDataset\nfrom models import LANDER\n\n###########\n# ArgParser\nparser = argparse.ArgumentParser()\n\n# Dataset\nparser.add_argument(\"--data_path\", type=str, required=True)\nparser.add_argument(\"--levels\", type=str, default=\"1\")\nparser.add_argument(\"--faiss_gpu\", action=\"store_true\")\nparser.add_argument(\"--model_filename\", type=str, default=\"lander.pth\")\n\n# KNN\nparser.add_argument(\"--knn_k\", type=str, default=\"10\")\nparser.add_argument(\"--num_workers\", type=int, default=0)\n\n# Model\nparser.add_argument(\"--hidden\", type=int, default=512)\nparser.add_argument(\"--num_conv\", type=int, default=1)\nparser.add_argument(\"--dropout\", type=float, default=0.0)\nparser.add_argument(\"--gat\", action=\"store_true\")\nparser.add_argument(\"--gat_k\", type=int, default=1)\nparser.add_argument(\"--balance\", action=\"store_true\")\nparser.add_argument(\"--use_cluster_feat\", action=\"store_true\")\nparser.add_argument(\"--use_focal_loss\", action=\"store_true\")\n\n# Training\nparser.add_argument(\"--epochs\", type=int, default=100)\nparser.add_argument(\"--batch_size\", type=int, default=1024)\nparser.add_argument(\"--lr\", type=float, default=0.1)\nparser.add_argument(\"--momentum\", type=float, default=0.9)\nparser.add_argument(\"--weight_decay\", type=float, default=1e-5)\n\nargs = parser.parse_args()\nprint(args)\n\n###########################\n# Environment Configuration\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n\n##################\n# Data Preparation\nwith open(args.data_path, \"rb\") as f:\n features, labels = pickle.load(f)\n\nk_list = [int(k) for k in args.knn_k.split(\",\")]\nlvl_list = [int(l) for l in args.levels.split(\",\")]\ngs = []\nnbrs = []\nks = []\nfor k, l in zip(k_list, lvl_list):\n dataset = LanderDataset(\n features=features,\n labels=labels,\n k=k,\n levels=l,\n faiss_gpu=args.faiss_gpu,\n )\n gs += [g for g in dataset.gs]\n ks += [k for g in dataset.gs]\n nbrs += [nbr for nbr in dataset.nbrs]\n\nprint(\"Dataset Prepared.\")\n\n\ndef set_train_sampler_loader(g, 
k):\n fanouts = [k - 1 for i in range(args.num_conv + 1)]\n sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)\n # fix the number of edges\n train_dataloader = dgl.dataloading.DataLoader(\n g,\n torch.arange(g.num_nodes()),\n sampler,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=args.num_workers,\n )\n return train_dataloader\n\n\ntrain_loaders = []\nfor gidx, g in enumerate(gs):\n train_dataloader = set_train_sampler_loader(gs[gidx], ks[gidx])\n train_loaders.append(train_dataloader)\n\n##################\n# Model Definition\nfeature_dim = gs[0].ndata[\"features\"].shape[1]\nmodel = LANDER(\n feature_dim=feature_dim,\n nhid=args.hidden,\n num_conv=args.num_conv,\n dropout=args.dropout,\n use_GAT=args.gat,\n K=args.gat_k,\n balance=args.balance,\n use_cluster_feat=args.use_cluster_feat,\n use_focal_loss=args.use_focal_loss,\n)\nmodel = model.to(device)\nmodel.train()\n\n#################\n# Hyperparameters\nopt = optim.SGD(\n model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n)\n\n# keep num_batch_per_loader the same for every sub_dataloader\nnum_batch_per_loader = len(train_loaders[0])\ntrain_loaders = [iter(train_loader) for train_loader in train_loaders]\nnum_loaders = len(train_loaders)\nscheduler = optim.lr_scheduler.CosineAnnealingLR(\n opt, T_max=args.epochs * num_batch_per_loader * num_loaders, eta_min=1e-5\n)\n\nprint(\"Start Training.\")\n\n###############\n# Training Loop\nfor epoch in range(args.epochs):\n loss_den_val_total = []\n loss_conn_val_total = []\n loss_val_total = []\n for batch in range(num_batch_per_loader):\n for loader_id in range(num_loaders):\n try:\n minibatch = next(train_loaders[loader_id])\n except:\n train_loaders[loader_id] = iter(\n set_train_sampler_loader(gs[loader_id], ks[loader_id])\n )\n minibatch = next(train_loaders[loader_id])\n input_nodes, sub_g, bipartites = minibatch\n sub_g = sub_g.to(device)\n bipartites = [b.to(device) for b in bipartites]\n # get the feature for the input_nodes\n opt.zero_grad()\n output_bipartite = model(bipartites)\n loss, loss_den_val, loss_conn_val = model.compute_loss(\n output_bipartite\n )\n loss_den_val_total.append(loss_den_val)\n loss_conn_val_total.append(loss_conn_val)\n loss_val_total.append(loss.item())\n loss.backward()\n opt.step()\n if (batch + 1) % 10 == 0:\n print(\n \"epoch: %d, batch: %d / %d, loader_id : %d / %d, loss: %.6f, loss_den: %.6f, loss_conn: %.6f\"\n % (\n epoch,\n batch,\n num_batch_per_loader,\n loader_id,\n num_loaders,\n loss.item(),\n loss_den_val,\n loss_conn_val,\n )\n )\n scheduler.step()\n print(\n \"epoch: %d, loss: %.6f, loss_den: %.6f, loss_conn: %.6f\"\n % (\n epoch,\n np.array(loss_val_total).mean(),\n np.array(loss_den_val_total).mean(),\n np.array(loss_conn_val_total).mean(),\n )\n )\n torch.save(model.state_dict(), args.model_filename)\n\ntorch.save(model.state_dict(), args.model_filename)\n","sub_path":"examples/pytorch/hilander/train_subg.py","file_name":"train_subg.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"555656474","text":"# -*- coding: utf-8 -*-\n# pylint: disable=C0103, E0401\n\n\"\"\"rulemanager URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework.authtoken import views as drf_views\nfrom django.conf import settings\n\n# Only enable the SSO login links when an SSO client is present in settings\n# Otherwise use the local login page\nif 'django_cas_ng' in settings.INSTALLED_APPS:\n    from django_cas_ng import views\nelse:\n    from django.contrib.auth import views\n    from rulemanagerweb import forms\n\nurlpatterns = [\n\n    # Admin site\n    url(r'^admin/', admin.site.urls),\n\n    # Rest API authentication\n    url(r'^api/api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n    # Token Infra\n    url(r'^api/token/', drf_views.obtain_auth_token),\n\n    # RESTful API\n    url(r'^api/', include('api.urls')),\n\n    # django select2\n    url(r'^select2/', include('django_select2.urls')),\n\n    # Web pages\n    url(r'^', include('rulemanagerweb.urls')),\n]\n\nif 'django_cas_ng' in settings.INSTALLED_APPS:\n    urlpatterns += [\n        # Login and Logout\n        url(r'^accounts/login', views.login, name='login'),\n        url(r'^accounts/logout', views.logout, name='logout'),\n    ]\nelse:\n    urlpatterns += [\n        # Local Login and Logout\n        url(r'^accounts/login', views.login, {\n            'template_name': 'rulemanagerweb/login.html',\n            'authentication_form': forms.LoginForm,\n            'extra_context': {\n                'title': '登录'\n            }\n        }, name='login'),\n        url(r'^accounts/logout', views.logout, {\n            'next_page': '/',\n        }, name='logout'),\n    ]\n","sub_path":"rulemanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"116416622","text":"\"\"\"\nParameters\nvar: square of the standard deviation from the mean \nexpiry: The period of the contract\nvolatility: The fluctuation of the stock price\nmean: mean of the stock price\nstrike: price at which the stocks can be sold or bought (at expiry)\nspot: The current market price when the stock is bought or sold\ninterest rate: Interest charged per year\nmc_paths: Number of simulated paths\n\"\"\"\n\nimport random\nimport math\nfrom numpy import zeros\nfrom scipy.stats import norm\nfrom six.moves import input\nimport matplotlib.pyplot as plt\n# import matlab.engine\n# eng = matlab.engine.start_matlab()\n\nclass Server:\n    def __init__(self, expiry, strike, spot, vol, rate, mc_paths):\n        self.expiry = expiry\n        self.strike = strike\n        self.spot = spot\n        self.vol = vol\n        self.rate = rate\n        self.mc_paths = mc_paths\n\n    def display(self):\n        print(\"expiry : \",self.expiry)\n        print(\"Strike price : \",self.strike)\n        print(\"Spot value : \",self.spot)\n        print(\"Volatility : \",self.vol)\n        print(\"Rate of interest : \",self.rate)\n        print(\"Number of iterations : \",self.mc_paths)\n\n    def monte_carlo_pricer(self):\n        \"\"\"\n        Monte Carlo methods are unique compared to other pricing techniques because they generate future asset prices.\n        The function simulates mc_paths terminal stock values, plots stock_list, and averages the call payoffs.\n        The average payoff is then discounted by exp(-rate * expiry) using the math module.\n        \"\"\"\n        var = self.vol * self.vol * self.expiry\n        std_dev = math.sqrt(var)\n        ito = -0.5 * var\n        spot_changed 
= self.spot * math.exp(self.rate * self.expiry + ito)\n        sum = 0\n        stock_list = []\n        for i in range(0, self.mc_paths):\n            normal = random.normalvariate(0, 1)\n            stock_val = spot_changed * math.exp(std_dev * normal)\n            stock_list.append(stock_val)\n            sum += max(stock_val - self.strike, 0.0)\n        result = sum / self.mc_paths\n        plot = plt.plot(stock_list)\n        result *= math.exp(-self.rate * self.expiry)\n        return result\n\n    def black_scholes(self):\n        \"\"\"\n        The model assumes the price of heavily traded assets follows a geometric Brownian motion with constant volatility.\n        The Black-Scholes call price combines the stock price and the discounted strike, each weighted by the cumulative standard normal distribution at d1 and d2.\n        \"\"\"\n        # d1 = (ln(S/K) + (r + sigma^2/2)*T) / (sigma*sqrt(T))\n        d_1 = (math.log(self.spot / self.strike) + ((self.rate + (self.vol * self.vol)/2) *\n                  self.expiry)) / (self.vol * math.sqrt(self.expiry))\n        d_2 = d_1 - self.vol * math.sqrt(self.expiry)\n        # if self.option_type.lower() == 'call':\n        result = self.spot * norm.cdf(d_1) - self.strike * math.exp(-self.rate * self.expiry) * norm.cdf(d_2)\n        return result\n        #elif self.option_type.lower() == 'put':\n        #    result = self.strike * math.exp(-self.rate * expiry) * norm.cdf(-d_2) - self.spot * norm.cdf(-d_1)\n        #    return result\n    \n    # def Price_calculation():\n    #    standard_deviation = self.variance ** (0.5)\n    #    time_steps = 1/365\n    #    Asset_path = eng.AssetPaths(self.spot, self.mean, standard_deviation, self.expiry, self.number_of_paths);\n    #    # option = input(\"Mention your choice (call or put) : \")\n    #    # option == \"call\"\n    #    call_pay = max(mean(Asset_path-self.spot,0)\n    #    callPrice = mean(call_pay)*math.exp(-self.interest_rate * self.expiry)\n    #    return callPrice\n    #    # else\n    #    # put_pay = max(self.spot - mean(Asset_path))\n    #    # putPrice = mean(put_pay)*exp(-self.interest_rate * self.expiry)\n    #    # return putPrice\n\n\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"516151241","text":"import numpy as np\nimport cv2\n\nblack=np.zeros([150,200,1],'uint8')\ncv2.imshow('black',black)\n\nwhite=np.ones([150,200,3],'uint8')\nwhite[0:50,:,0]*=255\nwhite[50:100,:,1]*=255\nwhite[100:150,:,2]*=255\ncv2.imshow('white',white)\nprint(white[0:50,:,1])\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"18161322","text":"# encoding: utf-8\nfrom django.core.management.base import BaseCommand, CommandError\n\nimport json\nimport urllib2\nfrom datetime import datetime\n\nfrom bulls.models import Player\n\n\nclass Command(BaseCommand):\n    # Show this when the user types help\n    help = \"Command for fetching playerdata\"\n\n    # A command must define handle()\n    def handle(self, *args, **options):\n        self.stdout.write(\"Fetching playerdata.\")\n        json_string = urllib2.urlopen('http://tilastopalvelu.fi/fb/modules/mod_statisticsplayers/helper/statistics.php?statgroupid=191').read()\n        data = json.loads(json_string,\"utf-8\")\n        # print data.items()[0]['games']\n        for player in data.items()[0][1]:\n            if player['TeamName'] == 'UHV Bulls':\n                name = player['PlayerName']\n                first_name_lowercase = name.split()[1].lower()\n                first_name = first_name_lowercase[:1].upper() + first_name_lowercase[1:]\n                last_name_lowercase = name.split()[0].lower()\n                last_name = last_name_lowercase[:1].upper() + last_name_lowercase[1:]\n                player_id = 
player['UniqueID']\n goals = player['Goals']\n assists = player['Assists']\n points = player['Points']\n penalties = player['PenaltyMin']\n\n if Player.objects.filter(player_id=player_id).first():\n player = Player.objects.filter(player_id=player_id).first()\n self.stdout.write(\"Player \"+ player.name +\" exists. Updating player stats.\")\n player.points = points\n player.goals = goals\n player.assists = assists\n player.penalties = penalties\n player.save()\n else:\n player = Player()\n player.player_id = player_id\n player.first_name = first_name\n player.last_name = last_name\n self.stdout.write(\"Creating new player: \" + player.first_name+ \" \" + player.last_name + \".\")\n player.points = points\n player.goals = goals\n player.assists = assists\n player.penalties = penalties\n player.position = \"Hyökkääjä\"\n player.save()\n\n self.stdout.write(\"Playerdata saved to database.\")","sub_path":"bulls/management/commands/getplayerdata.py","file_name":"getplayerdata.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"540594811","text":"from flask import Flask, abort, request\nfrom req import get_weather\nfrom datetime import datetime\nfrom news import all_news\nfrom flask import render_template\nimport json\n\ncity_id=524901\napikey='cf90636aa494e41b0365ce34aa48722a'\n\n\napp=Flask(__name__)\n\n@app.route('/')\ndef index():\n url='http://api.openweathermap.org/data/2.5/weather?id=%s&APPID=%s&units=metric'%(city_id,apikey)\n weather=get_weather(url)\n cur_date=datetime.now().strftime('%d.%m.%Y')\n print(cur_date)\n result='
<p><b>Temperature: %s</b></p>' % weather ['main'] ['temp'] # <p> - new paragraph, <b> - bold text\n    result+='<p><b>City: %s</b></p>' % weather ['name']\n    result+='<p><b>Date: %s</b></p>' % cur_date\n    return result \n\n@app.route('/news')\ndef all_the_news():\n    colors=['green','black','red', 'blue']\n    try:\n        limit=int(request.args.get('limit'))\n    except:\n        limit=10\n    color=request.args.get('color') if request.args.get('color') in colors else 'yellow'\n\n    return '<p style=\"color:%s\">news: %s</p>' % (color,limit)\n\n\n\n@app.route('/news/<int:news_id>')\ndef news_by_id(news_id):\n    news_to_show=[news for news in all_news if news ['id'] == news_id] \n    if len(news_to_show)==1:\n        result='<h1>%(title)s</h1><p>%(date)s</p><p>%(text)s</p>
'\n        result=result%news_to_show[0]\n        return result # a view must always return a string\n    else:\n        abort(404)\n\n\nif __name__=='__main__':\n    # app.run(port=5010) # change the port to a free one\n    app.run(debug=True) # the server will reload itself after every code change.\n\n\n\n","sub_path":"lesson3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"404583923","text":"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of streamable Keyword Spotting models implemented in Keras.\"\"\"\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom pyiree.tf.support import tf_test_utils\nfrom pyiree.tf.support import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom kws_streaming.layers import modes\nfrom kws_streaming.models import model_flags\nfrom kws_streaming.models import model_params\nfrom kws_streaming.models import models\nfrom kws_streaming.models import utils\n\nFLAGS = flags.FLAGS\n\nALL_MODELS = list(model_params.HOTWORD_MODEL_PARAMS.keys())\nMODELS_HELP = [f\"'{name}'\" for name in ALL_MODELS]\nMODELS_HELP = f'{\", \".join(MODELS_HELP[:-1])}, or {MODELS_HELP[-1]}'\n\nflags.DEFINE_string(\n    'model', 'svdf', f'Name of the model to compile. 
Either {MODELS_HELP}.\\n'\n 'See https://github.com/google-research/google-research/blob/master/kws_streaming/models/models.py#L38-L58'\n)\nflags.DEFINE_enum('mode', 'non_streaming',\n ['non_streaming', 'internal_streaming', 'external_streaming'],\n 'Mode to execute the model in.')\n\nMODE_ENUM_TO_MODE = {\n 'non_streaming': modes.Modes.NON_STREAM_INFERENCE,\n 'internal_streaming': modes.Modes.STREAM_INTERNAL_STATE_INFERENCE,\n 'external_streaming': modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE,\n}\n\n\nclass KeywordSpottingModule(tf_test_utils.TestModule):\n\n def __init__(self):\n super().__init__()\n self.m = utils.get_model_with_default_params(FLAGS.model,\n MODE_ENUM_TO_MODE[FLAGS.mode])\n\n call = lambda *args: self.m(*args, training=False)\n input_signature = [tf.TensorSpec(tensor.shape) for tensor in self.m.inputs]\n self.call = tf_test_utils.tf_function_unit_test(\n input_signature=input_signature, name=\"call\", atol=1e-5)(call)\n\n\nclass KeywordSpottingTest(tf_test_utils.TracedModuleTestCase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._modules = tf_test_utils.compile_tf_module(\n KeywordSpottingModule,\n exported_names=['call'],\n relative_artifacts_dir=os.path.join('kws_streaming', FLAGS.model,\n FLAGS.mode))\n\n\ndef main(argv):\n del argv # Unused.\n if hasattr(tf, 'enable_v2_behavior'):\n tf.enable_v2_behavior()\n\n if FLAGS.model not in ALL_MODELS:\n raise ValueError(f'Unsupported model: {FLAGS.model}.\\n'\n f'Expected one of {MODELS_HELP}.')\n\n KeywordSpottingTest.generate_unit_tests(KeywordSpottingModule)\n tf.test.main()\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"integrations/tensorflow/e2e/keras/keyword_spotting_streaming_test.py","file_name":"keyword_spotting_streaming_test.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"51094705","text":"import xlwt\nfrom datetime import datetime\nfrom openerp.osv import orm\nfrom openerp import tools\nfrom openerp.addons.report_xls.report_xls import report_xls\nfrom openerp.addons.report_xls.utils import rowcol_to_cell, _render\nfrom .report_bank_book import dym_bank_book_report_print\nfrom openerp.tools.translate import translate\nimport logging\n_logger = logging.getLogger(__name__)\nimport time\n\n_ir_translation_name = 'dym.bank.book'\n\nclass dym_bank_book_print_xls(dym_bank_book_report_print):\n\n def __init__(self, cr, uid, name, context):\n super(dym_bank_book_print_xls, self).__init__(\n cr, uid, name, context=context)\n moveline_obj = self.pool.get('account.move.line')\n self.context = context\n wl_overview = moveline_obj._report_xls_bank_book_fields(\n cr, uid, context)\n self.localcontext.update({\n 'datetime': datetime,\n 'wanted_list_overview': wl_overview,\n '_': self._,\n })\n\n def _(self, src):\n lang = self.context.get('lang', 'en_US')\n return translate(\n self.cr, _ir_translation_name, 'report', lang, src) or src\n\n\nclass bank_book_report_xls(report_xls):\n\n def __init__(self, name, table, rml=False,\n parser=False, header=True, store=False):\n super(bank_book_report_xls, self).__init__(\n name, table, rml, parser, header, store)\n\n # Cell Styles\n _xs = self.xls_styles\n # header\n\n # Report Column Headers format\n rh_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']\n self.rh_cell_style = xlwt.easyxf(rh_cell_format)\n self.rh_cell_style_center = xlwt.easyxf(\n rh_cell_format + _xs['center'])\n self.rh_cell_style_right = 
xlwt.easyxf(rh_cell_format + _xs['right'])\n\n # Partner Column Headers format\n fill_blue = 'pattern: pattern solid, fore_color 27;'\n ph_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']\n self.ph_cell_style = xlwt.easyxf(ph_cell_format)\n self.ph_cell_style_center = xlwt.easyxf(ph_cell_format + _xs['center'] )\n self.ph_cell_style_decimal = xlwt.easyxf(\n ph_cell_format + _xs['right'],\n num_format_str=report_xls.decimal_format)\n\n # Partner Column Data format\n pd_cell_format = _xs['borders_all']\n self.pd_cell_style = xlwt.easyxf(pd_cell_format)\n self.pd_cell_style_center = xlwt.easyxf(\n pd_cell_format + _xs['center'])\n self.pd_cell_style_date = xlwt.easyxf(\n pd_cell_format + _xs['left'],\n num_format_str=report_xls.date_format)\n self.pd_cell_style_decimal = xlwt.easyxf(\n pd_cell_format + _xs['right'],\n num_format_str=report_xls.decimal_format)\n self.pd_cell_style_decimal_fill = xlwt.easyxf(\n pd_cell_format + _xs['right'] + _xs['fill'] + _xs['bold'] ,\n num_format_str=report_xls.decimal_format)\n # totals\n rt_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']\n self.rt_cell_style = xlwt.easyxf(rt_cell_format)\n self.rt_cell_style_right = xlwt.easyxf(rt_cell_format + _xs['right'])\n self.rt_cell_style_decimal = xlwt.easyxf(\n rt_cell_format + _xs['right'],\n num_format_str=report_xls.decimal_format)\n\n # XLS Template\n self.col_specs_template_overview = {\n 'no': {\n 'header': [1, 5, 'text', _render(\"_('No')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'number', _render(\"p['no']\"),None,self.pd_cell_style_center],\n 'totals': [1, 5, 'text', None]}, \n 'branch_status': {\n 'header': [1, 20, 'text', _render(\"_('Branch Status')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['branch_status'] or ''\")],\n 'totals': [1, 0, 'text', None]}, \n 'branch_name': {\n 'header': [1, 20, 'text', _render(\"_('Branch')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['branch_name'] or ''\")],\n 'totals': [1, 0, 'text', None]}, \n 'account_code': {\n 'header': [1, 20, 'text', _render(\"_('No Account')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['account_code'] or ''\")],\n 'totals': [1, 0, 'text', None]}, \n 'account_name': {\n 'header': [1, 20, 'text', _render(\"_('Keterangan')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['account_name'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'partner_name': {\n 'header': [1, 20, 'text', _render(\"_('Partner')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['partner_name'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'finance_company': {\n 'header': [1, 20, 'text', _render(\"_('Finance Company')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['finance_company'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'name': {\n 'header': [1, 20, 'text', _render(\"_('Name')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['name'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'ref': {\n 'header': [1, 20, 'text', _render(\"_('Ref')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['ref'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'date': {\n 'header': [1, 15, 'text', _render(\"_('Tanggal')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['date']\")],\n 'totals': [1, 0, 'text', None]}, \n 'value_date': {\n 'header': [1, 15, 'text', _render(\"_('Value 
Date')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['value_date']\")],\n 'totals': [1, 0, 'text', None]}, \n 'analytic_combination': {\n 'header': [1, 20, 'text', _render(\"_('Analytic Combination')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['analytic_combination'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'analytic_1': {\n 'header': [1, 20, 'text', _render(\"_('Analytic Company')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['analytic_1'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'analytic_2': {\n 'header': [1, 20, 'text', _render(\"_('Analytic Bisnis Unit')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['analytic_2'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'analytic_3': {\n 'header': [1, 20, 'text', _render(\"_('Analytic Branch')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['analytic_3'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'analytic_4': {\n 'header': [1, 20, 'text', _render(\"_('Analytic Cost Center')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'text', _render(\"p['analytic_4'] or ''\")],\n 'totals': [1, 0, 'text', None]},\n 'debit': {\n 'header': [1, 20, 'text', _render(\"_('Debit')\"),None,self.rh_cell_style_center],\n 'lines': [1, 0, 'number', _render(\"p['debit'] or ''\"),None,self.pd_cell_style_decimal],\n 'totals': [1, 0, 'text', None]}, \n 'credit': {\n 'header': [1, 20, 'text', _render(\"_('Credit')\"),None,self.rh_cell_style_center],\n 'lines': [1, 20, 'number', _render(\"p['credit'] or ''\"),None,self.pd_cell_style_decimal],\n 'totals': [1, 0, 'text', None]}, \n } \n\n # XLS Template\n self.col_specs_template_details = {\n }\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n\n wanted_list_overview = _p.wanted_list_overview\n _ = _p._\n \n username = self.pool.get('res.users').browse(self.cr,self.uid,self.uid).name\n for r in _p.reports:\n title_short = r['title_short'].replace('/', '-')\n ws_o = wb.add_sheet(title_short)\n \n for ws in [ws_o]:\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0 # Landscape\n ws.fit_width_to_pages = 1\n row_pos_o = 0\n row_pos_d = 0\n\n # set print header/footer\n for ws in [ws_o]:\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n\n # COMPANY NAME\n cell_style_company = xlwt.easyxf(_xs['left'])\n c_specs_o = [\n ('company_name', 1, 0, 'text', str(_p.company.name)),\n ]\n row_data = self.xls_row_template(c_specs_o, ['company_name'])\n row_pos_o += self.xls_write_row(\n ws_o, row_pos_o, row_data, row_style=cell_style_company)\n \n #TITLE\n cell_style = xlwt.easyxf(_xs['xls_title']) \n report_name = ' '.join(\n [_('LAPORAN BANK BOOK')])\n c_specs_o = [\n ('title', 1, 20, 'text', report_name),\n ]\n row_data = self.xls_row_template(c_specs_o, ['title'])\n row_pos_o = self.xls_write_row(\n ws_o, row_pos_o, row_data, row_style=cell_style)\n \n #JOURNAL\n ws_o.write(row_pos_o, 0, data['journal_id'][1], cell_style) \n row_pos_o += 1\n\n ## Tanggal Start Date & End Date ##\n cell_style = xlwt.easyxf(_xs['left'])\n report_name = ' '.join(\n [_('Tanggal'), _('-' if data['start_date'] == False else str(data['start_date'])), _('s/d'), _('-' if data['end_date'] == False else str(data['end_date'])),\n _p.report_info])\n c_specs_o = [\n ('report_name', 1, 0, 'text', report_name),\n ]\n row_data = self.xls_row_template(c_specs_o, ['report_name'])\n row_pos_o = self.xls_write_row(\n ws_o, row_pos_o, 
row_data, row_style=cell_style)\n row_pos_o += 1\n\n # Report Column Headers\n c_specs_o = map(\n lambda x: self.render(\n x, self.col_specs_template_overview, 'header',\n render_space={'_': _p._}),\n wanted_list_overview)\n \n row_data = self.xls_row_template(\n c_specs_o, [x[0] for x in c_specs_o])\n row_pos_o = self.xls_write_row(\n ws_o, row_pos_o, row_data, row_style=self.rh_cell_style,\n set_column_size=True)\n ws_o.set_horz_split_pos(row_pos_o)\n \n row_data_begin = row_pos_o\n \n no = 0\n for p in r['move_lines']:\n c_specs_o = map(\n lambda x: self.render(\n x, self.col_specs_template_overview, 'lines'),\n wanted_list_overview)\n for x in c_specs_o :\n if x[0] == 'no' :\n no += 1\n x[4] = no \n row_data = self.xls_row_template(\n c_specs_o, [x[0] for x in c_specs_o])\n row_pos_o = self.xls_write_row(\n ws_o, row_pos_o, row_data, row_style=self.pd_cell_style)\n\n row_pos_d += 1 \n row_data_end = row_pos_o\n \n ws_o.write(row_pos_o, 0, None,self.ph_cell_style)\n ws_o.write(row_pos_o, 1, None,self.ph_cell_style)\n ws_o.write(row_pos_o, 2, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 3, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 4, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 5, None,self.ph_cell_style)\n ws_o.write(row_pos_o, 6, None,self.ph_cell_style)\n ws_o.write(row_pos_o, 7, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 8, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 9, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 10, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 11, \"Total\",self.ph_cell_style) \n ws_o.write(row_pos_o, 12, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 13, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 14, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 15, None,self.ph_cell_style) \n ws_o.write(row_pos_o, 16, xlwt.Formula(\"SUM(Q\"+str(row_data_begin)+\":Q\"+str(row_data_end)+\")\"),self.pd_cell_style_decimal_fill) \n ws_o.write(row_pos_o, 17, xlwt.Formula(\"SUM(R\"+str(row_data_begin)+\":R\"+str(row_data_end)+\")\"),self.pd_cell_style_decimal_fill) \n\n row_pos_o += 2\n ws_o.write(row_pos_o, 1, \"SALDO AWAL\", self.ph_cell_style)\n ws_o.write(row_pos_o, 2, r['saldo_awal'], self.pd_cell_style_decimal_fill) \n row_pos_o += 1\n saldo_awal_pos = row_pos_o\n\n ws_o.write(row_pos_o, 1, \"TOTAL DEBIT\", self.ph_cell_style)\n ws_o.write(row_pos_o, 2, xlwt.Formula(\"Q\"+str(row_data_end+1)), self.pd_cell_style_decimal_fill)\n row_pos_o += 1\n total_debit_pos = row_pos_o\n\n ws_o.write(row_pos_o, 1, \"TOTAL CREDIT\", self.ph_cell_style)\n ws_o.write(row_pos_o, 2, xlwt.Formula(\"R\"+str(row_data_end+1)), self.pd_cell_style_decimal_fill) \n row_pos_o += 1\n total_credit_pos = row_pos_o\n\n ws_o.write(row_pos_o, 1, \"SALDO AKHIR\", self.ph_cell_style)\n ws_o.write(row_pos_o, 2, xlwt.Formula(\"SUM(C\"+str(saldo_awal_pos)+\",C\"+str(total_debit_pos)+\",-C\"+str(total_credit_pos)+\")\"), self.pd_cell_style_decimal_fill) \n row_pos_o += 1\n\n ws_o.write(row_pos_o+1, 0, _p.report_date+\" \"+username)\n\n\n\nbank_book_report_xls(\n 'report.dym2_report_bank_book_xls',\n 'account.move.line',\n parser=dym_bank_book_print_xls)","sub_path":"dym_journal_consolidation/report/report_bank_book_xls.py","file_name":"report_bank_book_xls.py","file_ext":"py","file_size_in_byte":14635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30332961","text":"# Load all the required libraries\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom 
sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom pandas import read_csv\nfrom pandas.plotting import scatter_matrix\nfrom matplotlib import pyplot\nimport logging\nimport time\nimport cProfile\n\ndef load_data(test,loglevel):\n    \n    #This function provides data ingestion automation\n    \n    # Start time of the function\n    start_time = time.time()\n    \n    p = cProfile.Profile()\n\n    # Enable profiling for performance monitoring\n    if test==\"profile\":\n        p.enable()\n    \n    logger = logging.getLogger()\n    if loglevel==\"debug\":\n        logger.setLevel(logging.DEBUG)\n    elif loglevel==\"info\":\n        logger.setLevel(logging.INFO)\n    elif loglevel==\"warn\":\n        logger.setLevel(logging.WARNING)\n    elif loglevel==\"error\":\n        logger.setLevel(logging.ERROR)\n    elif loglevel==\"critical\":\n        logger.setLevel(logging.CRITICAL)\n    else:\n        logger.setLevel(logging.INFO)\n    \n    logger.info(\"Model compare and prediction START!\")\n    if test is None:\n        logger.warning(\"Test mode is empty\")\n    elif loglevel is None:\n        logger.warning(\"Log level not set, defaulting to info\")\n    \n    # Load the dataset; use the raw-content URL, since read_csv cannot parse the GitHub HTML blob page\n    url = \"https://raw.githubusercontent.com/prahul/capstone-ai-production/master/iris.csv\"\n    logger.debug(\"Loading data from: %s\", url)\n    if url == \"\":\n        logger.critical(\"No location specified for data file\")\n    sepalname = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']\n    dataset = read_csv(url, names=sepalname)\n    logger.info(\"Dataset loaded successfully\")\n    \n    # End time of the function after the code to be evaluated\n    end_time = time.time()\n\n    # Time taken in seconds to complete the entire process for performance monitoring.\n    time_taken = end_time - start_time\n    print(\"Time to complete the load_data function in seconds:\", time_taken)\n    \n    # Disable profiling\n    if test==\"profile\":\n        p.disable()\n\n        # Print the stats\n        p.print_stats()\n\n        # Dump the stats to a file\n        p.dump_stats(\"capprojresults.prof\") \n    \n    return dataset\n\ndef data_visualization(test,loglevel,dataset):\n    \n    #This function provides the data visualization for the ingested data\n    # Start time of the function\n    start_time = time.time()\n    \n    p = cProfile.Profile()\n\n    # Enable profiling for performance monitoring\n    if test==\"profile\":\n        p.enable()\n    \n    logger = logging.getLogger()\n    if loglevel==\"debug\":\n        logger.setLevel(logging.DEBUG)\n    elif loglevel==\"info\":\n        logger.setLevel(logging.INFO)\n    elif loglevel==\"warn\":\n        logger.setLevel(logging.WARNING)\n    elif loglevel==\"error\":\n        logger.setLevel(logging.ERROR)\n    elif loglevel==\"critical\":\n        logger.setLevel(logging.CRITICAL)\n    else:\n        logger.setLevel(logging.INFO)\n\n    # draw box and whisker plots\n    dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)\n    pyplot.show()\n    logger.info(\"Plot drawn successfully\")\n\n\n    # draw histograms\n    dataset.hist()\n    pyplot.show()\n    logger.info(\"Plot drawn successfully\")\n\n\n    # draw scatter plot matrix\n    scatter_matrix(dataset)\n    pyplot.show()\n    logger.info(\"Scatter plot matrix drawn successfully\")\n    \n    # End time of the function after the code to be evaluated\n    end_time = 
time.time()\n\n # Time taken in seconds to complete the entire process for performance monitoring.\n time_taken = end_time - start_time\n print(\"Time to complete the data_visualization function in seconds:\", time_taken)\n \n # Disable profiling\n if test==\"profile\":\n p.disable()\n\n # Print the stats\n p.print_stats()\n\n # Dump the stats to a file\n p.dump_stats(\"capprojresults.prof\") \n\ndef datavalid_models_predict(test,loglevel,dataset): \n \n #This function compares multiple models and does predictions based on the models\n \n # Start time of the function\n start_time = time.time()\n \n p = cProfile.Profile()\n\n # Enable profiling for performance monitoring\n if test==\"profile\":\n p.enable()\n \n logger = logging.getLogger()\n if loglevel==\"debug\":\n logger.setLevel(logging.DEBUG)\n elif loglevel==\"info\":\n logger.setLevel(logging.INFO)\n elif loglevel==\"warn\":\n logger.setLevel(logging.WARNING)\n elif loglevel==\"error\":\n logger.setLevel(logging.ERROR)\n elif loglevel==\"critical\":\n logger.setLevel(logging.CRITICAL)\n else:\n logger.setLevel(logging.INFO)\n\n # Split the validation dataset to keep data seperate\n array = dataset.values\n X = array[:,0:4]\n y = array[:,4]\n X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)\n logger.debug(\"X_train: %s\", X_train)\n logger.debug(\"X_validation: %s\", X_validation)\n logger.debug(\"Y_train: %s\", Y_train)\n logger.debug(\"Y_validation: %s\", Y_validation)\n if X_train is None:\n logger.error(\"X_train value empty\")\n elif X_validation is None:\n logger.error(\"X_validation value empty\")\n elif Y_train is None:\n logger.error(\"Y_train value empty\")\n elif Y_validation is None:\n logger.error(\"Y_validation value empty\")\n else:\n logger.info(\"Validation dataset split successfully\")\n \n\n # Check Algorithms\n mlmodels = []\n mlmodels.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))\n mlmodels.append(('LDA', LinearDiscriminantAnalysis()))\n mlmodels.append(('KNN', KNeighborsClassifier()))\n mlmodels.append(('CART', DecisionTreeClassifier()))\n mlmodels.append(('NB', GaussianNB()))\n mlmodels.append(('SVM', SVC(gamma='auto')))\n \n logger.debug(\"Models: %s\", mlmodels)\n logger.info(\"Check algorithms successfully\")\n # evaluate each model in turn\n modelresults = []\n sepalname = []\n for name, model in mlmodels:\n kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)\n cv_modelresults = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')\n modelresults.append(cv_modelresults)\n sepalname.append(name)\n print('%s: %f (%f)' % (name, cv_modelresults.mean(), cv_modelresults.std()))\n \n\n # Compare Algorithms\n logger.debug(\"Models results: %s\", modelresults)\n if modelresults is None:\n logger.debug(\"No data found for model results\")\n \n pyplot.boxplot(modelresults, labels=sepalname)\n pyplot.title('Algorithm Comparison')\n pyplot.show()\n logger.info(\"Algorithm compare success\")\n\n # Make predictions on validation dataset\n logger.debug(\"X_train: %s\", X_train)\n logger.debug(\"Y_train: %s\", Y_train)\n model = SVC(gamma='auto')\n model.fit(X_train, Y_train)\n predictions = model.predict(X_validation)\n logger.info(\"Make prediction success\")\n\n\n # Evaluate predictions\n print(accuracy_score(Y_validation, predictions))\n print(confusion_matrix(Y_validation, predictions))\n print(classification_report(Y_validation, predictions))\n logger.info(\"Prediction eveluation complete\")\n \n # End time 
of the function after the the code to be evaluated\n end_time = time.time()\n\n # Time taken in seconds to complete the entire process for performance monitoring.\n time_taken = end_time - start_time\n print(\"Time to complete the function datavalid_models_predict in seconds:\", time_taken)\n \n\n # Disable profiling\n if test==\"profile\":\n p.disable()\n\n # Print the stats\n p.print_stats()\n\n # Dump the stats to a file\n p.dump_stats(\"capprojresults.prof\") \n \n logger.info(\"Model compare and prediction COMPLETE!\")\n \ndef data_model_predict():\n dataset = load_data(\"false\",\"info\")\n data_visualization(\"false\",\"info\",dataset)\n datavalid_models_predict(\"false\",\"info\",dataset)\n\n# Run the project \ndata_model_predict()","sub_path":"capstone-proj-docker/capstoneproject.py","file_name":"capstoneproject.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613843167","text":"class Node:\n def __init__(self,val):\n self.data=val\n self.right=self.left=None\n\n\ndef MorrisTraversal(root):\n # Set current to root of binary tree\n current = root\n\n while (current is not None):\n\n if current.left is None:\n print(current.data)\n current = current.right\n else:\n # Find the inorder predecessor of current\n pre = current.left\n while (pre.right is not None and pre.right != current):\n pre = pre.right\n\n # Make current as right child of its inorder predecessor\n if (pre.right is None):\n pre.right = current\n current = current.left\n\n # Revert the changes made in if part to restore the\n # original tree i.e., fix the right child of predecessor\n else:\n pre.right = None\n print(current.data)\n current = current.right\nn=Node(1)\nn.left=Node(2)\nn.right=Node(3)\n\nMorrisTraversal(n)","sub_path":"data_structure/TreeStructure.py","file_name":"TreeStructure.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405405997","text":"#!/usr/bin/env python3\n\"\"\"\n udp_client.py - UDP client that talks to a remote chat server on UDP port 9000.\n Authors: Matt & Ell\n Date: 4/10/2020\n\n\"\"\"\nimport socket\nimport sys\n\n\nclass Client:\n\n\tUDP_ADDRESS = ''\n\tUDP_PORT = 0\n\tsock = None\n\n\tdef __init__(self, udp_addr = '127.0.0.1', udp_port = 9000):\n\t\tself.UDP_ADDRESS = udp_addr\n\t\tself.UDP_PORT = udp_port\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\n\tdef disconnect(self):\n\t\t# Sends a message to the chat server in order to be taken off the list\n\t\tprint(\"Disconnected\")\n\t\tself.sock.sendto(\"/disconnect\".encode(), (self.UDP_ADDRESS, self.UDP_PORT))\n\n\n\n\tdef connect(self):\n\t\t# set timeout in case the server doesn't call us back\n\t\tself.sock.settimeout(5)\n\t\tself.sock.sendto(\"/connect\".encode(), (self.UDP_ADDRESS, self.UDP_PORT))\n\t\tprint(\"Type a message and press enter to send! 
Type /disconnect to quit.\")\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdata, addr = self.sock.recvfrom(1024)\n\t\t\t\tprint(data.decode())\n\t\t\t\tmessage = input()\n\t\t\t\tif (message == \"/disconnect\"):\n\t\t\t\t\tself.disconnect()\n\t\t\t\t\tbreak\n\t\t\t\tself.sock.sendto(message.encode(), (self.UDP_ADDRESS, self.UDP_PORT))\n\t\t\t# You can also just control + C ;)\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tself.disconnect()\n\t\t\t\tsys.exit()\n\n\nif __name__ == '__main__':\n\tclient = Client()\n\tclient.connect()\n","sub_path":"Client/udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198632820","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef dilate(img):\n num_row = img.shape[0]\n num_col = img.shape[1]\n color = np.array([255,255,255])\n for y in range(num_row-2, -1, -1):\n for x in range(num_col-2, -1, -1):\n if all(img[y,x]==color):\n if not all(img[y,x-1] ==color):\n img[y,x] = img[y,x-1]\n elif not all(img[y,x-2] == color):\n img[y,x] = img[y,x-2]\n elif not all(img[y,x-3] == color):\n img[y,x] = img[y,x-3]\n elif not all(img[y-1,x] == color):\n img[y,x] = img[y-1,x]\n elif not all(img[y-2,x] == color):\n img[y,x] = img[y-2,x]\n elif not all(img[y-3,x] == color):\n img[y,x] = img[y-3,x]\n return img\n\n\ndef dilate_and_sky(img, shift=0):\n sky = cv2.imread('sky.jpg')\n num_row = img.shape[0]\n num_col = img.shape[1]\n color = np.array([255,255,255])\n for y in range(num_row-150, -1, -1):\n for x in range(num_col-2, 76, -1):\n if all(img[y,x]==color):\n # dilate\n if not all(img[y,x-1] ==color):\n img[y,x] = img[y,x-1]\n elif not all(img[y,x-2] == color):\n img[y,x] = img[y,x-2]\n elif not all(img[y,x-3] == color):\n img[y,x] = img[y,x-3]\n elif not all(img[y-1,x] == color):\n img[y,x] = img[y-1,x]\n elif not all(img[y-2,x] == color):\n img[y,x] = img[y-2,x]\n elif not all(img[y-3,x] == color):\n img[y,x] = img[y-3,x]\n elif not all(img[y,x-4] ==color):\n img[y,x] = img[y,x-4]\n elif not all(img[y,x-5] == color):\n img[y,x] = img[y,x-5]\n elif not all(img[y-4,x] == color):\n img[y,x] = img[y-4,x]\n elif not all(img[y-5,x] == color):\n img[y,x] = img[y-5,x]\n # put sky\n elif 75 <= x < sky.shape[1] - shift and y < 740 and x + shift >= 0:\n img[y,x] = sky[y,round(x+shift)]\n return img\n\ndef quatmult(p, q):\n # quaternion multiplication\n out = [p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3],\n p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2],\n p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1],\n p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0]]\n return out\n\ndef conjugate(p):\n return [p[0],-p[1],-p[2],-p[3]]\n\n#print quatmult([1,0,1,0], [1,0.5,0.5,0.75])\n#print quatmult([-math.sin(np.pi),3,4,3], [4,3.9,-1,-3])\n\ndef quat2rot(q):\n \"\"\"\n :param q:\n :return a 3x3 rotation matrix parameterized with the elements of a given input quaternion.\n \"\"\"\n q0 = q[0]\n q1 = q[1]\n q2 = q[2]\n q3 = q[3]\n R = [[q0**2+q1**2-q2**2-q3**2, 2*((q1*q2)-(q0*q3)), 2*((q1*q3)+(q0*q2))],\n [2*((q1*q2)+(q0*q3)), q0**2+q2**2-q1**2-q3**2, 2*((q2*q3)-(q0*q1))],\n [2*((q1*q3)-(q0*q2)), 2*((q2*q3)+(q0*q1)), q0**2+q3**2-q1**2-q2**2]]\n return np.matrix(R)\n","sub_path":"projection/projection_lib.py","file_name":"projection_lib.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"539311640","text":"import numbers\n\nfrom 
modules.validator.ValidatorResultPassed import ValidatorResultPassed\nfrom modules.validator.ValidatorResultFailed import ValidatorResultFailed\n\nfrom modules.movierating.public.MovieWithRating import MovieWithRating\nfrom modules.movierating.public.MovieRating import MovieRating\n\n\nclass Validator:\n def check_for_rating(self, rated_movie):\n if not rated_movie:\n return ValidatorResultFailed(\"Rated movie not exist\")\n if not isinstance(rated_movie, MovieWithRating):\n return ValidatorResultFailed(\"Rated movie is not instance of RatedMovie\")\n abnormalities_list = self._check(rated_movie)\n\n rating = rated_movie.get_rating()\n # if not (rating == MovieRating.LIKE or rating == MovieRating.NEUTRAL or rating == MovieRating.UNLIKE):\n # abnormalities_list.append(\"Invalid rating value!\")\n return self._abnormalities_list_to_validator_result(abnormalities_list)\n\n def check(self, rated_movie):\n if not rated_movie:\n return ValidatorResultFailed(\"Rated movie not exist\")\n if not isinstance(rated_movie, MovieWithRating):\n return ValidatorResultFailed(\"Rated movie is not instance of RatedMovie\")\n\n return self._abnormalities_list_to_validator_result(self._check(rated_movie))\n\n def _check(self, rated_movie):\n abnormalities_list = []\n check_user_id_error = self._check_user_id(rated_movie.get_user_id())\n if check_user_id_error:\n abnormalities_list.append(check_user_id_error)\n\n check_movie_id_error = self._check_movie_id(rated_movie.get_movie_id())\n if check_movie_id_error:\n abnormalities_list.append(check_movie_id_error)\n\n check_rating_error = self._check_rating(rated_movie.get_rating())\n if check_rating_error:\n abnormalities_list.append(check_rating_error)\n return abnormalities_list\n\n\n def _check_user_id(self, user_id):\n if not user_id:\n return \"Invalid user id: Property not exist\"\n if not isinstance(user_id, numbers.Number):\n return \"Invalid user id: Is not a number\"\n\n def _check_movie_id(self, movie_id):\n if not movie_id:\n return \"Invalid movie id: Property not exist\"\n if not isinstance(movie_id, numbers.Number):\n return \"Invalid movie id: Is not a number\"\n\n def _abnormalities_list_to_validator_result(self, abnormalities_list):\n if len(abnormalities_list) > 0:\n return ValidatorResultFailed( self._abnormalities_list_to_string(abnormalities_list))\n return ValidatorResultPassed()\n\n def _check_rating(self, movie_rating):\n if not movie_rating:\n return \"Invalid movie rating: Property not exist\"\n if not isinstance( movie_rating, MovieRating):\n return \"Invalid movie rating: Is not instance of MovieRating\"\n\n\n def _abnormalities_list_to_string(self, abnormalities_list):\n abnormalities_str = \"Movie details is incorrect.\\n\"\n for error in abnormalities_list:\n abnormalities_str += error + \"\\n\"\n return abnormalities_str\n","sub_path":"modules/movierating/private/validator/Validator.py","file_name":"Validator.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"397326641","text":"import re\nimport time\nimport sys\nimport pywinauto\nfrom splinter import Browser\n\ndef ebay_kleinanzeigen(login_name, login_pw, title, pic_path, description, price, plz, street, company, phone):\n url = \"https://www.ebay-kleinanzeigen.de/p-anzeige-aufgeben.html#?path=210/306/teile&isParent=false\"\n browser = Browser('chrome')\n browser.driver.set_window_size(1200, 900)\n browser.visit(url)\n browser.fill('loginMail', login_name)\n browser.fill('password', 
login_pw)\n browser.click_link_by_id(\"login-submit\")\n browser.find_by_id(\"cat_210\").click()\n browser.find_by_id(\"cat_306\").click()\n browser.find_by_id(\"cat_teile\").click()\n browser.find_by_css('.button').first.click()\n browser.fill('title', title)\n browser.fill('description', description)\n browser.fill('priceAmount', price)\n browser.find_by_id(\"priceType2\").click()\n browser.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight/4);\")\n browser.find_by_id('pictureupload-pickfiles').click()\n time.sleep(2)\n apps = pywinauto.findwindows.find_elements(title_re='Öffnen')\n for app in apps:\n print(app)\n prozess = re.search('.+#([0-9]+)', str(app))\n prozess = int(prozess.group(1))\n print(prozess)\n app = pywinauto.Application().connect(title='Öffnen')\n # app = pywinauto.Application().connect(process=prozess)\n window = app.Dialog\n window.Wait('ready')\n edit = window.Edit\n edit.ClickInput()\n edit.TypeKeys(pic_path)\n button = window.Button\n button.Click()\n time.sleep(10)\n browser.fill('zipCode', plz)\n browser.fill('streetName', street)\n browser.fill('contactName', company)\n browser.fill('phoneNumber', phone)\n browser.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n browser.find_by_id('pstad-submit').click()\n time.sleep(10)\n browser.quit()\n\nebay_kleinanzeigen('##USER##', '##PASSWORD##', \"\"\"##TITLE##\"\"\", '##PIC##', \"\"\"##DESC##\"\"\", '##PRICE##',\n '##PLZ##', '##STREET##', '##COMPANY##', '##PHONE##')\n","sub_path":"postad/ebay_kleinanzeigen/motorradteile/post_ebay_kleinanzeigen_cat_motorradteile.py","file_name":"post_ebay_kleinanzeigen_cat_motorradteile.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"445798126","text":"# -*- coding: UTF-8 -*-\n\n\nfrom django.conf import settings\nfrom jira import JIRA\n\n__gm_jira = None\n\n\ndef get_jira():\n global __gm_jira\n if __gm_jira is None:\n __gm_jira = JIRA('http://jira.gengmei.cc/', basic_auth=(settings.JIRA_USR, settings.JIRA_PWD))\n return __gm_jira\n","sub_path":"medivh/common/utils/jira_tool.py","file_name":"jira_tool.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"395009023","text":"from flask import Flask,jsonify,request,render_template\nfrom wit import Wit\nimport json\n\napp = Flask(__name__)\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/query',methods=['POST'])\ndef query():\n access_token = \"PJUSBTGJ6VXSGG3HERAZEBT3NKKV7JSH\"\n query = request.form['search']\n client = Wit(access_token)\n res = client.message(query)\n if(('song' in res['entities']) and ('code' in res['entities']) and ('singer' in res['entities'])):\n result = {\"error\": 1,\"message\":\"I don't know\"}\n elif (('song' in res['entities']) and ('code' in res['entities'])):\n result = searchTuneCode(res[\"entities\"][\"song\"][0][\"value\"].lower())\n elif (('song' in res['entities']) and ('singer' in res['entities'])):\n result = searchTuneArtist(res[\"entities\"][\"song\"][0][\"value\"].lower())\n else:\n result = {\"error\": 1,\"message\":\"I don't know\"}\n # if('singer' in res['entities']):\n # return \"mean\"\n # else:\n # return \"ot mean\"\n # i=0\n # for a in res[\"entities\"]:\n # i+=1\n # if(i<2):\n # result = {\"error\": 1,\"message\":\"I don't know\"}\n # else:\n # result = 
searchTuneCode(res[\"entities\"][\"song\"][0][\"value\"].lower())\n return jsonify(result)\n\ndef searchTuneCode(title):\n if(title==\"peakly\"):\n result = {\"error\":0,\"message\":\"189187\",\"title\":title}\n else:\n result = {\"error\":2,\"message\":\"Calltune not found\",\"title\":title}\n return result\n\ndef searchTuneArtist(title):\n if(title==\"peakly\"):\n result = {\"error\":0,\"message\":\"Tena\",\"title\":title}\n else:\n result = {\"error\":2,\"message\":\"Singer not found\",\"title\": title}\n return result\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"124510797","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport random\n\nauthor = \"Owen Powell\"\n\ndoc = \"\"\"\n5 firms complete in a market by setting prices for homogenous goods.\n\nSee \"Kruse, J. B., Rassenti, S., Reynolds, S. S., & Smith, V. L. (1994).\nBertrand-Edgeworth competition in experimental markets.\nEconometrica: Journal of the Econometric Society, 343-371.\"\n\"\"\"\n\n\nclass Constants(BaseConstants):\n players_per_group = 5\n name_in_url = 'Simultaneouspricecompetition'\n num_rounds = 10\n pricesC = [c(0.3), c(0.4), c(0.6), c(0.8), c(1)]\n demands = [600, 480, 360, 240, 120]\n cost = c(0.26)\n\n\nclass Subsession(BaseSubsession):\n def creating_session(self):\n self.group_randomly()\n\n\nclass Group(BaseGroup):\n winning_price = models.CurrencyField(\n choices=Constants.pricesC,\n doc=\"\"\"Cheapest price\"\"\"\n )\n winning_demand = models.PositiveIntegerField(\n choices=Constants.demands,\n doc=\"\"\"Demand at cheapest price\"\"\"\n )\n\n price1 = models.PositiveIntegerField(\n min=0, max=Constants.players_per_group,\n doc=\"\"\"Number of sellers who chose a price of 0,30 E$.\"\"\"\n )\n\n price2 = models.PositiveIntegerField(\n min=0, max=Constants.players_per_group,\n doc=\"\"\"Number of sellers who chose a price of 0,40 E$.\"\"\"\n )\n\n price3 = models.PositiveIntegerField(\n min=0, max=Constants.players_per_group,\n doc=\"\"\"Number of sellers who chose a price of 0,60 E$.\"\"\"\n )\n\n price4 = models.PositiveIntegerField(\n min=0, max=Constants.players_per_group,\n doc=\"\"\"Number of sellers who chose a price of 0,80 E$.\"\"\"\n )\n\n price5 = models.PositiveIntegerField(\n min=0, max=Constants.players_per_group,\n doc=\"\"\"Number of sellers who chose a price of 1,00 E$.\"\"\"\n )\n\n def checkzero(self):\n player = self.get_players()\n for p in player:\n if p.price == 0:\n p.price = c(0.3)\n\n def set_payoffs(self):\n players = self.get_players()\n winning_price = min([p.price for p in players])\n self.winning_demand = Constants.demands[Constants.pricesC.index(winning_price)]\n self.winning_price = c(winning_price)\n winners = [p for p in players if p.price == self.winning_price]\n\n self.price1 = len([p for p in players if p.price == Constants.pricesC[0]])\n self.price2 = len([p for p in players if p.price == Constants.pricesC[1]])\n self.price3 = len([p for p in players if p.price == Constants.pricesC[2]])\n self.price4 = len([p for p in players if p.price == Constants.pricesC[3]])\n self.price5 = len([p for p in players if p.price == Constants.pricesC[4]])\n\n for p in players:\n p.payoff = c(0) # store as currency\n if p in winners:\n p.is_a_winner = True\n p.demand = self.winning_demand/len(winners)\n 
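# (hedged sketch) set_payoffs here encodes the Bertrand-Edgeworth rule: the lowest
# posted price wins, ties split the demand read off the schedule at that price, and
# everyone else earns zero. The standalone function below restates that rule outside
# oTree for clarity; the name bertrand_payoffs and the plain-float return value are
# illustrative assumptions, not part of this app.
def bertrand_payoffs(prices, price_grid, demand_grid, cost):
    winning_price = min(prices)                            # lowest price wins
    demand = demand_grid[price_grid.index(winning_price)]  # demand schedule lookup
    winners = [i for i, p in enumerate(prices) if p == winning_price]
    share = demand / len(winners)                          # ties split the market evenly
    return [(winning_price - cost) * share if i in winners else 0.0
            for i in range(len(prices))]
# Example with the Constants above: bertrand_payoffs([0.3, 0.4, 0.3, 1.0, 0.8],
# [0.3, 0.4, 0.6, 0.8, 1.0], [600, 480, 360, 240, 120], 0.26) gives the two
# winners at 0.30 half of 600 units each: (0.30 - 0.26) * 300 = 12.0 apiece.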
p.payoff = c((p.price - Constants.cost) * p.demand)\n\n\nclass Player(BasePlayer):\n price = models.CurrencyField(\n choices=Constants.pricesC,\n doc=\"\"\"Price player chooses to sell product for\"\"\",\n widget=widgets.RadioSelect()\n )\n\n is_a_winner = models.BooleanField(\n initial=False,\n doc=\"\"\"Whether this player offered lowest price\"\"\"\n )\n\n demand = models.IntegerField(\n initial=0,\n doc=\"\"\"Share of total demand served by this player\"\"\"\n )","sub_path":"vendorc/exp02/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"578408445","text":"# 1966 프린터 큐\n\nt = int(input())\n\nfor i in range(t):\n n, m = map(int, input().split())\n \n queue = list(map(int, input().split()))\n\n \n #print(queue)\n\n idx = list(range(len(queue)))\n idx[m] = 'target'\n \n# print(idx)\n \n order = 0\n while True:\n if queue[0] == max(queue):\n order+=1\n \n if( idx[0] == 'target'):\n print(order)\n break\n \n else:\n\n x = queue.pop(0)\n idx.pop(0)\n# print('pop', x)\n \n else:\n queue.append(queue.pop(0))\n idx.append(idx.pop(0))\n \n# print(idx)\n# print(queue)","sub_path":"백준/큐,덱/1966_프린터큐.py","file_name":"1966_프린터큐.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405171103","text":"from cursesmenu import SelectionMenu\n\nclass Donors:\n\n\tdef __init__(self, donors_dict):\n\t\tself.donors = donors_dict\n\n\tdef __iter__(self):\n\t\treturn iter(self.donors)\n\n\tdef __contains__(self,donor_str):\n\t\treturn donor_str in self.donors.keys()\n\n\tdef report_gen(self):\n\t\theader = ('Donor Name','Total Given','Num Gifts','Average Gift')\n\t\trow_format, row_format0 = '{:<14}','{:<14}'\n\t\tfor item in header[1:]:\n\t\t\trow_format += f' | {{:>{len(item)}}}'\n\t\t\trow_format0 += f' {{}}{{:>{len(item)}}}'\n\t\tprint(row_format.format(*header))\n\t\tprint('-'*len(row_format.format(*header)))\n\t\tfor item in self.donors.keys():\n\t\t\tprint(row_format0.format(item,'$',sum(self.donors[item]),' ', len(self.donors[item]),'$',round(sum(self.donors[item])/len(self.donors[item]),1)))\n\n\tdef letters(self):\n\t\tfor names in self.donors.keys():\n\t\t\twith open(f'{names}.txt','w') as text_file:\n\t\t\t\tprint(f'Dear {names},\\nThank you for your very kind donation of ${sum(self.donors[names])}.\\nIt will be put to very good use.\\nSincerely,\\n-The Team', file=text_file)\n\n\tdef see_list(self):\n\t\tfor item in self.donors.keys():\n\t\t\tprint(item)\n\n\tdef add_donor(self, name_str, amount):\n\t\ttry:\n\t\t\tif name_str in self.donors:\n\t\t\t\tself.donors[name_str].append(int(amount))\n\t\t\telse:\n\t\t\t\tself.donors[name_str] = [int(amount)]\n\t\texcept ValueError:\n\t\t\tprint(\"Please Enter a number.\")\n\ndef first_selection(donors_obj):\n\tresponse = input(\"Enter a Full Name or 'list' to see donor\\n\")\n\tif response == 'list':\n\t\tdonors_obj.see_list()\n\telse:\n\t\td_amount = input(\"Enter a donation amount\\n\")\n\t\tdonors_obj.add_donor(response, d_amount)\n\t\tprint(response + ', thank you for your donation.')\n\ndef second_selection(donors_obj):\n\tdonors_obj.report_gen()\n\ndef third_selection(donors_obj):\n\tdonors_obj.letters()\n\nif __name__ == \"__main__\":\n\tdonors = {'Batman':[100,50,30],'Ironman':[70,80],'Hulk':[10],'Spiderman':[40,20],'Superman':[40,60,10]}\n\ttemp = Donors(donors)\n\ta_list = ['Send a Thank You','Create a Report','Send letters 
to everyone']\n\tselection_dict = {0: first_selection, 1: second_selection, 2: third_selection}\n\twhile True: \n\t\tselection = SelectionMenu.get_selection(a_list)\n\t\tif selection == 3:\n\t\t\tbreak\n\t\tselection_dict.get(selection)(temp)\n\t\tinput(\"Press Enter to continue...\")","sub_path":"students/Zhengtang_Yang/Lesson09/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"387388775","text":"# coding: utf-8\n# Delete task (Elimina tarea)\n\nimport webapp2\nimport time\n\nfrom model.tarea import Tarea\n\n\nclass EliminaTareaHandler(webapp2.RequestHandler):\n    def get(self):\n        tarea = Tarea.recupera(self.request)\n        tarea.key.delete()\n        time.sleep(1)\n\n        return self.redirect(\"/\")\n\n\napp = webapp2.WSGIApplication([\n    ('/tareas/elimina', EliminaTareaHandler)\n], debug=True)\n","sub_path":"handlers/Tareas/elimina.py","file_name":"elimina.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"315965166","text":"import os\nimport random\nimport socket\nimport threading\nimport time\nfrom queue import Queue\n\nfrom header import *\n\nrecvSize = 2048\ncontent_size = 1024\ndefault_port = 10000\ngain_port = 10001\nsplit = '|:|'\nwindSize = 50\n\n\ndef get_command():\n    global gain_port\n    global split\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.bind(('', default_port))\n    data, addr = sock.recvfrom(recvSize)\n    pk = Packet()\n    pk.decode(data)\n    str_to_decode = pk.data.decode('utf-8').split(split)\n    method = str_to_decode[0]\n    filename = str_to_decode[1]\n    print(method)\n    print(filename)\n\n    str_to_send = method + split + filename + split + str(gain_port)\n    pk_return = Packet()\n    pk_return.make({'seqNo': 0, 'ackNo': 0, 'receiveWindowSize': windSize, 'ACK': 0, 'SYN': 0, 'FIN': 0,\n                    'data': str_to_send.encode('utf-8')})\n    sock.sendto(pk_return.bitStream, addr)\n    sock.settimeout(2)\n    data, addr = sock.recvfrom(recvSize)\n    time.sleep(0.3)\n    # sock.close()\n    return method, filename, addr\n\n\ndef handle(method, filename, addr, server_port):\n    temp = server(server_port, addr, method, filename)\n    temp.server_client()\n\n\nclass server:\n    method = ''\n    filename = ''\n    client_port = 0\n    sock = 0\n    f = 0\n    buffer = []\n    pk_recv = []\n    current_ack = 0\n    current_send = 0\n    fin = 0\n    address = 0\n    need_re_send = False\n    client_win_size = 50\n    client_ack = -1\n    rwnd = 50\n    addr = 0\n\n    written_ack = -1\n\n    # Case of packet loss on the network\n    retransmit = False\n\n    # Congestion control\n    sleep_time = 0.001\n    duplicate_ack = 0\n    cwnd = 0\n    ssthresh = 32\n    time_out = 1\n    state = \"slow start\"\n\n    def from_slow_start_get_into_fast_recovery(self):\n        self.ssthresh = int(self.cwnd/2)\n        self.cwnd = self.ssthresh + 3\n        self.duplicate_ack = 0\n        self.state = \"fast recovery\"\n        print(\"From slow start into Fast recovery\")\n\n    def from_slow_start_into_congestion_avoidance(self):\n        self.state = \"congestion avoidance\"\n        print(\"slow start into congestion avoidance\")\n\n    def still_in_slow_start_when_new_ack(self):\n        self.cwnd = self.cwnd + 1\n        self.state = \"slow start\"\n        print(\"still in slow start when new ack\")\n\n    def still_in_slow_start_when_timeout(self):\n        self.ssthresh = int(self.cwnd/2)\n        self.cwnd = self.ssthresh + 3\n        self.state = \"slow start\"\n        print(\"still slow start when timeout\")\n\n    def still_in_slow_start_when_duplicate_ack(self):\n        self.duplicate_ack += 1\n        
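# (annotation, hedged) The from_*/still_in_* methods of this class form a TCP-Reno-style
# congestion state machine, with cwnd and ssthresh counted in packets. A summary of the
# transitions as actually coded here (note two deviations from textbook Reno: a timeout
# in slow start sets cwnd = ssthresh + 3 instead of 1, and congestion avoidance grows
# cwnd by 1 per ACK instead of by 1/cwnd):
#   slow start: new ACK -> cwnd += 1; cwnd >= ssthresh -> congestion avoidance;
#       3 duplicate ACKs -> fast recovery with ssthresh = cwnd/2, cwnd = ssthresh + 3
#   congestion avoidance: new ACK -> cwnd += 1; timeout -> slow start;
#       3 duplicate ACKs -> fast recovery
#   fast recovery: duplicate ACK -> cwnd += 1; new ACK -> congestion avoidance
#       with cwnd = ssthresh; timeout -> slow start with cwnd = 1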
self.state = \"slow start\"\n print(\"still slow start when timeout\")\n\n def from_fast_recovery_get_into_congestion_avoidance(self):\n self.cwnd = self.ssthresh\n self.state = \"congestion avoidance\"\n print(\"Fast recovery into Congestion avoidance\")\n\n def from_fast_recovery_get_into_slow_start(self):\n self.cwnd = 1\n self.state = \"slow start\"\n print(\"Fast recoverye into Slow start\")\n\n def still_in_fast_recovery_when_duplicate_ack(self):\n self.cwnd = self.cwnd + 1\n self.state = \"fast recovery\"\n print(\"still in fast recovery when duplicate ack\")\n\n def from_congestion_avoidance_get_into_slow_start(self):\n self.ssthresh = int(self.cwnd)\n self.cwnd = 1\n self.state = \"slow start\"\n print(\"Congestion avoidance into Slow start\")\n\n def from_congestion_avoidance_into_fast_recovery(self):\n self.ssthresh = int(self.cwnd/2)\n self.duplicate_ack = 0\n self.cwnd = self.ssthresh + 3\n self.state = \"fast recovery\"\n print(\"Congestion avoidance into Slow start\")\n\n def still_in_congestion_avoidance_when_ne_ack(self):\n self.cwnd = self.cwnd + 1\n self.state = \"congestion avoidance\"\n print(\"still in congestion avoidance when new ack\")\n\n def still_in_congestion_avoidance_when_duplicate_ack(self):\n self.duplicate_ack += 1\n self.state = \"congestion avoidance\"\n print(\"still in congestion avoidance when duplicate ack\")\n\n def is_pk_in_cache(self, ack):\n for pk in self.pk_recv:\n if ack == pk.ackNo:\n return True\n\n def __init__(self, port, addr, method, filename):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.bind(('', port))\n self.method = method\n self.filename = filename\n self.addr = addr\n self.need_re_send = False\n\n self.fin = 0\n\n print('new a thread to server ' + str(addr))\n\n # mothod lsend\n def lsend(self):\n try:\n self.f = open('server/' + self.filename, 'wb')\n except:\n print('open file error')\n currAck = 0\n resend = 0\n\n print(\"Ready to get data from client\", self.addr)\n while True:\n if len(self.pk_recv) < windSize:\n pk = Packet()\n try:\n data, addr = self.sock.recvfrom(recvSize)\n pk.decode(data)\n except:\n p =Packet()\n if len(self.pk_recv) == 0:\n p.make({'seqNo': 0, 'ackNo': resend, 'receiveWindowSize': self.rwnd, 'ACK': 0, 'SYN': 0, 'FIN': 0})\n self.sock.sendto(p.bitStream, self.addr)\n else:\n p_b = self.pk_recv[0]\n e_m = 0\n if p_b.FIN == 1:\n e_m = 1\n p.make({'seqNo': 0, 'ackNo': p_b.ackNo, 'receiveWindowSize': self.rwnd, 'ACK': 0, 'SYN': 0, 'FIN': e_m})\n self.sock.sendto(p.bitStream, self.addr)\n if p_b.FIN == 1:\n print(\"File getting is over\")\n break\n # read data from cache randomly\n random_read_cache = random.randint(1, 10)\n random_read_num = random.randint(1, 10)\n print(\"size\", len(self.pk_recv))\n if random_read_cache > 5:\n count = 0\n while count < random_read_num and len(self.pk_recv) > 0:\n print(\"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP\")\n e_pk_read = self.pk_recv[0]\n del self.pk_recv[0]\n self.rwnd += 1\n count += 1\n if self.written_ack >= e_pk_read.ackNo:\n continue\n #print(\"Now writing the package\", pk_read.ackNo, \"into file\")\n self.f.write(e_pk_read.data)\n self.written_ack += 1\n continue\n\n ran = random.randint(1, 10)\n if ran < 2 and currAck != 0:\n print(\"Package\", pk.ackNo, \"drop\")\n pk_req = Packet()\n if currAck - 1 < 0:\n req_ack = 0\n else:\n req_ack = currAck - 1\n pk_req.make({'seqNo': 0, 'ackNo': req_ack, 'receiveWindowSize': self.rwnd, 'ACK': 0, 'SYN': 0, 'FIN': 0})\n self.sock.sendto(pk_req.bitStream, self.addr)\n continue\n\n if 
self.is_pk_in_cache(pk.ackNo):\n                pk_back = Packet()\n                pk_back.make({'seqNo': 0, 'ackNo': pk.ackNo, 'receiveWindowSize': self.rwnd, 'ACK': 0, 'SYN': 0, 'FIN': 0})\n                resend = pk.ackNo\n                self.sock.sendto(pk_back.bitStream, self.addr)\n                continue\n\n            print(\"Package\", pk.ackNo, \"saved in cache\")\n            self.pk_recv.append(pk)\n            self.rwnd -= 1\n\n            currAck += 1\n\n            if self.rwnd < 0:\n                self.rwnd = 0\n\n            if pk.FIN == 1:\n                m = 1\n            else:\n                m = 0\n\n            pk_re = self.pk_recv[0]\n            pk_re.make({'seqNo': 0, 'ackNo': pk.ackNo, 'receiveWindowSize': self.rwnd, 'ACK': 0, 'SYN': 0, 'FIN': m})\n            self.sock.sendto(pk_re.bitStream, self.addr)\n\n            if pk.FIN == 1:\n                print(\"File getting is over\")\n                break\n\n            # read data from cache randomly\n            random_read_cache = random.randint(1, 10)\n            random_read_num = random.randint(1, 10)\n            print(\"Cache size\", len(self.pk_recv))\n            if random_read_cache > 5:\n                count = 0\n                while count < random_read_num and len(self.pk_recv) > 0:\n                    pk_read = self.pk_recv[0]\n                    del self.pk_recv[0]\n                    self.rwnd += 1\n                    count += 1\n                    if self.written_ack >= pk_read.ackNo:\n                        continue\n                    print(\"Now writing the package\", pk_read.ackNo, \"into file\")\n                    self.f.write(pk_read.data)\n                    self.written_ack += 1\n\n        while len(self.pk_recv) > 0:\n            pk_in_cache = self.pk_recv[0]\n            del self.pk_recv[0]\n            self.rwnd += 1\n            if self.written_ack >= pk_in_cache.ackNo:\n                continue\n            print(\"Now writing the package\", pk_in_cache.ackNo, \"into file\")\n            self.f.write(pk_in_cache.data)\n            self.written_ack += 1\n\n        print(\"Link with\", addr, \"end\")\n        self.f.close()\n        self.sock.close()\n\n    def method_lget(self):\n        try:\n            self.f = open('server/' + self.filename, 'rb')\n            filesize = os.path.getsize(\"server/\"+self.filename)\n            m = 0\n            if filesize % content_size != 0:\n                m = 1\n            block_num = int(filesize / content_size) + m\n            print(\"block \"+str(block_num))\n        except:\n            print('open file error')\n\n        re_send_num = 0\n        while 1:\n            if self.cwnd >= self.ssthresh and self.state == \"slow start\":\n                self.from_slow_start_into_congestion_avoidance()\n            if self.client_win_size > 0:\n                # Send one packet\n                if not self.need_re_send:\n                    if self.current_send < block_num:\n                        if self.current_send == block_num - 1:\n                            f = 1\n                            content = self.f.read(filesize % content_size)\n                        else:\n                            f = 0\n                            content = self.f.read(content_size)\n\n                        pk = Packet()\n                        pk.make({'seqNo': 0, 'ackNo': self.current_send, 'receiveWindowSize': self.client_win_size,\n                                 'ACK': 0, 'SYN': 0, 'FIN': f, 'data': content})\n                        time.sleep(self.sleep_time)\n                        self.sock.sendto(pk.bitStream, self.addr)\n                        print('send a packet '+str(pk.ackNo))\n                        self.pk_recv.append(pk)\n                        self.current_send = self.current_send + 1\n                if self.need_re_send:\n                    pk = self.pk_recv[0]\n                    self.sock.sendto(pk.bitStream, self.addr)\n                    print('resend a packet '+str(pk.ackNo))\n            else:  # window is full: send nothing, but keep receiving\n                pass\n            # Receive one packet\n\n            try:\n                data, addr = self.sock.recvfrom(recvSize)\n            except:\n                pk = self.pk_recv[0]\n                self.sock.sendto(pk.bitStream, self.addr)\n                if self.state == \"slow start\":\n                    self.still_in_slow_start_when_timeout()\n                if self.state == \"fast recovery\":\n                    self.from_fast_recovery_get_into_slow_start()\n                if self.state == \"congestion avoidance\":\n                    self.from_congestion_avoidance_get_into_slow_start()\n                continue\n            pk = Packet()\n            pk.decode(data)\n            if pk.seqNo == 1:\n                self.need_re_send = False\n                self.client_ack = pk.ackNo\n                continue\n\n            self.client_win_size = pk.receiveWindowSize\n            if self.client_win_size < self.cwnd:\n                if self.sleep_time <= 0.00004:\n                    self.sleep_time += 0.00001\n            else:\n                if self.sleep_time > 0.00001:\n                    self.sleep_time -= 0.00001\n            self.fin = 
pk.FIN\n print(\"receive \" + str(pk.ackNo))\n\n self.client_ack = pk.ackNo\n '''\n print(\"client_ack \", self.client_ack)\n print(\"current_ack \", self.current_ack)\n '''\n if self.current_ack == self.client_ack:\n if self.duplicate_ack >= 3:\n self.from_slow_start_get_into_fast_recovery()\n self.current_ack = self.current_ack + 1\n del self.pk_recv[0]\n if self.state == \"slow start\":\n self.still_in_slow_start_when_new_ack()\n if self.state == \"congestion avoidance\":\n self.still_in_congestion_avoidance_when_ne_ack()\n if self.state == \"fast recovery\":\n self.from_fast_recovery_get_into_congestion_avoidance()\n self.need_re_send = False\n else:\n self.need_re_send = True\n if self.state == \"congestion avoidance\":\n self.still_in_congestion_avoidance_when_duplicate_ack()\n if self.state == \"fast recovery\":\n self.still_in_fast_recovery_when_duplicate_ack()\n if self.state == \"slow start\":\n self.still_in_slow_start_when_duplicate_ack()\n\n # get three dupicate\n if self.duplicate_ack == 3:\n if self.state == \"slow start\":\n self.from_slow_start_get_into_fast_recovery()\n if self.state == \"congestion avoidance\":\n self.from_congestion_avoidance_into_fast_recovery()\n if self.state == \"fast recovery\":\n self.from_fast_recovery_get_into_slow_start()\n\n if self.duplicate_ack > 3:\n self.cwnd += 1\n\n if self.fin:\n print(\"server end \", self.addr)\n break\n self.sock.close()\n\n def server_client(self):\n if self.method == 'lget':\n self.method_lget()\n else:\n self.lsend()\n\n\ndef main():\n global gain_port\n while 1:\n print(\"begin\")\n method, filename, addr = get_command()\n print(method)\n print(filename)\n print(addr)\n\n thread = threading.Thread(target=handle, args=(method, filename, addr, gain_port,))\n print(\"gain \"+str(gain_port))\n gain_port = gain_port + 1\n thread.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":15202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"444978698","text":"class Student:\n def __init__(self,n='.',m=0):\n self.name = n\n self.marks = m\n\n def display(self):\n print('hi',self.name)\n print('your name',self.marks)\n\ns = Student()\ns.display()\nprint('')\n\ns1 = Student('Ak',880)\ns1.display()\n","sub_path":"func2.py","file_name":"func2.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433588714","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rospy\nimport tf2_ros\nimport common\n\nfrom geometry_msgs.msg import TwistStamped, Pose, PoseStamped\nfrom sensor_msgs.msg import JointState\nfrom std_srvs.srv import Trigger, TriggerResponse\nfrom rtabmap_ros.msg import MapData\nfrom common.msg import BodyJoint, ManipulationReference, NavigationReference, OperatorMode, RobotState\nfrom common.srv import MessageTrigger, MessageTriggerResponse\nfrom csv_logger import MyCSVLogger, CSVSimpleLogger\nfrom rosbag_logger import MyRosbagLogger\nimport my_ros_utils\nfrom util_functions import init_tf_stamped, update_tf, wait_for_tf, transform2pose, get_msg_data_list, \\\n get_msg_attr_list\nfrom tf2pose_stamped import get_tf_pose_stamped_topic_name\n\nDEFAULT_NODE_NAME = 'my_logger'\nPARAM_NAME_TIME_LAP_LOGGING_SERVICE = '/time_lap_logging_srv'\n\nPARAM_NAME_BODY_JOINTS_TOPIC = '/body_joints_topic'\nPARAM_NAME_MANI_REF_TOPIC = '/manipulation_reference_topic'\nPARAM_NAME_NAVI_REF_TOPIC = 
'/navigation_reference_topic'\nPARAM_NAME_OPE_MODE_TOPIC = '/operator_mode_topic'\nPARAM_NAME_BS_INPUT_TOPIC = '/blackship_input_topic'\nPARAM_NAME_MICO_INPUT_TOPIC = '/mico_eevel_input_topic'\nPARAM_NAME_JOINT_STATE_TOPIC = '/joint_state_topic_for_urdf'\nPARAM_NAME_ROBOT_STATE_TOPIC = '/robot_state_topic'\nPARAM_NAME_MAP_DATA_TOPIC = '/map_data_topic'\nPARAM_NAME_NEW_LOGGING_SERVICE = '/new_logging_srv'\nPARAM_NAME_CLOSE_LOGGING_SERVICE = '/close_logging_srv'\nPARAM_NAME_ORIGIN_FRAME_ID = '/system_origin_frame_id'\nPARAM_NAME_LOGGING_FRAME_IDS = '/logging_target_frame_id_param_names'\nPARAM_NAME_CSV_SAVING_FRAME_IDS = '/csv_saving_frame_id_param_names'\nPARAM_NAME_REF_TRAJECTORY_FRAME_IDS = '/ref_trajectory_frame_ids'\n\nLOGGING_TOPIC_MAP = {\n rospy.get_param(PARAM_NAME_BODY_JOINTS_TOPIC): BodyJoint,\n rospy.get_param(PARAM_NAME_MANI_REF_TOPIC): ManipulationReference,\n rospy.get_param(PARAM_NAME_NAVI_REF_TOPIC): NavigationReference,\n rospy.get_param(PARAM_NAME_OPE_MODE_TOPIC): OperatorMode,\n rospy.get_param(PARAM_NAME_BS_INPUT_TOPIC): TwistStamped,\n rospy.get_param(PARAM_NAME_MICO_INPUT_TOPIC): TwistStamped,\n rospy.get_param(PARAM_NAME_JOINT_STATE_TOPIC): JointState,\n rospy.get_param(PARAM_NAME_MAP_DATA_TOPIC): MapData,\n rospy.get_param(PARAM_NAME_ROBOT_STATE_TOPIC): RobotState\n}\n\nFRAME_ID_LIST = [[rospy.get_param('/' + ids[0]),\n rospy.get_param('/' + ids[1])] for ids in rospy.get_param(PARAM_NAME_LOGGING_FRAME_IDS)]\n\nfor FRAME_IDS in FRAME_ID_LIST:\n LOGGING_TOPIC_MAP.update({get_tf_pose_stamped_topic_name(FRAME_IDS[0],\n FRAME_IDS[1]): PoseStamped})\n\nTRAJECTORY_FRAME_IDS = rospy.get_param(PARAM_NAME_REF_TRAJECTORY_FRAME_IDS)\n\n\nclass RefTrajectoryCSVLogger(object):\n def __init__(self, trajectory_waypoint_frame_ids,\n file_name='ref_trajectory', prefix='', registered_tf_buf=None):\n self._logger = CSVSimpleLogger(prefix + '_' + file_name)\n self._logger.write(get_msg_attr_list(Pose()))\n\n if registered_tf_buf is None:\n self._tf_buf = tf2_ros.Buffer()\n tf2_ros.TransformListener(self._tf_buf)\n else:\n self._tf_buf = registered_tf_buf\n\n self._frame_ids = trajectory_waypoint_frame_ids\n\n def activate(self):\n for frame_id in self._frame_ids:\n tf = wait_for_tf(self._tf_buf, 'origin', frame_id)\n self._logger.write(get_msg_data_list(transform2pose(tf.transform)))\n\n\nclass TimeCSVLogger(object):\n def __init__(self, file_name='time', prefix=''):\n self._logger = CSVSimpleLogger(prefix + '_' + file_name)\n self._logger.write(['cnt', 'time'])\n\n def record(self, key):\n rospy.loginfo(str(key) + ': lap time!')\n self._logger.write([key, rospy.Time.now().to_sec()])\n\n\nclass TimeLapManager(object):\n '''\n @:param: callback\n When the lap_service is called, this is called with the count as a parameter\n '''\n\n def __init__(self, callback=None):\n self._cb = callback\n self._counter = 0\n self._srv = rospy.Service(rospy.get_param(PARAM_NAME_TIME_LAP_LOGGING_SERVICE),\n Trigger, self._lap_logging_srv_handler)\n\n def _lap_logging_srv_handler(self, req):\n if self._cb is not None:\n self._cb(self._counter)\n self._counter += 1\n return TriggerResponse()\n\n def close(self):\n self._srv.shutdown()\n\n\nclass MyLogger(object):\n def __init__(self, prefix):\n self._tf_buf = tf2_ros.Buffer()\n tf2_ros.TransformListener(self._tf_buf)\n self._lap_manager = TimeLapManager(callback=self._time_lap_callback)\n self._time_logger = TimeCSVLogger(prefix=prefix)\n self._rosbag_logger = MyRosbagLogger('commandline').logger(LOGGING_TOPIC_MAP,\n filename=prefix)\n 
self._ref_trajectory_logger = RefTrajectoryCSVLogger(TRAJECTORY_FRAME_IDS,\n registered_tf_buf=self._tf_buf,\n prefix=prefix)\n\n frame_id_list = [[rospy.get_param('/' + ids[0]),\n rospy.get_param('/' + ids[1])] for ids in rospy.get_param(PARAM_NAME_CSV_SAVING_FRAME_IDS)]\n self._csv_loggers = [MyCSVLogger(prefix + '_' + frame_ids[1],\n get_tf_pose_stamped_topic_name(frame_ids[0], frame_ids[1]),\n PoseStamped) for frame_ids in frame_id_list]\n self.prefix = prefix\n\n def _time_lap_callback(self, cnt):\n if cnt == 0:\n self._start_callback()\n self._time_logger.record(cnt)\n\n def _start_callback(self):\n self._rosbag_logger.activate()\n self._ref_trajectory_logger.activate()\n for csv_logger in self._csv_loggers:\n csv_logger.activate()\n\n return TriggerResponse()\n\n def close(self):\n self._rosbag_logger.close()\n self._lap_manager.close()\n\n\n# --------------------------------------------\nif __name__ == '__main__':\n rospy.init_node(DEFAULT_NODE_NAME, anonymous=True)\n\n my_logger = None\n\n\n def new_logging_srv_handler(req):\n global my_logger\n if my_logger is not None:\n my_logger.close()\n my_logger = MyLogger(req.trigger_message)\n rospy.loginfo('New Logger: ' + req.trigger_message)\n return MessageTriggerResponse()\n\n\n rospy.Service(rospy.get_param(PARAM_NAME_NEW_LOGGING_SERVICE),\n MessageTrigger, new_logging_srv_handler)\n\n\n def close_logging_srv_handler(req):\n global my_logger\n if my_logger is not None:\n my_logger.close()\n my_logger = None\n return TriggerResponse()\n\n\n rospy.Service(rospy.get_param(PARAM_NAME_CLOSE_LOGGING_SERVICE),\n Trigger, close_logging_srv_handler)\n\n rospy.spin()\n\n if my_logger is not None:\n my_logger.close()\n","sub_path":"experiment_manager/src/my_logger.py","file_name":"my_logger.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449817229","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport asyncio\nimport aiohttp\nimport json\nimport re\nfrom datetime import datetime\n\n\nPREFIX = open(\"core/prefix.txt\", \"r\").read().replace(\"\\n\", \"\")\nclient = Bot(command_prefix = \"{0}\".format(PREFIX))\n\nunresolved_ids = 0\n\n\n############# Notification variables ################\nTWITCH_CLIENT_ID = open(\"multimedia/twitch_client_id.txt\", \"r\").read().replace(\"\\n\", \"\")\nTWITCH_SECRET_ID = open(\"multimedia/twitch_secret_id.txt\", \"r\").read().replace(\"\\n\", \"\")\nunresolved_ids = 0\n\n# Reset all sent key values to false\nwith open('multimedia/local.json', 'r') as fp:\n reset_values = json.load(fp)\nfor streams_index in reset_values['streams']:\n streams_index['sent'] = 'false'\nwith open('multimedia/local.json', 'w') as fp:\n json.dump(reset_values, fp, indent=2)\n\n\nwith open('multimedia/local.json', 'r') as fp:\n local = json.load(fp)\n\nwith open('multimedia/userlist.json', 'r') as fp:\n user_list = json.load(fp)\n\napi = {}\n\nglobal counter\nglobal first_startup\nfirst_startup = 1\n\n\nasync def dump_json():\n with open('multimedia/local.json' , 'w') as fp:\n json.dump(local, fp, indent=2)\n\n with open('multimedia/userlist.json' , 'w') as fp:\n json.dump(user_list, fp, indent=2)\n\n\n# Retourne la réponse de twitch api\nasync def get_streams(c_id, session, url, response_type):\n # Param qui contient l'ID client\n headers = {\n 'Client-ID': '{}'.format(c_id)\n }\n\n # Obtient et retourne la réponse de twitch api, en utilisant l'en-tête défini ci-dessus.\n async with 
session.get(url, headers=headers, timeout=10) as response:\n if response_type == 'text':\n return await response.text()\n elif response_type == 'json':\n return await response.json()\n\n\nasync def get_game(c_id, session, url, response_type):\n # Param qui contient l'ID client\n headers = {\n 'Client-ID': '{}'.format(c_id)\n }\n\n # Obtient et retourne la réponse de twitch api, en utilisant l'en-tête défini ci-dessus.\n async with session.get(url, headers=headers, timeout=10) as response:\n if response_type == 'text':\n return await response.text()\n elif response_type == 'json':\n return await response.json()\n\n\n# Retourne la réponse de twitch api\nasync def get_users(token, session, url, response_type):\n\n # Param qui contient l'ID client\n headers = {\n 'Authorization': 'Bearer {}'.format(token)\n }\n\n # Obtient et retourne la réponse de twitch api, en utilisant l'en-tête défini ci-dessus.\n async with session.get(url, headers=headers, timeout=10) as response:\n if response_type == 'text':\n return await response.text()\n elif response_type == 'json':\n return await response.json()\n\n\nasync def make_token(client_id, client_secret):\n print('\\nObtention du token TWITCH...')\n token_url = 'https://id.twitch.tv/oauth2/token?client_id={}&client_secret={}&grant_type=client_credentials'.format(\n client_id, client_secret)\n async with aiohttp.ClientSession() as session:\n async with session.post(token_url) as response:\n response = await response.json()\n token = response['access_token']\n print('Token: ' + token + '\\n------')\n return token\n\n\n# Créer et renvoyer l'URL de l'API des flux Twitch avec les user_logins dans local.json\nasync def make_streams_url():\n streams = local['streams']\n\n url = 'https://api.twitch.tv/helix/streams?user_login='\n\n for index, login in enumerate(streams):\n if index == 0:\n url = url + login['login']\n else:\n url = url + '&user_login=' + login['login']\n\n return url\n\n\n# Créer et renvoyer l'URL de l'API des flux Twitch avec les user_logins dans local.json\nasync def make_users_url():\n stream = local['streams']\n\n url = 'https://api.twitch.tv/helix/users?login='\n\n for index, login in enumerate(stream):\n if index == 0:\n url = url + login['login']\n else:\n url = url + '&login=' + login['login']\n\n return url\n\n\nasync def fill_ids(users_response):\n global unresolved_ids\n counter = 0\n\n print('\\nRemplir les identifiants manquants ...')\n for local_user in local['streams']:\n if local_user['id'] == \"\":\n for user in users_response['data']:\n if local_user['login'] == user['login']:\n counter += 1\n print(\"ID manquant rempli pour l'utilisateur: \" + local_user['login'] + \" : \" + user['id'])\n local_user['id'] = user['id']\n\n if counter == 0:\n print('Aucun identifiant manquant.')\n else:\n print('\\n' + str(counter) + ' ID remplis.')\n\n unresolved_ids = 0\n await dump_json()\n\n\nclass Notification(commands.Cog):\n\n def __init__(self, ctx):\n return(None)\n\n @commands.command(pass_context=True)\n async def notif_list(self, ctx):\n \"\"\"Affiche la liste des notifications\"\"\"\n channel_id = ctx.message.channel.id\n channel_exists = 0\n has_subscriptions = 0\n\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print('Demande de liste du channel' + str(channel_id))\n\n msg = 'Vous recevez actuellement des notifications pour les channels suivants:\\n'\n for channel in local['channels']:\n\n # Vérifiez si le channel a été ajouté à local.json\n if channel['id'] == channel_id:\n channel_exists = 1\n for stream in 
channel['subscribed']:\n has_subscriptions = 1\n msg = msg + '\\n' + stream\n\n # Si le channel n'existe pas, envoyez un message à ctx et retournez\n if channel_exists == 0:\n msg = \"Ce channel discord n'a pas encore été vérifié.\"\n # print(\"Impossible de supprimer le flux, le channel n'a pas été ajouté au bot.\\n------\\n\")\n await ctx.channel.send(msg)\n return\n\n elif not has_subscriptions:\n msg = \"Vous n'avez ajouté aucun twitch channels.\"\n # print('Aucun abonnement ajouté.\\n------\\n')\n await ctx.channel.send(msg)\n return\n\n else:\n # print('\\n------\\n')\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def checklive(self, ctx):\n \"\"\"Affiche le nombre de flux en direct\"\"\"\n c_id = ctx.message.channel.id\n streams_live = []\n\n for channel in local['channels']:\n if c_id == channel['id']:\n if len(channel['subscribed']) == 0:\n msg = \"Vous n'avez ajouté aucun twitch channels.\"\n await ctx.channel.send(msg)\n return\n\n for stream in local['streams']:\n if stream['status'] == 'live':\n streams_live.append(stream['login'])\n\n if len(streams_live) == 1:\n msg = 'Depuis vos notifications, il y a actuellement 1 flux en direct:\\n\\n'\n for login in streams_live:\n msg = msg + '{}\\n'.format(login)\n\n elif len(streams_live) > 0:\n msg = 'Depuis vos notifications, il y a actuellement {} flux en direct:\\n\\n'.format(len(streams_live))\n for login in streams_live:\n msg = msg + '{}\\n'.format(login)\n\n else:\n msg = \"Il n'y a pas de flux en direct.\"\n\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def removestream(self, ctx, arg):\n \"\"\"Permet de retirer un flux des notifications\"\"\"\n channel_id = ctx.message.channel.id\n channel_exists = 0\n arg = str(arg.lower())\n\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print('Remove request from channel ' + str(channel_id) + ' for stream name ' + arg)\n\n # Vérifiez si le channel a été ajouté à local.json\n for channel in local['channels']:\n if channel['id'] == channel_id:\n channel_exists = 1\n\n # Si le channel n'existe pas, envoyez un message à ctx et retournez\n if channel_exists == 0:\n msg = \"Ce channel discord n'a pas encore été vérifié.\"\n # print(\"Impossible de supprimer le flux, le channel n'a pas été ajouté au bot.\")\n await ctx.channel.send(msg)\n return\n\n if not re.match('^[a-zA-Z0-9_]+$', arg):\n msg = 'Le nom ne doit pas contenir de caractères spéciaux.'\n # print(msg)\n await ctx.channel.send(msg)\n return\n\n # Vérifiez la liste des chaînes dans local.json pour éviter les doublons.\n for i, channel in enumerate(local['channels']):\n subscription_exists = 0\n\n if channel['id'] == channel_id:\n for stream in channel['subscribed']:\n if stream == arg:\n subscription_exists = 1\n\n if subscription_exists:\n subscriptions = channel['subscribed']\n subscriptions.remove(arg)\n await dump_json()\n\n # print('\\nENLEVÉ: \\nSTREAM: ' + arg + '\\nCHANNEL ID: ' + str(channel_id) + '\\n------\\n')\n\n msg = 'Enlevé ' + arg + '.'\n await ctx.channel.send(msg)\n\n else:\n # print(arg + \" n'existe pas dans les abonnements aux chaînes\")\n\n msg = arg + \" n'est pas actuellement dans vos notifications.\"\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def addstream(self, ctx, arg):\n \"\"\"Ajouter un flux twitch aux notifications de channel\"\"\"\n global unresolved_ids\n channel_id = ctx.message.channel.id\n stream_exists = 0\n channel_exists = 0\n subscription_exists = 0\n arg = str(arg.lower())\n 
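# (annotation) The stream_exists / subscription_exists flags initialized above and
# set by the scans below drive a 2x2 case split at the end of this handler:
#   stream_exists=0, subscription_exists=0 -> append to local['streams'] and to this
#       channel's 'subscribed' list
#   stream_exists=0, subscription_exists=1 -> re-append the missing entry to
#       local['streams'] only (repairs an inconsistent state)
#   stream_exists=1, subscription_exists=0 -> append to this channel's 'subscribed' list only
#   stream_exists=1, subscription_exists=1 -> duplicate; only notify the user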
new_stream = {\n \"login\": arg,\n \"sent\": \"false\",\n \"id\": \"\",\n \"status\": \"\",\n \"game\": \"\"\n }\n\n print('\\n------\\n\\nTime: ' + str(datetime.now()))\n print('Add request from channel ' + str(channel_id) + ' for stream ' + arg)\n\n if not re.match('^[a-zA-Z0-9_]+$', arg):\n msg = 'The name must not contain special characters.'\n # print(msg)\n await ctx.channel.send(msg)\n return\n\n # Check the stream list in local.json to avoid duplicates\n for index in local['streams']:\n if index['login'] == arg:\n stream_exists = 1\n\n # Check the channel list in local.json to avoid duplicates.\n for channel in local['channels']:\n\n # Check whether the channel has been added to local.json\n if channel['id'] == channel_id:\n channel_exists = 1\n\n for stream in channel['subscribed']:\n\n # Check whether the stream is already in the channel's subscriptions\n if stream == arg:\n subscription_exists = 1\n\n # If the channel does not exist, send a message to ctx and return\n if channel_exists == 0:\n msg = \"This discord channel has not been verified yet.\"\n # print(\"Cannot add the stream, the channel has not been added to the bot.\")\n await ctx.channel.send(msg)\n return\n\n # Act on the checks above\n if subscription_exists == 0 and stream_exists == 0:\n local.setdefault('streams', []).append(new_stream)\n unresolved_ids = 1\n\n for channel in local['channels']:\n if channel['id'] == channel_id:\n change = channel['subscribed']\n change.append(arg)\n\n await dump_json()\n\n # print('\\nADDED: \\nSTREAM: ' + arg + '\\nCHANNEL ID: ' + str(channel_id) + '\\nADDED TO STREAMS\\n------\\n')\n\n msg = arg + ' has been added to your notifications.'\n await ctx.channel.send(msg)\n\n elif subscription_exists == 1 and stream_exists == 0:\n local.setdefault('streams', []).append(new_stream)\n unresolved_ids = 1\n\n await dump_json()\n\n # print('\\nADDED TO STREAMS\\n------\\n')\n\n msg = arg + ' is already in your notifications.'\n await ctx.channel.send(msg)\n\n elif subscription_exists == 0 and stream_exists == 1:\n for channel in local['channels']:\n if channel['id'] == channel_id:\n change = channel['subscribed']\n change.append(arg)\n\n # print('\\nADDED: \\nSTREAM: ' + arg + '\\nCHANNEL ID: ' + str(channel_id) + '\\n------\\n')\n\n await dump_json()\n\n msg = 'Added ' + arg + ' to your notifications.'\n await ctx.channel.send(msg)\n\n elif subscription_exists == 1 and stream_exists == 1:\n # print('ALREADY ADDED')\n msg = arg + ' has already been added to your notifications!'\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def addchannel(self, ctx):\n \"\"\"Add a channel to the bot\"\"\"\n s_name = ctx.message.guild.name\n c_name = ctx.message.channel.name\n c_id = ctx.message.channel.id\n u_id = ctx.message.author.id\n u_name = ctx.message.author.name\n\n verified = 0\n duplicate = 0\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print(\"Channel add request for:\\nSERVER: {}\\nCHANNEL: {} with ID {}\"\n # \"\\nUSER: {} with ID {}\".format(s_name, c_name, c_id, u_name, u_id))\n\n # Check whether the user is allowed to add channels\n for id in user_list['verified_users']:\n if u_id == id:\n verified = 1\n\n # If the user can be verified, check for duplicates, then add the channel\n if verified:\n\n # Check for duplicate channel IDs\n for channel in local['channels']:\n if channel['id'] == c_id:\n duplicate = 1\n\n # Act on duplicate check\n if not duplicate:\n new_channel 
= {\n \"id\": c_id,\n \"guild_name\": s_name,\n \"channel_name\": c_name,\n \"added_by_name\": u_name,\n \"added_by_id\": u_id,\n \"subscribed\": []\n }\n\n local['channels'].append(new_channel)\n await dump_json()\n\n msg = 'Channel added!'\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n else:\n msg = 'This channel has already been added!'\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n else:\n # print(\"The user is not allowed to add channels.\\n------\\n\")\n msg = \"You are not allowed to add channels.\"\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def removechannel(self, ctx):\n \"\"\"Remove the channel from the bot\"\"\"\n c_id = ctx.message.channel.id\n u_id = ctx.message.author.id\n\n verified = 0\n channel_exists = 0\n\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print(\"Channel removal request for:\\nSERVER: {}\\nCHANNEL: {} with ID {}\"\n # \"\\nUSER: {} with ID {}\".format(s_name, c_name, c_id, u_name, u_id))\n\n # Check if user is allowed to add channels\n for id in user_list['verified_users']:\n if u_id == id:\n verified = 1\n\n # If user can be verified, try remove channel with correct id\n if verified:\n channel_list = local['channels']\n for channel in channel_list:\n if channel['id'] == c_id:\n channel_exists = 1\n channel_list.remove(channel)\n await dump_json()\n\n if channel_exists:\n msg = 'Channel removed!'\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n else:\n msg = \"The channel has already been removed or was never added.\"\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n else:\n # print(\"The user is not allowed to remove channels.\\n------\\n\")\n msg = \"You are not allowed to remove channels.\"\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def adduser(self, ctx, arg):\n \"\"\"Add a user to the verified list. This can only be done by master users.\"\"\"\n u_id = ctx.message.author.id\n\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print('Verify User request from:\\nSERVER: {}\\nCHANNEL: {} with ID {}'\n # '\\nUSER: {} with ID {}\\nFor user ID: {}'.format(s_name, c_name, c_id, u_name, u_id, arg))\n\n # Check if user is master user\n if u_id not in user_list['master_users']:\n msg = 'You are not authorized to add users.'\n # print('User is not a master user.')\n await ctx.channel.send(msg)\n return\n\n # Make the argument into an int\n try:\n arg = int(arg)\n except ValueError:\n # print('Request cancelled, invalid argument.\\n------\\n')\n await ctx.channel.send(\"That didn't work, please try again.\")\n return\n\n # If user is not already verified, add it\n if arg not in user_list['verified_users']:\n user_list['verified_users'].append(arg)\n await dump_json()\n\n msg = 'User ID {} is now verified.'.format(str(arg))\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n else:\n msg = 'User ID {} is already verified.'.format(str(arg))\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n @commands.command(pass_context=True)\n async def removeuser(self, ctx, arg):\n \"\"\"Remove a user from the verified list. 
This can only be done by master users.\"\"\"\n u_id = ctx.message.author.id\n\n # print('\\n------\\n\\nTime: ' + str(datetime.now()))\n # print('Remove Verified User request from:\\nSERVER: {}\\nCHANNEL: {} with ID {}'\n # '\\nUSER: {} with ID {}\\nFor user ID: {}'.format(s_name, c_name, c_id, u_name, u_id, arg))\n\n # Check if user is master user\n if u_id not in user_list['master_users']:\n msg = 'You are not authorized to remove users.'\n # print('User is not a master user.')\n await ctx.channel.send(msg)\n return\n\n # Make the argument into an int\n try:\n arg = int(arg)\n except ValueError:\n # print('Request cancelled, invalid argument.\\n------\\n')\n await ctx.channel.send(\"That didn't work, please try again.\")\n return\n\n verified = user_list['verified_users']\n try:\n verified.remove(arg)\n await dump_json()\n\n msg = 'Removed user ID {} from verified users.'.format(str(arg))\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n except ValueError:\n msg = 'User ID {} is not a verified user.'.format(str(arg))\n # print(msg + '\\n------\\n')\n await ctx.channel.send(msg)\n\n\ndef setup(bot):\n bot.add_cog(Notification(bot))\n open(\"help/cogs.txt\", \"a\").write(\"Notification\\n\")\n","sub_path":"multimedia/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":19910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"554455045","text":"#\n# Copyright (c) 2011, Pavel Paulau \n#\n# All rights reserved.\n#\n# Redistribution and use of this software in source and binary forms, with or\n# without modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the author nor the names of contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\nimport os\nfrom csv import reader, writer\nfrom datetime import datetime\n\nimport gtk\nfrom numpy import arange\nfrom matplotlib.dates import MinuteLocator, DateFormatter\nimport pylab\nfrom lxml import etree\n\n\nclass jmlog:\n\n \"\"\"JMeter Log Class\n \"\"\"\n\n def __init__(self, path, throughput_range, time_range):\n # Options: Throughput (kB/s vs. MB/s) and Time (ms vs. 
s)\n self.throughput_range = throughput_range\n self.time_range = time_range\n\n # Read the first log line for further validation\n log_file = open(path, \"r\")\n first_line = log_file.readline()\n log_file.close()\n\n # Guess file format and perform basic check\n if first_line == '\\n':\n if self.validate_xml(path):\n self.read_xml()\n else:\n return None\n else:\n if self.validate_csv(first_line):\n self.read_csv(path)\n else:\n return None\n\n def validate_csv(self, line):\n \"\"\"Validate CSV file header\"\"\"\n header = (\"timeStamp\", \"elapsed\", \"label\", \"success\", \"bytes\",\n \"allThreads\", \"Latency\")\n\n for label in header:\n if not line.count(label):\n self.status = \"Invalid CSV header\"\n return False\n\n self.status = \"Valid\"\n return True\n\n def read_csv(self, path):\n # Open CSV log file from local disk\n log_file = open(path, \"r\")\n\n # Initialize container and read data from log\n self.data = list()\n self.data.extend(reader(log_file))\n log_file.close()\n\n # Append additional column to data array - Seconds from start\n self.data[0].append(\"secFromStart\")\n\n # Obtain indexes for each column\n self.sec_index = self.index(\"secFromStart\")\n self.ts_index = self.index(\"timeStamp\")\n self.et_index = self.index(\"elapsed\")\n self.lt_index = self.index(\"Latency\")\n self.b_index = self.index(\"bytes\")\n self.lbl_index = self.index(\"label\")\n self.err_index = self.index(\"success\")\n self.vu_index = self.index(\"allThreads\")\n\n # New arrays for sample and transaction labels\n self.labels = list()\n self.transactions = list()\n\n # Time borders\n start_time = long(self.data[1][self.ts_index])\n self.start = 0\n self.start_time = 0\n self.end_time = 0\n\n # Parse data array\n for row in range(1, len(self.data)):\n # Calculate additional column value - Seconds from start\n current_time = long(self.data[row][self.ts_index])\n self.data[row].append(int((current_time - start_time) / 1000))\n # Transform string values to integer type\n self.data[row][self.et_index] = int(self.data[row][self.et_index])\n self.data[row][self.lt_index] = int(self.data[row][self.lt_index])\n self.data[row][self.b_index] = int(self.data[row][self.b_index]) / 1024\n try:\n self.data[row][self.vu_index] = int(self.data[row][self.vu_index])\n except:\n None\n # Update end time\n if self.end_time < self.data[row][-1]:\n self.end_time = self.data[row][-1]\n # Update list of labels\n if not self.data[row][self.lbl_index] in self.labels:\n self.labels.append(self.data[row][self.lbl_index])\n # Time borders\n self.end = self.end_time\n\n def validate_xml(self, path):\n # Basic XML parsing\n try:\n self.tree = etree.parse(path)\n except etree.XMLSyntaxError as e:\n self.status = str(e)\n return False\n\n # Prepare DTD file\n schema = '''\n \n \n \n \n \n '''\n\n dtd_filename = os.getcwd() + '/temp_dtd.xml'\n dtd_file = open(dtd_filename, 'w')\n dtd_file.write(schema)\n dtd_file.close()\n\n dtd = etree.DTD(dtd_filename)\n os.remove(dtd_filename)\n\n # DTD Validation\n if dtd.validate(self.tree):\n self.status = \"Valid\"\n return True\n else:\n self.status = \"XML validation failed (DTD)\"\n return False\n\n def read_xml(self):\n # Data container\n self.data = list()\n\n # Data header\n self.data.append((\n \"timeStamp\", \"elapsed\", \"Latency\", \"bytes\",\n \"label\", \"success\",\n \"allThreads\", \"secFromStart\",\n \"type\"))\n\n # Time borders\n start_time = 0\n\n # Parse data array\n for sample in self.tree.findall(\"sample\"):\n # Transaction level\n row = list()\n # Append 
column to transaction row for each attribute\n row.append(long(sample.get(\"ts\")))\n row.append(long(sample.get(\"t\")))\n row.append(long(sample.get(\"lt\")))\n row.append(long(sample.get(\"by\")) / 1024)\n row.append(sample.get(\"lb\"))\n row.append(sample.get(\"s\"))\n row.append(int(sample.get(\"na\")))\n\n # Set start time\n if not start_time:\n start_time = row[0]\n\n row.append(int((row[0] - start_time) / 1000))\n row.append(\"sample\")\n\n # HTTP sample level\n elapsedTime = 0\n latency = 0\n\n for httpSample in sample.getchildren():\n subRow = list()\n # Append column to sample row for each attribute\n subRow.append(long(httpSample.get(\"ts\")))\n subRow.append(long(httpSample.get(\"t\")))\n subRow.append(long(httpSample.get(\"lt\")))\n subRow.append(long(httpSample.get(\"by\")) / 1024)\n subRow.append(httpSample.get(\"lb\"))\n subRow.append(httpSample.get(\"s\"))\n subRow.append(int(httpSample.get(\"na\")))\n subRow.append(int((subRow[0] - start_time) / 1000))\n subRow.append(\"httpSample\")\n\n # Append data to global array\n self.data.append(subRow)\n\n # Add sample time and latency to the current transaction\n elapsedTime += subRow[1]\n latency += subRow[2]\n\n # Update transaction time and latency\n row[1] = elapsedTime\n row[2] = latency\n\n # Append data to global array\n self.data.append(row)\n\n # Obtain indexes for each column\n self.sec_index = self.index(\"secFromStart\")\n self.ts_index = self.index(\"timeStamp\")\n self.et_index = self.index(\"elapsed\")\n self.lt_index = self.index(\"Latency\")\n self.b_index = self.index(\"bytes\")\n self.lbl_index = self.index(\"label\")\n self.err_index = self.index(\"success\")\n self.vu_index = self.index(\"allThreads\")\n self.type_index = self.index(\"type\")\n\n # New arrays for sample and transaction labels\n self.labels = list()\n self.transactions = list()\n\n # Time borders\n self.start_time = 0\n self.start = 0\n self.end_time = 0\n\n # Determine the end time and collect sample and transaction labels\n for row in range(1, len(self.data)):\n if self.end_time < self.data[row][self.sec_index]:\n self.end_time = self.data[row][self.sec_index]\n if self.data[row][self.type_index] == \"httpSample\":\n if not self.data[row][self.lbl_index] in self.labels:\n self.labels.append(self.data[row][self.lbl_index])\n else:\n if not self.data[row][self.lbl_index] in self.transactions:\n self.transactions.append(self.data[row][self.lbl_index])\n\n # Time borders\n self.end = self.end_time\n\n def index(self, column):\n \"\"\"Return numerical index for string value (key-value hash)\"\"\"\n for i in range(len(self.data[0])):\n if self.data[0][i] == column:\n return i\n\n def log_agg(self, time_int, label, mode):\n \"\"\"Calculate and average performance metrics (set by 'mode' parameter)\n for specified transaction label and time interval.\"\"\"\n prev_step = self.start\n next_step = prev_step + time_int\n\n points = dict()\n points[prev_step] = 0\n\n # Poor algorithm for start time calculation - to fix!!!\n for i in range(1, len(self.data)):\n if self.data[i][self.sec_index] >= prev_step:\n row = i\n break\n\n # Calculation of data points\n count = 0\n while prev_step < self.end and row < len(self.data):\n # Check whether timestamp in current interval\n if self.data[row][self.sec_index] < next_step:\n # Is transaction metric?\n if self.data[row][self.lbl_index] == label:\n # Calculate point for each mode (aka metric)\n if mode == 'bpt':\n points[prev_step] += self.data[row][self.b_index]\n elif mode == 'art':\n points[prev_step] += self.data[row][self.et_index]\n count += 1\n elif 
mode == 'lat':\n points[prev_step] += self.data[row][self.lt_index]\n count += 1\n elif mode == 'rpt':\n points[prev_step] += 1\n elif mode == 'err' or mode == 'errc':\n if self.data[row][self.err_index] == 'false':\n points[prev_step] += 1\n # Or aggregative metric?\n elif mode == 'err_total' or mode == 'errc_total':\n if self.data[row][self.err_index] == 'false':\n points[prev_step] += 1\n elif mode == 'bpt_total':\n try:\n if self.data[row][self.type_index] == 'sample':\n points[prev_step] += self.data[row][self.b_index]\n except:\n points[prev_step] += self.data[row][self.b_index]\n elif mode == 'rpt_total':\n points[prev_step] += 1\n elif mode == 'vusers':\n points[prev_step] = self.data[row][self.vu_index]\n row += 1\n else:\n # Finalize averaging\n if mode == 'errc' or mode == 'errc_total':\n points[next_step] = points[prev_step]\n elif mode == 'vusers':\n None\n elif mode == 'art' or mode == 'lat':\n if count:\n points[prev_step] /= count\n count = 0\n if next_step < self.end:\n points[next_step] = 0\n else:\n points[prev_step] /= (time_int * 1.0)\n if next_step < self.end:\n points[next_step] = 0\n # Next time interval\n prev_step = next_step\n next_step += time_int\n return points\n\n def trend(self, array=list()):\n \"\"\"Smooth graph using moving average algorithm\"\"\"\n ma = list()\n for i in range(5):\n ma.append(array[i])\n for i in range(5, len(array) - 5):\n smoothed = 0\n for j in range(i - 5, i + 5):\n smoothed += array[j] / 10\n ma.append(smoothed)\n for i in range(len(array) - 5, len(array)):\n ma.append(array[i])\n return ma\n\n def export2csv(self, path):\n \"\"\"Convert XML log to CSV format\"\"\"\n log_file = open(path, \"wb\")\n output = writer(log_file)\n\n for row in range(len(self.data)):\n current_row = (\n self.data[row][self.ts_index],\n self.data[row][self.et_index],\n self.data[row][self.lbl_index],\n self.data[row][self.err_index],\n self.data[row][self.b_index],\n self.data[row][self.vu_index],\n self.data[row][self.lt_index]\n )\n output.writerow(current_row)\n\n log_file.close()\n\n def plot(self, graph='bpt_total', time_int=30, label=None, l_opt=False,\n ttl=None, trend=False, pnts=False):\n \"\"\"Check whether 'Legend' is set and customize plot mode\"\"\"\n if l_opt:\n ax = pylab.subplot(2, 1, 1)\n else:\n ax = pylab.subplot(1, 1, 1)\n\n # Set graph title\n pylab.title(ttl)\n\n # Extract data points for specified time interval, transaction label\n # and graph type\n points = self.log_agg(time_int, label, graph)\n\n # Adjust range\n throughput_coeff = 1\n time_coeff = 1\n if self.throughput_range and graph.count('bpt'):\n for key, value in points.items():\n points[key] /= 1024.0\n if self.time_range and (graph.count('lat') or graph.count('art')):\n for key, value in points.items():\n points[key] /= 1000.0\n\n # Set graph label\n if graph == 'bpt_total':\n label = 'Total Throughput'\n elif graph == 'rpt_total':\n label = 'Total Hits'\n elif graph == 'err_total':\n label = 'Total Error Rate'\n elif graph == 'errc_total':\n label = 'Total Error Count'\n\n # Initializes data points arrays\n x = list()\n y = list()\n\n for key in sorted(points.keys()):\n # Defines time value (X axis)\n days = key / 86400\n hours = (key - 86400 * days) / 3600\n minutes = (key - 86400 * days - 3600 * hours) / 60\n seconds = key - 86400 * days - 3600 * hours - 60 * minutes\n days += 1\n x.append(datetime(1970, 1, days, hours, minutes, seconds))\n # Defines time value (Y axis)\n y.append(points[key])\n\n # Check whether 'Points' is set and customize graph\n if pnts:\n 
pylab.plot(x, y, linestyle='solid', marker='.', markersize=5,\n label=label, linewidth=0.5)\n else:\n pylab.plot(x, y, label=label, linewidth=0.5)\n\n # Check whether 'Trend' is set and customize graph\n if trend:\n pylab.plot(x, self.trend(y), label=label + ' (Trend)', linewidth=1)\n\n # Activate grid mode\n pylab.grid(True)\n\n # Evaluate time markers\n max_min = self.end / 60\n min_min = self.start / 60\n\n time_int = (int((max_min - min_min) / 10.0)) / 10 * 10\n\n if not time_int:\n if max_min > 75:\n time_int = 10\n else:\n time_int = 5\n\n if time_int > 30:\n time_int = 60\n\n if time_int <= 60:\n pylab.xlabel('Elapsed time (hh:mm)')\n ax.xaxis.set_major_locator(\n MinuteLocator(arange(0, max_min, time_int))\n )\n ax.xaxis.set_minor_locator(\n MinuteLocator(arange(0, max_min, time_int / 5))\n )\n ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))\n else:\n pylab.xlabel('Elapsed time (dd;hh:mm)')\n labels = ax.get_xticklabels()\n ax.xaxis.set_major_formatter(DateFormatter('%d;%H:%M'))\n pylab.setp(labels, rotation=0, fontsize=8)\n\n # Check whether 'Legend' is set and customize graph\n if l_opt:\n pylab.legend(bbox_to_anchor=(0, -0.2), loc=2, ncol=1)\n\n\nclass PyLan:\n\n \"\"\"GUI\n \"\"\"\n\n def destroy(self, widget):\n gtk.main_quit()\n\n def __init__(self):\n # Main window\n self.window = gtk.Dialog()\n self.window.connect(\"destroy\", self.destroy)\n self.window.set_title(\"PyLan\")\n self.window.set_border_width(5)\n self.window.set_position(gtk.WIN_POS_CENTER_ALWAYS)\n self.window.show()\n\n # Menubar\n self.menubar = self.get_main_menu(self.window)\n self.window.vbox.pack_start(self.menubar, True, True, 0)\n self.menubar.show()\n\n # Initial options\n self.init = 1\n\n self.throughput_range = False\n self.time_range = False\n self.legend_status = False\n self.trend_status = False\n self.points_status = False\n\n pylab.rcParams['font.size'] = 8\n\n self.dpi = 96\n\n self.title = 'Average Response Time (ms)'\n self.active = 'art'\n\n self.preview()\n\n def get_main_menu(self, window):\n # Menu Items\n self.menu_items = (\n (\"/_File\", None, None, 0, \"<Branch>\"),\n (\"/File/_Open\", \"<control>O\", self.open_log, 0, None),\n (\"/File/_Save Chart\", \"<control>S\", self.save_chart, 0, None),\n (\"/File/Save Log\", None, self.save_log, 0, None),\n (\"/File/sep1\", None, None, 0, \"<Separator>\"),\n (\"/File/Quit\", \"<control>Q\", gtk.main_quit, 0, None),\n (\"/_Chart\", None, None, 0, \"<Branch>\"),\n (\"/Chart/Response Time\", None, self.chart_selector, 0, \"<RadioItem>\"),\n (\"/Chart/Latency\", None, self.chart_selector, 1, \"/Chart/Response Time\"),\n (\"/Chart/Responses per Second\", None, self.chart_selector, 2, \"/Chart/Response Time\"),\n (\"/Chart/Throughput\", None, self.chart_selector, 3, \"/Chart/Response Time\"),\n (\"/Chart/Error Rate\", None, self.chart_selector, 4, \"/Chart/Response Time\"),\n (\"/Chart/Error Count\", None, self.chart_selector, 5, \"/Chart/Response Time\"),\n (\"/Chart/Active Threads\", None, self.chart_selector, 6, \"/Chart/Response Time\"),\n (\"/_Options\", None, None, 0, \"<Branch>\"),\n (\"/Options/Show Legend\", None, self.option_selector, 0, \"<CheckItem>\"),\n (\"/Options/Show Trends\", None, self.option_selector, 1, \"<CheckItem>\"),\n (\"/Options/Show Points\", None, self.option_selector, 2, \"<CheckItem>\"),\n (\"/Options/sep1\", None, None, 0, \"<Separator>\"),\n (\"/Options/Throughput/kB\\/s\", None, self.range_selector, 0, \"<RadioItem>\"),\n (\"/Options/Throughput/MB\\/s\", None, self.range_selector, 1, \"/Options/Throughput/kB\\/s\"),\n (\"/Options/Time/Milliseconds\", None, self.range_selector, 2, \"<RadioItem>\"),\n (\"/Options/Time/Seconds\", None, 
self.range_selector, 3, \"/Options/Time/Milliseconds\"),\n (\"/Options/Resolution/96 dpi\", None, self.dpi_selector, 96, \"<RadioItem>\"),\n (\"/Options/Resolution/72 dpi\", None, self.dpi_selector, 72, \"/Options/Resolution/96 dpi\"),\n (\"/Options/Resolution/120 dpi\", None, self.dpi_selector, 120, \"/Options/Resolution/96 dpi\"),\n (\"/Options/Font Size/8 pt\", None, self.font_selector, 8, \"<RadioItem>\"),\n (\"/Options/Font Size/10 pt\", None, self.font_selector, 10, \"/Options/Font Size/8 pt\"),\n (\"/Options/Font Size/12 pt\", None, self.font_selector, 12, \"/Options/Font Size/8 pt\"),\n )\n\n # Accelerator group\n accel_group = gtk.AccelGroup()\n\n # This function initializes the item factory.\n item_factory = gtk.ItemFactory(gtk.MenuBar, \"<main>
\", accel_group)\n\n # This method generates the menu items. Pass to the item factory\n # the list of menu items\n item_factory.create_items(self.menu_items)\n\n # Attach the new accelerator group to the window.\n window.add_accel_group(accel_group)\n\n # Need to keep a reference to item_factory to prevent its destruction\n self.item_factory = item_factory\n # Finally, return the actual menu bar created by the item factory.\n return item_factory.get_widget(\"
\")\n\n def open_log(self, stub1, stub2):\n # Open file dialog\n dialog = gtk.FileChooserDialog(\"Open JMeter Log File\",\n None,\n gtk.FILE_CHOOSER_ACTION_OPEN,\n (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n\n # File filter\n filter = gtk.FileFilter()\n filter.set_name(\"JMeter Logs\")\n filter.add_pattern(\"*.jtl\")\n filter.add_pattern(\"*.xml\")\n filter.add_pattern(\"*.csv\")\n filter.add_pattern(\"*.log\")\n dialog.add_filter(filter)\n\n filter = gtk.FileFilter()\n filter.set_name(\"All files\")\n filter.add_pattern(\"*\")\n dialog.add_filter(filter)\n\n # Read file name\n response = dialog.run()\n self.filename = dialog.get_filename()\n dialog.destroy()\n\n # Process response\n if response == gtk.RESPONSE_OK:\n # Read log\n self.log = jmlog(self.filename, self.throughput_range,\n self.time_range)\n\n # Actions based on validation status\n if self.log.status == \"Valid\":\n self.window.set_title(\"PyLan - \" + self.filename)\n self.window.vbox.remove(self.table)\n\n self.init = 0\n self.preview()\n else:\n ww = WarnWindow(self.log.status)\n\n def preview(self):\n # Height basis\n shift = 24\n\n # Main canvas\n self.table = gtk.Table(shift + 2, 13, True)\n self.table.show()\n self.table.set_col_spacings(10)\n self.table.set_row_spacings(0)\n self.window.vbox.pack_start(self.table, True, True, 0)\n\n # Granularity (Seconds)\n label = gtk.Label(\"Granularity (seconds):\")\n label.show()\n self.table.attach(label, 0, 2, 0 + shift, 1 + shift)\n\n self.sec = self.time_scale(60)\n self.table.attach(self.sec, 2, 5, 0 + shift, 1 + shift)\n\n # Granularity (Minutes)\n label = gtk.Label(\"Granularity (minutes):\")\n label.show()\n self.table.attach(label, 5, 7, 0 + shift, 1 + shift)\n\n self.min = self.time_scale(21)\n self.table.attach(self.min, 7, 10, 0 + shift, 1 + shift)\n self.min.set_value(1)\n\n # Start time\n label = gtk.Label()\n label.set_markup(\"Start Time:\")\n label.show()\n self.table.attach(label, 0, 1, 1 + shift, 2 + shift)\n label = gtk.Label(\"Hours:\")\n label.show()\n self.table.attach(label, 1, 2, 1 + shift, 2 + shift)\n label = gtk.Label(\"Minutes:\")\n label.show()\n self.table.attach(label, 3, 4, 1 + shift, 2 + shift)\n\n self.spinner_sh = gtk.SpinButton(gtk.Adjustment(0.0, 0.0, 23.0, 1.0, 4.0, 0.0), 0, 0)\n self.spinner_sh.show()\n self.table.attach(self.spinner_sh, 2, 3, 1 + shift, 2 + shift)\n\n self.spinner_sm = gtk.SpinButton(gtk.Adjustment(0.0, 0.0, 59.0, 1.0, 10.0, 0.0), 0, 0)\n self.spinner_sm.show()\n self.table.attach(self.spinner_sm, 4, 5, 1 + shift, 2 + shift)\n\n # End time\n label = gtk.Label()\n label.set_markup(\"End Time:\")\n label.show()\n self.table.attach(label, 5, 6, 1 + shift, 2 + shift)\n label = gtk.Label(\"Hours:\")\n label.show()\n self.table.attach(label, 6, 7, 1 + shift, 2 + shift)\n label = gtk.Label(\"Minutes:\")\n label.show()\n self.table.attach(label, 8, 9, 1 + shift, 2 + shift)\n\n if not self.init:\n self.spinner_eh = gtk.SpinButton(gtk.Adjustment(int(self.log.end_time / 3600), 0.0, 23.0, 1.0, 4.0, 0.0), 0, 0)\n else:\n self.spinner_eh = gtk.SpinButton(gtk.Adjustment(0.0, 0.0, 23.0, 1.0, 4.0, 0.0), 0, 0)\n self.spinner_eh.show()\n self.table.attach(self.spinner_eh, 7, 8, 1 + shift, 2 + shift)\n\n if not self.init:\n self.spinner_em = gtk.SpinButton(gtk.Adjustment(int((self.log.end_time - int(self.log.end_time / 3600) * 3600) / 60), 0.0, 59.0, 1.0, 5.0, 0.0), 0, 0)\n else:\n self.spinner_em = gtk.SpinButton(gtk.Adjustment(0, 0.0, 59.0, 1.0, 5.0, 0.0), 0, 
0)\n self.spinner_em.show()\n self.table.attach(self.spinner_em, 9, 10, 1 + shift, 2 + shift)\n\n # List of Labels\n self.scrolled_window = self.label_win()\n self.table.attach(self.scrolled_window, 10, 13, 0, shift + 2)\n\n # Refresh chart\n if not self.init:\n self.refresh(None, None)\n\n def refresh(self, stub1, stub2):\n # Refresh current chart\n if not self.init:\n try:\n time_int = int(self.sec.get_value() + 60 * self.min.get_value())\n except:\n time_int = 60\n\n end_point = self.spinner_em.get_value() * 60 + \\\n self.spinner_eh.get_value() * 3600\n start_point = self.spinner_sm.get_value() * 60 + \\\n self.spinner_sh.get_value() * 3600\n if end_point < self.log.end_time:\n self.log.end = max(300, int(end_point))\n else:\n self.log.end = self.log.end_time\n if start_point < self.log.end:\n self.log.start = int(start_point)\n else:\n self.log.start = max(0, int(self.log.end) - 300)\n\n if time_int:\n pylab.clf()\n if self.active == 'vusers':\n self.log.plot(self.active, time_int, None, False,\n self.title, False, False)\n else:\n for label in self.label_list:\n self.log.plot(self.active, time_int, label,\n self.legend_status, self.title,\n self.trend_status, self.points_status)\n if self.total_status and self.active != 'art' and \\\n self.active != 'lat':\n self.log.plot(self.active + '_total', time_int, None,\n self.legend_status, self.title,\n self.trend_status, self.points_status)\n pylab.savefig(\"preview.png\", dpi=self.dpi, transparent=False,\n format=\"png\")\n\n try:\n self.table.remove(self.button)\n except:\n None\n\n # Image object\n self.image = gtk.Image()\n self.image.set_from_file(\"preview.png\")\n self.image.show()\n os.remove(\"preview.png\")\n\n # Button container\n self.button = gtk.Button()\n self.button.add(self.image)\n self.button.show()\n self.button.connect(\"clicked\", self.refresh, \"1\")\n\n self.table.attach(self.button, 0, 10, 0, 24)\n\n def save_chart(self, stub1, stub2):\n \"\"\"Save chart to PNG file\"\"\"\n if not self.init:\n dialog = gtk.FileChooserDialog(\"Save...\",\n None,\n gtk.FILE_CHOOSER_ACTION_SAVE,\n (gtk.STOCK_CANCEL,\n gtk.RESPONSE_CANCEL,\n gtk.STOCK_SAVE, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n filter = gtk.FileFilter()\n filter.set_name(\"PNG Images\")\n filter.add_pattern(\"*.png\")\n dialog.add_filter(filter)\n response = dialog.run()\n if response == gtk.RESPONSE_OK:\n if dialog.get_filename()[-4:] != '.png':\n filename = dialog.get_filename() + '.png'\n else:\n filename = dialog.get_filename()\n pylab.savefig(filename, dpi=self.dpi, transparent=False,\n format=\"png\")\n dialog.destroy()\n\n def save_log(self, stub1, stub2):\n \"\"\"Save log in CSV format\"\"\"\n if not self.init:\n dialog = gtk.FileChooserDialog(\"Save...\",\n None,\n gtk.FILE_CHOOSER_ACTION_SAVE,\n (gtk.STOCK_CANCEL,\n gtk.RESPONSE_CANCEL,\n gtk.STOCK_SAVE, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n filter = gtk.FileFilter()\n filter.set_name(\"JMeter Logs\")\n filter.add_pattern(\"*.jtl\")\n filter.add_pattern(\"*.csv\")\n filter.add_pattern(\"*.log\")\n dialog.add_filter(filter)\n response = dialog.run()\n if response == gtk.RESPONSE_OK:\n if dialog.get_filename()[-4:] != '.jtl':\n filename = dialog.get_filename() + '.jtl'\n else:\n filename = dialog.get_filename()\n self.log.export2csv(filename)\n dialog.destroy()\n\n def label_win(self):\n \"\"\"Sub-window with list of labels and transactions\"\"\"\n\n # Initial data\n self.label_list = list()\n self.total_status = False\n\n # Window object\n 
scrolled_window = gtk.ScrolledWindow()\n scrolled_window.set_border_width(0)\n scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n scrolled_window.show()\n\n # Container\n table = gtk.Table(30, 3, True)\n table.show()\n scrolled_window.add_with_viewport(table)\n\n # Populate container\n if not self.init:\n row = 1\n\n label = gtk.Label()\n label.set_markup(\"Transactions:\")\n label.show()\n table.attach(label, 0, 3, 0, 1)\n\n for label in sorted(self.log.transactions):\n button = gtk.CheckButton(label)\n button.connect(\"clicked\", self.label_options, label)\n button.set_alignment(0, 0.5)\n button.show()\n table.attach(button, 0, 3, row, row + 1)\n row += 1\n\n label = gtk.Label()\n label.set_markup(\"Samples:\")\n label.show()\n table.attach(label, 0, 3, row, row + 1)\n\n row += 1\n\n for label in sorted(self.log.labels):\n button = gtk.CheckButton(label)\n button.connect(\"clicked\", self.label_options, label)\n button.set_alignment(0, 0.5)\n button.show()\n table.attach(button, 0, 3, row, row + 1)\n row += 1\n button = gtk.CheckButton('Total')\n button.connect(\"clicked\", self.total)\n button.show()\n table.attach(button, 0, 3, row, row + 1)\n\n return scrolled_window\n\n def time_scale(self, scale=60):\n \"\"\"Unknown method\"\"\"\n Hscale = gtk.HScale(gtk.Adjustment(0, 0, scale, 1, 1, 1))\n Hscale.set_update_policy(gtk.UPDATE_CONTINUOUS)\n Hscale.set_digits(0)\n Hscale.set_value_pos(gtk.POS_LEFT)\n Hscale.set_draw_value(True)\n Hscale.show()\n\n return Hscale\n\n def label_options(self, widget, label=None):\n \"\"\"Callback for checkboxes\"\"\"\n if widget.get_active():\n self.label_list.append(label)\n else:\n self.label_list.remove(label)\n\n def total(self, widget):\n \"\"\"Callback for checkbox\"\"\"\n self.total_status = not self.total_status\n\n def option_selector(self, option, stub):\n \"\"\"Update settings/options\"\"\"\n if option == 0:\n self.legend_status = not self.legend_status\n elif option == 1:\n self.trend_status = not self.trend_status\n elif option == 2:\n self.points_status = not self.points_status\n\n def range_selector(self, option, stub):\n # Throughput\n if option == 0:\n self.throughput_range = False\n if self.title.count(\"Throughput\"):\n self.title = 'Throughput (kB/s)'\n if option == 1:\n self.throughput_range = True\n if self.title.count(\"Throughput\"):\n self.title = 'Throughput (MB/s)'\n # Time\n elif option == 2:\n self.time_range = False\n if self.title.count(\"Response Time\"):\n self.title = 'Average Response Time (ms)'\n elif self.title.count(\"Latency\"):\n self.title = 'Average Latency (ms)'\n elif option == 3:\n self.time_range = True\n if self.title.count(\"Response Time\"):\n self.title = 'Average Response Time (s)'\n elif self.title.count(\"Latency\"):\n self.title = 'Average Latency (s)'\n if not self.init:\n self.log.throughput_range = self.throughput_range\n self.log.time_range = self.time_range\n\n def dpi_selector(self, option, stub):\n \"\"\"Update DPI settings\"\"\"\n self.dpi = option\n\n def font_selector(self, option, stub):\n \"\"\"Update Font settings\"\"\"\n pylab.rcParams['font.size'] = option\n\n def chart_selector(self, chart_type, stub):\n \"\"\"Set chart title and type\"\"\"\n if chart_type == 0:\n if not self.time_range:\n self.title = 'Average Response Time (ms)'\n else:\n self.title = 'Average Response Time (s)'\n self.active = 'art'\n elif chart_type == 1:\n if not self.time_range:\n self.title = 'Average Latency (ms)'\n else:\n self.title = 'Average Latency (s)'\n self.active = 'lat'\n elif 
chart_type == 2:\n self.title = 'Responses per Second'\n self.active = 'rpt'\n elif chart_type == 3:\n if not self.throughput_range:\n self.title = 'Throughput (kB/s)'\n else:\n self.title = 'Throughput (MB/s)'\n self.active = 'bpt'\n elif chart_type == 4:\n self.title = 'Error Rate'\n self.active = 'err'\n elif chart_type == 5:\n self.title = 'Error Count'\n self.active = 'errc'\n elif chart_type == 6:\n self.title = 'Active Threads'\n self.active = 'vusers'\n\n\nclass ProgressBar:\n\n \"\"\"Obsolete class\"\"\"\n\n def __init__(self):\n \"\"\"Create the ProgressBar\"\"\"\n self.progress = gtk.Window(gtk.WINDOW_POPUP)\n self.progress.set_position(gtk.WIN_POS_CENTER_ALWAYS)\n self.progress.set_border_width(10)\n self.progress.show()\n\n self.bar = gtk.ProgressBar()\n self.bar.show()\n self.progress.add(self.bar)\n\n\nclass WarnWindow:\n\n \"\"\"Warnings\"\"\"\n\n def __init__(self, status):\n md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,\n gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE, status)\n md.run()\n md.destroy()\n\n\nif __name__ == '__main__':\n PyLan()\n gtk.main()\n","sub_path":"pylan.py","file_name":"pylan.py","file_ext":"py","file_size_in_byte":38131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"408078775","text":"import requests\nimport time\nimport json\nfrom os.path import dirname, abspath\n\nclass Transaction(object):\n def __init__(self):\n self.custom_timers = {}\n \n self.post_body = {'species': [\"Ceratotherium simum\", \"Bison bison bison\"]}\n\n \n def run(self):\n start_timer = time.time()\n jsonPayload = json.dumps(self.post_body)\n response = requests.post(\"http://phylo.cs.nmsu.edu:5013/phylotastic_ws/sd/eol/habitat_conservation\", data=jsonPayload, headers={'content-type': 'application/json'})\n latency = time.time() - start_timer\n\n self.custom_timers['Latency'] = latency\n assert (response.status_code == 200), 'Bad Response: HTTP %s' % response.status_code\n \n\nif __name__ == '__main__':\n trans = Transaction()\n trans.run()\n #print trans.custom_timers\n","sub_path":"QoS/ws_28/test_scripts/intensive_user.py","file_name":"intensive_user.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"279827987","text":"from django.shortcuts import render, reverse\nfrom .form import SignUpForm\nfrom django.contrib.auth import logout\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic.base import View\nfrom django.contrib import auth\nfrom django.urls import reverse\nfrom django.views.generic import ListView, DetailView\nfrom .models import Pokemon\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .form import AddPokemon\n\nclass PostsListView(ListView):\n model = Pokemon\n context_object_name = 'content'\n template_name = 'main/index.html'\n\n def get_queryset(self):\n content = Pokemon.objects.all()\n # Paginate the list, 5 items per page\n paginator = Paginator(content, 5)\n page = self.request.GET.get('page')\n try:\n content = paginator.page(page)\n except PageNotAnInteger:\n content = paginator.page(1)\n except EmptyPage:\n content = paginator.page(paginator.num_pages)\n return content\n\nclass PostDetailView(DetailView):\n model = Pokemon\n template_name = 'main/detail.html'\n\n\ndef register(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n return render(request, 'main/login.html')\n else:\n form = 
SignUpForm()\n return render(request, 'main/register.html', {'form': form})\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n return HttpResponseRedirect(reverse('main:index'))\n else:\n # Show the error page\n return HttpResponseRedirect(\"/pokemon/login\")\n else:\n return render(request, \"main/login.html\")\n\nclass LogoutView(View):\n def get(self, request):\n # Log out the user who requested this view.\n logout(request)\n\n # Then redirect the user to the home page.\n return HttpResponseRedirect(\"/pokemon/\")\n\ndef delete(request, pk, mark):\n pokemon = Pokemon.objects.get(pk=pk)\n\n if mark == '1':\n pokemon.weigh = 0\n pokemon.save()\n if mark == '2':\n pokemon.gender = '$$$'\n pokemon.save()\n if mark == '3':\n pokemon.type_p = \"$$$\"\n pokemon.save()\n\n return HttpResponseRedirect(reverse('main:index'))\n\n\n\n\n\ndef add_pokemon(request):\n if request.method == \"POST\":\n form = AddPokemon(request.POST)\n # check whether it's valid:\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"main:index\"))\n\n else:\n form = AddPokemon()\n\n return render(request, 'main/add.html', {'form': form})\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pokemon/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"63566727","text":"from controller import Controller\nimport Xbox\nimport time\n\nxboxCont = Xbox.XboxController(\n controllerCallBack=None,\n joystickNo=0,\n deadzone=0.1,\n scale=1,\n invertYAxis=False)\nhornby = Controller()\n\n\n# def reverse_call_back(value):\n# hornby.parse_instruction(2, 1)\n\n# xboxCont.setupControlCallback(\n# xboxCont.XboxControls.Y,\n# reverse_call_back\n# )\n\ndef control_xbox(min_speed=0, volts=0):\n hornby.parse_instruction(0, 255)\n if min_speed == 0:\n print(\"Enter train minimum speed (0-255):\")\n min_speed = int(input(\">>\"))\n if volts == 0:\n print(\"Enter train voltage (0-255):\")\n volts = int(input(\">>\"))\n range_delay = 255 - min_speed\n hornby.parse_instruction(1, min_speed)\n hornby.parse_instruction(0, volts)\n print(\"Ready\")\n off = True\n lock = False\n while True:\n if xboxCont.RTRIGGER > 0:\n hornby.parse_instruction(1, int(xboxCont.RTRIGGER * range_delay + min_speed))\n elif xboxCont.RTRIGGER == 0:\n hornby.parse_instruction(1, min_speed)\n if xboxCont.Y == 1:\n hornby.parse_instruction(2, 1)\n time.sleep(0.2)\n if xboxCont.BACK == 1:\n hornby.parse_instruction(1, 1)\n time.sleep(0.5)\n break\n if xboxCont.RB == 1:\n if min_speed + 5 > 255:\n min_speed = 255\n time.sleep(0.2)\n else:\n min_speed += 5\n time.sleep(0.2)\n # if xboxCont.LB == 1:\n # if min_speed - 5 < 0:\n # min_speed = 0\n # else:\n # min_speed -= 5\n\n\ndef test_min_speed():\n print(\"Enter train voltage (0-255):\")\n volts = int(input(\">>\"))\n hornby.parse_instruction(0, volts)\n\n for i in range(0, 255):\n hornby.parse_instruction(1, i)\n print(\"Speed: %d\" % (i))\n if xboxCont.A == 1:\n return int(i * 0.60), volts\n\n if xboxCont.BACK == 1:\n time.sleep(0.5)\n return 0\n time.sleep(0.1)\n return 0, volts\n\n\ndef main():\n try:\n min_speed = 0\n volts = 0\n xboxCont.start()\n port_selected = 
False\n while not port_selected:\n try:\n hornby.select_port()\n port_selected = True\n except IndexError:\n print(\"Unknown Device\")\n except ValueError:\n print(\"Enter number of port to use\")\n print(\"Press Start to control train, or X to test train minimum speed\")\n while True:\n print(\"Menu\")\n if xboxCont.START == 1:\n print(\"Starting Controller\")\n control_xbox(min_speed, volts)\n if xboxCont.X == 1:\n print(\"Testing Speed\")\n try:\n min_speed, volts = test_min_speed()\n if min_speed > 0:\n print(\"Min speed set to %d\" % min_speed)\n hornby.parse_instruction(1, min_speed)\n hornby.parse_instruction(0, volts)\n except TypeError:\n print(\"User Canceled\")\n hornby.parse_instruction(1, 1)\n hornby.parse_instruction(0, 255)\n\n if xboxCont.BACK == 1:\n xboxCont.stop()\n hornby.arduino.SerialDevice.close()\n exit()\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n xboxCont.stop()\n try:\n hornby.arduino.SerialDevice.close()\n except AttributeError:\n exit()\n exit()\nif __name__ == '__main__':\n main()\n","sub_path":"XboxControl.py","file_name":"XboxControl.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"453197057","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright 2020 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"Tool for generating go code from automation.js\n\nUsed to create src/chromiumos/tast/local/chrome/ui/constants.go.\n\nUsage example:\n# ./generate_automation_constants.py \\\n /path/to/chromium/src/third_party/closure_compiler/externs/automation.js > \\\n /path/to/chromeos/src/platform/tast-tests/src/chromiumos/tast/local/chrome/ui/constants.go\n\nMake sure to apply gofmt to the output of this script.\n\nTODO(hirokisato): Currently this script reads closure compiler's definition js\nfile, because it's easy to parse. But the automation IDL file is the source of\ntruth.\n\"\"\"\n\nimport sys\nimport re\n\nHEADER = \"\"\"\\\n// Copyright 2020 The Chromium OS Authors. 
All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n// This file is generated by `tools/generate_automation_constants.py`.\n\npackage ui\n\"\"\"\n\n# Some hardcoded attributes.\nFOOTER = \"\"\"\\\n// CheckedState describes the checked state of a chrome.automation AutomationNode.\ntype CheckedState string\n\n// As defined in https://chromium.googlesource.com/chromium/src/+/refs/heads/master/extensions/common/api/automation.idl\nconst (\n\\tCheckedStateTrue CheckedState = \"true\"\n\\tCheckedStateFalse CheckedState = \"false\"\n\\tCheckedStateMixed CheckedState = \"mixed\"\n)\n\"\"\"\n\n\ndef to_camel_case(snake_case_str):\n \"\"\"Converts a SNAKE_CASE string into a CamelCase string.\"\"\"\n return ''.join(s.lower().title() for s in snake_case_str.split('_'))\n\n\ndef print_definitions(lines, defined_type_name, go_type_name, description):\n \"\"\"Prints generated Golang type definitions.\n\n Args:\n lines: List of strings that is read from a js file.\n defined_type_name: Enum type name in a source file which is parsed here.\n go_type_name: Enum type name to be generated as Golang definition.\n description: A string that is used to generate a Go doc comment.\n \"\"\"\n item_pattern = re.compile(r'\\s*(\\w*):\\s\\'(\\w*)\\'')\n defs = []\n reading = False\n for l in lines:\n if reading:\n match = item_pattern.match(l)\n if not match:\n reading = False\n break\n defs.append((to_camel_case(match.group(1)), match.group(2)))\n elif l.startswith('chrome.automation.%s' % defined_type_name):\n reading = True\n\n print('// %s describes %s.' % (go_type_name, description))\n print('type %s string' % go_type_name)\n print()\n print(\n '// As defined in https://chromium.googlesource.com/chromium/src/+/refs/heads/master/extensions/common/api/automation.idl'\n )\n print('const (')\n\n for r in defs:\n print('\\t%s%s %s = \"%s\"' % (defined_type_name, r[0], go_type_name, r[1]))\n\n print(')')\n print()\n\n\ndef main(argv):\n f = open(argv[1], 'r')\n lines = f.readlines()\n f.close()\n\n print(HEADER)\n print_definitions(lines, 'StateType', 'StateType',\n 'characteristics of a chrome.automation AutomationNode')\n print_definitions(lines, 'RoleType', 'RoleType',\n 'the purpose of a chrome.automation AutomationNode')\n print_definitions(lines, 'EventType', 'EventType',\n 'the type of a chrome.automation AutomationEvent')\n print_definitions(\n lines, 'Restriction', 'RestrictionState',\n 'the restriction state of a chrome.automation AutomationNode')\n print(FOOTER)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"tools/generate_automation_constants.py","file_name":"generate_automation_constants.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"572105903","text":"\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom collections import defaultdict\n\nimport cv2\nimport numpy as 
np\n\n\nfrom ..adapters import Adapter\nfrom ..config import ConfigValidator, StringField, NumberField, BoolField, ConfigError\nfrom ..representation import TextDetectionPrediction, CharacterRecognitionPrediction\n\n\nclass TextDetectionAdapter(Adapter):\n __provider__ = 'text_detection'\n prediction_types = (TextDetectionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'pixel_link_out': StringField(\n description=\"Name of layer containing information related to linkage \"\n \"between pixels and their neighbors.\"\n ),\n 'pixel_class_out': StringField(\n description=\"Name of layer containing information related to \"\n \"text/no-text classification for each pixel.\"\n ),\n 'pixel_class_confidence_threshold': NumberField(\n description='confidence threshold for valid segmentation mask',\n optional=True, default=0.8, value_type=float, min_value=0, max_value=1\n ),\n 'pixel_link_confidence_threshold': NumberField(\n description='confidence threshold for valid pixel links',\n optional=True, default=0.8, value_type=float, min_value=0, max_value=1\n ),\n 'min_area': NumberField(\n value_type=int, min_value=0, default=0, optional=True,\n description='minimal area for valid text prediction'\n ),\n 'min_height': NumberField(\n value_type=int, min_value=0, default=0, optional=True,\n description='minimal height for valid text prediction'\n )\n })\n\n return parameters\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.WARN_ON_EXTRA_ARGUMENT)\n\n def configure(self):\n self.pixel_link_out = self.get_value_from_config('pixel_link_out')\n self.pixel_class_out = self.get_value_from_config('pixel_class_out')\n self.pixel_link_confidence_threshold = self.get_value_from_config('pixel_link_confidence_threshold')\n self.pixel_class_confidence_threshold = self.get_value_from_config('pixel_class_confidence_threshold')\n self.min_area = self.get_value_from_config('min_area')\n self.min_height = self.get_value_from_config('min_height')\n\n def process(self, raw, identifiers=None, frame_meta=None):\n results = []\n predictions = self._extract_predictions(raw, frame_meta)\n\n def _input_parameters(input_meta):\n input_shape = next(iter(input_meta.get('input_shape').values()))\n original_image_size = input_meta.get('image_size')\n layout = 'NCHW' if input_shape[1] == original_image_size[2] else 'NHWC'\n\n return original_image_size, layout\n raw_output = zip(identifiers, frame_meta, predictions[self.pixel_link_out], predictions[self.pixel_class_out])\n for identifier, current_frame_meta, link_data, cls_data in raw_output:\n image_size, layout = _input_parameters(current_frame_meta)\n if layout == 'NCHW':\n link_data = link_data.transpose((1, 2, 0))\n cls_data = cls_data.transpose((1, 2, 0))\n new_link_data = link_data.reshape([*link_data.shape[:2], 8, 2])\n new_link_data = self.softmax(new_link_data)\n cls_data = self.softmax(cls_data)\n decoded_rects = self.to_boxes(image_size, cls_data[:, :, 1], new_link_data[:, :, :, 1])\n results.append(TextDetectionPrediction(identifier, decoded_rects))\n\n return results\n\n def mask_to_bboxes(self, mask, image_shape):\n \"\"\" Converts mask to bounding boxes. \"\"\"\n\n def rect_to_xys(rect, image_shape):\n \"\"\" Converts rotated rectangle to points. 
\"\"\"\n\n height, width = image_shape[0:2]\n\n def get_valid_x(x_coord):\n return np.clip(x_coord, 0, width - 1)\n\n def get_valid_y(y_coord):\n return np.clip(y_coord, 0, height - 1)\n\n rect = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])\n points = cv2.boxPoints(rect)\n points = points.astype(np.int0)\n for i_xy, (x_coord, y_coord) in enumerate(points):\n x_coord = get_valid_x(x_coord)\n y_coord = get_valid_y(y_coord)\n points[i_xy, :] = [x_coord, y_coord]\n\n return points\n\n def min_area_rect(contour):\n \"\"\" Returns minimum area rectangle. \"\"\"\n\n (center_x, center_y), (width, height), theta = cv2.minAreaRect(contour)\n return [center_x, center_y, width, height, theta], width * height\n\n image_h, image_w = image_shape[0:2]\n\n bboxes = []\n max_bbox_idx = mask.max()\n mask = cv2.resize(mask, (image_w, image_h), interpolation=cv2.INTER_NEAREST)\n\n for bbox_idx in range(1, max_bbox_idx + 1):\n bbox_mask = (mask == bbox_idx).astype(np.uint8)\n cnts = cv2.findContours(bbox_mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2]\n if np.size(cnts) == 0:\n continue\n cnt = cnts[0]\n rect, rect_area = min_area_rect(cnt)\n\n box_width, box_height = rect[2:-1]\n if min(box_width, box_height) < self.min_height:\n continue\n\n if rect_area < self.min_area:\n continue\n\n xys = rect_to_xys(rect, image_shape)\n bboxes.append(xys)\n\n return bboxes\n\n @staticmethod\n def softmax(logits):\n \"\"\" Returns softmax given logits. \"\"\"\n\n max_logits = np.max(logits, axis=-1, keepdims=True)\n numerator = np.exp(logits - max_logits)\n denominator = np.sum(numerator, axis=-1, keepdims=True)\n\n return numerator / denominator\n\n def to_boxes(self, image_shape, segm_pos_scores, link_pos_scores):\n \"\"\" Returns boxes for each image in batch. \"\"\"\n\n mask = self.decode_image(segm_pos_scores, link_pos_scores)\n mask = np.asarray(mask, np.int32)[...]\n bboxes = self.mask_to_bboxes(mask, image_shape)\n\n return bboxes\n\n def decode_image(self, segm_scores, link_scores):\n \"\"\" Convert softmax scores to mask. \"\"\"\n\n segm_mask = segm_scores >= self.pixel_class_confidence_threshold\n link_mask = link_scores >= self.pixel_link_confidence_threshold\n points = list(zip(*np.where(segm_mask)))\n height, width = np.shape(segm_mask)\n group_mask = dict.fromkeys(points, -1)\n\n def find_parent(point):\n return group_mask[point]\n\n def set_parent(point, parent):\n group_mask[point] = parent\n\n def is_root(point):\n return find_parent(point) == -1\n\n def find_root(point):\n root = point\n update_parent = False\n while not is_root(root):\n root = find_parent(root)\n update_parent = True\n\n if update_parent:\n set_parent(point, root)\n\n return root\n\n def join(point1, point2):\n root1 = find_root(point1)\n root2 = find_root(point2)\n\n if root1 != root2:\n set_parent(root1, root2)\n\n def get_neighbours(x_coord, y_coord):\n \"\"\" Returns 8-point neighbourhood of given point. \"\"\"\n\n return [\n (x_coord - 1, y_coord - 1), (x_coord, y_coord - 1), (x_coord + 1, y_coord - 1),\n (x_coord - 1, y_coord), (x_coord + 1, y_coord),\n (x_coord - 1, y_coord + 1), (x_coord, y_coord + 1), (x_coord + 1, y_coord + 1)\n ]\n\n def is_valid_coord(x_coord, y_coord, width, height):\n \"\"\" Returns true if given point inside image frame. 
\"\"\"\n return 0 <= x_coord < width and 0 <= y_coord < height\n\n def get_all():\n root_map = {}\n\n def get_index(root):\n if root not in root_map:\n root_map[root] = len(root_map) + 1\n return root_map[root]\n\n mask = np.zeros_like(segm_mask, dtype=np.int32)\n for point in points:\n point_root = find_root(point)\n bbox_idx = get_index(point_root)\n mask[point] = bbox_idx\n return mask\n\n for point in points:\n y_coord, x_coord = point\n neighbours = get_neighbours(x_coord, y_coord)\n for n_idx, (neighbour_x, neighbour_y) in enumerate(neighbours):\n if is_valid_coord(neighbour_x, neighbour_y, width, height):\n link_value = link_mask[y_coord, x_coord, n_idx]\n segm_value = segm_mask[neighbour_y, neighbour_x]\n if link_value and segm_value:\n join(point, (neighbour_y, neighbour_x))\n\n mask = get_all()\n return mask\n\n\nclass LPRAdapter(Adapter):\n __provider__ = 'lpr'\n prediction_types = (CharacterRecognitionPrediction,)\n\n def configure(self):\n if not self.label_map:\n raise ConfigError('LPR adapter requires dataset label map for correct decoding.')\n\n def process(self, raw, identifiers=None, frame_meta=None):\n raw_output = self._extract_predictions(raw, frame_meta)\n predictions = raw_output[self.output_blob]\n result = []\n for identifier, output in zip(identifiers, predictions):\n decoded_out = self.decode(output.reshape(-1))\n result.append(CharacterRecognitionPrediction(identifier, decoded_out))\n\n return result\n\n def decode(self, outputs):\n decode_out = str()\n for output in outputs:\n if output == -1:\n break\n decode_out += str(self.label_map[int(output)])\n\n return decode_out\n\n\nclass BeamSearchDecoder(Adapter):\n __provider__ = 'beam_search_decoder'\n prediction_types = (CharacterRecognitionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'beam_size': NumberField(\n optional=True, value_type=int, min_value=1, default=10,\n description=\"Size of the beam to use during decoding.\"\n ),\n 'blank_label': NumberField(\n optional=True, value_type=int, min_value=0, description=\"Index of the CTC blank label.\"\n ),\n 'softmaxed_probabilities': BoolField(\n optional=True, default=False, description=\"Indicator that model uses softmax for output layer \"\n )\n })\n return parameters\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT)\n\n def configure(self):\n if not self.label_map:\n raise ConfigError('Beam Search Decoder requires dataset label map for correct decoding.')\n\n self.beam_size = self.get_value_from_config('beam_size')\n self.blank_label = self.launcher_config.get('blank_label', len(self.label_map))\n self.softmaxed_probabilities = self.get_value_from_config('softmaxed_probabilities')\n\n def process(self, raw, identifiers=None, frame_meta=None):\n raw_output = self._extract_predictions(raw, frame_meta)\n output = raw_output[self.output_blob]\n output = np.swapaxes(output, 0, 1)\n\n result = []\n for identifier, data in zip(identifiers, output):\n if self.softmaxed_probabilities:\n data = np.log(data)\n seq = self.decode(data, self.beam_size, self.blank_label)\n decoded = ''.join(str(self.label_map[char]) for char in seq)\n result.append(CharacterRecognitionPrediction(identifier, decoded))\n return result\n\n @staticmethod\n def decode(probabilities, beam_size=10, blank_id=None):\n \"\"\"\n Decode given output probabilities to sequence of labels.\n Arguments:\n probabilities: The output log probabilities for each time step.\n 
Should be an array of shape (time x output dim).\n beam_size (int): Size of the beam to use during decoding.\n blank_id (int): Index of the CTC blank label.\n Returns the output label sequence.\n \"\"\"\n def make_new_beam():\n return defaultdict(lambda: (-np.inf, -np.inf))\n\n def log_sum_exp(*args):\n if all(a == -np.inf for a in args):\n return -np.inf\n a_max = np.max(args)\n # Use the built-in sum: calling np.sum on a generator is deprecated and unreliable.\n lsp = np.log(sum(np.exp(a - a_max) for a in args))\n\n return a_max + lsp\n\n times, symbols = probabilities.shape\n # Initialize the beam with the empty sequence, a probability of 1 for ending in blank\n # and zero for ending in non-blank (in log space).\n beam = [(tuple(), (0.0, -np.inf))]\n\n for time in range(times):\n # A default dictionary to store the next step candidates.\n next_beam = make_new_beam()\n\n for symbol_id in range(symbols):\n current_prob = probabilities[time, symbol_id]\n\n for prefix, (prob_blank, prob_non_blank) in beam:\n # If we propose a blank, the prefix doesn't change.\n # Only the probability of ending in blank gets updated.\n if symbol_id == blank_id:\n next_prob_blank, next_prob_non_blank = next_beam[prefix]\n next_prob_blank = log_sum_exp(\n next_prob_blank, prob_blank + current_prob, prob_non_blank + current_prob\n )\n next_beam[prefix] = (next_prob_blank, next_prob_non_blank)\n continue\n # Extend the prefix by the new character symbol and add it to the beam.\n # Only the probability of not ending in blank gets updated.\n end_t = prefix[-1] if prefix else None\n next_prefix = prefix + (symbol_id,)\n next_prob_blank, next_prob_non_blank = next_beam[next_prefix]\n if symbol_id != end_t:\n next_prob_non_blank = log_sum_exp(\n next_prob_non_blank, prob_blank + current_prob, prob_non_blank + current_prob\n )\n else:\n # Don't include the previous probability of not ending in blank (prob_non_blank) if the symbol\n # is repeated at the end. The CTC algorithm merges characters not separated by a blank.\n next_prob_non_blank = log_sum_exp(next_prob_non_blank, prob_blank + current_prob)\n\n next_beam[next_prefix] = (next_prob_blank, next_prob_non_blank)\n # If the symbol is repeated at the end, also update the unchanged prefix. This is the merging case.\n if symbol_id == end_t:\n next_prob_blank, next_prob_non_blank = next_beam[prefix]\n next_prob_non_blank = log_sum_exp(next_prob_non_blank, prob_non_blank + current_prob)\n next_beam[prefix] = (next_prob_blank, next_prob_non_blank)\n\n beam = sorted(next_beam.items(), key=lambda x: log_sum_exp(*x[1]), reverse=True)[:beam_size]\n\n best = beam[0]\n\n return best[0]\n","sub_path":"openvino_2019.2.242/deployment_tools/open_model_zoo/tools/accuracy_checker/accuracy_checker/adapters/text_detection.py","file_name":"text_detection.py","file_ext":"py","file_size_in_byte":16077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"626765697","text":"#\n# Copyright (c) 2018 Pilz GmbH & Co. 
KG\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport xml.etree.ElementTree as ET\nimport re\nfrom geometry_msgs.msg import Pose\nfrom math import pi\n\nDEFAULT_VEL = 0.01\nDEFAULT_ACC = 0.01\nDEFAULT_BLEND_RADIUS = 0.01\n\nNAME_STR = \"name\"\nTYPE_STR = \"type\"\nBLEND_RADIUS_STR = \"blend_radius\"\n\nSTART_POS_STR = \"startPos\"\nEND_POS_STR = \"endPos\"\nPLANNING_GROUP_STR = \"planningGroup\"\nLINK_NAME_STR = \"targetLink\"\nINTERIM_POS_STR = \"interimPos\"\nCENTER_POS_STR = \"centerPos\"\nAUXILIARY_POS_STR = \"auxiliaryPos\"\nDEFAULT_LINK_NAME = \"prbt_tcp\"\n\nVEL_STR = \"vel\"\nACC_STR = \"acc\"\n\n_PATH_TO_JOINTS = \"./poses/pos[@name='{pose_name}']/group[@name='{group_name}']/joints\"\n_PATH_TO_POSE = \"./poses/pos[@name='{pose_name}']/group[@name='{group_name}']/xyzQuat\"\n_PATH_TO_PTP = \"./ptps/ptp[@name='{name_of_cmd}']\"\n_PATH_TO_LIN = \"./lins/lin[@name='{name_of_cmd}']\"\n_PATH_TO_CIRC = \"./circs/circ[@name='{name_of_cmd}']\"\n_PATH_TO_SEQUENCE = \"./sequences/sequence[@name='{name_of_cmd}']\"\n\n\nclass XmlTestdataLoader:\n\n def __init__(self, path_to_xml_file):\n self._tree = ET.parse(path_to_xml_file)\n self._root = self._tree.getroot()\n\n # Returns the joint values for the given group and position.\n def get_joints(self, pose_name, group_name):\n joint_node = self._root.find(_PATH_TO_JOINTS.format(pose_name=pose_name, group_name=group_name))\n if joint_node is None:\n return None\n return [eval(elem) for elem in re.split(r'[^\\S\\n\\t]+', joint_node.text)]\n\n def get_pose(self, pose_name, group_name):\n node = self._root.find(_PATH_TO_POSE.format(pose_name=pose_name, group_name=group_name))\n if node is None:\n return None\n pose_list = [eval(elem) for elem in re.split(r'[^\\S\\n\\t]+', node.text)]\n pose = Pose()\n pose.position.x = pose_list[0]\n pose.position.y = pose_list[1]\n pose.position.z = pose_list[2]\n pose.orientation.x = pose_list[3]\n pose.orientation.y = pose_list[4]\n pose.orientation.z = pose_list[5]\n pose.orientation.w = pose_list[6]\n return pose\n\n # Returns the start- and end-position, as well as\n # the velocity and acceleration of the ptp command given by its name.\n # In case of an error 'None' is returned.\n def get_ptp(self, name_of_cmd):\n return self._get_cmd(_PATH_TO_PTP, name_of_cmd)\n\n # Returns the start- and end-position, as well as\n # the velocity and acceleration of the lin command given by its name.\n # In case of an error 'None' is returned.\n def get_lin(self, name_of_cmd):\n return self._get_cmd(_PATH_TO_LIN, name_of_cmd)\n\n # Returns the start-, end- and auxility-position, as well as\n # the velocity and acceleration of the circ command given by its name.\n #\n # Please note: It is also necessary to state if the auxiliary point\n # of the circ command is stored as intermediate or center point.\n def get_circ(self, name_of_cmd, auxiliaray_pos_type=INTERIM_POS_STR):\n cmdRes = self._get_cmd(_PATH_TO_CIRC, name_of_cmd)\n if cmdRes is None:\n return None\n\n cmdNode = 
self._root.find(_PATH_TO_CIRC.format(name_of_cmd=name_of_cmd))\n if cmdNode is None:\n return None\n auxiliaryNode = cmdNode.find(\"./{}\".format(auxiliaray_pos_type))\n if auxiliaryNode is None:\n return None\n\n return {START_POS_STR: cmdRes[START_POS_STR], auxiliaray_pos_type: auxiliaryNode.text,\n END_POS_STR: cmdRes[END_POS_STR], VEL_STR: cmdRes[VEL_STR], ACC_STR: cmdRes[ACC_STR],\n PLANNING_GROUP_STR: cmdRes[PLANNING_GROUP_STR], LINK_NAME_STR: cmdRes[LINK_NAME_STR]}\n\n # Returns a list of dictionaries containing the cmds which make-up the\n # sequence cmd. The cmds in the list are in the order of execution.\n # In case of an error 'None' is returned.\n def get_sequence(self, name_of_cmd):\n # Find the sequence command with the given name\n sequenceNode = self._root.find(_PATH_TO_SEQUENCE.format(name_of_cmd=name_of_cmd))\n if sequenceNode is None:\n return None\n\n # Loop over all blend commands\n sequenceCmds = []\n for sequenceCmdNode in sequenceNode.getchildren():\n cmd_name = sequenceCmdNode.get(NAME_STR)\n if cmd_name is None:\n return None\n\n cmd_type = sequenceCmdNode.get(TYPE_STR)\n if cmd_type is None:\n return None\n\n blend_radius = sequenceCmdNode.get(BLEND_RADIUS_STR, DEFAULT_BLEND_RADIUS)\n sequenceCmds.append({NAME_STR: cmd_name, TYPE_STR: cmd_type, BLEND_RADIUS_STR: blend_radius})\n\n return sequenceCmds\n\n # Returns the start- and end-position, as well as\n # the velocity and acceleration of the given command type, given by its name.\n # The values are returned as dictionaries.\n # In case of an error 'None' is returned.\n def _get_cmd(self, path_to_cmd_type, name_of_cmd):\n cmd_node = self._root.find(path_to_cmd_type.format(name_of_cmd=name_of_cmd))\n if cmd_node is None:\n return None\n\n start_pos_node = cmd_node.find(\"./{}\".format(START_POS_STR))\n if start_pos_node is None:\n return None\n\n end_pos_node = cmd_node.find(\"./{}\".format(END_POS_STR))\n if end_pos_node is None:\n return None\n\n planning_group_node = cmd_node.find(\"./{}\".format(PLANNING_GROUP_STR))\n if planning_group_node is None:\n return None\n\n # Optional parameters\n vel_node = cmd_node.find(\"./{}\".format(VEL_STR))\n vel = DEFAULT_VEL if vel_node is None else float(vel_node.text)\n\n acc_node = cmd_node.find(\"./{}\".format(ACC_STR))\n acc = DEFAULT_ACC if acc_node is None else float(acc_node.text)\n\n target_link_node = cmd_node.find(\"./{}\".format(LINK_NAME_STR))\n target_link_name = DEFAULT_LINK_NAME if target_link_node is None else target_link_node.text\n\n return {START_POS_STR: start_pos_node.text, END_POS_STR: end_pos_node.text, VEL_STR: vel, ACC_STR: acc,\n PLANNING_GROUP_STR: planning_group_node.text, LINK_NAME_STR: target_link_name}\n","sub_path":"pilz_industrial_motion_testutils/src/pilz_industrial_motion_testutils/xml_testdata_loader.py","file_name":"xml_testdata_loader.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"171927836","text":"# Hack112\r\n# Calvin Lui, John Hewitt, Rolando Garcia, Samuel Yip\r\n# Bias Indicator - Logistic Regression Classification\r\n\r\nimport string\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\ndef trainModel():\r\n dataset_x = \"bias_dataset_x.txt\" # dataset on GitHub is a sample of larger dataset\r\n dataset_x = open(dataset_x, \"r\").read()\r\n dataset_y = \"bias_dataset_y.txt\" # dataset on GitHub is a sample of larger dataset\r\n dataset_y = open(dataset_y, \"r\").read()\r\n\r\n X = 
dataset_x.splitlines()\r\n for l in range(len(X)):\r\n X[l] = X[l].split(\" \")\r\n Y = dataset_y.splitlines()\r\n\r\n X = np.array(X)\r\n Y = np.array(Y)\r\n X = X.astype(int)\r\n Y = Y.astype(int)\r\n\r\n # Features: # of Left-Leaning Terms, # of Right-Leaning Terms, Bias of Source (-1, 0, 1)\r\n # Classes: Left-Leaning (0), Right-Leaning (1)\r\n\r\n regressionModel = LogisticRegression(C=1e4)\r\n regressionModel.fit(X, Y)\r\n\r\n return regressionModel\r\n","sub_path":"BiasClassifier.py","file_name":"BiasClassifier.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"351240498","text":"# from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom .import views\nfrom django.conf import settings\n# from django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\n\n\n\nurlpatterns = [\n path('',views.index,name='index'),\n url(r'adminLogin',views.adminLogin,name=\"adminLogin\"),\n url(r'adminpannel',views.adminpannel,name=\"adminpannel\"),\n url(r'^studentDetails/(?P\\d+)/$',views.studentDetails,name=\"student_details\"),\n url(r'register',views.register,name=\"register\"),\n url(r'marksheet',views.marksheet,name=\"marksheet\"), \n \n url(r'home',views.home,name=\"home\"),\n url(r'course',views.course,name=\"course\"),\n url(r'student',views.student,name=\"student\"),\n url(r'subjects',views.subjects,name=\"subjects\"),\n url(r'addSubject',views.addSubjects,name=\"addSubjects\"),\n url(r'subData',views.subData,name=\"subData\"),\n url(r'faculties',views.faculties,name=\"faculties\"), \n url(r'^TeacherDetail/(?P\\d+)/$',views.TeacherDetail,name=\"TeacherDetail\"), \n url(r'timeTable',views.timeTable,name='timeTable'),\n url(r'^users',views.users,name=\"users\"),\n url(r'studentFine',views.studentFine,name=\"studentFine\"),\n url(r'projectReport',views.projectReport,name=\"projectReport\"),\n url(r'syllabus',views.syllabus,name=\"syllabus\"),\n url(r'assignments',views.assignments,name=\"assignments\"), \n\n url(r'assignSubject',views.assignSubject,name=\"assignSubject\"),\n url(r'enterMarks',views.enterMarks,name=\"enterMarks\"), \n url(r'markAttendance',views.markAttendance,name='markAttendance'),\n url(r'attendanceReport',views.attendanceReport,name=\"attendanceReport\"),\n url(r'searchStudent',views.searchStudent,name=\"searchStudent\"),\n \n \n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"kccproject/students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"554138032","text":"import os\r\n\r\n\r\nclass POSCAR():\r\n def __init__(self,path=os.getcwd()):\r\n self.path=path\r\n\r\n def getVolume(self):\r\n \"\"\"\r\n Get unit cell volume\r\n \"\"\"\r\n import numpy as np\r\n f = open(self.path+'/CONTCAR', 'r')\r\n lines = f.readlines()\r\n sf = np.array(str(lines[1]).strip().split()).astype(np.float)\r\n a = np.array(str(lines[2]).strip().split()).astype(np.float) * sf\r\n b = np.array(str(lines[3]).strip().split()).astype(np.float) * sf\r\n c = np.array(str(lines[4]).strip().split()).astype(np.float) * sf\r\n Volume = np.dot(np.cross(a, b), c)\r\n return Volume\r\n\r\n\r\nclass OUTCAR():\r\n def __init__(self,path=os.getcwd()):\r\n self.path = path\r\n self.parameter = {}\r\n\r\n def getParameter(self):\r\n '''\r\n get some parameters in 
OUTCAR\r\n '''\r\n import re\r\n\r\n f = open(self.path+\"/OUTCAR\", 'r')\r\n lines = f.readlines()\r\n for line in lines:\r\n if \"LORBIT\" in line:\r\n LORBIT = re.compile(\r\n r\"(?<=LORBIT =)\\s*\\d+\\.?\\d*\").findall(line)\r\n LORBIT = list(map(int, LORBIT))\r\n if LORBIT != []:\r\n self.parameter['LORBIT'] = LORBIT[0] # print(line)\r\n if \"ISPIN\" in line:\r\n ISPIN = re.compile(\r\n r\"(?<=ISPIN =)\\s*\\d+\\.?\\d*\").findall(line)\r\n ISPIN = list(map(int, ISPIN))\r\n if ISPIN != []:\r\n self.parameter['ISPIN'] = ISPIN[0]\r\n if \"energy without entropy=\" in line:\r\n Enthalpy = re.compile(\r\n r\"(?<=energy without entropy=)\\s*\\-\\d+\\.?\\d*\").findall(line)\r\n Enthalpy = list(map(float, Enthalpy))\r\n if Enthalpy != []:\r\n self.parameter['Enthalpy'] = Enthalpy[0]\r\n\r\n f.close()\r\n # print(self.parameter['ISPIN'])\r\n # test\r\n return self.parameter\r\n\r\n\r\nif __name__ == \"__main__\":\r\n poscar = POSCAR()\r\n print(poscar.getVolume())\r\n\r\n outcar = OUTCAR()\r\n print(outcar.getParameter())\r\n","sub_path":"phonon/OUTPUT.py","file_name":"OUTPUT.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"113034086","text":"'''\n$ sudo pip3 install fastapi\n$ pip install uvicorn[standard]\n$ uvicorn main:app --reload\n'''\nfrom typing import Optional\n\nfrom fastapi import FastAPI\n\nimport requests\n\nimport json\n\napp = FastAPI()\n\nfile='https://raw.githubusercontent.com/ComputationalMethods/Evaluacion_2021-1/main/calificaciones.json'\n\n#JSON SCHEME\n#[{\"student_id\": str,\n# \"Evaluation 1\":{\"value\": int,\n# \"%\": int,\n# \"Description\": str\n# }, \n# ...\n# }\n#]\n\n@app.get(\"/\")\ndef read_item(student_id: str = \"\"):\n '''\n http://clustercien.udea.edu.co:8000/?student_id=1113674432\n '''\n #Real time JSON file\n r=requests.get(file)\n db=r.json()\n #with open(file) as json_file:\n # db=json.load(json_file)\n\n if not student_id:\n \treturn db\n else:\n \treturn [ d for d in db if d.get('student_id')==student_id ]\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"637292071","text":"# Conditional checks: if / else\n\n# Example: we have a product and must decide whether it qualifies for a discount; the rules are as follows:\n# 1. If the product price is greater than 100 yuan, 50 is deducted at checkout;\n\n# The if statement\n# Format: if <condition> :\n# Note: do not type the indentation spaces/tabs by hand\ngoodsPrice = 100\nif goodsPrice > 100 :\n price = 102 - 50\n print('Checkout price: %d'%price)\n#print(\"This line is outside the if block\")\n\n# 2. If the product price is greater than 100 yuan, 50 is deducted at checkout,\n# otherwise a 10 yuan shipping fee is added;\ngoodsPrice = 120\nif goodsPrice > 100:\n price = goodsPrice - 50\n print('Checkout price: %d' % price)\nelse:\n price = goodsPrice + 10\n print('Your checkout price: %d' % price)\nprint(\"This line is outside the if block\")\n\n# 3. Spend 200, save 50;\n# spend 150, save 20\n# spend 100, save 10\n\n# A rather clumsy way to write it -\ngoodsPrice = 120\nif goodsPrice >= 200:\n print(\"1. Checkout price=%d\"%(goodsPrice - 50))\nelse:\n if goodsPrice >= 150:\n print(\"2. Checkout price=%d\" % (goodsPrice - 20))\n else:\n if goodsPrice >= 100:\n print(\"3. Checkout price=%d\" % (goodsPrice - 10))\n else:\n print(\"4. No discount for you!\")\n\n# Recommended style: if <condition> : <block>\n# elif (short for else if) <condition> : <block>\n# else : <block>\n\ngoodsPrice = 280\nif goodsPrice >= 200:\n print(\"1. Checkout price=%d\"%(goodsPrice - 50))\nelif goodsPrice >= 150:\n print(\"2. Checkout price=%d\" % (goodsPrice - 20))\nelif goodsPrice >= 100:\n print(\"3. Checkout price=%d\" % (goodsPrice - 10))\nelif goodsPrice >= 50:\n print(\"4. Checkout price=%d\" % (goodsPrice - 5))\nelse:\n print(\"5. No discount for you!\")\n\n# Using conditional checks in a real-world scenario\ngoods = { 'img': 'https://ss0.bdstatic.com/70cFvHSh_Q1YnxGkpoWK1HF6hhy/it/u=1079555585,1801783759&fm=27&gp=0.jpg',\n 'name' : '急支糖浆', 'guiGe': '500g', 'price': 1801}\nmingZi = goods['name']\njiaGe = goods['price']\n\nif jiaGe/100 > 16:\n print(\"This is a designated discount product\")\nelse:\n print(\"Not a discount product\")\n\n","sub_path":"com/zl/Day-5.py","file_name":"Day-5.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"305720176","text":"#!/usr/bin/env python3\n\n\nfrom lightsweeper.lsapi import *\n\nfrom collections import defaultdict\nimport random\nimport time\n\n\nfrom lightsweeper import lsdisplay\nfrom lightsweeper import lsanimate\n\n\n\ndef makeWaves(origin):\n# This is a generator that produces a list of cells in expanding \"waves\" from the origin\n irange = lambda low, high: range(low, high+1) # inclusive range function\n step = 1\n x = origin[0]\n y = origin[1]\n while True:\n thisWave = list()\n for r in irange(x-step, x+step):\n for c in irange(y-step, y+step):\n if r == x-step or r == x+step or c == y-step or c == y+step: # We only want the edges\n rowCol = (r, c)\n thisWave.append(rowCol)\n yield(thisWave)\n step += 1\n\ndef exploder():\n# This generator returns a sequence of masks that makes a tile look as though it is exploding\n explosionSequence =[(0,0,0),\n (Shapes.DASH, 0,0),\n (Shapes.H, Shapes.DASH,0),\n (Shapes.EIGHT, Shapes.H, Shapes.DASH),\n (Shapes.EIGHT, Shapes.EIGHT, Shapes.H),\n (Shapes.EIGHT, Shapes.EIGHT, Shapes.EIGHT),\n (Shapes.ZERO, Shapes.ZERO, Shapes.ZERO),\n (Shapes.OFF, Shapes.OFF, Shapes.OFF)\n ]\n for mask in explosionSequence:\n yield mask\n\ndef explodeThenThrob():\n# This generator returns a sequence of masks that makes a tile look as though it is exploding\n idx = 0\n while True:\n if idx < len(LSExplosion.explosion):\n mask = LSExplosion.explosion[idx]\n #print(\" explodeThenThrob yields \" + repr(mask))\n idx = idx + 1\n else:\n mask = LSExplosion.bombThrobs[LSExplosion.throbPhase]\n #print(\" explodeThenThrob throbs \" + repr(mask))\n yield mask\n\ndef animateWavefront(dist = 1, mine = (0,0)):\n# This generator returns a sequence of masks to animate a wavefront\n# strength of wavefront depends on dist\n#\n# This generator can make several \"random\" styles of wavefronts\n# that stay consistent for each mine, to represent different kinds of mines.\n# This is disabled because the variety makes the wavefronts too confusing.\n idx = 0\n strongWave = dist<=2\n style = 0 # 1\n #style = 1\n #style = (mine[0] + mine[1]) % 3 # comment out for single style wavefronts\n #print(\" animateWavefront making style \" + repr(style))\n while True:\n if idx < len(LSExplosion.waves):\n if strongWave:\n if style == 0:\n mask = LSExplosion.waves[idx]\n mask = LSExplosion.wiggleWaves[idx]\n else:\n mask = LSExplosion.waves2[idx]\n else:\n if style == 0:\n mask = LSExplosion.weakWaves[idx]\n else:\n mask = LSExplosion.weakWaves2[idx]\n\n #print(\" animateWavefront yields \" + repr(mask))\n idx = idx + 1\n else:\n mask = LSExplosion.blank\n #print(\" animateWavefront blanking now\")\n yield mask\n\n\n# TODO - decide if this should subclass LSFrameGen\nclass LSExplosion:\n#class OBS_LSFrameGen:\n\n blank = (0, 0, 0)\n #colormask = (Shapes.ZERO, 0,0)\n #diffmask = (Shapes.ONE, Shapes.TWO, Shapes.THREE)\n redZero = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n yellowZero = (Shapes.ZERO, Shapes.ZERO, 0)\n violetZero = (Shapes.ZERO, 0, Shapes.ZERO)\n whiteZero = (Shapes.ZERO, Shapes.ZERO, Shapes.ZERO)\n greenZero = (0, 
Shapes.ZERO, 0)\n blueZero = (0, 0, 126)\n cyanZero = (0, Shapes.ZERO, Shapes.ZERO)\n cyanZero = (0, Shapes.ZERO, Shapes.ZERO)\n redH = (Shapes.H, Shapes.OFF, Shapes.OFF)\n redEight = (Shapes.EIGHT, Shapes.OFF, Shapes.OFF)\n whiteEight = (Shapes.EIGHT, Shapes.EIGHT, Shapes.EIGHT)\n yellowEight = (Shapes.EIGHT, Shapes.EIGHT, Shapes.OFF)\n redDash = (Shapes.DASH, Shapes.OFF, Shapes.OFF)\n violetDash = (Shapes.DASH, Shapes.OFF, Shapes.DASH)\n greenDash = (Shapes.OFF, Shapes.DASH, Shapes.OFF)\n yellowDash = (Shapes.DASH, Shapes.DASH, Shapes.OFF)\n blueDash = (Shapes.OFF, Shapes.OFF, Shapes.DASH)\n cyanDash = (Shapes.OFF, Shapes.DASH, Shapes.DASH)\n redX = (Shapes.H, 0, 0)\n yellowEightPlus = (Shapes.EIGHT, Shapes.EIGHT, Shapes.H)\n\n # more shapes\n pipes = Shapes.SEG_B + Shapes.SEG_C + Shapes.SEG_E + Shapes.SEG_F # pipes is ||\n dashX3 = Shapes.SEG_A + Shapes.SEG_D + Shapes.SEG_G # three horizontal dashes\n\n # weaker wavefront farther from explosion\n cyanPipes = (0, pipes, pipes)\n violetPipes = (pipes, 0, pipes)\n weakWaves = (cyanDash, cyanPipes, violetPipes, violetDash)\n\n # weaker wavefront farther from explosion\n zig = Shapes.SEG_B + Shapes.SEG_G + Shapes.SEG_E\n zag = Shapes.SEG_C + Shapes.SEG_G + Shapes.SEG_F\n cyanZag = (0, zag, zag)\n cyanZig = (0, zig, zig)\n violetZag = (zag, 0, zag)\n violetZig = (zig, 0, zig)\n weakWaves = (cyanDash, cyanZig, violetZag, violetDash) # pretty good\n yellowZig = (zig, zig, 0)\n yellowZag = (zag, zag, 0)\n weakWaves2 = (yellowDash, yellowZig, yellowZag, yellowDash)\n weakWaves2 = (yellowDash, yellowDash, yellowDash, yellowDash)\n\n # strong wavefronts\n waves = (whiteZero, redZero, yellowZero)\n waves = (greenDash, greenZero, blueZero, blueDash)\n waves = (cyanDash, cyanZero, violetZero, violetDash) # pretty good\n\n yellowDashX3 = (dashX3, dashX3, 0)\n waves2 = (yellowDash, yellowZero, yellowZero, yellowDash)\n waves2 = (yellowDash, yellowDashX3, yellowDashX3, yellowDash) # pretty good\n\n #wiggleWaves = (cyanZag, cyanZig, violetZag, violetZig) # looks much like weakWaves\n cyanFive = (0, Shapes.FIVE, Shapes.FIVE)\n cyanTwo = (0, Shapes.TWO, Shapes.TWO)\n violetFive = (Shapes.FIVE, 0, Shapes.FIVE)\n violetTwo = (Shapes.TWO, 0, Shapes.TWO)\n wiggleWaves = (cyanFive, cyanTwo, violetFive, violetTwo) # more flash than weakWaves\n\n #bomb0 = (Shapes.DASH, 0,0)\n #bomb1 = (Shapes.H, Shapes.DASH,0)\n #bomb2 = (Shapes.EIGHT, Shapes.H, Shapes.DASH)\n #bomb3 = (Shapes.EIGHT, Shapes.EIGHT, Shapes.H)\n #bomb4 = whiteEight # (Shapes.EIGHT, Shapes.EIGHT, Shapes.EIGHT)\n #bomb5 = redEight # (Shapes.EIGHT, Shapes.OFF, Shapes.OFF)\n #bomb6 = (Shapes.OFF, Shapes.OFF, Shapes.OFF)\n #bomb7 = (Shapes.ZERO, Shapes.ZERO, Shapes.ZERO)\n #bomb8 = (Shapes.OFF, Shapes.OFF, Shapes.OFF)\n #bombs = [bomb0, bomb0, bomb1, bomb2, bomb3, bomb4, bomb7, bomb6]\n #explosion = [bomb0, bomb1, bomb2, bomb3, bomb4, bomb5, bomb6, bomb7]\n explosion = [redDash, redZero, redEight, yellowEightPlus, whiteEight, redEight, yellowEight, whiteZero]\n\n bombThrob0 = (Shapes.EIGHT, Shapes.OFF, Shapes.OFF)\n bombThrob1 = (Shapes.H, Shapes.OFF, Shapes.OFF)\n bombThrob2 = (Shapes.DASH, Shapes.OFF, Shapes.OFF)\n bombThrobs = [bombThrob0, bombThrob1, bombThrob2]\n\n bombThrob0 = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n bombThrob1 = (Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C+Shapes.SEG_D, Shapes.OFF, Shapes.OFF)\n bombThrob2 = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n bombThrob3 = (Shapes.SEG_D+Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A, Shapes.OFF, Shapes.OFF)\n bombThrobs = [bombThrob0,bombThrob1, bombThrob2, 
bombThrob3]\n\n bombThrob0 = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n bombThrob1 = (Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C+Shapes.SEG_D, Shapes.OFF, Shapes.OFF)\n bombThrob2 = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n bombThrob3 = (Shapes.SEG_C+Shapes.SEG_D+Shapes.SEG_E+Shapes.SEG_F, Shapes.OFF, Shapes.OFF)\n bombThrob4 = (Shapes.ZERO, Shapes.OFF, Shapes.OFF)\n bombThrob5 = (Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A+Shapes.SEG_B, Shapes.OFF, Shapes.OFF)\n bombThrobs = [bombThrob0,bombThrob1,bombThrob2,bombThrob3,bombThrob4,bombThrob5]\n\n # spinning red circle segments\n throb0 = (Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C+Shapes.SEG_D, Shapes.OFF, Shapes.OFF)\n throb1 = (Shapes.SEG_B+Shapes.SEG_C+Shapes.SEG_D+Shapes.SEG_E, Shapes.OFF, Shapes.OFF)\n throb2 = (Shapes.SEG_C+Shapes.SEG_D+Shapes.SEG_E+Shapes.SEG_F, Shapes.OFF, Shapes.OFF)\n throb3 = (Shapes.SEG_D+Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A, Shapes.OFF, Shapes.OFF)\n throb4 = (Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A+Shapes.SEG_B, Shapes.OFF, Shapes.OFF)\n throb5 = (Shapes.SEG_F+Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C, Shapes.OFF, Shapes.OFF)\n bombThrobs = [throb0,throb1,throb2,throb3,throb4,throb5]\n\n # spinning red circle with one segment missing\n throb0 = (Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C+Shapes.SEG_D+Shapes.SEG_E, Shapes.OFF, Shapes.OFF)\n throb1 = (Shapes.SEG_C+Shapes.SEG_D+Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A, Shapes.OFF, Shapes.OFF)\n throb2 = (Shapes.SEG_E+Shapes.SEG_F+Shapes.SEG_A+Shapes.SEG_B+Shapes.SEG_C, Shapes.OFF, Shapes.OFF)\n bombThrobs = [throb0,throb1,throb2]\n\n # alternating red H and 0\n bombThrobs = [redZero, redH] # not bad, have been using a while\n bombThrobs = [redZero, redDash] # maybe better, more throbby\n\n throbPhase = 0 # mines throb together after exploding - class vble for generators to use\n\n version = 0;\n\n def __init__(self, rows, cols, mine, mines):\n self.rows = rows\n self.cols = cols\n self.frameNum = 0\n self.frame = defaultdict(lambda: defaultdict(int))\n self.firstMine = mine\n self.allMines = mines\n self.wavefront = makeWaves(mine)\n self.boom = exploder()\n self.stage = 0\n self.throbbing = False\n self.wi = 0 # A cyclic iterator, each wavefront stays active for 3 frames\n self.allCells = list()\n self.unblasted = list()\n for row in range(self.rows):\n for col in range(self.cols):\n rowCol = (row, col)\n self.allCells.append(rowCol)\n self.unblasted.append(rowCol)\n self.edit(row,col, self.blank) # TODO - init to last display\n #self.phase = 0 # phase of wavefronts\n self.explosionStarts = {} # track when each mine starts explosion\n self.explosionStarts[mine] = self.frameNum\n self.unblasted.remove(mine) # remove known mine from undisturbed list\n self.wavefrontPassed = set() # track tiles passed by wavefront\n\n LSExplosion.version = (LSExplosion.version + 1) % 3\n #LSExplosion.version = 1\n print(\"Animation version \" + repr(LSExplosion.version))\n\n\n # Allows you to edit an existing frame structure, if no colormask is set\n # then the tile will keep its current colormask\n def edit(self,row,col,colormask):\n if row > self.rows or col > self.cols:\n print(\"Edit error, index out of range\")\n raise Exception\n self.frame[row][col] = [colormask]\n\n def fill(self,colormask):\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n self.frame[row][col] = [colormask]\n\n def print(self):\n for row in self.frame:\n print([self.frame[row][col] for col in self.frame[row]])\n\n def get(self):\n frameOut = list()\n frameOut.append(self.cols)\n for row in range(0, 
self.rows):\n for col in range(0, self.cols):\n try:\n cell = self.frame[row][col][0]\n frameOut.append(cell[0])\n frameOut.append(cell[1])\n frameOut.append(cell[2])\n except:\n print(\"Warning: ({:d},{:d}) has no update\".format(row,col))\n frameOut.append(128)\n frameOut.append(128)\n frameOut.append(128)\n return(frameOut) # frameOut is a list consisting of the number of columns in the frame\n # followed by a repeating pattern of 3 integers, each representing a\n # subsequent tile's red, green, and blue colormasks\n\n def flamefront(self):\n\n # wavefront triggers mine explosions\n if LSExplosion.version == 1:\n return self.newflamefront()\n\n # use generators for explosions and wavefronts\n elif LSExplosion.version == 2:\n return self.genflamefront()\n\n # original explosion animation\n if self.stage is 1:\n if self.wi == 0:\n self.thisWave = next(self.wavefront)\n self.wi += 1\n elif self.wi == 2:\n self.wi = 0\n else:\n self.wi += 1\n else:\n self.thisWave = list()\n if (True not in [ i in self.allCells for i in self.thisWave ]) and len(self.thisWave) > 0:\n # The wave has reached the edge of the board\n self.stage = 2 # Stop making waves\n self.boom = exploder() # Reset the explosion for the untriggered mines\n if self.stage is 2:\n try:\n self.boomMask = next(self.boom)\n except StopIteration:\n self.stage = 3\n self.throbbing = True\n\n for col in range(0,self.cols):\n for row in range(0,self.rows):\n tile = (row,col)\n # mine cells blow up then throb forever\n if tile == self.firstMine:\n mask = self.blank\n if self.stage is 0:\n try:\n mask = next(self.boom)\n except StopIteration:\n self.stage = 1\n elif self.stage is 2 and tile in self.allMines:\n mask = self.boomMask\n else:\n #mask = self.redZero\n mask = self.blank\n # TODO - would be better to not disturb until wavefront gets here\n if tile in self.thisWave:\n if self.wi == 1:\n mask = self.redZero\n elif self.wi == 2:\n mask = self.yellowZero\n else:\n mask = self.whiteZero\n if self.throbbing is True and tile in self.allMines:\n maskIdx = self.frameNum % len(self.bombThrobs)\n mask = self.bombThrobs[maskIdx]\n self.edit(row,col,mask)\n self.frameNum = self.frameNum + 1\n #print(\"Computed frame \" + repr(self.frameNum))\n\n def newflamefront(self):\n #print(\"Computing frame \" + repr(self.frameNum))\n self.phasePerWave = len(self.waves)\n wavePhase = self.frameNum % self.phasePerWave\n LSExplosion.throbPhase = self.frameNum % len(self.bombThrobs) # all throb together\n\n # run explosion animation\n for tile in self.explosionStarts.keys():\n animIdx = self.frameNum - self.explosionStarts[tile]\n # mine cells blow up\n if animIdx < len(self.explosion):\n mask = self.explosion[animIdx]\n #print(repr(tile) + \" is exploding\")\n # then throb forever\n else:\n mask = self.bombThrobs[LSExplosion.throbPhase]\n #print(repr(tile) + \" is throbbing\")\n self.edit(tile[0],tile[1],mask)\n\n # animation for wavefront passing tile\n for tile in self.wavefrontPassed:\n dist = self.inWavefront(tile)[0] # dist is first val in tuple\n\n # if wavefront has passed tile, it should be blank\n if dist == -1:\n mask = self.blank\n # strong wavefront\n elif dist <= 2: # 2 rings of strong wavefront\n mask = self.waves[wavePhase]\n #print(repr(tile) + \" is in strong wavefront\")\n # weaker wavefront farther away\n else:\n mask = self.weakWaves[wavePhase]\n #print(repr(tile) + \" is in weak wavefront\")\n self.edit(tile[0],tile[1],mask)\n\n # process cells that have not been blasted\n for tile in self.unblasted[:]: # use slice so remove during 
iterate works\n if wavePhase == 0: # wavefront moves into tile in first phase\n continue; # no need to check for changes\n # animation for wavefront passing tile\n dist = self.inWavefront(tile)[0] # dist is first val in tuple\n if dist > 0:\n self.unblasted.remove(tile) # remove blasted tile\n # mine explodes when wavefront reaches it\n if tile in self.allMines:\n self.explosionStarts[tile] = self.frameNum\n mask = self.explosion[0]\n #print(repr(tile) + \" just exploded!\")\n # strong wavefront\n elif dist <= 2: # 2 rings of strong wavefront\n mask = self.waves[wavePhase]\n self.wavefrontPassed.add(tile) # can always add to set\n #print(repr(tile) + \" is in strong wavefront\")\n # weaker wavefront farther away\n else:\n mask = self.weakWaves[wavePhase]\n self.wavefrontPassed.add(tile) # can always add to set\n #print(repr(tile) + \" is in weak wavefront\")\n self.edit(tile[0],tile[1],mask)\n\n self.frameNum = self.frameNum + 1\n\n def genflamefront(self):\n # use generators for explosions and wavefronts\n #print(\"Gen frame \" + repr(self.frameNum))\n if self.frameNum == 0:\n self.explosionGens = {} # store explosion generators\n self.explosionGens[self.firstMine] = explodeThenThrob()\n self.wavefrontGens = {} # store wavefront generators\n self.phasePerWave = len(self.waves)\n wavePhase = self.frameNum % self.phasePerWave\n LSExplosion.throbPhase = self.frameNum % len(self.bombThrobs) # all throb together\n\n # run explosion animation generator on exploded mines\n # exploded mines stay in explosionGens forever\n for tile in self.explosionGens.keys():\n mask = next(self.explosionGens[tile])\n self.edit(tile[0],tile[1],mask)\n\n # run wavefront passing tile animation generator\n # mines in wavefront stay in wavefrontGens forever\n for tile in self.wavefrontGens.keys():\n # wavefront moves into tile in first phase\n # no need to check for changes in other phases\n if wavePhase == 0:\n waveTuple = self.inWavefront(tile)\n dist = waveTuple[0] # dist is first val in tuple\n waveMine = waveTuple[1] # active mine is second val in tuple\n if dist > 0:\n #print(repr(tile) + \" is back in wavefront\")\n # mine explodes when wavefront reaches it\n # SHOULD NOT HAPPEN HERE - remove when satisfied\n if tile in self.allMines:\n self.explosionStarts[tile] = self.frameNum # used by inWavefront()\n self.explosionGens[tile] = explodeThenThrob()\n mask = next(self.explosionGens[tile]) # first explosion animation\n print(repr(tile) + \" exploded, but was already in wavefrontGens!\")\n else:\n # tile in wavefront again, set new generator\n self.wavefrontGens[tile] = animateWavefront(dist, waveMine)\n\n # generate wavefront animation each phase\n mask = next(self.wavefrontGens[tile])\n self.edit(tile[0],tile[1],mask)\n\n # test undisturbed tiles to see if in wavefront\n # wavefront moves into tile in first phase\n # no need to check for changes in other phases\n # tiles do not change until hit by a wavefront\n if wavePhase == 0:\n for tile in self.unblasted[:]: # use slice so remove during iterate works\n waveTuple = self.inWavefront(tile)\n dist = waveTuple[0] # dist is first val in tuple\n waveMine = waveTuple[1] # active mine is second val in tuple\n if dist > 0:\n self.unblasted.remove(tile) # remove disturbed tile\n # mine explodes when wavefront reaches it\n if tile in self.allMines:\n self.explosionStarts[tile] = self.frameNum # used by inWavefront()\n self.explosionGens[tile] = explodeThenThrob()\n mask = next(self.explosionGens[tile]) # first explosion animation\n #print(repr(tile) + \" just 
exploded!\")\n # tile is in wavefront\n else:\n self.wavefrontGens[tile] = animateWavefront(dist, waveMine)\n mask = next(self.wavefrontGens[tile]) # first wavefront animation\n #print(repr(tile) + \" is in wavefront\")\n self.edit(tile[0],tile[1],mask)\n\n # genflamefront is done with this frame\n self.frameNum = self.frameNum + 1\n\n # returns distance from mine if in wavefront or -1 if not\n # client indicates the minimum distance it cares about,\n # typically the max distance for closest-in wavefront\n # returns tuple (distance, active mine)\n def inWavefront(self, tile, distThresh=2):\n waves = 0\n closeDist = 999\n closeMine = None\n for mine in self.explosionStarts.keys():\n dist = self.distToMine(tile,mine)\n if dist == ((self.frameNum - self.explosionStarts[mine]) // self.phasePerWave):\n #print(repr(tile) + \" is in wavefront of \" + repr(mine))\n waves = waves + 1\n if closeDist > dist:\n closeDist = dist\n closeMine = mine\n # stop looking if explosion is as close as we care about\n if dist <= distThresh:\n break\n # return the closest wavefront we found\n if waves > 0:\n #if waves > 1: print(repr(tile) + \" is in \" + repr(waves) + \" wavefronts\")\n return (closeDist, closeMine)\n return (-1, closeMine)\n\n def distToMine(self, tile, mine):\n rowDist = abs(tile[0] - mine[0])\n colDist = abs(tile[1] - mine[1])\n # round off wavefront by noting the corners of the square are farther out\n if rowDist == colDist and rowDist >= 2:\n dist = rowDist + 1\n return dist\n dist = max(rowDist, colDist)\n #print(repr(tile) + \" to \" + repr(mine) + \" = \" + repr(dist))\n return dist\n\ndef test_explosion():\n print(\"TODO: testing lsexplosion\")\n\n useRealFloor = True\n try:\n realTiles = LSOpen()\n except Exception as e:\n useRealFloor = False\n\n d = lsdisplay.LSDisplay(realFloor = True, simulatedFloor = True, initScreen=False)\n rows = d.rows\n cols = d.cols\n\n ourAnimation = lsanimate.LSAnimation()\n\n #frame = lsanimate.LSFrameGen(rows,cols)\n mine = (2,3) # this mine is the first to blow\n #mines = [(0,1), (1,2), (2,4), (3,1), mine] # all the mines in the floor\n mines = [(0,1), (1,2), (3,0), mine] # all the mines in the floor\n # TODO - should initialize the frame from the existing floor\n # and not modify tiles until the wavefront reaches them\n print(mines)\n frame = LSExplosion(rows,cols, mine, mines)\n\n # HACK - pretend to initialize per existing display\n # TODO - add init function or arg to constructor\n #frame.edit(mine[0],mine[1]-1, LSExplosion.yellowEight)\n #frame.edit(mine[0],mine[1]-2, LSExplosion.yellowEight)\n #frame.edit(mine[0]-1,mine[1], LSExplosion.yellowEight)\n #frame.edit(mine[0]-2,mine[1], LSExplosion.yellowEight)\n for row in range(rows):\n for col in range(cols):\n frame.edit(row,col, LSExplosion.greenZero)\n\n starttime = time.time()\n\n #for frameNum in range(0,50):\n for frameNum in range(0,40):\n frame.flamefront()\n ourAnimation.addFrame(frame.get())\n\n endtime = time.time()\n gentime = endtime - starttime\n # newflamefront, with no printing, 6x8 38 frames <= .09 sec\n print(\"frame calcs took {:f} seconds\".format(gentime))\n\n #ourAnimation.deleteFrame(7) # Because I'm too lazy to do it right\n #ourAnimation.deleteFrame(7) # Yes, we need both of these\n\n ourAnimation.play(d)\n\nwait=time.sleep\n\nclass Cell(object):\n def __init__(self, is_mine, is_visible=False, is_flagged=False):\n self.is_mine = is_mine\n self.is_visible = is_visible\n self.is_flagged = is_flagged\n self.is_defused = False\n\n def show(self):\n self.is_visible = True\n\n def 
flag(self):\n self.is_flagged = not self.is_flagged\n\n def place_mine(self):\n self.is_mine = True\n\n def set_defused(self):\n if self.is_mine:\n self.is_defused = True\n\n\nclass Board():\n\n def __init__(self):\n self.is_playing = True\n\n def create_board(self, rows, cols, mines):\n print(\"creating board\")\n self.board = tuple([tuple([Cell(False) for col in range(cols)])\n for row in range(rows)])\n available_pos = list(range((rows) * (cols)))\n print(\"creating mines\")\n for i in range(mines):\n new_pos = random.choice(available_pos)\n available_pos.remove(new_pos)\n (row_id, col_id) = (new_pos // cols, new_pos % cols) # map the sampled flat index to (row, col) so all mines land on distinct cells\n self.place_mine(row_id, col_id)\n self.is_playing = True\n return\n\n def getCellState(self,row_id, col_id):\n # print (\"min_repr for: \",row_id,col_id)\n cell = self.board[row_id][col_id]\n if cell.is_defused:\n return \"D\"\n elif cell.is_visible:\n if cell.is_mine:\n return \"M\"\n else:\n surr = self.count_surrounding(row_id, col_id)\n return str(surr) if surr else \" \"\n elif cell.is_flagged:\n return \"F\"\n else:\n return \".\" #u\"\\uff18\"\n\n def set_display(self, display):\n print(\"setting display\")\n self.display = display\n \n def show(self, row_id, col_id):\n self.showingMultiple = False\n #print(\"given:\", row_id, col_id, \"board:\", len(self.board), len(self.board[0]))\n cell = self.board[row_id][col_id]\n if not cell.is_visible:\n #print(\"board.show\", row_id, col_id)\n cell.show()\n # self.display.show(row_id, col_id)\n if (cell.is_mine and not cell.is_flagged):\n self.is_playing = False\n print(\" ( ( ( ( *** B00M! *** ) ) ) ) \")\n elif self.is_solved():\n self.is_playing = False\n elif self.count_surrounding(row_id, col_id) == 0:\n self.showingMultiple = True\n for (surr_row, surr_col) in self.get_neighbours(row_id, col_id):\n if self.is_in_range(surr_row, surr_col):\n self.show(surr_row, surr_col) \n\n def show_all(self):\n for row in self.board:\n for cell in row:\n cell.show() \n \n def flag(self, row_id, col_id):\n cell = self.board[row_id][col_id]\n if not cell.is_visible:\n cell.flag()\n else:\n print(\"Cannot add flag, cell already visible.\")\n\n def place_mine(self, row_id, col_id):\n self.board[row_id][col_id].place_mine()\n\n def count_surrounding(self, row_id, col_id):\n return sum(1 for (surr_row, surr_col) in self.get_neighbours(row_id, col_id)\n if (self.is_in_range(surr_row, surr_col) and\n self.board[surr_row][surr_col].is_mine))\n\n def get_neighbours(self, row_id, col_id):\n SURROUNDING = ((-1, -1), (-1, 0), (-1, 1),\n (0 , -1), (0 , 1),\n (1 , -1), (1 , 0), (1 , 1))\n return ((row_id + surr_row, col_id + surr_col) for (surr_row, surr_col) in SURROUNDING)\n\n def is_in_range(self, row_id, col_id):\n return 0 <= row_id < len(self.board) and 0 <= col_id < len(self.board[0])\n\n def remaining_mines(self):\n remaining = 0\n for row in self.board:\n for cell in row:\n if cell.is_mine and not cell.is_visible:\n remaining += 1\n if cell.is_flagged:\n remaining -= 1\n return remaining\n \n def remaining_hidden(self):\n remaining = 0\n for row in self.board:\n for cell in row:\n if not cell.is_visible:\n remaining += 1\n return remaining\n\n def set_all_defused(self):\n for row in self.board:\n for cell in row:\n cell.set_defused()\n self.is_playing = False\n\n def is_solved(self):\n #return all((cell.is_visible or cell.is_flagged) for row in self.board for cell in row)\n #print(\"Remaining Mines: \", self.remaining_mines(), \" Remaining Hidden: \", 
self.remaining_hidden())\n return self.remaining_mines() == self.remaining_hidden()\n\n def list_mines(self):\n r = 0\n c = 0\n out = []\n for row in self.board:\n for cell in row:\n if cell.is_mine is True:\n rowCol = (r, c)\n out.append(rowCol)\n c += 1\n r += 1\n c = 0\n return out\n \n\n\nclass Minesweeper(LSGame):\n\n staleDisplay = defaultdict(lambda: defaultdict(str))\n\n def init(game):\n game.LowTimeWins()\n board = Board()\n mines = random.randint(int(game.cols*game.rows*.1), int(game.cols*game.rows*.3))\n if mines is 0:\n mines = 1\n print(\"{:d} mines...\".format(mines))\n board.create_board(game.rows, game.cols, mines)\n game.board = board\n game.animatingEnd = False\n game.firstStep = True\n game.updateBoard(game.board)\n game.display.setAll(Shapes.ZERO, Colors.GREEN)\n game.startTime = time.time()\n game.isWon = False\n\n\n def stepOn(game, row, col):\n game.lastMove = (row, col)\n playSound = True\n if game.board.board[row][col].is_visible:\n playSound = False\n if game.firstStep:\n if game.board.board[row][col].is_mine:\n game.board.board[row][col].is_mine = False # TODO: Should replace the mine somewhere else\n print(\"Saved from the mine!\")\n game.firstStep = False\n game.board.show(row, col)\n if game.board.board[row][col].is_mine:\n game.display.set(row, col, Shapes.ZERO, Colors.RED)\n game.audio.playSound(\"Explosion.wav\")\n elif playSound:\n game.audio.playSound(\"Blop.wav\")\n cell = game.board.getCellState(row, col)\n if cell != \" \":\n game.display.set(row, col, Shapes.digitToHex(int(cell)), Colors.YELLOW)\n\n def heartbeat(game, sensorsChanged):\n if game.board.is_playing:\n game.updateBoard(game.board)\n if not game.board.is_playing and not game.animatingEnd:\n if game.board.is_solved():\n print(\"Well done! You solved the board!\")\n game.isWon = True\n game.endAnim = EndAnimation(True, game.display, game.lastMove, game.board.list_mines())\n game.animatingEnd = True\n game.audio.playSound(\"Success.wav\")\n else:\n #self.audio.playSound(\"Explosion.wav\")\n game.board.show_all()\n game.endAnim = EndAnimation(False, game.display, game.lastMove, game.board.list_mines())\n game.animatingEnd = True\n elif game.animatingEnd:\n frame = game.endAnim.getFrame()\n if frame:\n #update display of each tile\n game.display.setFrame(frame)\n if game.endAnim.ended:\n game.endAnim.animation.play(game.display, frameRate=10)\n if game.isWon:\n game.over(int(time.time() - game.startTime))\n else:\n game.over()\n\n # currently this is just iterating across all the cells in the internal game state and pushing\n # the corresponding shape/color to the display for the given tile's position. 
a slightly better design would\n # be to only need to push info for the tiles that have actually changed\n def updateBoard(game, board):\n for row in range(game.rows):\n for col in range(game.cols):\n if board != None:\n cell = board.getCellState(row, col)\n staleCell = game.staleDisplay[row][col]\n if cell == \"D\":\n if staleCell != \"D\":\n game.display.set(row, col, Shapes.DASH, Colors.MAGENTA)\n elif cell == '.':\n if staleCell != \".\":\n game.display.set(row, col, Shapes.ZERO, Colors.GREEN)\n elif cell == ' ' or cell == '':\n if staleCell != \" \" and staleCell != \"\":\n game.display.set(row, col, Shapes.DASH, Colors.BLACK)\n elif cell == 'M':\n if staleCell != \"M\":\n game.display.set(row, col, Shapes.ZERO, Colors.RED)\n elif cell == 'F':\n print(\"A flag?!\")\n break\n else:\n cell = int(cell)\n if cell is 1:\n color = Colors.YELLOW\n elif cell is 2:\n color = Colors.WHITE\n elif cell is 3:\n color = Colors.CYAN\n elif cell is 4:\n color = Colors.BLUE\n else:\n color = Colors.MAGENTA\n game.display.set(row, col, Shapes.digitToHex(cell), color) # Should use setDigit?\n game.staleDisplay[row][col] = cell\n return\n\n\nclass EndAnimation:\n def __init__(self, win, display, lastMove, mines):\n self.rows = display.rows\n self.cols = display.cols\n self.ended = False\n self.currentFrame = None\n self.frames = []\n frame = display\n if win:\n redDash = (1, 0, 0)\n greenDash = (0, 1, 0)\n blueDash = (0, 0, 1)\n dashes = [redDash, greenDash, blueDash]\n redMine = (Shapes.H, 0, 0)\n greenMine = (0, Shapes.H, 0)\n blueMine = (0, 0, Shapes.H)\n\n winningAnimation = lsanimate.LSAnimation()\n\n frame = lsanimate.LSFrameGen(self.rows,self.cols)\n \n for _ in range(0,15):\n for i in range(0,self.cols):\n #frame.edit(0,i,redDash)\n #frame.edit(1,i,greenDash)\n #frame.edit(2,i,blueDash)\n for row in range(0,self.rows):\n idx = (0+row) % 3\n frame.edit(row,i,dashes[idx])\n for mine in mines:\n frame.edit(mine[0],mine[1],redMine)\n winningAnimation.addFrame(frame.get())\n\n for i in range(0,self.cols):\n #frame.edit(1,i,redDash)\n #frame.edit(2,i,greenDash)\n #frame.edit(0,i,blueDash)\n for row in range(0,self.rows):\n idx = (1+row) % 3\n frame.edit(row,i,dashes[idx])\n for mine in mines:\n frame.edit(mine[0],mine[1],greenMine)\n winningAnimation.addFrame(frame.get())\n\n for i in range(0,self.cols):\n #frame.edit(2,i,redDash)\n #frame.edit(1,i,greenDash)\n #frame.edit(0,i,blueDash)\n for row in range(0,self.rows):\n idx = (2+row) % 3\n frame.edit(row,i,dashes[idx])\n for mine in mines:\n frame.edit(mine[0],mine[1],blueMine)\n winningAnimation.addFrame(frame.get())\n\n self.animation = winningAnimation\n\n else:\n\n losingAnimation = lsanimate.LSAnimation()\n\n frame = LSExplosion(self.rows, self.cols, lastMove, mines)\n \n for frameNum in range(0,50):\n frame.flamefront()\n losingAnimation.addFrame(frame.get())\n\n losingAnimation.deleteFrame(7) # Because I'm too lazy to do it right\n losingAnimation.deleteFrame(7) # Yes, we need both of these\n\n self.animation = losingAnimation\n\n def getFrame(self):\n if len(self.frames) is 0:\n self.ended = True\n return None\n if self.currentFrame and self.currentFrame.heartbeats > 0:\n self.currentFrame.heartbeats -= 1\n else:\n self.currentFrame = self.frames.pop()\n return self.currentFrame\n\ndef main():\n gameEngine = LSGameEngine(Minesweeper)\n gameEngine.beginLoop()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":37863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"611600182","text":"import collections\nimport matplotlib.pyplot as plt\nimport functools\nfrom sklearn import metrics\nfrom sklearn.utils.multiclass import unique_labels\n\nfrom keras.models import Sequential, load_model\n# from keras.metrics import\nfrom keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout\nfrom sklearn.preprocessing import MinMaxScaler\n\n# create model\n\nimport cv2\nimport os\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n# train_files = [f for f in listdir('data_train_preprocessed_reshape') if isfile(join('data_train', f))]\nfrom nn_final_image.ETools import EImage\n\n\n# def specificity(y_true, y_pred):\n# \"\"\"\n# param:\n# y_pred - Predicted labels\n# y_true - True labels\n# Returns:\n# Specificity score\n# \"\"\"\n# neg_y_true = 1 - y_true\n# neg_y_pred = 1 - y_pred\n# fp = K.sum(neg_y_true * y_pred)\n# tn = K.sum(neg_y_true * neg_y_pred)\n# specificity = tn / (tn + fp + K.epsilon())\n# return specificity\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = metrics.confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n\n\ndef is_eq(list1, list2):\n if len(list1) != len(list2):\n return False\n else:\n for index_row in range(len(list1)):\n if list1[index_row] != list2[index_row]:\n print('{} and {} are NOT equal'.format(list1[index_row], list2[index_row]))\n return False\n\n return True\n\n\n\n\n# train_files = [f for f in listdir('train_2') if isfile(join('train_2', f))]\ntest_files = [f for f in listdir('final_data/pre_preprocessed_train') if isfile(join('final_data/pre_preprocessed_train', f))]\n\nX_TEST_DATA = []\nY_TEST_label = []\n\nmodel_id = 'model_deep_nn_6_pre.h5'\n# model_id = '3'\n\nfor reshape_file in test_files:\n if '.jpg' not in reshape_file:\n continue\n # x_image = cv2.imread('preprocessed_train/' + reshape_file)\n x_image = EImage.read_image('final_data/pre_preprocessed_train/' + reshape_file, if_read_as_grayscale=True)\n # print(x_image.shape)\n y_label = int(reshape_file.split('_')[0])\n X_TEST_DATA.append(x_image)\n y_data = None\n # 5, 10, 25, 50 ,100\n if y_label == 0:\n y_data = 0\n else:\n y_data = 1\n # else:\n # print('ERROR-Y_TRAIN_label -> unvalid classlabel ', reshape_file)\n # print('Y_TRAIN_label: ', y_label)\n # print('Y_TRAIN_label.type: ', type(y_label))\n # break\n\n Y_TEST_label.append(y_data)\n\n\n\nX_TEST_DATA = np.array(X_TEST_DATA)\n# Y_TEST_label = np.array(Y_TEST_label)\n\n\nX_TEST_DATA_SCALED = X_TEST_DATA / 255.\n# X_train, X_test, y_train, y_test = train_test_split(X_TRAIN_DATA_SCALED, Y_TEST_label, test_size=0.33)\n\nmodel = load_model(model_id)\n\nscores = model.evaluate(X_TEST_DATA_SCALED, Y_TEST_label)\n\n# scores = model.evaluate(X_TEST_DATA_SCALED, Y_TEST_label, verbose=0)\nprint('score: {}'.format(scores))\nY_PREDICT = model.predict(X_TEST_DATA_SCALED)\nfor index_x in range(Y_PREDICT.shape[0]):\n for index_y in range(Y_PREDICT.shape[1]):\n Y_PREDICT[index_x,index_y] = 1 if Y_PREDICT[index_x,index_y]>=0.5 else 0\nprint(Y_PREDICT.shape)\nprint(Y_PREDICT.tolist())\nprint(Y_TEST_label)\n\n# true_counts=0\n# LIST_Y_PREDICT = []\n# for index_row in range(len(Y_PREDICT)):\n# # if collections.Counter(Y_PREDICT[index_row].tolist()) == collections.Counter([1,0,0]):\n# list_index = Y_PREDICT[index_row]\n# LIST_Y_PREDICT.insert(index_row, list_index.tolist().index(max(list_index.tolist())))\n# # if is_eq(list_index, [1,0,0]):\n# # if Y_PREDICT[index_row] == [1,0,0]:\n# # LIST_Y_PREDICT.insert(index_row, 1)\n# # elif is_eq(list_index, [0,1,0]):\n# # LIST_Y_PREDICT.insert(index_row, 2)\n# # elif is_eq(list_index, [0,0,1]):\n# # LIST_Y_PREDICT.insert(index_row, 3)\n# # else:\n# # print('ERROR {}'.format(list_index))\n#\n# LIST_Y_TRUE = []\n# for index_row in range(len(Y_TEST_label)):\n# list_index = Y_TEST_label[index_row]\n# LIST_Y_TRUE.insert(index_row, list_index.tolist().index(max(list_index.tolist())))\n# # if is_eq(list_index, [1,0,0]):\n# # if Y_PREDICT[index_row] == [1,0,0]:\n# # LIST_Y_TRUE.insert(index_row, 1)\n# # elif 
is_eq(list_index, [0,1,0]):\n# # LIST_Y_TRUE.insert(index_row, 2)\n# # elif is_eq(list_index, [0,0,1]):\n# # LIST_Y_TRUE.insert(index_row, 3)\n# # else:\n# # print('ERROR {}'.format(list_index))\n#\n# print('SIZE ARE EQUAL? {}'.format(len(LIST_Y_PREDICT)==len(LIST_Y_TRUE)))\n# for index_row in range(len(LIST_Y_PREDICT)):\n# if LIST_Y_PREDICT[index_row]==LIST_Y_TRUE[index_row]:\n# true_counts+=1\n#\n#\n# # print(LIST_Y_TRUE)\n# # print(LIST_Y_P)\n# print('score: {}'.format(true_counts*100/len(Y_PREDICT)))\n#\n# # print(\"score: \", scores)\n# # print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n#\nrep_classification_report = metrics.classification_report(Y_TEST_label,\n Y_PREDICT,\n # target_names=[0,1,2]\n )\n\nrep_cm = metrics.confusion_matrix(Y_TEST_label,Y_PREDICT)\ntn, fp, fn, tp = metrics.confusion_matrix(Y_TEST_label,Y_PREDICT).ravel()\n# rep_sc = metrics.SCORERS\nsensitivity = tp/(tp+fn)\nspecificity = tn/(tn+fp)\nprint(rep_classification_report)\nprint(rep_cm)\n# print(rep_sc)\nprint(tn, fp, fn, tp)\nprint('sensitivity: {}'.format(sensitivity))\nprint('specificity: {}'.format(specificity))\n#\n# ax = plot_confusion_matrix(Y_TEST_label, Y_PREDICT,classes=[0,1], title='Confusion Matrix')\n# cm = metrics.confusion_matrix(Y_TEST_label, Y_PREDICT)\n# ax.imshow('cm_{}.jpg'.format(model_id))\n# print(cm)\n#\n# model.save('model_nn3.h5')\n\n","sub_path":"main_nn_6_pre_train_evaluate.py","file_name":"main_nn_6_pre_train_evaluate.py","file_ext":"py","file_size_in_byte":7517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"651722199","text":"from django.views.generic import ListView, DetailView\n\nfrom issuetracking.models import Project, Issue\n\n\nclass ProjectMixin(object):\n model = Project\n\n\nclass IssueMixin(object):\n model = Issue\n\n\nclass ProjectListView(ProjectMixin, ListView):\n context_object_name = \"project_list\"\n\n\nclass ProjectDetailView(ProjectMixin, DetailView):\n context_object_name = \"project\"\n\n\nclass IssueDetailView(IssueMixin, DetailView):\n context_object_name = \"issue\"\n","sub_path":"issuetracking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"79028543","text":"from random import Random, uniform\n\ndef main():\n f = open('update_properties_villas.sql', 'w')\n for i in range(5):\n f.write('UPDATE \"property_properties\"\\n' \\\n + \"SET price = \" \\\n + str(round((uniform(1, 5)), 2) * 1000000) + '\\n' \\\n + \"WHERE description = 'villa' AND \"+'\"propertyId\" = ' + str(i+6) + \";\\n\\n\")\n f.close()\n\nmain()","sub_path":"populate_scripts/update_properties_villas_size.py","file_name":"update_properties_villas_size.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"630170002","text":"# 1. How to tell the islands apart => BFS, giving each island a different number \n# 2. How to find the shortest distance between islands => visit each island and use BFS to get the shortest distance\n\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\n# BFS that labels the islands\ndef bfs1(i, j):\n global count\n q = deque()\n q.append([i, j])\n vis[i][j] = True\n arr[i][j] = count\n\n while q:\n x, y = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < n and 0 <= ny < n and arr[nx][ny] == 1 and not vis[nx][ny]:\n vis[nx][ny] = True\n arr[nx][ny] = count\n q.append([nx, ny])\n\n\n# Cross the sea and find the shortest distance.\ndef bfs2(z):\n global answer\n dist = [[-1] * n for _ in range(n)] # array storing distances\n q = deque()\n\n for i in range(n):\n for j in range(n):\n if arr[i][j] == z:\n q.append([i, j])\n dist[i][j] = 0\n\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n # continue if the cell is out of bounds\n if nx < 0 or nx >= n or ny < 0 or ny >= n:\n continue\n # when we reach other land, keep the shorter of this distance and the current answer\n if arr[nx][ny] > 0 and arr[nx][ny] != z:\n answer = min(answer, dist[x][y])\n return\n # when we hit sea, grow dist by 1.\n if arr[nx][ny] == 0 and dist[nx][ny] == -1:\n dist[nx][ny] = dist[x][y] + 1\n q.append([nx, ny])\n\n\nn = int(input())\n\narr = [list(map(int, input().split())) for _ in range(n)]\nvis = [[False] * n for _ in range(n)]\ncount = 1\nanswer = sys.maxsize\n\nfor i in range(n):\n for j in range(n):\n if not vis[i][j] and arr[i][j] == 1:\n bfs1(i, j)\n count += 1\n\n# print(arr)\n\nfor i in range(1, count):\n bfs2(i)\n\nprint(answer)\n","sub_path":"210702/bj2146_3.py","file_name":"bj2146_3.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"99938281","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mutual_info_score\nimport operator\n\nfrom Multi_THIP import Multi_TensorHIP\n\n\ndef compute_error(cost_vector):\n return np.sqrt(cost_vector.sum() / len(cost_vector))\n\ndef group_tags_by_date(df, hashtag, all_dates, print_plot=False):\n df_tags = df[df.hashtag == hashtag]\n tags_grouped = df_tags.groupby([df_tags['create_time'].dt.date]).size()\n tags_grouped = tags_grouped.reset_index()\n tags_grouped.columns = ['Time', 'Frequency']\n vals = all_dates.set_index('Time').join(tags_grouped.set_index('Time')).fillna(0).astype('int').values.flatten()\n\n if print_plot == True:\n plt.plot(vals)\n plt.title(\"#\" + hashtag + \" Occurrences by Day\")\n plt.show()\n\n return vals\n\ndef plot_predictions(truth_value, adam_pred, adagrad_pred, hip_pred, num_train, num_test, title=\"\"):\n last_index = num_train + num_test\n plt.axvline(num_train, color='k')\n\n plt.plot(np.arange(last_index), truth_value[:last_index], 'k--', label='observed #views')\n\n if len(adam_pred) > 0:\n plt.plot(np.arange(last_index), adam_pred[:last_index], 'b-', label='ADAM fit')\n if len(adagrad_pred) > 0:\n plt.plot(np.arange(last_index), adagrad_pred[:last_index], 'y-', label='ADAGRAD fit')\n if len(hip_pred) > 0:\n plt.plot(np.arange(last_index), hip_pred[:last_index], 'g-', label='HIP fit')\n\n plt.legend([plt.Line2D((0, 1), (0, 0), color='k', linestyle='--'),\n plt.Line2D((0, 1), (0, 0), color='g'),\n plt.Line2D((0, 1), (0, 0), color='b'),\n plt.Line2D((0, 1), (0, 0), color='y')],\n ['Observed view', 'Original HIP Fit', 'TF ADAM', 'TF ADAGRAD'],\n frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.125),\n fancybox=True, shadow=True, ncol=4)\n\n plt.xlabel('Day')\n\n plt.title(title)\n\n plt.show()\n\ndef 
plot_improved_predictions(truth_value, single_pred, multi_pred, hip_pred, num_train, num_test, title=\"\"):\n last_index = num_train + num_test\n plt.axvline(num_train, color='k')\n\n plt.plot(np.arange(last_index), truth_value[:last_index], 'k--', label='observed #views')\n\n if len(single_pred) > 0:\n plt.plot(np.arange(last_index), single_pred[:last_index], 'b-', label='Single TensorHIP fit')\n if len(multi_pred) > 0:\n plt.plot(np.arange(last_index), multi_pred[:last_index], 'y-', label='Multi TensorHIP fit')\n if len(hip_pred) > 0:\n plt.plot(np.arange(last_index), hip_pred[:last_index], 'g-', label='HIP fit')\n\n plt.legend([plt.Line2D((0, 1), (0, 0), color='k', linestyle='--'),\n plt.Line2D((0, 1), (0, 0), color='g'),\n plt.Line2D((0, 1), (0, 0), color='b'),\n plt.Line2D((0, 1), (0, 0), color='y')],\n ['Observed view', 'Original HIP Fit', 'Single TensorHIP fit', 'Multi TensorHIP fit'],\n frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.125),\n fancybox=True, shadow=True, ncol=4)\n\n plt.xlabel('Day')\n\n plt.title(title)\n\n plt.show() \n \ndef plot_multistream_predictions(truth_value, top_weighted_pred, top_mi_pred, num_train, num_test, title=\"\"):\n last_index = num_train + num_test\n plt.axvline(num_train, color='k')\n\n plt.plot(np.arange(last_index), truth_value[:last_index], 'k--', label='observed #views')\n \n plt.plot(np.arange(last_index), top_weighted_pred[:last_index], 'g-', label='TopK Weighted Predictors')\n plt.plot(np.arange(last_index), top_mi_pred[:last_index], 'b-', label='Topk Mutual Information Scores Predictors')\n \n plt.legend([plt.Line2D((0, 1), (0, 0), color='k', linestyle='--'),\n plt.Line2D((0, 1), (0, 0), color='g'),\n plt.Line2D((0, 1), (0, 0), color='b')],\n ['Observed view', 'TopK Weights', 'TopK MI'],\n frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.125),\n fancybox=True, shadow=True, ncol=4)\n\n plt.xlabel('Day')\n\n plt.title(title)\n\n plt.show()\n\ndef get_hashtag_predictors(freq_dict, target):\n predictors = dict()\n\n for key, value in freq_dict.items():\n if key not in [target]:\n predictors[key] = value\n\n return list(predictors.keys()), list(predictors.values())\n\ndef get_mutual_info_scores(freq_dict, hashtag, sort=True):\n mi_scores = dict()\n\n for key, value in freq_dict.items():\n if key not in [hashtag]:\n mi_scores[key] = mutual_info_score(freq_dict[hashtag], value) \n\n if sort == True:\n mi_scores = sorted(mi_scores.items(), key=operator.itemgetter(1), reverse=True)\n \n return mi_scores\n\ndef get_topk_mi_scores(freq_dict, hashtag, k=20):\n sorted_scores = get_mutual_info_scores(freq_dict, hashtag)\n keys = [score[0] for score in sorted_scores[:k]]\n values = [freq_dict[key] for key in keys]\n\n return keys, values\n\n\ndef run_experiment(freq_dict, hashtag, num_train, num_test, k=20, num_initializations=5, print_plot=True, verbose=True):\n topk_mi_keys, topk_mi_vals = get_topk_mi_scores(freq_dict, hashtag, k)\n topk_mi_timeseries = [freq_dict[key] for key in topk_mi_keys]\n\n predictor_keys, predictor_values = get_hashtag_predictors(freq_dict, hashtag)\n\n if verbose == True:\n print(\"** Optimizing model with all hashtags\")\n # get topk weighted in optimized model\n hip = Multi_TensorHIP(predictor_values, freq_dict[hashtag], num_train, num_test)\n hip.fit(num_initializations, op='adagrad', verbose=verbose)\n\n top_weighted = list(np.array(predictor_keys)[np.absolute(np.array(hip.get_mu()[0])).argsort()[-k:][::-1]])\n top_weighted_timeseries = [freq_dict[key] for key in top_weighted]\n \n if verbose == 
True: \n print(\"** Optimizing model with topk MI hashtags\")\n mi_hip = Multi_TensorHIP(topk_mi_timeseries, freq_dict[hashtag], num_train, num_test)\n mi_preds, mi_losses = mi_hip.fit(num_initializations, op='adagrad', verbose=verbose)\n\n if verbose == True:\n print(\"** Optimizing model with topk weighted hashtags\")\n weight_hip = Multi_TensorHIP(top_weighted_timeseries, freq_dict[hashtag], num_train, num_test)\n weight_preds, weight_losses = weight_hip.fit(num_initializations, op='adagrad', verbose=verbose)\n\n mi_error = compute_error(mi_losses)\n weight_error = compute_error(weight_losses)\n \n if verbose == True: \n print(\"MI Error = \" + str(mi_error))\n print(\"Top Weighted Error = \" + str(weight_error))\n\n if print_plot == True:\n plot_multistream_predictions(freq_dict[hashtag], weight_preds, mi_preds, num_train, num_test, \\\n title=\"Models for #\" + hashtag) \n\n return topk_mi_keys, top_weighted, mi_error, weight_error\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"472675302","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef get_next_point(p_list):\n ver = random.randint(1, 100)\n r_ver = 0\n for point in p_list:\n if point == 0:\n continue\n l_ver = r_ver\n r_ver += point * 100\n if l_ver < ver <= r_ver:\n return p_list.index(point)\n raise ValueError\n\n\ndef main():\n # f_count = 10000\n mpv_size = 5\n mpv = [0.21, 0.17, 0.02, 0.28, 0.32,\n 0, 0.46, 0, 0.54, 0,\n 0.42, 0.3, 0.03, 0.16, 0.09,\n 0, 0.39, 0, 0.33, 0.28,\n 0.36, 0, 0, 0.18, 0.46]\n\n p_list = [0] * mpv_size\n\n print(\"Enter experiments count:\")\n f_count = int(input())\n\n print(\"Enter step count: \")\n m_step_count = int(input())\n\n print(\"First point: \")\n m_point = int(input()) - 1\n\n for _ in [x for x in range(f_count)]:\n point = m_point\n step_count = m_step_count\n print_str = \"start(p{}) -> \".format(point + 1)\n while step_count > 0:\n mpv_current_point_index = mpv_size * point\n point = get_next_point(mpv[mpv_current_point_index: mpv_current_point_index + mpv_size])\n p_list[point] += 1\n print_str += \"p{} -> \".format(point + 1)\n step_count -= 1\n\n print(\"{}end\".format(print_str))\n\n\n\n print(p_list)\n p_list = list(map(lambda p: p / (f_count * m_step_count), p_list))\n print(p_list)\n plt.bar([x for x in range(5)], p_list, width=0.5)\n\n\n mpv = [[0.21, 0.17, 0.02, 0.28, 0.32],\n [0, 0.46, 0, 0.54, 0],\n [0.42, 0.3, 0.03, 0.16, 0.09],\n [0, 0.39, 0, 0.33, 0.28],\n [0.36, 0, 0, 0.18, 0.46]]\n mpv_t = np.array(mpv)\n\n p2 = np.array([0, 0, 1, 0, 0])\n p3 = np.linalg.matrix_power(mpv, m_step_count)\n p4 = np.matmul(p2, p3)\n print(p4)\n\n\n plt.bar([x for x in range(5)], p4, width=0.5, align='edge')\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"157121392","text":"#!/usr/bin/python\n\nfrom pylatex import Document, Section, Subsection, Table\n\ndoc = Document(\"multirow\")\nsection = Section('Multirow Test')\n\ntest1 = Subsection('Multicol')\ntest2 = Subsection('Multirow')\n\ntable1 = Table('|c|c|')\ntable1.add_hline()\ntable1.add_multicolumn(2, '|c|', 'Multicol')\ntable1.add_hline()\ntable1.add_row((1, 2))\ntable1.add_hline()\ntable1.add_row((3, 4))\ntable1.add_hline()\n\ntable2 = 
Table('|c|c|c|')\ntable2.add_hline()\ntable2.add_multirow(3, '*', 'Multirow', cells=((1, 2), (3, 4), (5, 6)))\ntable2.add_hline()\ntable2.add_multirow(3, '*', 'Multirow2')\ntable2.add_hline()\n\ntest1.append(table1)\ntest2.append(table2)\n\nsection.append(test1)\nsection.append(test2)\n\ndoc.append(section)\ndoc.generate_pdf()\n","sub_path":"tests/multirow_test.py","file_name":"multirow_test.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545314147","text":"import discord\nimport safygiphy\nimport requests\nimport io\nimport json\nfrom discord.ext import commands\n\nglobal msg\nwith open(\"language/i18n.json\") as f:\n\tmsg = json.load(f)\n\nglobal prefixes\nwith open(\"data/prefixes.json\", 'r') as f:\n\tprefixes = json.load(f)\n\nclass Fun:\n\t\"\"\"\n\tComandi dedicati allo svago\n\t\"\"\"\n\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@commands.command()\n\tasync def gif(self, ctx, tag=\"cat\"):\n\t\t\"\"\"\n\t\tInvia una GIF\n\t\t\"\"\"\n\t\tawait ctx.trigger_typing()\n\t\tg = safygiphy.Giphy()\n\n\t\trgif = g.random(tag=str(tag))\n\t\tresponse = requests.get(str(rgif.get(\"data\", {}).get('image_original_url')), stream=True)\n\t\tawait ctx.send(msg.get('fun-gif-title', 'Gif by `{0}`').format(tag.title()), file=discord.File(io.BytesIO(response.raw.read()), 'funny.gif'))\n\n\t@commands.command()\n\tasync def ping(ctx):\n\t\t\"\"\"\n\t\tPong!\n\t\t\"\"\"\n\t\tawait ctx.send(':ping_pong: Pong!')\n\ndef setup(bot):\n\tbot.add_cog(Fun(bot))","sub_path":"athomos/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272994610","text":"# coding=utf-8\n\"\"\"\nThis script applies any changes made to the game files to matching resources\nin any packages.\n\"\"\"\nimport os\nimport shutil\nimport sys\n\n# Location of Portal 2\nGAME_FOLDER = r'F:\\SteamLibrary\\SteamApps\\common\\Portal 2'\nEXTRA_FILE_LOC = 'extra_files'\n\n# Skip these files, if they exist in the source folders.\n# Users won't need them.\nSKIPPED_EXTENSIONS = ('vmx', 'log', 'bsp', 'prt', 'lin')\n\nresource_paths = set()\n\ndef check_file(pack_path, rel_path):\n if rel_path.startswith('instances'):\n game_path = os.path.join(\n GAME_FOLDER,\n 'sdk_content',\n 'maps',\n 'instances',\n 'bee2',\n rel_path[10:],\n )\n else:\n game_path = os.path.join(GAME_FOLDER, 'bee2_dev', rel_path)\n if os.path.isfile(game_path):\n print('Applying changes to \"{}\"'.format(rel_path))\n shutil.copyfile(game_path, pack_path)\n else:\n print('Removing \"{}\"'.format(rel_path), file=sys.stderr)\n os.remove(pack_path)\n\ndef do_folder(path):\n \"\"\"Check a folder to see if it's a package.\n\n If it is, check any resources.\n \"\"\"\n for package in os.listdir(path):\n package_path = os.path.join(path, package)\n if os.path.isdir(package_path):\n if os.path.isfile(os.path.join(package_path,'info.txt')):\n res_folder = os.path.join(package_path, 'resources')\n if not os.path.isdir(res_folder):\n print('Package has no resources!')\n for base, dirs, files in os.walk(res_folder):\n if base == res_folder:\n # For the root, stop us from looking in the BEE2 folder\n for ind, folder in enumerate(dirs):\n if folder.casefold() == 'bee2':\n del dirs[ind]\n break\n continue\n for file in files:\n full_path = os.path.normpath(os.path.join(base, file))\n rel_path = os.path.relpath(full_path, res_folder)\n 
resource_paths.add(rel_path.casefold())\n check_file(\n full_path,\n rel_path,\n )\n else:\n do_folder(package_path)\n\ndef check_extra(game_subfolder, set_prefix):\n full_folder = os.path.join(GAME_FOLDER, game_subfolder)\n for base, dirs, files in os.walk(full_folder):\n for file in files:\n if file[-3:] in SKIPPED_EXTENSIONS:\n continue\n full_path = os.path.normpath(os.path.join(base, file))\n rel_path = os.path.relpath(full_path, full_folder)\n if os.path.join(set_prefix, rel_path).casefold() not in resource_paths:\n print('Extra file: \"{}\"'.format(\n os.path.join(set_prefix, rel_path)\n ))\n dest = os.path.join(EXTRA_FILE_LOC, set_prefix, rel_path)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n shutil.copy(full_path, dest)\n\nif __name__ == '__main__':\n do_folder(os.path.join(os.getcwd(), 'packages'))\n print('Cleaning extra_files\\\\!')\n shutil.rmtree(EXTRA_FILE_LOC + '/', ignore_errors=True)\n print('Done!')\n check_extra('bee2_dev\\\\models\\\\', 'models')\n check_extra('bee2_dev\\\\materials\\\\', 'materials')\n check_extra('bee2_dev\\\\sounds\\\\', 'sounds')\n check_extra('bee2_dev\\\\scripts\\\\', 'scripts')\n check_extra('bee2_dev\\\\particles\\\\', 'particles')\n check_extra('sdk_content\\\\maps\\\\instances\\\\bee2', 'instances')\n print('Complete!')","sub_path":"reverse_cache.py","file_name":"reverse_cache.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"497258378","text":"# PROBLEM DESCRIPTION :\n# The problem is to predict the number of monthly sales of champagne for the Perrin Freres label\n# (named for a region in France). The dataset provides the number of monthly sales of champagne\n# from January 1964 to September 1972, or just under 10 years of data. The values are a count\n# of millions of sales and there are 105 observations. 
The dataset is credited to Makridakis and\n# Wheelwright, 1989.\n\nfrom pandas import read_csv\nfrom matplotlib import pyplot\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.tsa.arima_model import ARIMAResults\n\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport numpy\n\n# create a differenced series\ndef difference(dataset, interval=1):\n diff = list()\n for i in range(interval, len(dataset)):\n value = dataset[i] - dataset[i - interval]\n diff.append(value)\n return diff\n\n# invert differenced value\ndef inverse_difference(history, yhat, interval=1):\n return yhat + history[-interval]\n\n# load and prepare datasets\ndataset = read_csv('dataset.csv', header=None, index_col=0, parse_dates=True, squeeze=True)\nX = dataset.values.astype('float32')\nhistory = [x for x in X]\nmonths_in_year = 12\nvalidation = read_csv('validation.csv', header=None, index_col=0, parse_dates=True, squeeze=True)\ny = validation.values.astype('float32')\n\n\n# load model\nmodel_fit = ARIMAResults.load('model.pkl')\nbias = numpy.load('model_bias.npy')\n\n# make first prediction\npredictions = list()\nyhat = float(model_fit.forecast()[0])\nyhat = bias + inverse_difference(history, yhat, months_in_year)\npredictions.append(yhat)\nhistory.append(y[0])\nprint('>Predicted=%.3f, Expected=%.3f' % (yhat, y[0]))\n\n# rolling forecasts\nfor i in range(1, len(y)):\n # difference data\n months_in_year = 12\n diff = difference(history, months_in_year)\n # predict\n model = ARIMA(diff, order=(0,0,1))\n model_fit = model.fit(trend='nc', disp=0)\n yhat = model_fit.forecast()[0]\n yhat = bias + inverse_difference(history, yhat, months_in_year)\n predictions.append(yhat)\n # observation\n obs = y[i]\n history.append(obs)\n print('>Predicted=%.3f, Expected=%.3f' % (yhat, obs))\n \n# report performance\nrmse = sqrt(mean_squared_error(y, predictions))\nprint('RMSE: %.3f' % rmse)\npyplot.plot(y)\npyplot.plot(predictions, color='red')\npyplot.show()\n","sub_path":"MonthlySalesFrenchChampageTimeSeriesPrediction.py","file_name":"MonthlySalesFrenchChampageTimeSeriesPrediction.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406268462","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThe :mod:`registered_instructions` module defines functions that handle adding\r\nnew Push instructions and retrieving previously registered Push instructions.\r\n\"\"\"\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\n\r\nimport warnings\r\n\r\nfrom ... import exceptions as e\r\n\r\n'''\r\nList of all registered push instructions.\r\n'''\r\nregistered_instructions = set()\r\n\r\ndef register_instruction(instruction):\r\n \"\"\"Registers an instruction, excluding duplicates.\r\n\r\n :param PushInstruction instruction: The instruction object to register.\r\n \"\"\" \r\n if len([i for i in registered_instructions if i.name == instruction.name]) > 0:\r\n warnings.warn('Duplicate instructions registered: ' + instruction.name + '. 
Duplicate ignored.')\r\n else:\r\n registered_instructions.update([instruction])\r\n\r\n\r\ndef get_instruction(name):\r\n \"\"\"Gets a registered instruction by its name.\r\n\r\n :param str name: Name of instruction\r\n :returns: A PushInstruction with ``name``, or throws UnknownInstructionName.\r\n \"\"\"\r\n l = [i for i in registered_instructions if name == i.name]\r\n if len(l) > 0:\r\n return l[0]\r\n else:\r\n raise e.UnknownInstructionName(name)\r\n\r\n\r\ndef get_instructions_by_pysh_type(pysh_type):\r\n \"\"\"Returns list of instructions that deal with the given pysh_type\r\n\r\n :param str pysh_type: Pysh type string (ie ``'_integer'``) to filter by.\r\n :returns: List if PushInstruction objects that are associated with ``pysh_type``.\r\n \"\"\"\r\n return [i for i in registered_instructions if pysh_type in i.stack_types]","sub_path":"pyshgp/push/instructions/registered_instructions.py","file_name":"registered_instructions.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"491270196","text":"##########################################################################\n#\n# pgAdmin 4 - PostgreSQL Tools\n#\n# Copyright (C) 2013 - 2020, The pgAdmin Development Team\n# This software is released under the PostgreSQL Licence\n#\n##########################################################################\n\nimport json\nimport uuid\n\nfrom pgadmin.browser.server_groups.servers.databases.schemas.tests import \\\n utils as schema_utils\nfrom pgadmin.browser.server_groups.servers.databases.tests import utils as \\\n database_utils\nfrom pgadmin.utils.route import BaseTestGenerator\nfrom regression import parent_node_dict\nfrom regression.python_test_utils import test_utils as utils\nfrom . 
import utils as tables_utils\n\n\nclass TableUpdateColumnTestCase(BaseTestGenerator):\n \"\"\"This class will update the column node from table\"\"\"\n scenarios = [\n # Fetching default URL for table node.\n ('Add privileges for existing column',\n dict(url='/browser/table/obj/')\n )\n ]\n\n def setUp(self):\n self.db_name = parent_node_dict[\"database\"][-1][\"db_name\"]\n schema_info = parent_node_dict[\"schema\"][-1]\n self.server_id = schema_info[\"server_id\"]\n self.db_id = schema_info[\"db_id\"]\n db_con = database_utils.connect_database(self, utils.SERVER_GROUP,\n self.server_id, self.db_id)\n if not db_con['data'][\"connected\"]:\n raise Exception(\"Could not connect to database to add a table.\")\n self.schema_id = schema_info[\"schema_id\"]\n self.schema_name = schema_info[\"schema_name\"]\n schema_response = schema_utils.verify_schemas(self.server,\n self.db_name,\n self.schema_name)\n if not schema_response:\n raise Exception(\"Could not find the schema to add a table.\")\n self.table_name = \"test_table_column_put_%s\" % (str(uuid.uuid4())[1:8])\n\n self.table_id = tables_utils.create_table(\n self.server, self.db_name,\n self.schema_name,\n self.table_name)\n\n def runTest(self):\n \"\"\"This function will fetch added table under schema node.\"\"\"\n table_response = tables_utils.verify_table(self.server, self.db_name,\n self.table_id)\n if not table_response:\n raise Exception(\"Could not find the table to update.\")\n\n data = {\n \"columns\": {\n \"changed\": [{\n \"attnum\": 1,\n \"attacl\": {\n \"added\": [{\n \"grantee\": self.server[\"username\"],\n \"grantor\": self.server[\"username\"],\n \"privileges\": [\n {\"privilege_type\": \"a\", \"privilege\": True,\n \"with_grant\": True},\n {\"privilege_type\": \"r\", \"privilege\": True,\n \"with_grant\": True},\n {\"privilege_type\": \"w\", \"privilege\": True,\n \"with_grant\": True},\n {\"privilege_type\": \"x\", \"privilege\": True,\n \"with_grant\": True\n }\n ]\n }]\n }\n }]\n }\n }\n\n response = self.tester.put(\n self.url + str(utils.SERVER_GROUP) + '/' +\n str(self.server_id) + '/' + str(self.db_id) + '/' +\n str(self.schema_id) + '/' + str(self.table_id),\n data=json.dumps(data), follow_redirects=True)\n self.assertEquals(response.status_code, 200)\n\n def tearDown(self):\n # Disconnect the database\n database_utils.disconnect_database(self, self.server_id, self.db_id)\n","sub_path":"web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_column_privileges_put.py","file_name":"test_column_privileges_put.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"126101194","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\nfrom mysite.views import hello, current_datetime, my_homepage_view, hours_ahead,mypage, display_meta\nfrom books import views\nfrom contact.views import contact \nurlpatterns = patterns('',\n\turl(r'^$', my_homepage_view),\n\turl(r'^hello/$', hello),\t\n\turl(r'^time/$', current_datetime),\n\turl(r'^time/plus/(\\d{1,2})/$', hours_ahead),\n\turl(r'^mypage/$', mypage),\n\turl(r'^meta/$', display_meta),\n\turl(r'search-form/$', views.search_form),\n\turl(r'search/$', views.search),\n\turl(r'contact/$', contact),\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^mysite/', include('mysite.foo.urls')),\n\n # Uncomment the admin/doc line below to 
enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"47561768","text":"import pip\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\ninstall('pymongo')\ninstall('nltk')\ninstall('feedparser')\ninstall('numpy')\ninstall('sympy')\ninstall('scipy')\ninstall('matplotlib')\ninstall('beautifulsoup4')\ninstall('requests')\n\n\nimport nltk\nnltk.download('all')\n","sub_path":"python/sourcing/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"549137995","text":"import copy\nfrom collections import deque \nimport sys\nimport re\n\nkeywords = [\"vessels\", \"source\",\"people\",\"capacity\",\"horizon\"]\n#size\nvessels = 0\nsource = 0\npeople = 0\ncapacity = {}\nhorizon = 0\n\ndef bfs(initialArray,capacities,initial_steps,people,splitAmount,horizon):\n alreadyExplored = []\n queue = deque([(initialArray,initial_steps)]) #create queue \n while len(queue)>0: #make sure there are nodes to check left\n node = queue.popleft() #grab the first node\n currentConfiguration = node[0]\n steps = node[1]\n # print(\"trying:\",currentMatrix,\"\\nSteps: \",numberOfSteps)\n #we already saw this one\n if currentConfiguration in alreadyExplored:\n continue\n #check if its the answer\n if is_correct(currentConfiguration,splitAmount,people):\n return \"split(yes).\"\n if horizon < steps:\n return (\"split(no).\")\n # #add it to already explored\n alreadyExplored.append(currentConfiguration)\n\n #if not an answer we generate all posible moves from this graph\n index = 0\n options = []\n for i in currentConfiguration:\n #add to queue here check if already in checked before adding to queue\n for j in range(len(currentConfiguration)):\n currentState = getState(currentConfiguration,index, j, capacities)\n options.append(currentState)\n # print(currentState)\n index+=1\n # print(\"i:\\n\",i)\n #remove duplicates\n b_set = set(map(tuple,options)) #need to convert the inner lists to tuples so they are hashable\n b = map(list,b_set) #Now convert tuples back into lists \n # print(list(b))\n\n for lst in b:\n #put in queue\n queue.append([lst,steps+1])\n return (\"split(no).\")\n\n\n\ndef getState(arrayCurrentState ,currentVessel, toVessel, capacities):\n # print(\"inside get states\")\n arr = arrayCurrentState[:]\n toVesselMaxCapacity = capacities.get(toVessel+1)\n # print(\"ARRAY: \",toVessel,\" current \", currentVessel)\n toVesselCurrentCapacity = arr[toVessel]\n currentVesselCurrentCapacity = arr[currentVessel]\n # print(arr[currentVessel])\n # print(arr[toVessel], \"capacity: \", toVesselMaxCapacity, \"toVesselCurrentCapacity:\",toVesselCurrentCapacity)\n if(toVesselMaxCapacity-toVesselCurrentCapacity)>=0 and (currentVessel != toVessel):\n needed = toVesselMaxCapacity-toVesselCurrentCapacity\n if(currentVesselCurrentCapacity-needed) >= 0:\n # print(\"Left: \",currentVesselCurrentCapacity-needed)\n tranfer = currentVesselCurrentCapacity-needed\n #update the array with new value\n arr[currentVessel] = tranfer\n arr[toVessel] = arr[toVessel] + needed\n # print(\"new array: \", 
arr)\n elif(needed > currentVesselCurrentCapacity):\n # print(\"test\")\n arr[toVessel] = arr[toVessel] + currentVesselCurrentCapacity\n arr[currentVessel] = 0\n # print(\"new array: \", arr)\n # print(\"needed: \", needed)\n return arr\n\ndef create_initial_state(source, capacities):\n size = len(capacities)\n # print(\"source key:, \",source) \n arr = []\n for key, value in capacities.items():\n if key == source:\n arr.append(value)\n else:\n arr.append(0)\n # print(arr)\n return arr\n\ndef is_correct(arr, partition, numberPeople):\n total = 0\n peopleCounter = 0\n for i in arr:\n if(total+i) < partition:\n total+=i\n elif (total+i) == partition:\n peopleCounter+=1\n total=0\n if peopleCounter > numberPeople:\n break\n return (peopleCounter == numberPeople)\n\ndef nonblank_lines(f):\n for l in f:\n line = l.rstrip()\n line = l.lstrip()\n for key in keywords:\n if (re.match(key+r'\\(\\d+(, \\d+)*\\)\\.',line)):\n parse_line(line, key)\n\ndef parse_line(op, key):\n global vessels\n global source\n global people\n global capacity\n global horizon\n\n values = re.findall('\\d+', op)\n if key is keywords[0]:\n vessels = int(values.pop())\n elif key is keywords[1]:\n source = int(values.pop())\n elif key is keywords[2]:\n people = int(values.pop())\n elif key is keywords[3]:\n capacity.update({int(values.pop(0)):int(values.pop(0))})\n elif key is keywords[4]:\n horizon = int(values.pop())\n\ndef print_current_values():\n print (\"vessels: \", vessels, \"\\nsource:\",source\n ,\"\\npeople:\",people,\"\\ncapacity:\",capacity,\"\\nhorizon:\",horizon)\n\ndef main():\n f = open(sys.argv[1], 'r')\n nonblank_lines(f)\n initial_array = create_initial_state(source,capacity)\n print(bfs(initial_array,capacity,0,people,(capacity.get(source)/people),horizon))\n\n f.close()\n\n\nmain()","sub_path":"Aditional class work/pour.py","file_name":"pour.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"614166376","text":"import pandas as pd\r\nfrom nltk.corpus import stopwords\r\nimport re\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import accuracy_score,classification_report\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport pickle\r\n\r\n#Data Reading:\r\ndf = pd.read_csv(\"C:\\\\Users\\\\sudhakarceemavaram_n\\\\Desktop\\\\Dataset\\\\Kaggle\\\\train_data.csv\")\r\nstop = stopwords.words(\"english\")\r\n\r\n#Data Cleaning:\r\ndef cleaning(raw):\r\n alpha_only = re.sub('[^a-zA-Z]',\" \",raw)\r\n all_lower = alpha_only.lower()\r\n words = all_lower.split()\r\n for i in words:\r\n if i in stop:\r\n words.remove(i)\r\n return \" \".join(words)\r\n\r\nx = list(map(cleaning,df.content))\r\ny = df[\"sentiment\"]\r\nxtrain,xtest,ytrain,ytest = train_test_split(x,y,test_size=0.1,random_state=7)\r\n\r\n\r\n#Vectorization:\r\nvectx = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(xtrain)\r\nvecty = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(ytrain)\r\n\r\nxtrainvector = vectx.transform(xtrain)\r\nytrainvector = vecty.transform(ytrain)\r\n\r\n#print(type(xtrainvector))\r\n#print(type(ytrainvector))\r\n\r\n#Model:\r\nmodel = LogisticRegression(penalty=\"l2\",solver=\"lbfgs\",multi_class=\"multinomial\")\r\nmodel.fit(xtrainvector,ytrain)\r\n\r\npredict = model.predict(vectx.transform(xtest))\r\n\r\nscore = accuracy_score(predict,ytest)\r\nreport = classification_report(ytest,predict)\r\n\r\nprint(\"The 
predicted value is: \",predict)\r\nprint(\"The accuracy score is: \",score)\r\nprint(\"Report: \",report)\r\n\r\n#Saving the pickle file:\r\n#file = \"Human_mood.model\"\r\n#pickle.dump(model,open(file,\"wb\"))\r\n\r\n'''\r\ndf1 = pd.read_csv(\"C:\\\\Users\\\\sudhakarceemavaram_n\\\\Desktop\\\\Dataset\\\\Kaggle\\\\test_data.csv\")\r\nx1 = list(map(cleaning,df1.content))\r\nxvector = vectx.transform(x1)\r\nloadmodel = pickle.load(open(\"C:\\\\Users\\\\sudhakarceemavaram_n\\\\PycharmProjects\\\\pygame\\\\object\\\\Human_mood.model\",\"rb\"))\r\nvalue = list(loadmodel.predict(xvector))\r\ndf1[\"Sentiment_updated\"] = value\r\nprint(df1.head())\r\n\r\ndf1.to_csv(\"C:\\\\Users\\\\sudhakarceemavaram_n\\\\Desktop\\\\Dataset\\\\Kaggle\\\\Updated_Sentiments.csv\")\r\n'''\r\n","sub_path":"ML_algorithms.py","file_name":"ML_algorithms.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"189426725","text":"import json, os.path\n\nclass jsonSaver:\n\n defaultFileName = ''\n\n def __init__(self, fileName = 'file.json'):\n super().__init__()\n self.defaultFileName = fileName\n \n def saveJson (self, inputObj):\n jsonSerializedObj = json.dumps(inputObj)\n file2save = open(self.defaultFileName, 'w')\n file2save.write(jsonSerializedObj)\n file2save.close()\n \n def openJson (self):\n if not os.path.exists(self.defaultFileName):\n return False\n file2read = open(self.defaultFileName,'r')\n fileRawContent = file2read.read()\n file2read.close()\n jsonDeserializedObj = json.loads(fileRawContent)\n return jsonDeserializedObj\n\n","sub_path":"lacusClient_p2pTest/app_infrastructure/jsonFileController/jsonSave.py","file_name":"jsonSave.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"621889813","text":"# -*- coding: utf-8 -*-\n\"\"\"Repetition.\n\nRevision ID: 3362b720863\nRevises: 143a88a7b01\nCreate Date: 2015-11-26 00:40:43.789890\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nfrom dontforget.database import add_required_column\n\nrevision = \"3362b720863\"\ndown_revision = \"143a88a7b01\"\n\n\ndef upgrade():\n \"\"\"Upgrade the database.\"\"\"\n add_required_column(\"alarm\", \"updated_at\", sa.TIMESTAMP(True), \"current_timestamp\")\n\n with op.batch_alter_table(\"chore\") as batch_op:\n add_required_column(\"chore\", \"repeat_from_completed\", sa.Boolean(), False)\n batch_op.add_column(sa.Column(\"repetition\", sa.String(), nullable=True))\n\n\ndef downgrade():\n \"\"\"Downgrade the database.\"\"\"\n with op.batch_alter_table(\"chore\") as batch_op:\n batch_op.drop_column(\"repetition\")\n batch_op.drop_column(\"repeat_from_completed\")\n\n with op.batch_alter_table(\"alarm\") as batch_op:\n batch_op.drop_column(\"updated_at\")\n","sub_path":"migrations/versions/3362b720863_repetition.py","file_name":"3362b720863_repetition.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647631526","text":"# Write a function that will create a dictionary of every word in a text file as well as the number\n# of occurrences of that word.\n#\n# Then, write the following functions:\n# - See if a user-supplied word exists in the dictionary, if so print the number of\n# occurrences. 
Else, print that it doesn't exist.\n# - Find the word with the highest number of occurrences.\n# - Find the word with the lowest number of occurrences.\n#\n# Upon running, your user should be able to search for as many words as they want until\n# they are ready to quit the program.\n\n\ndef read_file(file):\n stream = open(file, 'r')\n lines = stream.readlines()\n stream.close()\n return lines\n\n\ndef create_dict(lines):\n dict = {} # {key (word) : {value (number of occurrences)}\n\n for line in lines:\n words = line.split(' ')\n for word in words:\n word = word.strip(',.;:?!/()[]{}<>\\'\\\"\\n\\r\\t').lower()\n if word in dict:\n dict[word] += 1\n else:\n dict[word] = 1\n\n return dict\n\n\ndef exists(word, dict):\n if word in dict:\n num_occurrences = dict[word]\n print(f'The word {word} exists {num_occurrences} times.')\n else:\n print(f'The word {word} does not exist.')\n\n\ndef word_with_most_occurrences(dict):\n words = []\n most_occurrences = max(dict.values())\n\n for word in dict:\n num_occurrences = dict[word]\n if num_occurrences == most_occurrences:\n words.append(word)\n\n return words\n\n\ndef word_with_least_occurrences(dict):\n words = []\n least_occurrences = min(dict.values())\n\n for word in dict:\n num_occurrences = dict[word]\n if num_occurrences == least_occurrences:\n words.append(word)\n\n return words\n\n\ndef main():\n print('Project 2, Problem 4\\n')\n\n lines = read_file('SherlockHolmes.txt')\n\n dict = create_dict(lines)\n\n while True:\n entry = input('Enter a word to search: ')\n if entry == '':\n print()\n break\n exists(entry, dict)\n print()\n\n print(f'word with the highest number of occurrences is: {word_with_most_occurrences(dict)}')\n print(f'word with the lowest number of occurrences is: {word_with_least_occurrences(dict)}')\n\n\nmain()\n","sub_path":"Project2/Problem4.py","file_name":"Problem4.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"320066653","text":"import re\n\nimport py\nimport pytest\n\nfrom tox.config import parseconfig\nfrom tox.package import get_build_info, get_package\nfrom tox.session import Reporter, Session\n\n\ndef test_make_sdist(initproj):\n initproj(\n \"example123-0.5\",\n filedefs={\n \"tests\": {\"test_hello.py\": \"def test_hello(): pass\"},\n \"tox.ini\": \"\"\"\n \"\"\",\n },\n )\n config = parseconfig([])\n session = Session(config)\n sdist = get_package(session)\n assert sdist.check()\n assert sdist.ext == \".zip\"\n assert sdist == config.distdir.join(sdist.basename)\n sdist2 = get_package(session)\n assert sdist2 == sdist\n sdist.write(\"hello\")\n assert sdist.stat().size < 10\n sdist_new = get_package(Session(config))\n assert sdist_new == sdist\n assert sdist_new.stat().size > 10\n\n\ndef test_make_sdist_distshare(tmpdir, initproj):\n distshare = tmpdir.join(\"distshare\")\n initproj(\n \"example123-0.6\",\n filedefs={\n \"tests\": {\"test_hello.py\": \"def test_hello(): pass\"},\n \"tox.ini\": \"\"\"\n [tox]\n distshare={}\n \"\"\".format(\n distshare\n ),\n },\n )\n config = parseconfig([])\n session = Session(config)\n sdist = get_package(session)\n assert sdist.check()\n assert sdist.ext == \".zip\"\n assert sdist == config.distdir.join(sdist.basename)\n sdist_share = config.distshare.join(sdist.basename)\n assert sdist_share.check()\n assert sdist_share.read(\"rb\") == sdist.read(\"rb\"), (sdist_share, sdist)\n\n\ndef test_sdistonly(initproj, cmd):\n initproj(\n \"example123\",\n filedefs={\n \"tox.ini\": 
\"\"\"\n \"\"\"\n },\n )\n result = cmd(\"-v\", \"--sdistonly\")\n assert not result.ret\n assert re.match(r\".*sdist-make.*setup.py.*\", result.out, re.DOTALL)\n assert \"-mvirtualenv\" not in result.out\n\n\ndef test_separate_sdist_no_sdistfile(cmd, initproj, tmpdir):\n distshare = tmpdir.join(\"distshare\")\n initproj(\n (\"pkg123-foo\", \"0.7\"),\n filedefs={\n \"tox.ini\": \"\"\"\n [tox]\n distshare={}\n \"\"\".format(\n distshare\n )\n },\n )\n result = cmd(\"--sdistonly\")\n assert not result.ret\n distshare_files = distshare.listdir()\n assert len(distshare_files) == 1\n sdistfile = distshare_files[0]\n assert \"pkg123-foo-0.7.zip\" in str(sdistfile)\n\n\ndef test_separate_sdist(cmd, initproj, tmpdir):\n distshare = tmpdir.join(\"distshare\")\n initproj(\n \"pkg123-0.7\",\n filedefs={\n \"tox.ini\": \"\"\"\n [tox]\n distshare={}\n sdistsrc={{distshare}}/pkg123-0.7.zip\n \"\"\".format(\n distshare\n )\n },\n )\n result = cmd(\"--sdistonly\")\n assert not result.ret\n sdistfiles = distshare.listdir()\n assert len(sdistfiles) == 1\n sdistfile = sdistfiles[0]\n result = cmd(\"-v\", \"--notest\")\n assert not result.ret\n assert \"python inst: {}\".format(sdistfile) in result.out\n\n\ndef test_sdist_latest(tmpdir, newconfig):\n distshare = tmpdir.join(\"distshare\")\n config = newconfig(\n [],\n \"\"\"\n [tox]\n distshare={}\n sdistsrc={{distshare}}/pkg123-*\n \"\"\".format(\n distshare\n ),\n )\n p = distshare.ensure(\"pkg123-1.4.5.zip\")\n distshare.ensure(\"pkg123-1.4.5a1.zip\")\n session = Session(config)\n sdist_path = get_package(session)\n assert sdist_path == p\n\n\ndef test_installpkg(tmpdir, newconfig):\n p = tmpdir.ensure(\"pkg123-1.0.zip\")\n config = newconfig([\"--installpkg={}\".format(p)], \"\")\n session = Session(config)\n sdist_path = get_package(session)\n assert sdist_path == p\n\n\ndef test_package_isolated_no_pyproject_toml(initproj, cmd):\n initproj(\n \"package_no_toml-0.1\",\n filedefs={\n \"tox.ini\": \"\"\"\n [tox]\n isolated_build = true\n \"\"\"\n },\n )\n result = cmd(\"--sdistonly\")\n assert result.ret == 1\n assert result.outlines == [\"ERROR: missing {}\".format(py.path.local().join(\"pyproject.toml\"))]\n\n\ndef toml_file_check(initproj, version, message, toml):\n initproj(\n \"package_toml-{}\".format(version),\n filedefs={\n \"tox.ini\": \"\"\"\n [tox]\n isolated_build = true\n \"\"\",\n \"pyproject.toml\": toml,\n },\n )\n reporter = Reporter(None)\n\n with pytest.raises(SystemExit, message=1):\n get_build_info(py.path.local(), reporter)\n toml_file = py.path.local().join(\"pyproject.toml\")\n msg = \"ERROR: {} inside {}\".format(message, toml_file)\n assert reporter.reported_lines == [msg]\n\n\ndef test_package_isolated_toml_no_build_system(initproj, cmd):\n toml_file_check(initproj, 1, \"build-system section missing\", \"\")\n\n\ndef test_package_isolated_toml_no_requires(initproj, cmd):\n toml_file_check(\n initproj,\n 2,\n \"missing requires key at build-system section\",\n \"\"\"\n [build-system]\n \"\"\",\n )\n\n\ndef test_package_isolated_toml_no_backend(initproj, cmd):\n toml_file_check(\n initproj,\n 3,\n \"missing build-backend key at build-system section\",\n \"\"\"\n [build-system]\n requires = []\n \"\"\",\n )\n\n\ndef test_package_isolated_toml_bad_requires(initproj, cmd):\n toml_file_check(\n initproj,\n 4,\n \"requires key at build-system section must be a list of string\",\n \"\"\"\n [build-system]\n requires = \"\"\n build-backend = \"\"\n \"\"\",\n )\n\n\ndef test_package_isolated_toml_bad_backend(initproj, cmd):\n 
toml_file_check(\n initproj,\n 5,\n \"build-backend key at build-system section must be a string\",\n \"\"\"\n [build-system]\n requires = []\n build-backend = []\n \"\"\",\n )\n\n\ndef test_dist_exists_version_change(mock_venv, initproj, cmd):\n base = initproj(\n \"package_toml-{}\".format(\"0.1\"),\n filedefs={\n \"tox.ini\": \"\"\"\n [tox]\n isolated_build = true\n \"\"\",\n \"pyproject.toml\": \"\"\"\n [build-system]\n requires = [\"setuptools >= 35.0.2\"]\n build-backend = 'setuptools.build_meta'\n \"\"\",\n },\n )\n result = cmd(\"-e\", \"py\")\n assert result.ret == 0, result.out\n\n new_code = base.join(\"setup.py\").read_text(\"utf-8\").replace(\"0.1\", \"0.2\")\n base.join(\"setup.py\").write_text(new_code, \"utf-8\")\n\n result = cmd(\"-e\", \"py\")\n assert result.ret == 0, result.out\n","sub_path":"tests/unit/test_package.py","file_name":"test_package.py","file_ext":"py","file_size_in_byte":6600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"211047856","text":"import logging\nimport os\nfrom xml.sax.saxutils import quoteattr\n\nfrom flask import Flask, make_response as _make_response\nfrom flask import request, redirect, url_for, render_template\n\nimport borders.borders\nfrom converters import teryt\n\napp = Flask(__name__)\n\n\ndef make_response(ret, code):\n resp = _make_response(ret, code)\n resp.mimetype = 'text/xml; charset=utf-8'\n return resp\n\n\n@app.route(\"/all/.osm\", methods=[\"GET\", ])\ndef get_all_borders(terc):\n resp = make_response(borders.borders.get_borders(terc), 200)\n resp.headers['Content-Disposition'] = 'attachment; filename={0}.osm'.format(terc)\n return resp\n\n\n@app.route(\"/nosplit/.osm\", methods=[\"GET\", ])\ndef get_nosplit_borders(terc):\n resp = make_response(borders.borders.get_borders(terc, borders_mapping=lambda x: x, do_clean_borders=False), 200)\n resp.headers['Content-Disposition'] = 'attachment; filename={0}.osm'.format(terc)\n return resp\n\n\n@app.route(\"/error\", methods=[\"GET\", ])\ndef error(stuff):\n raise ValueError(\"Sample error\")\n\n\n@app.route(\"/.osm\", methods=[\"GET\", ])\ndef get_lvl8_borders(terc):\n resp = make_response(borders.borders.get_borders(terc, lambda x: x.tags.get('admin_level') == \"8\"), 200)\n resp.headers['Content-Disposition'] = 'attachment; filename={0}.osm'.format(terc)\n return resp\n\n\n@app.route(\"/prg/gminy/.osm\", methods=[\"GET\", ])\ndef get_gminy(terc):\n resp = make_response(borders.borders.gminy_prg_as_osm(terc), 200)\n resp.headers['Content-Disposition'] = 'attachment; filename={0}-gminy.osm'.format(terc)\n return resp\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return redirect(url_for(\"list_all\"))\n\n\n@app.route(\"/list/\")\ndef list_all():\n return render_list(None)\n\n\n@app.route(\"/list/\")\ndef render_list(terc):\n if terc:\n items = [(k, v) for k, v in teryt.teryt.items() if k.startswith(terc) and len(k) > len(terc)]\n else:\n items = [(k, v) for k, v in teryt.teryt.items() if len(k) < 7]\n return render_template('list.html', items=items, teryt=teryt.teryt)\n\n\ndef report_exception(e):\n app.logger.error('{0}: {1}'.format(request.path, e), exc_info=(type(e), e, e.__traceback__))\n resp = make_response(\n \"\"\"\n \n \n \n \n \"\"\" % quoteattr(repr(e)), 200)\n resp.headers['Content-Disposition'] = 'attachment; filename=error.osm'\n return resp\n\n\ndef start_rest_server():\n ADMINS = ['logi-osm@vink.pl']\n DEBUG = bool(os.environ.get('DEBUG', False))\n os.sys.stderr.write(\"Debug mode: 
{0}\\n\".format(DEBUG))\n MAILLOG = bool(os.environ.get('MAILLOG', False))\n MAILHOST = os.environ.get('MAILHOST', '127.0.0.1')\n os.sys.stderr.write(\"Mail logging mode: {0}. SMTP host: {1}\\n\".format(MAILLOG, MAILHOST))\n if MAILLOG:\n from logging.handlers import SMTPHandler\n\n mail_handler = SMTPHandler(MAILHOST,\n 'server-error@vink.pl',\n ADMINS, 'OSM Rest-Server Failed')\n mail_handler.setLevel(logging.INFO)\n app.logger.addHandler(mail_handler)\n\n if not DEBUG:\n app.errorhandler(Exception)(report_exception)\n\n app.run(host='0.0.0.0', port=5002, debug=DEBUG)\n\n\nif __name__ == '__main__':\n start_rest_server()\n","sub_path":"rest_server.py","file_name":"rest_server.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"17730150","text":"import configparser\nimport os\n\nconfig = configparser.RawConfigParser()\nabspath = os.path.abspath('../keys/data.ini')\nconfig.read(abspath)\nkeys = config['API_KEYS']\n\nclass Config:\n PATH = os.path.abspath('../../datas')\n config.read(PATH)\n MAX_PAGE = 100 # naver v4 max page\n PUBLIC_API_KEY = keys['PUBLIC_API_KEY']\n COUNT = 100 # naver v5 review max count\n SCROLL_NUM = 10 # naver v5 scroll count\n WEBHOOK_URL = keys['WEBHOOK_URL'] # slack webhook url\n DO_LIST = {'충북': '충청북도', '충남': '충청남도',\n '경북': '경상북도', '경남': '경상남도',\n '전북': '전라북도', '전남': '전라남도',\n '강원': '강원도', '경기': '경기도',\n '인천': '인천광역시', '인천시': '인천광역시',\n '부산': '부산광역시', '울산': '울산광역시', '대전': '대전광역시',\n '대구': '대구광역시', '광주': '광주광역시',\n '서울': '서울특별시', '서울시': '서울특별시',\n '제주': '제주특별자치도', '제주도': '제주특별자치도'}","sub_path":"camping_server2/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"408781584","text":"#! 
/usr/bin/env python\n\nimport uvmf_gen\n\n\n## The input to this call is the name of the desired bench and the name of the top \n## environment package\n## BenchClass(,)\nben = uvmf_gen.BenchClass('block_c','block_c',{})\n\n## Specify parameters for this interface package.\n## These parameters can be used when defining signal and variable sizes.\n# ben.addParamDef(,,)\n\n# Load shared objects from pkt_pkg C functions and block_b_env_pkg C functions\nben.addDPILibName('pktPkgCFunctions')\n\n## Specify clock and reset details\nben.clockHalfPeriod = '6ns'\nben.clockPhaseOffset = '21ns'\nben.resetAssertionLevel = True\nben.resetDuration = '250ns'\n\n## Import QVIP protocol packages so that the test bench can use sequence items and sequences from QVIP library.\nben.addImport('mvc_pkg')\nben.addImport('mgc_pcie_v2_0_pkg')\nben.addImport('mgc_axi4_v1_0_pkg')\nben.addImport('mgc_apb3_v1_0_pkg')\n\n\n## The addQvipBfm() lines below were copied from comments in the QVIP Configurator generated package named qvip_agents_pkg.sv.\n## qvip sub environment agents\n## addQvipBfm(,,)\nben.addQvipBfm('pcie_ep', 'qvip_agents', 'ACTIVE', unique_id='uvm_test_top.environment.qvip_env.')\nben.addQvipBfm('axi4_master_0', 'qvip_agents', 'ACTIVE', unique_id='uvm_test_top.environment.qvip_env.')\nben.addQvipBfm('axi4_master_1', 'qvip_agents', 'ACTIVE', unique_id='uvm_test_top.environment.qvip_env.')\nben.addQvipBfm('axi4_slave', 'qvip_agents', 'ACTIVE', unique_id='uvm_test_top.environment.qvip_env.')\nben.addQvipBfm('apb3_config_master', 'qvip_agents', 'ACTIVE',unique_id='uvm_test_top.environment.qvip_env.')\n\n## Specify the agents contained in this bench\n## addBfm(,,,,)\nben.addBfm('mem_in', 'mem', 'clock', 'reset', 'ACTIVE',agentInstName='mem_in')\nben.addBfm('mem_out', 'mem', 'clock', 'reset', 'ACTIVE',agentInstName='mem_out')\nben.addBfm('pkt_out', 'pkt', 'pclk', 'prst', 'ACTIVE',agentInstName='pkt_out')\n\n# This API identifies make targets that compile C code.\n# Compilation of c code is needed as a dependency for using the VINFO flow\nben.addVinfoDependency('comp_pkt_pkg_c_files')\n\n# Identify scoreboards to be disabled during register test\nben.addScoreboard(\"environment.mem_sb\")\nben.addScoreboard(\"environment.pkt_sb\")\nben.addScoreboard(\"environment.axi4_slave_sb\")\nben.addScoreboard(\"environment.apb3_cfg_sb\")\n\n## This will prompt the creation of all bench files in their specified\n## locations\nben.create()\n","sub_path":"example_code/python/api_files/block_c_bench_config.py","file_name":"block_c_bench_config.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"287412814","text":"# testing image loading in bin/atlas\n\nimport pyglet as pyg\nfrom pyglet.gl import *\nimport objects\nimport utils\nimport os\n\nimage_filenames = utils.list_files(\"./art/textures\")\n\nimages = [] # contains loaded texture regions in alphabetical order\nname_map = {} # contains loaded images with filename \"name\" -> textureregion\n\nbin = pyg.image.atlas.TextureBin() # bin take care of texture memory\n# loaded single images are put together on a bigger texture to share memory on gpu\n# call image by textureregion which is sub texture of big textur\n\nfor filename in image_filenames:\n try:\n name = utils.remove_file_ending(filename)\n tex = pyg.image.load(\"./art/textures/\"+filename)\n tex_region = bin.add(tex)\n images.append(tex_region)\n name_map[name] = tex_region\n except:\n print(\"could not load file: \"+ 
name)\nglTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n\ndef get(name):\n # get textureregion by filename\n if name in name_map:\n return name_map[name]\n else:\n raise ValueError(\"no texture region with that name loaded:\" + name) \n\n","sub_path":"test_image_load.py","file_name":"test_image_load.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28819630","text":"\n# Тест проверяет возможность создания, изменения, удаления регламента\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait as WDW\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nimport pytest\nimport time\n\n#@pytest.mark.skip()\n# Тест 1 \"Создание нового регламента\"\ndef test_1_create_definition(browser, help):\n\n# Создаем тестовую группу регламентов\n # заходим в главное меню регламенты - группы регаламентов\n href = '[href=\"#/definition_groups\"]'\n help.definition(browser, href)\n\n # нажимаем кнопку Добавить\n xpath = '//button[text()=\"Добавить\"]'\n help.f_xpath(browser, xpath)\n\n # заполняем поле Наименование\n text = \"Группа для проверки регламентов\"\n selector = '#name'\n help.past_text(browser, selector, text)\n\n # заполняем поле Примечание\n text = \"Примечания для тестовой группы регламентов\"\n selector = '#comment'\n help.past_text(browser, selector, text)\n\n # нажимаем кнопку Сохранить\n xpath = '//*[text()=\"Сохранить\"]'\n help.f_xpath(browser, xpath)\n\n# Добавляем новый регламент\n # заходим в главное меню регламенты - регламенты\n href = '[href=\"#/definitions\"]'\n help.definition(browser, href)\n\n # нажимаем кнопку Добавить\n xpath = '//*[text()=\"Добавить \"]'\n help.f_xpath(browser, xpath)\n\n # заполняем поле Код\n text = \"НТР\"\n selector = '#code_a'\n help.past_text(browser, selector, text)\n\n # заполняем поле Наименование\n text = \"Новый тестовый регламент\"\n selector = '#name_a'\n help.past_text(browser, selector, text)\n\n # выбираем Группу регламентов\n selector = '#group_a'\n help.f_selectors(browser, selector)\n xpath = '//option[text()=\" Группа для проверки регламентов \"]'\n help.f_xpath(browser, xpath)\n\n # выбираем Состояние регламентов\n selector = '#state_a'\n help.f_selectors(browser, selector)\n selector = '[value=\"З\"]'\n help.f_selectors(browser, selector)\n\n # заполняем поле Описание\n text = \"Описание для тестового регламента\"\n selector = '#description_a'\n help.past_text(browser, selector, text)\n\n # нажимаем кнопку Сохранить\n xpath = '//*[text()=\"Сохранить\"]'\n help.f_xpath(browser, xpath)\n\n\n # Проверяем наличие тестового регламента\n xpath = '//*[text() = \"НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]'\n #element = WDW(browser, 10).until(EC.element_to_be_clickable((By.ID, xpath)))\n #element.click()\n assert help.check_exists_by_xpath(browser, xpath), \"Новый тестовый регламент не создан\"\n\n\n#@pytest.mark.skip()\n# Тест 2 \"Смена названия и состояния регламента\"\ndef test_2_change_state_of_definition(browser, help):\n\n # заходим �� главное меню регламенты - регламенты\n href = '[href=\"#/definitions\"]'\n help.definition(browser, href)\n\n # ищем строку Новый тестовый регламент\n xpath = '//b[text()=\"НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]'\n help.f_xpath(browser, xpath)\n\n # изменяем 
Состояние регламентов\n selector = '#state'\n help.f_selectors(browser, selector)\n selector = '[value=\"О\"]'\n help.f_selectors(browser, selector)\n\n # заполняем поле Наименование\n text = \"Измененный новый тестовый регламент\"\n selector = '#name'\n help.past_text(browser, selector, text)\n\n # нажимаем кнопку Сохранить изменения\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n xpath = '//*[text()=\"Сохранить изменения \"]'\n help.f_xpath(browser, xpath)\n\n # Проверяем наличие тестового регламента\n xpath = '//*[text() = \"ИЗМЕНЕННЫЙ НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]'\n assert help.check_exists_by_xpath(browser, xpath), \"Новый тестовый регламент не изменен\"\n\n\n#@pytest.mark.skip()\n# Тест 3 \"Удаление регламента\"\ndef test_3_del_definition_group(browser, help):\n\n # заходим в главное меню регламенты - регламенты\n href = '[href=\"#/definitions\"]'\n help.definition(browser, href)\n\n # Ищем строку Измененный новый тестовый регламент\n line_definition = browser.find_elements_by_css_selector('[class=\"text-pointer\"]')\n deleg_definition = browser.find_element_by_xpath('//b[text()=\"ИЗМЕНЕННЫЙ НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]')\n deleg_definition.click()\n i = 0\n while line_definition[i] != deleg_definition:\n i += 1\n # browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # нажимаем кнопку Удалить\n selector = '[class=\"remove-icon text-black\"]'\n help.f_selectors(browser, selector, i)\n\n # подтвердить удаление\n alert = WDW(browser, 10).until(EC.alert_is_present())\n alert.accept()\n\n # Проверяем отсутствие тестового регламента\n xpath = '//b[text() = \"ИЗМЕНЕННЫЙ НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]'\n time.sleep(1)\n assert help.check_no_exists_by_xpath(browser, xpath), \"Новый тестовый регламент не удален\"\n\n # заходим в главное меню регламенты - группы регаламентов\n href = '[href=\"#/definition_groups\"]'\n help.definition(browser, href)\n\n # Ищем строку Группа для проверки регламентов\n line_definition = browser.find_elements_by_css_selector('[class=\"text-pointer\"]')\n deleg_definition = browser.find_element_by_xpath('//b[text()=\"ГРУППА ДЛЯ ПРОВЕРКИ РЕГЛАМЕНТОВ\"]')\n deleg_definition.click()\n i = 0\n while line_definition[i] != deleg_definition:\n i += 1\n # browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # удаляем группу регламентов\n selector = '[class=\"remove-icon text-black\"]'\n help.f_selectors(browser, selector, i)\n\n # подтвердить удаление\n alert = WDW(browser, 10).until(EC.alert_is_present())\n alert.accept()\n\n # Проверяем отсутствие измененной тестовой группы регламентов\n xpath = '//*[text() = \"ИЗМЕНЕННЫЙ НОВЫЙ ТЕСТОВЫЙ РЕГЛАМЕНТ\"]'\n time.sleep(1)\n assert help.check_no_exists_by_xpath(browser, xpath), \"Группа для проверки регламентов не удалена\"\n\n print(\"Тестовые данные удалены\")\n\n\n\n","sub_path":"onegin_definitions.py","file_name":"onegin_definitions.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"114136707","text":"# -*- coding: UTF-8 -*-\nimport os\n\nimport numpy as np\nimport pandas\nimport torch\nimport torch.optim\nfrom skimage import io,transform\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import datasets, transforms\n\n\nclass CusDataset(Dataset):\n def __init__(self,csv_file,root_dir,Wem,transform=None):\n \"\"\"\n arg:\n csv_file(string):数据集标签的文件路径\n root_dir(string):图片路径\n Wem(int):0是男,1是女\n transform(optional):图像变换方法\n 
\"\"\"\n self.labels = pandas.read_csv(csv_file)\n image = []\n age = []\n gender = []\n temp = 0\n for i in range(len(self.labels)):\n if self.labels.iloc[i, 3] == Wem:\n image.append(self.labels.iloc[i, 1])\n if self.labels.iloc[i, 2] >= 0 and self.labels.iloc[i, 2] <5:\n temp = 0\n elif self.labels.iloc[i, 2] >= 5 and self.labels.iloc[i, 2] <10:\n temp = 1\n elif self.labels.iloc[i, 2] >= 10 and self.labels.iloc[i, 2] <15:\n temp = 2\n elif self.labels.iloc[i, 2] >= 15 and self.labels.iloc[i, 2] <20:\n temp = 3\n elif self.labels.iloc[i, 2] >= 20 and self.labels.iloc[i, 2] <30:\n temp = 4\n elif self.labels.iloc[i, 2] >= 30 and self.labels.iloc[i, 2] <40:\n temp = 5\n elif self.labels.iloc[i, 2] >= 40 and self.labels.iloc[i, 2] <50:\n temp = 6\n elif self.labels.iloc[i, 2] >= 50 and self.labels.iloc[i, 2] <60:\n temp = 7\n elif self.labels.iloc[i, 2] >= 60 and self.labels.iloc[i, 2] <70:\n temp = 8\n elif self.labels.iloc[i, 2] >= 70:\n temp = 9\n #aget = np.zeros([10])\n #aget = temp\n age.append(temp)\n gender.append(self.labels.iloc[i, 3])\n else:\n continue\n self.labels = {'imagename':image,'age':age,'gender':gender}\n self.root_dir = root_dir\n self.transform = transform\n def __len__(self):\n return len(self.labels['imagename'])\n def __getitem__(self,idx):\n \n img_name = os.path.join(self.root_dir,\n self.labels['imagename'][idx])\n image = io.imread(img_name)\n image = transform.resize(image,output_shape=(256,256))\n age = self.labels['age'][idx]\n gender = self.labels['gender'][idx]\n sample = {'imagename':image,'age':age,'gender':gender}\n if self.transform:\n image = self.transform(sample['imagename'])\n sample = {\n 'imagename':image,\n 'age':torch.from_numpy(np.array([age])),\n 'gender':torch.from_numpy(np.array([gender]))\n }\n return sample\nclass ToTensor(object):\n \"\"\"将ndarrays的样本转化为Tensors的样本\"\"\"\n def __call__(self, sample):\n image,age,gender = sample['imagename'], sample['age'],sample['gender']\n # 交换颜色通道, 因为\n # numpy图片: H x W x C\n # torch图片 : C X H X W\n image = image.transpose((2, 0, 1))\n return {'imagename': torch.from_numpy(image),\n 'age': torch.from_numpy(np.array([age])),\n 'gender':torch.from_numpy(np.array([gender]))\n }\nif __name__ == '__main__':\n face_dataset = CusDataset(\n csv_file='../UTKFace.csv',\n root_dir='../data/UTKface/UTKface',\n Wem = 0,\n transform=transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))\n ]\n )\n )\n dataloader = DataLoader(\n face_dataset,\n batch_size=2,\n shuffle=True,\n num_workers=4\n )\n m_face = iter(face_dataset)\n sample_batched = m_face.__next__()\n print(sample_batched['age'])\n #for i_batch,sample_batched in enumerate(dataloader):\n print(\"\\nimagesize:{},age:{},gender:{}\".format(sample_batched['imagename'].size(),sample_batched['age'],sample_batched['gender']))\n","sub_path":"CycleGANS/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"330366467","text":"from django.shortcuts import render, redirect\n\n# Create your views here.\n\ndef view_cart(request):\n \"\"\" A veiw that renders the shopping cart contents \"\"\"\n\n return render(request, 'cart/cart.html')\n\ndef add_to_cart(request, item_id):\n \"\"\" Add a quantity of a product to the shopping cart \"\"\"\n\n quantity = int(request.POST.get('amount'))\n redirect_url = request.POST.get('redirect_url')\n cart = request.session.get('cart', {})\n\n if item_id in 
list(cart.keys()):\n cart[item_id] += quantity\n else:\n cart[item_id] = quantity\n\n request.session['cart'] = cart\n return redirect(redirect_url)\n\n\ndef uppdate_cart(request, item_id):\n \"\"\" Edit quantity of a product to the shopping cart \"\"\"\n\n quantity = int(request.POST.get('amount'))\n redirect_url = request.POST.get('redirect_url')\n cart = request.session.get('cart', {})\n cart[item_id] = quantity\n request.session['cart'] = cart\n return redirect(redirect_url)\n\n\ndef delete_from_cart(request, item_id):\n cart = request.session.get('cart', {})\n cart.pop(item_id)\n request.session['cart'] = cart\n\n return render(request, 'cart/cart.html')","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"403255153","text":"'''\nAuthor @ Subhamoy Karmakar, Vivek Roy\nDate : 9th March, 2018\n\nThis is the Module where we are going to do the Compliance Checking\n\nInput:\nCompliance Target Name, Policy Statement, Log Store Name, Rule Frames\n\nOutput:\nIntermediate Policy Frames, Rules Frames\n'''\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport LogLocation as logloc\nimport ParseWindowsLogs as parsewin\nimport ComplianceCheck as compcheck\nimport sqlOperations\nimport os, math, csv\nimport LogsDBOperations as dbop\nimport CompCheckSQLOp as dbOPS\nimport logging\nimport configuration as cfg\nfrom LogMessageResource import *\nimport collectLogSQLOps as dbops\nfrom pymongo import MongoClient\nimport datetime\nimport uuid\nimport re\nimport fnmatch\nimport time\nimport RHELNormalizer as rhelnorm\n\nlog = {\n '_id': '',\n 'startdate': '',\n 'starttime': '',\n 'enddate': '',\n 'endtime': '',\n 'contextname': '',\n 'sourcehost': '',\n 'logtype': '',\n 'timeelapsed': '',\n 'source': '',\n 'severity': '',\n 'message': '',\n 'additionalFields': '',\n 'logsource': '',\n 'compstatus': '',\n 'compmessage': ''\n}\n\n\nclass complianceCheckCompliance(QWidget):\n def __init__(self):\n logging.basicConfig(\n filename=cfg.FILE_LOG_APPLICATION,\n level=logging.INFO,\n filemode='a',\n format='%(asctime)s %(levelname)s %(filename)s %(funcName)s %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p'\n )\n super(complianceCheckCompliance, self).__init__()\n self.selectedPolicyTitles = {}\n\n # Global Variable\n self.policyStmtsList = []\n\n # contents\n self.contextLabel = QLabel('Compliance Target')\n self.contextCombo = QComboBox()\n\n self.tree = QTreeWidget()\n self.tree.setHeaderLabel(\"Selected Policy Statements\")\n self.tree.setMaximumWidth(200)\n\n self.logName = QComboBox()\n\n self.selectedPolicyStatement = QComboBox()\n\n self.progressBar = QProgressBar()\n\n self.checkComplianceButton = QPushButton('Check')\n self.checkComplianceButton.setCursor(Qt.PointingHandCursor)\n self.viewComplianceReport = QPushButton('View Report')\n self.viewComplianceReport.setCursor(Qt.PointingHandCursor)\n\n self.statementList = QTableWidget()\n self.statementList.setColumnCount(2)\n self.statementList.setHorizontalHeaderLabels(['Check compliance', 'Policy Statements'])\n self.statementList.horizontalHeader().setResizeMode(1, QHeaderView.Stretch)\n self.statementList.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.statementList.setColumnWidth(0, 150)\n\n self.complianceCheckingStatus = QLabel('')\n self.complianceCheckingStatus.setMaximumWidth(200)\n\n # listners\n self.selectedPolicyStatement.currentIndexChanged.connect(self.updatePolicyStatements)\n 
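# Note on the wiring in this block (added sketch, not part of the original file):\n # PyQt routes a widget's signal to any Python callable, so each connect() call\n # here registers a handler that fires on user input, roughly:\n #   combo.currentIndexChanged.connect(handler)   # handler(index) runs on change\n #   button.clicked.connect(handler)              # handler() runs on click\n 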
self.statementList.itemClicked.connect(self.handlePolicyAdd)\n self.contextCombo.currentIndexChanged.connect(self.updateLogNames)\n self.checkComplianceButton.clicked.connect(self.checkCompliance)\n self.viewComplianceReport.clicked.connect(self.viewReportSummary)\n self.logName.currentIndexChanged.connect(self.logStoreSelected)\n\n # layout\n hLayout = QHBoxLayout()\n hLayout.addWidget(self.tree)\n gridLayout = QGridLayout()\n\n policyStatementLabel = QLabel('Policy Id')\n policyStatementLabel.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n gridLayout.addWidget(policyStatementLabel, 0, 0)\n gridLayout.addWidget(self.selectedPolicyStatement, 0, 1)\n\n logStoreLabel = QLabel('Log store')\n gridLayout.addWidget(self.contextLabel, 1, 0)\n gridLayout.addWidget(self.contextCombo, 1, 1)\n logStoreLabel.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n # self.btnAddLogStore = QPushButton('Select Log Store...')\n gridLayout.addWidget(logStoreLabel, 2, 0)\n gridLayout.addWidget(self.logName, 2, 1)\n\n gridLayout.addWidget(self.statementList, 3, 0, 1, 2)\n\n complianceButtonHLayout = QHBoxLayout()\n complianceButtonHLayout.addStretch()\n # TODO : Add my prigress bar here\n # complianceButtonHLayout.addWidget(self.complianceCheckingStatus)\n complianceButtonHLayout.addWidget(self.progressBar)\n complianceButtonHLayout.addWidget(self.checkComplianceButton)\n complianceButtonHLayout.addWidget(self.viewComplianceReport)\n\n gridLayout.addLayout(complianceButtonHLayout, 4, 0, 1, 2)\n labelsHLayout = QHBoxLayout()\n gridLayout.addLayout(labelsHLayout, 5, 0, 1, 2)\n hLayout.addLayout(gridLayout)\n self.setLayout(hLayout)\n\n # Gets the log entries list as per compliance target selected\n def updateLogNames(self):\n storeNames = dbOPS.getContextLogsList(str(self.contextCombo.currentText()))\n if str(self.contextCombo.currentText()).__len__() > 0:\n storeNames.append('ADD NEW LOG STORE...')\n self.logName.clear()\n self.logName.addItems(storeNames)\n\n def logStoreSelected(self):\n if self.logName.currentText() == 'ADD NEW LOG STORE...':\n dialog = QDialog()\n dialog.setWindowTitle('Input Logs')\n dialog.resize(1000, 600)\n vLayout = self.getExternalLogs()\n dialog.setLayout(vLayout)\n dialog.exec_()\n self.refreshContent()\n\n def disableColumnList(self):\n self.startdateCol.setEnabled(False)\n self.starttimeCol.setEnabled(False)\n self.enddateCol.setEnabled(False)\n self.endtimeCol.setEnabled(False)\n self.severityCol.setEnabled(False)\n self.sourceCol.setEnabled(False)\n self.logSourceCol.setEnabled(False)\n self.logtypeCol.setEnabled(False)\n self.timeelapsedCol.setEnabled(False)\n self.messageCol.setEnabled(False)\n self.sourcehostCol.setEnabled(False)\n\n def enableColumnList(self):\n self.startdateCol.setEnabled(True)\n self.starttimeCol.setEnabled(True)\n self.enddateCol.setEnabled(True)\n self.endtimeCol.setEnabled(True)\n self.severityCol.setEnabled(True)\n self.sourceCol.setEnabled(True)\n self.logSourceCol.setEnabled(True)\n self.logtypeCol.setEnabled(True)\n self.timeelapsedCol.setEnabled(True)\n self.messageCol.setEnabled(True)\n self.sourcehostCol.setEnabled(True)\n\n def populateContextNames(self):\n names = dbop.getContextListDetails()\n self.contextName.clear()\n self.contextName.addItem('')\n for name in names:\n self.contextName.addItem(str(name[0]))\n\n def populateContextVersions(self):\n name = str(self.contextName.currentText())\n versions = sqlOperations.getContextVersions(name)\n self.contextVersionList.clear()\n for ver in versions:\n 
self.contextVersionList.addItem(str(ver))\n\n def populateLogHistory(self):\n result = sqlOperations.getLogsHistory()\n self.table.setRowCount(0)\n if (len(result) < 1):\n return\n for row in result:\n self.table.insertRow(self.table.rowCount())\n self.table.setItem(self.table.rowCount() - 1, 0, QTableWidgetItem(row[0]))\n self.table.setItem(self.table.rowCount() - 1, 1, QTableWidgetItem(row[1]))\n self.table.setItem(self.table.rowCount() - 1, 2, QTableWidgetItem(row[2]))\n self.table.setItem(self.table.rowCount() - 1, 3, QTableWidgetItem(row[3]))\n self.table.setItem(self.table.rowCount() - 1, 4, QTableWidgetItem(row[4]))\n\n def populateContextEntries(self):\n self.contextEntry.clear()\n if str(self.contextName.currentText()) != '':\n entries = dbop.getContextDetails(str(self.contextName.currentText()))\n for entry in entries:\n self.contextEntry.addItem(entry)\n\n # Gets the Policy Statements as Per policy title selected\n def updatePolicies(self):\n ids = sqlOperations.getPolicyIDs()\n self.selectedPolicyStatement.clear()\n for ID in ids:\n self.selectedPolicyStatement.addItem(\n str(ID) + \" - \" + sqlOperations.viewPolicyTitle(str(ID)))\n\n # Lists out the policy statements as a list\n def updatePolicyStatements(self):\n string = str(self.selectedPolicyStatement.currentText())\n if (string == '' or string == ' - '):\n self.statementList.setRowCount(0)\n return\n id = int(string[:string.find(' - ')])\n self.statementList.setRowCount(0)\n (title, policies) = sqlOperations.viewPolicy(id)\n for policy, loggen in policies:\n if (loggen):\n self.statementList.insertRow(self.statementList.rowCount())\n self.statementList.setItem(\n self.statementList.rowCount() - 1, 1,\n QTableWidgetItem(policy))\n chkBoxItem = QTableWidgetItem()\n chkBoxItem.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n if string in self.selectedPolicyTitles:\n if policy in self.selectedPolicyTitles[string]:\n chkBoxItem.setCheckState(Qt.Checked)\n else:\n chkBoxItem.setCheckState(Qt.Unchecked)\n else:\n chkBoxItem.setCheckState(Qt.Unchecked)\n self.statementList.setItem(\n self.statementList.rowCount() - 1, 0, chkBoxItem)\n self.statementList.resizeRowsToContents()\n\n def handlePolicyAdd(self, item):\n policyStmt = str(self.statementList.item(item.row(), 1).text())\n policyTitle = str(self.selectedPolicyStatement.currentText())\n\n if item.checkState() == Qt.Checked:\n if policyTitle in self.selectedPolicyTitles:\n self.selectedPolicyTitles[policyTitle].append(policyStmt)\n else:\n self.selectedPolicyTitles[policyTitle] = [policyStmt]\n self.populateSidebar()\n else:\n if (policyTitle in self.selectedPolicyTitles) and (policyStmt in self.selectedPolicyTitles[policyTitle]):\n self.selectedPolicyTitles[policyTitle].remove(policyStmt)\n self.populateSidebar()\n\n # Populate the side bar with the policy statements selected.\n def populateSidebar(self):\n self.clearTree()\n del self.policyStmtsList[:]\n for key, value in self.selectedPolicyTitles.iteritems():\n if len(value) > 0:\n element = QTreeWidgetItem(self.tree.invisibleRootItem(), [key])\n for stmt in value:\n QTreeWidgetItem(element, [stmt])\n self.policyStmtsList.append(str(stmt))\n element.setExpanded(True)\n\n # Clear the side bar.\n def clearTree(self):\n while (self.tree.invisibleRootItem().childCount() > 0):\n self.tree.invisibleRootItem().removeChild(self.tree.invisibleRootItem().child(0))\n\n def refreshContent(self):\n self.contextCombo.clear()\n contextList = dbOPS.getUniqueContextList()\n s = ''\n for i in range(len(contextList)):\n s = 
str(contextList[i])\n self.contextCombo.addItem(s)\n self.updatePolicies()\n\n # Traverse the nodes in the statement tree shown in the side bar\n def traverseNode(self, item):\n node = []\n node.append(item)\n for i in range(0, item.childCount()):\n node.extend(self.traverseNode(item.child(i)))\n return node\n\n # View the Report Summary of a particular compliance target in a new window\n def viewReportSummary(self):\n if len(str(self.contextCombo.currentText())) > 0:\n complyPopup = QDialog()\n complyPopup.setWindowTitle('Report Summary')\n complyPopup.resize(900, 700)\n\n # Widgets\n self.filteredResults = QTableWidget()\n checkBoxGroup = QGroupBox()\n self.allResultCheck = QRadioButton('All')\n self.compResultCheck = QRadioButton('Compliant')\n self.nonCompResultCheck = QRadioButton('Non-Compliant')\n self.pnCompResultCheck = QRadioButton('Potential Non-Compliant')\n\n self.btnSeeDetails = QPushButton('View Details')\n self.btnSeeDetails.setCursor(Qt.PointingHandCursor)\n self.reportIdCombo = QComboBox()\n self.reportIdCombo.clear()\n self.reportIdCombo.addItems(dbOPS.getComplianceReportList(str(self.contextCombo.currentText())))\n\n # Widget Prooerties\n self.allResultCheck.setChecked(True)\n\n checkBoxHLayout = QHBoxLayout()\n checkBoxHLayout.addWidget(self.allResultCheck)\n checkBoxHLayout.addWidget(self.compResultCheck)\n checkBoxHLayout.addWidget(self.nonCompResultCheck)\n checkBoxHLayout.addWidget(self.pnCompResultCheck)\n checkBoxHLayout.setStretch(3, 3)\n checkBoxGroup.setLayout(checkBoxHLayout)\n\n # Listeners\n self.allResultCheck.toggled.connect(self.getAllCompliance)\n self.compResultCheck.toggled.connect(self.getCompliance)\n self.nonCompResultCheck.toggled.connect(self.getNonCompliance)\n self.pnCompResultCheck.toggled.connect(self.getPotentialNonCompliance)\n self.reportIdCombo.currentIndexChanged.connect(self.getAllCompliance)\n self.btnSeeDetails.clicked.connect(self.showReportLogDetial)\n\n # Widget Properties\n self.filteredResults.insertColumn(0)\n self.filteredResults.insertColumn(1)\n self.filteredResults.insertColumn(2)\n self.filteredResults.insertColumn(3)\n self.filteredResults.insertColumn(4)\n self.filteredResults.insertColumn(5)\n self.filteredResults.insertColumn(6)\n self.filteredResults.setHorizontalHeaderLabels(\n ['Log Id', 'Compliance Status', 'Policy-ID', 'Device ID', 'IP', 'Site', 'Message']\n )\n self.filteredResults.setColumnWidth(1, 120)\n self.filteredResults.horizontalHeader().setStretchLastSection(True)\n self.filteredResults.setEditTriggers(QAbstractItemView.NoEditTriggers)\n\n # Enter Compliance Data into table\n self.getAllCompliance()\n\n vLayout = QVBoxLayout()\n vLayout.addWidget(QLabel('Compliance Target Name : %s' % QString(self.contextCombo.currentText())))\n vLayout.addWidget(QLabel('Report IDs :'))\n vLayout.addWidget(self.reportIdCombo)\n vLayout.addWidget(checkBoxGroup)\n vLayout.addWidget(self.filteredResults)\n vLayout.addWidget(self.btnSeeDetails)\n complyPopup.setLayout(vLayout)\n complyPopup.exec_()\n else:\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setText('Please select a valid Compliance Target to View its Report')\n msg.setIcon(QMessageBox.Critical)\n msg.exec_()\n\n # Gets the list of all Compliance Report for a particular report\n def getAllCompliance(self):\n a = 0\n b = 0\n c = 0\n if self.allResultCheck.isChecked():\n self.compResultCheck.setChecked(False)\n self.nonCompResultCheck.setChecked(False)\n self.filteredResults.clear()\n self.filteredResults.setHorizontalHeaderLabels(\n ['Log Id', 'Compliance 
Status', 'Policy-ID', 'Device ID', 'IP', 'Site', 'Message']\n )\n reportId = str(self.reportIdCombo.currentText())\n self.filteredResults.setRowCount(500)\n if len(reportId) > 0:\n i = 0\n con = MongoClient()\n db = con.logstore\n\n logIds = db.complaince_log.aggregate([{'$group': {'_id': '$logId'}}])\n for ids in logIds:\n _ID = str(ids['_id'])\n\n cur = db.complaince_log.find({'report-id': str(reportId), 'logId': str(_ID)}).limit(1)\n\n for d in cur:\n deviceId = dbOPS.getEntryName(str(self.contextCombo.currentText()).lower(), str(d['logsource'])) # entry name\n ip = dbOPS.getIP(str(self.contextCombo.currentText()).lower(), deviceId)\n site = dbOPS.getSite(str(self.contextCombo.currentText()).lower(), deviceId)\n if str(d['compmessage']) != '':\n self.filteredResults.setItem(i, 0, QTableWidgetItem(str(d['logId'])))\n self.filteredResults.setItem(i, 1, QTableWidgetItem(str(d['compstatus'][0])))\n self.filteredResults.setItem(i, 2, QTableWidgetItem(str(d['policyId'])))\n self.filteredResults.setItem(i, 3, QTableWidgetItem(str(deviceId)))\n self.filteredResults.setItem(i, 4, QTableWidgetItem(str(ip)))\n self.filteredResults.setItem(i, 5, QTableWidgetItem(str(site)))\n strMSG = str(d['compmessage'])\n self.filteredResults.setItem(i, 6, QTableWidgetItem(strMSG))\n i = i + 1\n if str(d['compstatus'][0]) == 'C':\n a = a + 1\n elif str(d['compstatus'][0]) == 'NC':\n b = b + 1\n elif str(d['compstatus'][0]) == 'PNC':\n c = c + 1\n\n self.allResultCheck.setText('All : ' + str(i))\n self.compResultCheck.setText('Compliant : ' + str(a))\n self.nonCompResultCheck.setText('Non-Compliant : ' + str(b))\n self.pnCompResultCheck.setText('Potential Non-Compliant : ' + str(c))\n\n\n # Gets the list of complied Compliance Report for a particular report\n def getCompliance(self):\n if self.compResultCheck.isChecked():\n self.nonCompResultCheck.setChecked(False)\n self.allResultCheck.setChecked(False)\n self.pnCompResultCheck.setChecked(False)\n self.filteredResults.clear()\n self.filteredResults.setHorizontalHeaderLabels(\n ['Log Id', 'Compliance Status', 'Policy-ID', 'Device ID', 'IP', 'Site', 'Message']\n )\n reportId = str(self.reportIdCombo.currentText())\n self.filteredResults.setRowCount(500)\n if len(reportId) > 0:\n i = 0\n con = MongoClient()\n db = con.logstore\n\n logIds = db.complaince_log.aggregate([{'$group': {'_id': '$logId'}}])\n for ids in logIds:\n _ID = str(ids['_id'])\n\n cur = db.complaince_log.find(\n {'report-id': str(reportId), 'logId': str(_ID), 'compstatus': 'C'}).limit(1)\n\n for d in cur:\n deviceId = dbOPS.getEntryName(str(self.contextCombo.currentText()).lower(), str(d['logsource'])) # entry name\n ip = dbOPS.getIP(str(self.contextCombo.currentText()).lower(), deviceId)\n site = dbOPS.getSite(str(self.contextCombo.currentText()).lower(), deviceId)\n if str(d['compmessage']) != '':\n self.filteredResults.setItem(i, 0, QTableWidgetItem(str(d['logId'])))\n self.filteredResults.setItem(i, 1, QTableWidgetItem(str(d['compstatus'][0])))\n self.filteredResults.setItem(i, 2, QTableWidgetItem(str(d['policyId'])))\n self.filteredResults.setItem(i, 3, QTableWidgetItem(str(deviceId)))\n self.filteredResults.setItem(i, 4, QTableWidgetItem(str(ip)))\n self.filteredResults.setItem(i, 5, QTableWidgetItem(str(site)))\n strMSG = str(d['compmessage'])\n strMSG = strMSG[:strMSG.index(':')]\n self.filteredResults.setItem(i, 6, QTableWidgetItem(strMSG))\n i = i + 1\n\n # Gets the list of non-complied Compliance Report for a particular report\n def getNonCompliance(self):\n if 
self.nonCompResultCheck.isChecked():\n self.allResultCheck.setChecked(False)\n self.compResultCheck.setChecked(False)\n self.pnCompResultCheck.setChecked(False)\n self.filteredResults.clear()\n self.filteredResults.setHorizontalHeaderLabels(\n ['Log Id', 'Compliance Status', 'Policy-ID', 'Device ID', 'IP', 'Site', 'Message']\n )\n reportId = str(self.reportIdCombo.currentText())\n if len(reportId) > 0:\n i = 0\n con = MongoClient()\n db = con.logstore\n\n logIds = db.complaince_log.aggregate([{'$group': {'_id': '$logId'}}])\n for ids in logIds:\n _ID = str(ids['_id'])\n\n cur = db.complaince_log.find(\n {'report-id': str(reportId), 'logId': str(_ID), 'compstatus': 'NC'}).limit(1)\n\n for d in cur:\n deviceId = dbOPS.getEntryName(str(self.contextCombo.currentText()).lower(), str(d['logsource'])) # contextId\n ip = dbOPS.getIP(str(self.contextCombo.currentText()), deviceId)\n site = dbOPS.getSite(str(self.contextCombo.currentText()), deviceId)\n if str(d['compmessage']) != '':\n self.filteredResults.setItem(i, 0, QTableWidgetItem(str(d['logId'])))\n self.filteredResults.setItem(i, 1, QTableWidgetItem(str(d['compstatus'][0])))\n self.filteredResults.setItem(i, 2, QTableWidgetItem(str(d['policyId'])))\n self.filteredResults.setItem(i, 3, QTableWidgetItem(str(deviceId)))\n self.filteredResults.setItem(i, 4, QTableWidgetItem(str(ip)))\n self.filteredResults.setItem(i, 5, QTableWidgetItem(str(site)))\n strMSG = str(d['compmessage'])\n self.filteredResults.setItem(i, 6, QTableWidgetItem(strMSG))\n i = i + 1\n\n def getPotentialNonCompliance(self):\n if self.pnCompResultCheck.isChecked():\n self.allResultCheck.setChecked(False)\n self.compResultCheck.setChecked(False)\n self.nonCompResultCheck.setChecked(False)\n self.filteredResults.clear()\n self.filteredResults.setHorizontalHeaderLabels(\n ['Log Id', 'Compliance Status', 'Policy-ID', 'Device ID', 'IP', 'Site', 'Message']\n )\n reportId = str(self.reportIdCombo.currentText())\n if len(reportId) > 0:\n i = 0\n con = MongoClient()\n db = con.logstore\n\n logIds = db.complaince_log.aggregate([{'$group': {'_id': '$logId'}}])\n for ids in logIds:\n _ID = str(ids['_id'])\n\n cur = db.complaince_log.find(\n {'report-id': str(reportId), 'logId': str(_ID), 'compstatus': 'PNC'}).limit(1)\n\n for d in cur:\n deviceId = dbOPS.getEntryName(str(self.contextCombo.currentText()).lower(), str(d['logsource'])) # contextId\n ip = dbOPS.getIP(str(self.contextCombo.currentText()), deviceId)\n site = dbOPS.getSite(str(self.contextCombo.currentText()), deviceId)\n if str(d['compmessage']) != '':\n self.filteredResults.setItem(i, 0, QTableWidgetItem(str(d['logId'])))\n self.filteredResults.setItem(i, 1, QTableWidgetItem(str(d['compstatus'][0])))\n self.filteredResults.setItem(i, 2, QTableWidgetItem(str(d['policyId'])))\n self.filteredResults.setItem(i, 3, QTableWidgetItem(str(deviceId)))\n self.filteredResults.setItem(i, 4, QTableWidgetItem(str(ip)))\n self.filteredResults.setItem(i, 5, QTableWidgetItem(str(site)))\n strMSG = str(d['compmessage'])\n self.filteredResults.setItem(i, 6, QTableWidgetItem(strMSG))\n i = i + 1\n\n # On clicking the show details the Log details are shown\n def showReportLogDetial(self):\n con = MongoClient()\n db = con.logstore\n r = self.filteredResults.currentRow()\n if r >= 0:\n logId = str(self.filteredResults.item(r, 0).text())\n logPopup = QDialog()\n logPopup.setWindowTitle('Log Details')\n logPopup.resize(600, 150)\n logPopup.setMaximumWidth(600)\n curReport = db.complaince_log.find({\n 'logId': logId,\n 'report-id' : 
str(self.reportIdCombo.currentText())\n })\n platf = ''\n compMsg = ''\n compStat = ''\n curLog = []\n for c in curReport:\n platf = c['platform']\n compMsg = c['compmessage']\n compStat = c['compstatus']\n\n platf = str(platf).lower()\n if platf in ['ubuntu', 'rhel', 'linux']:\n curLog = db.tempLogs.find({\n 'log_id': logId\n })\n elif platf == 'windows':\n curLog = db.tempLogsW.find({\n 'log_id': logId\n })\n\n temp = ''\n for c in curLog:\n temp = c['message']\n\n if str(compStat[0]) == 'C':\n compS = 'Compliant'\n elif str(compStat[0]) == 'NC':\n compS = 'Non-Compliant'\n elif str(compStat[0]) == 'PNC':\n compS = 'Potential Non-Compliant'\n\n vLayout = QVBoxLayout()\n vLayout.addWidget(QLabel('Status :: ' + compS))\n vLayout.addWidget(QLabel('Logs Message :: ' + str(temp)))\n vLayout.addWidget(QLabel('Comp Message :: ' + str(compMsg)))\n vLayout.addStretch(1)\n logPopup.setLayout(vLayout)\n\n logPopup.exec_()\n else:\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setText('Please select a report row to continue.')\n msg.setIcon(QMessageBox.Critical)\n msg.exec_()\n\n # This function checks the compliance and stores the report data in Mongo and its summary in mysql\n # @input Rule Frames\n # @output Compliance Report\n def checkCompliance(self):\n checkComplianceReportId = str(uuid.uuid4())[:8] + '_' + str(datetime.datetime.now().date())\n conName = str(self.contextCombo.currentText())\n # Compliance target name\n conName = conName.replace('_cv', '-cfv').replace('_v', '-ctv')\n # CT Entry Name\n entryName = dbOPS.getEntryName(str(self.logName.currentText()), str(self.contextCombo.currentText()))\n # Log Store Platform\n platform = dbop.getLogPlatform(str(self.logName.currentText()), str(self.contextCombo.currentText()))\n\n # platform details\n platformDetails = dbOPS.getPlatformDetails(str(self.contextCombo.currentText()), platform, entryName)\n\n for stm in self.policyStmtsList:\n s = str(stm)\n policyId = dbop.getPolicyId(s)\n ruleDirXMLList = []\n files = []\n for oss in platformDetails:\n dirTemp = 'interm-caseframe/' + conName + '/' + entryName + '/' + oss + '/'\n allFileList = []\n pattern = '*.xml'\n for path, dirs, files in os.walk(os.path.abspath(dirTemp)):\n for filename in fnmatch.filter(files, pattern):\n allFileList.append(os.path.join(path, filename))\n\n files = []\n for fileList in allFileList:\n if fileList.__contains__('/' + policyId) and fileList.__contains__('rule'):\n files.append(fileList)\n\n if len(files) == 0:\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setText('The rule for the policy ID :: ' + str(policyId) + ' does not Exist please enforce the policy for the compliance target and then retry for this policy')\n logging.error(MESSAGE['0006'])\n msg.exec_()\n elif len(files) > 0:\n logsource = str(self.logName.currentText())\n logging.info(MESSAGE['0043'])\n self.progressBar.setValue(0)\n self.complianceCheckingStatus.setText('Compliance Checking in Progress...')\n self.complianceCheckingStatus.setMinimumWidth(200)\n self.checkComplianceButton.setEnabled(False)\n self.viewComplianceReport.setEnabled(False)\n # This function checks the compliance\n start_time = time.time()\n self.compCheckStartTime = datetime.datetime.now().time()\n compcheck.checkComplaince(s, logsource, conName, platform, checkComplianceReportId, files, entryName, self.progressBar)\n elapsed_time = time.time() - start_time\n self.compCheckEndTime = datetime.datetime.now().time()\n self.progressBar.setValue(100)\n self.complianceCheckingStatus.setText('Compliance 
Checking Completed...')\n\n dbOPS.insertReportSummary(checkComplianceReportId, conName, logsource)\n msg = QMessageBox()\n self.complianceCheckingStatus.setText('')\n msg.setText('Your Compliance Report has been Saved with the Name : \\n%s \\nStart Time - %s hrs\\nEnd Time - %s hrs\\nElapsed Time - %s seconds'\n % (checkComplianceReportId, self.compCheckStartTime, self.compCheckEndTime, elapsed_time))\n logging.info(MESSAGE['0005'] + checkComplianceReportId)\n msg.setIcon(QMessageBox.Information)\n msg.exec_()\n self.checkComplianceButton.setEnabled(True)\n self.viewComplianceReport.setEnabled(True)\n self.complianceCheckingStatus.setText('')\n\n def getExternalLogs(self):\n self.progressBar1 = QProgressBar()\n self.progressBar1.setValue(0)\n self.insertLogLabel = QLabel('')\n\n self.logFile = QLineEdit()\n self.logFile.setReadOnly(True)\n browseButton = QPushButton('Browse')\n self.contextName = QComboBox()\n self.contextEntry = QComboBox()\n self.table = QTableWidget()\n self.storeName = QLineEdit()\n submitButton = QPushButton('Submit')\n self.contextVersionList = QComboBox()\n\n self.logPlatform = QComboBox()\n self.logPlatform.addItems(['', 'Windows', 'Ubuntu', 'RHEL'])\n\n self.table.insertColumn(0)\n self.table.insertColumn(1)\n self.table.insertColumn(2)\n self.table.insertColumn(3)\n self.table.insertColumn(4)\n self.table.setHorizontalHeaderLabels(\n ['Date', 'Time', 'Compliance Target', 'Compliance Target Entry', 'Store Name'])\n self.table.horizontalHeader().setStretchLastSection(True)\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.table.setColumnWidth(2, 120)\n self.table.setColumnWidth(3, 120)\n\n # listeners\n browseButton.clicked.connect(self.browseClick)\n self.contextName.currentIndexChanged.connect(self.contextChanged)\n self.contextVersionList.currentIndexChanged.connect(self.populateContextEntries)\n submitButton.clicked.connect(self.submitClick)\n self.logPlatform.currentIndexChanged.connect(self.changeLogTypeList)\n\n # layout\n vLayout = QVBoxLayout()\n gridLayout = QGridLayout()\n gridLayout.addWidget(QLabel('Compliance Target'), 1, 0)\n gridLayout.addWidget(self.contextName, 1, 1)\n gridLayout.addWidget(QLabel('Platform:'), 1, 2)\n gridLayout.addWidget(QLabel('Compliance Target entry'), 2, 0)\n gridLayout.addWidget(self.contextEntry, 2, 1)\n gridLayout.addWidget(self.logPlatform, 2, 2)\n gridLayout.addWidget(QLabel('Input File'), 3, 0)\n gridLayout.addWidget(self.logFile, 3, 1)\n gridLayout.addWidget(browseButton, 3, 2)\n gridLayout.addWidget(QLabel('Store Name'), 4, 0)\n gridLayout.addWidget(self.storeName, 4, 1)\n gridLayout.addWidget(submitButton, 4, 2)\n\n # Add the sequence\n contentHLayout = QHBoxLayout()\n contentHLayout.addWidget(self.table)\n # contentHLayout.addLayout(logColVLayout)\n contentHLayout.addLayout(self.drawLogColSelect())\n logTypeList = QComboBox()\n logTypeList.setMinimumHeight(30)\n logTypeList.addItems(['', 'authentication', 'history', 'syslog'])\n self.lst = [\n 'startdate', 'starttime', 'enddate', 'endtime',\n 'CTname', 'severity', 'source', 'logsource', 'logtype',\n 'timeelapsed', 'message', 'additionalFields', 'sourcehost'\n ]\n gridLayout.addLayout(contentHLayout, 5, 0, 1, 3)\n\n gridLayout.addWidget(self.insertLogLabel, 6, 0)\n gridLayout.addWidget(self.progressBar1, 6, 1)\n\n vLayout.addLayout(gridLayout)\n\n return vLayout\n\n # UI for Custom log Column number GRID\n def drawLogColSelect(self):\n qGrid = QGridLayout()\n lbl = QLabel('Log Source')\n lbl.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n 
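# The eleven column-index rows added below all repeat one label + QLineEdit\n # pattern; a loop-based construction (hypothetical sketch, behaviour unchanged)\n # would be more compact:\n #   for row, (text, attr) in enumerate([('Start Date', 'startdateCol'), ...], 2):\n #       qGrid.addWidget(QLabel(text), row, 0)\n #       edit = QLineEdit('0')\n #       edit.setMaximumSize(25, 25)\n #       setattr(self, attr, edit)\n #       qGrid.addWidget(edit, row, 1)\n # The explicit version is kept below as written.\n 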
qGrid.addWidget(lbl, 0, 0)\n self.logTypeCombo = QComboBox()\n qGrid.addWidget(self.logTypeCombo, 1, 0, 1, 2)\n\n qGrid.addWidget(QLabel('Start Date'), 2, 0)\n self.startdateCol = QLineEdit()\n self.startdateCol.setText('0')\n self.startdateCol.setMaximumHeight(25)\n self.startdateCol.setMaximumWidth(25)\n qGrid.addWidget(self.startdateCol, 2, 1)\n\n qGrid.addWidget(QLabel('Start Time'), 3, 0)\n self.starttimeCol = QLineEdit()\n self.starttimeCol.setText('0')\n self.starttimeCol.setMaximumHeight(25)\n self.starttimeCol.setMaximumWidth(25)\n qGrid.addWidget(self.starttimeCol, 3, 1)\n\n qGrid.addWidget(QLabel('End Date'), 4, 0)\n self.enddateCol = QLineEdit()\n self.enddateCol.setText('0')\n self.enddateCol.setMaximumHeight(25)\n self.enddateCol.setMaximumWidth(25)\n qGrid.addWidget(self.enddateCol, 4, 1)\n\n qGrid.addWidget(QLabel('End Time'), 5, 0)\n self.endtimeCol = QLineEdit()\n self.endtimeCol.setText('0')\n self.endtimeCol.setMaximumHeight(25)\n self.endtimeCol.setMaximumWidth(25)\n qGrid.addWidget(self.endtimeCol, 5, 1)\n\n qGrid.addWidget(QLabel('Severity'), 6, 0)\n self.severityCol = QLineEdit()\n self.severityCol.setText('0')\n self.severityCol.setMaximumHeight(25)\n self.severityCol.setMaximumWidth(25)\n qGrid.addWidget(self.severityCol, 6, 1)\n\n qGrid.addWidget(QLabel('Source'), 7, 0)\n self.sourceCol = QLineEdit()\n self.sourceCol.setText('0')\n self.sourceCol.setMaximumHeight(25)\n self.sourceCol.setMaximumWidth(25)\n qGrid.addWidget(self.sourceCol, 7, 1)\n\n qGrid.addWidget(QLabel('Log Source'), 8, 0)\n self.logSourceCol = QLineEdit()\n self.logSourceCol.setText('0')\n self.logSourceCol.setMaximumHeight(25)\n self.logSourceCol.setMaximumWidth(25)\n qGrid.addWidget(self.logSourceCol, 8, 1)\n\n qGrid.addWidget(QLabel('Log Type'), 9, 0)\n self.logtypeCol = QLineEdit()\n self.logtypeCol.setText('0')\n self.logtypeCol.setMaximumHeight(25)\n self.logtypeCol.setMaximumWidth(25)\n qGrid.addWidget(self.logtypeCol, 9, 1)\n\n qGrid.addWidget(QLabel('Time Elapsed'), 10, 0)\n self.timeelapsedCol = QLineEdit()\n self.timeelapsedCol.setText('0')\n self.timeelapsedCol.setMaximumHeight(25)\n self.timeelapsedCol.setMaximumWidth(25)\n qGrid.addWidget(self.timeelapsedCol, 10, 1)\n\n qGrid.addWidget(QLabel('Message'), 11, 0)\n self.messageCol = QLineEdit()\n self.messageCol.setText('0')\n self.messageCol.setMaximumHeight(25)\n self.messageCol.setMaximumWidth(25)\n qGrid.addWidget(self.messageCol, 11, 1)\n\n qGrid.addWidget(QLabel('Source Host'), 12, 0)\n self.sourcehostCol = QLineEdit()\n self.sourcehostCol.setText('0')\n self.sourcehostCol.setMaximumHeight(25)\n self.sourcehostCol.setMaximumWidth(25)\n qGrid.addWidget(self.sourcehostCol, 12, 1)\n\n # Listeners\n self.logTypeCombo.currentIndexChanged.connect(self.logTypeComboChanged)\n self.refreshExternalLogCollectUI()\n return qGrid\n\n def refreshExternalLogCollectUI(self):\n self.storeName.setText('')\n self.logFile.setText('')\n self.logTypeCombo.setCurrentIndex(0)\n self.populateContextNames()\n self.populateContextVersions()\n self.populateContextEntries()\n self.populateLogHistory()\n self.disableColumnList()\n\n\n def logTypeComboChanged(self):\n type = str(self.logTypeCombo.currentText())\n if type == 'custom':\n self.enableColumnList()\n msgBox = QMessageBox()\n msgBox.setWindowTitle('Information')\n msgBox.setText(\n 'Please enter the column indexes for your log file. Please note that the columns must be \"Space\" separated.')\n msgBox.setIcon(QMessageBox.Information)\n msgBox.exec_()\n else:\n self.disableColumnList()\n\n def browseClick(self):\n self.fname = QFileDialog.getOpenFileName(caption='Browse Log file')\n if (self.fname == ''):\n return\n inputFileName = str(self.fname)\n inputFileName = inputFileName[::-1]\n if inputFileName.__contains__('.'):\n inputFileName = inputFileName[:inputFileName.index('.')]\n inputFileName = inputFileName[::-1]\n regexp = re.compile('[0-9]+')\n else:\n inputFileName = 'NA'\n\n if inputFileName != 'NA':\n if str(self.logPlatform.currentText()) == '':\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText('Please select a platform before you select a log file for input.')\n logging.error(MESSAGE['0008'])\n msg.exec_()\n else:\n if inputFileName == 'csv' and str(self.logPlatform.currentText()).lower() == 'windows':\n self.logFile.setText(self.fname)\n elif str(self.logPlatform.currentText()).lower() in ['rhel']:\n self.logFile.setText(self.fname)\n elif inputFileName in ['log', 'txt'] and str(self.logPlatform.currentText()).lower() in ['linux', 'ubuntu', 'rhel']:\n self.logFile.setText(self.fname)\n elif regexp.search(inputFileName) and str(self.logPlatform.currentText()).lower() in ['linux', 'ubuntu', 'rhel']:\n self.logFile.setText(self.fname)\n else:\n self.logFile.clear()\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText('Invalid Log Input type. Please click into additional information for details.')\n msg.setDetailedText(\n 'Windows : Only .csv files are allowed\\nOthers : Only .log, .txt and .[number] files are allowed')\n logging.error(MESSAGE['0009'])\n msg.exec_()\n elif inputFileName == 'NA':\n self.logFile.clear()\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText('Invalid Log Input type. 
Please click into additional information for details.')\n msg.setDetailedText(\n 'Windows : Only .csv files are allowed\\nOthers : Only .log, .txt and .[number] files are allowed')\n logging.error(MESSAGE['0009'])\n msg.exec_()\n\n def contextChanged(self):\n if self.contextName.currentText() != '':\n logloc.generateDialogBox().exec_()\n self.populateContextEntries()\n\n\n def populateContextEntries(self):\n self.contextEntry.clear()\n if str(self.contextName.currentText()) != '':\n entries = dbop.getContextDetails(str(self.contextName.currentText()))\n for entry in entries:\n self.contextEntry.addItem(entry)\n\n def changeLogTypeList(self):\n plat = str(self.logPlatform.currentText()).lower()\n self.logTypeCombo.clear()\n if plat == 'windows':\n self.logTypeCombo.addItems(['', 'application', 'system', 'security', 'custom'])\n elif plat == 'ubuntu':\n self.logTypeCombo.addItems(['', 'authorization', 'history', 'syslog', 'custom'])\n elif plat == 'rhel':\n self.logTypeCombo.addItems(['', 'rhel'])\n else:\n self.logTypeCombo.addItems([''])\n\n def submitClick(self):\n okStatus = True\n title = ''\n message = ''\n if str(self.contextName.currentText()) == '':\n title = 'Error'\n message = 'Please select a context before uploading logs.'\n okStatus = False\n elif str(self.contextEntry.currentText()) == '':\n title = 'Error'\n message = 'Please select a context entry before uploading logs.'\n okStatus = False\n elif str(self.logFile.text()) == '':\n title = 'Error'\n message = 'No log files were selected.'\n okStatus = False\n elif str(self.storeName.text()) == '' or dbop.checkStoreNameValidity(str(self.storeName.text())) == False:\n title = 'Error'\n message = 'Please enter a valid store name.'\n okStatus = False\n elif str(self.logTypeCombo.currentText()) == '':\n title = 'Error'\n message = 'Please Enter a valid logtype'\n okStatus = False\n if str(self.logTypeCombo.currentText()) == 'unknown':\n status = self.checkColIndexListStatus()\n if status == False:\n okStatus = False\n title = 'Required'\n message = 'Additional Column Values are required to parse the log file.'\n\n if okStatus == False:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(title)\n msgBox.setText(message)\n logging.error(message)\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n else:\n\n reply = ''\n if str(self.logTypeCombo.currentText()).lower() == 'custom':\n self.customLogParser()\n else:\n platform = str(self.logPlatform.currentText())\n self.insertLogLabel.setText('Uploading...')\n if platform == 'Ubuntu':\n self.progressBar1.setValue(0)\n self.parseUbuntuLogsForNormalization()\n self.progressBar1.setValue(100)\n self.insertLogLabel.setText('Uploading...Done!')\n elif platform == 'Windows':\n self.progressBar1.setValue(0)\n ifile = open(self.fname, \"rb\")\n reader = csv.reader(ifile)\n resCount = sum(1 for row in reader)\n resolution = int(math.floor(resCount * 0.01))\n if resolution == 0: resolution = 1\n parsewin.parseWindowsLogs(str(self.fname), str(self.storeName.text()),\n str(self.contextName.currentText()), str(self.logTypeCombo.currentText()), self.progressBar1, resolution)\n self.progressBar1.setValue(100)\n self.insertLogLabel.setText('Uploading...Done!')\n elif platform == 'RHEL':\n self.progressBar1.setValue(0)\n self.normalizeRHEL(\n str(self.fname),\n str(self.storeName.text()),\n str(self.contextName.currentText()),\n str(self.logTypeCombo.currentText())\n )\n self.progressBar1.setValue(100)\n else:\n msgBox = QMessageBox()\n msgBox.setWindowTitle('Error')\n msgBox.setText('Please select a 
valid platform which the logs belong to.')\n logging.error(MESSAGE['0010'])\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setDefaultButton(QMessageBox.Ok)\n reply = msgBox.exec_()\n\n if reply != QMessageBox.Ok and str(self.logTypeCombo.currentText()) != 'custom':\n date = time.strftime(\"%d/%m/%Y\")\n currtime = time.strftime(\"%H:%M:%S\")\n contextName = str(self.contextName.currentText())\n contextEntry = str(self.contextEntry.currentText())\n storeName = str(self.storeName.text())\n\n self.progressBar1.setValue(0)\n self.insertLogLabel.setText('')\n result = sqlOperations.addToLogHistory(date, currtime, contextName, contextEntry, storeName, platform)\n msg = QMessageBox()\n msg.setWindowTitle('Success...')\n msg.setIcon(QMessageBox.Information)\n msg.setText('Log successfully uploaded.')\n msg.exec_()\n\n self.storeName.setText('')\n self.logFile.setText('')\n self.logTypeCombo.setCurrentIndex(0)\n # self.contextName.clear()\n self.contextName.setCurrentIndex(0)\n self.logPlatform.setCurrentIndex(0)\n self.contextVersionList.setCurrentIndex(0)\n\n filename = str(self.logFile.text())\n if result >= 0:\n logging.info(MESSAGE['0016'] + storeName)\n self.table.insertRow(self.table.rowCount())\n self.table.setItem(self.table.rowCount() - 1, 0, QTableWidgetItem(date))\n self.table.setItem(self.table.rowCount() - 1, 1, QTableWidgetItem(currtime))\n self.table.setItem(self.table.rowCount() - 1, 4, QTableWidgetItem(storeName))\n self.table.setItem(self.table.rowCount() - 1, 2, QTableWidgetItem(contextName))\n self.table.setItem(self.table.rowCount() - 1, 3, QTableWidgetItem(contextEntry))\n elif result == -1:\n msgBox = QMessageBox()\n msgBox.setText('Adding Log Failed. Store with same name already exists!')\n logging.error(MESSAGE['0011'])\n msgBox.setWindowTitle('Error')\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n elif result == -2:\n msgBox = QMessageBox()\n msgBox.setText('Adding Log Failed. Please supply a unique store name!')\n logging.error(MESSAGE['0011'])\n msgBox.setWindowTitle('Error')\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n elif result == -3:\n msgBox = QMessageBox()\n msgBox.setText('Adding Log Failed. Please select a Compliance Target name. If no Compliance Target name exists, consider adding compliance targets in the New compliance target tab of compliance target Management.')\n logging.error(MESSAGE['0012'])\n msgBox.setWindowTitle('Error')\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n elif result == -4:\n msgBox = QMessageBox()\n msgBox.setText('Adding Log Failed. Please select a compliance target entry. If no compliance target entry exists, consider adding compliance target entries in the New compliance target tab or Update compliance target tab of compliance target Management.')\n logging.error(MESSAGE['0013'])\n msgBox.setWindowTitle('Error')\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n elif result == -5:\n msgBox = QMessageBox()\n msgBox.setText('Adding Log Failed. Unhandled SQL error. 
Please contact the application developers for more help.')\n msgBox.setWindowTitle('Error')\n logging.error(MESSAGE['0014'])\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.exec_()\n\n def normalizeRHEL(self, fname, storeName, contextName, logTypeCombo):\n ifile = open(fname, \"rb\")\n reader = csv.reader(ifile)\n resCount = sum(1 for row in reader)\n resolution = int(math.floor(resCount * 0.01))\n if resolution == 0: resolution = 1\n rhelnorm.normalizeRHEL(fname, storeName, contextName, logTypeCombo, self.progressBar1, resolution)\n\n def parseUbuntuLogsForNormalization(self):\n indexList = {}\n # Indexes\n indexList['startdate'] = int(self.startdateCol.text())\n indexList['starttime'] = int(self.starttimeCol.text())\n indexList['enddate'] = int(self.enddateCol.text())\n indexList['endtime'] = int(self.endtimeCol.text())\n indexList['severity'] = int(self.severityCol.text())\n indexList['source'] = int(self.sourceCol.text())\n indexList['logsource'] = int(self.logSourceCol.text())\n indexList['logtype'] = int(self.logtypeCol.text())\n indexList['timeelapsed'] = int(self.timeelapsedCol.text())\n indexList['message'] = int(self.messageCol.text())\n indexList['sourcehost'] = int(self.sourcehostCol.text())\n\n # Field Value\n startdate = ''\n starttime = ''\n enddate = ''\n endtime = ''\n contextname = str(self.contextName.currentText())\n severity = ''\n source = ''\n logsource = ''\n logtype = ''\n timeelapsed = ''\n message = ''\n additionalFields = []\n sourcehost = ''\n\n months = [\n 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'\n ]\n countNonZeroIndex = 0\n for val in indexList.values():\n if int(val) > 0:\n countNonZeroIndex = countNonZeroIndex + 1\n\n # Parser for History Logs for Ubuntu\n if str(self.logTypeCombo.currentText()) == 'history':\n filename = self.fname\n with open(filename) as f:\n lines = f.readlines()\n\n cnt = -1\n resolution = int(math.ceil(len(lines) * 0.01))\n progress = 0\n\n if resolution == 0: resolution = 1\n\n install = []\n upgrade = []\n remove = []\n userName = ''\n for line in lines:\n cnt = cnt + 1\n if cnt % resolution == 0:\n progress = progress + 1\n self.progressBar1.setValue(progress)\n\n if 'Start-Date' in line:\n line = line[0:(len(line)) - 1]\n line = line[line.index(': ') + 2:]\n startdate = line[:line.index(' ')]\n starttime = line[line.index(' ') + 2:]\n elif 'Requested-By' in line: # Requested-By: cdc (1000)\n line = str(line)\n userName = str(re.split(' ', line)[1])\n elif 'Commandline' in line:\n line = line[0:(len(line)) - 1]\n commandline = line[line.index(':') + 2:]\n elif 'Install' in line:\n line = line[0:(len(line)) - 1]\n instTemp = line[line.index(':') + 2:]\n inst = instTemp.split('),')\n for i in range(0, len(inst)):\n tmpStr = inst[i];\n tmpStr = tmpStr[0:tmpStr.index(':')]\n inst[i] = tmpStr\n install.append(inst[i])\n elif 'Upgrade' in line:\n line = line[0:(len(line)) - 1]\n tmpUp = line[line.index(':') + 2:]\n upg = tmpUp.split('), ')\n for i in range(0, len(upg)):\n upg[i] = upg[i][:upg[i].index(' ')]\n upg[i] = upg[i][:upg[i].index(':')]\n upgrade.append(upg[i])\n elif 'Remove' in line:\n line = line[0:(len(line)) - 1]\n tmpPrg = line[line.index(':') + 2:]\n upg = tmpPrg.split('), ')\n for i in range(0, len(upg)):\n upg[i] = upg[i][:upg[i].index(' ')]\n upg[i] = upg[i][:upg[i].index(':')]\n remove.append(upg[i])\n elif 'End-Date' in line:\n line = line[0:(len(line)) - 1]\n line = line[line.index(':') + 2:]\n enddate = line[0:line.index(' ')]\n endtime = line[line.index(' ') + 2:]\n 
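# An End-Date line closes one apt history.log block, so the fields gathered\n # above are flushed into a single record below. For reference, a typical\n # block looks like this (illustrative example, not taken from the data):\n #   Start-Date: 2018-03-09  10:01:02\n #   Commandline: apt-get install htop\n #   Requested-By: user (1000)\n #   Install: htop:amd64 (2.0.2-1)\n #   End-Date: 2018-03-09  10:01:05\n 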
additionalFields.append({'install': install})\n additionalFields.append({'upgrade': upgrade})\n additionalFields.append({'remove': remove})\n\n # bring the user name from auth.log (stored in MongoDB)\n if userName == '':\n userName = dbops.getUserNameFromAuth(startdate, starttime, commandline)\n\n\n log = {\n 'log_id': str(uuid.uuid4()),\n 'startdate': startdate,\n 'starttime': starttime,\n 'enddate': enddate,\n 'endtime': endtime,\n 'contextname': str(self.contextName.currentText()),\n 'sourcehost': '',\n 'logtype': str(self.logTypeCombo.currentText()),\n 'timeelapsed': '',\n 'source': '',\n 'severity': '',\n 'message': str(commandline + ' __ ' + userName),\n 'additionalFields': additionalFields,\n 'logsource': str(self.storeName.text()),\n 'platform': 'linux'\n\n }\n dbops.justInsert(log)\n\n else: # Parser for all other Logs except history logs\n filename = self.fname\n with open(filename) as f:\n line = f.readlines()\n\n cnt = -1\n resolution = int(math.floor(len(line) * 0.01))\n progress = 0\n if resolution == 0: resolution = 1\n\n for l in line:\n cnt = cnt + 1\n if cnt % resolution == 0:\n progress = progress + 1\n self.progressBar1.setValue(progress)\n\n inDate = l[0:4]\n l = l[5:]\n monD = l[0:7]\n month = monD[0:monD.index(' ')]\n date = (monD[monD.index(' ') + 1:]).strip(' ')\n # build the date in SQL format (YYYY-MM-DD)\n indexMonth = months.index(month) + 1\n val = []\n if indexMonth < 10:\n inDate = inDate + '-' + '0' + str(indexMonth)\n else:\n inDate = inDate + '-' + str(indexMonth)\n\n if len(date) == 1:\n inDate = inDate + '-' + '0' + str(date)\n else:\n inDate = inDate + '-' + str(date)\n val.append(inDate)\n sLine = l[7:len(l) - 1]\n # TODO:: delete this line later\n countNonZeroIndex = 3\n for c in range(1, countNonZeroIndex):\n val.append(sLine[:sLine.index(' ')])\n sLine = sLine[sLine.index(' ') + 1:]\n val.append(sLine)\n dbops.insertIntoMongo(val, str(self.contextName.currentText()), str(self.storeName.text()),\n str(self.logTypeCombo.currentText()), str(self.logPlatform.currentText())\n )\n\n def checkColIndexListStatus(self):\n status = False\n if int(self.startdateCol.text()) > 0 or int(self.starttimeCol.text()) > 0 or \\\n int(self.enddateCol.text()) > 0 or int(self.endtimeCol.text()) > 0 or \\\n int(self.severityCol.text()) > 0 or int(self.sourceCol.text()) > 0 or \\\n int(self.logSourceCol.text()) > 0 or int(self.logtypeCol.text()) > 0 or \\\n int(self.timeelapsedCol.text()) > 0 or int(self.messageCol.text()) > 0 or int(\n self.sourcehostCol.text()) > 0:\n status = True\n return status\n\n def customLogParser(self):\n startdateCol = str(self.startdateCol.text())\n starttimeCol = str(self.starttimeCol.text())\n enddateCol = str(self.enddateCol.text())\n endtimeCol = str(self.endtimeCol.text())\n severityCol = str(self.severityCol.text())\n sourceCol = str(self.sourceCol.text())\n logSourceCol = str(self.logSourceCol.text())\n logtypeCol = str(self.logtypeCol.text())\n timeelapsedCol = str(self.timeelapsedCol.text())\n messageCol = str(self.messageCol.text())\n sourceHostCol = str(self.sourcehostCol.text())\n if self.checkColIndexListStatus() == True:\n allIsNumber = True\n try:\n lst = [\n int(startdateCol), int(starttimeCol), int(enddateCol), int(endtimeCol), int(severityCol),\n int(sourceCol), int(logSourceCol), int(logtypeCol), int(timeelapsedCol), int(messageCol),\n int(sourceHostCol)\n ]\n except ValueError:\n allIsNumber = False\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n 'Some/All the values you have 
entered are not Integers. Please check the numbers and try again.')\n msg.exec_()\n\n if allIsNumber == True:\n colMap = {\n lst[0]: 'startdate',\n lst[1]: 'starttime',\n lst[2]: 'enddate',\n lst[3]: 'endtime',\n lst[4]: 'severity',\n lst[5]: 'source',\n lst[6]: 'logsource',\n lst[7]: 'logtype',\n lst[8]: 'timeelapsed',\n lst[9]: 'message',\n lst[10]: 'sourcehost'\n }\n nonZV = []\n for l in lst:\n if l != 0:\n nonZV.append(l)\n\n nonZV.sort()\n repeatStatus = False\n for i in range(0, len(nonZV) - 1):\n if nonZV[i] in nonZV[i + 1:]:\n repeatStatus = True\n break\n\n if repeatStatus == True:\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n 'Some/All the values you have entered are repeating Integers. Please check the numbers and try again.')\n msg.exec_()\n else:\n filename = self.fname\n with open(filename) as f:\n lines = f.readlines()\n for l in lines:\n log = {\n 'log_id': str(uuid.uuid4()),\n 'startdate': '',\n 'starttime': '',\n 'enddate': '',\n 'endtime': '',\n 'contextname': str(self.contextName.currentText()),\n 'sourcehost': '',\n 'logtype': str(self.logTypeCombo.currentText()),\n 'timeelapsed': '',\n 'source': '',\n 'severity': '',\n 'message': '',\n 'additionalFields': '',\n 'logsource': str(self.storeName.text()),\n 'platform': str(self.logPlatform.currentText())\n }\n logLine = str(l)\n for i in range(1, len(nonZV)):\n tmpCurrVal = logLine[:logLine.index(' ')]\n logLine = logLine[logLine.index(' ') + 1:]\n colNm = colMap[i]\n log[colNm] = tmpCurrVal\n i = i + 1\n colNm = colMap[i]\n log[colNm] = logLine\n dbops.justInsert(log)\n else:\n msg = QMessageBox()\n msg.setWindowTitle('Error')\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n 'All the values for the column index you have entered are 0. 
Please enter at least 1 '\n 'non-zero value to continue'\n )\n logging.error(MESSAGE['0015'])\n msg.exec_()","sub_path":"PHASE-1.1_IMPROVEMENT_PHASE/POLICOMP_TOOL/complianceCheckCompliance.py","file_name":"complianceCheckCompliance.py","file_ext":"py","file_size_in_byte":62206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"270212629","text":"#!/usr/bin/env python3\n# updated : 15-01-2019\n\nimport re\nimport sys\n\nmorseAlphabet = {\n \"A\": \".-\",\n \"B\": \"-...\",\n \"C\": \"-.-.\",\n \"D\": \"-..\",\n \"E\": \".\",\n \"F\": \"..-.\",\n \"G\": \"--.\",\n \"H\": \"....\",\n \"I\": \"..\",\n \"J\": \".---\",\n \"K\": \"-.-\",\n \"L\": \".-..\",\n \"M\": \"--\",\n \"N\": \"-.\",\n \"O\": \"---\",\n \"P\": \".--.\",\n \"Q\": \"--.-\",\n \"R\": \".-.\",\n \"S\": \"...\",\n \"T\": \"-\",\n \"U\": \"..-\",\n \"V\": \"...-\",\n \"W\": \".--\",\n \"X\": \"-..-\",\n \"Y\": \"-.--\",\n \"Z\": \"--..\",\n \" \": \" \",\n \"0\": \"-----\",\n \"1\": \".----\",\n \"2\": \"..---\",\n \"3\": \"...--\",\n \"4\": \"....-\",\n \"5\": \".....\",\n \"6\": \"-....\",\n \"7\": \"--...\",\n \"8\": \"---..\",\n \"9\": \"----.\",\n}\n\n\ndef CodeWithMorse(message):\n \"\"\"\n encode message into morse code.\n words are separated by three spaces\n letters are separated by one space\n \"\"\"\n\n # return value\n n_msg = \"\"\n\n try:\n for i in message:\n n_msg += morseAlphabet[i.upper()] + \" \"\n\n return n_msg\n\n except KeyError:\n print(\"only alphabet characters and numbers are allowed !!!\")\n\n\ndef DecodeWithMorse(message):\n \"\"\"\n decode message with morse code\n \"\"\"\n\n # return value\n d_msg = \"\"\n\n # split message into words (list)\n # expect words in the message to be separated by three spaces\n words_list = re.split(r\"\\s\\s\\s\", message, flags=0)\n\n # split words into letters (list)\n letters_list = [re.split(r\"\\s\", word, flags=0) for word in words_list]\n\n # change each letter with the MorseCode from the dict above\n for word in letters_list:\n for letter in word:\n for key in morseAlphabet:\n if morseAlphabet[key] == letter:\n letters_list[letters_list.index(word)][word.index(letter)] = key\n\n # construct the d_msg\n for word in letters_list:\n for letter in word:\n d_msg += letter\n d_msg += \" \"\n\n return d_msg\n\n\nif __name__ == \"__main__\":\n\n usage = \"usage : python3 morse_code.py -e this is message to encode\"\n usage += \"\\nusage : python3 morse_code.py -d this is message to decode\"\n\n if len(sys.argv) == 2 and sys.argv[1] in (\"-h\", \"--help\"):\n print(usage)\n exit()\n\n if len(sys.argv) >= 3:\n option = sys.argv[1]\n message = sys.argv[2:]\n\n if option not in (\"-e\", \"-d\"):\n print(usage)\n exit()\n\n if option == \"-e\":\n message = \" \".join(message)\n print(\n \"original message : {}\\nencoded message : {}\".format(\n message, CodeWithMorse(message)\n )\n )\n else:\n message = \" \".join(message)\n print(\n \"original message : {}\\ndecoded message : {}\".format(\n message, DecodeWithMorse(message).lower()\n )\n )\n\n else:\n print(usage)\n exit()\n","sub_path":"python/morse_code.py","file_name":"morse_code.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"551755172","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nApologies, this is completely hardwired right now... 
Will get it fixed soonish!\r\n\r\nDE: a stochastic population based method that is useful for global optimization\r\n problems because it does not use the gradient of the problem being\r\n optimized, which means there is no need for the optimization problem to be\r\n differentiable.\r\n https://en.wikipedia.org/wiki/Differential_evolution\r\n** Storn, R and Price, K, Differential Evolution - a Simple and Efficient\r\n Heuristic for Global Optimization over Continuous Spaces, Journal of Global\r\n Optimization, 1997, 11, 341 - 359.\r\n\r\nPowell: a conjugate direction method. It performs sequential one-dimensional\r\n minimizations along each vector of the directions set, which is updated\r\n at each iteration of the main minimization loop. The function need not\r\n be differentiable, and no derivatives are taken.\r\n** Press, William H., et al. Numerical recipes. Vol. 3. Cambridge:\r\n Cambridge University Press, 1989.\r\n\r\nDA: a stochastic approach which combines the generalization of CSA\r\n (Classical Simulated Annealing) and FSA (Fast Simulated Annealing) coupled\r\n to a strategy for applying a local search on accepted locations.\r\n** Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated Annealing Algorithm\r\n and Its Application to the Thomson Model. Physics Letters A, 233, 216-220\r\n (1997).\r\n** Xiang Y, Gong XG. Efficiency of Generalized Simulated Annealing. Physical\r\n Review E, 62, 4473 (2000).\r\n\"\"\"\r\n\r\n__title__ = \"\"\r\n__author__ = \"[Manon Sabot]\"\r\n__version__ = \"1.0 (16.01.2019)\"\r\n__email__ = \"m.e.b.sabot@gmail.com\"\r\n\r\n\r\n#==============================================================================\r\n\r\nimport warnings # ignore these warnings\r\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\r\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\r\n\r\n# general modules\r\nimport os # check for files, paths\r\nimport sys # check for files, paths\r\nimport shutil # move files\r\nfrom itertools import groupby\r\nimport random # pick a random day for the forcings to be generated\r\nimport numpy as np # array manipulations, math operators\r\nimport pandas as pd # read/write dataframes, csv files\r\n\r\n# change the system path to load modules from TractLSM\r\nscript_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\r\nsys.path.append(os.path.abspath(os.path.join(script_dir, '..')))\r\n\r\n# own modules\r\nfrom TractLSM.Utils import get_main_dir # get the project's directory\r\nfrom TractLSM.Utils import read_csv # read in files\r\nfrom TractLSM.SPAC import net_radiation # preset it for model training\r\nfrom fit_models import NLMFIT # training functions\r\n\r\n#==============================================================================\r\n\r\ndef prep_training_N_target(ifile, ofile):\r\n\r\n # read in the input and output data\r\n df1, __ = read_csv(ifile)\r\n df2, __ = read_csv(ofile)\r\n\r\n # add the predawn soil moisture profile to the input data\r\n df1['Ps_pd'] = df1['Ps'].copy() # daily pre-dawn soil water potentials\r\n\r\n # non time-sensitive: last valid value propagated until next valid\r\n df1.fillna(method='ffill', inplace=True)\r\n\r\n # add Rnet to the input (no ET or soil albedo feedbacks, this can be done)\r\n df1['Rnet'] = net_radiation(df1)\r\n df1['scale2can'] = 1.\r\n\r\n # in mmol m-2 s-1\r\n Y = df2['gs'] * 1000.\r\n\r\n return df1, 
Y\r\n\r\n\r\n#==============================================================================\r\n\r\nto_fit = False\r\n\r\n# declare empty dataframe which will be used to analyse the calibrations\r\nodf = pd.DataFrame(columns=['Model', 'training', 'solver', 'BIC', 'Rank', 'p1',\r\n 'v1', 'p2', 'v2'])\r\n\r\nbase_dir = get_main_dir() # working paths\r\n\r\n# fitting files\r\nipath = os.path.join(os.path.join(os.path.join(base_dir, 'input'),\r\n 'calibrations'), 'obs_driven')\r\nopath = os.path.join(os.path.join(os.path.join(base_dir, 'output'),\r\n 'calibrations'), 'obs_driven')\r\n\r\nxfiles = sorted([e for e in os.listdir(ipath) if e.endswith('_x.csv')])[-2:]\r\nyfiles = sorted([e for e in os.listdir(ipath) if e.endswith('_y.csv')])[-2:]\r\n\r\nif to_fit:\r\n\r\n for ifile, ofile in zip(xfiles, yfiles): # loop over the files\r\n\r\n X, Y = prep_training_N_target(os.path.join(ipath, ifile),\r\n os.path.join(ipath, ofile))\r\n\r\n # where should the calibration output be stored?\r\n out = os.path.join(opath, ofile.split('_y')[0])\r\n\r\n if not os.path.isdir(out): # make new dirs if they don't exist\r\n os.makedirs(out)\r\n\r\n # use a non-linear least square minimiser to train the models\r\n for test in ['leastsq']: #['differential_evolution', 'powell', 'dual_annealing']:\r\n\r\n XX = X.copy()\r\n nlmfit = NLMFIT(method=test, store=out, inf_gb=False)\r\n\r\n #__ = nlmfit.run(XX, Y, 'Medlyn')\r\n #__ = nlmfit.run(XX, Y, 'Tuzet')\r\n #__ = nlmfit.run(XX, Y, 'Eller')\r\n\r\n #__ = nlmfit.run(XX, Y, 'SOX-OPT')\r\n #__ = nlmfit.run(XX, Y, 'CAP')\r\n #__ = nlmfit.run(XX, Y, 'MES')\r\n #__ = nlmfit.run(XX, Y, 'LeastCost')\r\n #__ = nlmfit.run(XX, Y, 'ProfitMax2')\r\n\r\n # use ProfitMax's kmax\r\n fkmax = nlmfit.run(XX, Y, 'ProfitMax')\r\n #XX['kmax'] = fkmax['kmax']\r\n\r\n #__ = nlmfit.run(XX, Y, 'WUE-LWP')\r\n #__ = nlmfit.run(XX, Y, 'CMax')\r\n #__ = nlmfit.run(XX, Y, 'CGain')\r\n\r\n exit(1)\r\n\r\nelse: # read over the calibration files and analyse these outputs\r\n opath = os.path.join(os.path.join(os.path.join(base_dir, 'output'),\r\n 'calibrations'), 'obs_driven')\r\n fname = os.path.join(opath, 'overview_of_fits.csv')\r\n\r\n if not os.path.isfile(fname):\r\n\r\n site_spp = [e[1] for e in os.walk(opath)][0] # directories\r\n\r\n for training in site_spp: # loop over the site x spp combinations\r\n\r\n for file in os.listdir(os.path.join(opath, training)):\r\n\r\n if file.endswith('.txt') and not file.endswith('zet2.txt'):\r\n f = open(os.path.join(os.path.join(opath, training), file),\r\n 'r')\r\n model = file.split('.txt')[0]\r\n lines = f.readlines()\r\n\r\n # info to keep\r\n k1 = 'fitting method'\r\n k2 = 'function evals'\r\n k3 = 'data points'\r\n k4 = 'Bayesian info crit'\r\n k5 = '%) ' # calibrated parameters\r\n k6 = '(init' # calibrated parameters\r\n k7 = '+/-' # calibrated parameters\r\n k8 = '(fixed' # calibrated parameters\r\n k9 = '==' # calibrated parameters\r\n info = [e.split('=') if (k1 in e) else\r\n [e.split('=')[1]] if ((k2 in e) or (k3 in e) or\r\n (k4 in e)) else\r\n e.split(k5)[0].split(k7)[0].split(':')\r\n if (k5 in e) else\r\n e.split(k6)[0].split(k7)[0].split(':')\r\n if (k6 in e) else\r\n e.split(k8)[0].split(':') if (k8 in e) else\r\n e.split(k9)[0].split(':') if (k9 in e) else\r\n [''] for e in lines]\r\n info = [e.strip('\\n') for sub in info for e in sub\r\n if e != '']\r\n info = [e.replace(' ', '') if (':' in e) else e.strip()\r\n for e in info ]\r\n\r\n # split into sublists containing each solver's info\r\n by_solver = [list(sub) for e, sub in\r\n 
groupby(info, lambda x: k1 not in x) if e]\r\n\r\n # put that info in a dataframe\r\n for solver in by_solver:\r\n\r\n # append the df row\r\n dic = {'Model': model, 'training': training,\r\n 'solver': solver[0], 'Ntotal': float(solver[1]) *\r\n float(solver[2]),\r\n 'BIC': float(solver[3]), 'p1': solver[4],\r\n 'v1': float(solver[5])}\r\n\r\n if len(solver) > 6:\r\n if model == 'SOX-OPT': # deal with the 'factor'\r\n dic['p2'] = solver[8]\r\n dic['v2'] = float(solver[9])\r\n\r\n else:\r\n dic['p2'] = solver[6]\r\n dic['v2'] = float(solver[7])\r\n\r\n odf = odf.append(dic, ignore_index=True)\r\n\r\n # add the median param info to rerank the models\r\n by = ['Model', 'training']\r\n odf['med1'] = (odf['v1'] / odf.groupby(by)['v1'].transform('median')\r\n - 1.).abs()\r\n odf['med2'] = (odf['v2'] / odf.groupby(by)['v2'].transform('median')\r\n - 1.).abs()\r\n odf['med'] = odf[['med1', 'med2']].mean(axis=1)\r\n\r\n # rank the solvers (absolute ranking)\r\n odf['Rank'] = (odf.sort_values(['BIC', 'med'])\r\n .groupby(by)[['BIC', 'med']].rank(method='first')\r\n .astype(int))\r\n\r\n # change param name for ProfitMax2 to allow differentiation\r\n odf['p1'].loc[odf['Model'] == 'ProfitMax2'] = 'kmax2'\r\n\r\n # column order\r\n columns = ['Model', 'training', 'solver', 'Rank', 'BIC', 'Ntotal', 'p1',\r\n 'v1', 'p2', 'v2']\r\n\r\n # save the overview file\r\n odf[columns].to_csv(fname, index=False, na_rep='', encoding='utf-8')\r\n\r\n else:\r\n odf = (pd.read_csv(fname, header=[0]).dropna(axis=0, how='all')\r\n .dropna(axis=1, how='all').squeeze())\r\n\r\n fname = os.path.join(opath, 'best_fit.csv')\r\n\r\n if not os.path.isfile(fname): # pick best param\r\n\r\n # check whether the calibrated parameters are stuck at boundary\r\n stuck = []\r\n nlmfit = NLMFIT()\r\n\r\n for ifile in xfiles:\r\n\r\n df, __ = read_csv(os.path.join(ipath, ifile)) # ref params\r\n sub = odf[odf['training'] == ifile.split('_x.csv')[0]]\r\n\r\n for e in np.append(sub['p1'].unique(), sub['p2'].dropna().unique()):\r\n\r\n sub1 = sub[sub['p1'] == e]\r\n sub2 = sub[sub['p2'] == e]\r\n min, max = nlmfit.param_space(e, P88=df.loc[0, 'P88'])\r\n min += 0.05 * min # above min is not stuck at bound\r\n max -= 0.05 * max # below max is not stuck at bound\r\n\r\n if len(sub1) > 0:\r\n lims = np.logical_or(sub1['v1'] < min, sub1['v1'] > max)\r\n\r\n if any(lims):\r\n stuck += sub1['v1'][lims].index.to_list()\r\n\r\n if len(sub2) > 0:\r\n lims = np.logical_or(sub2['v2'] < min, sub2['v2'] > max)\r\n\r\n if any(lims):\r\n stuck += sub2['v2'][lims].index.to_list()\r\n # boundary params\r\n if len(stuck) > 0:\r\n sub = odf[odf.index.isin(stuck)]\r\n eq = sub.groupby(['Model', 'training']).size().le(2)\r\n eq_models = eq[eq == True].index.get_level_values(0)\r\n eq_trainings = eq[eq == True].index.get_level_values(1)\r\n\r\n for i in range(len(eq_models)):\r\n\r\n where = np.logical_and(odf['Model'] == eq_models[i],\r\n odf['training'] == eq_trainings[i])\r\n sub = odf[where]\r\n\r\n # which of these values are at the boundary?\r\n if not all(sub[sub.index.isin(stuck)]['Rank'].values >\r\n sub[~sub.index.isin(stuck)]['Rank'].values):\r\n odf.loc[sub[sub.index.isin(stuck)].index, 'Rank'] = 3\r\n\r\n while all(sub.loc[~sub.index.isin(stuck), 'Rank'] > 1):\r\n odf.loc[sub[~sub.index.isin(stuck)].index, 'Rank'] -= 1\r\n sub.loc[~sub.index.isin(stuck), 'Rank'] -= 1\r\n\r\n # are there still several equal best ranks within a group?\r\n eq = odf.groupby(['Model', 'training'])['Rank'].nunique().le(2)\r\n eq_models = eq[eq == 
True].index.get_level_values(0)\r\n eq_trainings = eq[eq == True].index.get_level_values(1)\r\n\r\n for i in range(len(eq_models)):\r\n\r\n where = np.logical_and(odf['Model'] == eq_models[i],\r\n odf['training'] == eq_trainings[i])\r\n sub = odf[where]\r\n\r\n # if min rank duplicated, assign 1 to median params\r\n if len(sub[sub['Rank'] == sub['Rank'].min()]) > 1:\r\n odf['Rank'][where] = 3 # deal with duplicated Rank = 1\r\n idx = sub[sub['v1'] == sub['v1'].median()].index\r\n\r\n if len(idx) > 1: # if params are equal, pick fastest\r\n sub = sub.loc[idx]\r\n idx = sub[sub['Ntotal'] == sub['Ntotal'].min()].index\r\n\r\n odf.loc[idx, 'Rank'] = 1\r\n\r\n # add params to Tuzet, WUE-LWP, CGain, CMax\r\n odf['p3'] = np.nan # own kmax\r\n odf['v3'] = np.nan\r\n\r\n # specific param names on a per model basis\r\n odf['p2'].loc[odf['Model'] == 'WUE-LWP'] = 'kmaxWUE'\r\n odf['p2'].loc[odf['Model'] == 'CGain'] = 'kmaxCN'\r\n odf['p3'].loc[odf['Model'] == 'CMax'] = 'kmaxCM'\r\n\r\n for training in odf['training'].unique(): # add the param values\r\n\r\n sub = odf[odf['training'] == training]\r\n\r\n for solver in sub['solver'].unique():\r\n\r\n # own kmax\r\n value = (sub[np.logical_and(sub['solver'] == solver,\r\n sub['Model'] == 'ProfitMax')]).v1\r\n\r\n idx = (sub[np.logical_and(sub['solver'] == solver,\r\n sub['Model'].isin(['WUE-LWP',\r\n 'CGain']))]\r\n .index)\r\n odf.loc[idx, 'v2'] = float(value)\r\n\r\n idx = (sub[np.logical_and(sub['solver'] == solver,\r\n sub['Model'].isin(['CMax']))]\r\n .index)\r\n odf.loc[idx, 'v3'] = float(value)\r\n\r\n # Rank = 1 is assumed to be the best param\r\n odf = odf[odf['Rank'] == 1].drop(['Rank'], axis=1)\r\n\r\n # column order\r\n columns = ['Model', 'training', 'solver', 'BIC', 'Ntotal', 'p1',\r\n 'v1', 'p2', 'v2', 'p3', 'v3']\r\n\r\n # best calibrations\r\n odf[columns].to_csv(fname, index=False, na_rep='', encoding='utf-8')\r\n\r\n exit(1)\r\n","sub_path":"src/calibrations/calib_model_2_obs.py","file_name":"calib_model_2_obs.py","file_ext":"py","file_size_in_byte":15019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"523450226","text":"# Taken from https://www.ursinaengine.org/\nfrom ursina import Entity, Ursina, color, held_keys\n\napp = Ursina()\n\nplayer = Entity(model=\"cube\", color=color.orange, scale_y=2)\n\n\ndef update(): # update gets automatically called.\n player.x += held_keys[\"d\"] * 0.1\n player.x -= held_keys[\"a\"] * 0.1\n\n\napp.run() # opens a window and starts the game.\n","sub_path":"game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"225581205","text":"import cx_Oracle\n\nusername = 'system'\npassword = 'databon'\ndsn = 'localhost/xe'\n\nconnection = cx_Oracle.connect(username, password, dsn)\ncursor = connection.cursor()\n\n# Query 1 - output the airports and the number of events they recorded\nquery = '''\nSELECT airportcode, COUNT(*) AS occured_events\nFROM workshop_queries\nGROUP BY airportcode\n'''\n\nprint('Query 1')\ncursor.execute(query)\nrow = cursor.fetchone()\nwhile row:\n print(row)\n row = cursor.fetchone()\nprint('\\n\\n')\n\n# Query 2 - for each weather event, output its\n# percentage of the total number of recorded events\nquery = '''\nSELECT eType AS event, ROUND(COUNT(eType) * 100 / (SELECT COUNT(*) FROM Event), 2)\nAS percentage\nFROM workshop_queries\nGROUP BY eType\n'''\n\nprint('Query 2')\ncursor.execute(query)\nrow = cursor.fetchone()\nwhile row:\n print(row)\n row = cursor.fetchone()\nprint('\\n\\n')\n\n# Query 3 - show the dynamics of rain by month for 2016\nquery = '''\nSELECT month, COUNT(*) AS times_occured\nFROM (\n SELECT EXTRACT(MONTH FROM starttime) AS month\n FROM workshop_queries\n WHERE EXTRACT(YEAR FROM starttime) = '2016' AND EXTRACT(YEAR FROM endtime) = '2016' AND TRIM(eType)='Rain'\n)\nGROUP BY month\nORDER BY month\n'''\n\nprint('Query 3')\ncursor.execute(query)\nrow = cursor.fetchone()\nwhile row:\n print(row)\n row = cursor.fetchone()\n\ncursor.close()\nconnection.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"404113283","text":"#\n# @lc app=leetcode id=62 lang=python3\n#\n# [62] Unique Paths\n#\n# https://leetcode.com/problems/unique-paths/description/\n#\n# algorithms\n# Medium (49.53%)\n# Likes: 2630\n# Dislikes: 186\n# Total Accepted: 416.2K\n# Total Submissions: 803K\n# Testcase Example: '3\\n2'\n#\n# A robot is located at the top-left corner of a m x n grid (marked 'Start' in\n# the diagram below).\n# \n# The robot can only move either down or right at any point in time. The robot\n# is trying to reach the bottom-right corner of the grid (marked 'Finish' in\n# the diagram below).\n# \n# How many possible unique paths are there?\n# \n# \n# Above is a 7 x 3 grid. How many possible unique paths are there?\n# \n# \n# Example 1:\n# \n# \n# Input: m = 3, n = 2\n# Output: 3\n# Explanation:\n# From the top-left corner, there are a total of 3 ways to reach the\n# bottom-right corner:\n# 1. Right -> Right -> Down\n# 2. Right -> Down -> Right\n# 3. Down -> Right -> Right\n# \n# \n# Example 2:\n# \n# \n# Input: m = 7, n = 3\n# Output: 28\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= m, n <= 100\n# It's guaranteed that the answer will be less than or equal to 2 * 10 ^ 9.\n# \n# \n#\n\n# @lc code=start\n# class Solution:\n# def uniquePaths(self, m: int, n: int) -> int:\n# if m == 1 or n == 1:\n# return 1\n# else:\n# start = [1,1]\n# end = [m,n]\n# ans = 0\n# loop = []\n# loop.append(start)\n# while loop:\n# current = loop.pop()\n# if current == end:\n# ans += 1\n# elif current[0] == m:\n# loop.append([current[0],current[1]+1])\n# elif current[1] == n:\n# loop.append([current[0]+1,current[1]])\n# else:\n# loop.append([current[0]+1,current[1]])\n# loop.append([current[0],current[1]+1])\n# return ans\n# class Solution:\n# def uniquePaths(self, m : int, n: int) -> int:\n# if m == 1 or n == 1:\n# return 1\n# else:\n# return self.uniquePaths(m-1,n)+self.uniquePaths(m,n-1)\nclass Solution:\n def uniquePaths(self,m : int, n:int) -> int:\n if m == 1 or n == 1:\n return 1\n else:\n ans = [[1 for x in range(n)] for y in range(m)]\n for i in range(1,m):\n for j in range(1,n):\n ans[i][j] = ans[i][j-1]+ ans[i-1][j]\n return ans[m-1][n-1]\n# @lc code=end\nif __name__ == \"__main__\":\n Test = Solution()\n print(Test.uniquePaths(3,2))\n print(Test.uniquePaths(7,3))\n","sub_path":"62.unique-paths.py","file_name":"62.unique-paths.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"20094779","text":"import json\nimport re\n# from tabulate import tabulate\n# import pandas as pd\n\n# appenddata = []\naddressblank = []\ncityappend = []\nlanguageappend = []\nofficeappend = []\nphoneappend = []\nstateappend = []\nzipappend = []\naccnewpatappend = []\nfnameappend = []\nftypeappend = []\nfirstname = []\nmname = []\nlname = []\nuname = []\ngender = []\nnpi = []\npcp = []\npcpid = []\nptypeind = []\nptypefac = []\nsuid = []\nnetworks = []\ntier = []\ngroupaff = []\nhospaff = []\nspec = []\n\nfilename = 'deduped.json'\nwith open(filename, buffering=90000000) as sumdata:\n size = sum(1 for _ in sumdata)\n\nwith open(filename, buffering=90000000) as f:\n for i in f:\n data = json.loads(i)\n dicti = dict(data)\n for lines in data['addresses']:\n addstng = lines['address_string']\n # appenddata.append(a)\n if addstng == '' or addstng is None:\n addressblank.append(addstng)\n\n city = lines['city']\n if city == '' or city is None:\n cityappend.append(city)\n\n match = re.search('languages\\D+\\s\\[\\]', str(data))\n if match is not None:\n languageappend.append(match)\n # for names in language:\n # lang = names['name']\n officename = lines['office_name']\n if officename == '' or officename is None:\n officeappend.append(officename)\n\n phoneval = re.search('phones\\D+\\s\\[\\]', str(data))\n if phoneval is not None:\n phoneappend.append(phoneval)\n\n state = lines['state']\n if state == '' or state is None:\n stateappend.append(state)\n\n zip = lines['zip']\n if zip == '' or zip is None:\n zipappend.append(zip)\n\n anp = dicti['provider']['accepting_new_patients']\n if anp is None or anp == '':\n accnewpatappend.append(anp)\n\n npiname = dicti['provider']['npi']\n if npiname == '' or npiname is None:\n npi.append(npiname)\n\n pcpname = dicti['provider']['pcp']\n if pcpname is None or pcpname == '':\n pcp.append(pcpname)\n\n pcpidname = dicti['provider']['pcp_id']\n if pcpidname is None or pcpidname == '':\n pcpid.append(pcpidname)\n\n site_uid = dicti['provider']['site_uid']\n if site_uid is None or site_uid == '':\n suid.append(site_uid)\n\n unpname = dicti['provider']['unparsed_name']\n if unpname is None or unpname == '':\n uname.append(unpname)\n\n protype = dicti['provider']['provider_type']\n if protype == 'facility':\n ptypefac.append(protype)\n\n facname = dicti['provider']['facility_name']\n if facname is None or facname == '':\n fnameappend.append(facname)\n\n ftype = dicti['provider']['facility_type']\n if ftype is None or ftype == '':\n ftypeappend.append(ftype)\n\n if protype == 'individual':\n ptypeind.append(protype)\n\n finame = dicti['provider']['first_name']\n if finame is None or finame == '':\n firstname.append(finame)\n\n laname = dicti['provider']['last_name']\n if laname is None or laname == '':\n lname.append(laname)\n\n miname = dicti['provider']['middle_name']\n if miname is None or miname == '':\n mname.append(miname)\n\n gen = dicti['provider']['gender']\n if gen is None or gen == '':\n gender.append(gen)\n\n network = re.search('networks\\D+\\s\\[\\]', str(data))\n if network is not None:\n networks.append(network)\n\n for net in data['networks']:\n tiername = net['tier']\n if tiername is None or tiername == '':\n tier.append(tiername)\n\n grpaff = re.search('group_affiliations\\\"\\:\\s+\\[\\]', i)\n if grpaff is not None:\n groupaff.append(grpaff)\n\n hosaff = re.search('hospital_affiliations\\\"\\:\\s+\\[\\]', i)\n if hosaff is not None:\n hospaff.append(hosaff)\n\n specialities = re.search('specialties\\\"\\:\\s+\\[\\]', i)\n if specialities is not None:\n spec.append(specialities)\n\ntotalprovider = size\naddblank = len(addressblank)\ntotaladd = totalprovider-addblank\nper = (addblank/totalprovider*100)\n\ncityblank = len(cityappend)\ntotalcity = totalprovider-cityblank\npercity = (cityblank/totalprovider*100)\n\nlanguageblank = len(languageappend)\ntotallang = totalprovider-languageblank\nperlang = (languageblank/totalprovider*100)\n\nofficeblank = len(officeappend)\ntotaloffice = totalprovider-officeblank\nperoffice = 
(officeblank/totalprovider*100)\n\nphoneblank = len(phoneappend)\nphonetotal = totalprovider-phoneblank\nperphone = (phoneblank/totalprovider*100)\n\nstateblank = len(stateappend)\nstatetotal = totalprovider-stateblank\nperstate = (stateblank/totalprovider*100)\n\nzipblank = len(zipappend)\nziptotal = totalprovider-zipblank\nperzip = (zipblank/totalprovider*100)\n\nanpblank = len(accnewpatappend)\nanptotal = totalprovider-anpblank\nperanp = (anpblank/totalprovider*100)\n\nnpiblank = len(npi)\nnpitotal = totalprovider-npiblank\npernpi = (npiblank/totalprovider*100)\n\npcpblank = len(pcp)\npcptoral = totalprovider-pcpblank\nperpcp = (pcpblank/totalprovider*100)\n\nsuidblank = len(suid)\nsuidtotal = totalprovider-suidblank\npersuid = (suidblank/totalprovider*100)\n\nunameblank = len(uname)\nunametotal = totalprovider-unameblank\nperuname = (unameblank/totalprovider*100)\n\npindi = len(ptypeind)\nperindi = (pindi/totalprovider*100)\n\npfaci = len(ptypefac)\nperfaci = (pfaci/totalprovider*100)\n\nfacnameblank = len(fnameappend)\ntotalfname = pfaci-facnameblank\nperfacname = (facnameblank/pfaci*100)\n\nftypeblank = len(ftypeappend)\ntotalftype = pfaci-ftypeblank\nperftype = (ftypeblank/pfaci*100)\n\nfnameblank = len(firstname)\ntotalfirstname = pindi-fnameblank\nperfname = (fnameblank/pindi*100)\n\nlnameblank = len(lname)\ntotallname = pindi-lnameblank\nperlname = (lnameblank/pindi*100)\n\nmnameblank = len(mname)\ntotalmname = pindi-mnameblank\npermname = (mnameblank/pindi*100)\n\ngenderblank = len(gender)\ntotalgender = pindi-genderblank\npergender = (genderblank/pindi*100)\n\nnetworkblank = len(networks)\ntotalnetworks = totalprovider-networkblank\npernet = (networkblank/totalprovider*100)\n\ntierblank = len(tier)\ntotaltier = totalprovider\npertier = (tierblank/totalprovider*100)\n\ngrpblank = len(groupaff)\ntotalgrp = totalprovider-grpblank\npergrp = (grpblank/totalprovider*100)\n\nhosblank = len(hospaff)\ntotalhosp = totalprovider-hosblank\nperhosp = (hosblank/totalprovider*100)\n\nspecblank = len(spec)\ntotalspec = totalprovider-specblank\nperspec = (specblank/totalprovider*100)\n\n\nif per > 8:\n result = '
address_string needs to be fixed!!!'\nelse:\n result = 'address_string looks okay!!!'\n\nif percity > 8:\n resultcity = 'city needs to be fixed!!!'\nelse:\n resultcity = 'city looks okay!!!'\n\nif perlang > 8:\n resutllang = 'language needs to be fixed!!!'\nelse:\n resutllang = 'language looks okay!!!'\n\nif peroffice > 8:\n resultofc = 'office_name needs to be fixed!!!'\nelse:\n resultofc = 'office_name looks okay!!!'\n\nif perphone > 8:\n resultphone = 'phone needs to be fixed!!!'\nelse:\n resultphone = 'phone looks okay!!!'\n\nif perstate > 8:\n resultstate = 'state needs to be fixed!!!'\nelse:\n resultstate = 'state looks okay!!!'\n\nif perzip > 8:\n resultzip = 'zip needs to be fixed!!!'\nelse:\n resultzip = 'zip looks okay!!!'\n\nif peranp > 8:\n resultanp = 'accepting_new_patient needs to be fixed!!!'\nelse:\n resultanp = 'accepting_new_patient looks okay!!!'\n\nif pernpi > 8:\n resultnpi = 'npi needs to be fixed!!!'\nelse:\n resultnpi = 'npi looks okay!!!'\n\nif perpcp > 8:\n resultpcp = 'pcp needs to be fixed!!!'\nelse:\n resultpcp = 'pcp looks okay!!!'\n\nif persuid > 8:\n resultsuid = 'site_uid needs to be fixed!!!'\nelse:\n resultsuid = 'site_uid looks okay!!!'\n\nif peruname > 8:\n resultuname = 'Unparsed Name needs to be fixed!!!'\nelse:\n resultuname = 'Unparsed Name looks okay!!!'\n\nif perfacname > 8:\n resultfacname = 'Facility Name needs to be fixed!!!'\nelse:\n resultfacname = 'Facility Name looks okay!!!'\n\nif ftypeblank > 8:\n resultftype = 'Facility Type needs to be fixed!!!'\nelse:\n resultftype = 'Facility Type looks okay!!!'\n\nif perfname > 8:\n resultfname = 'First Name needs to be fixed!!!'\nelse:\n resultfname = 'First Name looks okay!!!'\n\nif perlname > 8:\n resultlname = 'Last Name needs to be fixed!!!'\nelse:\n resultlname = 'Last Name looks okay!!!'\n\nif permname > 8:\n resultmname = 'Middle Name needs to be fixed!!!'\nelse:\n resultmname = 'Middle Name looks okay!!!'\n\nif pergender > 8:\n resultgender = 'Gender needs to be fixed!!!'\nelse:\n resultgender = 'Gender looks okay!!!'\n\nif pernet > 8:\n resultnet = 'Network needs to be fixed!!!'\nelse:\n resultnet = 'Network looks okay!!!'\n\nif pertier > 8:\n resulttier = 'Network Tier needs to be fixed!!!'\nelse:\n resulttier = 'Network Tier looks okay!!!'\n\nif pergrp > 8:\n resultgrp = 'Group Affiliations need to be fixed!!!'\nelse:\n resultgrp = 'Group Affiliations look okay!!!'\n\nif perhosp > 8:\n resulthosp = 'Hospital Affiliations need to be fixed!!!'\nelse:\n resulthosp = 'Hospital Affiliations look okay!!!'\n\nif perspec > 8:\n resultspec = 'Specialties need to be fixed!!!'\nelse:\n resultspec = 'Specialties look okay!!!'\n\n\nhtmlcollapse = '''\nVericred Summary Report\nVericred Project Report Summary\n'''\n\ntotalpro = 'Total Provider: {}'.format(totalprovider)\nmissingadd = 'Missing Address String: {} ({}%)'.format(addblank, round(per, 2), '(%)')\ntotaladdstring = 'Total Address String: {}'.format(totaladd)\naddressstring = missingadd+totaladdstring+result\n\ncitymissing = 'Missing City: {} ({}%)'.format(cityblank, round(percity, 2), '(%)')\ntotalcities = 'Total City: {}'.format(totalcity)\ncities = citymissing+totalcities+resultcity\n\nlangmissing = 'Missing Language: {} ({}%)'.format(languageblank, round(perlang, 2), '(%)')\nlangtotal = 'Total Language: {}'.format(totallang)\nlangu = langmissing+langtotal+resutllang\n\n\nofcmissing = 'Missing Office: {} ({}%)'.format(officeblank, round(peroffice, 2), '(%)')\nofctotal = 'Total Office: {}'.format(totaloffice)\nofc = ofcmissing+ofctotal+resultofc\n\nphonemissing = 'Missing Phone: {} ({}%)'.format(phoneblank, round(perphone, 2), '(%)')\nphntotal = 'Total Phone: {}'.format(phonetotal)\nphn = phonemissing+phntotal+resultphone\n\nstatemissing = 'Missing State: {} ({}%)'.format(stateblank, round(perstate, 2), '(%)')\nstatetotal = 'Total State: {}'.format(statetotal)\nstateall = statemissing+statetotal+resultstate\n\nzipmissing = 'Missing Zip: {} ({}%)'.format(zipblank, round(perzip, 2), '(%)')\nzipstotal = 'Total Zip: {}'.format(ziptotal)\nzipall = zipmissing+zipstotal+resultzip\n\naddchart = '''\n'''\nallchart = addchart + str(alldata) + rem\nfinaldata = re.sub('data:\\s+\\[\\(','data: [', allchart)\nfinalchart = re.sub('\\)\\]',']', finaldata)\n\n# allchart = addchart+str(datas)+chart\nprov = '''\n'''\n\n\nanpmissing = 'Missing Accepting New Patient: {} ({}%)'.format(anpblank, round(peranp, 2), '(%)')\nanpatotal = 'Total Accepting New Patient: {}'.format(anptotal)\nanpall = totalpro+anpmissing+anpatotal+resultanp\n\nnpimissing = 'Missing NPI: {} ({}%)'.format(npiblank, round(pernpi, 2), '(%)')\nnpit = 'Total NPI: {}'.format(npitotal)\nnpiall = npimissing+npit+resultnpi\n\npcpmissing = 'Missing PCP: {} ({}%)'.format(pcpblank, round(perpcp, 2), '(%)')\npcpt = 'Total PCP: {}'.format(pcptoral)\npcpall = pcpmissing+pcpt+resultpcp\n\nsuidmissing = 'Missing Site_Uid: {} ({}%)'.format(suidblank, round(persuid, 2), '(%)')\nsuidt = 'Total Site_Uid: {}'.format(suidtotal)\nsuidtotal = suidmissing+suidt+resultsuid\n\nunamemissing = 'Missing Unparsed Name: {} ({}%)'.format(unameblank, round(peruname, 2), '(%)')\nunamet = 'Total Unparsed Name: {}'.format(unametotal)\nupnametotal = unamemissing+unamet+resultuname\n\ntotalindi = 'Total Individual Count: {} ({}%)'.format(pindi, round(perindi, 2), '(%)')\ntotalfac = 'Total Facility Count: {} ({}%)'.format(pfaci, round(perfaci, 2), '(%)')\nprovider = totalindi+totalfac\n\nfacnamemissing = 'Missing Facility Name: {} ({}%)'.format(facnameblank, round(perfacname, 2), '(%)')\nfacnametotal = 'Total Facility Name: {}'.format(totalfname)\nfactotal = facnamemissing+facnametotal+resultfacname\n\nftypemissing = 'Missing Facility Type: {} ({}%)'.format(ftypeblank, round(perftype, 2), '(%)')\nftypetotal = 'Total Facility Type: {}'.format(totalftype)\nftypet = ftypemissing+ftypetotal+resultftype\n\nfirstnamemissing = 'Missing First Name: {} ({}%)'.format(fnameblank, round(perfname, 2), '(%)')\nfirsttotal = 'Total First Name: {}'.format(totalfirstname)\nfirstnametotal = firstnamemissing+firsttotal+resultfname\n\nlnamemissing = 'Missing Last Name: {} ({}%)'.format(lnameblank, round(perlname, 2), '(%)')\nlnametotal = 'Total Last Name: {}'.format(totallname)\nlastnametotal = lnamemissing+lnametotal+resultlname\n\nmnamemissing = 'Missing Middle Name: {} ({}%)'.format(mnameblank, round(permname, 2), '(%)')\nmnametotal = 'Total Middle Name: {}'.format(totalmname)\nmitotal = mnamemissing+mnametotal+resultmname\n\ngendermissing = 'Missing Gender: {} ({}%)'.format(genderblank, round(pergender, 2), '(%)')\ngentotal = 'Total Gender: {}'.format(totalgender)\ngendertotal = gendermissing+gentotal+resultgender\n\n\nnetworkhtml = '''\n'''\n\nnetmissing = 'Missing Network: {} ({}%)'.format(networkblank, round(pernet, 2), '(%)')\nnettotal = 'Total Network: {}'.format(totalnetworks)\nnetworktotal = netmissing+nettotal+resultnet\n\ntiermissing = 'Missing Tier: {} ({}%)'.format(tierblank, round(pertier, 2), '(%)')\ntotaltier = 'Total Tier: {}'.format(totaltier)\ntierall = tiermissing+totaltier+resulttier\n\n\naffiliation = '''\n'''\n\ngroupblank = 'Missing Group Affiliations: {} ({}%)'.format(grpblank, round(pergrp, 2), '(%)')\ntotalgroup = 'Total Group Affiliations: {}'.format(totalgrp)\ngrpall = groupblank+totalgroup+resultgrp\n\nhospitalblank = 'Missing Hospital Affiliations: {} ({}%)'.format(hosblank, round(perhosp, 2), '(%)')\ntotalhos = 'Total Hospital Affiliations: {}'.format(totalhosp)\nhospall = hospitalblank+totalhos+resulthosp\n\n\nspeci = '''\n'''\nspecmissing = 'Missing Specialties: {} ({}%)'.format(specblank, round(perspec, 2), '(%)')\ntotalspecia = 'Total Specialties: {}'.format(totalspec)\nspecall = specmissing+totalspecia+resultspec\n\n\ncan = '''
    \n'''\n\nall = '''\n\n\n\n\n\n\n'''\n\nwith open('vericred_summary_report.html', 'w', buffering=90000000) as html:\n html.write(htmlcollapse+totalpro+addressstring+cities+langu+ofc+phn+stateall+zipall+finalchart+\n prov+anpall+npiall+pcpall+suidtotal+upnametotal+provider+factotal+\n ftypet+firstnametotal+lastnametotal+mitotal+gendertotal+networkhtml+\n totalpro+networktotal+tierall+affiliation+totalpro+grpall+hospall+speci+totalpro+specall+all)","sub_path":"generalscripts_python/reporthtmlnewchanges.py","file_name":"reporthtmlnewchanges.py","file_ext":"py","file_size_in_byte":21362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"21689640","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect, HttpResponseForbidden\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View, TemplateView, ListView, DetailView\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django_fsm_log.models import StateLog\nfrom wastd.utils import ListViewBreadcrumbMixin, DetailViewBreadcrumbMixin, ResourceDownloadMixin\n\nfrom .admin import (\n EncounterAdmin,\n AnimalEncounterAdmin,\n TurtleNestEncounterAdmin,\n LineTransectEncounterAdmin,\n)\nfrom .filters import (\n SurveyFilter,\n EncounterFilter,\n AnimalEncounterFilter,\n TurtleNestEncounterFilter,\n LineTransectEncounterFilter,\n)\nfrom .models import (\n Survey,\n Encounter,\n AnimalEncounter,\n TurtleNestEncounter,\n LineTransectEncounter,\n TagObservation,\n)\nfrom .resources import (\n SurveyResource,\n EncounterResource,\n AnimalEncounterResource,\n TurtleNestEncounterResource,\n LineTransectEncounterResource,\n)\n\n\nclass MapView(TemplateView):\n template_name = \"observations/map.html\"\n\n\nclass SurveyList(ListViewBreadcrumbMixin, ResourceDownloadMixin, ListView):\n model = Survey\n template_name = \"default_list.html\"\n paginate_by = 20\n filter_class = SurveyFilter\n resource_class = SurveyResource\n resource_formats = ['csv', 'xlsx']\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"list_filter\"] = SurveyFilter(self.request.GET, queryset=self.get_queryset())\n context[\"object_count\"] = self.get_queryset().count()\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Surveys\"\n return context\n\n def get_queryset(self):\n qs = super().get_queryset().prefetch_related(\"reporter\", \"site\", \"encounter_set\", \"campaign\").order_by(\"-start_time\")\n return SurveyFilter(self.request.GET, queryset=qs).qs\n\n\nclass SurveyDetail(DetailViewBreadcrumbMixin, DetailView):\n model = Survey\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n obj = self.get_object()\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Survey {obj.pk}\"\n return context\n\n\ndef close_survey_duplicates(request, pk):\n \"\"\"Close duplicates for a given Survey PK with the request user as actor.\n\n All duplicate Surveys will be curated and marked as \"not production\".\n The given Survey will be curated and marked as \"production\",\n adopt all Encounters from all duplicate surveys, and adjust its duration.\n\n See Survey.close_duplicates() for implementation details.\n \"\"\"\n s = Survey.objects.get(pk=pk)\n msg = s.close_duplicates(actor=request.user)\n messages.success(request, msg)\n return 
HttpResponseRedirect(s.get_absolute_url())\n\n\nclass EncounterList(ListViewBreadcrumbMixin, ResourceDownloadMixin, ListView):\n model = Encounter\n template_name = \"default_list.html\"\n paginate_by = 20\n filter_class = EncounterFilter\n resource_class = EncounterResource\n resource_formats = ['csv', 'xlsx']\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"list_filter\"] = EncounterFilter(\n self.request.GET, queryset=self.get_queryset()\n )\n context[\"model_admin\"] = EncounterAdmin\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Encounters\"\n return context\n\n def get_queryset(self):\n qs = (\n super(EncounterList, self)\n .get_queryset()\n .prefetch_related(\"observer\", \"reporter\", \"area\", \"site\")\n .order_by(\"-when\")\n )\n return EncounterFilter(self.request.GET, queryset=qs).qs\n\n\nclass EncounterDetail(DetailViewBreadcrumbMixin, DetailView):\n model = Encounter\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n obj = self.get_object()\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Encounter {obj.pk}\"\n # data['tags'] = TagObservation.objects.filter(encounter__in=[self.get_object()])\n return context\n\n\nclass EncounterCurate(LoginRequiredMixin, SingleObjectMixin, View):\n \"\"\"Minimal view to handle HTTP request to mark a record as curated.\n \"\"\"\n model = Encounter\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n # FIXME: Permission check\n if not request.user.is_staff:\n return HttpResponseForbidden(\"You do not have permission to curate this record\")\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n obj = self.get_object()\n obj.curate(by=request.user, description=\"Curated record as trustworthy\")\n obj.save()\n messages.success(request, f\"Curated {obj} as trustworthy\")\n return HttpResponseRedirect(obj.get_absolute_url())\n\n\nclass EncounterFlag(LoginRequiredMixin, SingleObjectMixin, View):\n \"\"\"Minimal view to handle HTTP request to mark a record as flagged.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n # FIXME: Permission check\n if not request.user.is_staff:\n return HttpResponseForbidden(\"You do not have permission to flag this record\")\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n obj = self.get_object()\n obj.flag(by=request.user, description=\"Flagged record as untrustworthy\")\n obj.save()\n messages.warning(request, f\"Flagged {obj} as untrustworthy\")\n return HttpResponseRedirect(obj.get_absolute_url())\n\n\nclass EncounterReject(LoginRequiredMixin, SingleObjectMixin, View):\n \"\"\"Minimal view to handle HTTP request to mark a record as rejected.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n # FIXME: Permission check\n if not request.user.is_staff:\n return HttpResponseForbidden(\"You do not have permission to reject this record\")\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n obj = self.get_object()\n obj.reject(by=request.user, description=\"Rejected record as untrustworthy\")\n obj.save()\n messages.error(request, f\"Rejected {obj} as untrustworthy\")\n return HttpResponseRedirect(obj.get_absolute_url())\n\n\nclass AnimalEncounterList(ListViewBreadcrumbMixin, ResourceDownloadMixin, ListView):\n model = AnimalEncounter\n template_name = \"default_list.html\"\n paginate_by = 20\n filter_class = 
AnimalEncounterFilter\n resource_class = AnimalEncounterResource\n resource_formats = [\"csv\", \"xlsx\"]\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n qs = self.get_queryset()\n context[\"list_filter\"] = AnimalEncounterFilter(self.request.GET, queryset=qs)\n context[\"model_admin\"] = AnimalEncounterAdmin\n context[\"object_count\"] = qs.count()\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Animal encounters\"\n return context\n\n def get_queryset(self):\n qs = (\n super(AnimalEncounterList, self)\n .get_queryset()\n .prefetch_related(\n \"observer\",\n \"reporter\",\n \"area\",\n \"site\",\n \"site_of_first_sighting\",\n \"site_of_last_sighting\",\n )\n .order_by(\"-when\")\n )\n return AnimalEncounterFilter(self.request.GET, queryset=qs).qs\n\n\nclass AnimalEncounterDetail(DetailViewBreadcrumbMixin, DetailView):\n model = AnimalEncounter\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n obj = self.get_object()\n context[\"tag_observations\"] = TagObservation.objects.filter(encounter__in=[obj])\n context[\"state_logs\"] = StateLog.objects.for_(obj)\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Animal encounter {obj.pk}\"\n return context\n\n\nclass AnimalEncounterCurate(EncounterCurate):\n model = AnimalEncounter\n\n\nclass AnimalEncounterFlag(EncounterFlag):\n model = AnimalEncounter\n\n\nclass AnimalEncounterReject(EncounterReject):\n model = AnimalEncounter\n\n\nclass TurtleNestEncounterList(ListViewBreadcrumbMixin, ResourceDownloadMixin, ListView):\n model = TurtleNestEncounter\n template_name = \"default_list.html\"\n paginate_by = 20\n filter_class = TurtleNestEncounterFilter\n resource_class = [\n TurtleNestEncounterResource,\n ]\n resource_formats = ['csv', 'xlsx']\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n qs = self.get_queryset()\n context[\"list_filter\"] = TurtleNestEncounterFilter(self.request.GET, queryset=qs)\n context[\"model_admin\"] = TurtleNestEncounterAdmin\n context[\"object_count\"] = qs.count()\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Turtle nest encounters\"\n return context\n\n def get_queryset(self):\n # FIXME: filtering via permissions model.\n qs = super().get_queryset().prefetch_related(\"observer\", \"reporter\", \"area\", \"site\").order_by(\"-when\")\n return TurtleNestEncounterFilter(self.request.GET, queryset=qs).qs\n\n\nclass TurtleNestEncounterDetail(DetailViewBreadcrumbMixin, DetailView):\n # FIXME: filtering via permissions model.\n model = TurtleNestEncounter\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n obj = self.get_object()\n context[\"state_logs\"] = StateLog.objects.for_(obj)\n context[\"page_title\"] = f\"{settings.SITE_CODE} | Turtle nest encounter {obj.pk}\"\n return context\n\n\nclass TurtleNestEncounterCurate(EncounterCurate):\n model = TurtleNestEncounter\n\n\nclass TurtleNestEncounterFlag(EncounterFlag):\n model = TurtleNestEncounter\n\n\nclass TurtleNestEncounterReject(EncounterReject):\n model = TurtleNestEncounter\n\n\nclass LineTransectEncounterList(ListViewBreadcrumbMixin, ResourceDownloadMixin, ListView):\n model = LineTransectEncounter\n template_name = \"default_list.html\"\n paginate_by = 20\n filter_class = LineTransectEncounterFilter\n resource_class = LineTransectEncounterResource\n resource_formats = ['csv', 'xlsx']\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n 
context[\"list_filter\"] = LineTransectEncounterFilter(self.request.GET, queryset=self.get_queryset())\n context[\"model_admin\"] = LineTransectEncounterAdmin\n return context\n\n def get_queryset(self):\n qs = (\n super(LineTransectEncounterList, self)\n .get_queryset()\n .prefetch_related(\"observer\", \"reporter\", \"area\", \"site\")\n .order_by(\"-when\")\n )\n return LineTransectEncounterFilter(self.request.GET, queryset=qs).qs\n\n\nclass LineTransectEncounterDetail(DetailViewBreadcrumbMixin, DetailView):\n model = LineTransectEncounter\n\n def get_context_data(self, **kwargs):\n data = super(LineTransectEncounterDetail, self).get_context_data(**kwargs)\n # data['tags'] = TagObservation.objects.filter(encounter__in=[self.get_object()])\n return data\n","sub_path":"observations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"144696064","text":"# Copyright 2016 Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom oslo_log import log as logging\nfrom oslotest import base\nfrom vitrage_tempest_tests.tests.base_mock import BaseMock\n\nimport vitrage_tempest_tests.tests.utils as utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass BaseVitrageTest(base.BaseTestCase):\n \"\"\"Base test class for Vitrage API tests.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(BaseVitrageTest, cls).setUpClass()\n cls.conf = utils.get_conf()\n\n def _create_graph_by_mock(self):\n \"\"\"Create MOCK Graph and copied to the string \"\"\"\n self.mock_client = BaseMock()\n processor = self.mock_client.create_processor_with_graph()\n entity_graph = processor.entity_graph\n mock_graph_output = entity_graph.output_graph()\n LOG.info(\"The mock graph is : \" + mock_graph_output)\n\n @staticmethod\n def get_flavor_id_from_list():\n text_out = utils.run_vitrage_command(\"nova flavor-list\")\n try:\n flavor_id = utils.get_regex_result(\"\\|\\s+(\\d+)\\s+\\|\",\n text_out.splitlines()[3])\n except Exception as e:\n LOG.exception(\"Failed to get flavor id from the list %s \", e)\n return None\n\n LOG.debug(\"The flavor id from the list is \" + flavor_id)\n return flavor_id\n\n @staticmethod\n def get_image_id_from_list():\n text_out = utils.run_vitrage_command(\"glance image-list\")\n try:\n image_id = utils.get_regex_result(\"\\|\\s+(.*)\\s+\\|\",\n text_out.splitlines()[3])\n image_id = image_id.split(\" \")[0]\n except Exception as e:\n LOG.exception(\"Failed to get image id from the list %s \", e)\n return None\n\n LOG.debug(\"The image id from the list is \" + image_id)\n return image_id\n\n @staticmethod\n def get_instance_id_by_name(vm_name):\n text_out = utils.run_vitrage_command(\"nova list\")\n for line in text_out.splitlines():\n if vm_name in line:\n vm_id = utils.get_regex_result(\"\\|\\s+(.*)\\s+\\|\", line)\n vm_id = vm_id.split(\" \")[0]\n LOG.debug(\"The instance id from the nova list is \" + vm_id)\n return vm_id\n return None\n\n @staticmethod\n def 
get_volume_id_by_name(vol_name):\n text_out = utils.run_vitrage_command(\"cinder list\")\n for line in text_out.splitlines():\n if vol_name in line:\n vol_id = utils.get_regex_result(\"\\|\\s+(.*)\\s+\\|\", line)\n vol_id = vol_id.split(\" \")[0]\n LOG.debug(\"The volume id from the cinder list is \" + vol_id)\n return vol_id\n return None\n\n @staticmethod\n def create_vm_with_exist_image(vm_name, flavor_id, image_id):\n utils.run_vitrage_command(\"nova boot \" + vm_name + \" --flavor \" +\n flavor_id + \" --image \" + image_id)\n\n text_out = utils.run_vitrage_command(\"nova list\")\n if vm_name in text_out:\n LOG.debug(\"The expected vm exists in the nova list\")\n else:\n LOG.error(\"The expected vm does not exist in the nova list\")\n\n @staticmethod\n def create_volume_with_exist_size(vol_name):\n utils.run_vitrage_command(\"cinder create --name \" + vol_name + \" 5\")\n\n text_out = utils.run_vitrage_command(\"cinder list\")\n if vol_name in text_out:\n LOG.debug(\"The expected volume exists in the cinder list\")\n else:\n LOG.error(\"The expected volume does not exist in the cinder list\")\n\n def attach_volume(self, vm_name, vol_name):\n vm_id = self.get_instance_id_by_name(vm_name)\n vol_id = self.get_volume_id_by_name(vol_name)\n\n utils.run_vitrage_command(\"nova volume-attach \" + vm_id + \" \" + vol_id)\n\n text_out = utils.run_vitrage_command(\"cinder list\")\n if vm_id in text_out:\n LOG.debug(\"The expected volume is attached to the vm\")\n else:\n LOG.error(\"The expected volume did not attach to the vm\")\n","sub_path":"vitrage_tempest_tests/tests/api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"137983203","text":"import abilities\nfrom combat import targets\nfrom combat.attacks.base import Attack\nfrom combat.enums import DamageType\nfrom echo import functions\nfrom stats.enums import StatsEnum\nfrom util import check_roller\nfrom util import dice\n\n\nclass Bite(Attack):\n name = \"Bite\"\n target_type = targets.Single\n description = \"Basic bite attack.\"\n\n actor_message = \"You lunge at {defender}\"\n observer_message = \"{attacker} lunges at {defender}\"\n\n @classmethod\n def can_execute(cls, attack_context):\n attacker = attack_context.attacker\n if attack_context.distance_to <= 1:\n attacker_body = attacker.body\n if attacker_body:\n return bool(attacker_body.get_ability(abilities.Bite, 1))\n return False\n\n @classmethod\n def execute(cls, attack_context):\n attacker = attack_context.attacker\n defender = attack_context.defender\n hit_modifier = attacker.stats.strength.modifier\n attack_result = cls.make_hit_roll(attack_context, hit_modifier)\n attack_result.attack_message = cls.get_message(attacker, defender)\n attack_result.context.attacker_weapon = \"fangs\"\n attack_result.damage_message = \"biting into\"\n\n cls.make_damage_roll(attack_result, hit_modifier)\n\n return attack_result,\n\n @classmethod\n def make_damage_roll(cls, attack_result, str_modifier):\n melee_damage_dice = cls.get_melee_damage_dice(attack_result.context.attacker)\n total_damage = check_roller.roll_damage(\n dice_stacks=(melee_damage_dice,),\n modifiers=str_modifier,\n critical=attack_result.critical\n )\n attack_result.total_damage = total_damage\n attack_result.separated_damage = [(total_damage, DamageType.Pierce)]\n\n return attack_result\n\n @classmethod\n def get_melee_damage_dice(cls, attacker):\n bite_ability = attacker.body.get_ability(abilities.Bite, 1)\n\n return 
dice.DiceStack(bite_ability.value, dice.D4)\n\n @classmethod\n def get_message(cls, actor, target):\n if actor.is_player:\n return cls.actor_message.format(defender=target.name)\n else:\n return cls.observer_message.format(\n attacker=actor.name,\n defender=functions.name_or_you(target)\n )\n","sub_path":"combat/attacks/unarmed/bite.py","file_name":"bite.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"22092497","text":"#! /usr/bin/env python\n# Copyright (c) 2010 Brian Edwards\n\nfrom pprint import pprint\nimport simplejson as json\n\ndef _get_pkgs(filename, pkg_name):\n file = open(filename)\n try:\n ext_jsb2 = json.load(file)\n finally:\n file.close()\n for pkg in ext_jsb2['pkgs']:\n if pkg['name']==pkg_name:\n pkgs = pkg['pkgDeps']\n break\n else:\n args = (filename, pkg_name)\n raise Exception('No package named %s in %s' % args)\n return pkgs\n\ndef main():\n pkgs = _get_pkgs('ext.jsb2', 'Ext All')\n scripts = []\n for pkg in pkgs:\n debug_pkg = '/js/%s-debug.js' % pkg[:-len('.js')]\n scripts.append(debug_pkg)\n file = open('silt/generated/extall.py', 'w')\n try:\n file.write('scripts = ')\n pprint(scripts, file)\n finally:\n file.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"jsb2.py","file_name":"jsb2.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"474663855","text":"import qi\nfrom naoqi import ALProxy\nimport argparse\nimport sys\nimport pdb\nimport time\nimport pandas as pd\nimport numpy as np\n\n\nclass QuestionAnalyzer():\n def __init__(self,filename):\n self.orig_data = pd.read_csv(filename, header=0)\n self.current_data = pd.read_csv(filename, header=0)\n self.questions_idxs = np.arange(0,len(self.current_data.columns)-1) # list of questions not asked yet\n\n def faces_in_play(self,faces_list):\n '''\n Gets list of strings of faces that are in play\n Changes self.current_data to have only these faces\n '''\n self.current_data = self.current_data[self.current_data['Name'].isin(faces_list)]\n\n def get_information_gain(self):\n '''\n Get information gain for each available question\n Return -1 for a question that is not available\n '''\n information_gain = np.array([])\n # pdb.set_trace()\n num_characters = self.current_data.shape[0]\n entropy = np.log2(num_characters)\n for question_idx in self.questions_idxs:\n question = self.current_data.columns[question_idx+1] # offset since first column is name\n subset = self.current_data[question]\n data_false = subset[subset<0.5].shape[0]\n data_true = subset[subset>0.5].shape[0]\n if data_true==num_characters or data_false==num_characters:\n conditional_entropy = entropy\n else:\n conditional_entropy = (float(data_false)/num_characters)*np.log2(data_false) + (float(data_true)/num_characters)*np.log2(data_true)\n information_gain = np.append(information_gain,entropy-conditional_entropy)\n # pdb.set_trace()\n return information_gain\n \n def rank_questions(self):\n information_gain = self.get_information_gain()\n # pdb.set_trace()\n sorted_questions_idxs = np.argsort(information_gain) # find indices to sort from smallest to greatest\n sorted_questions_idxs = sorted_questions_idxs[::-1] # sort from greatest to least\n sorted_questions = []\n for idx in sorted_questions_idxs:\n sorted_questions.append(self.questions_idxs[idx])\n # pdb.set_trace()\n return sorted_questions\n \n\n def choose_question(self):\n '''\n Chooses a 
question to ask the human\n Returns index of question to ask\n Returns -1 if there is only one person left\n '''\n if self.current_data.shape[0]==1:\n return -1\n sorted_questions = self.rank_questions()\n # pdb.set_trace()\n question_idx= sorted_questions[0] # Best question\n # question_idx=sorted_questions[1] # Second best question\n return question_idx\n\n def update_current_data(self,question_index,answer):\n '''\n Given question index and the human's answer, update current data df and array of question idxs\n '''\n question = self.current_data.columns[question_index+1]\n subset = self.current_data[question]\n # pdb.set_trace()\n if answer<0.5:\n self.current_data = self.current_data[subset<0.5]\n else:\n self.current_data = self.current_data[subset>0.5]\n # pdb.set_trace()\n # mask = np.ones(len(self.questions_idxs),dtype=bool)\n # mask[question_index] = False\n self.questions_idxs = self.questions_idxs[np.invert(self.questions_idxs==question_index)]\n # pdb.set_trace()\n # self.questions_idxs = self.questions_idxs[mask]\n # pdb.set_trace()\n print(self.current_data)\n\nclass Robot:\n IP = \"192.168.86.55\"\n PORT = 9559\n ACTIONS = [\"Yes_1\", \"Yes_2\", \"Yes_3\", \"Please_1\", \"Explain_1\", \"Explain_2\", \"IDontKnow_1\", \"IDontKnow_2\",\n \"No_3\", \"No_8\", \"No_9\"]\n # QUESTIONS = [\"Can your person fly?\"]\n # TODO populate the whole list of questions\n QUESTIONS = [\"Does your person have a mask or is wearing a helmet?\",\n \"Is your person wearing a helmet?\",\n \"Does your person have hair that is visible?\",\n \"Is your person a male?\",\n \"Is your person a Marvel character?\",\n \"Is your person a DC character?\",\n \"Does your person have facial hair?\",\n \"Is your person a hero?\",\n \"Is your person a villain?\",\n \"Can your person fly?\",\n \"Is your person an Avenger?\",\n \"Is your person in the Justice League?\",\n \"Is your person an X-man?\",\n \"Does your person have their own stand alone movie?\",\n \"Is your person wearing red?\",\n \"Is your person considered royalty?\"]\n roster = [\"joker\", \"wonderWoman\", \"theFlash\", \"greenGoblin\", \"catwoman\", \"cyborg\",\n \"theHulk\", \"captainAmerica\", \"wolverine\", \"superman\", \"ironMan\", \"aquaman\", \"mystique\", \"blackPanther\",\n \"batman\", \"harleyQuinn\", \"spiderman\", \"thor\", \"storm\", \"blackWidow\"]\n\n def __init__(self):\n self.qa = QuestionAnalyzer(\"../data/guesswho_superherodata3.csv\")\n self.attendance = {}\n self.selected_characters = []\n self.head_pat = 0\n self.tts = ALProxy(\"ALTextToSpeech\", self.IP, self.PORT)\n self.session = qi.Session()\n try:\n self.session.connect(\"tcp://\" + self.IP + \":\" + str(self.PORT))\n except RuntimeError:\n print (\"Can't connect to Naoqi at ip \\\"\" + self.IP + \"\\\" on port \" + str(self.PORT) + \".\\n\"\n \"Please check your script arguments. Run with -h option for help.\")\n sys.exit(1)\n self.action_service = self.session.service(\"ALAnimationPlayer\")\n self.memory = self.session.service(\"ALMemory\")\n self.initialize_shared_memory()\n self.robot_won_game = False\n # self.check_attendance()\n # pdb.set_trace()\n\n def speak(self, string):\n self.tts.say(string)\n\n def act(self, action):\n\n # Action list:\n # yes, no, explain, you, warm\n\n self.action_service.run(\"animations/Stand/Gestures/\"+action, _async=True)\n # future.value()\n # self.action_service.run(\"animations/Stand/Gestures/\"+action)\n\n # Returns list of selected characters\n def get_selected_characters(self):\n return self.selected_characters\n\n # Sets all values in the shared memory to zero, and sets all stored attendance values to zero.\n # Also resets selected characters\n def initialize_shared_memory(self):\n for i in self.roster:\n self.attendance[i] = 0\n self.memory.insertData(i, 0)\n self.selected_characters = []\n self.head_pat = 0\n\n def check_attendance(self):\n for r in self.roster:\n if self.memory.getData(r) == 1:\n self.attendance[r] = 1\n else:\n self.attendance[r] = 0\n\n def game_start(self):\n self.act(\"Yes_1\")\n self.speak(\"Would you like to play a game?\")\n print(\"Would the human like to start a game? (y/n):\")\n answer = raw_input()\n if answer == 'y':\n self.act(\"Enthusiastic_4\")\n self.speak(\"Yay!\")\n self.speak(\"Please show me a set of characters that you would like to play with.\")\n self.speak(\"Pat my head when you've finished\")\n return 0\n else:\n self.speak(\"Okay, maybe next time!\")\n self.act(\"BowShort_1\")\n return 1\n\n def ask_question(self, idx):\n self.act(\"IDontKnow_2\")\n self.speak(self.QUESTIONS[idx])\n print(\"Question from robot: {}\".format(self.QUESTIONS[idx]))\n print(\"Human's answer to robot's question:\")\n answer = raw_input()\n if answer == 'y':\n self.act(\"Yes_1\")\n self.speak(\"Hmmm, okay\")\n return 1\n else:\n self.act(\"No_1\")\n self.speak(\"That's interesting\")\n return 0\n \n\n def ask_to_answer(self):\n self.act(\"Explain_1\")\n time.sleep(1)\n self.speak(\"Now you ask me a question\")\n\n def answer_question(self):\n print(\"Robot's answer to the human's question (y/n/w):\")\n answer = raw_input()\n if answer == 'y':\n self.act(\"Yes_1\")\n self.speak(\"Yes!\")\n return 0\n elif answer == 'w':\n self.act(\"Yes_1\")\n self.speak(\"Yes! You guessed my person! You win. You are a superstar.\")\n return 1\n else:\n self.act(\"No_1\")\n if np.random.random()>0.20:\n self.speak(\"Nope!\")\n else:\n self.speak(\"Yeh\")\n time.sleep(0.5)\n self.speak(\"Actually, no. Sike! 
You are such a loser!\")\n return 0\n\n def roll_call(self):\n self.check_attendance()\n valid_set = 0\n for hero in self.attendance:\n if self.attendance[hero] == 1:\n valid_set = 1\n\n if valid_set:\n self.speak(\"The following characters have been selected:\")\n for hero in self.attendance:\n if self.attendance[hero] == 1:\n self.selected_characters.append(hero)\n self.speak(hero)\n\n else:\n self.speak(\"No Characters Have Been Selected.\")\n self.speak(\"Please show me at least one character before patting my head again.\")\n # self.speak(\"Fuck this bullshit!\")\n self.head_pat = 0\n self.memory.insertData(\"headPat\", 0)\n self.observe_faces()\n\n def observe_faces(self):\n self.head_pat = 0\n self.memory.insertData(\"headPat\", 0)\n while self.head_pat != 1:\n self.head_pat = self.memory.getData(\"headPat\")\n self.roll_call()\n\n def start_game(self):\n self.act(\"Explain_1\")\n self.speak(\"We are going to start the game now. Because I'm so nice, I'll let you start.\")\n\n def game(self):\n val = self.game_start()\n # self.observe_faces()\n # faces_list = self.get_selected_characters()\n faces_list = self.roster\n self.qa.faces_in_play(faces_list)\n pdb.set_trace()\n question_idx = 0\n self.start_game()\n while True:\n self.ask_to_answer()\n human_guessed_person = self.answer_question()\n question_idx = self.qa.choose_question()\n if question_idx<0:\n name = self.qa.current_data['Name'].tolist()\n name = name[0]\n self.speak(\"I figured out your person!\")\n self.speak(\"You are thinking of {}\".format(name))\n self.robot_won_game = True\n break\n answer = self.ask_question(question_idx)\n self.qa.update_current_data(question_idx,answer)\n \n if human_guessed_person:\n break\n if self.robot_won_game==True:\n self.speak(\"Good game! Come play again.\")\n self.act(\"Enthusiastic_5\")\n else:\n self.speak(\"You beat me! 
Good job!\")\n self.act(\"BowShort_1\")\n self.speak(\"I am now yours to command.\")\n\nif __name__ == '__main__':\n journey = Robot()\n # journey.speak(\"Hello everyone\")\n # journey.act(\"Yes_1\")\n journey.game()\n","sub_path":"project_code/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192349721","text":"import sys\nimport math\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n\nclass DrawPoint(QWidget):\n \"\"\"主窗口\"\"\"\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.setWindowTitle(\"QPainter 使用drawPoints()\")\n self.resize(300, 200)\n\n def paintEvent(self, QPaintEvent):\n \"\"\"重载QWidget.paintEvent\"\"\"\n painter = QPainter()\n painter.begin(self)\n self.to_draw_points(painter)\n painter.end()\n\n def to_draw_points(self, painter):\n painter.setPen(Qt.red)\n size = self.size() # 获取当前窗口的大小\n\n for i in range(1000):\n # 绘制正选函数图形, 周期[-100, 100]\n x = 100 * (-1+2.0*i/1000) + size.width()/2.0\n y = -50*math.sin((x-size.width()/2.0)*math.pi/50) + size.height()/2.0\n painter.drawPoint(x, y)\n\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n win = DrawPoint()\n win.show()\n sys.exit(app.exec())","sub_path":"02_基本窗口控件/示例内容/26_QPainter.drawPoint()绘制点.py","file_name":"26_QPainter.drawPoint()绘制点.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23658646","text":"# Lab 12 RNN\nimport numpy as np\nimport tensorflow as tf\n\n\nfrom datetime import datetime\n# for tensorboard\nlogdir=\"logs/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n\n\nidx2char = ['h', 'i', 'e', 'l', 'o']\n# Teach hello: hihell -> ihello\n# x_data = [[0, 1, 0, 2, 3, 3]] # hihell\ny_data = [[1, 0, 2, 3, 3, 4]] # ihello\n\nnum_classes = 5\ninput_dim = 5 # one-hot size, same as hidden_size to directly predict one-hot\nsequence_length = 6 # |ihello| == 6\nlearning_rate = 0.1\n\nx_one_hot = np.array([[[1, 0, 0, 0, 0], # h 0\n [0, 1, 0, 0, 0], # i 1\n [1, 0, 0, 0, 0], # h 0\n [0, 0, 1, 0, 0], # e 2\n [0, 0, 0, 1, 0], # l 3\n [0, 0, 0, 1, 0]]], # l 3\n dtype=np.float32)\n\ny_one_hot = tf.keras.utils.to_categorical(y_data, num_classes=num_classes)\nprint(x_one_hot.shape)\nprint(y_one_hot.shape)\n\ntf.model = tf.keras.Sequential()\n\n# make cell and add it to RNN layer\n# input_shape = (1,6,5) => number of sequence (batch), length of sequence, size of input dim\n\n# Basic RNN\n\ntf.model.add(tf.keras.layers.SimpleRNN(units=5, activation='tanh', input_shape=(sequence_length, input_dim), return_sequences=True))\n\n# fully connected layer\ntf.model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(units=num_classes, activation='softmax')))\n\ntf.model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(lr=learning_rate),\n metrics=['accuracy'])\n\n# train\ntf.model.fit(x_one_hot, y_one_hot, epochs=50, callbacks=[tensorboard_callback])\ntf.model.summary()\n\npredictions = tf.model.predict(x_one_hot)\nfor i, prediction in enumerate(predictions):\n print(prediction)\n # print char using argmax, dict\n result_str = [idx2char[c] for c in np.argmax(prediction, axis=1)]\n print(\"\\tPrediction str: \", ''.join(result_str))\n\nprint(tf.model.trainable_weights)\n\n#logdir\n\n# python -m 
tensorboard.main --logdir=\"logs/20210103-153113\" --port=6006","sub_path":"py_basic/lab-12-tf2-12-1-hello-rnn.py","file_name":"lab-12-tf2-12-1-hello-rnn.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"250354596","text":"import unittest\nfrom the import the, _TheBe, _TheA\n\n\nclass TestTheSpecialCase(unittest.TestCase):\n def test_the_be(self):\n it = the(1)\n self.assertTrue(isinstance(it.be, _TheBe))\n self.assertEqual(it.be.should, it)\n\n def test_be_callable(self):\n self.assertTrue(the(1).be(1))\n with self.assertRaises(AssertionError):\n the(True).should.be(False)\n\n def test_the_a(self):\n it = the(1)\n self.assertTrue(isinstance(it.a, _TheA))\n self.assertEqual(it.a.should, it)\n\n def test_a_callable(self):\n self.assertTrue(the(\"1\").should.be.a(str))\n with self.assertRaises(AssertionError):\n the(1).should.be.a(str)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_the_special_case.py","file_name":"test_the_special_case.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"284315434","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport json\nimport shutil\nimport settings\nfrom utils.parse import ConfigLoader\nfrom utils.products import get_working_directory, export_files, mkdir_if_not_exist\nfrom core.storage.shortcuts import fetch_all, upload_all, fetch_list, construct_context\n\nthismodule = sys.modules[__name__]\n\n# a\\b\\c.txt => c.txt\n# a/b/c.txt => c.txt\ndef posix_basename(path):\n\treturn os.path.basename(to_local_path(path))\n\n# TODO: changes the pathsep to local system \ndef to_local_path(path):\n\treturn path.replace(\"\\\\\", '/')\n\ndef refine(queryset):\n\tdata = []\n\tfor item in queryset:\n\t\tsource, result = item['source'], item['result']\n\t\tmark_result = clean(result)\n\t\tmark_result_str = json.dumps(mark_result, ensure_ascii=False).encode('utf-8')\n\t\tfile_path = to_local_path(source['fileName'])\n\t\tdata.append((file_path, mark_result_str))\n\treturn data\n\n\nredundant_props = [u'_personInProjectId', u'Workload', u'_guid', u'markCount', u'_createTime', u'_id']\ndef clean(result):\n\tfor prop in redundant_props:\n\t\tresult.pop(prop)\n\treturn result\n\n\ndef upload(config):\n\targs = config.upload\n\tcontainer_names = args['task_id']\n\tsubdirs = args['subdir']\n\troot = config.common['root']\n\trelpath = config.common['relpath']\n\tfor subdir, container_name in construct_context(subdirs, container_names):\n\t\tcontainer_name = str(container_name)\n\t\tindex_file = get_working_directory(container_name+'.txt')\n\t\tblob_names = upload_all(os.path.join(root, subdir), os.path.join(relpath, subdir), container_name, index_file, suffix=('.png', '.jpg'))\n\t\twith open(index_file, 'w') as f:\n\t\t\tfor blob_name in blob_names:\n\t\t\t\titem = {\"fileName\": blob_name, \"title\": os.path.basename(blob_name)}\n\t\t\t\tf.write(json.dumps(item, ensure_ascii=False).encode('utf-8')+'\\n')\n\n\n\ndef extended_export_files(data, dirpath, picroot):\n\tfor name, anno in data:\n\t\ttarget_file = os.path.join(dirpath, name)\n\t\t\n\t\t# creates the directory\n\t\tmkdir_if_not_exist(os.path.dirname(target_file))\n\n\t\t# copy original images\n\t\tpic_path = os.path.join(picroot, name)\n\t\tshutil.copy(pic_path, target_file)\n\n\t\t# writes the annotation files\n\t\ttext_path = os.path.splitext(target_file)[0]+'.txt'\n\t\twith 
open(text_path, 'w') as f:\n\t\t\tf.write(anno)\n\n\ndef export(config):\n\targs = config.export\n\ttitle = args['title']\n\n\tif isinstance(title, str) or isinstance(title, unicode):\n\t title = [title]\n\n\tqueryset = fetch_list(title)\n\tif args.get('filenames'):\n\t\tfilenames = args['filenames']\n\t\tqueryset = filter(lambda x: posix_basename(x['source']['fileName']) in filenames, queryset)\n\t\tif len(filenames) != len(queryset):\n\t\t\timport pdb;pdb.set_trace()\n\n\tresult = refine(queryset)\n\trelpath = config.common['relpath']\n\tworkdir = get_working_directory('&'.join(title))\n\n\textended_export_files(result, workdir, relpath)\n\t\n\ndef main():\n config = ConfigLoader()\n getattr(thismodule, config.common['task'])(config)\n\n\ndef usage():\n return u\"道路线5万张精细标注\"","sub_path":"src/apps/app_road5w.py","file_name":"app_road5w.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"48073381","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 16 07:54:29 2019\r\n\r\n@author: Deepti.Venugopal\r\n\"\"\"\r\n\r\n#Normal way of appending into list\r\nmystring = 'hello'\r\nmylist = []\r\n\r\nfor letter in mystring:\r\n mylist.append(letter)\r\n\r\nprint(mylist)\r\n\r\n#Effective Way of appending\r\nmystring = 'hello'\r\nmylist = [letter for letter in mystring]\r\nprint(mylist)\r\n\r\nmylist = [item for item in 'word']\r\nprint(mylist)\r\n\r\nmylist = [num for num in range(0,11)]\r\nprint(mylist)\r\n\r\n#can do operations on this\r\nmylist=[num+num for num in range(0,11)] #add numbers in the range\r\nprint(mylist)\r\n\r\nmylist=[num**2 for num in range(0,11)] #get squared of num\r\nprint(mylist)\r\n\r\nmylist = [num for num in range(0,11) if num % 2==0] # get even numbers\r\nprint(mylist)\r\n\r\nmylist = [num**2 for num in range(0,11) if num % 2==0] # get even numbers of the squared\r\nprint(mylist)\r\n\r\n#convert celcius to fahrenheit\r\ncelcius = [0,10,20,34.5]\r\nfahrenheit = [((9/5)*temp + 32) for temp in celcius]\r\nprint(fahrenheit)\r\n\r\n#if else in list comprehension - not to use much\r\nresult = [x if x%2 ==0 else 'ODD' for x in range(0,11)]\r\nprint(result)\r\n\r\n#nested for loops in normal way\r\nmylist = []\r\nfor x in [2,4,6]:\r\n for y in [1,10,1000]:\r\n mylist.append(x*y)\r\n\r\nprint(mylist)\r\n\r\n#list comprehension of nested loop\r\nmylist = [x*y for x in [2,4,6] for y in [1,10,1000]]\r\nprint(mylist)","sub_path":"Python_Statements_List_Comprehensions.py","file_name":"Python_Statements_List_Comprehensions.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323337431","text":"# %load q02_plot/build.py\n# Default imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport seaborn as sns\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\nfacet=None\nnum_cols = ['LotArea','GarageArea','OpenPorchSF','SalePrice']\n# Write your code here:\ndef plot(num_cols):\n for i in range(0,len(num_cols),2):\n if(len(num_cols)>i+1):\n plt.figure(figsize = (10,4))\n plt.subplot(121)\n sns.boxplot(facet,num_cols[i],data=data)\n plt.subplot(122)\n sns.boxplot(facet,num_cols[i+1],data=data)\n plt.tight_layout()\n plt.show()\n else:\n sns.boxplot(facet,num_cols[i],data=data)\n #plt.show()\nplot(num_cols) 
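As a quick aside to the app_road5w.py record above: its path helpers can be checked with a short standalone snippet. The sample values come straight from the record's own comments (a\b\c.txt => c.txt); this is only an illustration, not part of the original module.

import os

def to_local_path(path):
    # convert Windows-style separators to POSIX ones, as in the record above
    return path.replace("\\", "/")

def posix_basename(path):
    return os.path.basename(to_local_path(path))

# matches the examples in the record's comments
assert posix_basename("a\\b\\c.txt") == "c.txt"
assert posix_basename("a/b/c.txt") == "c.txt"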
\n\n\n","sub_path":"q02_plot/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"459587975","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\n\n\nfrom .models import Toppings, Pizza, RegularPizza, SicilianPizza, Subs, Sub_Additions, Pasta, Salads, DinnerPlatters, UserCart, UserOrder, OrderStatus, OrderNumber\n\n# Create your views here.\n\n\ndef index(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user\n }\n return render(request, \"orders/index.html\", context)\n\n\ndef login_view(request):\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"orders/login.html\", {\"message\": \"Invalid credentials.\"})\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n\ndef logout_view(request):\n logout(request)\n return render(request, \"orders/login.html\", {\"message\": \"Logged out.\"})\n\n\ndef register(request):\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n first_name = request.POST[\"first_name\"]\n last_name = request.POST[\"last_name\"]\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n password2 = request.POST[\"password2\"]\n # Check if password and password confirmation are matching.\n if not password == password2:\n return render(request, \"orders/register.html\", {\"message\": \"Passwords do not match.\"})\n # Check if username already exist in database.\n try:\n usernameExist = User.objects.get(username=username)\n return render(request, \"orders/register.html\", {\"message\": \"Username is not available. Please try a different username.\"})\n except KeyError:\n return render(request, \"orders/register.html\", {\"message\": \"Missing information in field.\"})\n except User.DoesNotExist:\n # If username is available, create entry into DB with user details\n user = User.objects.create_user(username, email, password)\n user.first_name = first_name\n user.last_name = last_name\n user.save()\n # Log new user in automatically after registering for new account\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render(request, \"orders/register.html\", {\"message\": None})\n\n\ndef menu(request, msg):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if msg == 'ItemAdded':\n message = \"Item has been added. You can go to My Cart to proceed with order.\"\n elif msg == 'OrderPlaced':\n message = \"Your order has been placed. 
You can go to My Order to check on your order status.\"\n else:\n message = None\n\n context = {\n \"message\": message,\n \"userlogged\": request.user\n }\n\n return render(request, \"orders/menu.html\", context)\n\n\ndef regularpizza(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"regularpizza\": RegularPizza.objects.all().order_by('id'),\n \"toppings\": Toppings.objects.all().order_by('id'),\n \"category\": \"Regular Pizza\"\n }\n return render(request, \"orders/regularpizza.html\", context)\n\n\ndef sicilianpizza(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"sicilianpizza\": SicilianPizza.objects.all().order_by('id'),\n \"toppings\": Toppings.objects.all().order_by('id'),\n \"category\": \"Sicilian Pizza\"\n }\n return render(request, \"orders/sicilianpizza.html\", context)\n\n\ndef subs(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"subs\": Subs.objects.all().order_by('id'),\n \"additions\": Sub_Additions.objects.all().order_by('id'),\n \"category\": \"Subs\"\n }\n return render(request, \"orders/subs.html\", context)\n\n\ndef pasta(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"pasta\": Pasta.objects.all().order_by('id'),\n \"category\": \"Pasta\"\n }\n return render(request, \"orders/pasta.html\", context)\n\n\ndef salads(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"salads\": Salads.objects.all().order_by('id'),\n \"category\": \"Salads\"\n }\n return render(request, \"orders/salads.html\", context)\n\n\ndef dinnerplatters(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n context = {\n \"userlogged\": request.user,\n \"dinnerplatters\": DinnerPlatters.objects.all().order_by('id'),\n \"category\": \"Dinner Platters\"\n }\n return render(request, \"orders/dinnerplatters.html\", context)\n\n\ndef addtoCart(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n\n user_id = request.user.id\n category = request.POST[\"category\"]\n item = request.POST[\"item\"]\n addition = request.POST[\"addition\"]\n size = request.POST[\"size\"]\n quant = int(request.POST[\"quant\"])\n priceEach = float(request.POST[\"priceEach\"])\n\n cartItem = UserCart.objects.create(user_id=user_id, category=category, item=item, additions=addition,\n size=size, quantity=quant, priceEach=priceEach)\n cartItem.save()\n\n return HttpResponseRedirect(reverse(\"menu\", args=(\"ItemAdded\",)))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef myCart(request, msg):\n # Ensure that user is logged in\n if not 
request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n cartItems = UserCart.objects.filter(user_id=request.user.id).order_by('id')\n\n cartItemsLst = []\n total = 0\n\n for item in cartItems:\n myItem = {}\n myItem[\"itemID\"] = item.id\n myItem[\"detailedItem\"] = item.category + ' - ' + item.item\n if item.additions:\n myItem[\"addition\"] = item.additions\n else:\n myItem[\"addition\"] = ''\n myItem[\"size\"] = item.size\n myItem[\"quantity\"] = item.quantity\n myItem[\"priceEach\"] = item.priceEach\n myItem[\"itemTotal\"] = item.quantity * item.priceEach\n cartItemsLst.append(myItem)\n total += myItem[\"itemTotal\"]\n\n if msg == 'Warning':\n message = \"Quantity value is inappropriate.\"\n elif msg == 'NoItem':\n message = \"No such item.\"\n elif msg == 'ItemUpdated':\n message = \"Item has been updated.\"\n elif msg == 'ItemRemoved':\n message = \"Item has been removed.\"\n elif msg == 'AllItemsRemoved':\n message = \"All items are removed.\"\n else:\n message = None\n\n context = {\n \"message\": message,\n \"userlogged\": request.user,\n \"cartItemsLst\": cartItemsLst,\n \"total\": total\n }\n\n return render(request, \"orders/myCart.html\", context)\n\n\ndef update_quantity(request, itemID):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n\n updatedquantity = request.POST[str(itemID)]\n if updatedquantity == \"\":\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"Warning\",)))\n else:\n try:\n ItemToUpdate = UserCart.objects.get(user_id=request.user.id, id=itemID)\n ItemToUpdate.quantity = int(updatedquantity)\n ItemToUpdate.save()\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"ItemUpdated\",)))\n except UserCart.DoesNotExist:\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"NoItem\",)))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef deleteItem(request, itemID):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n try:\n ItemToDelete = UserCart.objects.get(user_id=request.user.id, id=itemID)\n ItemToDelete.delete()\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"ItemRemoved\",)))\n except UserCart.DoesNotExist:\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"NoItem\",)))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef deleteAllItem(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n\n ItemsToDelete = UserCart.objects.filter(user_id=request.user.id)\n if ItemsToDelete.count() == 0:\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"NoItem\",)))\n\n ItemsToDelete.delete()\n return HttpResponseRedirect(reverse(\"myCart\", args=(\"AllItemsRemoved\",)))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef submitOrder(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n # Get current order 
number, increase by 1 as current order confirmation number\n currentOrderNo = OrderNumber.objects.get(id=1)\n currentOrderNo.order_no = currentOrderNo.order_no + 1\n currentOrderNo.save()\n confirmationNo = currentOrderNo.order_no\n # Set status as pending for all items to be ordered\n statusPending = OrderStatus.objects.get(status=\"Pending\")\n\n # Get items in user's cart\n cartItems = UserCart.objects.filter(user_id=request.user.id).order_by('id')\n\n for item in cartItems:\n orderItem = UserOrder.objects.create(order_no=confirmationNo, user_id=request.user.id, username=request.user.username,\n category=item.category, item=item.item, additions=item.additions, size=item.size,\n quantity=item.quantity, priceEach=item.priceEach, orderStatus=statusPending)\n orderItem.save()\n\n # Remove all items that have been ordered in user's cart\n ItemsToDelete = UserCart.objects.filter(user_id=request.user.id)\n ItemsToDelete.delete()\n\n return HttpResponseRedirect(reverse(\"menu\", args=(\"OrderPlaced\",)))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef myOrder(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n orderItems = UserOrder.objects.filter(user_id=request.user.id).order_by('id')\n\n OrderNoLst = []\n uniqueNo = 0\n for item in orderItems:\n if item.order_no != uniqueNo:\n OrderNoLst.append(item.order_no)\n uniqueNo = item.order_no\n\n orderList = []\n for order in OrderNoLst:\n\n orderGroup = {}\n orderGroup[\"orderNo\"] = order\n # Get items by this order number\n orderByNo = UserOrder.objects.filter(user_id=request.user.id, order_no=order).order_by('id')\n itemsByNo = []\n allStatusCompleted = 0\n total = 0\n\n for item in orderByNo:\n myItem = {}\n myItem[\"detailedItem\"] = item.category + ' - ' + item.item\n if item.additions:\n myItem[\"addition\"] = item.additions\n else:\n myItem[\"addition\"] = ''\n myItem[\"size\"] = item.size\n myItem[\"quantity\"] = item.quantity\n myItem[\"priceEach\"] = item.priceEach\n myItem[\"itemTotal\"] = item.quantity * item.priceEach\n itemsByNo.append(myItem)\n total += myItem[\"itemTotal\"]\n if item.orderStatus.status == 'Completed':\n allStatusCompleted += 1\n\n orderGroup[\"itemsByNo\"] = itemsByNo\n orderGroup[\"total\"] = total\n if allStatusCompleted == orderByNo.count():\n orderGroup[\"status\"] = 'Completed'\n else:\n orderGroup[\"status\"] = 'Pending'\n\n # Append order group by order number into main list\n # orderList [orderGroup{orderNo: , itemsByNo: itemsByNo[myItem{item key-pair values }, myItem{}, .. 
] , total: , status: }, orderGroup{}, orderGroup{}, orderGroup{}, ..]\n orderList.append(orderGroup)\n\n context = {\n \"userlogged\": request.user,\n \"orderList\": orderList\n }\n\n return render(request, \"orders/myOrder.html\", context)\n\n\ndef CustomerOrder(request):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if not request.user.is_superuser:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # Customer Order page only to be accessed and updated by superuser\n orderItems = UserOrder.objects.all().order_by('id')\n\n OrderNoLst = []\n uniqueNo = 0\n for item in orderItems:\n if item.order_no != uniqueNo:\n OrderNoLst.append(item.order_no)\n uniqueNo = item.order_no\n\n orderList = []\n for order in OrderNoLst:\n\n orderGroup = {}\n orderGroup[\"orderNo\"] = order\n # Get items by this order number\n orderByNo = UserOrder.objects.filter(order_no=order).order_by('id')\n # Get customer ID to display name on page\n customerID = orderByNo[0].user_id\n customer = User.objects.get(id=customerID)\n orderGroup[\"customerName\"] = customer.last_name + ', ' + customer.first_name\n\n itemsByNo = []\n allStatusCompleted = 0\n total = 0\n\n for item in orderByNo:\n myItem = {}\n myItem[\"detailedItem\"] = item.category + ' - ' + item.item\n if item.additions:\n myItem[\"addition\"] = item.additions\n else:\n myItem[\"addition\"] = ''\n myItem[\"size\"] = item.size\n myItem[\"quantity\"] = item.quantity\n myItem[\"priceEach\"] = item.priceEach\n myItem[\"itemTotal\"] = item.quantity * item.priceEach\n itemsByNo.append(myItem)\n total += myItem[\"itemTotal\"]\n if item.orderStatus.status == 'Completed':\n allStatusCompleted += 1\n\n orderGroup[\"itemsByNo\"] = itemsByNo\n orderGroup[\"total\"] = total\n if allStatusCompleted == orderByNo.count():\n orderGroup[\"status\"] = OrderStatus.objects.get(status=\"Completed\").id\n else:\n orderGroup[\"status\"] = OrderStatus.objects.get(status=\"Pending\").id\n\n # Append order group by order number into main list\n # orderList [orderGroup{orderNo: , itemsByNo: itemsByNo[myItem{item key-pair values }, myItem{}, .. 
] , total: , status: }, orderGroup{}, orderGroup{}, orderGroup{}, ..]\n orderList.append(orderGroup)\n\n context = {\n \"userlogged\": request.user,\n \"orderList\": orderList,\n \"orderStatus\": OrderStatus.objects.all().order_by('id')\n }\n\n return render(request, \"orders/CustomerOrder.html\", context)\n\n\ndef updateOrdStatus(request, orderNo):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n updatedStatus = int(request.POST[\"status\"])\n\n # Get user items with orderNo\n itemsToUpdate = UserOrder.objects.filter(order_no=orderNo)\n for item in itemsToUpdate:\n item.orderStatus_id = updatedStatus\n item.save()\n\n return HttpResponseRedirect(reverse(\"CustomerOrder\"))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef deleteOrder(request, orderNo):\n # Ensure that user is logged in\n if not request.user.is_authenticated:\n return render(request, \"orders/login.html\", {\"message\": None})\n\n if request.method == \"POST\":\n # Delete items with orderNo\n UserOrder.objects.filter(order_no=orderNo).delete()\n\n return HttpResponseRedirect(reverse(\"CustomerOrder\"))\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return HttpResponseRedirect(reverse(\"index\"))\n","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"147667517","text":"from django.test import TestCase, tag\nfrom django.contrib.auth.models import User\nfrom blog.models import Blog\nfrom comment.models import Comment\nfrom blog.forms import BlogForm\nfrom comment.forms import CommentForm\nfrom home.forms import ContactForm\n\nclass BlogTest(TestCase):\n def setUp(self):\n self.user = User.objects.create_superuser(\n 'foo',\n 'foo@test.com',\n 'password'\n )\n self.user1 = User.objects.create_user(\n username=\"radi\",\n email=\"chuck.jones@acme.edu\",\n password=\"password\",\n first_name=\"Chuck\",\n last_name=\"Jones\"\n )\n self.logged_in = self.client.login(\n username='foo',\n password='password'\n )\n\n self.blog1 = Blog.objects.create(\n title='test blog',\n body='body test blot1',\n author=self.user\n )\n self.blog2 = Blog.objects.create(\n title='test blog',\n body='body test blot2',\n author=self.user\n )\n self.blog3 = Blog.objects.create(\n title='test blog',\n body='body test blot2',\n author=self.user\n )\n\n def test_blog_slugify(self):\n self.assertEqual(self.blog1.slug, 'test-blog')\n self.assertEqual(self.blog2.slug, 'test-blog-1')\n self.assertEqual(self.blog3.slug, 'test-blog-2')\n\n @tag('fast')\n def test_cud_on_blog_and_comments(self): # create update and delete a blog\n # log user1 in and user (admin) will be automatically logged out\n self.client.login(username='radi', password='password')\n\n # create new blog\n blogform_data = {'title': 'new test blog', 'body': 'blog body'}\n blogform = BlogForm(data=blogform_data)\n createblog_response = self.client.post('/blog/createblog/', blogform_data)\n num_of_blogs = Blog.objects.all().count()\n blog = Blog.objects.get(title=blogform_data['title'])\n self.assertTrue(blogform.is_valid())\n self.assertEqual(blog.slug, 'new-test-blog')\n self.assertEqual(blog.author, self.user1)\n self.assertEqual(num_of_blogs, 4) # three blogs create in setUp\n 
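As an aside to test_blog_slugify in the blog/tests.py record above: the test expects colliding titles to slugify to 'test-blog', 'test-blog-1', 'test-blog-2'. Below is a minimal sketch of that numbering scheme, assuming the real Blog model does something equivalent in save(); the helper name unique_slug is hypothetical.

from django.utils.text import slugify

def unique_slug(title, existing):
    # hypothetical helper: append -1, -2, ... until the slug is unused
    base = slugify(title)
    slug, n = base, 0
    while slug in existing:
        n += 1
        slug = "{}-{}".format(base, n)
    return slug

taken = set()
for _ in range(3):
    taken.add(unique_slug("test blog", taken))
print(sorted(taken))  # ['test-blog', 'test-blog-1', 'test-blog-2']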
self.assertRedirects(\n createblog_response,\n '/blog/new-test-blog/',\n status_code=302,\n target_status_code=200\n )\n hit_count = blog.hit_count\n self.assertEqual(hit_count, 1) # first view by the auther after blog created\n\n\n # test comment form as a parent comment\n commentform_data = {\n 'content': 'new comment',\n # button name and value attribut shall be passed through the data dict\n 'commentform': 'comment', # The data dict should map input names to values.\n }\n commentform = CommentForm(data=commentform_data)\n comment_response = self.client.post(\n '/blog/new-test-blog/',\n commentform_data,\n follow=True\n )\n print(comment_response)\n comment = Comment.objects.get(object_id=blog.id)\n self.assertTrue(commentform.is_valid())\n self.assertEqual(blog.get_content_type, comment.content_type)\n self.assertEqual(blog.comments.first(), comment)\n self.assertEqual(comment.content, commentform_data['content'])\n self.assertTemplateUsed(comment_response, 'blog/blogdetail.html')\n self.assertRedirects(\n comment_response,\n '/blog/new-test-blog/',\n status_code=302,\n target_status_code=200\n )\n\n\n # test child comment by another logged in user\n self.client.login(username='foo', password='password')\n childform_data = {\n 'content': 'child comment',\n 'parent_id': comment.id,\n 'commentform': 'comment'\n }\n childform = CommentForm(data=childform_data)\n child_response = self.client.post('/blog/new-test-blog/',childform_data, follow=True)\n child_comment = Comment.objects.get(parent=comment)\n self.assertTrue(childform.is_valid())\n self.assertEqual(blog.comments.first().replies.first(), child_comment)\n self.assertEqual(comment.replies.first(), child_comment)\n self.assertEqual(child_comment.content, childform_data['content'])\n self.assertEqual(child_comment.parent, comment)\n self.assertEqual(child_comment.user, self.user)\n self.assertRedirects(\n child_response,\n '/blog/new-test-blog/',\n status_code=302,\n target_status_code=200\n )\n\n\n # test creating a comment by anonymous user.\n self.client.logout()\n form_data = {\n 'content': 'child comment',\n 'parent_id': comment.id,\n 'commentform': 'comment'\n }\n form = CommentForm(data=childform_data)\n response = self.client.post('/blog/new-test-blog/',childform_data, follow=True)\n self.assertEqual(response.status_code, 403)\n\n\n # test updating and delete blog by anonymous user\n # update\n formanon_data = {'title': 'another user', 'body': 'another blog body'}\n anon_response = self.client.post('/blog/new-test-blog/update', formanon_data)\n self.assertRedirects(\n anon_response,\n '/accounts/login/?next=/blog/new-test-blog/update', # redirect anonymous to login\n status_code=302,\n target_status_code=200\n )\n\n # delete\n anondelete_resp = self.client.post('/blog/new-test-blog/delete')\n self.assertRedirects(\n anondelete_resp,\n '/accounts/login/?next=/blog/new-test-blog/delete', # redirect anonymous to login\n status_code=302,\n target_status_code=200\n )\n\n # test updating and deleting blog by another user\n # update\n self.client.login(username='foo', password='password')\n form2_data = {'title': 'another user', 'body': 'another blog body'}\n foo_response = self.client.post('/blog/new-test-blog/update', form2_data)\n self.assertRedirects(\n foo_response,\n '/accounts/profile/foo', # redirect user who is not the blog owner to his profile page\n status_code=302,\n target_status_code=200\n )\n\n # delete\n foodelete_resp = self.client.post('/blog/new-test-blog/delete')\n self.assertRedirects(\n foo_response,\n 
'/accounts/profile/foo', # redirect user who is not the blog owner to his profile page\n            status_code=302,\n            target_status_code=200\n        )\n\n\n        # test update and delete blog by its Author\n        self.client.login(username='radi', password='password')\n\n        # update\n        updateform_data = {\n            'title': 'update new test blog',\n            'body': 'update blog body'\n        }\n        updateform = BlogForm(data=updateform_data)\n        updateblog_response = self.client.post('/blog/new-test-blog/update', updateform_data)\n        updated_blog = Blog.objects.get(title=updateform_data['title'])\n        self.assertRedirects(\n            updateblog_response,\n            '/blog/new-test-blog/',\n            status_code=302,\n            target_status_code=200\n        )\n        self.assertTrue(updateform.is_valid())\n        self.assertEqual(updated_blog.title, 'update new test blog')\n        self.assertEqual(updated_blog.body, 'update blog body')\n        self.assertEqual(updated_blog.slug, 'new-test-blog') # update blog doesn't change SlugField\n\n        # delete\n        comment = Comment.objects.filter(content_type=blog.get_content_type).first()\n        auth_resp = self.client.post('/blog/new-test-blog/delete')\n        num_of_blogs = Blog.objects.all().count()\n        comment1 = Comment.objects.filter(content_type=blog.get_content_type).first()\n        comment2 = Comment.objects.filter(content_type=updated_blog.get_content_type).first()\n        deleted_blog = Blog.objects.filter(slug='new-test-blog').first()\n        self.assertRedirects(\n            auth_resp,\n            '/blog/',\n            status_code=302,\n            target_status_code=200\n        )\n        self.assertEqual(deleted_blog, None)\n        self.assertEqual(num_of_blogs, 3) # three blogs created in setUp\n        self.assertNotEqual(comment, None) # the comments exist before deleting the blog\n        self.assertEqual(comment1, None) # deleting a blog deletes its associated comments\n        self.assertEqual(comment2, None)\n\n\n    def test_contact_form_post(self):\n        # test valid contact form\n        form_data1 = {\n            'first_name':'radi',\n            'family_name':'mus',\n            'message':'hello',\n            'email':'mus.radi85@gmail.com',\n            'phone': '+4915146825243',\n            'next': '/blog/test-blog/'\n        }\n        form1 = ContactForm(data=form_data1)\n        response1 = self.client.post('/blog/test-blog/',form_data1, follow=True)\n        self.assertTrue(form1.is_valid())\n        self.assertRedirects(\n            response1,\n            form_data1['next'],\n            status_code=302,\n            target_status_code=200\n        )\n\n        # test invalid contact form\n        form_data2 = {\n            'first_name':'radi',\n            'family_name':'mus',\n            'message':'hello',\n            'email':'mus.radi85@gmail.com',\n            'phone': '15168247', # invalid phone number\n            'next': '/blog/'\n        }\n\n        form2 = ContactForm(data=form_data2)\n        response2 = self.client.post('/blog/',form_data2, follow=True)\n        self.assertFalse(form2.is_valid())\n        self.assertTemplateUsed(response2, 'contactform.html')\n        # self.assertRedirects(response2, '/blog/',status_code=302, target_status_code=200)\n","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566843962","text":"# -*- coding:utf-8 -*-\nimport time\nfrom flask import Flask\nfrom flask_mail import Mail,Message\n\napp = Flask(__name__)\n\n# Mail configuration: server / port / SSL / account name / authorization code\napp.config['MAIL_SERVER'] = \"smtp.qq.com\"\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USE_SSL'] = True\napp.config['MAIL_USERNAME'] = \"1370174361@qq.com\"\napp.config['MAIL_PASSWORD'] = \"lvlyhoktzkmzgfab\"\napp.config['MAIL_DEFAULT_SENDER'] = 'FlaskAdmin<1370174361@qq.com>'\n\nmail = Mail(app)\n\n\n@app.route('/')\ndef hello_world():\n    return '
<a href=\"/send_mail\">发送邮件</a>'\n\n@app.route('/send_mail')\ndef send_mail():\n    msg = Message('天高地迥,觉宇宙之无穷;兴尽悲来,识盈虚之有数',recipients=['1337418776@qq.com'],body='小猪佩奇身上纹,掌声送给社会人')\n    # msg = Message('天高地迥,觉宇宙之无穷;兴尽悲来,识盈虚之有数',recipients=['1370174361@qq.com'],body='小猪佩奇身上纹,掌声送给社会人')\n    while True:\n        time.sleep(0.5)\n        mail.send(msg)\n    return 'send_mail'\n\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"flask1-5/flask_04/demo2_email.py","file_name":"demo2_email.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"210424811","text":"\"\"\"Couchdb extension\"\"\"\n\nimport os\nimport sys\nimport couchdb\n\nfrom cement.core import backend, handler, hook\n\nfrom scilifelab.pm.core import command\nfrom scilifelab.utils.http import check_url\nfrom scilifelab.utils.timestamp import utc_time\n\nLOG = backend.minimal_logger(__name__)\n\nclass CouchdbCommandHandler(command.CommandHandler):\n    \"\"\" \n    This class is an implementation of the :ref:`ICommand\n    ` interface.\n    \"\"\" \n\n    class Meta:\n        \"\"\"Handler meta-data\"\"\"\n        \n        interface = command.ICommand\n        \"\"\"The interface that this class implements.\"\"\"\n\n        label = 'couchdb'\n        \"\"\"The string identifier of this handler.\"\"\"\n\n        conn = None\n        \"\"\"The database connection\"\"\"\n\n        url = None\n        \"\"\"The database url\"\"\"\n\n        user = None\n        \"\"\"The user\"\"\"\n        \n        pw = None\n        \"\"\"The password used to connect\"\"\"\n\n        views = {}\n        \"\"\"Temporary views for speeding up update functions\"\"\"\n\n    ### Must be present\n    def command(self):\n        pass\n\n    def connect(self, url, port=\"5984\"):\n        def runpipe():\n            self._meta.url=\"http://{}:{}\".format(url,port)\n            if not check_url(self._meta.url):\n                self.app.log.warn(\"Connecting to server at {} failed. No such url.\".format(self._meta.url))\n                return\n            self._meta.conn = couchdb.Server(url=self._meta.url)\n            self.app.log.info(\"Connecting to server at {} succeeded\".format(self._meta.url))\n        return self.dry(\"Connecting to database @{}:{}\".format(url, port), runpipe)\n\n    def db(self, dbname):\n        \"\"\"Get database <dbname>.\n\n        :param dbname: Database name\n\n        :returns: database on success, otherwise False\n        \"\"\"\n        def runpipe():\n            try:\n                db = self._meta.conn[dbname]\n            except:\n                self.app.log.warn(\"No such database {} \".format(dbname))\n                return False\n            return db\n        return self.dry(\"Retrieving database {} from {}\".format(dbname, self._meta.url), runpipe)\n\n    def save(self, dbname, obj, update_fn=None):\n        \"\"\"Save/update database object <obj> in database <dbname>. 
If\n        <obj> already exists and <update_fn> is passed, update will\n        only take place if object has been modified\n\n        :param dbname: database name\n        :param obj: database object to save\n        :param update_fn: function that operates on object and makes sure it doesn't already exist\n        \"\"\"\n        def runpipe():\n            db = self.db(dbname)\n            if not update_fn:\n                db.save(obj)\n                self.app.log.info(\"Saving object {} with id {}\".format(repr(obj), obj[\"_id\"]))\n            else:\n                new_obj = update_fn(db, obj)\n                if new_obj is not None:\n                    self.app.log.info(\"Saving object {} with id {}\".format(repr(new_obj), new_obj[\"_id\"]))\n                    db.save(new_obj)\n                else:\n                    self.app.log.info(\"Object {} with id {} present and not in need of updating\".format(repr(obj), obj[\"_id\"]))\n        return self.dry(\"Saving object {}\".format(repr(obj)), runpipe)\n\n    def get_view(self, dbname, design, name):\n        \"\"\"Get view from a database <dbname> with design document <design>, named <name>\n\n        :param dbname: database name\n        :param design: design document\n        :param name: view name\n        \"\"\"\n        db = self.db(dbname)\n        return db.view(\"{}/{}\".format(design, name))\n\n\n    def _view(self, dbname, key):\n        \"\"\"Create or retrieve a 'view' in database <dbname> that maps <key> to _id and _rev.\n        \n        :param dbname: database name\n        :param key: database key\n\n        \"\"\"\n        db = self.db(dbname)\n        k = \"{}_{}\".format(dbname, key)\n        if not self._meta.views.has_key(k):\n            self.app.log.info(\"generating view in database '{}' for key '{}'\".format(dbname, key))\n            self._meta.views[k] = {}\n            for dbid in db:\n                dbobj = db.get(dbid)\n                value = dbobj.get(k)\n                self._meta.views[k][value] = (dbobj.get(\"_id\"), dbobj.get(\"_rev\"))\n        return self._meta.views[k]\n\n    def _get(self, dbname, key, query):\n        \"\"\"Get a field <key> from a view <view> where field <key> == <query>.\n\n        :param dbname: database name\n        :param key: database key\n        :param query: query\n        \"\"\"\n        db = self.db(dbname)\n        view = self._view(dbname, key)\n        return view.get(query, None)\n    \ndef add_shared_couchdb_options(app):\n    \"\"\"\n    Adds shared couchdb arguments to the argument object.\n    \n    :param app: The application object.\n    \n    \"\"\"\n    app.args.add_argument('--url', help=\"Database url (excluding http://)\", nargs=\"?\", type=str)\n    app.args.add_argument('--port', help=\"Database port. 
Default 5984\", nargs=\"?\", default=\"5984\", type=str)\n app.args.add_argument('--dbname', help=\"Database name\", default=None, type=str)\n app.args.add_argument('--user', help=\"Database user\", nargs=\"?\", default=None, type=str)\n app.args.add_argument('--password', help=\"Database password\", default=None, type=str)\n\n\ndef add_couchdb_option(app):\n \"\"\"\n Adds the '--couchdb' argument to the argument object.\n \n :param app: The application object.\n \n \"\"\"\n app.args.add_argument('--couchdb', dest='output_handler', \n action='store_const', help='toggle couchdb output handler', const='couchdb')\n\n\ndef set_couchdb_handler(app):\n \"\"\"\n Overrides the configured command handler if ``--couchdb`` is passed at the\n command line.\n \n :param app: The application object.\n \n \"\"\"\n if '--couchdb' in app._meta.argv:\n app._meta.cmd_handler = 'couchdb'\n app._setup_cmd_handler()\n\ndef load():\n \"\"\"Called by the framework when the extension is 'loaded'.\"\"\"\n hook.register('post_setup', add_couchdb_option)\n hook.register('post_setup', add_shared_couchdb_options)\n hook.register('pre_run', set_couchdb_handler)\n handler.register(CouchdbCommandHandler)\n","sub_path":"scilifelab/pm/ext/ext_couchdb.py","file_name":"ext_couchdb.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19715942","text":"# 4B - prediction model with slr:\r\n# https://towardsdatascience.com/a-beginners-guide-to-linear-regression-in-python-with-scikit-learn-83a8f7ae2b4f\r\n\r\n# modules for prediction model\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\n\r\n# reshape cost columns as predictor variables\r\nX = df_california['cases'].values.reshape(-1,1)\r\ny = df_california['deaths'].values.reshape(-1,1)\r\n\r\n# split data into training and test datasets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n# create prediction model and fit data to model\r\nregressor = LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n# output the intercept/coefficient\r\n# note our coefficients will be different from stats.linregress(), as here we fit on a training *subset* of data\r\nprint('*** Intercept and Coefficient Values ***')\r\nprint('Intercept Value (Prediction Model) =', regressor.intercept_)\r\nprint('Coefficient Value (Prediction Model) =', regressor.coef_)\r\nprint('')\r\n\r\n# Predict total cost with linear regresion\r\ny_pred = regressor.predict(X_test)\r\n\r\n# Output error metrics\r\nprint('*** Error Metrics ***')\r\nprint('Mean Absolute Error =', metrics.mean_absolute_error(y_test, y_pred))\r\nprint('Mean Squared Error =', metrics.mean_squared_error(y_test, y_pred))\r\nprint('Root Mean Squared Error =', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\r\n\r\n# Output results\r\ndf_results = pd.DataFrame(\r\n {'Actual Values': y_test.flatten(), 'Predicted Values': y_pred.flatten()}\r\n)\r\ndf_results\r\n","sub_path":"modules_nb/04b_slr_pred.py","file_name":"04b_slr_pred.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"527199152","text":"import cv2 as cv\nimport numpy as np\n\n\ndef getLogImage(img):\n c = 255 / np.log(1 + np.max(img))\n log_image = c * (np.log(img + 1))\n log_image = np.array(log_image, np.uint8)\n return log_image\n\ndef resize(img, 
scale_pct):\n    # img.shape is (height, width, channels); cv.resize expects (width, height)\n    ht = int(img.shape[0]*scale_pct)\n    wt = int(img.shape[1]*scale_pct)\n    I = cv.resize(img, (wt, ht), interpolation=cv.INTER_AREA)\n    return I\n\n\nI = cv.imread(\"./images/pic.jpg\")\n\n# apply Gaussian filter to the image\nIg = cv.GaussianBlur(I, (49, 49), cv.BORDER_DEFAULT)\nIe = getLogImage(I) - getLogImage(Ig)  # note: uint8 subtraction wraps around on negative values\n\nprint(Ie.shape, I.shape)\n\ncv.imshow(\"I\", I)\ncv.imshow(\"Ig\", Ig)\ncv.imshow(\"Ie\", Ie)\ncv.waitKey(0)\n","sub_path":"GaussianFilter.py","file_name":"GaussianFilter.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598665031","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n# from tastypie.api import Api\nfrom .import views\n# from api.resources import DistrictResource, ProvinceResource, BmfPubMfResource\n\n# v1_api = Api(api_name='v1')\n# v1_api.register(DistrictResource())\n# v1_api.register(ProvinceResource())\n# v1_api.register(BmfPubMfResource())\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^', include('dashboard.urls', namespace='dashboard')),\n    url(r'^dashboard/', include('dashboard.urls', namespace='dashboard')),\n    # url(r'^api/', include(v1_api.urls)),\n    url(r'^health/', include('health.urls', namespace='health')),\n    url(r'^education/', include('education.urls', namespace='education')),\n    url(r'^users/', include('users.urls', namespace='users')),\n    url(r'^reports/', include('reports.urls', namespace='reports')),\n    url(r'^charts/', include('charts.urls', namespace='charts')),\n\n    # report builder\n    # url(r'^report_builder/', include('report_builder.urls')),\n\n    # common functions\n    url(r'^bs_get_data$', views.bs_get_data, name='bs_get_data'),\n    url(r'^bs_get_data_mock$', views.bs_get_data_mock, name='bs_get_data_mock'),\n    url(r'^bs_fetch_edit_data$', views.bs_fetch_edit_data, name='bs_fetch_edit_data'),\n    url(r'^bs_save_data$', views.bs_save_data, name='bs_save_data'),\n    url(r'^fetch_entities$', views.fetch_entities, name='fetch_entities'),\n    url(r'^add_entity$', views.add_entity, name='add_entity'),\n    url(r'^get_entity$', views.get_entity, name='get_entity'),\n    url(r'^fetch_entities_all$', views.fetch_entities_all, name='fetch_entities_all'),\n\n    url(r'^fetch_company_tele$', views.fetch_company_tele, name='fetch_company_tele'),\n\n    # add_entity_with_district\n    url(r'^add_entity_with_district$', views.add_entity_with_district, name='add_entity_with_district'),\n\n    url(r'^dl_save_data$', views.dl_save_data, name='dl_save_data'),\n    url(r'^dl_get_data$', views.dl_get_data, name='dl_get_data'),\n    url(r'^dl_fetch_edit_data$', views.dl_fetch_edit_data, name='dl_fetch_edit_data'),\n    url(r'^dl_fetch_district_disagtn$', views.dl_fetch_district_disagtn, name='dl_fetch_district_disagtn'),\n    url(r'^dl_fetch_summary_disagtn$', views.dl_fetch_summary_disagtn, name='dl_fetch_summary_disagtn'),\n    url(r'^dl_fetch_summary_dis_disagtn$', views.dl_fetch_summary_dis_disagtn, name='dl_fetch_summary_dis_disagtn'),\n\n    url(r'^fetch_incident_districts$', views.fetch_incident_districts, name='fetch_incident_districts'),\n    url(r'^fetch_incident_provinces$', views.fetch_incident_provinces, name='fetch_incident_provinces'),\n\n    # get Business Types\n    url(r'^fetch_business_types$', views.fetch_business_types, name='fetch_business_types'),\n\n    # get Tourism Infrastructure Types\n    url(r'^fetch_tourism_infrastructure_types$', views.fetch_tourism_infrastructure_types, name='fetch_tourism_infrastructure_types'),\n\n    # get entities all data\n    
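To make the log transform in the GaussianFilter.py record above concrete, here is a small standalone sketch of the same mapping it applies, s = c * log(1 + r) with c = 255 / log(1 + max(r)); the sample intensity array is made up for illustration.

import numpy as np

r = np.array([0, 1, 10, 100, 255], dtype=np.float64)
c = 255 / np.log(1 + r.max())
s = np.array(c * np.log(r + 1), np.uint8)
print(s)  # dark inputs are stretched far more than bright ones are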
url(r'^fetch_entities_plain$', views.fetch_entities_plain, name='fetch_entities_plain'),\n\n url(r'^fetch_entities_plain_column$', views.fetch_entities_plain_column, name='fetch_entities_plain_column'),\n\n\n\n\n\n\n\n\n # agri_irrigation\n url(r'^agri_irrigation/', include('agri_irrigation.urls', namespace='agri_irrigation')),\n\n # agri_agrarian\n url(r'^agri_agrarian/', include('agri_agrarian.urls', namespace='agri_agrarian')),\n\n # agri_livestock\n url(r'^agri_livestock/', include('agri_livestock.urls', namespace='agri_livestock')),\n\n # agri_fisheries\n url(r'^agri_fisheries/', include('agri_fisheries.urls', namespace='agri_fisheries')),\n\n # agri_summary\n url(r'^agri_summary/', include('agri_summary.urls', namespace='agri_summary')),\n\n # water_supply\n url(r'^water_supply/', include('water_supply.urls', namespace='water_supply')),\n\n # power_supply\n url(r'^power_supply/', include('power_supply.urls', namespace='power_supply')),\n\n # housing\n url(r'^housing/', include('housing.urls', namespace='housing')),\n\n # telecommunication\n url(r'^telecommunication/', include('telecommunication.urls', namespace='telecommunication')),\n\n # tourism\n url(r'^tourism/', include('tourism.urls', namespace='tourism')),\n\n # industry_services\n url(r'^industry_services/', include('industry_services.urls', namespace='industry_services')),\n\n # other govn services\n url(r'^other_govn_services/', include('other_govn_services.urls', namespace='other_govn_services')),\n\n # mining\n url(r'^mining/', include('mining.urls', namespace='mining')),\n url(r'^bs_mining_fetch_edit_data$', views.bs_mining_fetch_edit_data, name='bs_mining_fetch_edit_data'),\n url(r'^dl_fetch_district_disagtn$', views.dl_fetch_district_disagtn, name='dl_fetch_district_disagtn'),\n url(r'^dl_fetch_total_data$', views.dl_fetch_total_data, name='dl_fetch_total_data'),\n url(r'^dl_fetch_summary_disagtn$', views.dl_fetch_summary_disagtn, name='dl_fetch_summary_disagtn'),\n\n # transport_rail\n url(r'^transport_rail/', include('transport_rail.urls', namespace='transport_rail')),\n\n # transport-water\n url(r'^transport_water/', include('transport_water.urls', namespace='transport_water')),\n\n # transport-land\n url(r'^transport_land/', include('transport_land.urls', namespace='transport_land')),\n\n # transport_air\n url(r'^transport_air/', include('transport_air.urls', namespace='transport_air')),\n\n # transport_summary\n url(r'^transport_summary/', include('transport_summary.urls', namespace='transport_summary')),\n\n # test\n # url(r'^transport_summary/', include('transport_summary.urls', namespace='transport_summary')),\n\n\n\n\n\n]\n","sub_path":"dala/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"1614995","text":"\"\"\"\nGeneral utilities to open a Twitter file.\n\nAuthor: Alexandra DeLucia\n\"\"\"\n# Standard imports\nimport argparse\nimport logging\nimport random\n\nfrom typing import Iterable, List, Optional, Set, Union, Dict\n\n# Third-party imports\nimport regex\n\n# Local modules\nfrom littlebird import TweetReader\n\n# Configurations\nlogging.basicConfig(level=logging.INFO)\n\n\nclass LanguageNotSupportedError(ValueError):\n def __init__(self, lang: str):\n self.lang = lang\n\n#####\n# Settings\n#####\n# Supported languages\nsupport_langs: [Iterable[str]] = set([\"en\"])\n\n# List curated by Keith Harriggian\n# Only supports English\nCONTRACTIONS: Dict[str, str] = { \n \"ain't\": \"is 
not\",\n \"aren't\": \"are not\",\n \"can't\": \"can not\",\n \"can't've\": \"can not have\",\n \"cannot\": \"can not\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"i'd\": \"i would\",\n \"i'd've\": \"i would have\",\n \"i'll\": \"i will\",\n \"i'll've\": \"i will have\",\n \"i'm\": \"i am\",\n \"i've\": \"i have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you would\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\",\n \"you'll've\": \"you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\",\n \"that'll\": 
\"that will\",\n}\n\n\n#####\n# Define tokenizer class\n#####\nclass TweetTokenizer:\n \"\"\"\n Open Twitter files and process the text content.\n \"\"\"\n\n def __init__(\n self,\n language: str = \"en\",\n token_pattern: str = r\"\\b\\w+\\b\",\n stopwords: Optional[Iterable[str]] = None,\n remove_hashtags: bool = False,\n lowercase: bool = True,\n expand_contractions: bool = True\n ):\n \"\"\"\n Currently only English and Arabic are support languages (\"en\" and \"ar\").\n There are many options for the token pattern, and the token pattern should be different depending upon your use case.\n Default: r\"\\b\\w+\\b\"\n Only letters: \"\\p{L}+\"\n Letters and numbers: \"[\\p{L}\\p{N}]+\"\n Starts with a letter but can contain numbers: \"\\p{L}[\\p{L}\\p{N}]+\"\n The default stopwords None does not remove stopwords\n User handle pattern: r\"(? List[str]:\n \"\"\"\n :param tweets:\n :return: tokens\n \"\"\"\n if self.remove_hashtags:\n tweet = self.HASHTAG_RE.sub(\" \", tweet)\n\n # Remove URLs, handles, \"RT\"\n tweet = self.REMOVAL_RE.sub(\" \", tweet)\n\n # Lowercase\n if self.lowercase:\n tweet = tweet.lower()\n\n # Remove contractions (only matches lowercase)\n if self.expand_contractions:\n for contraction, expanded in CONTRACTIONS.items():\n tweet = regex.sub(contraction, expanded, tweet)\n\n # Tokenize\n tokens = self.TOKEN_RE.findall(tweet)\n\n # Remove stopwords\n if self.stopwords:\n tokens = [t for t in tokens if t not in self.stopwords]\n return tokens\n\n def tokenize_tweet_file(\n self, input_file: str, sample_size: int = -1, return_tokens: bool = False\n ) -> Optional[Union[List[str], List[List[str]]]]:\n \"\"\"\n Return tokenize tweets in file\n\n :param input_file: path to input file\n :param sample_size: size of sample to take of tweets. The sample is min(sample, number of tweets in file)\n \"\"\"\n # Get all tweet content\n all_tweet_text = []\n reader = TweetReader(input_file)\n for tweet in reader.read_tweets():\n if \"extended_tweet\" in tweet:\n text = tweet[\"extended_tweet\"][\"full_text\"]\n else:\n text = tweet[\"text\"]\n all_tweet_text.append(text)\n\n num_tweets = len(all_tweet_text)\n # Check for empty file\n if num_tweets == 0:\n logging.warning(f\"{input_file} has no tweets.\")\n return None\n\n # Sample from the file's tweets\n if sample_size != -1:\n if sample_size < num_tweets:\n all_tweet_text = random.sample(all_tweet_text, k=sample_size)\n\n # Tokenize the tweets and return\n # Some tweets have no valid tokens. Skip them.\n tweet_text_ = map(self.tokenize, all_tweet_text)\n tweet_text: Union[List[str], List[List[str]]]\n if return_tokens:\n tweet_text = [t for t in tweet_text_ if t != []]\n else:\n tweet_text = [\" \".join(t) for t in tweet_text_ if t != []]\n return tweet_text\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Command-line parser for use with scripting\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input-files\", type=str, nargs=\"+\", help=\"List of GZIP'd Tweet files\"\n )\n parser.add_argument(\n \"--sample\",\n type=int,\n default=-1,\n help=\"Number of tweets to use for the keyword counts. 
Only for Tweet files.\",\n )\n parser.add_argument(\"--language\", choices=[\"en\", \"ar\"])\n parser.add_argument(\"--output-dir\")\n parser.add_argument(\"--output-file\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n tokenizer = TweetTokenizer(remove_hashtags=True)\n tweet_text = tokenizer.tokenize_tweet_file(\n \"/home/aadelucia/files/minerva/raw_tweets/tweets_en/2014_01_01_MA.gz\",\n sample_size=10,\n )\n print(tweet_text)\n","sub_path":"littlebird/tweet_tokenizer.py","file_name":"tweet_tokenizer.py","file_ext":"py","file_size_in_byte":9356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"59888956","text":"import apache_beam as beam \nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import StandardOptions\nfrom apache_beam.options.pipeline_options import GoogleCloudOptions\nfrom apache_beam.runners import DirectRunner, DataflowRunner\nfrom apache_beam.transforms.combiners import Count\nimport logging\nimport argparse\nimport json\nimport datetime,time\n\ndef checkValueelement(element): \n key = element[0]\n value = element[1]\n if (value != 1):\n return beam.pvalue.TaggedOutput(\"duplicates\", element)\n\nclass CheckDupeTransform(beam.PTransform):\n def expand(self, pcol_element):\n count_per_keys = (pcol_element | \"MapKeys\" >> beam.Map(lambda x: (x[\"id\"], x))\n | \"CountPerKey\" >> Count.PerKey() \n )\n check_dupe_ids = (count_per_keys | \"CountDuplicates\" >> beam.FlatMap(checkValueelement).with_outputs())\n\n duplicate_records = check_dupe_ids.duplicates\n return duplicate_records\n\n\nparser = argparse.ArgumentParser(description=\"Playing with Avro data in Beam\")\n\n#Google Cloud options\n#parser.add_argument(\"--runner\", required=True, help=\"Please enter Apache Beam Runner\")\n#parser.add_argument(\"--staging_location\", required=True, help=\"Please enter staging GCS Bucket\")\n#parser.add_argument(\"--temp_location\", required=True, help=\"Please enter temporary GCS location\")\n#parser.add_argument(\"--region\", required=True, help=\"Please enter Apache Beam Runner\")\n#parser.add_argument(\"--project\", required=True, help=\"Please enter the GCP Project\")\n\n#Pipeline Specific Arguments\nparser.add_argument(\"--input\", required=True, help=\"Please enter the input GCS object path\")\nparser.add_argument(\"--output_bucket\", required=True, help=\"Please enter the GCS bucket to write intermmidiate output\")\nparser.add_argument(\"--output_table\", required=True, help=\"Please enter BQ table name\")\n\nopts, pipeline_opts = parser.parse_known_args()\noptions = PipelineOptions(pipeline_opts, save_main_session = True)\n#options.view_as(GoogleCloudOptions).project = opts.project\n#options.view_as(GoogleCloudOptions).region = opts.region\n#options.view_as(GoogleCloudOptions).staging_location = opts.staging_location\n#options.view_as(GoogleCloudOptions).temp_location = opts.temp_location\n#options.view_as(StandardOptions).runner = opts.runner\n#options.view_as(GoogleCloudOptions).job_name = f\"avro-test-{time.time_ns()}\"\n\ninput = opts.input\noutput_bucket = opts.output_bucket\noutput_table = opts.output_table\n\ntable_schema = {\n \"fields\": [\n\n {\n \"name\": \"registration_dttm\",\n \"type\": \"TIMESTAMP\"\n },\n {\n \"name\": \"id\",\n \"type\": \"INTEGER\"\n },\n {\n \"name\": \"first_name\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"last_name\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"email\",\n 
\"type\": \"STRING\"\n },\n {\n \"name\": \"gender\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"ip_address\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"cc\",\n \"type\": \"INTEGER\"\n },\n {\n \"name\": \"country\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"birthdate\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"salary\",\n \"type\": \"FLOAT\"\n },\n {\n \"name\": \"title\",\n \"type\": \"STRING\"\n },\n {\n \"name\": \"comments\",\n \"type\": \"STRING\"\n }\n ]\n}\n\n#options = PipelineOptions(options=options)\nwith beam.Pipeline(options = options) as p:\n avro_records = (p \n | \"ReadAvroRecords\" >> beam.io.ReadFromAvro(input)\n #| \"WriteAsText\" >> beam.io.WriteToText(output)\n )\n\n dupe_ids = (avro_records | \"CheckDupeIds\" >> CheckDupeTransform())\n\n write_dupe_ids = dupe_ids | \"WriteAsText\" >> beam.io.WriteToText(output_bucket, file_name_suffix=\".txt\")\n\n write_to_bq = (avro_records | \"WriteToBigQuery\" >> beam.io.WriteToBigQuery(\n output_table,\n schema=table_schema,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND\n )\n )","sub_path":"beam_flex_template_pipeline.py","file_name":"beam_flex_template_pipeline.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"27346501","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass LmCrossEntropyLoss(nn.Module):\n def __init__(self, ignore_index=None, reduction='batchmean') -> None:\n super(LmCrossEntropyLoss, self).__init__()\n assert reduction in ['none', 'batchmean', 'sum', 'mean']\n self.reduction = reduction\n self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='none')\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor:\n loss = self.compute_loss(input, target)\n return self._reduce(loss)\n\n def compute_loss(self, input: Tensor, target: Tensor) -> Tensor:\n batch_size, _, num_embeddings = input.shape\n loss = self.criterion(\n input.view(-1, num_embeddings),\n target.view(-1)\n ).view(batch_size, -1)\n return loss\n\n def _reduce(self, loss: Tensor) -> Tensor:\n if self.reduction == 'batchmean':\n return loss.sum(dim=1).mean(dim=0)\n if self.reduction == 'sum':\n return loss.sum()\n if self.reduction == 'mean':\n return loss.mean()\n return loss\n \n \nclass LabelSmoothedLmCrossEntropyLoss(nn.Module):\n def __init__(self, ignore_index=None, reduction='batchmean', label_smoothing=0.1) -> None:\n super(LabelSmoothedLmCrossEntropyLoss, self).__init__()\n assert reduction in ['none', 'batchmean', 'sum', 'mean']\n assert label_smoothing > 0\n\n self.ignore_index = ignore_index\n self.reduction = reduction\n self.label_smoothing = label_smoothing\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor:\n loss = self.compute_loss(input, target)\n return self._reduce(loss)\n\n def compute_loss(self, input: Tensor, target: Tensor) -> Tensor:\n if target.dim() == input.dim() - 1:\n target = target.unsqueeze(-1)\n\n lprobs = F.log_softmax(input, dim=-1)\n nll_loss = -lprobs.gather(dim=-1, index=target)\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n if self.ignore_index is not None:\n pad_mask = target.eq(self.ignore_index)\n nll_loss.masked_fill_(pad_mask, 0.0)\n smooth_loss.masked_fill_(pad_mask, 0.0)\n else:\n nll_loss = nll_loss.squeeze(-1)\n smooth_loss = smooth_loss.squeeze(-1)\n\n epsilon = self.label_smoothing\n eps_i = epsilon / 
lprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss\n\n def _reduce(self, loss: Tensor) -> Tensor:\n if self.reduction == 'batchmean':\n return loss.sum(dim=1).mean(dim=0)\n if self.reduction == 'sum':\n return loss.sum()\n if self.reduction == 'mean':\n return loss.mean()\n return loss","sub_path":"criterion.py","file_name":"criterion.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"59714641","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('shop.urls')),\n path('login/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('login/refresh', TokenRefreshView.as_view(), name='token_refresh'),\n path('', include('rest_registration.api.urls')),\n path('', include('facebook.urls')),\n path('', include('google.urls')),\n path('', include('account.urls')),\n path('', include('permission.urls')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645374175","text":"# -*- coding: utf-8 -*-\n\ndef is_palindrome(n):\n\tdef char2num(s):\n\t\treturn {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}[s]\n\t\n\tn = str(n)\n\tnumber = list(map(char2num, n))\n\tl = len(n)-1\n\ta = 0\n\twhile a 291 and self.limit:\n # rewards.append(result)\n # elif \"alpine1\" in self.race and time > 543 and self.limit:\n # rewards.append(result)\n # elif \"alpine2\" in self.race and time > 353 and self.limit:\n # rewards.append(result)\n # elif \"brondehach\" in self.race and time > 297 and self.limit:\n # rewards.append(result)\n # elif \"corkscrew\" in self.race and time > 317 and self.limit:\n # rewards.append(result)\n # elif \"dirt1\" in self.race and time > 122 and self.limit:\n # rewards.append(result)\n # elif \"dirt3\" in self.race and time > 218 and self.limit:\n # rewards.append(result)\n # elif \"etrack2\" in self.race and time > 448 and self.limit:\n # rewards.append(result)\n # elif \"etrack3\" in self.race and time > 385 and self.limit:\n # rewards.append(result)\n # elif \"etrack4\" in self.race and time > 423 and self.limit:\n # rewards.append(result)\n # elif \"etrack6\" in self.race and time > 355 and self.limit:\n # rewards.append(result)\n elif \"forza\" in self.race and time > 390 and self.limit:\n rewards.append(result)\n elif \"gtrack1\" in self.race and time > 149 and self.limit:\n rewards.append(result)\n elif \"gtrack3\" in self.race and time > 275 and self.limit:\n rewards.append(result)\n elif \"mixed1\" in self.race and time > 154 and self.limit:\n rewards.append(result)\n elif \"ruudskogen\" in self.race and time > 270 and self.limit:\n rewards.append(result)\n elif \"spring\" in self.race and time > 1740 and self.limit:\n rewards.append(result)\n elif \"street1\" in self.race and time > 310 and self.limit:\n rewards.append(result)\n elif \"wheel1\" in self.race and time > 330 and self.limit:\n rewards.append(result)\n elif \"wheel2\" in self.race and time > 450 and self.limit:\n rewards.append(result)\n else:\n # 
Did complete the laps, hence calculate the score\n\n # Check how many cars in front crashed\n num_cars_crashed = 0\n for rank2, driver_index2, _, time2, laps2, dmg2 in results:\n if driver_index2 < driver_index and laps2 != 3:\n num_cars_crashed += 1\n\n # Time component\n result += (50000 / time)\n\n # Overtaking component\n start_rank = driver_index + 1\n result += 50 * (start_rank - rank - num_cars_crashed)\n\n # Damage\n result -= dmg / 100\n\n # Minimum of 0\n if result < 0:\n result = 0\n\n rewards.append(result)\n\n return rewards\n\n def compute_rewards(self, model_sets):\n \"\"\"Launch drivers & torcs and get their rewards for 1 race\"\"\"\n reward_vector = np.zeros(self.population_size)\n\n # Remove old drivers:\n for filename in glob.glob(\"models/temp_models/evol_driver{}-{}*\".format(self.standard_dev, self.learning_rate)):\n os.remove(filename)\n\n # Start drivers\n procs = []\n for i, model in enumerate(model_sets):\n proc = self.init_drivers(i, model.state_dict())\n procs.append(proc)\n\n # Start torcs and wait for it to finish\n torcs_proc, race, start = self.run_torcs()\n res = torcs_proc.communicate()\n\n end = time.time()\n print(\"Finished torcs at {:04.3f}, took {:04.3f} seconds\".format(\n end, end - start))\n\n results = self.get_results(race)\n print(\"Race results:\\n {}\".format(\"\\n \".join(str(r) for r in results)))\n\n reward_vector = self.combine_results(results)\n\n if not self.test_mode:\n if sum(reward_vector) == 0:\n self.i -= 1\n\n print(\"Rewards:\\n {}\".format(reward_vector))\n return reward_vector\n\n def update_parameters(self, reward_vector, noise_sets):\n \"\"\"\n Update the parameters for the base driver based on the reward vector\n \"\"\"\n gradient = []\n for p in self.model.parameters():\n gradient.append(torch.zeros(p.size()))\n # Per reward, update the parameters with higher scoring reward having\n # a larger weight\n for i, reward in enumerate(reward_vector):\n # Multiple sets of parameters\n for j, noise in enumerate(noise_sets[i]):\n if self.standard_dev == 0:\n update = (1 / self.population_size) * reward * noise\n else:\n update = (self.population_size *\n self.standard_dev) * reward * noise\n\n gradient[j] += update\n\n for i, parameter in enumerate(self.model.parameters()):\n parameter.data += self.learning_rate * gradient[i]\n\n def run(self):\n for i in range(0, self.iterations):\n print(\"Iteration: {}\".format(i))\n # Get noised parameter sets\n model_sets, noise_sets = self.noise_models()\n # Compute reward based on a simulated race\n reward_vector = self.compute_rewards(model_sets)\n # Update parameters using the noised parameters and the race\n # outcome\n self.update_parameters(reward_vector, noise_sets)\n if (i + 1) % 25 == 0:\n torch.save(self.model.state_dict(),\n \"models/it{}.pt\".format(i + 1))\n torch.save(self.model.state_dict(\n ), \"models/output_gen_end{}-{}.pt\".format(self.standard_dev, self.learning_rate))\n\n\ndef main(model_file, exec_params, es_params):\n ev = Evolution(model_file, exec_params, es_params)\n print(\"Running with ES parameters:\\n {}\".format(es_params))\n ev.run()\n\n\nif __name__ == '__main__':\n filepath = os.path.realpath(__file__)\n\n parser = argparse.ArgumentParser(\n description=\"\")\n parser.add_argument(\n \"-i\", \"--iterations\", help=\"Number of iterations for the ES algorithm\",\n default=76, type=int\n )\n parser.add_argument(\n \"-s\", \"--standard_dev\", help=\"Standard deviation for the noise imposed \\\n during training\",\n default=1e-06, type=float\n )\n 
parser.add_argument(\n \"-lr\", \"--learning_rate\", help=\"Learning rate of the ES algorithm\",\n default=1e-06, type=float\n )\n parser.add_argument(\n \"-c\", \"--race_config\", help=\"Race configuration files (xml) directory. \\\n This will also choose the right population size (name of subdirectory)\",\n default=os.path.dirname(filepath) + \"/race-config/headless/10/\"\n )\n parser.add_argument(\n \"-m\", \"--init_model\", help=\"initial model (for pytorch)\",\n default=os.path.dirname(filepath) + \"/models/NNdriver2-100-300.pt\"\n )\n parser.add_argument(\n \"--no-headless\", help=\"Run with graphical output\",\n action=\"store_true\"\n )\n parser.add_argument(\n \"-server\", \"--server\", default=False\n )\n parser.add_argument(\n \"--test_races\", default=False, action=\"store_true\", help=\"Sets the flag\\\n to perform test races. This means there is no noise added to the \\\n drivers\"\n )\n parser.add_argument(\n \"-limit\", \"--limit\", default=False\n )\n\n args = parser.parse_args()\n if os.path.isdir(args.race_config):\n # use folder name to find population size\n folder = os.path.basename(os.path.abspath(args.race_config))\n popsize = int(folder)\n else:\n exit(\"Error determining population size! \\\n {} might not be a valid config folder\".format(args.race_config))\n\n # Parameters used in the ES algorithm\n ES_params = {\n 'iterations': args.iterations,\n 'population_size': popsize,\n 'standard_dev': args.standard_dev,\n 'learning_rate': args.learning_rate\n }\n # Parameters used in running torcs and the clients\n exec_params = {\n 'race_config': args.race_config,\n 'headless': not args.no_headless,\n 'server': args.server,\n 'limit': args.limit,\n 'test_races': args.test_races\n }\n\n main(args.init_model, exec_params, ES_params)\n","sub_path":"run_ES.py","file_name":"run_ES.py","file_ext":"py","file_size_in_byte":14243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"438633969","text":"#import stdin from interviewstreet\nfrom sys import stdin\n\n#find out how many terms the input has\nline = stdin.readline().strip('\\n')\nN = int(line)\n\n#create blank dictonary for counting up\ncounter = {}\n\n#for each of the items, check if we have seen the item\nfor i in xrange(N):\n term = stdin.readline().strip('\\n')\n \n #if we have, add +1 to the counter array\n if term in counter:\n counter[term] = counter[term] + 1\n \n #if we have not seen it, create the counter\n else:\n counter[term] = 1\n \n#check how many items we want to see print out\nline = stdin.readline().strip('\\n')\nprint_count = int(line)\n\n#sort items by decending order of view count\n#then in alphabetical order\nresults = sorted([(-count, term) for (term, count) in counter.items()])[:print_count]\n\n#for the terms, print them out\nfor term in results:\n print (term[1])","sub_path":"Frequent_Terms.py","file_name":"Frequent_Terms.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"49582908","text":"from observer import *\n\nimport sys\nimport os\nimport subprocess\nimport wave, struct, math, random\nfrom threading import Thread\nfrom wav_audio import *\nimport sqlite3\nconn = sqlite3.connect('frequencies.db')\nc = conn.cursor()\n\nif sys.version_info.major == 2 and sys.version_info.minor == 7 :\n print(sys.version)\n import Tkinter as tk\n import tkFileDialog as filedialog\nelif sys.version_info.major == 3 and sys.version_info.minor == 6 :\n 
print(sys.version)\n import tkinter as tk\n from tkinter import filedialog\nelse :\n print(\"Your python version is : \")\n print(sys.version_info.major,sys.version_info.minor)\n print(\"... I guess it will work !\")\n\n\nclass SoundGeneratorView(Observer):\n def __init__(self,parent,model,bg=\"white\",width=715,height=390):\n Observer.__init__(self)\n self.parent = parent\n \n self.model = model\n self.ctrl=None\n self.path=\"./GeneratedSounds\"\n self.topFrame=tk.LabelFrame(self.parent,labelanchor='n',text=\"Creation\")\n self.bottomFrame=tk.LabelFrame(self.parent,labelanchor='n',text=\"Notes générées\")\n self.bottomListFrame=tk.Frame(self.bottomFrame)\n self.createFileList(self.bottomListFrame)\n self.createPlayButton(self.bottomFrame)\n self.packing()\n \n\n def createFileList(self,frame):\n self.filesListBox = tk.Listbox(frame,bg='white')\n self.filesListBox.bind(\"\",self.checkFileList)\n self.filesListBox.pack(side=tk.LEFT)\n \n self.scrollbarDirectory = tk.Scrollbar(frame)\n self.scrollbarDirectory.pack(side=tk.LEFT, fill=tk.Y)\n\n # attach listbox to scrollbarDirectory\n self.filesListBox.config(yscrollcommand=self.scrollbarDirectory.set)\n self.scrollbarDirectory.config(command=self.filesListBox.yview)\n\n self.filesListBoxChords = tk.Listbox(frame,bg='white')\n self.filesListBoxChords.bind(\"\",self.checkFileList)\n self.filesListBoxChords.pack(side=tk.LEFT)\n\n\n self.scrollbarDirectoryChords = tk.Scrollbar(frame)\n self.scrollbarDirectoryChords.pack(side=tk.RIGHT, fill=tk.Y)\n\n # attach listbox to scrollbarDirectory\n self.filesListBoxChords.config(yscrollcommand=self.scrollbarDirectoryChords.set)\n self.scrollbarDirectoryChords.config(command=self.filesListBoxChords.yview)\n\n self.update()\n\n\n\n\n\n def createPlayButton(self,frame):\n self.playButton = tk.Button(frame,text=\"Play\",state='disable',width=20,command=self.playSound)\n\n def checkFileList(self,event=None):\n selectedFile = self.filesListBox.curselection()\n selectedChords = self.filesListBoxChords.curselection()\n if not selectedFile and not selectedChords:\n print(\"No file selected\")\n self.playButton[\"state\"]=\"disabled\"\n else :\n print(\"File selected\")\n self.playButton[\"state\"]=\"normal\"\n\n def packing(self,mainFrame=True):\n if not mainFrame :\n self.topFrame.pack(side=tk.LEFT,fill=tk.Y)\n self.bottomFrame.pack(side=tk.LEFT,padx=15,fill=tk.Y)\n self.bottomListFrame.pack(side=tk.TOP)\n self.playButton.pack(side=tk.BOTTOM,pady=15)\n else :\n # Placer la bottom frame sur la gauche pour gagner de la hauteur dans le programme piano.py\n self.topFrame.pack(side=tk.LEFT,fill=tk.Y)\n self.bottomFrame.pack(side=tk.LEFT,padx=15,fill=tk.Y)\n self.bottomListFrame.pack(side=tk.TOP)\n self.playButton.pack(side=tk.BOTTOM,pady=15)\n\n def playSound(self):\n selectedFile = self.filesListBox.curselection()\n selectedChords = self.filesListBoxChords.curselection()\n\n if not selectedFile:\n dir = self.filesListBoxChords.get(selectedChords)\n else :\n dir = self.filesListBox.get(selectedFile)\n\n self.filesListBox.select_clear(0,tk.END)\n self.filesListBoxChords.select_clear(0,tk.END)\n self.playButton[\"state\"]=\"disable\"\n print(\"Playing : \",self.path+\"/\"+dir)\n path=self.path+\"/\"+dir\n soundPlayer = SoundPlayer(path,self.model.piano) #In a thread to not stop the execution\n soundPlayer.start()\n self.model.colorPiano(dir) #Couleur sur le piano\n\n\n def update(self,subject=None):\n self.filesListBox.delete(0,tk.END)\n self.filesListBoxChords.delete(0,tk.END)\n\n\n for root, dirs, files in 
os.walk(self.path):\n for filename in files:\n print(filename)\n if(\".wav\" in filename): #Display only .wav files\n if((\"Free\" in filename)or (\"Major\" in filename) or (\"Minor\" in filename)): #Si c'est un accord\n self.filesListBoxChords.insert(tk.END,filename)\n else : \n self.filesListBox.insert(tk.END,filename)\n\n \n\nclass SoundPlayer(Thread):\n def __init__(self,file,piano):\n Thread.__init__(self)\n self.file = file\n self.piano = piano\n\n def run(self):\n subprocess.call([\"aplay\",self.file]) #In a thread to not stop the execution\n if self.piano!=None:\n for keyb in self.piano.controls:\n for elem in keyb.buttons:\n if elem[0].endswith(\"#\"):\n elem[1].config(bg=\"black\")\n else :\n elem[1].config(bg=\"white\")\n\nclass SoundGeneratorController():\n def __init__(self,parent,model,view,bg=\"white\",width=715,height=390):\n self.parent = parent\n self.view = view\n self.model = model\n self.width,self.height=width,height\n self.parent.bind(\"\",self.resize)\n\n self.ready = False\n\n self.frameNote = tk.LabelFrame(self.parent,labelanchor='n',text=\"Creation de note\")\n self.frameAccord = tk.LabelFrame(self.parent,labelanchor='n',text=\"Creation d'accord\")\n self.frameGeneration = tk.Frame(self.parent,bd =3)\n self.frameParameter = tk.LabelFrame(self.frameGeneration,labelanchor='n',text=\"Parametres\",padx=15,pady=10)\n self.radioButtonFrame = tk.Frame(self.frameGeneration)\n\n\n self.createNoteList(self.frameNote)\n self.createAccordList(self.frameAccord)\n self.createMajorMinorButton(self.radioButtonFrame)\n \n self.createSoundDurationSlider(self.frameParameter)\n self.createFolderAsking(self.frameParameter)\n \n self.createNoteButton(self.frameGeneration)\n self.createAccordButton(self.frameGeneration)\n\n def resize(self,event):\n if event:\n self.width,self.height=event.width,event.height\n # self.octaveListBox[\"width\"]=int(0.2*self.width)\n # self.noteListBox[\"width\"]=int(0.2*self.width)\n # self.accordList[\"width\"]=int(0.2*self.width)\n # self.accordListChosen[\"width\"]=int(0.2*self.width)\n\n \n def packing(self) :\n self.frameGeneration.grid(row=1,column=0,columnspan=2)\n self.frameNote.grid(row=0,column=0)\n self.frameAccord.grid(row=0,column=1)\n\n \n # Notes\n self.noteLabel.grid(row=0,column=0)\n self.noteListBox.grid(row=1,column=0)\n self.octaveLabel.grid(row=0,column=1)\n self.octaveListBox.grid(row=1,column=1)\n\n # Accords\n self.accordLabel.pack()\n self.accordList.pack(side=tk.LEFT)\n self.scrollbarAccord.pack(side=tk.LEFT, fill=tk.Y)\n\n # Generation\n self.noteWarning.grid(row=0,column=0)\n self.noteButton.grid(row=2,column=0)\n self.frameParameter.grid(row=0,column=1,rowspan=3)\n self.accordWarning.grid(row=0,column=2)\n self.radioButtonFrame.grid(row=1,column=2)\n self.accordButton.grid(row=2,column=2)\n \n\n # Parameters\n self.durationSlider.pack(side=tk.TOP)\n self.pathLabel.pack(side=tk.LEFT)\n self.directoryButton.pack(side=tk.LEFT)\n\n def createNoteList(self,frame):\n self.octaveLabel = tk.Label(frame,text=\"Octave\")\n self.octaveListBox = tk.Listbox(frame,exportselection=0)\n self.octaveListBox.bind(\"\",self.checkNoteList)\n\n self.noteLabel = tk.Label(frame,text=\"Note\")\n self.noteListBox = tk.Listbox(frame,exportselection=0)\n self.noteListBox.bind(\"\",self.checkNoteList)\n\n for item in self.model.getOctave():\n self.octaveListBox.insert(tk.END,item)\n\n for item in self.model.getNotes():\n self.noteListBox.insert(tk.END,item)\n\n def createAccordList(self,frame):\n\n self.accordLabel= tk.Label(frame,text=\"Notes\")\n 
self.accordList = tk.Listbox(frame,selectmode='multiple',exportselection=0)\n self.accordList.bind(\"\",self.checkAccordList)\n\n\n self.scrollbarAccord = tk.Scrollbar(frame)\n # attach accordList to scrollbarDirectory\n self.accordList.config(yscrollcommand=self.scrollbarAccord.set)\n self.scrollbarAccord.config(command=self.accordList.yview)\n\n\n self.updateNotesList()\n\n\n def createAccordButton(self,frame):\n self.accordLabelVar = tk.StringVar()\n self.accordLabelVar.set(\"Choisissez la tonique\")\n\n self.accordWarning=tk.Label(frame,textvariable=self.accordLabelVar,width=20,height=2)\n\n self.accordButton = tk.Button(frame,text=\"Générer un accord\",state='disable',width=20,command=self.generateSoundsChords)\n\n def createMajorMinorButton(self,frame):\n valeurs = ['Major', 'Minor','Free']\n etiquettes = ['Accord Majeur', 'Accord Mineur','Accord Libre']\n self.majorMinorVar = tk.StringVar()\n self.majorMinorVar.set(valeurs[0])\n self.model.setMajor()\n self.updateNotesList()\n\n b= tk.Radiobutton(self.radioButtonFrame, variable=self.majorMinorVar, text=etiquettes[0], value=valeurs[0])\n b.bind(\"\",self.adaptNoteAvailable)\n b.grid(row=0,column=0)\n \n b= tk.Radiobutton(self.radioButtonFrame, variable=self.majorMinorVar, text=etiquettes[1], value=valeurs[1])\n b.bind(\"\",self.adaptNoteAvailable)\n b.grid(row=0,column=1)\n \n b= tk.Radiobutton(self.radioButtonFrame, variable=self.majorMinorVar, text=etiquettes[2], value=valeurs[2])\n b.bind(\"\",self.adaptNoteAvailable)\n b.grid(row=1,column=0,columnspan=2)\n\n \n def adaptNoteAvailable(self,event=None):\n\n self.model.resetSelection()\n if (self.majorMinorVar.get()==\"Major\"):\n print(\"Major Chords selected\")\n self.model.setMajor()\n self.updateNotesList()\n elif (self.majorMinorVar.get()==\"Minor\"):\n print(\"Minor Chords selected\")\n self.model.setMinor()\n self.updateNotesList()\n else :\n print(\"Free Chords selected\")\n self.model.setFree()\n self.updateNotesList()\n\n\n\n def createNoteButton(self,frame):\n self.noteLabelVar = tk.StringVar()\n self.noteLabelVar.set(\"Selectionnez une note\\n et une octave\")\n\n self.noteWarning=tk.Label(frame,textvariable=self.noteLabelVar,width=20,height=2)\n\n self.noteButton = tk.Button(frame,text=\"Générer une note\",state='disable',width=20,command=self.generateSound)\n\n\n def updateNotesList(self):\n newNotes = self.model.getCurrentNotes()\n self.accordList.delete(0,tk.END)\n\n #don't display all of the note, chords can't begin with the lasts notes\n\n if len(newNotes)>5: #Au moment de choisir la tonique\n for i in range(len(newNotes)-7):\n self.accordList.insert(tk.END,newNotes[i]) \n \n else : #Si on est dans le cas où seulement 3 notes sont proposé pour finir l'accord\n for item in newNotes:\n self.accordList.insert(tk.END,item)\n\n\n #check selectedItem in Model\n selectedNotes = self.model.getNoteListChord()\n for i in range(self.accordList.size()):\n for selectedNote in selectedNotes:\n if (self.accordList.get(i) == selectedNote):\n self.accordList.selection_set(i)\n\n\n def checkAccordList(self,event=None):\n noteList = []\n sel = self.accordList.curselection()\n for i in sel:\n noteList.append(self.accordList.get(i))\n self.model.checkAccord(noteList)\n self.updateNotesList()\n\n #Update Label \n listFromModel = self.model.getNoteListChord()\n if(len(listFromModel)==3):\n txt=\"Vous pouvez générez\\nun accord\"\n self.accordButton[\"state\"]=\"normal\"\n elif(len(listFromModel)==2):\n txt=\"Voici les deux notes\\npour completer l'accord\"\n 
self.accordButton[\"state\"]=\"disable\"\n elif(len(listFromModel)==1):\n txt=\"Voici les deux notes\\npour completer l'accord\"\n self.accordButton[\"state\"]=\"disable\"\n elif(len(listFromModel)==0):\n txt=\"Choisissez la tonique\"\n self.accordButton[\"state\"]=\"disable\"\n\n self.accordLabelVar.set(txt)\n\n def checkNoteList(self,event=None):\n selectedNote = self.noteListBox.curselection()\n selectedOctave = self.octaveListBox.curselection()\n okOctave = False\n okNote = False\n\n if not selectedNote : #empty ?\n print(\"No note selected\")\n else :\n okNote=True\n\n if not selectedOctave :\n print(\"No note selected\")\n else :\n okOctave=True\n\n if(okNote and not okOctave):\n self.noteLabelVar.set(\"Selectionnez une octave\")\n self.noteButton[\"state\"]='disabled'\n elif(okOctave and not okNote):\n self.noteLabelVar.set(\"Selectionnez une note\")\n self.noteButton[\"state\"]='disabled'\n elif(okNote and okOctave):\n self.noteLabelVar.set(\"Vous pouvez créer\\nune note\")\n self.noteButton[\"state\"]='normal'\n else:\n self.noteLabelVar.set(\"Selectionnez une note\\n et une octave\")\n self.noteButton[\"state\"]='disabled'\n\n def createSoundDurationSlider(self,frame):\n self.duration = tk.DoubleVar()\n self.duration.set(0.5)\n self.durationSlider=tk.Scale(frame,variable=self.duration,label=\"Duration\",orient=\"horizontal\",resolution=0.1,length=250,from_=0.1,to=3.1,tickinterval=0.5)\n\n def generateSound(self):\n # destinationFolder = \"GeneratedSounds/\"\n destinationFolder = self.completePath\n degree = self.octaveListBox.get(self.octaveListBox.curselection())\n name = self.noteListBox.get(self.noteListBox.curselection())\n duration = self.duration.get()\n self.model.generateNote(degree,name,duration*1000,folder=destinationFolder)\n\n self.noteListBox.select_clear(0,tk.END)\n self.octaveListBox.select_clear(0,tk.END)\n\n self.noteLabelVar.set(\"Generation terminée\")\n\n self.noteWarning.after(1500, self.checkNoteList)\n\n\n def generateSoundsChords(self):\n self.model.generateChords(self.model.noteListChord[0],self.model.noteListChord[1],self.model.noteListChord[2],self.model.major,self.model.minor,self.completePath)\n self.accordList.select_clear(0,tk.END)\n self.model.resetSelection()\n # self.accordListChosen.select_clear(0,tk.END)\n\n self.accordLabelVar.set(\"Generation terminée\")\n\n self.accordWarning.after(1500, self.checkAccordList)\n\n\n def createFolderAsking(self,frame):\n self.displayedPath = tk.StringVar()\n path = os.path.dirname(os.path.abspath(__file__))\n path += \"/GeneratedSounds\"\n self.completePath = path\n self.displayedPath.set(path[-26:]) #set the 26 last char\n self.pathLabel = tk.Label(frame,textvariable=self.displayedPath,bg=\"white\",width=26)\n\n self.directoryButton = tk.Button(frame,text=\"Directory\")\n self.directoryButton.bind(\"\",self.askDir)\n\n\n def askDir(self,event):\n path = filedialog.askdirectory()\n print(path)\n if not path: #if empty keep the last one\n path = self.completePath\n print(\"No path selected, keep the old one\")\n self.completePath=path\n self.displayedPath.set(path[-26:])\n self.view.path=path\n self.view.update()\n\n\nclass SoundGeneratorModel(Subject):\n def __init__(self,piano=None):\n Subject.__init__(self)\n\n self.piano=piano\n self.octaves = [\"1\",\"2\",\"3\",\"4\",\"5\"]\n self.notes = [\"C\",\"C#\",\"D\",\"D#\",\"E\",\"F\",\"F#\",\"G\",\"G#\",\"A\",\"A#\",\"B\"]\n self.currentNotes = self.getAllNotes()\n self.major=True\n self.minor=False\n self.noteListChord=[]\n\n\n def getOctave(self):\n return 
self.octaves\n \n def getNoteListChord(self):\n return self.noteListChord\n \n def resetSelection(self):\n self.noteListChord = []\n \n def getNotes(self):\n return self.notes\n\n def getCurrentNotes(self):\n return self.currentNotes\n \n def getAllNotes(self):\n noteList = []\n for octave in self.octaves:\n for note in self.notes:\n s = note + octave\n noteList.append(s)\n return noteList\n\n\n def setMajor(self):\n self.major=True\n self.minor=False\n self.currentNotes=self.getAllNotes()\n self.notify()\n \n def setMinor(self):\n self.major=False\n self.minor=True\n self.currentNotes=self.getAllNotes()\n self.notify()\n \n def setFree(self):\n self.major=False\n self.minor=False\n self.currentNotes=self.getAllNotes()\n self.notify()\n\n def forceNote(self,noteList):\n self.currentNotes=noteList\n\n def checkAccord(self,notes):\n print(\"Checking chord\")\n\n print(self.major)\n print(self.minor)\n\n previousLen=len(self.noteListChord)\n\n\n if(len(self.noteListChord)==0 and len(notes)>0): #Si la liste est vide\n self.noteListChord.append(notes[0])\n print(\"Ajout de la premiere note\")\n\n elif(len(self.noteListChord)len(notes)): #l'utilisateur a retiré un element\n self.noteListChord.pop()\n print(\"l'utilisateur a retiré un element\")\n \n if(self.noteListChord==[]):\n if(self.major):\n self.setMajor()\n elif(self.minor): \n self.setMinor()\n else :\n self.setFree()\n \n print(self.noteListChord)\n\n if(len(self.noteListChord)==1 and previousLen==0):\n currentNoteList=self.getCurrentNotes()\n indexNote0 = currentNoteList.index(self.noteListChord[0])\n authNoteList = []\n if (self.major):\n authNoteList.append(currentNoteList[indexNote0])\n authNoteList.append(currentNoteList[indexNote0+4])\n authNoteList.append(currentNoteList[indexNote0+7])\n elif(self.minor) :\n authNoteList.append(currentNoteList[indexNote0])\n authNoteList.append(currentNoteList[indexNote0+3])\n authNoteList.append(currentNoteList[indexNote0+7])\n else:\n authNoteList=self.getAllNotes()\n\n self.forceNote(authNoteList)\n\n\n def getFrequencyFromNote(self,octave,note): \n translateList = [\"C\",\"CSharp\",\"D\",\"DSharp\",\"E\",\"F\",\"FSharp\",\"G\",\"GSharp\",\"A\",\"ASharp\",\"B\"]\n originalList = [\"C\",\"C#\",\"D\",\"D#\",\"E\",\"F\",\"F#\",\"G\",\"G#\",\"A\",\"A#\",\"B\"]\n \n #On traduit de la notation C# à CSharp\n noteIndex = originalList.index(note)\n translateNote = translateList[noteIndex]\n\n #on cherche cette note dans la base de donnée a la bonne octave\n query = 'SELECT '+translateNote+' FROM frequencies WHERE octave=?'\n\n c.execute(query,(octave,))\n frequency = c.fetchone()\n\n # print(\"Frequency = \",frequency)\n return frequency[0] #tuple\n\n\n def generateNote(self,degree,name,duration=1000,sampling=44100,folder=\"GeneratedSounds\") :\n if type(degree) != str :\n degree=str(degree)\n\n frequency = self.getFrequencyFromNote(degree,name)\n file=folder+\"/\"+name+degree+\".wav\"\n sound=wave.open(file,'w')\n nb_channels = 2 # stéreo\n nb_bytes = 1 # taille d'un échantillon : 1 octet = 8 bits\n left_level = 1 # niveau canal de gauche (0 à 1) ? '))\n right_level= 1 # niveau canal de droite (0 à 1) ? 
'))\n nb_samples = int((duration/1000)*sampling)\n params = (nb_channels,nb_bytes,sampling,nb_samples,'NONE','not compressed')\n sound.setparams(params) # création de l'en-tête (44 octets)\n\n # niveau max dans l'onde positive : +1 -> 255 (0xFF)\n # niveau max dans l'onde négative : -1 -> 0 (0x00)\n # niveau sonore nul : 0 -> 127.5 (0x80 en valeur arrondi)\n\n left_magnitude = 127.5*left_level\n right_magnitude= 127.5*right_level\n\n for i in range(0,nb_samples):\n # canal gauche\n # 127.5 + 0.5 pour arrondir à l'entier le plus proche\n left_value = wave.struct.pack('B',int(128.0 + left_magnitude*math.sin(2.0*math.pi*frequency*i/sampling)))\n # canal droit\n right_value = wave.struct.pack('B',int(128.0 + right_magnitude*math.sin(2.0*math.pi*frequency*i/sampling)))\n sound.writeframes(left_value + right_value) # écriture frame\n\n sound.close()\n self.notify()\n\n\n def generateChords(self,note1,note2,note3,major,minor,destFolder):\n print(\"Generate Sounds\")\n print(\"Note 1 :\",note1)\n print(\"Note 2 :\",note2)\n print(\"Note 3 :\",note3)\n if major :\n fileName = destFolder+\"/\"+note1+\"Major.wav\"\n elif minor :\n fileName = destFolder+\"/\"+note1+\"Minor.wav\"\n else :\n fileName = destFolder+\"/\"+note1+\"Free.wav\"\n\n print(\"Name:\",fileName)\n\n origFolder = \"Sounds/\"\n note1=origFolder+str(note1)+'.wav'\n note2=origFolder+str(note2)+'.wav'\n note3=origFolder+str(note3)+'.wav'\n\n data1,framerate1 = open_wav(note1)\n data2,framerate2 = open_wav(note2)\n data3,framerate3 = open_wav(note3)\n\n data = [] # liste des échantillons de l'accord\n\n for i in range(len(data1)):\n data.append((1/3)*(data1[i]+data2[i]+data3[i])) # calcul de la moyenne de chacun des échantillons de même index issus des trois listes \n\n save_wav(fileName,data,framerate1)\n\n self.notify()\n\n\n def colorPiano(self,note):\n if self.piano!=None:\n #Extract octave and note\n #remove .wav\n note=note[:-4]\n if note[-1:] in [\"1\",\"2\",\"3\",\"4\",\"5\"]: #only display note, not chords\n oct=int(note[-1:])\n #remove octave from note string\n note=note[:-1]\n keyb = self.piano.controls[oct-1]\n for elem in keyb.buttons:\n if elem[0]==note:\n elem[1].config(bg=\"coral\")\n\n else :\n octaves = [\"1\",\"2\",\"3\",\"4\",\"5\"]\n notes = [\"C\",\"C#\",\"D\",\"D#\",\"E\",\"F\",\"F#\",\"G\",\"G#\",\"A\",\"A#\",\"B\"]\n noteList = []\n for o in octaves:\n for n in notes:\n noteList.append(n+o)\n\n print(noteList)\n major=False\n free=False\n if \"Free\" in note:\n #Accord libre, on ne peux pas afficher les notes\n free=True\n elif \"Major\" in note:\n major=True\n\n if not free:\n firstNote = note[:-5]\n \n if(major):\n #Si l'accord est majeur\n print('Major :',firstNote)\n indexInNoteList = noteList.index(firstNote)\n\n note1=firstNote\n note2=noteList[indexInNoteList+4]\n note3=noteList[indexInNoteList+7]\n oct1 =int(note1[-1:])\n oct2 =int(note2[-1:])\n oct3 =int(note3[-1:])\n note1=note1[:-1]\n note2=note2[:-1]\n note3=note3[:-1]\n \n\n keyb = self.piano.controls[oct1-1]\n for elem in keyb.buttons:\n print(elem)\n if elem[0]==note1:\n elem[1].config(bg=\"coral\")\n \n keyb = self.piano.controls[oct2-1]\n for elem in keyb.buttons:\n if elem[0]==note2:\n elem[1].config(bg=\"coral\")\n\n keyb = self.piano.controls[oct3-1]\n for elem in keyb.buttons:\n if elem[0]==note3:\n elem[1].config(bg=\"coral\")\n\n\n elif (not major):\n #Si l'accord est mineur\n print('Minor :',firstNote)\n\n indexInNoteList = noteList.index(firstNote)\n\n note1=firstNote\n note2=noteList[indexInNoteList+3]\n 
note3=noteList[indexInNoteList+7]\n oct1 =int(note1[-1:])\n oct2 =int(note2[-1:])\n oct3 =int(note3[-1:])\n note1=note1[:-1]\n note2=note2[:-1]\n note3=note3[:-1]\n \n\n keyb = self.piano.controls[oct1-1]\n for elem in keyb.buttons:\n print(elem)\n if elem[0]==note1:\n elem[1].config(bg=\"coral\")\n \n keyb = self.piano.controls[oct2-1]\n for elem in keyb.buttons:\n if elem[0]==note2:\n elem[1].config(bg=\"coral\")\n\n keyb = self.piano.controls[oct3-1]\n for elem in keyb.buttons:\n if elem[0]==note3:\n elem[1].config(bg=\"coral\")\n\n \n\nif __name__ == \"__main__\" :\n root=tk.Tk()\n root.title(\"Vue Creation Note\")\n root.minsize(1170, 420)\n model=SoundGeneratorModel()\n view=SoundGeneratorView(root,model)\n view.packing()\n ctrl=SoundGeneratorController(view.topFrame,model,view)\n ctrl.packing()\n model.attach(view)\n # model.attach(view)\n\n # view.update(model)\n root.mainloop()\n\n # model=SoundGeneratorModel()\n # model.getAllNotes()","sub_path":"generatorVMC.py","file_name":"generatorVMC.py","file_ext":"py","file_size_in_byte":27370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"571885172","text":"#! usr/bin/env python3\n# -*- coding: utf-8 -*-\n# author: Kwinner Chen\n# python: v 3.6.4\n\n\nfrom manager import list_work_thread, task_woker_thread_maker, storage_process\nfrom storage import Oracle\nfrom multiprocessing import Queue, Process\nfrom threading import Thread\n# from IPPool_66.IPPool import IPPool\nfrom cache import Cache\nfrom task_manager_server import get_queue, run_task_server\nimport config\nimport os\n\n\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\n\n# def proxy_pool(q_proxy, pool):\n# while True:\n# if not q_proxy.full():\n# proxy = pool.get_ip\n# q_proxy.put(proxy)\n\n\nif __name__ == '__main__':\n # q_proxy = Queue(30) # 代理列队\n # 关于队列,分布时如果使用队列,则必须使用multiprocessor中的Queue。\n # 运行一个任务列队的服务进程。\n p_task = Process(target=run_task_server, args=((config.TASK_KEY['host'], config.TASK_KEY['port']), config.TASK_KEY['authorkey']))\n p_task.start()\n\n q_next_url = get_queue(addr=(config.TASK_KEY['host'], config.TASK_KEY['port']), authkey=config.TASK_KEY['authorkey'])\n q_content_task = Queue(config.NEWS_QUEUE_NUM) # 新闻详情列队,需要分布式,把次列队发布出去。\n q_storage_task = Queue(config.RESAULT_QUEUE_NUM) # 结果列队,待存\n start_url = config.START_URL\n agent_pool = config.AGENT_POOL\n delay = config.DELAY\n table_name = config.TABLE_NAME\n key_name = config.KEY_NAME\n\n thread_num = config.THREAD_NUM\n thread_list = []\n\n # ippool = IPPool()\n cac = Cache()\n db = Oracle(config.DATABASE['user'], config.DATABASE['password'], config.DATABASE['host'])\n\n # p_proxy = Process(target=proxy_pool, args=(q_proxy, ippool)) # 开启代理池进程\n # p_proxy.start()\n\n # while True:\n # if q_proxy.full():\n # break\n\n # 新闻列表线程\n # url是列表的起始链接\n list_work_thread(cac, key_name, q_content_task, q_next_url, thread_list, {'agent_pool':agent_pool, 'delay':delay}) # 新闻列表线程\n task_woker_thread_maker(thread_num, thread_list, q_content_task, q_storage_task, {'agent_pool':agent_pool, 'delay':delay}) # 新闻内容线程\n\n p_storage = Thread(target=storage_process, args=(db, q_storage_task, table_name))\n p_storage.start()\n\n\n # 向q_next_url添加启动链接\n for url in config.START_URL:\n q_next_url.put(url)\n\n\n for t in thread_list:\n if t.name == 'news_list_thread':\n t.join()\n thread_list.remove(t)\n\n\n # 向详情线程添加终止标识\n for i in range(thread_num):\n q_content_task.put(None)\n\n\n # 待详情线程结束\n for t in thread_list:\n t.join()\n\n\n # 向存储线程添加结束标识\n 
q_storage_task.put(None)\n\n # Wait for the storage thread to finish\n p_storage.join()\n\n # Terminate the proxy pool process\n # p_proxy.terminate()\n print('Crawling finished!')\n\n\n\n \n","sub_path":"新闻爬虫/news_spider/凤凰汽车v1.1.1_kettle/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"217416095","text":"from tkinter import *\r\ndef get_values():\r\n print(f\"Foodservices: {foodservicevalues.get()}\")\r\nroot=Tk()\r\nroot.geometry(\"344x244\")\r\nLabel(root,text=\"Welcome to Travel Agency\",font=\"comicsansms 19 bold\",pady=15).grid(row=0,column=2)\r\nname=Label(root,text=\"Name\")\r\nphone=Label(root,text=\"Phone Number\")\r\ngender=Label(root,text=\"Gender\")\r\nemergency=Label(root,text=\"Emergency Contact\")\r\npayment=Label(root,text=\"Payment Mode\")\r\n\r\nname.grid(row=1,column=1)\r\nphone.grid(row=2,column=1)\r\ngender.grid(row=3,column=1)\r\nemergency.grid(row=4,column=1)\r\npayment.grid(row=5,column=1)\r\n\r\nnamevalues=StringVar()\r\nphonevalues=StringVar()\r\ngendervalues=StringVar()\r\nemergencyvalues=StringVar()\r\npaymentvalues=StringVar()\r\nfoodservicevalues=IntVar() #for checkbox\r\n\r\nnameentry=Entry(root,textvariable=namevalues).grid(row=1,column=2)\r\nphoneentry=Entry(root,textvariable=phonevalues).grid(row=2,column=2)\r\ngenderentry=Entry(root,textvariable=gendervalues).grid(row=3,column=2)\r\nemergencyentry=Entry(root,textvariable=emergencyvalues).grid(row=4,column=2)\r\npaymententry=Entry(root,textvariable=paymentvalues).grid(row=5,column=2)\r\n\r\nfoodservice=Checkbutton(text=\"Want to add meals?\",variable=foodservicevalues)\r\nfoodservice.grid(row=6,column=2)\r\nButton(text=\"Submit\",command=get_values,padx=10).grid(column=2)\r\nroot.mainloop()","sub_path":"Tkinter/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"453246102","text":"import sys\nimport sqlite3\n\n# establish database connection\nconn = sqlite3.connect('database/app_data.db')\n\ndef view(arg):\n\t# a list of dictionaries\n\tresult_dict = []\n\n\t# if the person name was specified\n\tif arg[\"\"]:\n\t\tname = format(arg[\"\"])\n\t\tview_result = conn.execute(\"SELECT Phone_number, Recipient, Message_body from Messages WHERE Recipient LIKE '%\"+name+\"%'\")\t\t\n\n\t\t# add results to list\n\t\tfor x in view_result:\n\t\t\tresult_dict.append({x[0]: {str(x[1]): str(x[2])} })\n\n\t\tif len(result_dict) != 0:\n\t\t\t# there was a result found from query\n\t\t\treturn \"Displaying sent messages to contacts matching '%s':\\n %s\" %(name, result_dict)\n\n\telse:\n\t\t# get all the messages sent\n\t\tview_result = conn.execute(\"SELECT Phone_number, Recipient, Message_body from Messages\")\n\n\t\t# add results to list\n\t\tfor x in view_result:\n\t\t\tresult_dict.append({x[0]: {str(x[1]): str(x[2])} })\n\n\t\tif len(result_dict) != 0:\n\t\t\t# there was a result found from query\n\t\t\treturn \"Displaying all sent messages:\\n %s\" %(result_dict)\n\n\t# there are no messages sent yet\n\treturn \"You have not sent any messages yet. 
Add a few contacts to get started :)\"","sub_path":"view_messages/viewMessages.py","file_name":"viewMessages.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161352864","text":"# Copyright (c) 2012-2013, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom aws import Action as BaseAction\nfrom aws import BaseARN\n\nservice_name = 'Amazon Lookout for Vision'\nprefix = 'lookoutvision'\n\n\nclass Action(BaseAction):\n def __init__(self, action=None):\n sup = super(Action, self)\n sup.__init__(prefix, action)\n\n\nclass ARN(BaseARN):\n def __init__(self, resource='', region='', account=''):\n sup = super(ARN, self)\n sup.__init__(service=prefix, resource=resource, region=region,\n account=account)\n\n\nCreateDataset = Action('CreateDataset')\nCreateModel = Action('CreateModel')\nCreateProject = Action('CreateProject')\nDeleteDataset = Action('DeleteDataset')\nDeleteModel = Action('DeleteModel')\nDeleteProject = Action('DeleteProject')\nDescribeDataset = Action('DescribeDataset')\nDescribeModel = Action('DescribeModel')\nDescribeProject = Action('DescribeProject')\nDescribeTrialDetection = Action('DescribeTrialDetection')\nDetectAnomalies = Action('DetectAnomalies')\nListDatasetEntries = Action('ListDatasetEntries')\nListModels = Action('ListModels')\nListProjects = Action('ListProjects')\nListTrialDetections = Action('ListTrialDetections')\nStartModel = Action('StartModel')\nStartTrialDetection = Action('StartTrialDetection')\nStopModel = Action('StopModel')\nUpdateDatasetEntries = Action('UpdateDatasetEntries')\n","sub_path":"awacs/lookoutvision.py","file_name":"lookoutvision.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"204581378","text":"import os\nimport random\nfrom colorama import Fore, Back, Style\n\njogarNovamente = \"s\"\njogadas = 0\nquemJoga = 2 # 1 = CPU - 2 = Jogador\nmaxJogadas = 9\nvit = \"n\"\nvelha = [\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"]\n]\n\ndef tela():\n global velha\n global jogadas\n os.system(\"cls\")\n print(\" 0 1 2\")\n print(\"0: \" + velha[0][0] + \" | \" + velha[0][1] + \" | \" + velha[0][2])\n print(\" -----------\")\n print(\"1: \" + velha[1][0] + \" | \" + velha[1][1] + \" | \" + velha[1][2])\n print(\" -----------\")\n print(\"2: \" + velha[2][0] + \" | \" + velha[2][1] + \" | \" + velha[2][2])\n print(\"jogadas: \" + Fore.GREEN + str(jogadas) + Fore.RESET)\n\ndef jogadorJoga():\n global jogadas\n global quemJoga\n global vit\n global maxJogadas\n if quemJoga == 2 and jogadas < maxJogadas:\n try: \n l = int(input(\"Linha..: \"))\n c = int(input(\"Coluna.: \"))\n while velha[1][c] != \" \":\n l = int(input(\"Linha..: \"))\n c = int(input(\"Coluna.: \"))\n velha[1][c] = \"X\"\n quemJoga = 1\n jogadas += 1\n except:\n print(\"Jogada inválida!\")\n os.system(\"pause\")\n #vit = \"n\"\n\ndef cpuJoga():\n global jogadas\n global quemJoga\n global maxJogadas\n if quemJoga == 1 and jogadas < maxJogadas:\n l = random.randrange(0,3)\n c = random.randrange(0,3)\n while velha[1][c] != \" \":\n l = random.randrange(0,3)\n c = random.randrange(0,3)\n velha[1][c] = \"O\"\n jogadas += 1\n quemJoga = 2\n\ndef verificarVitoria():\n global jogoDaVelha\n vitoria = \"n\"\n simbolos = [\"X\",\"O\"]\n for s in simbolos: \n vitoria = \"n\"\n # Verificar Linhas\n il = ic = 0 # indice de linha e indice de coluna 
recebem 0\n while il < 3:\n soma = 0\n ic = 0\n while ic < 3:\n if(velha[il][ic] == s):\n soma += 1\n ic += 1\n if(soma == 3):\n vitoria = s\n break\n il += 1\n if(vitoria != \"n\"):\n break\n # Check the columns\n il = ic = 0 # row index and column index start at 0\n while ic < 3:\n soma = 0\n il = 0\n while il < 3:\n if(velha[il][ic] == s):\n soma += 1\n il += 1\n if(soma == 3):\n vitoria = s\n break\n ic += 1\n if(vitoria != \"n\"):\n break\n # Check diagonal 1\n soma = 0\n idiag = 0\n while idiag < 3:\n if(velha[idiag][idiag] == s):\n soma += 1\n idiag += 1\n if(soma == 3):\n vitoria = s\n break\n # Check diagonal 2\n soma = 0\n idiagl = 0\n idiagc = 2\n while idiagc >= 0:\n if(velha[idiagl][idiagc] == s):\n soma += 1\n idiagl += 1\n idiagc -= 1\n if(soma == 3):\n vitoria = s\n break\n return vitoria\n\ndef redefinir():\n global velha\n global jogadas\n global quemJoga\n global maxJogadas\n global vit\n jogadas = 0\n quemJoga = 2 # 1 = CPU - 2 = Player\n maxJogadas = 9\n vit = \"n\"\n velha = [\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"]\n ] \n\nwhile(jogarNovamente == \"s\" or jogarNovamente == \"S\"):\n while True:\n tela()\n jogadorJoga()\n cpuJoga()\n tela()\n vit = verificarVitoria()\n if(vit != \"n\") or (jogadas >= maxJogadas):\n break \n\n print(Fore.RED + \"GAME OVER\" + Fore.YELLOW)\n if(vit == \"X\" or vit == \"O\"):\n print(\"Result: Player \" + vit + \" won\")\n else:\n print(\"Result: Draw\") \n jogarNovamente = input(Fore.BLUE + \"Play again? [s/n]: \" + Fore.RESET)\n redefinir()\n \n\n","sub_path":"jogo_da_velha.py","file_name":"jogo_da_velha.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"52762176","text":"import cv2\nimport numpy as np\nimport os \nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read('trainer.yml')\n\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');\nfont = cv2.FONT_HERSHEY_PLAIN\n\n# starting id\nid = 0\n# id names\n# 0-None 1-Suyaib 2-Chris 3-Mignote\nnames = ['None', 'Suyaib', 'Chris', 'Mignote'] \n\ncam = cv2.VideoCapture(0)\ncam.set(3,640)\ncam.set(4,480)\n\ndef act_cam():\n global id\n while True:\n ret, img =cam.read()\n\n # Convert into grayscale\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n \n # Detect faces\n faces = faceCascade.detectMultiScale(gray, 1.1, 4)\n\n # Draw rectangle around the faces\n for(x,y,w,h) in faces:\n cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)\n # Prediction function\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n \n # This is for displaying the correct name\n # confidence is actually opposite, so 100-confidence is printed\n if (confidence < 100):\n id = names[id]\n confidence = \"{0}%\".format(round(100 - confidence))\n else:\n id = \"unknown\"\n confidence = \"{0}%\".format(round(100 - confidence))\n cv2.putText(img, str(id), (x+5,y-5), font, 2, (255,255,255), 2)\n cv2.putText(img, str(confidence), (x+5,y+h-5), font, 2, (0,255,0), 2) \n\n # Display the output\n cv2.imshow('camera',img) \n\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n cam.release()\n cv2.destroyAllWindows()\n\n# new code for voice command to work \nif __name__ == \"__main__\":\n act_cam()","sub_path":"face_recognition_FINAL/facerecognition.py","file_name":"facerecognition.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"150073684","text":"import csv\nfrom collections import defaultdict\nimport time\nimport heapq\nimport numpy\nfrom collections import namedtuple\n\ndef raw_data():\n bingfa_dict = {}\n with open(\"D://data/seeds/spidertask_data.csv\",\"r\",encoding=\"gbk\")as f1:\n dict_lichu = csv.DictReader(f1)\n for i in dict_lichu:\n bingfa_dict[i[\"任务名称\"]] =i[\"并发\"]\n\n with open(\"D://data/seeds/spidertask_output.csv\",\"r\",encoding=\"gbk\")as f:\n data = csv.reader(f,delimiter=',')\n header = next(data)\n\n task_dict = defaultdict(list)\n\n for i in data:\n\n task_name = i[0]\n\n start_time = int(time.mktime(time.strptime(i[1],\"%Y/%m/%d %H:%M:%S\")))\n end_time = int(time.mktime(time.strptime(i[2],\"%Y/%m/%d %H:%M:%S\")))\n\n time_yunxin = end_time-start_time\n schedule_data = namedtuple(\"schedule\",[\"start\",\"end\",\"concurrent\"])#为这个任务中的基本数据\n task_dict[task_name].append(schedule_data(start_time,end_time,bingfa_dict.get(task_name,0)))#一个任务的多次调度数据\n return task_dict\ndef concurrent_time(timestamp,data):#根据时间点是否在单次调度的时间内返回并发数\n if timestamp >int(data.start) and timestamp < int(data.end):\n return int(data.concurrent)\n else:\n return 0\ndef timestamp_get(time_now = time.time()):\n year = str(time.gmtime(time_now).tm_year)\n mouth = str(time.gmtime(time_now).tm_mon)\n day = str(time.gmtime(time_now).tm_mday)\n hour = str(time.gmtime(time_now).tm_hour)\n timestamp = time.mktime(time.strptime(\"{}-{}-{} {}\".format(year,mouth,day,hour),\"%Y-%m-%d %H\"))\n timestamp_list = []\n for i in range(24):\n timestamp_list.append(timestamp)\n timestamp -= 3600\n return timestamp_list\ntimestamp_iter = timestamp_get()\n\ntask_dict = raw_data()\ntotle_concurrent = 0\ntotle_concurrent_dict = defaultdict(int)\nfor i,y in task_dict.items():\n print(i)\n\n now_data = max(y,key=lambda x:x.end)\n three_last = heapq.nlargest(3,y,key=lambda x:x.end)\n mead_runtime = int(numpy.mean([i.end-i.start for i in three_last]))#取最近三次的平均时间\n concurrent_last = now_data.concurrent\n for j in y:\n print(j)\n print(1)\n for time_i in timestamp_iter:\n concurrent = concurrent_time(int(time_i),j)\n if concurrent != 0:\n print(concurrent)\n totle_concurrent_dict[time_i] += concurrent\n\nprint(totle_concurrent_dict)\n\n\n\n","sub_path":"test/requests_test/example/任务管理.py","file_name":"任务管理.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"565349541","text":"import RPi.GPIO as GPIO\nimport time\nlist=[(8508,0.98),(8485,1.22),(8501,1.14),(8498.5,0.9),(8588,1.87)]\ndef readcount(DT,SCK):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(DT,GPIO.IN)\n GPIO.setup(SCK,GPIO.OUT,initial=0)\n count=0\n GPIO.setup(DT,GPIO.OUT)\n GPIO.output(DT,1)\n GPIO.output(SCK,0)\n GPIO.setup(DT,GPIO.IN)\n while GPIO.input(DT) ==1:\n pass\n for i in range(24):\n GPIO.output(SCK,1)\n GPIO.output(SCK,0)\n count = (count << 1) | GPIO.input(DT)\n GPIO.output(SCK,1)\n GPIO.output(SCK,0)\n if (count&(1<<(24-1)))!=0:\n count=count-(1<<24)\n count=count^0x800000\n return count\nif __name__=='__main__':\n count0=readcount(17,4)\n count1=readcount(26,19)\n count2=readcount(25,24)\n count3=readcount(27,22)\n count4=readcount(16,20)\n #count5=readcount(13,6) \n print(\"0号桶重量:\",-round((count0/1000+list[0][0])/list[0][1]),\"g\")\n print(\"1号桶重量:\",-round((count1/1000+list[1][0])/list[1][1]),\"g\")\n print(\"2号桶重量:\",-round((count2/1000+list[2][0])/list[2][1]),\"g\")\n print(\"3号桶重量:\",-round((count3/1000+list[3][0])/list[3][1]),\"g\")\n 
print(\"4号桶重量:\",round((count4/1000-list[4][0])/list[4][1]),\"g\")\n #print(\"5号桶重量:\",-round((count5/1000)),\"g\") \n\n\n","sub_path":"HX711.py","file_name":"HX711.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"423516145","text":"# import modules\r\nimport numpy as np\r\nimport os\r\nfrom PIL import Image\r\nimport matplotlib.pylab as plt\r\n\r\nfor path, folder, files in os.walk('photos'):\r\n pass\r\nlen(files)\r\n# declare variables\r\nimg_arr = np.zeros([len(files), 150, 200, 3])\r\nnew_shape = (200, 150)\r\n# define functions\r\ndef resize_image():\r\n if os.path.exists('resize_photos') == False:\r\n os.mkdir('resize_photos')\r\n i = 0\r\n for img in files:\r\n im = Image.open('photos\\\\'+img)\r\n resize_img = im.resize(new_shape)\r\n resize_img.save('resize_photos'+'\\\\'+img,\"JPEG\")\r\n resize_arr = plt.imread('resize_photos'+'\\\\'+img)\r\n v = img_arr[i] = resize_arr\r\n i += 1\r\ndef checker():\r\n try:\r\n for img in files:\r\n image = plt.imread('photos\\\\'+img)\r\n except OSError as e:\r\n print(f\"{e} : Error occurred because other extension file exits in this folder : {os.getcwd()}\\\\photos\")\r\n\r\nif __name__ == \"__main__\":\r\n checker()\r\n resize_image()","sub_path":"Image Resizer Program/image_resizer.py","file_name":"image_resizer.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"574554511","text":"import re\nimport string\n\n# retrieves the training data and splits it into a list of documents\ndef getTrainDocs():\n file = open('data.train')\n text = file.read()\n return text.split('\\n')[0:-1]\n\n# retrives the list of pairs of plagiarised documents from the specified file\ndef getTruthPairs(fileName):\n file = open(fileName)\n text = file.read()\n lines = text.split('\\n')\n lines = lines[0:-1]\n pairs = []\n for line in lines:\n pair = (line.split())\n pair = (int(pair[0][1:]),int(pair[1][1:]))\n pairs.append(pair)\n return pairs\n \n# removes punctuation and whitespace from a string\ndef removePunc(text):\n text = text.lower()\n punct = re.compile('[\\t\\r\\\\\\~\\`\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\_\\-\\+\\=\\[\\]\\{\\}\\|\\:\\;\\\"\\'\\<\\>\\\\\\,\\.\\?/]|[\\x7f-\\xff]')\n result = re.sub(punct,' ',text)\n return result\n\n# removes the doc id from the document\ndef removeID(doc, doc_type):\n digits = '0123456789'\n if doc_type == 1:\n if doc[0] == 't':\n doc = doc[1:]\n while doc[0] in digits:\n doc = doc[1:]\n elif doc_type == 2:\n while doc[0] in digits:\n doc = doc[1:]\n\n return doc\n","sub_path":"ass3/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"363563840","text":"#!/usr/bin/env python\n#encoding: utf-8\nimport sys\n# import colored\n# import imp\n# imp.reload(sys)\n# # sys.setdefaultencoding('utf-8')\nsys.path.append(\"../\")\n\nfrom model_trainer.base_task import RNN, Attention_RNN1, Attention_RNN2, Attention_RNN3, Attention_RNN4, \\\n Attention_RNN5, Attention_RNN6, CNN\nfrom record import do_record\nfrom confusion_matrix import Alphabet\nfrom confusion_matrix import ConfusionMatrix\nimport datetime\nfrom tensorflow.contrib import learn\nimport config\nimport data_helpers\nimport util\nimport tensorflow as tf\nimport numpy as np\nfrom scorer import get_rank_score_by_file\nimport time\n\nfrom pycorenlp import 
StanfordCoreNLP\nimport re\nfrom tree_util import ConstTree, DepTree, ConstNode, DepNode\nfrom vocab import Vocab\n\nPAD_WORD = ''\nUNK_WORD = ''\nBOS_WORD = ''\nEOS_WORD = ''\nNUM_WORD = ''\n\nPUNC_TAG = ''\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\n# cd D:\\stanford-corenlp-full-2016-10-31\n# java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000\n\n\n\n# from sys import version_infovim\n# print(tf.__version__)\n# print(version_info)\ntimestamp = time.time()\n\n# Data set\n# tf.flags.DEFINE_string(\"level1_sense\", \"Comparison\", \"level1_sense (default: Comparison)\")\n# tf.flags.DEFINE_string(\"dataset_type\", \"PDTB_imp\", \"dataset_type (default: PDTB_imp)\")\ntf.flags.DEFINE_string(\"train_data_dir\", \"\", \"train data dir\")\ntf.flags.DEFINE_boolean(\"blind\", False, \"blind(default: 'False')\")\n\n# models\ntf.flags.DEFINE_string(\"model\", \"RNN\", \"model(default: 'RNN')\")\n\n# Model Hyperparameters\n''' RNN '''\ntf.flags.DEFINE_boolean(\"share_rep_weights\", True, \"share_rep_weights\")\ntf.flags.DEFINE_boolean(\"bidirectional\", True, \"bidirectional\")\n# cell\ntf.flags.DEFINE_string(\"cell_type\", \"BasicLSTM\", \"Cell Type(default: 'BasicLSTM')\")\ntf.flags.DEFINE_integer(\"hidden_size\", 100, \"Number of Hidden Size (default: 100)\")\ntf.flags.DEFINE_integer(\"num_layers\", 1, \"Number of RNN Layer (default: 1)\")\n\n# Training parameters\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularizaion lambda (default: 0.0)\")\ntf.flags.DEFINE_float(\"learning_rate\", 0.005, \"Learning Rate (default: 0.005)\")\n\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 15, \"Number of training epochs (default: 10)\")\n\ntf.flags.DEFINE_integer(\"evaluate_every\", 10, \"Evaluate model on dev set after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps (default: 100)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\n# FLAGS.flag_values_dict()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print((\"{}={}\".format(attr.upper(), value)))\nprint(\"\")\n\n\n\nmodel_mapping = {\n \"RNN\": RNN,\n \"Attention_RNN1\": Attention_RNN1,\n \"Attention_RNN2\": Attention_RNN2,\n \"Attention_RNN3\": Attention_RNN3,\n \"Attention_RNN4\": Attention_RNN4,\n \"Attention_RNN5\": Attention_RNN5,\n \"Attention_RNN6\": Attention_RNN6\n}\n\n# for recording\n\ntrain_data_dir = FLAGS.train_data_dir\nif \"conll\" in train_data_dir:\n if FLAGS.blind:\n record_file = config.RECORD_PATH + \"/base_task/conll_blind.csv\"\n else:\n record_file = config.RECORD_PATH + \"/base_task/conll.csv\"\nelif \"ZH\" in train_data_dir:\n if FLAGS.blind:\n record_file = config.RECORD_PATH + \"/base_task/zh_blind.csv\"\n else:\n record_file = config.RECORD_PATH + \"/base_task/zh.csv\"\nelse:\n record_file = config.RECORD_PATH + \"/base_task/four_way.csv\"\n\nprint(\"==> record path: %s\" % record_file)\nprint()\n\nevaluation_result = {\n \"f1\": 0.0,\n \"p\": 0.0,\n \"r\": 0.0,\n \"acc\": 0.0\n}\nconfiguration = {\n \"train_data_dir\": FLAGS.train_data_dir,\n \"model\": FLAGS.model,\n 
\"share_rep_weights\": FLAGS.share_rep_weights,\n \"bidirectional\": FLAGS.bidirectional,\n\n \"cell_type\": FLAGS.cell_type,\n \"hidden_size\": FLAGS.hidden_size,\n \"num_layers\": FLAGS.num_layers,\n\n \"dropout_keep_prob\": FLAGS.dropout_keep_prob,\n \"l2_reg_lambda\": FLAGS.l2_reg_lambda,\n \"Optimizer\": \"AdaOptimizer\",\n \"learning_rate\": FLAGS.learning_rate,\n\n \"batch_size\": FLAGS.batch_size,\n \"num_epochs\": FLAGS.num_epochs,\n\n \"w2v_type\": \"cqa 100维\",\n}\nadditional_conf = {}\n\n\n\n# Data Preparation\n# ==================================================\n\n# Load data\nprint(\"Loading data...\")\n# level1_sense = FLAGS.level1_sense\n# dataset_type = FLAGS.dataset_type\n\n\n\ntrain_data_dir = FLAGS.train_data_dir # 'D:/PY/Pycode/project/end-to-end-discourse-parser/data/four_way/PDTB_imp'\n# 准备原始数据文本\ntrain_arg1s, train_arg2s, train_labels = data_helpers.load_data_and_labels(\"%s/train\" % train_data_dir)\ndev_arg1s, dev_arg2s, dev_labels = data_helpers.load_data_and_labels(\"%s/dev\" % train_data_dir)\ntest_arg1s, test_arg2s, test_labels = data_helpers.load_data_and_labels(\"%s/test\" % train_data_dir)\n\nnum_classes = train_labels.shape[1]\n\n\nprint(\"num_classes\", num_classes)\nprint('train/dev/test:',len(train_arg1s),len(dev_arg1s),len(test_arg1s))\n\ncore_nlp = StanfordCoreNLP('http://localhost:9000')\nannotate_func = lambda x: core_nlp.annotate(x, properties={\n 'annotators': 'tokenize,ssplit,pos,lemma,parse,depparse',\n 'outputFormat': 'json',\n # 'ssplit.isOneSentence': True\n})\n\ndef extract_json_ob(json_ob, lowercase=True, use_lemma=True, replace_num=True):\n processed_words = []\n dep_trees = []\n const_trees = []\n # there may be multiple sentences\n for sentence_info in json_ob['sentences']:\n # extract the words of the sentence\n sent_words = []\n for token in sentence_info['tokens']:\n word = token['lemma'] if use_lemma else token['word']\n word = word.lower() if lowercase else word\n if replace_num and any(c.isdigit() for c in word):\n word = re.sub('[.|,|/| ]', '', word.lstrip('-'))\n if word.isdigit():\n word = NUM_WORD\n sent_words.append(word)\n # build the dependency tree\n nodes = [DepNode(len(processed_words) + word_idx) for word_idx in range(len(sent_words))]\n root_node = None\n for dep in sentence_info['basicDependencies']:\n if dep['governor'] == 0:\n root_node = nodes[dep['dependent'] - 1]\n else:\n nodes[dep['governor'] - 1].add_child(nodes[dep['dependent'] - 1])\n dep_tree = DepTree()\n dep_tree.assign_root(root_node)\n dep_trees.append(dep_tree)\n # build the constituency tree\n const_tree = ConstTree()\n const_tree.load_from_string(sentence_info['parse'])\n const_tree.compress()\n const_tree.binarize()\n const_trees.append(const_tree)\n # add the sent_words to processed_words\n processed_words.extend(sent_words)\n if len(dep_trees) > 1:\n for dep_tree in dep_trees[1:]:\n dep_trees[0].merge(dep_tree)\n if len(const_trees) > 1:\n for const_tree in const_trees[1:]:\n const_trees[0].merge(const_tree)\n const_trees[0].binarize()\n return processed_words, dep_trees[0], const_trees[0]\n\n\nclass PDTBInstance:\n def __init__(self):\n self.left_words = []\n self.right_words = []\n self.left_const_tree = None\n self.right_const_tree = None\n self.left_dep_tree = None\n self.right_dep_tree = None\n self.label = None\n\n# train_arg1s_parse_result = []\ndef data2Inst(arg1s, arg2s, label):\n data_set = []\n for arg1,arg2, lab in zip(arg1s,arg2s,label):\n inst = PDTBInstance()\n arg1_parse_result = annotate_func(arg1)\n arg2_parse_result = 
annotate_func(arg2)\n # train_arg1s_parse_result.append(parse_result)\n inst.left_words, inst.left_dep_tree, inst.left_const_tree = extract_json_ob(arg1_parse_result)\n inst.right_words, inst.right_dep_tree, inst.right_const_tree = extract_json_ob(arg2_parse_result)\n inst.label = lab\n data_set.append(inst)\n return data_set\n\n# {list}, every element is a {PDTBInstance}\n# train_set = data2Inst(train_arg1s, train_arg2s, train_labels)\ndev_set = data2Inst(dev_arg1s, dev_arg2s, dev_labels)################# test\n# test_set = data2Inst(test_arg1s, test_arg2s, test_labels)\n\n# PDTBDataSet function, get all words in dataset\ndef get_all_words(dataset):\n words = []\n for inst in dataset:\n words.extend(inst.left_words)\n words.extend(inst.right_words)\n return words\n\ndef get_all_tags(dataset):\n tags = []\n for inst in dataset:\n tags.extend([node.tag for node in inst.left_const_tree.bfs_tranverse()])\n tags.extend([node.tag for node in inst.right_const_tree.bfs_tranverse()])\n return tags\n\n\n### load embedding ###\nword_vocab = Vocab(mannual_add=[PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD, NUM_WORD])\n# for word in get_all_words(train_set)+get_all_words(dev_set)+get_all_words(test_set):\nfor word in get_all_words(dev_set): ################# test\n word_vocab.add(word)\n\n# word_vocab.load_pretrained_emb(PathConfig.embedding_path)\nembedding_path='D:\\\\data\\\\glove.6B\\\\glove.6B.50d.txt'\nword_vocab.load_pretrained_emb(embedding_path) # save as 'word_vocab.obj'\n\n\n# load tag embedding\ntag_vocab = Vocab()\n# for tag in get_all_tags(train_set)+get_all_tags(dev_set)+get_all_tags(test_set):################# test\nfor tag in get_all_tags(dev_set):\n tag_vocab.add(tag)\nprint('Size of tag vocab: {}'.format(tag_vocab.size()))\n# tag_vocab.init_embed(ModelConfig.tag_embed_dim)\ntag_embed_dim = 50\ntag_vocab.init_embed(tag_embed_dim) # save as 'tag_vocab.obj'\n# torch.save(tag_vocab, os.path.join(PathConfig.experiment_data_dir, 'tag_vocab.obj'))\n\n# dev_set.format_instances_to_torch_var(word_vocab, tag_vocab) 将树节点的单词、tag等换为torch的数字 save as dataset\n\nprint('mmm')\nprint('fff')\n### load embedding end ###\n\n\n# # Build vocabulary\n# max_document_length = 100\n# all_text = train_arg1s + train_arg2s + dev_arg1s + dev_arg2s + test_arg1s + test_arg2s\n# vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\n# vocab_processor.fit(all_text)\n#\n# # transform 将arg从list文本变为对应的词的编号的二维数组\n# # arg:\n# # 句子1:word1 word2 word3 ... wordn\n# # 句子2:。。。\n# # 句子n:...\n# # -------->[n,100]:\n# # [\n# # [345,56,453...34,0,0,0,0,0](总共100维)\n# # ...\n# # [23,.....] 
(第n行)\n# # ]\n# train_arg1s = np.array(list(vocab_processor.transform(train_arg1s)))\n# train_arg2s = np.array(list(vocab_processor.transform(train_arg2s)))\n# dev_arg1s = np.array(list(vocab_processor.transform(dev_arg1s)))\n# dev_arg2s = np.array(list(vocab_processor.transform(dev_arg2s)))\n# test_arg1s = np.array(list(vocab_processor.transform(test_arg1s)))\n# test_arg2s = np.array(list(vocab_processor.transform(test_arg2s)))\n#\n#\n# # load word embedding matrix 词向量:(n,m)n为所有文本单词个数,即下面的Vocabulary Size,m为词向量维度,google_news中为300。\n# vocab_embeddings = \\\n# util.load_google_word2vec_for_vocab(train_data_dir, vocab_processor.vocabulary_._mapping, from_origin=True)\n#\n#\n#\n# print((\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_))))\n# print((\"Train/Dev/Test split: {:d}/{:d}/{:d}\".format(len(train_labels), len(dev_labels), len(test_labels))))\n#\n#\n# ''' Training '''\n# # ==================================================\n#\n# with tf.Graph().as_default():\n# tf.set_random_seed(1)\n#\n# # Assume that you have 12GB of GPU memory and want to allocate ~4GB:\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)\n# session_conf = tf.ConfigProto(\n# allow_soft_placement=FLAGS.allow_soft_placement,\n# log_device_placement=FLAGS.log_device_placement,\n# gpu_options=gpu_options\n# )\n#\n# sess = tf.Session(config=session_conf)\n# with sess.as_default():\n# if FLAGS.model == \"CNN\":\n# model = CNN(\n# w2v_length=train_arg1s.shape[1],\n# vocab_embeddings=vocab_embeddings,\n# num_classes=train_labels.shape[1],\n#\n# filter_sizes=[4, 6, 13],\n# num_filters=100,\n# )\n# else:\n# model = model_mapping[FLAGS.model](\n# sent_length=train_arg1s.shape[1],\n# vocab_embeddings=vocab_embeddings,\n# num_classes=train_labels.shape[1],\n#\n# cell_type=FLAGS.cell_type,\n# hidden_size=FLAGS.hidden_size,\n# num_layers=FLAGS.num_layers,\n# bidirectional=FLAGS.bidirectional,\n# share_rep_weights=FLAGS.share_rep_weights,\n# batch_size=FLAGS.batch_size,\n# l2_reg_lambda=FLAGS.l2_reg_lambda,\n# additional_conf = additional_conf\n# )\n#\n#\n# # Define Training procedure\n# global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n# optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n# grads_and_vars = optimizer.compute_gradients(model.loss)\n# train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n#\n#\n# # write to logs\n# merged = tf.summary.merge_all()\n# writer = tf.summary.FileWriter(\"logs/\", sess.graph)\n# # tensorboard - -logdir = logs\n#\n#\n# # Initialize all variables\n# sess.run(tf.global_variables_initializer())\n#\n# # save model\n# saver = tf.train.Saver(max_to_keep=1)\n#\n# def train_step(s1_batch, s2_batch, y_batch):\n# \"\"\"\n# A single training step\n# \"\"\"\n# feed_dict = {\n# model.input_s1: s1_batch,\n# model.input_s2: s2_batch,\n# model.input_y: y_batch,\n# model.dropout_keep_prob: FLAGS.dropout_keep_prob\n# }\n# _, step, loss, accuracy = sess.run(\n# [train_op, global_step, model.loss, model.accuracy],\n# feed_dict)\n#\n# # np.set_printoptions(threshold=np.nan)\n# # print \"==\" * 40\n# # print outputs1[0].shape\n# # print output_1[0].shape\n# # print outputs1[0][:50]\n# # print output_1[0]\n#\n#\n# time_str = datetime.datetime.now().isoformat()\n# print((\"\\r {}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy)),end='')\n#\n#\n# def test_step(s1_all, s2_all, y_all, test_str):\n# \"\"\"\n# Evaluates model on a dev/test set\n# \"\"\"\n# golds = []\n# predictions = []\n#\n#\n# feed_dict = {\n# 
model.input_s1: s1_all,\n# model.input_s2: s2_all,\n# model.input_y: y_all,\n# model.dropout_keep_prob: 1.0\n# }\n#\n# step, loss, accuracy, softmax_scores, curr_predictions, curr_golds = sess.run(\n# [global_step, model.loss, model.accuracy, model.softmax_scores,\n# model.predictions, model.golds], feed_dict)\n#\n# # print('\\t %s loss:' % test_str, loss)\n#\n# golds += list(curr_golds) # 真实label:[2,1,2,2。。。]\n# predictions += list(curr_predictions)\n#\n#\n# alphabet = Alphabet()\n# for i in range(num_classes):\n# alphabet.add(str(i))\n# confusionMatrix = ConfusionMatrix(alphabet)\n# predictions = list(map(str, predictions)) #[2,3,1,0,2...2]-->['2','3','1','0','2'...'2']\n# golds = list(map(str, golds))\n# confusionMatrix.add_list(predictions, golds) # 将预测predictions和golds填入confusionMatrix.matrix的4*4表格\n# confusionMatrix.loss = loss\n#\n# return confusionMatrix # 主要就是4*4表格\n#\n#\n# def _prediction_on_dev(best_score, best_output_string):\n#\n# confusionMatrix = test_step(dev_arg1s, dev_arg2s, dev_labels, test_str='dev') #得到4*4的矩阵\n#\n# acc = confusionMatrix.get_accuracy()\n# # 对着 f1 调\n# p, r, f1 = confusionMatrix.get_average_prf()\n#\n# # current performance\n# curr_output_string = confusionMatrix.get_matrix() + confusionMatrix.get_summary()\n# # + confusionMatrix.get_micro_f1()\n#\n# flag = 0\n# if f1 >= best_score:\n# flag = 1\n# best_score = f1\n#\n# best_output_string = confusionMatrix.get_matrix() + confusionMatrix.get_summary()\n# # + confusionMatrix.get_micro_f1()\n#\n# # print(\"\")\n# # print(\"\\nEvaluation on Test:\")\n# # confusionMatrix = test_step(test_arg1s, test_arg2s, test_labels)\n# # confusionMatrix.print_out()\n# # print(\"\")\n#\n# acc = confusionMatrix.get_accuracy()\n# p, r, f1 = confusionMatrix.get_average_prf()\n#\n# evaluation_result[\"acc\"] = \"%.4f\" % acc\n# evaluation_result[\"f1\"] = \"%.4f\" % f1\n# evaluation_result[\"p\"] = \"%.4f\" % p\n# evaluation_result[\"r\"] = \"%.4f\" % r\n#\n# # save model\n# saver.save(sess, 'ckpt/best.ckpt')\n#\n# # color = colored.bg('black') + colored.fg('green')\n# # reset = colored.attr('reset')\n#\n# print(\"\\nEvaluation on Dev:\")\n# print(\"Current Performance:\")\n# print('\\033[34m',curr_output_string, '\\033[0m') #当前结果蓝色(34)显示\n#\n# if flag == 1:\n# print(\" \" * 40 + '❤️')\n#\n# # print((color + 'Best Performance' + reset))\n# # print((color + best_output_string + reset))\n# print('\\033[32m Best Performance\\033[0m') #最佳结果绿色(32)显示\n# print(('\\033[32m' + best_output_string + '\\033[0m'))\n#\n# return best_score, best_output_string\n#\n#\n# def _prediction_on_test():\n# # 恢复模型\n# model_file = tf.train.latest_checkpoint('ckpt/')\n# saver.restore(sess, model_file)\n#\n# confusionMatrix = test_step(dev_arg1s, dev_arg2s, dev_labels, test_str='dev') #得到4*4的矩阵\n# acc = confusionMatrix.get_accuracy()\n# curr_output_string = confusionMatrix.get_matrix() + confusionMatrix.get_summary()\n# print(\"\\nEvaluation on Dev:\")\n# print(\"Current Performance:\")\n# print('\\033[33m',curr_output_string, '\\033[0m') #当前结果黄色(33)显示\n#\n#\n#\n# confusionMatrix = test_step(test_arg1s, test_arg2s, test_labels, test_str='test') #得到4*4的矩阵\n# acc = confusionMatrix.get_accuracy()\n# curr_output_string = confusionMatrix.get_matrix() + confusionMatrix.get_summary()\n# print(\"\\nEvaluation on test:\")\n# print(\"Current Performance:\")\n# print('\\033[33m',curr_output_string, '\\033[0m') #当前结果黄色(33)显示\n#\n#\n# # Generate batches\n# batches = data_helpers.batch_iter(\n# list(zip(train_arg1s, train_arg2s, train_labels)), FLAGS.batch_size, 
FLAGS.num_epochs, shuffle=True)\n#\n# best_score = 0.0\n# best_output_string = \"\"\n# # Training loop. For each batch...\n# for batch in batches:\n# s1_batch, s2_batch, y_batch = list(zip(*batch))\n# train_step(s1_batch, s2_batch, y_batch)\n# current_step = tf.train.global_step(sess, global_step)\n# if current_step % FLAGS.evaluate_every == 0:\n# if num_classes == 4:\n# best_score, best_output_string = _prediction_on_dev(best_score, best_output_string)\n#\n# _prediction_on_test()\n#\n#\n# # record the configuration and result\n# fieldnames = [\"f1\", \"p\", \"r\", \"acc\", \"train_data_dir\", \"model\", \"share_rep_weights\",\n# \"bidirectional\", \"cell_type\", \"hidden_size\", \"num_layers\",\n# \"dropout_keep_prob\", \"l2_reg_lambda\", \"Optimizer\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"w2v_type\",\n# \"additional_conf\"\n# ]\n# do_record(fieldnames, configuration, additional_conf, evaluation_result, record_file)\n\n\n","sub_path":"model_trainer/train_tree_task.py","file_name":"train_tree_task.py","file_ext":"py","file_size_in_byte":20623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"71316654","text":"import numpy as np\nimport time\nimport subprocess\nfrom SCEconomy_LSC_nltax import Economy, split_shock\n\nimport pickle\n\n\nif __name__ == '__main__':\n\n def curvedspace(begin, end, curve, num=100):\n import numpy as np\n ans = np.linspace(0, (end - begin)**(1.0/curve), num) ** (curve) + begin\n ans[-1] = end #so that the last element is exactly end\n return ans\n\n\n agrid = curvedspace(0.0, 50., 2.0, 40)\n\n\n alpha = 0.3 #new!\n theta = 0.41\n ynb_p_gdp = 0.25\n xnb_p_gdp = 0.105\n g_p_gdp = 0.13\n \n pure_sweat_share = 0.10\n yc_init = 1.04\n \n GDP_implied = yc_init/(1. - ynb_p_gdp - pure_sweat_share/(1.-alpha))\n \n ynb = ynb_p_gdp*GDP_implied\n xnb = xnb_p_gdp*GDP_implied\n g = g_p_gdp*GDP_implied\n \n\n taup = 0.20\n taub = np.array([0.137, 0.185, 0.202, 0.238, 0.266, 0.28]) * 0.50 #large one\n psib = np.array([0.12837754, 0.14071072, 0.15, 0.20081269, 0.30081419, 0.37107904])\n \n\n ### additional info\n zgrid2 = np.load('./input_data/zgrid.npy') ** 2.0\n\n # zgrid2 = np.load('./input_data/zgrid_09_0075.npy') ** 2.0\n # prob2 = np.load('./input_data/prob_epsz_07_09_01_0075.npy')\n\n path_to_shock = './tmp/data_i_s'\n from markov import calc_trans, Stationary\n \n num_pop = 100_000\n sim_time = 3_000\n\n data_i_s = np.ones((num_pop, sim_time), dtype = int)\n #need to set initial state for zp\n data_i_s[:, 0] = 7\n\n # prob = np.load('./input_data/transition_matrix.npy')\n prob = np.load('./DeBacker/prob_epsz.npy')\n np.random.seed(0)\n data_rand = np.random.rand(num_pop, sim_time)\n calc_trans(data_i_s, data_rand, prob)\n data_i_s = data_i_s[:, 2000:]\n\n np.save(path_to_shock + '.npy' , data_i_s)\n\n p_, rc_ , ome_ = 0.235642284701831,0.0529813388265726, 0.753033796603102\n\n ###define additional parameters###\n num_core = 4 #7 or 8 must be the best for Anmol's PC. 
set 3 or 4 for Yuki's laptop\n\n split_shock(path_to_shock, 100_000, int(num_core))\n\n\n ###end defining additional parameters###\n\n print('Solving the model with the given prices...')\n print('Do not simulate more than one models at the same time...')\n\n econ = Economy(path_to_data_i_s = path_to_shock, prob = prob, zgrid = zgrid2, agrid = agrid,\n g = g, yn = ynb, xnb = xnb, ome = ome_, chi = 0.25,\n scaling_n = GDP_implied, scaling_b = GDP_implied,\n taub = taub, psib = psib, taup = taup, \n alpha = alpha, theta = theta)\n #taub = taub, psib = psib,taup = taup,\n \n econ.set_prices(p = p_, rc = rc_)\n with open('econ.pickle', mode='wb') as f: pickle.dump(econ, f)\n\n t0 = time.time()\n\n result = subprocess.run(['mpiexec', '-n', str(num_core), 'python', 'SCEconomy_LSC_nltax.py'], stdout=subprocess.PIPE)\n \n t1 = time.time()\n\n with open('econ.pickle', mode='rb') as f: econ = pickle.load(f)\n\n\n # w = econ.w\n p = econ.p\n rc = econ.rc\n moms = econ.moms\n \n dist = np.sqrt(moms[0]**2.0 + moms[1]**2.0)\n \n if p != p_ or rc != rc_:\n print('err: input prices and output prices do not coincide.')\n print('p = ', p, ', p_ = ', p_)\n print('rc = ', rc, ', rc_ = ', rc_)\n\n \n #c\n econ.print_parameters()\n \n econ.calc_moments()\n ###calculate other important variables###\n # econ.calc_sweat_eq_value()\n econ.calc_age()\n econ.simulate_other_vars()\n econ.save_result()\n \n\n \n \n","sub_path":"simulate_LSC_nltax.py","file_name":"simulate_LSC_nltax.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"81916740","text":"##############################################################\n# Copyright (c) 2012-2017 Datrium, Inc. All rights reserved. #\n# -- Datrium Confidential -- #\n##############################################################\n\n\"\"\"\nPB message classes are dynamically instantiated via reflection. This makes it\nhard for them to be direct subclasses of Exception. To get around this, we\nhacked the protoc compiler (python_generator.cc) to generate two classes for\neach DaException. The first class ends with \"Message\" and is the PB message\nclass. The second class is the actual Exception. Each DaException* class derives\nfrom DaException (defined here). This is so that:\n\n1. DaExceptions can be raised naturally in Python server code.\n\n2. The PyBridge server implementation can easily cast DaExceptions to\n DaExceptionBase(Message), which is the defined over-the-wire format for\n DaExceptions.\n\n3. The PyBridge client implementation can easily create Python DaExceptions from\n DaErrs.\n\n4. The pure Python server implementation can easily create Python DaExceptions\n from DaExceptionBase(Message) messages.\n\nHere are some doctest examples.\n\n# All of the Exceptions defined in Exception_pb2.py are sub-classes of\n# DaException, which is why imports are done lazily.\n>>> from IDL.Protos.Extensions.Exception_pb2 import *\n\n# Can create and raise a DaException just as you would any other\n# Exception. Message-specific attributes defined in Exception.proto should be\n# specified as keywords, and are stored in attributes of the DaException.\n>>> e = DaExceptionObjectNotFound('Container not found', notFoundId='0000000012345678')\n>>> e.notFoundId\n'0000000012345678'\n\n# The error code is 1101. 
The error code chain lists all of its base codes and\n# is for backward compatibility.\n>>> e.ERROR_CODE\n1101\n>>> e.errorCodes\n[1101, 1100, 1002]\n\n# It is a real subclass.\n>>> isinstance(e, DaExceptionNotFound)\nTrue\n>>> isinstance(e, DaExceptionUnchecked)\nTrue\n>>> isinstance(e, DaExceptionBase)\nTrue\n>>> isinstance(e, DaExceptionChecked)\nFalse\n>>> isinstance(e, DaExceptionLocalOnly)\nFalse\n\n# Can get the error message from either Exception.message or\n# DaException.errorMsg (or if you are really perverse,\n# DaException.base_message().errorMsg).\n>>> e.message\n'Container not found'\n>>> e.errorMsg\n'Container not found'\n>>> e.base_message().errorMsg\nu'Container not found'\n\n# The pure Python RPC client uses create_exception_from_base_message() to\n# convert over-the-wire DaExceptionBase to DaException.\n>>> f = DaException.create_exception_from_base_message(e.base_message())\n>>> e.base_message() == f.base_message()\nTrue\n>>> f.notFoundId\nu'0000000012345678'\n\n# The PyBridge RPC client uses create_exception() to convert DaErr to\n# DaException to throw to Python.\n>>> g = DaException.create_exception(e.ERROR_CODE, e.errorMsg, e.fileName, e.lineNum, \\\n notFoundId=e.notFoundId)\n>>> e.base_message() == g.base_message()\nTrue\n>>> g.notFoundId\n'0000000012345678'\n\"\"\"\n\nfrom datrium.utils import kv_utils\nfrom IDL.Protos.Extensions.KeyValueTypes_pb2 import KeyValue, TypedValue\n\nclass DaException(Exception):\n def __init__(self, errorMsg=None, fileName=None, lineNum=None, errorCodes=None, **kwargs):\n \"\"\"\n Create a new instance of a DaException* subclass. The DaException class\n should not be instantiated directly!\n\n :param errorMsg: If not None, the error message text.\n :param fileName: If not None, the file where the error was generated.\n :param lineNum: If not None, the line number in the file where the error was generated.\n :param kwargs: Extended attributes.\n :returns: New instance.\n \"\"\"\n\n Exception.__init__(self, errorMsg)\n\n self.errorMsg = errorMsg\n self.fileName = fileName\n self.lineNum = lineNum\n self.errorCodes = errorCodes\n\n # Get fileName and lineNum from traceback, if not specified.\n if self.fileName is None and self.lineNum is None:\n import traceback\n self.fileName, self.lineNum, funcName, line = traceback.extract_stack()[-2]\n\n # Derive error codes of this class and all base classes.\n if self.errorCodes is None:\n self.errorCodes = []\n exception = self\n while exception.BASE_CLASS != None:\n assert exception.ERROR_CODE not in self.errorCodes\n self.errorCodes.append(exception.ERROR_CODE)\n exception = exception.BASE_CLASS\n\n # Sort them in reverse order by error code.\n self.errorCodes.sort(reverse=True)\n\n # Set additional attributes from kwargs.\n for key, val in kwargs.iteritems():\n if key in self.DESCRIPTOR.fields_by_name:\n setattr(self, key, val)\n\n @property\n def message(self):\n \"\"\"\n Get the error message text.\n\n :returns: Current error message text.\n \"\"\"\n\n return self.errorMsg\n\n def __repr__(self):\n \"\"\"\n Get a best-effort representation of how the current DaException instance\n could be re-instantiated.\n\n :returns: Representation of the DaException instance.\n \"\"\"\n\n # Basic arguments.\n args = map(repr, [self.errorMsg, self.fileName, self.lineNum, self.errorCodes])\n\n # Additional attributes.\n for key, field in self.DESCRIPTOR.fields_by_name.iteritems():\n val = getattr(self, key, None)\n if val is not None:\n args.append(key + \"=\" + repr(val))\n\n return type(self).__name__ + 
\"(\" + \", \".join(args) + \")\"\n\n def __str__(self):\n \"\"\"\n Get the string representation of the DaException.\n\n :returns: String representation of the PB message.\n \"\"\"\n\n return repr(self) + \"\\n\" + str(self.base_message())\n\n def base_message(self):\n \"\"\"\n Encode the DaException to a DaExceptionBaseMessage and return it.\n\n :returns: DaExceptionBaseMessage.\n \"\"\"\n\n from IDL.Protos.Extensions.Exception_pb2 import DaExceptionBaseMessage\n\n base_message = DaExceptionBaseMessage()\n\n assert self.errorCodes is not None\n base_message.errorCodes[:] = self.errorCodes\n\n if self.errorMsg is not None:\n base_message.errorMsg = self.errorMsg\n if self.fileName is not None:\n base_message.fileName = self.fileName\n if self.lineNum is not None:\n base_message.lineNum = self.lineNum\n\n # Set base_message.attributes from additional attributes.\n for key, field in self.DESCRIPTOR.fields_by_name.iteritems():\n if key not in (\"errorMsg\", \"fileName\", \"lineNum\", \"errorCodes\"):\n val = getattr(self, key, None)\n if val is not None:\n attribute = base_message.attributes.add()\n attribute.key = key\n kv_utils.set_typed_val_from_val(attribute.val, val)\n\n return base_message\n\n @staticmethod\n def create_exception(error_code, errorMsg=None, fileName=None, lineNum=None, **kwargs):\n \"\"\"\n Factory method. Given an error code, return an instance of the\n appropriate DaException subclass. Make sure to specify any required\n extended attributes in kwargs!\n\n :returns: New instance of the appropriate DaException subclass, or\n DaExceptionBase if not found.\n \"\"\"\n\n from IDL.Protos.Extensions.Exception_pb2 import EXCEPTIONS_BY_ERROR_CODE, DaExceptionBase\n\n if error_code in EXCEPTIONS_BY_ERROR_CODE:\n exception_class = EXCEPTIONS_BY_ERROR_CODE[error_code]\n # None means derive self.errorCodes[].\n errorCodes = None\n else:\n # Cannot find an appropriate class. Return DaExceptionBase.\n exception_class = DaExceptionBase\n errorCodes = [error_code]\n\n # We currently save the possibly bogus error code in self.errorCodes[]\n # for debugging. Maybe we should assert instead.\n\n return exception_class(errorMsg, fileName, lineNum, errorCodes, **kwargs)\n\n @staticmethod\n def create_exception_from_base_message(base_message, **kwargs):\n \"\"\"\n Factory method. Given a DaExceptionBaseMessage, return an instance of\n the appropriate DaException subclass.\n\n :returns: New instance of the appropriate DaException subclass, or\n DaExceptionBase if not found.\n \"\"\"\n\n from IDL.Protos.Extensions.Exception_pb2 import EXCEPTIONS_BY_ERROR_CODE, DaExceptionBase\n\n for error_code in base_message.errorCodes:\n if error_code in EXCEPTIONS_BY_ERROR_CODE:\n exception_class = EXCEPTIONS_BY_ERROR_CODE[error_code]\n break\n else:\n # Cannot find an appropriate class. Return DaExceptionBase.\n exception_class = DaExceptionBase\n\n # Set additional attributes from base_message.attributes.\n # XXX (mhuang): What do we do if the base_message does not specify\n # required attributes?!\n attributes = dict((attribute.key, kv_utils.get_val_from_typed_val(attribute.val))\n for attribute in base_message.attributes\n if attribute.key not in (\"errorMsg\", \"fileName\", \"lineNum\", \"errorCodes\"))\n\n # kwargs will override any attributes from the base message.\n for k,v in kwargs.iteritems():\n attributes[k] = v\n\n # We currently save the possibly bogus error codes in self.errorCodes[]\n # for debugging. 
Maybe we should assert instead.\n\n return exception_class(base_message.errorMsg if base_message.HasField(\"errorMsg\") else None,\n base_message.fileName if base_message.HasField(\"fileName\") else None,\n base_message.lineNum if base_message.HasField(\"lineNum\") else None,\n base_message.errorCodes[:],\n **attributes)\n\nif __name__ == \"__main__\":\n # This runs the test examples above.\n import doctest\n doctest.testmod(verbose=True)\n","sub_path":"api/PythonRoot/datrium/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":10191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"407998358","text":"#have some problems with test results\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution: \n # @return a list of tree node \n def getTree(self,start,end): \n if start>end: \n return [None] \n solution=[] \n for rootval in range(start,end+1): \n left=self.getTree(start,rootval-1) \n right=self.getTree(rootval+1,end) \n for i in left: \n for j in right: \n root=TreeNode(rootval) \n root.left=i \n root.right=j \n solution.append(root) \n return solution \n def generateTrees(self, n): \n return self.getTree(1,n) ","sub_path":"#96 Unique Binary Search Trees II.py","file_name":"#96 Unique Binary Search Trees II.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"407040505","text":"N,S = map(int, input().split())\nab = list(map(int, input().split()))\n\n\nstart,end=0,0\nanswer=float('inf') # 길이\ntmp=ab[0]\n\nwhile start >= 0 and end \")\ndef actor_detail_page(id):\n actor = select(\"name,image\",\"movie\",\"id={}\".format(id),asDict=True)\n movies = select(\"movie.name, movie.likes, movie.dislikes, movie.image\",\n \"movie join index on movie.id=index.movie_id\",\"index.actor_id={}\".format(id), asDict=True)\n return render_template(\"actor_detail_page.html\", movies=movies, actor=actor)","sub_path":"views/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"653376606","text":"# -*- encoding: utf-8 -*-\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.httpexceptions import HTTPForbidden\nfrom pyramid.renderers import render\nfrom pyramid.security import forget\nfrom pyramid.security import remember\nfrom pyramid.security import unauthenticated_userid\nfrom pyramid.view import view_config\nfrom pyramid.url import route_url\nfrom urllib2 import urlopen, Request\nimport pickle\nimport json\nfrom tempus_ui.views.api import TemplateAPI\n\n\n@view_config(\n route_name='invitacio',\n renderer='tempus_ui:templates/invitation.pt',\n request_method='GET')\ndef invitacio(context, request):\n page_title = \"Beta Tempus\"\n if 'credentials' not in request.session:\n return HTTPForbidden()\n api = TemplateAPI(context, request, page_title)\n return dict(api=api)\n\n\n@view_config(\n route_name='invitacio',\n renderer='tempus_ui:templates/invitation.pt',\n request_method='POST')\ndef invitacio_post(request):\n if 'credentials' not in request.session:\n return HTTPForbidden()\n if 'invitacio' not in request.params.keys():\n return HTTPForbidden()\n\n # Ara el podem afegir com a usuari\n # Fer petició HTTP a la API de forma segura\n clau_invitacio = request.params['invitacio']\n\n credentials = 
pickle.loads(request.session['credentials'])\n cred_py = json.loads(credentials.to_json())\n key_json = json.dumps({'key': clau_invitacio,\n 'credentials': cred_py,\n 'type': 'google',\n 'agent': request.user_agent,\n 'client_addr': request.client_addr})\n\n url = request.registry.settings['HTTPS_API_URL'] + 'useradd'\n req = Request(url, key_json, {'Content-Type': 'application/json'})\n response = urlopen(req)\n resultat_json = response.read()\n resultat = json.loads(resultat_json)\n\n if resultat['success']:\n # He d'obtenir el token temporal\n token = resultat['token']\n request.session.pop('credentials')\n headers = remember(request, token)\n return HTTPFound(\n location=route_url('user.profile', request),\n headers=headers)\n else:\n return HTTPFound(location=route_url('invitacio', request))\n\n\n\n# @view_config(route_name='login', renderer='tempus_ui:templates/login.pt')\n# def login(context, request):\n# page_title = \"Tempus Login\"\n# api = TemplateAPI(context, request, page_title)\n# return dict(api=api, glogin=route_url('google.oauth', request))\n\n\n\n@view_config(route_name='logout')\ndef logout(request):\n headers = forget(request)\n return HTTPFound(location=route_url('home', request),\n headers=headers)\n\n@view_config(context=HTTPForbidden, renderer='default/forbidden.mako')\ndef forbidden(request):\n return dict()\n\n","sub_path":"tempus_ui/views/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"361310877","text":"import sqlite3\nimport urllib\n\nimport pytest\nimport requests\n\nimport GooglePlayAdvancedSearch.DBUtils\nimport GooglePlayAdvancedSearch.tests.testUtils as testUtils\nfrom GooglePlayAdvancedSearch.Models import AppItem\n\n\ndef test_searchPermissionFilter(websiteUrl, dbFilePath):\n\t# com.tencent.mm uses permission 'read the contents of your USB storage'\n\t# We exclude this permission in the search, and make sure the result doesn't have com.tencent.mm.\n\n\ttestUtils.runScraper(['--pytest', '-p', 'com.tencent.mm'])\n\tconnection = sqlite3.connect(dbFilePath)\n\tcursor = connection.cursor()\n\tpermissions = GooglePlayAdvancedSearch.DBUtils.getAllPermissions(cursor)\n\tpid = next((k for k, v in permissions.items() if 'read the contents of your USB storage' in v), None)\n\n\tresponse = requests.get(websiteUrl + '/Api/Search?q=wechat&pids=' + str(pid), verify=True)\n\ttext = response.text\n\tassert 'com.tencent.mm' not in text, \"Search for wechat without storage permission. The search result should not have wechat.\"\n\n\tresponse = requests.get(websiteUrl + '/Api/Search?q=wechat', verify=True)\n\ttext = response.text\n\tassert 'com.tencent.mm' in text, \"Search for wechat allowing storage permission. 
The search result should have wechat.\"\n\n\ndef test_searchCategoryFilter(websiteUrl, dbFilePath):\n\t# com.facebook.katana uses category 'Social'\n\t# we exclude this category in the search, and make sure the result doesn't have com.facebook.katana.\n\n\ttestUtils.runScraper(['--pytest', '-p', 'com.facebook.katana'])\n\tconnection = sqlite3.connect(dbFilePath)\n\tcursor = connection.cursor()\n\tcategories = GooglePlayAdvancedSearch.DBUtils.getAllCategories(cursor)\n\tcid = next((k for k, v in categories.items() if 'Social' in v), None)\n\n\tresponse = requests.get(websiteUrl + '/Api/Search?q=facebook&cids=' + str(cid), verify=True)\n\ttext = response.text\n\tassert 'com.facebook.katana' not in text, \"Search for facebook without Social category. The search result should not have it.\"\n\n\tresponse = requests.get(websiteUrl + '/Api/Search?q=facebook', verify=True)\n\ttext = response.text\n\tassert 'com.facebook.katana' in text, \"Search for facebook allowing Social category. The search result should have it.\"\n\n\ndef test_searchResultUpperBound(websiteUrl, dbFilePath):\n\tconnection = sqlite3.connect(dbFilePath)\n\tcursor = connection.cursor()\n\ttry:\n\t\tappAccessor = GooglePlayAdvancedSearch.DBUtils.AppAccessor()\n\t\tinsertedCount = GooglePlayAdvancedSearch.DBUtils.MAX_SELECT + 1\n\t\tfor i in range(insertedCount):\n\t\t\tapp = AppItem()\n\t\t\tapp['id'] = 'GooglePlayAdvancedSearch.testApp' + str(i)\n\t\t\tapp['name'] = 'matched keyword'\n\t\t\tapp['rating'] = 0\n\t\t\tapp['install_fee'] = 0\n\t\t\tapp['app_icon'] = ''\n\n\t\t\tappAccessor.insertOrUpdateApp(app)\n\t\tdel appAccessor\n\n\t\tcursor.execute(\"select count(*) from App where id like 'GooglePlayAdvancedSearch.testApp%'\")\n\t\tassert int(cursor.fetchone()[0]) >= insertedCount, f\"failed to insert {insertedCount} rows.\"\n\n\t\tresponse = requests.get(websiteUrl + '/Api/Search?q=matched%20keyword', verify=True)\n\t\tdata = response.json()\n\t\tassert len(data['apps']) <= GooglePlayAdvancedSearch.DBUtils.MAX_SELECT, f\"At most returns {GooglePlayAdvancedSearch.DBUtils.MAX_SELECT}, actually returns {len(data['apps'])}.\"\n\tfinally:\n\t\tcursor.execute('delete from App where id like :id', {'id': 'GooglePlayAdvancedSearch.testApp%'})\n\n\ndef test_notReadingStaleInfo(websiteUrl, dbFilePath):\n\tlastException = None\n\ttryCount = 0\n\twhile tryCount < 2:\n\t\ttryCount += 1\n\t\ttry:\n\t\t\tconnection = sqlite3.connect(dbFilePath)\n\t\t\tcursor = connection.cursor()\n\t\t\tcursor.execute('select id, name from App where rating>1 limit 1')\n\t\t\tapp = cursor.fetchone()\n\t\t\tif app is None:\n\t\t\t\traise FileExistsError('cannot find an app with rating > 1 for testing purposes.')\n\n\t\t\tcursor.execute(\"update App set updateDate='2000-01-01', rating=1 where id=:id\", {'id': app[0]})\n\t\t\tconnection.commit()\n\t\t\tresponse = requests.get(websiteUrl + '/Api/Search?q=' + urllib.parse.quote(app[1]))\n\t\t\tdata = response.json()\n\n\t\t\tnewApp = next(a for a in data['apps'] if a['id'] == app[0])\n\t\t\tassert newApp['rating'] > 1, f\"The rating of {app[1]} should be > 1 because the old rating was added on 2000-01-01.\"\n\t\t\treturn\n\t\texcept FileExistsError as e:\n\t\t\tlastException = e\n\t\texcept sqlite3.OperationalError as e:\n\t\t\t# maybe the database is empty. 
We need to load something\n\t\t\tlastException = e\n\n\t\trequests.get(websiteUrl + '/Api/Search?q=facebook')\n\tpytest.skip(str(lastException))\n\n\ndef test_recentSearches(websiteUrl, dbFilePath):\n\tconnection = sqlite3.connect(dbFilePath)\n\tcursor = connection.cursor()\n\n\ttry:\n\t\tcursor.execute(\"insert into Search(keyword, query, ip ,date) values('recentSearches-test1', '', 'pytest', datetime('now'))\")\n\t\tfor _ in range(10):\n\t\t\tcursor.execute(\"insert into Search(keyword, query, ip ,date) values('recentSearches-test2', '', 'pytest', '2010-01-01')\")\n\t\tconnection.commit()\n\n\t\tresponse = requests.get(websiteUrl + '/Api/RecentSearches')\n\t\tdata = response.json()\n\t\tassert data[0]['keyword'] == 'recentSearches-test1', \"Most recent search is recentSearches-test1, but the API does not return it.\"\n\tfinally:\n\t\tcursor.execute(\"delete from Search where ip='pytest'\")\n\t\tconnection.commit()\n","sub_path":"src/GooglePlayAdvancedSearch/tests/test_webApi.py","file_name":"test_webApi.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633189330","text":"\"\"\"Tests for the ec2 scheduler.\"\"\"\n\nimport boto3\n\nfrom package.scheduler.cloudwatch_handler import CloudWatchAlarmScheduler\nfrom package.scheduler.spot_handler import SpotScheduler\n\nfrom .fixture import launch_ec2_spot\n\nimport pytest\n\n\n@pytest.mark.parametrize(\n \"aws_region, tag_key, tag_value, result_count\",\n [\n (\n \"eu-west-1\",\n \"tostop-spot-test-1\",\n \"true\",\n {\"Code\": 48, \"Name\": \"terminated\"},\n ),\n (\n \"eu-west-1\",\n \"badtagkey\",\n \"badtagvalue\",\n {\"Code\": 16, \"Name\": \"running\"},\n ),\n ],\n)\ndef test_terminate_spot_scheduler(\n aws_region, tag_key, tag_value, result_count\n):\n \"\"\"Verify terminate spot scheduler class method.\"\"\"\n client = boto3.client(\"ec2\", region_name=aws_region)\n instances = launch_ec2_spot(2, aws_region, tag_key, tag_value)\n instance_ids = [x[\"InstanceId\"] for x in instances[\"Instances\"]]\n\n try:\n client.get_waiter(\"instance_running\").wait(InstanceIds=instance_ids)\n spot_scheduler = SpotScheduler(aws_region)\n spot_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region)\n spot_scheduler.terminate(\"tostop-spot-test-1\", \"true\")\n if tag_key == \"tostop-spot-test-1\" and tag_value == \"true\":\n client.get_waiter(\"instance_terminated\").wait(\n InstanceIds=instance_ids\n )\n\n ec2_describe = client.describe_instances(InstanceIds=instance_ids)\n for ec2 in ec2_describe[\"Reservations\"][0][\"Instances\"]:\n assert ec2[\"State\"] == result_count\n finally:\n # Clean aws account\n client.terminate_instances(InstanceIds=instance_ids)\n","sub_path":"tests/integration/test_spot_scheduler.py","file_name":"test_spot_scheduler.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"122583880","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n This module provides some functionality to diagnose thrown exceptions\n\"\"\"\n\n\nimport sys\nfrom functools import wraps\nfrom colorful import colorful\n\nfrom .terrain import world\nfrom .exceptions import RadishError, FeatureFileSyntaxError, StepDefinitionNotFoundError, HookError, SameStepError\n\n\n__RADISH_DOC__ = \"https://github.com/radish-bdd/radish\"\n\n\ndef write(text):\n \"\"\"\n Writes the given text to the console\n \"\"\"\n print(text)\n\n\ndef write_error(text):\n \"\"\"\n Writes 
the given text to the console\n \"\"\"\n write(\"{0}: {1}\".format(colorful.bold_red(\"Error\"), colorful.red(text)))\n\n\ndef write_failure(failure):\n \"\"\"\n Writes the failure to the console\n \"\"\"\n write(\"\\n{0}\".format(colorful.red(failure.traceback)))\n\n\ndef abort(return_code):\n \"\"\"\n Aborts the program with the given return_code\n \"\"\"\n sys.exit(return_code)\n\n\ndef error_oracle(func):\n \"\"\"\n Decorator to diagnose thrown exceptions\n \"\"\"\n @wraps(func)\n def _decorator(*args, **kwargs):\n \"\"\"\n The actual decorator\n \"\"\"\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n handle_exception(e)\n\n return _decorator\n\n\ndef catch_unhandled_exception(exc_type, exc_value, traceback):\n \"\"\"\n Catch all unhandled exceptions\n \"\"\"\n handle_exception(exc_value)\n\n\ndef handle_exception(exception):\n \"\"\"\n Handle the given exception\n\n This will print more information about the given exception\n\n :param Exception exception: the exception to handle\n \"\"\"\n if isinstance(exception, HookError): # handle exception from hook\n write_error(exception)\n write_failure(exception.failure)\n abort(1)\n elif isinstance(exception, FeatureFileSyntaxError):\n write_error(exception)\n write(\"\\nError Oracle says:\")\n write(\"\"\"You have a SyntaxError in your feature file!\nPlease have a look into the radish documentation to find out which\nfeatures radish supports and how you could use them:\nLink: {0}\n \"\"\".format(__RADISH_DOC__))\n abort(1)\n elif isinstance(exception, StepDefinitionNotFoundError):\n write_error(exception)\n write(\"\\nError Oracle says:\")\n write(\"\"\"There is no step definition for '{0}'.\nAll steps should be declared in a module located in {1}.\nFor example you could do:\n\n@step(r\"{2}\")\ndef my_step(step):\n raise NotImplementedError(\"This step is not implemented yet\")\n \"\"\".format(exception.step.sentence, world.config.basedir, exception.step.sentence))\n abort(1)\n elif isinstance(exception, SameStepError):\n write_error(exception)\n write(\"\\nError Oracle says:\")\n write(\"\"\"You have defined two step definitions with the same Regular Expression.\nThis is invalid since radish does not know which one is the one to go with.\nIf you have two similar step definition expressions but one's sentence is a subset of the other\nyou may want to add a $ to mark the sentence's end - take care of the code order - first come, first served.\n \"\"\")\n abort(1)\n elif isinstance(exception, RadishError):\n write_error(exception)\n abort(1)\n elif isinstance(exception, KeyboardInterrupt):\n write(\"Aborted by the user...\")\n abort(1)\n else:\n write_error(str(exception))\n abort(2)\n","sub_path":"radish/errororacle.py","file_name":"errororacle.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"413900619","text":"import cv2\r\nimport numpy as np \r\n\r\nimg=cv2.imread('IMG_3879.jpg')\r\nresized=cv2.resize(img,(640,640))\r\nimage=cv2.cvtColor(resized,cv2.COLOR_BGR2GRAY)\r\nkernel=np.ones((5,5))\r\ngaussian_blur=cv2.GaussianBlur(image,(5,5),2)\r\nedge=cv2.Canny(gaussian_blur,40,280)\r\ncontours,hierarchy=cv2.findContours(edge,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\nareas=[cv2.contourArea(c) for c in 
contours]\r\nmax_index=np.argmax(areas)\r\nmax_contour=contours[max_index]\r\n\r\nperimeter=cv2.arcLength(max_contour,True)\r\nROI=cv2.approxPolyDP(max_contour,0.01*perimeter,True)\r\ncv2.drawContours(img,[ROI],-1,(0,255,0),2)\r\n\r\npts_1=np.array([ROI[0],ROI[1],ROI[3],ROI[2]],np.float32)\r\npts_2=np.array([(0,0),(500,0),(0,500),(500,500)],np.float32)\r\nperspective=cv2.getPerspectiveTransform(pts_1,pts_2)\r\ntransformed=cv2.warpPerspective(resized,perspective,(500,500))\r\ncv2.imshow('img',edge)\r\ncv2.imshow('display',resized)\r\ncv2.imshow('output',transformed)\r\ncv2.waitKey(0)\r\n\r\n","sub_path":"day7/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"177527430","text":"import socket\nimport config\nimport getopt\nimport sys\nfrom automazione_tende import AutomazioneTende\nfrom logger import Logger\n\nHOST: str = config.Config.getValue(\"loopback_ip\", \"server\") # Standard loopback interface address (localhost)\nPORT: str = config.Config.getInt(\"port\", \"server\") # Port to listen on (non-privileged ports are > 1023)\nTELESCOPE_PLUGIN: str = \"simulator\"\nMOCK: bool = False\npark_alt = config.Config.getValue(\"park_alt\", \"telescope\")\npark_az = config.Config.getValue(\"park_az\", \"telescope\")\nflat_alt = config.Config.getValue(\"flat_alt\", \"telescope\")\nflat_az = config.Config.getValue(\"flat_az\", \"telescope\")\n\ntry:\n opts, _ = getopt.getopt(sys.argv[1:], \"ms\", [\"mock\", \"sky\"])\nexcept getopt.GetoptError:\n Logger.getLogger().exception(\"parametri errati\")\n exit(2) # esce dall'applicazione con errore\nfor opt, _1 in opts:\n if opt in ('-m', '--mock'):\n MOCK = True\n elif opt in ('-s', '--sky'):\n TELESCOPE_PLUGIN = \"theskyx\"\n\nautomazioneTende: AutomazioneTende = AutomazioneTende(TELESCOPE_PLUGIN, mock=MOCK)\nerror_level: int = 0\ntry:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen()\n Logger.getLogger().info(\"Server avviato\")\n while True:\n conn, _ = s.accept()\n with conn:\n while True:\n Logger.getLogger().debug(automazioneTende.crac_status)\n data: bytes = conn.recv(1)\n Logger.getLogger().debug(\"Data: %s\", data)\n\n if not data or data == b'E':\n if automazioneTende.started:\n automazioneTende.started = False\n automazioneTende.park_curtains()\n automazioneTende.move_tele(0, park_alt, park_az)\n automazioneTende.close_roof()\n try:\n conn.close()\n finally:\n if data == b'-':\n automazioneTende.exit_program()\n exit(0)\n break\n\n elif data == b\"1\":\n automazioneTende.started = True\n\n elif data == b'0':\n automazioneTende.started = False\n\n elif data == b'R':\n Logger.getLogger().debug(\"chiamata del metodo per apertura tetto (automazioneTende.open_roof) \")\n automazioneTende.open_roof()\n\n elif data == b'T':\n Logger.getLogger().debug(\"chiamata del metodo per chiusura tetto (automazioneTende.open_roof) \")\n automazioneTende.close_roof()\n\n elif data == b'P':\n Logger.getLogger().debug(\"chiamata al metodo telescopio.park_tele\")\n automazioneTende.move_tele(0, park_alt, park_az)\n\n elif data == b'F':\n Logger.getLogger().debug(\"chiamata al metodo telescopio.flat_tele\")\n automazioneTende.move_tele(0, flat_alt, flat_az)\n\n elif data == b'L':\n Logger.getLogger().debug(\"chiamata al metodo accensione pannello flat\")\n automazioneTende.panel_on()\n\n elif data == b'D':\n Logger.getLogger().debug(\"chiamata al metodo spegnimento pannello flat\")\n 
automazioneTende.panel_off()\n\n elif data == b'W':\n Logger.getLogger().debug(\"chiamata al metodo accensione alimentatori\")\n automazioneTende.power_on_tele()\n\n elif data == b'X':\n Logger.getLogger().debug(\"chiamata al metodo spegnimento alimentatori\")\n automazioneTende.power_off_tele()\n\n elif data == b'K':\n Logger.getLogger().debug(\"chiamata al metodo accensione luci cupola\")\n automazioneTende.light_on()\n\n elif data == b'J':\n Logger.getLogger().debug(\"chiamata al metodo spegnimento luci cupola\")\n automazioneTende.light_off()\n\n elif data == b'A':\n Logger.getLogger().debug(\"chiamata al metodo accensione ausiliare\")\n automazioneTende.power_on_ccd()\n\n elif data == b'O':\n Logger.getLogger().debug(\"chiamata al metodo spegnimento ausiliare\")\n automazioneTende.power_off_ccd()\n\n elif data == b'S':\n Logger.getLogger().debug(\"chiamata al metodo sincronizzazione\")\n automazioneTende.time_sync()\n\n Logger.getLogger().debug(\"chiamata al metodo per muovere le tendine (automazioneTende.exec) %s\", automazioneTende.started)\n automazioneTende.exec()\n\n updated_crac_status = repr(automazioneTende.read()).encode(\"UTF-8\")\n Logger.getLogger().debug(updated_crac_status)\n conn.sendall(updated_crac_status)\n\nexcept (KeyboardInterrupt, SystemExit) as e:\n Logger.getLogger().info(\"Intercettato CTRL+C: \" + str(e))\nexcept Exception as e:\n Logger.getLogger().exception(\"altro errore: \" + str(e))\n error_level = -1\nfinally:\n automazioneTende.exit_program(error_level)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"642700138","text":"'''\nMake a program that reads 3 integer values and present the greatest one \nfollowed by the message \"eh o maior\". 
Use the following formula:\n\nThe ABS function returns the absolute value of its argument, \nwhich can be a positive or negative real number, \nor a complex number.\n\nMaiorAB = (a+b+abs(a-b))/2\n\nInput\nThe input file contains 3 integer values.\n\nOutput\nPrint the greatest of these three values followed by a space and \nthe message “eh o maior”.\n'''\nimport math\n\nvalores = input().split(\" \")\n\na = int(valores[0])\nb = int(valores[1])\nc = int(valores[2])\n\nmaiorab = (a+b+abs(a-b))/2\nmaiorac = (maiorab+c+abs(maiorab-c))/2\n\nprint(\"%d eh o maior\" %(maiorac))\n\n\n\n\n\n\n\n","sub_path":"exercicios_uri/beginner/1013_eb.py","file_name":"1013_eb.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"402306790","text":"import argparse\nimport csv\nimport pathlib\nimport typing as ty\nimport unittest\n\nPARSER = argparse.ArgumentParser()\nPARSER.add_argument(\"--input_csvs\", nargs=\"*\")\nPARSER.add_argument(\"outputfile\")\n\n\ndef write_to_outfile(lines: ty.Iterable[ty.List[str]], outpath: pathlib.Path) -> None:\n with outpath.open(\"w\") as outf:\n writer = csv.writer(outf)\n writer.writerows(lines)\n\n\ndef fileinputrows(inputpath: pathlib.Path) -> ty.Iterable[ty.List[str]]:\n inputname = inputpath.name\n with inputpath.open() as inputfile:\n reader = csv.reader(inputfile)\n for row in reader:\n yield [inputname] + list(row)\n\n\ndef parse_inputrows(inputpaths: ty.List[pathlib.Path]) -> ty.Iterable[ty.List[str]]:\n for inputpath in inputpaths:\n yield from fileinputrows(inputpath)\n\n\ndef main() -> None:\n args = PARSER.parse_args()\n input_csv = [pathlib.Path(inf) for inf in args.input_csvs]\n inputrows = parse_inputrows(input_csv)\n write_to_outfile(inputrows, pathlib.Path(args.outputfile))\n\n\nclass TestConcatCsv(unittest.TestCase):\n def test_parse_args(self):\n raw_args = [\"--input_csvs\", \"first.csv\", \"second.csv\", \"--\", \"output.csv\"]\n args = PARSER.parse_args(raw_args)\n self.assertEqual(args.input_csvs, [\"first.csv\", \"second.csv\"])\n self.assertEqual(args.outputfile, \"output.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"samplecode/singularity/concat_csv.py","file_name":"concat_csv.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198096260","text":"import torch\nimport torch.nn.functional as F\nimport warnings\nimport numpy as np\n\n\ndef th_cov(x):\n \"\"\"\n :param x: N x M x D\n :return: N x D x D\n \"\"\"\n\n x = x - torch.mean(x, dim=1, keepdim=True) # (N, M, D)\n cov = torch.bmm(x.permute(0, 2, 1), x) / x.size(1) # (N, D, D)\n return cov\n\n\ndef cdist(x, y):\n \"\"\"\n :param x: (N, M, D)\n :param y: (N, k, D)\n :return: (N, M, K)\n \"\"\"\n if x.dim() == 2:\n x = x.unsqueeze(dim=2)\n y = y.unsqueeze(dim=2)\n dist0 = torch.sum(x ** 2.0, dim=-1) # (N, M)\n dist1 = torch.sum(y ** 2.0, dim=-1) # (N, k)\n dist2 = torch.bmm(x, y.permute(0, 2, 1)) # (N, M, k)\n dist = dist0.unsqueeze(dim=2) + dist1.unsqueeze(dim=1) - 2.0 * dist2 # (N, M, k)\n return dist\n\n\nclass ClusterError(Exception):\n pass\n\n\ndef _missing_warn():\n \"\"\"Print a warning when called.\"\"\"\n warnings.warn(\"One of the clusters is empty. \"\n \"Re-run kmeans with a different initialization.\")\n\n\ndef _missing_raise():\n \"\"\"raise a ClusterError when called.\"\"\"\n raise ClusterError(\"One of the clusters is empty. 
\"\n \"Re-run kmeans with a different initialization.\")\n\n\n_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}\n\n\ndef _kpoints(data, k):\n \"\"\"\n data : ndarray\n A N x M x D array\n k : int\n Number of samples to generate.\n Returns\n -------\n x : ndarray\n A N x k x D containing the initial centroids\n \"\"\"\n idx = torch.stack([torch.randperm(data.size(1), device=data.device)[:k]\n for _ in range(data.size(0))], dim=0) # (N, k)\n if data.dim() > 2:\n idx = idx.unsqueeze(dim=2).expand(-1, -1, data.size(2)) # (N, k, D)\n init = torch.gather(data, dim=1, index=idx)\n return init\n\n\n# def _kpoints2(data, k):\n# \"\"\"\n# data : ndarray\n# A N x M x D array\n# k : int\n# Number of samples to generate.\n# Returns\n# -------\n# x : ndarray\n# A N x k x D containing the initial centroids\n# \"\"\"\n# idx = torch.stack([torch.randperm(data.size(1))[:k] + i * data.size(1) for i in range(data.size(0))], dim=0) # (N*k)\n# data = data.view(-1, data.size(2))\n# init = data[idx]\n# init = init.view(-1, k, data.size(1))\n# return init\n\n\ndef _krandinit(data, k):\n \"\"\"Returns k samples of a random variable which parameters depend on data.\n\n More precisely, it returns k observations sampled from a Gaussian random\n variable which mean and covariances are the one estimated from data.\n\n Parameters\n ----------\n data : ndarray N x M x D\n k : int\n Number of samples to generate.\n\n Returns\n -------\n x : ndarray\n A 'k' by 'N' containing the initial centroids\n \"\"\"\n mu = torch.mean(data, dim=1, keepdim=True) # (N, 1, D)\n\n if data.dim() == 2:\n std = torch.std(data, dim=1, keepdim=True) # (N, 1)\n x = torch.randn((data.size(0), k), dtype=data.dtype, device=data.device) # (N, k)\n x *= std # (N, k)\n elif data.size(2) > data.size(1):\n # initialize when the covariance matrix is rank deficient\n _, s, v = torch.svd(data - mu, some=True) # (N, M, M) (N, M) (N, D, M)\n x = torch.randn((s.size(0), k, s.size(1)), dtype=s.dtype, device=s.device) # (N, k, M)\n sVh = s[..., None] * v.permute(0, 2, 1) / np.sqrt(data.size(1) - 1) # (N, M, D) = (N, M, 1) * (N, M, D) / (M - 1)\n x = torch.bmm(x, sVh) # (N, k, D) = (N, k, M) x (N, M, D)\n else:\n cov = th_cov(data) # (N, D, D)\n eye = torch.eye(data.size(2), dtype=data.dtype, device=data.device) # (D, D)\n cov = cov + eye * 1e-8\n x = torch.randn((data.size(0), k, data.size(2)), dtype=data.dtype, device=data.device) # (N, k, D)\n x = torch.bmm(x, torch.cholesky(cov, upper=True)) # (N, k, D) = (N, k, D) x (N, D, D)\n x += mu\n return x\n\n\ndef _kpp(data, k):\n \"\"\" Picks k points in data based on the kmeans++ method\n\n Parameters\n ----------\n data : ndarray\n Expect a rank 1 or 2 array. Rank 1 are assumed to describe one\n dimensional data, rank 2 multidimensional data, in which case one\n row is one observation.\n k : int\n Number of samples to generate.\n\n Returns\n -------\n init : ndarray\n A 'k' by 'N' containing the initial centroids\n\n References\n ----------\n .. [1] D. Arthur and S. 
Vassilvitskii, \"k-means++: the advantages of\n careful seeding\", Proceedings of the Eighteenth Annual ACM-SIAM Symposium\n on Discrete Algorithms, 2007.\n \"\"\"\n if data.dim() > 2:\n init = torch.zeros((data.size(0), k, data.size(2)), dtype=data.dtype, device=data.device)\n else:\n init = torch.zeros((data.size(0), k), dtype=data.dtype, device=data.device)\n\n for i in range(k):\n if i == 0:\n idx = torch.randint(data.size(1), (data.size(0), 1), device=data.device) # (N, 1)\n else:\n D2 = torch.min(cdist(data, init[:, :i]), dim=2)[0] # (N, M)\n probs = D2 / (D2.sum(dim=1, keepdim=True) + 1e-8) # (N, M)\n cumprobs = torch.cumsum(probs, dim=1) # (N, M)\n r = torch.rand((data.size(0), 1), dtype=data.dtype, device=data.device) # (N, 1)\n cumprobs = torch.cat([cumprobs, r], dim=1) # (N, M+1)\n idx = torch.argsort(cumprobs, dim=1) # (N, M+1)\n idx = torch.nonzero(idx == cumprobs.size(1)-1)[:, 1:2] # (N, 1)\n idx[idx == cumprobs.size(1)-1] = cumprobs.size(1)-2 # (N, 1)\n if data.dim() > 2:\n idx = idx.unsqueeze(dim=2).expand(-1, -1, data.size(2)) # (N, 1, D)\n init[:, i:i+1] = torch.gather(data, dim=1, index=idx) # (N, k, D)\n return init\n\n\n_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}\n\n\ndef whiten(obs, check_finite=True):\n \"\"\"\n Normalize a group of observations on a per feature basis.\n\n Before running k-means, it is beneficial to rescale each feature\n dimension of the observation set with whitening. Each feature is\n divided by its standard deviation across all observations to give\n it unit variance.\n\n Parameters\n ----------\n obs : ndarray N x M x D\n Each row of the array is an observation. The\n columns are the features seen during each observation.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n Default: True\n\n Returns\n -------\n result : ndarray\n Contains the values in `obs` scaled by the standard deviation of each column.\n \"\"\"\n std_dev = torch.std(obs, dim=1, keepdim=True) # (N, 1, D)\n zero_std_mask = std_dev == 0\n if zero_std_mask.any():\n std_dev[zero_std_mask] = 1.0\n warnings.warn(\"Some columns have standard deviation zero. 
\"\n \"The values of these columns will not change.\",\n RuntimeWarning)\n return obs / std_dev\n\n\n# def vq(obs, code_book, check_finite=True):\n# \"\"\"\n# The algorithm computes the euclidian distance between each\n# observation and every frame in the code_book.\n# Returns\n# -------\n# code : ndarray\n# code[i] gives the label of the ith obversation, that its code is code_book[code[i]].\n# mind_dist : ndarray\n# min_dist[i] gives the distance between the ith observation and its corresponding code.\n# \"\"\"\n# if obs.dim() != code_book.dim():\n# raise ValueError(\"Observation and code_book should have the same rank\")\n#\n# if obs.dim() == 2:\n# obs = obs.unsqueeze(dim=-1)\n# code_book = code_book.unsqueeze(dim=-1)\n#\n# dist = cdist(obs, code_book)\n# min_dist, code = torch.min(dist, dim=-1)\n# return code, min_dist\n\n\ndef vq(obs, code_book, check_finite=True):\n \"\"\"\n The algorithm computes the euclidian distance between each\n observation and every frame in the code_book.\n Returns\n -------\n code : ndarray\n code[i] gives the label of the ith obversation, that its code is code_book[code[i]].\n mind_dist : ndarray\n min_dist[i] gives the distance between the ith observation and its corresponding code.\n \"\"\"\n if obs.dim() != code_book.dim():\n raise ValueError(\"Observation and code_book should have the same rank\")\n\n if obs.dim() == 2:\n obs = obs.unsqueeze(dim=-1)\n code_book = code_book.unsqueeze(dim=-1)\n\n dist = cdist(obs, code_book)\n code = torch.argmin(dist, dim=-1)\n return code\n\n\ndef update_cluster_means(obs, labels, nc):\n \"\"\"\n The update-step of K-means. Calculate the mean of observations in each\n cluster.\n Parameters\n ----------\n obs : ndarray\n N x M x D\n labels : ndarray\n N x M The label of each observation.\n nc : int\n The number of centroids.\n Returns\n -------\n cb : ndarray\n The new code book.\n has_members : ndarray\n A boolean array indicating which clusters have members.\n Notes\n -----\n The empty clusters will be set to all zeros and the curresponding elements\n in `has_members` will be `False`. 
The upper level function should decide\n how to deal with them.\n \"\"\"\n if labels.dim() != 2:\n raise ValueError('labels must be an 2d array')\n labels = F.one_hot(labels, num_classes=nc).type_as(obs) # (N, M, k)\n obs_count = torch.sum(labels, dim=1) # (N, k)\n cb = torch.bmm(labels.permute(0, 2, 1), obs) # (N, k, D)\n cb = cb / (obs_count.unsqueeze(dim=-1) + 1e-8) # (N, k, D)\n has_members = obs_count > 0 # (N, k)\n return cb, has_members\n\n\ndef kmeans2(data, k, iter=10, thresh=1e-5, minit='random', missing='warn', check_finite=True):\n\n if int(iter) < 1:\n raise ValueError(\"Invalid iter (%s), must be a positive integer.\" % iter)\n try:\n miss_meth = _valid_miss_meth[missing]\n except KeyError:\n raise ValueError(\"Unknown missing method %r\" % (missing,))\n\n if data.dim() == 2:\n d = 1\n elif data.dim() == 3:\n d = data.size(2)\n else:\n raise ValueError(\"Input of rank > 3 or rank < 2 is not supported.\")\n\n if data.numel() < 1:\n raise ValueError(\"Empty input is not supported.\")\n\n # If k is not a single value it should be compatible with data's shape\n if minit == 'matrix' or not np.isscalar(k):\n code_book = torch.as_tensor(k, dtype=data.dtype, device=data.device)\n if data.dim() != code_book.dim():\n raise ValueError(\"k array doesn't match data rank\")\n nc = code_book.size(1)\n if data.dim() > 2 and code_book.size(2) != d:\n raise ValueError(\"k array doesn't match data dimension\")\n else:\n nc = int(k)\n if nc < 1:\n raise ValueError(\"Cannot ask kmeans2 for %d clusters (k was %s)\" % (nc, k))\n elif nc != k:\n warnings.warn(\"k was not an integer, was converted.\")\n try:\n init_meth = _valid_init_meth[minit]\n except KeyError:\n raise ValueError(\"Unknown init method %r\" % (minit,))\n else:\n code_book = init_meth(data, k)\n\n for i in range(iter):\n # Compute the nearest neighbor for each obs using the current code book\n # label = vq(data, code_book)[0]\n label = vq(data, code_book)\n # Update the code book by computing centroids\n new_code_book, has_members = update_cluster_means(data, label, nc)\n if not has_members.all():\n miss_meth()\n # Set the empty clusters to their previous positions\n new_code_book[~has_members] = code_book[~has_members]\n code_book = new_code_book\n\n return code_book, label\n","sub_path":"PCB_CSALR/reid/models/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":12183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281568617","text":"import sqlite3\nimport click\nfrom datetime import datetime\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\n\n# get the list of the lists for a specified userID\n\n\ndef get_lists(username):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT userID FROM users WHERE username = '{username}' ;\"\"\".format(\n username=username))\n userIdent = cursor.fetchone()[0]\n # print(userIdent)\n cursor.execute(\"\"\" SELECT listID, listname FROM lists WHERE userID = '{userID}'; \"\"\".format(\n userID=userIdent))\n db_lists = cursor.fetchall()\n print(db_lists)\n lists = []\n tasks = []\n\n for i in range(len(db_lists)):\n row = db_lists[i][0]\n lists.append(row)\n\n for l in lists:\n cursor.execute(\n \"\"\" SELECT listID, taskID,taskname,deadline,completion,status FROM tasks WHERE listID = '{listID}'; \"\"\".format(listID=l))\n db_tasks = cursor.fetchall()\n for t in db_tasks:\n tasks.append(t)\n print(db_tasks)\n\n 
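# Note: the queries in this module splice values in with str.format,\n    # which breaks on input containing quotes. A minimal parameterized\n    # sketch of the username lookup above (illustrative only, not wired\n    # into this function):\n    #\n    #   cursor.execute(\"SELECT userID FROM users WHERE username = ?\",\n    #                  (username,))\n    #\n    # sqlite3 fills each ? placeholder itself, so no manual quoting is needed.\n    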
connection.commit()\n    cursor.close()\n    connection.close()\n    overdue = []\n    for i in range(len(tasks)):\n        if tasks[i][3] is not None:\n            now = datetime.now()\n            date_object = datetime.strptime(tasks[i][3], \"%Y-%m-%d %H:%M:%S\")\n            print(\"date_object =\", date_object)\n            if now > date_object:\n                print(\"overdue\")\n                print(now)\n                overdue.append(1)\n            else:\n                overdue.append(0)\n        else:\n            overdue.append(0)  # keep overdue index-aligned with tasks\n\n    return db_lists, tasks, overdue\n\n\n# get the list of the tasks for a specified listID\ndef get_tasks(list):\n    connection = sqlite3.connect('project.db', check_same_thread=False)\n    cursor = connection.cursor()\n    cursor.execute(\n        \"\"\" SELECT taskname FROM tasks WHERE listID = '{list}' ORDER BY listID DESC; \"\"\".format(list=list))\n    db_tasks = cursor.fetchall()\n    tasks = []\n\n    for i in range(len(db_tasks)):\n        row = db_tasks[i][0]\n        tasks.append(row)\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n\n    return tasks\n\n\ndef get_tasktext(tid):\n    connection = sqlite3.connect('project.db', check_same_thread=False)\n    cursor = connection.cursor()\n    cursor.execute(\n        \"\"\" SELECT taskname,deadline,completion,status FROM tasks WHERE taskID = '{tid}';\"\"\".format(tid=tid))\n    tasktext = cursor.fetchone()\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n\n    return tasktext\n\n# create a new list for a specified userID\n\n\ndef add_list(userID, listname):\n    connection = sqlite3.connect('project.db', check_same_thread=False)\n    cursor = connection.cursor()\n    cursor.execute(\"\"\" INSERT INTO lists(userID, listname, numberoftasks) VALUES ('{userID}', '{listname}', '{numberoftasks}')\"\"\".format(\n        userID=userID, listname=listname, numberoftasks=0))\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n    print(listname, \"successfully added to user\", userID)\n    return listname + \" successfully added to user \" + str(userID)\n\n# create a new task for a specified listID\n\n\ndef add_task(listID, taskname):\n    connection = sqlite3.connect('project.db', check_same_thread=False)\n    cursor = connection.cursor()\n    cursor.execute(\"\"\" INSERT INTO tasks(listID, taskname, completion) VALUES ('{listID}', '{taskname}',0)\"\"\".format(\n        listID=listID, taskname=taskname))\n\n    cursor.execute(\n        \"\"\" SELECT numberoftasks FROM lists WHERE listID = '{listID}' ORDER BY listID DESC; \"\"\".format(listID=listID))\n    newnumberoftasks = cursor.fetchone()[0] + 1\n\n    # cursor.execute(\"\"\" UPDATE lists SET numberoftasks = 2 WHERE listID = '{listID}'\"\"\".format(listID = listID))\n    cursor.execute(\"\"\" UPDATE lists SET numberoftasks = '{newnumberoftasks}' WHERE listID = '{listID}'\"\"\".format(\n        newnumberoftasks=newnumberoftasks, listID=listID))\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n\n    return print(taskname, \"successfully added to list\", listID)\n\n# modify a task\n\n\ndef update_task(taskID, newtaskname, newdeadline, newcompletion, newstatus):\n    connection = sqlite3.connect('project.db', check_same_thread=False)\n    cursor = connection.cursor()\n    cursor.execute(\"\"\" UPDATE tasks SET taskname = '{newtaskname}', deadline='{newdeadline}',completion='{newcompletion}',status='{newstatus}' WHERE taskID = '{taskID}'\"\"\".format(\n        newtaskname=newtaskname, taskID=taskID, newdeadline=newdeadline, newcompletion=newcompletion, newstatus=newstatus))\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n\n    return print(\"Task ID\", taskID, \"successfully updated to\", newtaskname)\n\n# deletes a task based on the taskID\n\n\ndef delete_task(taskID):\n    connection = sqlite3.connect('project.db', 
check_same_thread=False)\n cursor = connection.cursor()\n\n cursor.execute(\n \"\"\" SELECT listID FROM tasks WHERE taskID = '{taskID}' ORDER BY listID DESC; \"\"\".format(taskID=taskID))\n listID = cursor.fetchone()[0]\n\n cursor.execute(\n \"\"\" DELETE FROM tasks WHERE taskID = '{taskID}'\"\"\".format(taskID=taskID))\n\n cursor.execute(\n \"\"\" SELECT numberoftasks FROM lists WHERE listID = '{listID}' ORDER BY listID DESC; \"\"\".format(listID=listID))\n newnumberoftasks = max(cursor.fetchone()[0] - 1, 0)\n\n # cursor.execute(\"\"\" UPDATE lists SET numberoftasks = 2 WHERE listID = '{listID}'\"\"\".format(listID = listID))\n cursor.execute(\"\"\" UPDATE lists SET numberoftasks = '{newnumberoftasks}' WHERE listID = '{listID}'\"\"\".format(\n newnumberoftasks=newnumberoftasks, listID=listID))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return print(taskID, \"successfully deleted\")\n# modify a list\n\n\ndef update_list(listID, newlistname):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" UPDATE lists SET listname = '{newlistname}' WHERE listID = '{listID}'\"\"\".format(\n newlistname=newlistname, listID=listID))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return print(\"List ID\", listID, \"successfully update to \", newlistname)\n\n# deletes a list based on the listID\n\n\ndef delete_list(list):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"\"\" DELETE FROM lists WHERE listID = '{list}'\"\"\".format(list=list))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return print(list, \"successfully deleted\")\n\n\ndef check_pw(username):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT password FROM users WHERE username = '{username}' ORDER BY userID DESC; \"\"\".format(\n username=username))\n password = cursor.fetchone()[0]\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return password\n\n\ndef check_pw_admin(username):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT password FROM admin WHERE username = '{username}' ORDER BY username DESC; \"\"\".format(\n username=username))\n\n password = cursor.fetchone()[0]\n connection.commit()\n cursor.close()\n connection.close()\n\n return password\n\n\ndef signup(username, firstname, lastname, password, email):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT password FROM users WHERE username = '{username}' ORDER BY userID DESC; \"\"\".format(\n username=username))\n exist = cursor.fetchone()\n\n if exist is None:\n cursor.execute(\"\"\" INSERT INTO users(username, firstname, lastname, password, email)VALUES('{username}', '{firstname}', '{lastname}','{password}','{email}')\"\"\".format(\n username=username, firstname=firstname, lastname=lastname, password=password, email=email))\n connection.commit()\n cursor.close()\n connection.close()\n else:\n return('User already existed!!')\n\n return 'You have successfully signed up!'\n\n\ndef updateSettings(username, firstname, lastname, password, email):\n\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\"UPDATE users SET 
firstname='{firstname}',lastname='{lastname}',email='{email}',password='{password}' WHERE username = '{username}'\"\"\".format(\n username=username, firstname=firstname, lastname=lastname, password=password, email=email))\n connection.commit()\n cursor.close()\n connection.close()\n return 'Settings successfully updated'\n\n\ndef check_users():\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT username FROM users ORDER BY userID DESC; \"\"\")\n db_users = cursor.fetchall()\n users = []\n\n for i in range(len(db_users)):\n person = db_users[i][0]\n users.append(person)\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return users\n\n\ndef get_users():\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"\"\" SELECT username,userID,firstname,lastname,password,email FROM users ORDER BY userID DESC; \"\"\")\n db_users = cursor.fetchall()\n\n connection.commit()\n cursor.close()\n connection.close()\n print(db_users)\n return db_users\n\n\ndef get_username(userId):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"\"\" SELECT username FROM users WHERE userID='{userId}'; \"\"\".format(userId=userId))\n username = cursor.fetchone()[0]\n\n connection.commit()\n cursor.close()\n connection.close()\n # print(username)\n return username\n\n\ndef getUserId(username):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\"\"\" SELECT userID FROM users WHERE username='{username}'; \"\"\".format(\n username=username))\n db_users = cursor.fetchall()\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return db_users[0][0]\n\n\ndef getUserDetails(userId):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"\"\" SELECT username, firstname, lastname, userID,email,password FROM users WHERE userID='{userId}'; \"\"\".format(userId=userId))\n userDetails = cursor.fetchone()\n\n connection.commit()\n cursor.close()\n connection.close()\n print(userDetails)\n return userDetails\n\n\ndef setLog(userId):\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"\"\" INSERT INTO logs(userId) VALUES ('{userId}');\"\"\".format(userId=userId))\n connection.commit()\n\n cursor.close()\n connection.close()\n\n\ndef getLogsCount():\n print(\"getLogs is running\")\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM logs\")\n (countLogs,) = cursor.fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n return countLogs\n\n\ndef getListsCount():\n print(\"getLists is running\")\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT COUNT(*) FROM lists\")\n (countLists,) = cursor.fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n return countLists\n\n\ndef getLastLogsCount():\n print(\"getLastLogs is running\")\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"select count(*) from logs where log >= datetime('now','-24 hour');\")\n (countLogs,) = cursor.fetchone()\n connection.commit()\n cursor.close()\n 
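# Every helper in this module repeats the same connect/commit/close\n    # sequence. A small context manager could factor that out (hypothetical\n    # helper, not used by the functions in this file):\n    #\n    #   from contextlib import contextmanager\n    #\n    #   @contextmanager\n    #   def db_cursor(path='project.db'):\n    #       conn = sqlite3.connect(path, check_same_thread=False)\n    #       try:\n    #           yield conn.cursor()\n    #           conn.commit()\n    #       finally:\n    #           conn.close()\n    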
connection.close()\n return countLogs\n\n\ndef getLastListsCount():\n print(\"getLastLists is running\")\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"select count(*) from lists where timestamp >= datetime('now','-24 hour');\")\n (countLists,) = cursor.fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n return countLists\n\n\ndef getLogsTable():\n print(\"getLogsTable is running\")\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT * FROM logs ORDER BY log DESC\")\n logsTable = cursor.fetchall()\n connection.commit()\n cursor.close()\n connection.close()\n return logsTable\n\n\ndef delete_user(uid):\n print(\"Deleting user\", uid)\n username = get_username(uid)\n lists, tasks, unused = get_lists(username)\n print(username)\n print(lists)\n print(tasks)\n for task in tasks:\n delete_task(task[1])\n for list in lists:\n delete_list(list[0])\n\n connection = sqlite3.connect('project.db', check_same_thread=False)\n cursor = connection.cursor()\n\n cursor.execute(\n \"\"\" DELETE FROM users WHERE userID = '{uid}'\"\"\".format(uid=uid))\n cursor.execute(\n \"\"\" DELETE FROM logs WHERE userid = '{uid}'\"\"\".format(uid=uid))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return print(\"User ID\", uid, \"successfully deleted\")\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"419305670","text":"# # DP Solution \n\n# Approach 2: Dynamic Programming\n# Intuition\n# Preliminary computations in Approach 1 are quite heavy, and could be optimised with dynamic programming.\n# Let's start from the array of ugly numbers which contains just one number - 1. \n# Let's use three pointers i2i_2i2​, i3i_3i3​ and i5i_5i5​, to mark the last ugly number which was multiplied by 2, 3 and 5, correspondingly.\n# The algorithm is straightforward: choose the smallest ugly number among 2×nums[i2]2 \\times \\textrm{nums}[i_2]2×nums[i2​], 3×nums[i3]3 \\times \\textrm{nums}[i_3]3×nums[i3​], and 5×nums[i5]5 \\times \\textrm{nums}[i_5]5×nums[i5​] \n# and add it into the array. \n# Move the corresponding pointer by one step. 
Repeat till you'll have 1690 ugly numbers.\nclass Solution:\n    def nthUglyNumber(self, n):\n        res = [1]\n        i2, i3, i5 = 0, 0, 0\n        for i in range(1690):\n            ugly = min(res[i2]*2, res[i3]*3, res[i5]*5)\n            res.append(ugly)\n            if ugly == res[i2] * 2:\n                i2 += 1\n            if ugly == res[i3] * 3:\n                i3 += 1\n            if ugly == res[i5] * 5:\n                i5 += 1\n        return res[n-1]\n\n# Heap Solution\nfrom heapq import heappop, heappush\nclass Solution:\n    def nthUglyNumber(self, n):\n        seen = {1, }\n        nums = []\n        heap = []\n        heappush(heap, 1)\n\n        for _ in range(n):\n            curr_ugly = heappop(heap)\n            nums.append(curr_ugly)\n            for i in [2, 3, 5]:\n                new_ugly = curr_ugly * i\n                if new_ugly not in seen:\n                    seen.add(new_ugly)\n                    heappush(heap, new_ugly)\n        return nums[n - 1]\n\n# Slightly Better still times out\n# class Solution:\n#     def nthUglyNumber(self, n: int) -> int:\n#         from collections import deque\n#         # We will store ugly numbers here\n#         res = [1]\n#         resCopy = set([1])\n#         twos, threes, fives = deque(), deque(), deque()\n#         while len(res) < n:\n#             lastNum = res[-1]\n#             twos.append(lastNum * 2)\n#             threes.append(lastNum * 3)\n#             fives.append(lastNum * 5)\n#             if twos[0] <= threes[0] and twos[0] <= fives[0]:\n#                 if twos[0] not in resCopy:\n#                     res.append(twos[0])\n#                     resCopy.add(twos[0])\n#                 twos.popleft()\n#             elif threes[0] <= twos[0] and threes[0] <= fives[0]:\n#                 if threes[0] not in resCopy:\n#                     res.append(threes[0])\n#                     resCopy.add(threes[0])\n#                 threes.popleft()\n#             else:\n#                 if fives[0] not in resCopy:\n#                     res.append(fives[0])\n#                     resCopy.add(fives[0])\n#                 fives.popleft()\n#         print(res)\n#         return res[-1]\n\n\n# Brute Force\n# class Solution:\n#     def nthUglyNumber(self, n: int) -> int:\n#         curr, num = 1, 1\n#         while num <= n:\n#             if self.isUgly(curr):\n#                 num += 1\n#             curr += 1\n#         return curr-1\n#\n#     def isUgly(self, num: int) -> bool:\n#         multiples = [30, 15, 6, 5, 3, 2]\n#         for x in multiples:\n#             while x <= num and num % x == 0:\n#                 num /= x\n#         return True if num == 1 else False\n\nsoln = Solution()\nprint(soln.nthUglyNumber(100))","sub_path":"leetcode/264. Ugly Number II/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"454792960","text":"from PyQt5.QtGui import QColor\nfrom PyQt5.QtCore import Qt, QPoint\n\n\nclass Figure:\n    def __init__(self):\n        self.__polygon = list()\n        self.__polygon.append([])\n        self.__polygon_state = 0\n        self.__edge_lst = list()\n\n        self.__state_fill = None\n\n        self.__is_close = False\n\n    # Fill functions\n\n    def __drawLine(self, painter, xn, yn, xk, yk):\n\n        painter.drawLine(xn, yn, xk, yk)\n\n        # !!! this insertion is not part of the drawing algorithm\n        # current edge\n        current_edge = ((xn, yn), (xk, yk))\n\n        self.__edge_lst.append(current_edge)\n        # !!! end of insertion
\n\n    # area fill\n    def fill(self, point, painter, image, color):\n\n        # Colour setup\n        color_edge = Qt.black\n\n        pen = painter.pen()\n        pen.setColor(color)\n        painter.setPen(pen)\n\n        # ---------------------------------------------\n\n        # Restore the saved fill state, if any\n        if self.__state_fill is not None:\n            stack = self.__state_fill\n        else:\n            stack = [point]\n\n        # Pop a pixel (x, y) off the stack\n        x, y = stack.pop()\n\n        # Remember the abscissa\n        current_x = x\n\n        # Paint the pixel (x, y)\n        painter.drawPoint(x, y)\n\n        # Left---------------------------------------\n        # Fill the span to the left of the seed\n        x -= 1\n        while QColor(image.pixel(x, y)) != color_edge:\n            painter.drawPoint(x, y)\n            x = x - 1\n\n        # Save the leftmost pixel\n        x_left = x + 1\n        # End----------------------------------------\n\n        x = current_x\n\n        # Right--------------------------------------\n        # Fill the span to the right of the seed\n        x += 1\n        while QColor(image.pixel(x, y)) != color_edge:\n            painter.drawPoint(x, y)\n            x = x + 1\n\n        # Save the rightmost pixel\n        x_right = x - 1\n        # End----------------------------------------\n\n        # Find_Top-----------------------------------\n        y = y + 1\n        x = x_left\n        # Look for a seed on the row above\n        while x <= x_right:\n\n            # Search for an unfilled pixel\n            flag = False\n            cur_col = QColor(image.pixel(x, y))\n            while cur_col != color_edge and cur_col != color and x <= x_right:\n                if not flag:\n                    flag = True\n                x += 1\n                cur_col = QColor(image.pixel(x, y))\n\n            # If an unfilled pixel was found, push the rightmost\n            # unfilled pixel of the interval onto the stack.\n            if flag:\n                cur_col = QColor(image.pixel(x, y))\n                if x == x_right and cur_col != color_edge and cur_col != color:\n                    stack.append((x, y))\n                else:\n                    stack.append((x - 1, y))\n\n            # Continue the search if the interval was interrupted\n            current_x = x\n            cur_col = QColor(image.pixel(x, y))\n            while (cur_col == color_edge or cur_col == color) and x < x_right:\n                x = x + 1\n                cur_col = QColor(image.pixel(x, y))\n\n            # Make sure we have moved on to the next pixel\n            if x == current_x:\n                x = x + 1\n        # End----------------------------------------\n\n        # Find_Down----------------------------------\n        # Look for a seed on the row below\n        y = y - 2\n        x = x_left\n        while x <= x_right:\n\n            # Search for an unfilled pixel\n            flag = False\n            cur_col = QColor(image.pixel(x, y))\n            while cur_col != color_edge and cur_col != color and x <= x_right:\n                if not flag:\n                    flag = True\n                x = x + 1\n                cur_col = QColor(image.pixel(x, y))\n\n            # If an unfilled pixel was found, push the rightmost\n            # unfilled pixel of the interval onto the stack.\n            if flag:\n                cur_col = QColor(image.pixel(x, y))\n                if x == x_right and cur_col != color_edge and cur_col != color:\n                    stack.append((x, y))\n                else:\n                    stack.append((x - 1, y))\n\n            # Continue the search if the interval was interrupted\n            current_x = x\n            cur_col = QColor(image.pixel(x, y))\n            while (cur_col == color_edge or cur_col == color) and x < x_right:\n                x = x + 1\n                cur_col = QColor(image.pixel(x, y))\n\n            # Make sure we have moved on to the next pixel\n            if x == current_x:\n                x = x + 1\n        # End----------------------------------------\n\n        self.__state_fill = stack\n        return bool(len(stack))
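\n\n    # fill() paints one seed's spans per call and parks the remaining\n    # seeds in self.__state_fill, so the GUI can repaint between steps.\n    # A typical driver loop (sketch; names taken from the methods above):\n    #\n    #   while figure.fill(start_point, painter, image, QColor(255, 0, 0)):\n    #       pass  # one seed per call; loop until the seed stack drains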
\n\n    # utility functions\n\n    def is_close(self):\n        return self.__is_close\n\n    def set_close(self, f=False):\n        self.__is_close = f\n        self.__polygon.append([])\n        self.__polygon_state += 1\n\n    def add_point(self, x, y):\n        self.__is_close = False\n        self.__polygon[self.__polygon_state].append((x, y))\n\n    def draw(self, painter):\n\n        polygon_len = len(self.__polygon[self.__polygon_state])\n\n        if polygon_len > 1:\n            self.__drawLine(painter, self.__polygon[self.__polygon_state][polygon_len - 1][0],\n                            self.__polygon[self.__polygon_state][polygon_len - 1][1],\n                            self.__polygon[self.__polygon_state][polygon_len - 2][0],\n                            self.__polygon[self.__polygon_state][polygon_len - 2][1])\n\n    def close(self):\n        self.add_point(self.__polygon[self.__polygon_state][0][0],\n                       self.__polygon[self.__polygon_state][0][1])\n\n    def clean(self):\n        self.__polygon.clear()\n        self.__polygon.append([])\n        self.__polygon_state = 0\n        self.__edge_lst.clear()\n        self.__state_fill = None\n        self.__is_close = False\n","sub_path":"Computer Graphics/lab_06/Figure.py","file_name":"Figure.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"6410154","text":"\"\"\"Space for storing information about GCS.\n\n.. module:: __init__\n\n.. versionadded:: 0.1.1\n.. versionchanged:: 0.1.3\n\"\"\"\n\nimport inspect\nfrom os.path import dirname, abspath\n\n__title__ = \"GCS\"\n__description__ = \"Gestor de Carteras de Seguros\"\n\n__version__ = \"0.1.2\"\n__status__ = \"alpha\"\n\n__version_info__ = tuple([int(num) for num in __version__.split('.')])\n\n__author__ = 'Alvarez Alejandro'\n__contact__ = 'contacto@codigopython.com.ar'\n__homepage__ = 'http://www.codigopython.com.ar'\n__docformat__ = 'restructuredtext'\n__license__ = 'GPL v3'\n__keywords__ = \"seguros cartera productor asesor polizas siniestros\"\n\n# data for the graphical interface\n__icon__ = ''\n__dir__ = dirname(abspath(inspect.getfile(inspect.currentframe())))\n__dirTemplates__ = __dir__ + \"/templates\"\n","sub_path":"gcs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"628164496","text":"from PyQt5 import QtCore\nimport sourse.default_config as def_conf\n# import config\n\n\ndef init_settings():\n    settings = QtCore.QSettings(\"Agnus\", \"StrelA\")\n    settings.Format(QtCore.QSettings.IniFormat)\n    settings.Scope(QtCore.QSettings.UserScope)\n    return settings\n\n\ndef reset_to_default(settings):\n    settings.clear()\n\n    settings.setValue('language', def_conf.LANGUAGE)\n\n    settings.beginGroup(\"main_window\")\n    settings.setValue('size', def_conf.MAIN_WINDOW_SIZE)\n    settings.setValue('palette', def_conf.MAIN_WINDOW_PALETTE)\n    settings.endGroup()\n\n    settings.beginGroup(\"settings_window\")\n    settings.setValue('size', def_conf.SETTINGS_WINDOW_SIZE)\n    settings.setValue('palette', def_conf.SETTINGS_WINDOW_PALETTE)\n    settings.endGroup()\n\n    settings.beginGroup(\"connection_widget\")\n    settings.setValue('data_class_name', def_conf.DATA_CLASS_NAME)\n    settings.setValue('board_state_is_on', def_conf.BOARD_STATE)\n    settings.setValue('packet_size', def_conf.PACKET_SIZE)\n    settings.setValue('board_state_num', def_conf.BOARD_STATE_NUM)\n    settings.setValue('state_list', def_conf.STATE_LIST)\n    settings.setValue('errors_list', def_conf.ERRORS_LIST)\n    settings.endGroup()\n\n    settings.beginGroup(\"log\")\n    settings.setValue('path', def_conf.LOG_PATH)\n    settings.setValue('real_time', def_conf.REAL_TIME)\n    settings.setValue('time_delay', def_conf.TIME_DELAY)\n    settings.endGroup()\n\n    settings.beginGroup(\"socket\")\n    settings.setValue('path', def_conf.SOCKET_LOG_PATH)\n    settings.setValue('title', def_conf.SOCKET_LOG_TITLE)\n    settings.setValue('port', def_conf.SOCKET_PORT)\n    settings.setValue('prefix', 
def_conf.SOCKET_PREFIX)\n settings.setValue('format', def_conf.DATA_FORMAT)\n settings.endGroup()\n\n settings.beginGroup(\"graph_widget\")\n settings.setValue('is_on', def_conf.GRAPH_WIDGET)\n settings.setValue('automatic_position', def_conf.GRAPH_WIDGET_AUTOPOSITION)\n settings.beginGroup(\"graph\")\n settings.beginGroup(\"acceleration\")\n settings.setValue('is_on', def_conf.ACCEL_GRAPH)\n settings.setValue('num', def_conf.ACCEL_NUMBER)\n settings.setValue('position', def_conf.ACCEL_POSITION)\n settings.setValue('converter', [def_conf.accel_graph_x_converter,\n def_conf.accel_graph_y_converter,\n def_conf.accel_graph_z_converter])\n settings.setValue('colour', def_conf.ACCEL_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"gyro\")\n settings.setValue('is_on', def_conf.GYRO_GRAPH)\n settings.setValue('num', def_conf.GYRO_NUMBER)\n settings.setValue('position', def_conf.GYRO_POSITION)\n settings.setValue('converter', [def_conf.gyro_graph_x_converter,\n def_conf.gyro_graph_y_converter,\n def_conf.gyro_graph_z_converter])\n settings.setValue('colour', def_conf.GYRO_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"mag\")\n settings.setValue('is_on', def_conf.MAG_GRAPH)\n settings.setValue('num', def_conf.MAG_NUMBER)\n settings.setValue('position', def_conf.MAG_POSITION)\n settings.setValue('converter', [def_conf.mag_graph_x_converter,\n def_conf.mag_graph_y_converter,\n def_conf.mag_graph_z_converter])\n settings.setValue('colour', def_conf.MAG_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"pressure\")\n settings.setValue('is_on', def_conf.PRESSURE_GRAPH)\n settings.setValue('num', def_conf.PRESSURE_NUMBER)\n settings.setValue('position', def_conf.PRESSURE_POSITION)\n settings.setValue('converter', [def_conf.pressure_graph_converter])\n settings.setValue('colour', def_conf.PRESSURE_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"temperature\")\n settings.setValue('is_on', def_conf.TEMPERATURE_GRAPH)\n settings.setValue('num', def_conf.TEMPERATURE_NUMBER)\n settings.setValue('position', def_conf.TEMPERATURE_POSITION)\n settings.setValue('converter', [def_conf.temperature_graph_converter])\n settings.setValue('colour', def_conf.TEMPERATURE_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"lux\")\n settings.setValue('is_on', def_conf.LUX_GRAPH)\n settings.setValue('num', def_conf.LUX_NUMBER)\n settings.setValue('position', def_conf.LUX_POSITION)\n settings.setValue('converter', [def_conf.lux_graph_converter])\n settings.setValue('colour', def_conf.LUX_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"altitude\")\n settings.setValue('is_on', def_conf.ALTITUDE_GRAPH)\n settings.setValue('num', def_conf.ALTITUDE_NUMBER)\n settings.setValue('position', def_conf.ALTITUDE_POSITION)\n settings.setValue('converter', [def_conf.altitude_graph_converter,\n def_conf.altitude_baro_graph_converter])\n settings.setValue('colour', def_conf.ALTITUDE_COLOUR)\n settings.endGroup()\n settings.beginGroup(\"voltage\")\n settings.setValue('is_on', def_conf.VOLTAGE_GRAPH)\n settings.setValue('num', def_conf.VOLTAGE_NUMBER)\n settings.setValue('position', def_conf.VOLTAGE_POSITION)\n settings.setValue('converter', [def_conf.voltage_graph_1_converter,\n def_conf.voltage_graph_2_converter,\n def_conf.voltage_graph_3_converter])\n settings.setValue('colour', def_conf.VOLTAGE_COLOUR)\n settings.endGroup()\n settings.endGroup()\n settings.endGroup()\n\n settings.beginGroup(\"map_widget\")\n settings.setValue('is_on', def_conf.MAP_WIDGET)\n settings.setValue('center', 
def_conf.MAP_DEFAULT_CENTER)\n settings.setValue('zoom', def_conf.MAP_DEFAULT_ZOOM)\n settings.setValue('num', def_conf.GPS_NUMBER)\n settings.setValue('converter', def_conf.gps_converter)\n settings.endGroup()\n\n settings.beginGroup(\"model_widget\")\n settings.setValue('is_on', def_conf.MODEL_WIDGET)\n settings.setValue('num', def_conf.MODEL_NUMBER)\n settings.setValue('converter', [def_conf.model_yaw_converter,\n def_conf.model_pitch_converter,\n def_conf.model_roll_converter])\n settings.endGroup()\n\n\n\ndef check_settings(settings):\n if settings.allKeys() == []:\n return False\n else:\n return True\n","sub_path":"GCS/sourse/settings_control.py","file_name":"settings_control.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213655618","text":"import math\n\n\n\"\"\"\nimport turtle\n\nmyturtle = turtle.Turtle()\nmyturtle.circle(50)\nmyturtle.getscreen()\n\n\"\"\"\n\n\"\"\"\nDRAW DIAMOND\n\"\"\"\n\"\"\"\nN = 19\nsol = [[1] * N for row in xrange(N)]\n\nprint sol\n\nwidth = 0\nfor row in xrange(N):\n for col in xrange(N):\n if col == N/2:\n print width\n sol[row][col - width] = 0\n sol[row][col + width] = 0\n if row < N/2:\n width += 1\n else:\n width -= 1\n\nfor row in sol:\n print row\n\"\"\"\n\"\"\"\nDRAW Circle: given radius\nhttp://quiz.geeksforgeeks.org/draw-circle-without-floating-point-arithmetic/\n\nSolution:\nConsider a square from -r to +r\nCheck for each point if that belongs within circumference (X^2 + Y^2 <= R^2)\n\"\"\"\nr = 5\nfor i in range(-r, r+1):\n for j in range(-r, r+1):\n # Check if this point in circumeference belongs to Circle\n if (i*i + j*j) <= (r*r) + 1:\n print(\"*\", end=' ')\n else:\n print(\" \", end=' ')\n print(\" \", end=' ')\n print(\"\\n\")\n","sub_path":"Misc/Matrix/Matrix_printCircle.py","file_name":"Matrix_printCircle.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"242899592","text":"import logging\nimport multiprocessing as mp\nimport os\nimport time\nimport threading\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Tuple\nfrom typing import Optional\n\nfrom kubernetes.client.exceptions import ApiException\nimport yaml\n\nimport ray.autoscaler._private.monitor as monitor\nfrom ray._private import services\nfrom ray.autoscaler._private import commands\nfrom ray.ray_operator import operator_utils\nfrom ray.ray_operator.operator_utils import AUTOSCALER_RETRIES_FIELD\nfrom ray.ray_operator.operator_utils import STATUS_AUTOSCALING_EXCEPTION\nfrom ray.ray_operator.operator_utils import STATUS_ERROR\nfrom ray.ray_operator.operator_utils import STATUS_RUNNING\nfrom ray.ray_operator.operator_utils import STATUS_UPDATING\nfrom ray import ray_constants\n\nlogger = logging.getLogger(__name__)\n\n# Queue to process cluster status updates.\ncluster_status_q = mp.Queue() # type: mp.Queue[Tuple[str, str, str]]\n\n\nclass RayCluster():\n \"\"\"Manages an autoscaling Ray cluster.\n\n Attributes:\n config: Autoscaling configuration dict.\n subprocess: The subprocess used to create, update, and monitor the\n Ray cluster.\n \"\"\"\n\n def __init__(self, config: Dict[str, Any]):\n self.config = config\n self.name = self.config[\"cluster_name\"]\n self.namespace = self.config[\"provider\"][\"namespace\"]\n\n # Make directory for configs of clusters in the namespace,\n # if the directory doesn't exist already.\n 
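# (A one-step alternative sketch: os.makedirs(namespace_dir,\n        # exist_ok=True) performs the check and the create in a single\n        # race-free call.)\n        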
namespace_dir = operator_utils.namespace_dir(self.namespace)\n if not os.path.isdir(namespace_dir):\n os.mkdir(namespace_dir)\n self.config_path = operator_utils.config_path(\n cluster_namespace=self.namespace, cluster_name=self.name)\n\n # Tracks metadata.generation field of associated custom resource.\n # K8s increments this field whenever the spec of the custom resource is\n # updated.\n self._generation = 0\n # Tracks metadata.labels.autoscalerRetries field of the CR.\n # The operator increments this field whenever we attempt recovery from\n # autoscaler failure.\n self._num_retries = 0\n\n # Monitor subprocess\n self.subprocess = None # type: Optional[mp.Process]\n # Monitor logs for this cluster will be prefixed by the monitor\n # subprocess name:\n self.subprocess_name = \",\".join([self.name, self.namespace])\n self.monitor_stop_event = mp.Event()\n\n self.setup_logging()\n\n def create_or_update(self, restart_ray: bool = False) -> None:\n \"\"\" Create/update the Ray Cluster and run the monitoring loop, all in a\n subprocess.\n\n The main function of the Operator is managing the\n subprocesses started by this method.\n\n Args:\n restart_ray: If True, restarts Ray to recover from failure.\n \"\"\"\n self.do_in_subprocess(self._create_or_update, args=(restart_ray, ))\n\n def _create_or_update(self, restart_ray: bool = False) -> None:\n try:\n self.start_head(restart_ray=restart_ray)\n self.start_monitor()\n except Exception:\n # Report failed autoscaler status to trigger cluster restart.\n cluster_status_q.put((self.name, self.namespace,\n STATUS_AUTOSCALING_EXCEPTION))\n # `status_handling_loop` will increment the\n # `status.AutoscalerRetries` of the CR. A restart will trigger\n # at the subsequent \"MODIFIED\" event.\n raise\n\n def start_head(self, restart_ray: bool = False) -> None:\n self.write_config()\n # Don't restart Ray on head unless recovering from failure.\n no_restart = not restart_ray\n # Create or update cluster head and record config side effects.\n self.config = commands.create_or_update_cluster(\n self.config_path,\n override_min_workers=None,\n override_max_workers=None,\n no_restart=no_restart,\n restart_only=False,\n yes=True,\n no_config_cache=True,\n no_monitor_on_head=True)\n # Write the resulting config for use by the autoscaling monitor:\n self.write_config()\n\n def start_monitor(self) -> None:\n \"\"\"Runs the autoscaling monitor.\"\"\"\n ray_head_pod_ip = commands.get_head_node_ip(self.config_path)\n port = operator_utils.infer_head_port(self.config)\n redis_address = services.address(ray_head_pod_ip, port)\n self.mtr = monitor.Monitor(\n redis_address=redis_address,\n autoscaling_config=self.config_path,\n redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,\n prefix_cluster_info=True,\n stop_event=self.monitor_stop_event)\n self.mtr.run()\n\n def do_in_subprocess(self, f: Callable[[], None], args: Tuple) -> None:\n # First stop the subprocess if it's alive\n self.clean_up_subprocess()\n # Reinstantiate process with f as target and start.\n self.subprocess = mp.Process(\n name=self.subprocess_name, target=f, args=args, daemon=True)\n self.subprocess.start()\n\n def clean_up_subprocess(self):\n \"\"\"\n Clean up the monitor process.\n\n Executed when CR for this cluster is \"DELETED\".\n Executed when Autoscaling monitor is restarted.\n \"\"\"\n if self.subprocess and self.subprocess.is_alive():\n # Triggers graceful stop of the monitor loop.\n self.monitor_stop_event.set()\n self.subprocess.join()\n # Clears the event for subsequent runs of the 
monitor.\n self.monitor_stop_event.clear()\n\n def clean_up(self) -> None:\n \"\"\"Executed when the CR for this cluster is \"DELETED\".\n\n The key thing is to end the monitoring subprocess.\n \"\"\"\n self.clean_up_subprocess()\n self.clean_up_logging()\n self.delete_config()\n\n def setup_logging(self) -> None:\n \"\"\"Add a log handler which appends the name and namespace of this\n cluster to the cluster's monitor logs.\n \"\"\"\n self.handler = logging.StreamHandler()\n # Filter by subprocess name to get this cluster's monitor logs.\n self.handler.addFilter(\n lambda rec: rec.processName == self.subprocess_name)\n # Lines start with \",:\"\n logging_format = \":\".join(\n [self.subprocess_name, ray_constants.LOGGER_FORMAT])\n self.handler.setFormatter(logging.Formatter(logging_format))\n operator_utils.root_logger.addHandler(self.handler)\n\n def clean_up_logging(self) -> None:\n operator_utils.root_logger.removeHandler(self.handler)\n\n def set_config(self, config: Dict[str, Any]) -> None:\n self.config = config\n\n def write_config(self) -> None:\n \"\"\"Write config to disk for use by the autoscaling monitor.\"\"\"\n with open(self.config_path, \"w\") as file:\n yaml.dump(self.config, file)\n\n def delete_config(self) -> None:\n os.remove(self.config_path)\n\n def set_generation(self, generation: int) -> None:\n self._generation = generation\n\n def set_num_retries(self, num_retries: int) -> None:\n self._num_retries = num_retries\n\n def get_generation(self) -> int:\n return self._generation\n\n def get_num_retries(self) -> int:\n return self._num_retries\n\n\n# Maps ray cluster (name, namespace) pairs to RayCluster python objects.\nray_clusters = {} # type: Dict[Tuple[str, str], RayCluster]\n\n\ndef run_event_loop():\n # Instantiate event stream.\n if operator_utils.NAMESPACED_OPERATOR:\n raycluster_cr_stream = operator_utils.namespaced_cr_stream(\n namespace=operator_utils.OPERATOR_NAMESPACE)\n else:\n raycluster_cr_stream = operator_utils.cluster_scoped_cr_stream()\n\n # Run control loop.\n for event in raycluster_cr_stream:\n cluster_cr = event[\"object\"]\n cluster_name = cluster_cr[\"metadata\"][\"name\"]\n cluster_namespace = cluster_cr[\"metadata\"][\"namespace\"]\n event_type = event[\"type\"]\n handle_event(event_type, cluster_cr, cluster_name, cluster_namespace)\n\n\ndef handle_event(event_type, cluster_cr, cluster_name, cluster_namespace):\n # TODO: This only detects errors in the parent process and thus doesn't\n # catch cluster-specific autoscaling failures. 
Fix that (perhaps at\n    # the same time that we eliminate subprocesses).\n    try:\n        cluster_action(event_type, cluster_cr, cluster_name, cluster_namespace)\n    except Exception:\n        log_prefix = \",\".join([cluster_name, cluster_namespace])\n        if event_type in [\"ADDED\", \"MODIFIED\"]:\n            logger.exception(f\"{log_prefix}: Error while updating RayCluster.\")\n            cluster_status_q.put((cluster_name, cluster_namespace,\n                                  STATUS_ERROR))\n        elif event_type == \"DELETED\":\n            # Don't try to update CRD's status if the CRD is gone.\n            logger.exception(\n                f\"Error while deleting RayCluster {cluster_name}.\")\n\n\ndef cluster_action(event_type: str, cluster_cr: Dict[str, Any],\n                   cluster_name: str, cluster_namespace: str) -> None:\n\n    cluster_config = operator_utils.cr_to_config(cluster_cr)\n    cluster_identifier = (cluster_name, cluster_namespace)\n    log_prefix = \",\".join(cluster_identifier)\n\n    if event_type == \"ADDED\":\n        operator_utils.check_redis_password_not_specified(\n            cluster_config, cluster_identifier)\n\n        cluster_status_q.put((cluster_name, cluster_namespace,\n                              STATUS_UPDATING))\n\n        ray_cluster = RayCluster(cluster_config)\n\n        # Track changes to the custom resource's spec field:\n        generation = cluster_cr[\"metadata\"][\"generation\"]\n        ray_cluster.set_generation(generation)\n\n        logger.info(f\"{log_prefix}: Launching cluster.\")\n        ray_cluster.create_or_update()\n\n        ray_clusters[cluster_identifier] = ray_cluster\n\n        cluster_status_q.put((cluster_name, cluster_namespace, STATUS_RUNNING))\n\n    elif event_type == \"MODIFIED\":\n        ray_cluster = ray_clusters[cluster_identifier]\n        # Check metadata.generation to determine if there's a spec change.\n        current_generation = cluster_cr[\"metadata\"][\"generation\"]\n        # Check metadata.labels.autoscalerRetries to see if we need to restart\n        # Ray processes.\n        status = cluster_cr.get(\"status\", {})\n        autoscaler_retries = status.get(AUTOSCALER_RETRIES_FIELD, 0)\n\n        # True if there's been a change to the spec of the custom resource,\n        # triggering an increment of metadata.generation:\n        spec_changed = current_generation > ray_cluster.get_generation()\n        # True if monitor has failed, triggering an increment of\n        # status.autoscalerRetries:\n        ray_restart_required = (autoscaler_retries >\n                                ray_cluster.get_num_retries())\n        if ray_restart_required:\n            logger.error(f\"{log_prefix}: Failed, restarting cluster.\")\n            ray_cluster.set_num_retries(autoscaler_retries)\n        if spec_changed:\n            logger.info(f\"{log_prefix}: Updating cluster.\")\n            ray_cluster.set_generation(current_generation)\n\n        # Update if there's been a change to the spec or if we're attempting\n        # recovery from autoscaler failure.\n        if spec_changed or ray_restart_required:\n            cluster_status_q.put((cluster_name, cluster_namespace,\n                                  STATUS_UPDATING))\n            ray_cluster.set_config(cluster_config)\n            # Trigger Ray restart only if there's been a failure.\n            ray_cluster.create_or_update(restart_ray=ray_restart_required)\n            cluster_status_q.put((cluster_name, cluster_namespace,\n                                  STATUS_RUNNING))\n\n    elif event_type == \"DELETED\":\n        ray_cluster = ray_clusters[cluster_identifier]\n        ray_cluster.clean_up()\n        del ray_clusters[cluster_identifier]\n\n\ndef status_handling_loop():\n    while True:\n        cluster_name, cluster_namespace, phase = cluster_status_q.get()\n        try:\n            operator_utils.set_status(cluster_name, cluster_namespace, phase)\n        except Exception:\n            log_prefix = \",\".join([cluster_name, cluster_namespace])\n            logger.exception(f\"{log_prefix}: Error setting RayCluster status.\")\n\n\ndef main() -> None:\n    # Run status-handling loop.\n    
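# The status queue decouples the watch loop from the Kubernetes API:\n    # event handlers only enqueue (name, namespace, phase) tuples, and this\n    # single daemon thread performs the actual status writes. The producer\n    # side, as used in cluster_action above (illustrative values):\n    #\n    #   cluster_status_q.put((\"example-cluster\", \"default\", STATUS_RUNNING))\n    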
status_handler = threading.Thread(target=status_handling_loop, daemon=True)\n status_handler.start()\n\n # Make directory for Ray cluster configs\n if not os.path.isdir(operator_utils.RAY_CONFIG_DIR):\n os.mkdir(operator_utils.RAY_CONFIG_DIR)\n\n while True:\n # This outer loop waits for creation of a RayCluster CRD if it hasn't\n # already been created.\n try:\n # Enter main event loop.\n run_event_loop()\n except ApiException as e:\n if e.status == 404:\n logger.warning(\"Waiting for creation of the RayCluster CRD\")\n time.sleep(5)\n else:\n logger.error(\"Failed to enter operator event loop.\")\n # Unforeseen startup error. Operator pod is\n # likely to end up in a crash loop.\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/ray/ray_operator/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":13409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"639875757","text":"import logging\n\nfrom pylons import request, response, session, tmpl_context as c\nfrom pylons.controllers.util import abort, redirect_to\nfrom pylons.decorators import validate\nfrom pylons.decorators.rest import restrict\n\nfrom rentfox.lib.base import BaseController, render\nfrom rentfox.model import Lease, Unit, Property, Transaction, Tenant, Tenant_lease, meta\nimport rentfox.lib.helpers as h\nfrom sqlalchemy import and_, or_, asc, desc\nimport datetime\nimport calendar\nimport json\nimport uuid\nfrom rentfox.lib import email as mailman\nfrom pylons import session\n\nlog = logging.getLogger(__name__)\n\nclass RentController(BaseController):\n\n @h.authorize(h.is_manager)\n @h.authenticate\n def __before__(self):\n c.menuSubmenu = 1\n c.menuProperty = 'on'\n c.submenuProperty = 'on'\n \n def markPaid(self):\n unitId = request.POST['unitId']\n leaseId = request.POST['leaseId']\n amount = request.POST['amount']\n due = int(request.POST['due'])\n forMonth = int(request.POST['forMonth'])\n forYear = int(request.POST['forYear'])\n markLatePaid = request.POST['markLatePaid']\n \n result = {}\n if markLatePaid == '1':\n Transaction.record_late_paid(unitId,forMonth,forYear)\n else:\n companyId = request.environ.get(\"COMPANY_ID\")\n now = datetime.datetime.today()\n id = str(uuid.uuid1())\n record = Unit.get_unit_info(unitId)\n \n curM = now.month\n curY = now.year\n \n if curM != forMonth or (curM == forMonth and curY != forYear):\n now = datetime.datetime.today()\n curHour = now.hour\n curMin = now.minute\n curSec = now.second\n date = datetime.datetime(forYear,forMonth,due,curHour,curMin,curSec)\n else:\n date = now\n \n Transaction.record_rent(\n id=str(uuid.uuid1()),\n companyid=companyId,\n propertyid=record[2],\n leaseid=leaseId,\n unitid=unitId,\n type='Rent',\n formonth=forMonth,\n foryear=forYear,\n date=date,\n income=1,\n name='Unit ' + record[0] + ', ' + record[1],\n amount=amount\n )\n \n return json.dumps(result)\n \n def markLate(self):\n unitId = request.POST['unitId']\n latefee = request.POST['latefee']\n forMonth = request.POST['forMonth']\n forYear = request.POST['forYear']\n date = request.POST['date']\n latefeeNum = latefee.replace('$','')\n Transaction.record_latefee(unitId,latefeeNum,forMonth,forYear,date)\n \n remind = True if request.POST['remind'] == 'true' and float(latefeeNum) > 0.0 else False\n if remind:\n tenants = Tenant_lease.get_tenants_from_unit(unitId)\n emails = [tenant.email for tenant in tenants]\n subj = '{0} - late fee owed'.format(request.environ.get('COMPANY_NAME'))\n msg = 
\"\"\"\\\nDear Tenant,\n\nThis is a notice from your landlord that your rent was turned in late.\n\nAmount owed: ${0}\n\nPlease pay this amount promptly.\n\nThanks,\n\n{1}\n\"\"\".format(latefee, request.environ.get('COMPANY_NAME'))\n mailman.send(emails, subj, msg)\n \n return json.dumps('')\n \n def markUnpaid(self):\n unitId = request.POST['unitId']\n forMonth = request.POST['forMonth']\n forYear = request.POST['forYear']\n \n Transaction.undo_rent(unitId,forMonth,forYear)\n return json.dumps('')\n \n def updateTransaction(self):\n unitId = request.POST['unitId']\n forMonth = request.POST['forMonth']\n forYear = request.POST['forYear']\n amount = request.POST['amount']\n \n Transaction.updateRent(amount,unitId,forMonth,forYear)\n return json.dumps('')\n \n def keynat(self, string=None):\n \"\"\" Sort list in alphanumeric order.\n \n >>> print(sorted(['a','3','c','B'], key=self.keynat))\n ['3', 'B', 'a', 'c']\n \"\"\"\n r = []\n for c in string:\n try:\n c = int(c)\n try: r[-1] = r[-1] * 10 + c\n except: r.append(c)\n except:\n r.append(c)\n return r\n \n def json(self):\n month = int(request.POST['month'])\n year = int(request.POST['year'])\n unit = request.POST['unit']\n status = request.POST['status']\n page = request.POST['page']\n prop_list = request.POST['propList']\n propList = prop_list.split(',')\n \n now = datetime.date.today()\n thisDay = now.day\n thisMonth = now.month\n thisYear = now.year\n \n lastDay = calendar.monthrange(year,month)\n lastDay = lastDay[1]\n firstDate = datetime.date(year,month,1)\n lastDate = datetime.date(year,month,int(lastDay))\n \n labelLike = unit == 'all' and '%' or unit\n \n startSlice = (int(page) - 1) * 20;\n endSlice = startSlice + 20;\n \n records = []\n \n if prop_list:\n records = meta.Session.query(Lease.id, Lease.rent, Lease.due, Lease.startdate, Lease.enddate, Lease.outdate,\\\n Unit.id, Unit.label, Transaction.id, Transaction.formonth, Transaction.foryear,\\\n Transaction.amount, Transaction.latefee, Transaction.latepaid, Transaction.deleted,\\\n Property.id, Property.name).\\\n outerjoin(Unit,(Transaction,Lease.id==Transaction.leaseid),(Property,Unit.propertyid==Property.id)).\\\n filter(Lease.deleted==None).\\\n filter(Unit.propertyid.in_(propList)).\\\n filter(or_(Transaction.formonth==None,Transaction.type=='Rent')).\\\n filter(or_(Transaction.id==None,Transaction.deleted==None)).\\\n filter(and_(Lease.startdate<=lastDate,\\\n or_(Lease.outdate==None,Lease.outdate>=firstDate)))\n \n allRecords = []\n rentList = []\n unitIdPaid = []\n unsortedRecords = []\n labelUnitIdList = []\n sortedRecords = []\n if records:\n for record in records:\n allRecords.append((record[8], record[9], record[10], record[6]))\n \n has_trans_records = []\n has_curMonth_trans = []\n dup_unitid = []\n cur_unitid = []\n for item in allRecords:\n transId = item[0]\n transMonth = item[1]\n transYear = item[2]\n unitId = item[3]\n if transId and not unitId in dup_unitid:\n has_trans_records.append(transId)\n if transMonth and transMonth == month and transYear == year:\n has_curMonth_trans.append(transId)\n cur_unitid.append(unitId)\n dup_unitid.append(unitId)\n \n records = records.filter(or_(Transaction.id==None,\\\n Transaction.id.in_(has_curMonth_trans),\\\n and_(Transaction.id.in_(has_trans_records),\\\n ~Unit.id.in_(cur_unitid)))).\\\n filter(Unit.label.like('%'+labelLike+'%')).\\\n order_by(Unit.label).all()\n \n \n for leaseid, rent, due, start, end, out, unitid, unitlabel, transid, formonth, foryear, amount, latefee, latepaid, deleted, propid, propname in 
records:\n if status == 'all' or\\\n (status == 'paid' and formonth and formonth == month and (latepaid == None or latepaid)) or\\\n (status == 'latefee' and not latepaid and formonth == month and foryear == year) or\\\n (status == 'overdue' and (not formonth or formonth != month)):\n if formonth == month and foryear == year and not unitid in unitIdPaid:\n unitIdPaid.append(unitid)\n obj = {\n 'leaseid': leaseid,\n 'rent': rent,\n 'due': due,\n 'start': start,\n 'end': end,\n 'out': out,\n 'unitid': unitid,\n 'unitlabel': unitlabel,\n 'transid': transid,\n 'formonth': formonth,\n 'foryear': foryear,\n 'amount': amount,\n 'latefee': latefee,\n 'latepaid': latepaid,\n 'deleted': deleted,\n 'propid': propid,\n 'propname': propname\n }\n labelId = unitlabel+'*'+unitid\n labelUnitIdList.append(labelId)\n \n unsortedRecords.append(obj)\n \n \n labelUnitIdList = sorted(labelUnitIdList, key=self.keynat)\n for labelId in labelUnitIdList:\n labelId = labelId.split('*')\n label = labelId[0]\n id = labelId[1]\n sortedRecords.extend([d for d in unsortedRecords if d['unitlabel'] == label and d['unitid'] == id])\n \n duplicateUnitId = []\n for record in sortedRecords:\n leaseid = record['leaseid']\n rent = record['rent']\n due = record['due']\n start = record['start']\n end = record['end']\n out = record['out']\n unitid = record['unitid']\n unitlabel = record['unitlabel']\n transid = record['transid']\n formonth = record['formonth']\n foryear = record['foryear']\n amount = record['amount']\n latefee = record['latefee']\n latepaid = record['latepaid']\n deleted = record['deleted']\n propid = record['propid']\n propname = record['propname']\n \n if not unitid in duplicateUnitId:\n duplicateUnitId.append(unitid)\n else:\n continue\n \n if start and start.month == month and start.year == year:\n perDay = rent / lastDay\n perDay = '%.2f' % perDay\n perDay = float(perDay)\n day = start.day\n day = lastDay - day + 1\n rent = round(day * perDay)\n elif out and out.month == month and out.year == year:\n perDay = round(rent / lastDay)\n day = out.day - 1\n rent = (day * perDay)\n \n curStatus = ''\n \n if formonth and month == formonth and year == foryear and deleted == None:\n if latepaid == 0:\n latefee = '%.2f' % latefee\n curStatus = 'Rent Received, still owes $' + str(latefee) + ' late fee'\n statusType = 3\n elif latepaid == 1:\n latefee = '%.2f' % latefee\n curStatus = 'Rent Received, including $' + str(latefee) + ' late fee'\n statusType = 4\n else:\n curStatus = 'Rent Received'\n statusType = 1\n else:\n # has only past records or no records\n if year == thisYear and month == thisMonth and int(due) == thisDay:\n curStatus = 'Due Today'\n statusType = 5\n elif (year == thisYear and month > thisMonth) or (year == thisYear and month == thisMonth and thisDay < int(due)) or year > thisYear:\n curStatus = \"Rent not due yet\"\n statusType = 6\n else:\n curStatus = 'Overdue'\n statusType = 2\n \n if amount and month == formonth and year == foryear and deleted == None:\n rent = amount\n \n obj = {\n 'propertyName': propname,\n 'propertyId': propid,\n 'unitId': unitid,\n 'unitLabel': unitlabel,\n 'leaseId': leaseid,\n 'rent': rent,\n 'due': due,\n 'statusType': statusType,\n 'status': curStatus\n }\n rentList.append(obj)\n \n slicedRecords = []\n counter = 0\n for item in rentList:\n if startSlice <= counter < endSlice:\n slicedRecords.append(item)\n counter = counter + 1\n \n rentList = slicedRecords\n \n rentRecords = {\n 'rent': rentList,\n 'totalRecords': len(rentList)\n }\n \n return json.dumps(rentRecords)\n 
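# The action reads its parameters from request.POST; a client call would\n    # look roughly like this (sketch; host and route are assumed from the\n    # Pylons controller/action names, not confirmed by this file):\n    #\n    #   import requests\n    #   requests.post('http://localhost:5000/rent/json', data={\n    #       'month': '7', 'year': '2012', 'unit': 'all', 'status': 'all',\n    #       'page': '1', 'propList': 'prop-id-1,prop-id-2'})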
\n \"\"\"\n rent json:\n {\n rent: [\n {\n propertyName: 'Shoreline',\n propertyId: 'ukef-dfsd-sdfe-360f',\n unitId: 'b06806c8-624d-11df-8256-000c29f0db84',\n unitLabel: '102',\n leaseId: '94f75ece-863e-11df-8256-000c29f0db84',\n rent: '2200',\n due: '5',\n statusType: 1, 2, 3, 4, 5, or 6, // see below for what type means \n status: 'Paid' or 'Overdue' or 'Paid, still owes $100.00 late fee' \n or 'Paid, including $100.00 late fee' or 'Due today'\n or 'Rent not due yet'\n },\n {\n propertyName: 'Shoreline',\n propertyId: 'ukef-dfsd-sdfe-360f',\n unitId: 'b06806c8-624d-11df-8256-000c29f0db84',\n unitLabel: '99',\n leaseId: '801311a8-85a1-11df-8256-000c29f0db84',\n rent: '1000',\n due: '1',\n statusType: 1, 2, 3, 4, 5, or 6, // see below for what type means \n status: 'Paid' or 'Overdue' or 'Paid, still owes $100.00 late fee' \n or 'Paid, including $100.00 late fee' or 'Due today'\n or 'Rent not due yet'\n }\n ],\n totalRecords: '2'\n }\n \"\"\"\n","sub_path":"rentfox/controllers/rent.py","file_name":"rent.py","file_ext":"py","file_size_in_byte":15552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272512051","text":"import networkx as nx\nimport itertools\nimport numpy as np\nfrom qsim.graph_algorithms.graph import Graph\nfrom qsim.evolution.hamiltonian import HamiltonianMaxCut, HamiltonianDriver\nimport scipy.sparse as sparse\nfrom qsim.schrodinger_equation import SchrodingerEquation\nimport matplotlib.pyplot as plt\n\ndef sk_integer(n, verbose=False):\n graph = nx.complete_graph(n)\n weights = [-1, 1]\n for (i, j) in itertools.combinations(range(n), 2):\n graph[i][j]['weight'] = weights[np.random.randint(2)]\n if verbose:\n print(graph.edges.data())\n return Graph(graph)\n\n\ndef ground_states(h):\n return np.argwhere(h==np.min(h))\n\ndef first_excited_states(h):\n gs = ground_states(h).T\n h[gs[0], gs[1]] = np.inf\n return ground_states(h)\n\ndef find_gap(graph, nondegenerate=False):\n # Compute the number of ground states and first excited states\n cost = HamiltonianMaxCut(graph, cost_function=False, use_Z2_symmetry=True)\n # Generate a dummy graph with one fewer nodes\n # Cut cost and driver hamiltonian in half to account for Z2 symmetry\n driver = HamiltonianDriver(graph=Graph(nx.complete_graph(graph.n - 1)))\n driver.hamiltonian\n row = list(range(2 ** (graph.n - 1)))\n column = list(range(2 ** (graph.n - 1)))\n column.reverse()\n driver._hamiltonian = driver._hamiltonian + sparse.csr_matrix((np.ones(2 ** (graph.n - 1)), (row, column)))\n n_ground = len(ground_states(cost.hamiltonian))\n if nondegenerate:\n if n_ground > 1:\n return None\n times = np.arange(0, 1, .01)\n def schedule(t):\n cost.energies = (t,)\n driver.energies = (t-1,)\n min_gap = np.inf\n for i in range(len(times)):\n schedule(times[i])\n eigvals = SchrodingerEquation(hamiltonians=[cost, driver]).eig(k=n_ground+1, return_eigenvectors=False)\n eigvals = np.flip(eigvals)\n if eigvals[-1]-eigvals[0] < min_gap:\n #print(min_gap, eigvals, eigvals[-1]-eigvals[0], times[i], n_ground)\n min_gap = eigvals[-1]-eigvals[0]\n return min_gap\n\ndef collect_min_gap_statistics(n, iter=50, verbose=False):\n gaps = []\n for i in range(iter):\n if verbose:\n print(i)\n graph = sk_integer(n)\n gaps.append(find_gap(graph))\n return gaps\n\ndef min_gap_vs_n(ns, iters=None, verbose=False):\n for i in range(len(ns)):\n if verbose:\n print(ns[i])\n if iters is not None:\n gaps = collect_min_gap_statistics(ns[i], iters[i], verbose=verbose)\n else:\n gaps = 
collect_min_gap_statistics(ns[i], verbose=verbose)\n print(ns[i], np.mean(gaps), np.std(gaps))\n plt.errorbar(ns[i], np.mean(gaps), yerr=np.std(gaps), color='black')\n plt.scatter(ns[i], np.mean(gaps), color='blue')\n plt.show()\n\n\nif __name__ == '__main__':\n import sys\n index = sys.argv[1]\n index = int(index)\n n = 15\n print(n, collect_min_gap_statistics(n, iter=1, verbose=False)[0], flush=True)\n\n","sub_path":"qsim/scripts/sk_gap.py","file_name":"sk_gap.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"392594166","text":"\"\"\"\nThis is an empty file.\nIt may be used as a starting point for other tasks.\nThis file reads the mic input and outputs the data unchanged.\nUse Ctrl-C to quit the program safely.\n\"\"\"\n\nimport pyaudio\nimport numpy as np\n\n\n# initialize pyaudio\nCHUNK_SIZE = 2048 # samples per iteration\nCHANNELS = 1 # 1 channel = mono | 2 channels = stereo\nRATE = 44100 # sampling frequency in Hz\nP_AUDIO = pyaudio.PyAudio()\nAUDIO_STREAM = P_AUDIO.open(format=pyaudio.paFloat32,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n\ndef manipulate_stream(data_in):\n \"\"\"\n This function will get the input data and manipulate it in some way.\n \"\"\"\n\n data_out = data_in\n\n # return the manipulated data\n return np.array(data_out)\n\n\nprint(\"running\")\n\n# we catch the keyboard interrupt to exit the stream safely\ntry:\n while True:\n # get data, manipulate, then output\n DATA_IN = np.fromstring(\n AUDIO_STREAM.read(CHUNK_SIZE), dtype=np.float32)\n DATA_OUT = manipulate_stream(DATA_IN)\n AUDIO_STREAM.write(DATA_OUT, CHUNK_SIZE)\nexcept KeyboardInterrupt:\n print(\"keyboard interrupt\")\nfinally:\n # stop, close and terminate\n AUDIO_STREAM.stop_stream()\n AUDIO_STREAM.close()\n P_AUDIO.terminate()\n print(\"terminated completly\")\n","sub_path":"keller_dsp/f00_empty.py","file_name":"f00_empty.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276068365","text":"\ndef xor (x, y):\n\tz = bytearray(len(x))\n\n\tfor i in range(len(x)):\n\t\tz[i] = x[i] ^ y[i]\n\n\treturn z\n\nkey = \"ICE\"\nplaintext = \"Burning 'em, if you ain't quick and nimble I go crazy when I hear a cymbal\" \n\nb_plaintext = bytearray(plaintext)\nb_key = bytearray(key)\n\nb_key_extended = bytearray(len(plaintext))\nfor i in range(len(b_plaintext)):\n\tb_key_extended[i] = b_key[i % 3]\n\nb_cyphertext = xor(b_plaintext, b_key_extended)\nprint(str(b_cyphertext).encode(\"hex\"))","sub_path":"CTFs/CryptoPals/Set1/challenge5.py","file_name":"challenge5.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"591316629","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\n\ndata = pd.read_csv('data_set/canada_per_capita_income.csv')\n\nreg_model_obj = linear_model.LinearRegression()\nreg_model_obj.fit(data[[\"year\"]], data[\"income\"])\n\nm = reg_model_obj.coef_ \nb = reg_model_obj.intercept_\n\npredicted = reg_model_obj.predict(data[[\"year\"]])\n\n# y = mx+b\n# line_data = m*data[\"year\"]+b\n\nplt.plot(data[\"year\"], data[\"income\"], color=\"red\")\nplt.plot(data[\"year\"], predicted)\nplt.xlabel(\"Years\")\nplt.ylabel(\"Income in 
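Two portability notes on the records above: `np.fromstring` in the audio pass-through is deprecated in favor of `np.frombuffer`, and challenge5.py is Python 2 only (`bytearray(plaintext)` needs an encoding in Python 3, and `str.encode("hex")` is gone). A Python 3 sketch of the same repeating-key XOR:

import itertools

def repeating_key_xor(data: bytes, key: bytes) -> bytes:
    # XOR each data byte against the key, cycling the key as needed
    return bytes(b ^ k for b, k in zip(data, itertools.cycle(key)))

plaintext = b"Burning 'em, if you ain't quick and nimble I go crazy when I hear a cymbal"
print(repeating_key_xor(plaintext, b'ICE').hex())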
USD\")\nplt.show()\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"486063475","text":"#!/usr/bin/python3\n'''\nThis scripts update a element from the states table.\n'''\n\n\nimport sys\nfrom model_state import Base, State\n\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import Session\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format\n (sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n Base.metadata.create_all(engine)\n\n session = Session(engine)\n state = session.query(State).filter(State.id == \"2\").one()\n state.name = \"New Mexico\"\n session.commit()\n session.close()\n","sub_path":"0x0F-python-object_relational_mapping/12-model_state_update_id_2.py","file_name":"12-model_state_update_id_2.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113097379","text":"#!/usr/bin/env python -u\n# Copyright (c) 2012 Samsung SDS Co., LTD\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport sys\nimport traceback\nimport pika\nimport json\nimport uuid\nimport time\nfrom pika.exceptions import AMQPConnectionError, AMQPChannelError\n\npossible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),\n os.pardir, os.pardir))\nif os.path.exists(os.path.join(possible_topdir, \"synaps\", \"__init__.py\")):\n sys.path.insert(0, possible_topdir)\nfrom synaps import flags\nfrom synaps import log as logging\nfrom synaps import utils\n\nfrom storm import Spout, emit, log\n\nflags.FLAGS(sys.argv)\nutils.default_flagfile()\nlogging.setup()\nFLAGS = flags.FLAGS\n\nclass ApiSpout(Spout):\n SPOUT_NAME = \"APISpout\"\n \n def initialize(self, conf, context):\n self.pid = os.getpid() \n self.connect()\n \n def log(self, msg):\n log(\"[%s:%d] %s\" % (self.SPOUT_NAME, self.pid, msg))\n \n def tracelog(self, e):\n msg = traceback.format_exc(e)\n for line in msg.splitlines():\n self.log(\"TRACE: \" + line)\n \n def connect(self):\n while True:\n try:\n self._connect()\n except (AMQPConnectionError, AMQPChannelError):\n self.log(\"AMQP Connection Error. 
Retry in 3 seconds.\")\n time.sleep(3) \n else:\n break\n \n def _connect(self):\n self.conn = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=FLAGS.get('rabbit_host'),\n port=FLAGS.get('rabbit_port'),\n credentials=pika.PlainCredentials(\n FLAGS.get('rabbit_userid'),\n FLAGS.get('rabbit_password')\n ),\n virtual_host=FLAGS.get('rabbit_virtual_host'),\n )\n ) \n \n self.channel = self.conn.channel()\n queue_args = {\"x-ha-policy\" : \"all\" }\n self.channel.queue_declare(queue='metric_queue', durable=True,\n arguments=queue_args)\n\n def ack(self, id):\n self.log(\"Acked message [%s]\" % id)\n \n def fail(self, id):\n self.log(\"Reject failed message [%s]\" % id)\n \n def nextTuple(self):\n try:\n (method_frame, header_frame, body) = self.channel.basic_get(\n queue=\"metric_queue\", no_ack=True\n )\n except (AMQPConnectionError, AMQPChannelError):\n self.log(\"AMQP Connection or Channel Error. While get a message.\")\n self.connect()\n return\n\n if method_frame:\n mq_msg_id = method_frame.delivery_tag\n msg_body = json.loads(body)\n msg_id, msg_uuid = msg_body['message_id'], msg_body['message_uuid']\n message = \"Start processing message in the queue - [%s:%s] %s\"\n self.log(message % (msg_id, msg_uuid, body))\n emit([body], id=msg_uuid)\n\nif __name__ == \"__main__\":\n ApiSpout().run()\n","sub_path":"synaps-storm/multilang/resources/synstorm_api_spout.py","file_name":"synstorm_api_spout.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"99061471","text":"import pytest\nimport requests\nfrom fastapi import FastAPI\nimport starlette\n\nimport ray\nfrom ray import serve\nfrom ray.serve._private.constants import RAY_SERVE_REQUEST_ID_HEADER\n\n\ndef test_request_id_header_by_default(serve_instance):\n \"\"\"Test that a request_id is generated by default and returned as a header.\"\"\"\n\n @serve.deployment\n class Model:\n def __call__(self):\n request_id = ray.serve.context._serve_request_context.get().request_id\n return request_id\n\n serve.run(Model.bind())\n resp = requests.get(\"http://localhost:8000\")\n assert resp.status_code == 200\n assert RAY_SERVE_REQUEST_ID_HEADER in resp.headers\n assert resp.text == resp.headers[RAY_SERVE_REQUEST_ID_HEADER]\n\n\nclass TestUserProvidedRequestIDHeader:\n def verify_result(self):\n resp = requests.get(\n \"http://localhost:8000\", headers={RAY_SERVE_REQUEST_ID_HEADER: \"123-234\"}\n )\n assert resp.status_code == 200\n assert resp.json() == 1\n assert RAY_SERVE_REQUEST_ID_HEADER in resp.headers\n assert resp.headers[RAY_SERVE_REQUEST_ID_HEADER] == \"123-234\"\n\n def test_basic(self, serve_instance):\n @serve.deployment\n class Model:\n def __call__(self) -> int:\n request_id = ray.serve.context._serve_request_context.get().request_id\n assert request_id == \"123-234\"\n return 1\n\n serve.run(Model.bind())\n self.verify_result()\n\n def test_fastapi(self, serve_instance):\n app = FastAPI()\n\n @serve.deployment\n @serve.ingress(app)\n class Model:\n @app.get(\"/\")\n def say_hi(self) -> int:\n request_id = ray.serve.context._serve_request_context.get().request_id\n assert request_id == \"123-234\"\n return 1\n\n serve.run(Model.bind())\n self.verify_result()\n\n def test_starlette_resp(self, serve_instance):\n @serve.deployment\n class Model:\n def __call__(self) -> int:\n request_id = ray.serve.context._serve_request_context.get().request_id\n assert request_id == \"123-234\"\n return starlette.responses.Response(\"1\", 
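The spout above polls RabbitMQ with `basic_get` and rebuilds its connection whenever an AMQP error surfaces. A stripped-down sketch of that poll-and-reconnect pattern; the `channel_factory` callable and the queue name are placeholders, and recent pika spells the old `no_ack=True` flag `auto_ack=True`:

import time
from pika.exceptions import AMQPConnectionError, AMQPChannelError

def get_one(channel_factory, queue='metric_queue', retry_delay=3):
    # Fetch a single message, rebuilding the channel on AMQP errors
    channel = channel_factory()
    while True:
        try:
            method, header, body = channel.basic_get(queue=queue, auto_ack=True)
            return body  # None when the queue is empty
        except (AMQPConnectionError, AMQPChannelError):
            time.sleep(retry_delay)
            channel = channel_factory()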
media_type=\"application/json\")\n\n serve.run(Model.bind())\n self.verify_result()\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-s\", __file__]))\n","sub_path":"python/ray/serve/tests/test_http_headers.py","file_name":"test_http_headers.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"440662157","text":"# Copyright (c) 2023 CNES\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\nimport dask.array as da\nimport numpy as np\nimport pytest\n\nfrom . import load_grid2d\nfrom .. import Axis, Histogram2D\n\n\ndef build_instance(dtype):\n \"\"\"Build an instance of Histogram2D with a given dtype.\"\"\"\n ds = load_grid2d()\n\n x_axis = Axis(np.arange(-180, 180, 5), is_circle=True)\n y_axis = Axis(np.arange(-90, 95, 5))\n hist2d = Histogram2D(x_axis, y_axis, bin_counts=40, dtype=dtype)\n assert x_axis == hist2d.x\n assert y_axis == hist2d.y\n assert isinstance(str(hist2d), str)\n\n lon, lat = np.meshgrid(ds.lon, ds.lat)\n hist2d.push(lon, lat, ds.mss)\n mean = hist2d.variable('mean')\n assert isinstance(mean, np.ndarray)\n median = hist2d.variable('quantile', 0.5)\n assert isinstance(median, np.ndarray)\n kurtosis = hist2d.variable('kurtosis')\n assert isinstance(kurtosis, np.ndarray)\n skewness = hist2d.variable('skewness')\n assert isinstance(skewness, np.ndarray)\n\n hist2d.clear()\n assert np.all(hist2d.variable('count') == 0)\n\n with pytest.raises(ValueError):\n hist2d.variable('_')\n\n\ndef test_histogram2d():\n \"\"\"Test Histogram2D class.\"\"\"\n build_instance(np.float64)\n build_instance(np.float32)\n\n with pytest.raises(ValueError):\n build_instance(np.int8)\n\n\ndef test_dask():\n \"\"\"Test Histogram2D with dask arrays.\"\"\"\n x_axis = Axis(np.linspace(-180, 180, 1), is_circle=True)\n y_axis = Axis(np.linspace(-80, 80, 1))\n hist2d = Histogram2D(x_axis, y_axis)\n\n x = da.full((4096 * 8, ), -180.0, dtype='f8', chunks=4096)\n y = da.full((4096 * 8, ), -80.0, dtype='f8', chunks=4096)\n z = da.random.uniform(size=4096 * 8, chunks=4096)\n\n hist2d = hist2d.push_delayed(x, y, z).compute()\n\n assert np.all(hist2d.variable('count') == 32768)\n assert hist2d.variable('mean')[0, 0] == pytest.approx(z.mean().compute())\n assert hist2d.variable('variance')[0, 0] == pytest.approx(\n z.std().compute()**2, rel=1e-4, abs=1e-4) # type: ignore\n","sub_path":"src/pyinterp/tests/test_histogram_2d.py","file_name":"test_histogram_2d.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"437728255","text":"from flask import Flask\r\nfrom flask import redirect\r\nfrom flask import render_template\r\nfrom flask import request\r\nfrom flask import session\r\nfrom flask import url_for\r\nfrom flask import flash\r\nfrom flask import send_from_directory\r\nfrom flask_wtf import FlaskForm, CSRFProtect\r\nfrom wtforms import TextField, PasswordField, TextAreaField, validators, SelectField, StringField, SubmitField\r\nfrom wtforms.validators import DataRequired\r\nfrom werkzeug.utils import secure_filename\r\n\r\nfrom flask_migrate import Migrate\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy.orm.exc import NoResultFound\r\n\r\nimport os\r\nimport logging\r\nimport calendar\r\nimport config\r\nimport random\r\nimport datetime\r\nimport time\r\nimport re\r\nimport csv\r\nimport 
json\r\n\r\n# logger stuff\r\nlogger = logging.getLogger(__name__)\r\nformatter = logging.Formatter(\r\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\r\n)\r\nconsole = logging.StreamHandler()\r\nconsole.setLevel(logging.DEBUG)\r\nconsole.setFormatter(formatter)\r\nlogger.addHandler(console)\r\n\r\n# init app and load conf\r\napp = Flask(__name__, static_url_path='/static')\r\napp.config.from_object(config)\r\ncsrf = CSRFProtect(app)\r\n\r\n# init db\r\ndb = SQLAlchemy(app)\r\nmigrate = Migrate(app, db)\r\n\r\n# -----------------------------------------------------------------------\r\n# Database models\r\n# -----------------------------------------------------------------------\r\nclass Volunteers(db.Model):\r\n # Use DCI number as the key\r\n id = db.Column(db.Integer, primary_key=True)\r\n dci = db.Column(db.Integer)\r\n first_name = db.Column(db.String(255))\r\n last_name = db.Column(db.String(255))\r\n email = db.Column(db.String(255))\r\n emergency_info = db.Column(db.String(255))\r\n phone = db.Column(db.String(255))\r\n dm_tag = db.Column(db.String(255))\r\n tiers = db.Column(db.String(255))\r\n style = db.Column(db.String(255))\r\n slot_pref = db.Column(db.String(512))\r\n con_exp = db.Column(db.String(512))\r\n pronoun = db.Column(db.String(255))\r\n notes = db.Column(db.String(512))\r\n badge = db.Column(db.String(255))\r\n hotel = db.Column(db.String(255))\r\n hotel_pref = db.Column(db.String(512))\r\n other = db.Column(db.String(512))\r\n shirt_size = db.Column(db.String(255))\r\n adventure_types = db.Column(db.String(255))\r\n sessions = db.relationship('Sessions', backref='volunteers', lazy=True)\r\n\r\n\r\nclass Slots(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n number = db.Column(db.Integer)\r\n length = db.Column(db.Integer)\r\n datetime = db.Column(db.DateTime)\r\n sessions = db.relationship('Sessions', backref='sessions', lazy=True)\r\n\r\nclass Tables(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n number = db.Column(db.Integer)\r\n players = db.Column(db.Integer)\r\n sessions = db.relationship('Sessions', backref='tables', lazy=True)\r\n\r\nclass Events(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n number = db.Column(db.Integer)\r\n code = db.Column(db.String(255))\r\n title = db.Column(db.String(255))\r\n length = db.Column(db.Integer)\r\n description = db.Column(db.String(1024))\r\n tier = db.Column(db.Integer)\r\n sessions = db.relationship('Sessions', backref='events', lazy=True)\r\n\r\nclass Sessions(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n number = db.Column(db.Integer)\r\n event = db.Column(db.Integer, db.ForeignKey('events.id'))\r\n volunteer = db.Column(db.Integer, db.ForeignKey('volunteers.id'))\r\n table = db.Column(db.Integer, db.ForeignKey('tables.id'))\r\n slot = db.Column(db.Integer, db.ForeignKey('slots.id'))\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Forms\r\n# -----------------------------------------------------------------------\r\nclass LoginForm(FlaskForm):\r\n name = TextField('Name:', validators=[validators.required()])\r\n email = TextField('Email:', validators=[validators.required()])\r\n password = TextField('Password:', validators=[validators.required()])\r\n\r\nclass SlotForm(FlaskForm):\r\n number = TextField('Slot Number', validators=[validators.optional()])\r\n day = TextField('Day:', validators=[validators.optional()])\r\n length = TextField('Length:', validators=[validators.optional()])\r\n time = 
TextField('Time:', validators=[validators.optional()])\r\n submit = SubmitField(label='Submit')\r\n clear = SubmitField(label='Clear All Slots')\r\n delete = SubmitField(label='Delete')\r\n\r\n def reset(self):\r\n blankData = MultiDict([ ('csrf', self.reset_csrf() ) ])\r\n self.process(blankData)\r\n\r\nclass TableForm(FlaskForm):\r\n number = TextField('Table Number', validators=[validators.optional()])\r\n players = TextField('Number of Players:', validators=[validators.optional()])\r\n bulk_tables = TextField('Number of Tables:', validators=[validators.optional()])\r\n bulk_players = TextField('Number of Players:', validators=[validators.optional()])\r\n submit = SubmitField(label='Submit')\r\n clear = SubmitField(label='Clear All Slots')\r\n bulk_add = SubmitField(label='Add Multiple Tables')\r\n\r\n def reset(self):\r\n blankData = MultiDict([ ('csrf', self.reset_csrf() ) ])\r\n self.process(blankData)\r\n\r\nclass EventForm(FlaskForm):\r\n number = TextField('Table Number', validators=[validators.required()])\r\n code = TextField('Adventure Code', validators=[validators.required()])\r\n title = TextField('Adventure Code', validators=[validators.required()])\r\n length = TextField('Adventure Code', validators=[validators.required()])\r\n description = TextAreaField('Adventure Code', validators=[validators.required()])\r\n tier = TextField('Adventure Code', validators=[validators.required()])\r\n submit = SubmitField(label='Submit')\r\n clear = SubmitField(label='Clear All Slots')\r\n\r\n def reset(self):\r\n blankData = MultiDict([ ('csrf', self.reset_csrf() ) ])\r\n self.process(blankData)\r\n\r\nclass SessionForm(FlaskForm):\r\n table = SelectField('Table', coerce=int)\r\n slot = SelectField('Slot', coerce=int)\r\n volunteer = SelectField('Volunteer', coerce=int)\r\n event = SelectField('Event', coerce=str)\r\n session_info = SubmitField(label='Submit')\r\n delete = SubmitField(label='Delete Sessions')\r\n\r\nclass SessionDeleteForm(FlaskForm):\r\n sessions = SelectField('Session', coerce=str)\r\n delete = SubmitField(label='Delete Slots')\r\n\r\nclass FileForm(FlaskForm):\r\n selectfile = SelectField('Filename', validators=[validators.required()])\r\n submit = SubmitField(label='Submit')\r\n clear = SubmitField(label='Clear All Slots')\r\n\r\n# -----------------------------------------------------------------------\r\n# Internal Functions\r\n# -----------------------------------------------------------------------\r\n# -----------------------------------------------------------------------\r\n# Save Event information\r\n# -----------------------------------------------------------------------\r\ndef save_event(event):\r\n new_event = Events()\r\n\r\n event_exist = Events.query.get(event['number'])\r\n\r\n if event_exist is None:\r\n new_event.number = event['id']\r\n new_event.code = event['code']\r\n new_event.title = event['title']\r\n new_event.length = event['length']\r\n new_event.description = event['description']\r\n new_event.tier = event['tier']\r\n db.session.merge(new_event)\r\n try:\r\n db.session.commit()\r\n saved = 'saved'\r\n return (saved)\r\n except:\r\n logger.exception(\"Cannot save new event\")\r\n db.session.rollback()\r\n saved = 'failed'\r\n return (saved)\r\n else:\r\n saved = 'exists'\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# List Event information\r\n# -----------------------------------------------------------------------\r\ndef list_events():\r\n events = Events()\r\n\r\n all_events = 
Events.query.order_by(events.number).all()\r\n\r\n return(all_events)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear all events in database\r\n# -----------------------------------------------------------------------\r\ndef del_events():\r\n events = Events()\r\n\r\n all_events = Events.query.order_by(events.number).all()\r\n for e in all_events:\r\n db.session.delete(e)\r\n db.session.commit()\r\n return()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Process a file with Event information\r\n# -----------------------------------------------------------------------\r\ndef bulk_event_upload(filename):\r\n with open(filename, \"r\", encoding='latin-1') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n print (row)\r\n save_event(row)\r\n return()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Save Table information\r\n# -----------------------------------------------------------------------\r\ndef save_table(table):\r\n new_table = Tables()\r\n\r\n table_exist = Tables.query.get(table['number'])\r\n\r\n if table_exist is None:\r\n new_table.number = table['number']\r\n new_table.players = table['players']\r\n db.session.merge(new_table)\r\n try:\r\n db.session.commit()\r\n saved = 'saved'\r\n return (saved)\r\n except:\r\n logger.exception(\"Cannot save new table\")\r\n db.session.rollback()\r\n saved = 'failed'\r\n return (saved)\r\n else:\r\n saved = 'exists'\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# List Table information\r\n# -----------------------------------------------------------------------\r\ndef list_tables():\r\n tables = Tables()\r\n\r\n all_tables = Tables.query.order_by(tables.number).all()\r\n return(all_tables)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear all tables in database\r\n# -----------------------------------------------------------------------\r\ndef del_tables():\r\n table = Tables()\r\n\r\n all_tables = Tables.query.order_by(table.number).all()\r\n for t in all_tables:\r\n db.session.delete(t)\r\n db.session.commit()\r\n return()\r\n\r\n# -----------------------------------------------------------------------\r\n# Create more than 1 table in database\r\n# -----------------------------------------------------------------------\r\ndef create_bulk_tables(num_tables, num_players):\r\n tableinfo = {}\r\n tables = list_tables()\r\n for table in tables:\r\n t = int(table.number)\r\n itables = t + int(num_tables)\r\n\r\n while t <= itables:\r\n t += 1\r\n tableinfo['number'] = t\r\n tableinfo['players'] = num_players\r\n save_table(tableinfo)\r\n saved = 'saved'\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Save slot information\r\n# -----------------------------------------------------------------------\r\ndef save_slot(slot):\r\n new_slot = Slots()\r\n slot_test = Slots.query.get(slot['number'])\r\n\r\n if slot_test is None:\r\n new_slot.number = slot['number']\r\n new_slot.length = slot['length']\r\n slot_datetime = slot['day'] + \" \" + slot['time']\r\n new_slot.datetime = datetime.datetime.strptime(slot_datetime, \"%A %I:%M %p\")\r\n db.session.add(new_slot)\r\n try:\r\n db.session.commit()\r\n saved = 'saved'\r\n return (saved)\r\n except:\r\n logger.exception(\"Cannot save new slot\")\r\n raise\r\n db.session.rollback()\r\n 
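Two details in `save_slot` above: the bare `raise` re-throws before the rollback, so `db.session.rollback()` never runs, and parsing "%A %I:%M %p" with no date pins every slot to 1900-01-01, preserving only the weekday and time. A sketch that anchors slots to real dates instead; the weekday-to-date mapping is an invented example:

import datetime

CON_DAYS = {'Friday': datetime.date(2019, 8, 2),     # hypothetical convention
            'Saturday': datetime.date(2019, 8, 3),   # weekend; real dates would
            'Sunday': datetime.date(2019, 8, 4)}     # come from configuration

def slot_datetime(day, time_str):
    t = datetime.datetime.strptime(time_str, '%I:%M %p').time()
    return datetime.datetime.combine(CON_DAYS[day], t)

print(slot_datetime('Saturday', '10:00 AM'))  # 2019-08-03 10:00:00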
saved = 'failed'\r\n return (saved)\r\n else:\r\n saved = 'exists'\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# List all slots in database\r\n# -----------------------------------------------------------------------\r\ndef list_slots():\r\n slots = Slots()\r\n\r\n all_slots = Slots.query.order_by(slots.number).all()\r\n db.session.close()\r\n return(all_slots)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Match the slot day and time to a volunteer preference, return the slot id\r\n# for all matching slots.\r\n# -----------------------------------------------------------------------\r\ndef match_slots(slot_pref):\r\n all_slots = list_slots()\r\n v_slot_pref = []\r\n\r\n for slot in all_slots:\r\n for pref in slot_pref:\r\n pref_datetime = datetime.strptime(pref, \"%A %I:%M %p\")\r\n print (pref_datetime, slot.day, slot.time)\r\n if slot.day in pref and slot.time in pref:\r\n v_slot_pref.append(slot.number)\r\n poss_slot_time = timedelta(hours=+slot.length)\r\n return (v_slot_pref)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear all slots in database\r\n# -----------------------------------------------------------------------\r\ndef del_slots():\r\n slot = Slots()\r\n\r\n all_slots = list_slots()\r\n for s in all_slots:\r\n db.session.delete(s)\r\n db.session.commit()\r\n return()# db.session.rollback()\r\n # print ('Delete Failed')\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear one slot in database\r\n# -----------------------------------------------------------------------\r\ndef slot_delete(slot):\r\n\r\n try:\r\n find = Slots.query.filter_by(number=slot).first()\r\n db.session.delete(find)\r\n print (slot, \"deleted\")\r\n except:\r\n print (slot, \"could not be deleted\")\r\n db.session.commit()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# List all volunteers in database\r\n# -----------------------------------------------------------------------\r\ndef list_volunteers():\r\n volunteer = Volunteers()\r\n\r\n all_volunteers = Volunteers.query.order_by(volunteer.id).all()\r\n return(all_volunteers)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear all Volunteers in database\r\n# -----------------------------------------------------------------------\r\ndef del_volunteers():\r\n volunteer = Volunteers()\r\n\r\n all_volunteer = list_volunteers()\r\n for v in all_volunteer:\r\n db.session.delete(v)\r\n db.session.commit()\r\n return()#\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear Volunteer in database\r\n# -----------------------------------------------------------------------\r\ndef volunteer_delete(volunteer):\r\n try:\r\n find = Volunteers.query.filter_by(id=volunteer).first()\r\n db.session.delete(find)\r\n success = (find.first_name, find.last_name, \"deleted\")\r\n space = \" \"\r\n result = space.join(success)\r\n db.session.commit()\r\n except:\r\n failure = (find.first_name, find.last_name, \"could not be deleted\")\r\n space = \" \"\r\n result = space.join(failure)\r\n return(result)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Set the type of file to allow to be uploaded\r\n# -----------------------------------------------------------------------\r\ndef allowed_file(filename):\r\n 
extensions=config.ALLOWED_EXTENSIONS\r\n\r\n return '.' in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in extensions\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Verify the volunteer can run the slot\r\n# -----------------------------------------------------------------------\r\ndef volunteer_match(volunteer, sessioninfo):\r\n find = Events.query.filter_by(number=sessioninfo['event']).first()\r\n session_check = None\r\n\r\n if str(find.tier) in volunteer.tiers:\r\n tier_check = True\r\n else:\r\n tier_check = False\r\n if sessioninfo['slot'] in volunteer.slot_pref:\r\n slot_check = True\r\n else:\r\n slot_check = False\r\n if not volunteer.sessions:\r\n session_check = False\r\n else:\r\n for sess in volunteer.sessions:\r\n if int(sessioninfo['slot']) is sess.slot:\r\n session_check = True\r\n print (session_check, sessioninfo['slot'], sess.slot)\r\n break\r\n else:\r\n session_check = False\r\n print (session_check, sessioninfo['slot'], sess.slot)\r\n\r\n if slot_check is True and tier_check is True and session_check is False:\r\n result = (volunteer.first_name, volunteer.last_name, 'matched the slot and tier requirements, volunteer is available')\r\n space = ' '\r\n saved = space.join(result)\r\n save_session(sessioninfo)\r\n else:\r\n result = ('Error!', volunteer.first_name, volunteer.last_name, 'did not match requirements or is unavailable')\r\n space = ' '\r\n saved = space.join(result)\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Volunteer Save to Database\r\n# -----------------------------------------------------------------------\r\ndef volunteer_save(new_volunteer):\r\n tiers = []\r\n volunteer = Volunteers()\r\n\r\n all_volunteers = list_volunteers()\r\n\r\n volunteer.dci = new_volunteer['dci']\r\n volunteer.first_name = new_volunteer['first_name']\r\n volunteer.last_name = new_volunteer['last_name']\r\n try:\r\n volunteer.phone = new_volunteer['phone']\r\n except:\r\n pass\r\n try:\r\n volunteer.email = new_volunteer['email']\r\n except:\r\n pass\r\n try:\r\n volunteer.emergency_info = new_volunteer['emergency_info']\r\n except:\r\n pass\r\n try:\r\n if volunteer.dm_tag is 'Admin':\r\n volunteer.dm_tag = 'Admin'\r\n elif new_volunteer['role'] is 'DM':\r\n volunteer.dm_tag = 'DM'\r\n elif new_volunteer['role'] is 'Both':\r\n volunteer.dm_tag = 'Both'\r\n except:\r\n pass\r\n try:\r\n if 'Tier 1' in new_volunteer['tiers']:\r\n tiers.append('1')\r\n if 'Tier 2' in new_volunteer['tiers']:\r\n tiers.append('2')\r\n if 'Tier 3' in new_volunteer['tiers']:\r\n tiers.append('3')\r\n if 'Tier 4' in new_volunteer['tiers']:\r\n tiers.append('4')\r\n except:\r\n pass\r\n try:\r\n volunteer.tiers = ','.join(tiers)\r\n except:\r\n pass\r\n try:\r\n volunteer.style = new_volunteer['style']\r\n except:\r\n pass\r\n try:\r\n volunteer.con_exp = new_volunteer['con_exp']\r\n except:\r\n pass\r\n try:\r\n volunteer.pronoun = new_volunteer['pronoun']\r\n except:\r\n pass\r\n try:\r\n volunteer.notes = new_volunteer['notes']\r\n except:\r\n pass\r\n try:\r\n volunteer.badge = new_volunteer['badge']\r\n except:\r\n pass\r\n try:\r\n volunteer.hotel = new_volunteer['hotel']\r\n except:\r\n pass\r\n try:\r\n volunteer.hotel_pref = new_volunteer['hotel_pref']\r\n except:\r\n pass\r\n try:\r\n volunteer.other = new_volunteer['other']\r\n except:\r\n pass\r\n try:\r\n volunteer.shirt_size = new_volunteer['shirt_size']\r\n except:\r\n pass\r\n try:\r\n volunteer.adventure_types = 
new_volunteer['adventure_types']\r\n except:\r\n pass\r\n try:\r\n slots = match_slots(new_volunteer['slot_pref'])\r\n volunteer.slot_pref = json.dumps(slots)\r\n except:\r\n pass\r\n db.session.merge(volunteer)\r\n try:\r\n db.session.commit()\r\n #session.permanent = True\r\n saved = 'saved'\r\n return (saved)\r\n except:\r\n logger.exception(\"Cannot save volunteer\")\r\n db.session.rollback()\r\n saved = 'failed'\r\n return ()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Volunteer Input\r\n# -----------------------------------------------------------------------\r\ndef volunteer_parse(filename):\r\n volunteer = {}\r\n newheader = []\r\n slottimes = []\r\n all_slots = []\r\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n times = ['8:00am','10:00am', '12:00pm', '2:00pm', '4:00pm', '6:00pm', '8:00pm','10:00pm']\r\n counter = 0\r\n id_counter = 10000001\r\n\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for header in reader.fieldnames:\r\n if '[' and ']' not in header:\r\n if 'Timestamp' in header:\r\n newheader.append('timestamp')\r\n elif 'Email Address' in header:\r\n newheader.append('email')\r\n elif 'First Name' in header:\r\n newheader.append('first_name')\r\n elif 'Last Name' in header:\r\n newheader.append('last_name')\r\n elif 'Cell Phone Number' in header:\r\n newheader.append('phone')\r\n elif 'Emergency' in header:\r\n newheader.append('emergency_info')\r\n elif 'DCI Number' in header:\r\n newheader.append('dci')\r\n elif 'Role' in header:\r\n newheader.append('role')\r\n elif 'tiers' in header:\r\n newheader.append('tiers')\r\n elif 'style' in header:\r\n newheader.append('style')\r\n elif 'Previous Convention/D&D Volunteer experience' in header:\r\n newheader.append('con_exp')\r\n elif 'Pronoun' in header:\r\n newheader.append('pronoun')\r\n elif 'Notes' in header in header:\r\n newheader.append('notes')\r\n elif 'badge' in header:\r\n newheader.append('badge')\r\n elif 'hotel' in header:\r\n newheader.append('hotel')\r\n elif 'preference' in header:\r\n newheader.append('hotel_pref')\r\n elif 'Other comments' in header:\r\n newheader.append('other')\r\n elif 'Shirt' in header:\r\n newheader.append('shirt_size')\r\n elif 'type of Adventures' in header:\r\n newheader.append('adventure_types')\r\n # Parse templates that use brackets \"[]\" to make the header\r\n elif '[' and ']' in header:\r\n slot = header[header.find('[')+len('['):header.find(']')]\r\n all_slots.append(slot)\r\n newheader.append(slot)\r\n reader.fieldnames = newheader\r\n for vol in reader:\r\n volunteer_slots = []\r\n for h in vol:\r\n if h in days:\r\n v_slot = vol_parse_day(h,vol,times)\r\n volunteer_slots.extend(v_slot)\r\n elif h in times:\r\n v_slot = vol_parse_time(h,vol,days)\r\n volunteer_slots.extend(v_slot)\r\n vol['slot_pref'] = volunteer_slots\r\n saved = volunteer_save(vol)\r\n return(saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Parse slots for a volunteer if columns are days\r\n# -----------------------------------------------------------------------\r\ndef vol_parse_day(v_day,volunteer,times):\r\n slot = []\r\n\r\n v_times = volunteer[v_day].split(\",\")\r\n for vt in v_times:\r\n at = vt.lstrip(' ')\r\n for t in times:\r\n if at.startswith(t):\r\n try:\r\n t = time.strptime(t,\"%I:%M%p\")\r\n except:\r\n pass\r\n s = time.strftime(\"%I:%M %p\",t)\r\n vs = (v_day + \" \" + s)\r\n slot.append(vs)\r\n 
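`vol_parse_day` above normalizes survey times like "8:00am" through a `time.strptime`/`strftime` round trip before matching them against slot labels. That normalization on its own:

import time

def normalize_time(raw):
    # strptime tolerates the missing space and lower-case am/pm
    return time.strftime('%I:%M %p', time.strptime(raw, '%I:%M%p'))

print(normalize_time('8:00am'))   # 08:00 AM
print(normalize_time('10:00pm'))  # 10:00 PM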
return(slot)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Parse slots for a volunteer if columns are times\r\n# -----------------------------------------------------------------------\r\ndef vol_parse_time(v_time,volunteer,days):\r\n volunteer_day = []\r\n return()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Save Session information\r\n# -----------------------------------------------------------------------\r\ndef save_session(session):\r\n new_session = Sessions()\r\n\r\n session_exist = Sessions.query.get(session['number'])\r\n\r\n if session_exist is None:\r\n new_session.number = session['number']\r\n new_session.event = session['event']\r\n new_session.volunteer = session['volunteer']\r\n new_session.slot = session['slot']\r\n new_session.table = session['table']\r\n db.session.merge(new_session)\r\n try:\r\n db.session.commit()\r\n saved = 'saved'\r\n print (session, saved)\r\n return (saved)\r\n except:\r\n logger.exception(\"Cannot save new session\")\r\n db.session.rollback()\r\n saved = 'failed'\r\n print (session, 'save', saved)\r\n return (saved)\r\n else:\r\n saved = 'exists'\r\n return (saved)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# List Session information\r\n# -----------------------------------------------------------------------\r\ndef list_sessions():\r\n sessions = Sessions()\r\n\r\n all_sessions = Sessions.query.order_by(sessions.number).all()\r\n return(all_sessions)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear all sessions in database\r\n# -----------------------------------------------------------------------\r\ndef del_sessions():\r\n sesses = Sessions()\r\n\r\n all_sessions = Sessions.query.order_by(sesses.number).all()\r\n for s in all_sessions:\r\n db.session.delete(s)\r\n db.session.commit()\r\n return()\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Clear a specific Session in database\r\n# -----------------------------------------------------------------------\r\ndef session_delete(sess):\r\n sesses = Sessions()\r\n\r\n try:\r\n find = Sessions.query.filter_by(number=sess).first()\r\n db.session.delete(find)\r\n db.session.commit()\r\n return('deleted')\r\n except:\r\n db.session.rollback()\r\n db.session.commit()\r\n return('rollback')\r\n\r\n# -----------------------------------------------------------------------\r\n# Test if there is an existing event at the slot and table\r\n# -----------------------------------------------------------------------\r\ndef check_availability(sessioninfo, all_sessions):\r\n match = None\r\n for sess in all_sessions:\r\n if int(sessioninfo['table']) is sess.table and int(sessioninfo['slot']) is sess.slot:\r\n match = True\r\n break\r\n else:\r\n match = False\r\n return(match)\r\n# -----------------------------------------------------------------------\r\n# App Routes\r\n# -----------------------------------------------------------------------\r\n# -----------------------------------------------------------------------\r\n# Upload of Volunteer File Completed\r\n# -----------------------------------------------------------------------\r\n@app.route('/upload_completed/')\r\n# At some point this can be converted to a flash.\r\ndef upload_completed(filename):\r\n return render_template('upload_completed.html', **{'filename' : filename })\r\n\r\n\r\n# 
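`check_availability` above rescans every session on each lookup and compares ints with `is`, which only works by accident for small interned values. A set of (table, slot) pairs gives an O(1) membership test with `==` semantics; session objects are assumed to expose integer `.table` and `.slot`, as in the model:

def build_occupancy(all_sessions):
    # one pass; each pair marks a table already booked for a slot
    return {(s.table, s.slot) for s in all_sessions}

def is_taken(occupied, table, slot):
    return (int(table), int(slot)) in occupied

# usage sketch:
#   occupied = build_occupancy(list_sessions())
#   is_taken(occupied, sessioninfo['table'], sessioninfo['slot'])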
-----------------------------------------------------------------------\r\n# Slot Addition\r\n# -----------------------------------------------------------------------\r\n@app.route('/slots', methods=['POST', 'GET'])\r\n# Slots are defined by a specific number, the day, time, and how long the slot runs.\r\n# length is important as some events may run across multiple slots.\r\ndef slots():\r\n form = SlotForm(request.form)\r\n slotinfo = {}\r\n all_slots = list_slots()\r\n for slot in all_slots:\r\n slot.datetime = slot.datetime.strftime(\"%A %I:%M %p\")\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n if request.form['number'] is not None:\r\n slotinfo['number'] = request.form['number']\r\n else:\r\n flash('Error! Please enter a value')\r\n return redirect(request.url)\r\n if request.form['day'] is not None:\r\n slotinfo['day'] = request.form['day']\r\n else:\r\n flash('Error! Please enter a value')\r\n return redirect(request.url)\r\n if request.form['length'] is not None:\r\n slotinfo['length'] = request.form['length']\r\n else:\r\n flash('Error! Please enter a value')\r\n return redirect(request.url)\r\n if request.form['time'] is not None:\r\n slotinfo['time'] = request.form['time']\r\n else:\r\n flash('Error! Please enter a value')\r\n return redirect(request.url)\r\n\r\n if form.validate_on_submit():\r\n try:\r\n if request.form['submit']:\r\n saved = save_slot(slotinfo)\r\n if saved is 'exists':\r\n flash('Error! This slot already exists')\r\n return redirect(request.url)\r\n elif saved is 'failed':\r\n flash('Error! Slot did not Save')\r\n return redirect(request.url)\r\n elif saved is 'saved':\r\n return redirect(request.url)\r\n except:\r\n pass\r\n try:\r\n if request.form['clear']:\r\n del_slots()\r\n return redirect(request.url)\r\n except:\r\n pass\r\n try:\r\n if request.form['delete']:\r\n slot_delete(request.form['delete'])\r\n return redirect(request.url)\r\n except:\r\n pass\r\n\r\n else:\r\n flash(form.errors)\r\n return redirect(request.url)\r\n else:\r\n return redirect(url_for('login'))\r\n return render_template('slots.html', form=form, **{'name': name, 'all_slots': all_slots})\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# upload file for import Route\r\n# -----------------------------------------------------------------------\r\n@app.route('/upload', methods=['GET', 'POST'])\r\n# Display a visual to upload a CSV file.\r\ndef upload():\r\n folder=config.UPLOAD_FOLDER\r\n\r\n if request.method == 'POST':\r\n # check if the post request has the file part\r\n if 'file' not in request.files:\r\n flash('No file part')\r\n return redirect(request.url)\r\n file = request.files['file']\r\n # if user does not select file, browser also\r\n # submit an empty part without filename\r\n if file.filename == '':\r\n flash('No selected file')\r\n return redirect(request.url)\r\n if file and allowed_file(file.filename):\r\n filename = secure_filename(file.filename)\r\n file.save(os.path.join(folder, filename))\r\n return redirect(url_for('upload_completed', filename=filename))\r\n return render_template('upload.html')\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Route to select file to add volunteers\r\n# -----------------------------------------------------------------------\r\n@app.route('/volunteers', methods=['GET', 'POST'])\r\n# Display the files uploaded and select one to parse the volunteers from.\r\ndef volunteers():\r\n folder = 
config.UPLOAD_FOLDER\r\n files = os.listdir(folder)\r\n form = FileForm(request.form, obj=files)\r\n counter = 0\r\n f_choices = []\r\n all_volunteers = list_volunteers()\r\n\r\n form.selectfile.choices = [(file,file) for file in files]\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n select = request.form.get('selectfile')\r\n if form.validate_on_submit():\r\n if request.form.get('submit') == 'submit':\r\n location = os.path.join(folder,select)\r\n saved = volunteer_parse(location)\r\n if saved is 'failed':\r\n flash('Error! Volunteers did not Save')\r\n all_volunteers = list_volunteers()\r\n return render_template('volunteers.html', form=form, **{'name': name, 'all_volunteers': all_volunteers})\r\n elif saved is 'saved':\r\n all_volunteers = list_volunteers()\r\n return render_template('volunteers.html', form=form, **{'name': name,'all_volunteers': all_volunteers})\r\n elif request.form['submit'] == 'clear':\r\n del_volunteers()\r\n return redirect(request.url)\r\n else:\r\n response = volunteer_delete(request.form['submit'])\r\n flash(response)\r\n return redirect(request.url)\r\n else:\r\n flash(form.errors)\r\n return redirect(request.url)\r\n else:\r\n return redirect(url_for('login'))\r\n return render_template('volunteers.html', form=form, **{'name': name, 'all_volunteers':all_volunteers})\r\n# -----------------------------------------------------------------------\r\n# Login to server route\r\n# -----------------------------------------------------------------------\r\n# This is a rudimentary authentication scheme. A more robuest auth setup will be implemented before I do final release.\r\n@app.route('/login', methods=['POST', 'GET'])\r\ndef login():\r\n form = LoginForm(request.form)\r\n session['id'] = 0\r\n\r\n if request.method == 'POST':\r\n name = request.form['name']\r\n user = request.form['email'] #\r\n password = request.form['password']\r\n\r\n if form.validate():\r\n if 'Eric' in name:\r\n session['name'] = name\r\n return redirect(url_for('index'))\r\n else:\r\n flash('Please enter a valid user')\r\n return redirect(request.url)\r\n else:\r\n flash('All the form fields are required.')\r\n return redirect(request.url)\r\n return render_template('login.html', form=form)\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Route for Game Table creation\r\n# -----------------------------------------------------------------------\r\n@app.route('/tables', methods=['POST', 'GET'])\r\n# Tables are simply a way to designate how many players you want to allow at a specific table.\r\n# I've added a bulk add so tables can be created more easily.\r\n\r\ndef tables():\r\n form = TableForm(request.form)\r\n tableinfo = {}\r\n all_tables = list_tables()\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n tableinfo['number'] = request.form['number']\r\n tableinfo['players'] = request.form['players']\r\n\r\n if form.validate_on_submit():\r\n if request.form['submit'] == 'submit':\r\n saved = save_table(tableinfo)\r\n if saved is 'exists':\r\n flash('Error! This table already exists')\r\n all_tables = list_tables()\r\n return render_template('tables.html', form=form, **{'all_tables': all_tables, 'name': name})\r\n elif saved is 'failed':\r\n flash('Error! 
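The login route above checks only a hard-coded name and never compares the password; its own comment flags the scheme as temporary. Hashing with werkzeug (installed alongside Flask) is the usual next step; the secret below is purely illustrative:

from werkzeug.security import generate_password_hash, check_password_hash

stored_hash = generate_password_hash('correct horse battery staple')  # at signup

def verify(password, stored=stored_hash):
    # at login: compare against the stored hash, never a stored plaintext
    return check_password_hash(stored, password)

print(verify('correct horse battery staple'))  # True
print(verify('guess'))                         # False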
Slot did not Save')\r\n all_tables = list_tables()\r\n return render_template('tables.html', form=form, **{'all_tables': all_tables, 'name': name})\r\n elif saved is 'saved':\r\n all_tables = list_tables()\r\n return render_template('tables.html', form=form, **{'all_tables': all_tables, 'name': name})\r\n elif request.form['submit'] == 'multiple':\r\n num_tables = request.form['bulk_tables']\r\n num_players = request.form['bulk_players']\r\n create_bulk_tables(num_tables,num_players)\r\n return redirect(request.url)\r\n elif request.form['submit'] == 'clear':\r\n del_tables()\r\n return redirect(request.url)\r\n else:\r\n flash(form.errors)\r\n return redirect(request.url)\r\n else:\r\n return redirect(url_for('login'))\r\n return render_template('tables.html', form=form, **{'name': name, 'all_tables':all_tables})\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Route for Game Event creation\r\n# -----------------------------------------------------------------------\r\n@app.route('/events', methods=['POST', 'GET'])\r\n# Events are what is being run, typically an adventure.\r\n# An Event must have an id, a code (typically from DDAL or CCC if D&D), a Title, runtime length in hours, a description of the event, and if AL, what tier it is.\r\n# tier and time are extremely important for scheduling purposes!\r\n\r\ndef events():\r\n folder = config.UPLOAD_FOLDER\r\n files = os.listdir(folder)\r\n form = EventForm(request.form)\r\n fileform = FileForm(request.form, obj=files)\r\n eventinfo = {}\r\n all_events = list_events()\r\n\r\n fileform.selectfile.choices = [(file,file) for file in files]\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n eventinfo['number'] = request.form['number']\r\n eventinfo['code'] = request.form['code']\r\n eventinfo['title'] = request.form['title']\r\n eventinfo['length'] = request.form['length']\r\n eventinfo['description'] = request.form['description']\r\n eventinfo['tier'] = request.form['tier']\r\n select = request.form.get('selectfile')\r\n location = os.path.join(folder,select)\r\n\r\n if form.validate_on_submit:\r\n if request.form['submit'] == 'submit':\r\n saved = save_event(eventinfo)\r\n if saved is 'exists':\r\n flash('Error! This event already exists')\r\n all_events = list_events()\r\n return render_template('events.html', form=form, fileform=fileform, **{'all_events': all_events, 'name': name})\r\n elif saved is 'failed':\r\n flash('Error! 
Slot did not Save')\r\n all_events = list_events()\r\n return render_template('events.html', form=form, fileform=fileform, **{'all_events': all_events, 'name': name})\r\n elif saved is 'saved':\r\n all_events = list_events()\r\n return render_template('events.html', form=form, fileform=fileform, **{'all_events': all_events, 'name': name})\r\n elif request.form['submit'] == 'clear':\r\n del_events()\r\n return redirect(request.url)\r\n elif request.form['submit'] == 'bulk_submit':\r\n bulk_event_upload(location)\r\n return redirect(request.url)\r\n else:\r\n flash(form.errors)\r\n flash(fileform.errors)\r\n return redirect(request.url)\r\n else:\r\n return redirect(url_for('login'))\r\n return render_template('events.html', form=form, fileform=fileform, **{'name': name, 'all_events': all_events})\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Route for Game Session creation\r\n# -----------------------------------------------------------------------\r\n@app.route('/sessions', methods=['POST', 'GET'])\r\n# Sessions pull data in from each table of the database: Volunteers, Slots, Events, Tables.\r\n# This page will let the user specify each parameter, and then save the Session to the database.\r\ndef sessions():\r\n form = SessionForm(request.form)\r\n all_events = list_events()\r\n all_tables = list_tables()\r\n all_slots = list_slots()\r\n all_volunteers = list_volunteers()\r\n sessioninfo = {}\r\n all_sessions = list_sessions()\r\n s = \" \"\r\n for slot in all_slots:\r\n slot.time = slot.datetime.strftime(\"%A %I:%M %p\")\r\n full_schedule = (str(slot.number), slot.datetime)\r\n slot_schedule = s.join(full_schedule)\r\n slot.schedule = slot_schedule\r\n for volunteer in all_volunteers:\r\n v_name = {volunteer.first_name, volunteer.last_name}\r\n v_full_name = s.join(v_name)\r\n volunteer.full_name= v_full_name\r\n form.table.choices = [(table.number, table.id) for table in all_tables]\r\n form.event.choices = [(event.number, event.code) for event in all_events]\r\n form.slot.choices = [(slot.number, slot.schedule) for slot in all_slots]\r\n form.volunteer.choices = [(volunteer.id, volunteer.full_name) for volunteer in all_volunteers]\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n sessioninfo['slot'] = request.form['slot']\r\n sessioninfo['table'] = request.form['table']\r\n sessioninfo['event'] = request.form['event']\r\n sessioninfo['volunteer'] = request.form['volunteer']\r\n sessioninfo['number'] = random.randint(1,1001)\r\n\r\n if form.validate_on_submit():\r\n #Parse volunteers, call function to check if the volunteer\r\n #is avaialble that session and will run the tier of that adventure.\r\n results = check_availability(sessioninfo, all_sessions)\r\n if results:\r\n flash ('Error! 
Please select a table and slot that are free')\r\n return redirect(request.url)\r\n else:\r\n for v in all_volunteers:\r\n if int(sessioninfo['volunteer']) is v.id:\r\n saved = volunteer_match(v, sessioninfo)\r\n flash (saved)\r\n return redirect(request.url)\r\n else:\r\n flash(form.errors)\r\n return redirect(request.url)\r\n else:\r\n return redirect(url_for('login'))\r\n return render_template('sessions.html', form=form, **{\r\n 'name': name,\r\n 'all_sessions': all_sessions,\r\n 'all_events': all_events,\r\n 'all_tables': all_tables,\r\n 'all_slots': all_slots,\r\n 'all_volunteers': all_volunteers\r\n })\r\n\r\n\r\n# -----------------------------------------------------------------------\r\n# Delete Sessions\r\n# -----------------------------------------------------------------------\r\n@app.route('/delete_sessions', methods=['POST', 'GET'])\r\n# Page to delete Sessions\r\ndef delete_session():\r\n form = SessionDeleteForm(request.form)\r\n all_sessions = list_sessions()\r\n s = \" \"\r\n for sess in all_sessions:\r\n find_volunteer = Volunteers.query.filter_by(id=sess.volunteer).first()\r\n find_event = Events.query.filter_by(number=sess.event).first()\r\n full_data = (str(sess.table), str(sess.slot), find_volunteer.first_name, find_volunteer.last_name, find_event.code)\r\n sess_fulldata = s.join(full_data)\r\n sess.full_data = sess_fulldata\r\n form.sessions.choices = [(sess.number, sess.full_data) for sess in all_sessions]\r\n\r\n if 'name' in session:\r\n name = session.get('name')\r\n\r\n if request.method == 'POST':\r\n selected_session = request.form['sessions']\r\n results = session_delete(selected_session)\r\n if results == 'deleted':\r\n flash ('Session', 'deleted')\r\n return redirect(request.url)\r\n else:\r\n flash ('Error!', 'Session', 'not deleted')\r\n return redirect(request.url)\r\n\r\n return render_template('delete_sessions.html', form=form, **{\r\n 'name': name,\r\n 'all_sessions': all_sessions,\r\n })\r\n\r\n# -----------------------------------------------------------------------\r\n# Index Route\r\n# -----------------------------------------------------------------------\r\n@app.route('/')\r\ndef index():\r\n # Check to see if the user already exists.\r\n # If it does, pass the user's name to the render_template\r\n if 'name' in session:\r\n name = session.get('name')\r\n return render_template('base.html', **{'name' : name })\r\n else:\r\n #Otherwose, just load the page. Page has code to detect if name exists\r\n return render_template('base.html')\r\n\r\n# -----------------------------------------------------------------------\r\n# Run Program\r\n# -----------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n app.run(port=config.PORT, host=config.HOST)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":45027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613373898","text":"#! 
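A recurring bug in the routes above: branches such as `saved is 'exists'` and `volunteer.dm_tag is 'Admin'` test object identity, not equality, and only appear to work while CPython happens to intern the operands. `==` is the correct comparison:

a = 'exists'
b = ''.join(['ex', 'ists'])  # same text, built at runtime
print(a == b)                # True
print(a is b)                # False in CPython: two distinct objects

x = int('1000')
y = 1000
print(x == y)                # True
print(x is y)                # typically False: beyond the small-int cache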
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Plot posteriors of CTC outputs (TIMIT corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n# import scipy.io.wavfile\nimport matplotlib.pyplot as plt\nimport audioread\nimport seaborn as sns\nplt.style.use('ggplot')\nsns.set_style(\"white\")\n\nblue = '#4682B4'\norange = '#D2691E'\ngreen = '#006400'\n\n\ndef plot_probs_ctc_phone(probs, save_path, wav_index, data_type, label_type):\n \"\"\"Plot posteriors of phones.\n Args:\n probs:\n save_path:\n wav_index: int\n data_type: train or dev or test\n label_type: phone39 or phone48 or phone61 or character\n \"\"\"\n # read wav file\n if data_type == 'train':\n TIMIT_PATH = '/n/sd8/inaguma/corpus/timit/original/train/'\n elif data_type == 'dev':\n return 0\n elif data_type == 'test':\n TIMIT_PATH = '/n/sd8/inaguma/corpus/timit/original/test/'\n\n speaker_name, file_name = wav_index.split('.')[0].split('_')\n region_paths = [os.path.join(TIMIT_PATH, region_name)\n for region_name in os.listdir(TIMIT_PATH)]\n for region_path in region_paths:\n speaker_paths = [os.path.join(region_path, speaker_name)\n for speaker_name in os.listdir(region_path)]\n for speaker_path in speaker_paths:\n if speaker_path.split('/')[-1] == speaker_name:\n file_paths = [os.path.join(speaker_path, file_name)\n for file_name in os.listdir(speaker_path)]\n for file_path in file_paths:\n if os.path.basename(file_path).split('.')[0] == file_name:\n if os.path.basename(file_path).split('.')[-1] == 'wav':\n wav_path = file_path\n\n with audioread.audio_open(wav_path) as f:\n # print(\"ch: %d, fs: %d, duration [s]: %.1f\" % (f.channels, f.samplerate, f.duration))\n channel = f.channels\n sampling_rate = f.samplerate\n duration = f.duration\n wav_barray = bytearray()\n for buf in f:\n wav_barray.extend(buf)\n\n # always read as 16bit\n wav_array = np.frombuffer(wav_barray, dtype=np.int16)\n # convert from short to float\n wav_float = pcm2float(wav_array)\n\n times_probs = np.arange(len(probs)) * 0.01\n plt.clf()\n plt.figure(figsize=(10, 4))\n\n ####################\n # waveform\n ####################\n plt.subplot(211)\n plt.title(wav_index)\n plt.tick_params(labelleft='off')\n sampling_interval = 1.0 / sampling_rate\n # wav_float = wav_float / 32768.0\n times_wav = np.arange(len(wav_float)) * sampling_interval\n plt.plot(times_wav, wav_float, color='grey')\n plt.ylabel('Amplitude', fontsize=12)\n plt.xlim([0, times_wav[-1]])\n plt.xticks(list(range(0, int(len(probs) / 100) + 1, 1)))\n plt.yticks(list(range(0, 2, 1)))\n\n ####################\n # phones\n ####################\n plt.subplot(212)\n plt.plot(times_probs, probs[:, 0],\n label='silence', color='black', linewidth=2)\n if label_type == 'phone39':\n blank_index = 39\n elif label_type == 'phone48':\n blank_index = 48\n elif label_type == 'phone61':\n blank_index = 61\n for i in range(1, blank_index, 1):\n plt.plot(times_probs, probs[:, i])\n plt.plot(times_probs, probs[:, blank_index],\n ':', label='blank', color='grey')\n plt.xlabel('Time[sec]', fontsize=12)\n plt.ylabel('Phones', fontsize=12)\n plt.xlim([0, duration])\n plt.ylim([0.05, 1.05])\n plt.xticks(list(range(0, int(len(probs) / 100) + 1, 1)))\n plt.yticks(list(range(0, 2, 1)))\n plt.legend(loc=\"upper right\", fontsize=12)\n\n # save_path = os.path.join(save_path, wav_index + '.png')\n # plt.savefig(save_path, dvi=500)\n plt.show()\n\n\ndef pcm2float(short_ndary):\n \"\"\"Convert from short to 
float.\"\"\"\n float_ndary = np.array(short_ndary, dtype=np.float64)\n return float_ndary\n # return np.where(float_ndary > 0.0, float_ndary / 32767.0, float_ndary /\n # 32768.0)\n","sub_path":"experiments/timit/plot/probs.py","file_name":"probs.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29659910","text":"import os\nimport sys\nimport csv\nimport time\nimport glob\nimport os.path\nimport itertools\n\nimport PyQt5\nimport serial\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIntValidator\n\nfrom PyQt5.QtWidgets import (\n QLCDNumber,\n QLabel,\n QComboBox,\n QCheckBox,\n QPushButton,\n QFileDialog,\n QLineEdit,\n QGridLayout,\n QHBoxLayout,\n QVBoxLayout,\n QApplication,\n QFrame,\n QDialog\n)\n\nfrom bitstring import BitArray\n\nPACKET_LENGTH = 32\nLCD_DIGIT_COUNT = 6\nDELAY_CNT = 15\n\nMOVE_DONE = b'M'\nZERO_ALL = b'X'\n\nPACKET_HEADER = '0x55'\nACCEL_PREFIX = '0x51'\nVEL_PREFIX = '0x52'\nANGLE_PREFIX = '0x53'\n\nDEFAULT_PORT = 'COM14'\nIMU_BAUD = '115200'\nPLATFORM_BAUD = '9600'\nBAUDRATES = (\n '110',\n '300',\n '600',\n '1200',\n '2400',\n '4800',\n '9600',\n '14400',\n '19200',\n '28800',\n '38400',\n '56000',\n '57600',\n '115200'\n)\n\nSTYLE = \"\"\"\nQLCDNumber {\n background: black;\n color:green;\n}\n\nQLCDNumber:flat {\n border: none;\n}\n\nQLabel {\n color: black;\n font: bold\n}\n\nMargin {\n font: bold 60px;\n qproperty-alignment: AlignCenter;\n}\n\nHeader {\n font: 20px; \n qproperty-alignment: 'AlignBottom | AlignCenter';\n}\n\nConnectButton {\n font: bold 14px;\n padding-top: 0px;\n padding-bottom: 0px;\n padding-right: 2px;\n padding-left: 2px;\n}\n\nGoButton {\n font: bold 14px;\n padding-top: 0px;\n padding-bottom: 0px;\n padding-right: 2px;\n padding-left: 2px;\n}\n\nConnectButton:checked {\n color: red\n}\n\nBrowseButton {\n font: 14px;\n padding-top: 2px;\n padding-bottom: 2px;\n padding-right: 4px;\n padding-left: 4px;\n}\n\nAngleButton {\n font: bold 14px;\n padding-top: 1px;\n padding-bottom: 1px;\n padding-right: 3px;\n padding-left: 3px;\n}\n\"\"\"\n\n\ndef get_ports():\n \"\"\" Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n \"\"\"\n if sys.platform.startswith('win'):\n import winreg\n\n path = 'HARDWARE\\\\DEVICEMAP\\\\SERIALCOMM'\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)\n ports = []\n\n for i in itertools.count():\n try:\n param, value, _ = winreg.EnumValue(key, i)\n if 'BthModem' not in param:\n ports.append(value)\n except EnvironmentError:\n break\n return ports\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result\n\n\n# noinspection PyArgumentList\nclass ImuSignal(QtCore.QObject):\n angle_x = QtCore.pyqtSignal(str)\n angle_y = QtCore.pyqtSignal(str)\n angle_z = QtCore.pyqtSignal(str)\n\n accel_x = QtCore.pyqtSignal(str)\n accel_y = QtCore.pyqtSignal(str)\n accel_z = QtCore.pyqtSignal(str)\n\n vel_x = QtCore.pyqtSignal(str)\n vel_y = QtCore.pyqtSignal(str)\n vel_z = 
QtCore.pyqtSignal(str)\n\n\n# noinspection PyArgumentList\nclass PlatformSignal(QtCore.QObject):\n move_done = QtCore.pyqtSignal(bool)\n\n\nclass ImuReadThread(QtCore.QThread):\n def __init__(self):\n super().__init__()\n\n self.signal = ImuSignal()\n\n self.imu_data = {\n 'accel_x': 0.0, 'accel_y': 0.0, 'accel_z': 0.0, 'accel_t': 0.0,\n 'vel_x': 0.0, 'vel_y': 0.0, 'vel_z': 0.0, 'vel_t': 0.0,\n 'angle_x': 0.0, 'angle_y': 0.0, 'angle_z': 0.0, 'angle_t': 0.0\n }\n\n self.start_angle_x = 0\n self.start_angle_y = 0\n self.start_angle_z = 0\n\n self.record_state = False\n\n def open_port(self, port, baudrate):\n \"\"\"\n\n :param port:\n :param baudrate:\n \"\"\"\n self.ser = serial.Serial(port, baudrate)\n\n def create_file(self, path):\n \"\"\"\n\n :param path:\n \"\"\"\n ext = '.csv'\n fname = os.path.join(\n path,\n time.strftime('%Y%m%d%H%M%S') + ext\n )\n self.fobject = open(fname, 'w', newline='')\n\n fieldnames = sorted(self.imu_data.keys())\n self.file_writer = csv.DictWriter(self.fobject, fieldnames=fieldnames)\n self.file_writer.writeheader()\n\n self.record_state = True\n\n def read_ser_data(self):\n \"\"\"\n\n :return:\n \"\"\"\n ser_data = BitArray()\n\n while True:\n packet_header = BitArray(self.ser.read())\n if packet_header == PACKET_HEADER:\n ser_data += packet_header\n break\n\n ser_data += BitArray(self.ser.read(PACKET_LENGTH))\n return ser_data\n\n def decode_imu_data(self, ser_data):\n \"\"\"\n\n :param ser_data:\n \"\"\"\n chunks = ser_data.unpack('bytes:11, bytes:11, bytes:11')\n\n for chunk in chunks:\n _, prefix, *bs = BitArray(chunk).unpack(\n 'bytes:1, bytes:1, intle:16, intle:16, intle:16, intle:16'\n )\n if BitArray(prefix) == ACCEL_PREFIX:\n self.imu_data['accel_x'] = bs[0] / 32768 * 16\n self.imu_data['accel_y'] = bs[1] / 32768 * 16\n self.imu_data['accel_z'] = bs[2] / 32768 * 16\n self.imu_data['accel_t'] = bs[3] / 340 + 36.25\n elif BitArray(prefix) == VEL_PREFIX:\n self.imu_data['vel_x'] = bs[0] / 32768 * 2000\n self.imu_data['vel_y'] = bs[1] / 32768 * 2000\n self.imu_data['vel_z'] = bs[2] / 32768 * 2000\n self.imu_data['vel_t'] = bs[3] / 340 + 36.25\n elif BitArray(prefix) == ANGLE_PREFIX:\n self.imu_data['angle_x'] = bs[0] / 32768 * 180\n self.imu_data['angle_y'] = bs[1] / 32768 * 180\n self.imu_data['angle_z'] = bs[2] / 32768 * 180\n self.imu_data['angle_t'] = bs[3] / 340 + 36.25\n else:\n pass # TODO: error handling here\n\n def set_relative_angle(self):\n \"\"\"\n\n \"\"\"\n self.start_angle_x = self.imu_data['angle_x']\n self.start_angle_y = self.imu_data['angle_y']\n self.start_angle_z = self.imu_data['angle_z']\n\n def set_absolute_angle(self):\n \"\"\"\n\n \"\"\"\n self.start_angle_x = 0\n self.start_angle_y = 0\n self.start_angle_z = 0\n\n def run(self):\n \"\"\"\n\n \"\"\"\n delay_cnt = DELAY_CNT\n\n while True:\n self.decode_imu_data(self.read_ser_data())\n\n if self.record_state:\n self.file_writer.writerow(self.imu_data)\n\n if delay_cnt:\n delay_cnt -= 1\n else:\n delay_cnt = DELAY_CNT\n\n self.signal.angle_x.emit(\n '{:=6.1f}'.format(\n self.imu_data['angle_x'] - self.start_angle_x\n )\n )\n self.signal.angle_y.emit(\n '{:=6.1f}'.format(\n self.imu_data['angle_y'] - self.start_angle_y\n )\n )\n self.signal.angle_z.emit(\n '{:=6.1f}'.format(\n self.imu_data['angle_z'] - self.start_angle_z\n )\n )\n self.signal.accel_x.emit(\n '{:=6.2f}'.format(self.imu_data['accel_x'])\n )\n self.signal.accel_y.emit(\n '{:=6.2f}'.format(self.imu_data['accel_y'])\n )\n self.signal.accel_z.emit(\n '{:=6.2f}'.format(self.imu_data['accel_z'])\n )\n 
self.signal.vel_x.emit(\n '{:=5.0f}'.format(self.imu_data['vel_x'])\n )\n self.signal.vel_y.emit(\n '{:=5.0f}'.format(self.imu_data['vel_y'])\n )\n self.signal.vel_z.emit(\n '{:=5.0f}'.format(self.imu_data['vel_z'])\n )\n\n\nclass PlatformThread(QtCore.QThread):\n def __init__(self):\n super().__init__()\n\n self.signal = PlatformSignal()\n\n def open_port(self, port, baudrate):\n \"\"\"\n\n :param port:\n :param baudrate:\n \"\"\"\n self.ser = serial.Serial(port, baudrate)\n\n def read_ser_data(self):\n \"\"\"\n\n :return:\n \"\"\"\n ser_data = self.ser.read()\n\n return ser_data\n\n def write_ser_data(self, data):\n \"\"\"\n\n :param data:\n \"\"\"\n self.ser.write(data)\n\n def run(self):\n \"\"\"\n\n \"\"\"\n while True:\n answer = self.read_ser_data()\n if answer == MOVE_DONE:\n self.signal.move_done.emit(True)\n elif answer == ZERO_ALL:\n print('All coordinates are zero.')\n\n\nclass Margin(QLabel):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass Header(QLabel):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass ConnectButton(QPushButton):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass BrowseButton(QPushButton):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass AngleButton(QPushButton):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass GoButton(QPushButton):\n def __init__(self, txt):\n super().__init__(txt)\n\n\nclass Interface(QDialog):\n # noinspection PyUnresolvedReferences\n def __init__(self):\n super().__init__(\n None,\n QtCore.Qt.Window |\n QtCore.Qt.WindowTitleHint |\n QtCore.Qt.WindowCloseButtonHint\n )\n\n self.imu_connection_state = False\n self.platform_connection_state = False\n # self.ports = (DEFAULT_PORT,)\n self.ports = get_ports()\n\n self.imu_thread = ImuReadThread()\n self.imu_thread.finished.connect(self.close_imu_port)\n\n self.platform_thread = PlatformThread()\n self.platform_thread.finished.connect(self.close_platform_port)\n\n self.initUI()\n\n def connect_imu(self):\n \"\"\"\n\n :return:\n \"\"\"\n port = self.imu_ports_list.currentText()\n baudrate = int(self.imu_baud_list.currentText())\n\n if not port:\n return 1 # TODO: custom error handling here\n\n try:\n if not self.imu_connection_state:\n self.imu_thread.open_port(port, baudrate)\n self.record_box.setEnabled(False)\n\n if self.record_box.isChecked():\n self.imu_thread.create_file(self.file_path.text())\n\n self.imu_thread.start()\n self.imu_thread.set_absolute_angle()\n self.imu_connection_state = True\n else:\n self.imu_thread.terminate()\n self.imu_connection_state = False\n self.record_box.setEnabled(True)\n self.clear_lcds()\n except serial.SerialException as se:\n self.imu_connect_button.setChecked(False)\n print(se.args)\n\n def connect_platform(self):\n \"\"\"\n\n :return:\n \"\"\"\n port = self.platform_ports_list.currentText()\n baudrate = int(self.platform_baud_list.currentText())\n\n if not port:\n return 1 # TODO: custom error handling here\n\n try:\n if not self.platform_connection_state:\n self.platform_thread.open_port(port, baudrate)\n self.platform_thread.start()\n self.platform_go_button.setEnabled(True)\n self.platform_zero_button.setEnabled(True)\n self.platform_connection_state = True\n else:\n self.platform_thread.terminate()\n self.platform_go_button.setEnabled(False)\n self.platform_zero_button.setEnabled(False)\n self.platform_connection_state = False\n except serial.SerialException as se:\n self.platform_connect_button.setChecked(False)\n print(se.args)\n\n def close_imu_port(self):\n \"\"\"\n\n \"\"\"\n 
self.imu_thread.ser.flush()\n self.imu_thread.ser.close()\n\n if self.record_box.isChecked():\n self.imu_thread.fobject.flush()\n self.imu_thread.fobject.close()\n\n def close_platform_port(self):\n \"\"\"\n\n \"\"\"\n self.platform_thread.ser.flush()\n self.platform_thread.ser.close()\n\n def create_lcd(self, signal):\n \"\"\"\n\n :param signal:\n :return:\n \"\"\"\n lcd = QLCDNumber(self)\n\n lcd.setDigitCount(LCD_DIGIT_COUNT)\n lcd.setSegmentStyle(QLCDNumber.Flat)\n signal.connect(lcd.display)\n\n return lcd\n\n def clear_lcds(self):\n \"\"\"\n\n \"\"\"\n for lcd in self.findChildren(QLCDNumber):\n lcd.display(0)\n\n def show_select_dir_dialog(self):\n \"\"\"\n\n \"\"\"\n # noinspection PyCallByClass,PyTypeChecker,PyArgumentList\n path = QFileDialog.getExistingDirectory(\n self,\n 'Выберите директорию',\n os.getcwd(),\n QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks\n )\n if path:\n self.file_path.setText(str(path))\n\n def send_coords(self):\n message = '^MOVE,{},{},{},{}$'\n\n if not self.sync_box.isChecked():\n coord_1 = self.rod_1.text()\n coord_2 = self.rod_2.text()\n coord_3 = self.rod_3.text()\n coord_4 = self.rod_4.text()\n else:\n # Mirror mode: negate via int() so a negative entry does not become '--N'\n coord_1 = self.rod_1.text()\n coord_2 = str(-int(coord_1 or '0'))\n coord_3 = self.rod_3.text()\n coord_4 = str(-int(coord_3 or '0'))\n\n message = message.format(coord_1, coord_2, coord_3, coord_4)\n\n self.platform_thread.write_ser_data(message.encode())\n self.platform_go_button.setEnabled(False)\n\n def send_zero_all(self):\n self.platform_thread.write_ser_data(b'^ZERO$')\n self.rod_1.setText('0')\n self.rod_2.setText('0')\n self.rod_3.setText('0')\n self.rod_4.setText('0')\n\n @staticmethod\n def create_vline():\n \"\"\"\n\n :return:\n \"\"\"\n vline = QFrame()\n vline.setFrameShape(QFrame.VLine)\n vline.setFrameShadow(QFrame.Sunken)\n\n return vline\n\n @staticmethod\n def create_hline():\n \"\"\"\n\n :return:\n \"\"\"\n hline = QFrame()\n hline.setFrameShape(QFrame.HLine)\n hline.setFrameShadow(QFrame.Sunken)\n\n return hline\n\n def disable_roads(self):\n \"\"\"\n\n \"\"\"\n if self.sync_box.isChecked():\n self.rod_2.setEnabled(False)\n self.rod_4.setEnabled(False)\n self.rod_2.setText('')\n self.rod_4.setText('')\n self.label_2.setText('')\n self.label_4.setText('')\n else:\n self.rod_2.setEnabled(True)\n self.rod_4.setEnabled(True)\n self.label_2.setText('0')\n self.label_4.setText('0')\n self.rod_2.setText('0')\n self.rod_4.setText('0')\n\n # noinspection PyUnresolvedReferences\n def initUI(self):\n\n # _____________________________IMU MENU________________________________\n\n \"\"\"\n\n \"\"\"\n imu_menu = QHBoxLayout()\n\n imu_menu.addWidget(QLabel('Д'))\n imu_menu.addWidget(self.create_vline())\n\n rel_angle_button = AngleButton('0')\n rel_angle_button.setToolTip('Относительные значения угла')\n # noinspection PyUnresolvedReferences\n rel_angle_button.clicked.connect(self.imu_thread.set_relative_angle)\n imu_menu.addWidget(rel_angle_button)\n\n abs_angle_button = AngleButton('A')\n abs_angle_button.setToolTip('Абсолютные значения угла')\n # noinspection PyUnresolvedReferences\n abs_angle_button.clicked.connect(self.imu_thread.set_absolute_angle)\n imu_menu.addWidget(abs_angle_button)\n\n imu_menu.addWidget(self.create_vline())\n\n self.imu_connect_button = ConnectButton('\U0001F50C')\n self.imu_connect_button.setToolTip('Подключить/отключить')\n self.imu_connect_button.setCheckable(True)\n if not self.ports:\n self.imu_connect_button.setEnabled(False)\n # noinspection PyUnresolvedReferences\n 
self.imu_connect_button.clicked.connect(self.connect_imu)\n imu_menu.addWidget(self.imu_connect_button)\n\n imu_menu.addWidget(QLabel('Порт:'))\n\n self.imu_ports_list = QComboBox(self)\n for port in self.ports:\n self.imu_ports_list.addItem(port)\n imu_menu.addWidget(self.imu_ports_list)\n\n imu_menu.addWidget(QLabel('Скорость:'))\n\n self.imu_baud_list = QComboBox(self)\n for baudrate in BAUDRATES:\n self.imu_baud_list.addItem(baudrate)\n self.imu_baud_list.setCurrentIndex(BAUDRATES.index(IMU_BAUD))\n imu_menu.addWidget(self.imu_baud_list)\n\n imu_menu.addWidget(self.create_vline())\n\n imu_menu.addWidget(QLabel('Запись:'))\n\n self.record_box = QCheckBox()\n imu_menu.addWidget(self.record_box)\n\n imu_menu.addWidget(QLabel('Путь:'))\n\n self.file_path = QLineEdit(os.getcwd())\n self.file_path.setReadOnly(True)\n imu_menu.addWidget(self.file_path)\n\n select_path_button = BrowseButton('...')\n select_path_button.setToolTip('Выбрать директорию')\n # noinspection PyUnresolvedReferences\n select_path_button.clicked.connect(self.show_select_dir_dialog)\n imu_menu.addWidget(select_path_button)\n\n # __________________________PLATFORM MENU______________________________\n\n platform_menu = QHBoxLayout()\n\n platform_menu.addWidget(QLabel('П'))\n\n platform_menu.addWidget(self.create_vline())\n\n self.platform_connect_button = ConnectButton('\\U0001F50C')\n self.platform_connect_button.setToolTip('Подключить/отключить')\n self.platform_connect_button.setCheckable(True)\n if not self.ports:\n self.platform_connect_button.setEnabled(False)\n # noinspection PyUnresolvedReferences\n self.platform_connect_button.clicked.connect(self.connect_platform)\n platform_menu.addWidget(self.platform_connect_button)\n\n platform_menu.addWidget(QLabel('Порт:'))\n\n self.platform_ports_list = QComboBox(self)\n for port in self.ports:\n self.platform_ports_list.addItem(port)\n\n platform_menu.addWidget(self.platform_ports_list)\n\n platform_menu.addWidget(QLabel('Скорость:'))\n\n self.platform_baud_list = QComboBox(self)\n for baudrate in BAUDRATES:\n self.platform_baud_list.addItem(baudrate)\n\n self.platform_baud_list.setCurrentIndex(BAUDRATES.index(PLATFORM_BAUD))\n platform_menu.addWidget(self.platform_baud_list)\n\n platform_menu.addWidget(self.create_vline())\n\n self.platform_zero_button = AngleButton('0') # FIXME: Add custom class\n self.platform_zero_button.setToolTip('Обнулить координаты платформы')\n self.platform_zero_button.setEnabled(False)\n # noinspection PyUnresolvedReferences\n self.platform_zero_button.clicked.connect(self.send_zero_all)\n platform_menu.addWidget(self.platform_zero_button)\n\n self.platform_go_button = GoButton('\\U000025BA')\n self.platform_go_button.setToolTip('Поехали')\n self.platform_go_button.setEnabled(False)\n # noinspection PyUnresolvedReferences\n self.platform_thread.signal.move_done.connect(\n self.platform_go_button.setEnabled\n )\n # noinspection PyUnresolvedReferences\n self.platform_go_button.clicked.connect(self.send_coords)\n platform_menu.addWidget(self.platform_go_button)\n\n steps_validator = QIntValidator(-99999, 99999)\n steps_in_mm_validator = QIntValidator(1, 99999)\n\n self.steps_in_mm = QLineEdit('300')\n self.steps_in_mm.setAlignment(Qt.AlignRight)\n self.steps_in_mm.setValidator(steps_in_mm_validator)\n\n self.label_1 = QLabel('0.000')\n self.label_1.setFixedWidth(60)\n self.label_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_2 = QLabel('0.000')\n self.label_2.setFixedWidth(60)\n self.label_2.setAlignment(Qt.AlignRight | 
Qt.AlignVCenter)\n self.label_3 = QLabel('0.000')\n self.label_3.setFixedWidth(60)\n self.label_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_4 = QLabel('0.000')\n self.label_4.setFixedWidth(60)\n self.label_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n\n platform_menu.addWidget(QLabel('X'))\n self.rod_1 = QLineEdit('0')\n self.rod_1.setAlignment(Qt.AlignRight)\n self.rod_1.setValidator(steps_validator)\n self.rod_1.textChanged.connect(\n lambda text: self.convert_steps_to_mm(text, self.label_1)\n )\n platform_menu.addWidget(self.rod_1)\n\n platform_menu.addWidget(QLabel('Y'))\n self.rod_2 = QLineEdit('0')\n self.rod_2.setAlignment(Qt.AlignRight)\n self.rod_2.setValidator(steps_validator)\n self.rod_2.textChanged.connect(\n lambda text: self.convert_steps_to_mm(text, self.label_2)\n )\n platform_menu.addWidget(self.rod_2)\n\n platform_menu.addWidget(QLabel('Z'))\n self.rod_3 = QLineEdit('0')\n self.rod_3.setAlignment(Qt.AlignRight)\n self.rod_3.setValidator(steps_validator)\n self.rod_3.textChanged.connect(\n lambda text: self.convert_steps_to_mm(text, self.label_3)\n )\n platform_menu.addWidget(self.rod_3)\n\n platform_menu.addWidget(QLabel('A'))\n self.rod_4 = QLineEdit('0')\n self.rod_4.setAlignment(Qt.AlignRight)\n self.rod_4.setValidator(steps_validator)\n self.rod_4.textChanged.connect(\n lambda text: self.convert_steps_to_mm(text, self.label_4)\n )\n platform_menu.addWidget(self.rod_4)\n\n platform_menu.addWidget(self.create_vline())\n\n platform_menu.addWidget(QLabel('Синхр.:'))\n\n self.sync_box = QCheckBox()\n # noinspection PyUnresolvedReferences\n self.sync_box.stateChanged.connect(self.disable_roads)\n platform_menu.addWidget(self.sync_box)\n\n # ___________________________INFO______________________________________\n\n info = QHBoxLayout()\n info.addWidget(QLabel('Шагов в мм:'))\n info.addWidget(self.steps_in_mm)\n info.addWidget(QLabel('Смещение по осям:'))\n info.addWidget(QLabel('X = '))\n info.addWidget(self.label_1)\n info.addWidget(QLabel('мм;'))\n info.addWidget(QLabel('Y = '))\n info.addWidget(self.label_2)\n info.addWidget(QLabel('мм;'))\n info.addWidget(QLabel('Z = '))\n info.addWidget(self.label_3)\n info.addWidget(QLabel('мм;'))\n info.addWidget(QLabel('A = '))\n info.addWidget(self.label_4)\n info.addWidget(QLabel('мм;'))\n\n # ___________________________VALUES TABLE______________________________\n\n table = QGridLayout()\n\n table.addWidget(Margin('X'), 1, 0)\n table.addWidget(Margin('Y'), 2, 0)\n table.addWidget(Margin('Z'), 3, 0)\n\n table.addWidget(Header('Угол (гр.)'), 0, 1)\n table.addWidget(Header('Ускорение (g)'), 0, 2)\n table.addWidget(Header('Скорость (гр./сек.)'), 0, 3)\n\n table.addWidget(self.create_lcd(self.imu_thread.signal.angle_x), 1, 1)\n table.addWidget(self.create_lcd(self.imu_thread.signal.angle_y), 2, 1)\n table.addWidget(self.create_lcd(self.imu_thread.signal.angle_z), 3, 1)\n\n table.addWidget(self.create_lcd(self.imu_thread.signal.accel_x), 1, 2)\n table.addWidget(self.create_lcd(self.imu_thread.signal.accel_y), 2, 2)\n table.addWidget(self.create_lcd(self.imu_thread.signal.accel_z), 3, 2)\n\n table.addWidget(self.create_lcd(self.imu_thread.signal.vel_x), 1, 3)\n table.addWidget(self.create_lcd(self.imu_thread.signal.vel_y), 2, 3)\n table.addWidget(self.create_lcd(self.imu_thread.signal.vel_z), 3, 3)\n\n table.setColumnStretch(0, 2)\n table.setColumnStretch(1, 5)\n table.setColumnStretch(2, 5)\n table.setColumnStretch(3, 5)\n\n table.setRowStretch(0, 1)\n table.setRowStretch(1, 10)\n table.setRowStretch(2, 10)\n 
table.setRowStretch(3, 10)\n\n # ___________________________LAYOUT____________________________________\n\n layout = QVBoxLayout()\n layout.addLayout(imu_menu)\n layout.addWidget(self.create_hline())\n layout.addLayout(platform_menu)\n layout.addWidget(self.create_hline())\n layout.addLayout(info)\n layout.addWidget(self.create_hline())\n layout.addLayout(table)\n\n self.setLayout(layout)\n\n self.setFixedSize(750, 400)\n self.setWindowTitle('MPU6050')\n self.show()\n\n def convert_steps_to_mm(self, text, label):\n try:\n value = int(text)\n steps_in_mm = int(self.steps_in_mm.text())\n except ValueError:\n value = steps_in_mm = 0\n\n if steps_in_mm > 0:\n value /= steps_in_mm\n label.setText('{:5.3f}'.format(value))\n else:\n label.setText('0.000')\n\n\n# noinspection PyCallByClass,PyArgumentList\ndef main():\n pyqt = os.path.dirname(PyQt5.__file__)\n # noinspection PyTypeChecker\n QApplication.addLibraryPath(os.path.join(pyqt, 'plugins'))\n\n app = QApplication(sys.argv)\n app.setStyleSheet(STYLE)\n\n ex = Interface()  # keep a reference so the dialog is not garbage-collected\n\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mpu6050-qt.py","file_name":"mpu6050-qt.py","file_ext":"py","file_size_in_byte":26258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
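A note on the decode_imu_data method in the mpu6050-qt.py record above: it unpacks 0x55-prefixed sub-packets of 11 bytes each (header 0x55, a type byte of 0x51/0x52/0x53, four little-endian int16 words, and a checksum byte), then scales the raw words to g, deg/s, or degrees; this framing and the t/340 + 36.25 temperature term match WitMotion-style MPU6050 serial modules. A minimal standalone sketch of the same decoding using only the standard-library struct module (helper names here are illustrative, and the sample packet is fabricated):

import struct

# Scale factors mirrored from decode_imu_data (full range / int16 range).
SCALES = {
    0x51: 16 / 32768,    # acceleration: +/-16 g
    0x52: 2000 / 32768,  # angular velocity: +/-2000 deg/s
    0x53: 180 / 32768,   # angle: +/-180 deg
}

def decode_subpacket(chunk):
    """Decode one 11-byte sub-packet: 0x55, type, 4x int16 LE, checksum."""
    if chunk[0] != 0x55:
        raise ValueError('bad packet header')
    kind = chunk[1]
    x, y, z, t = struct.unpack_from('<hhhh', chunk, 2)
    scale = SCALES[kind]
    # The fourth word carries the chip temperature in this protocol family.
    return x * scale, y * scale, z * scale, t / 340 + 36.25

# Fabricated angle sub-packet: raw 16384 corresponds to 90 deg (16384 * 180 / 32768).
sample = bytes([0x55, 0x53]) + struct.pack('<hhhh', 16384, 0, 0, 0) + b'\x00'
print(decode_subpacket(sample))  # (90.0, 0.0, 0.0, 36.25)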
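Likewise, pcm2float in the TIMIT plotting record only widens int16 samples to float64; the commented-out return shows the normalisation the author left disabled, so the waveform is plotted in raw sample units. If a normalised waveform is wanted, a sketch assuming 16-bit PCM input (which the np.int16 read in plot_probs_ctc_phone implies; the function name is illustrative):

import numpy as np

def pcm16_to_float(short_ndary):
    """Map int16 PCM samples onto [-1.0, 1.0)."""
    return np.asarray(short_ndary, dtype=np.float64) / 32768.0

print(pcm16_to_float(np.array([-32768, 0, 16384], dtype=np.int16)))
# [-1.   0.   0.5]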